// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>

#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"
#include "hclge_devlink.h"

#define HCLGE_NAME			"hclge"
#define HCLGE_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset)))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
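
/* HCLGE_STATS_READ fetches the u64 counter stored at a byte offset inside a
 * stats struct, e.g.:
 *	u64 pause = HCLGE_STATS_READ(&hdev->mac_stats,
 *			HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num));
 */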

#define HCLGE_BUF_SIZE_UNIT	256U
#define HCLGE_BUF_MUL_BY	2
#define HCLGE_BUF_DIV_BY	2
#define NEED_RESERVE_TC_NUM	2
#define BUF_MAX_PERCENT		100
#define BUF_RESERVE_PERCENT	90

#define HCLGE_RESET_MAX_FAIL_CNT	5
#define HCLGE_RESET_SYNC_TIME		100
#define HCLGE_PF_RESET_SYNC_TIME	20
#define HCLGE_PF_RESET_SYNC_CNT		1500

/* Get DFX BD number offset */
#define HCLGE_DFX_BIOS_BD_OFFSET	1
#define HCLGE_DFX_SSU_0_BD_OFFSET	2
#define HCLGE_DFX_SSU_1_BD_OFFSET	3
#define HCLGE_DFX_IGU_BD_OFFSET		4
#define HCLGE_DFX_RPU_0_BD_OFFSET	5
#define HCLGE_DFX_RPU_1_BD_OFFSET	6
#define HCLGE_DFX_NCSI_BD_OFFSET	7
#define HCLGE_DFX_RTC_BD_OFFSET		8
#define HCLGE_DFX_PPP_BD_OFFSET		9
#define HCLGE_DFX_RCB_BD_OFFSET		10
#define HCLGE_DFX_TQP_BD_OFFSET		11
#define HCLGE_DFX_SSU_2_BD_OFFSET	12

#define HCLGE_LINK_STATUS_MS	10

static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static int hclge_clear_arfs_rules(struct hclge_dev *hdev);
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
						   unsigned long *addr);
static int hclge_set_default_loopback(struct hclge_dev *hdev);

static void hclge_sync_mac_table(struct hclge_dev *hdev);
static void hclge_restore_hw_table(struct hclge_dev *hdev);
static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
static void hclge_sync_fd_table(struct hclge_dev *hdev);

static struct hnae3_ae_algo ae_algo;

static struct workqueue_struct *hclge_wq;

static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);

static const u32 cmdq_reg_addr_list[] = {HCLGE_NIC_CSQ_BASEADDR_L_REG,
					 HCLGE_NIC_CSQ_BASEADDR_H_REG,
					 HCLGE_NIC_CSQ_DEPTH_REG,
					 HCLGE_NIC_CSQ_TAIL_REG,
					 HCLGE_NIC_CSQ_HEAD_REG,
					 HCLGE_NIC_CRQ_BASEADDR_L_REG,
					 HCLGE_NIC_CRQ_BASEADDR_H_REG,
					 HCLGE_NIC_CRQ_DEPTH_REG,
					 HCLGE_NIC_CRQ_TAIL_REG,
					 HCLGE_NIC_CRQ_HEAD_REG,
					 HCLGE_VECTOR0_CMDQ_SRC_REG,
					 HCLGE_CMDQ_INTR_STS_REG,
					 HCLGE_CMDQ_INTR_EN_REG,
					 HCLGE_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
					   HCLGE_PF_OTHER_INT_REG,
					   HCLGE_MISC_RESET_STS_REG,
					   HCLGE_MISC_VECTOR_INT_STS,
					   HCLGE_GLOBAL_RESET_REG,
					   HCLGE_FUN_RST_ING,
					   HCLGE_GRO_EN_REG};

static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
					 HCLGE_RING_RX_ADDR_H_REG,
					 HCLGE_RING_RX_BD_NUM_REG,
					 HCLGE_RING_RX_BD_LENGTH_REG,
					 HCLGE_RING_RX_MERGE_EN_REG,
					 HCLGE_RING_RX_TAIL_REG,
					 HCLGE_RING_RX_HEAD_REG,
					 HCLGE_RING_RX_FBD_NUM_REG,
					 HCLGE_RING_RX_OFFSET_REG,
					 HCLGE_RING_RX_FBD_OFFSET_REG,
					 HCLGE_RING_RX_STASH_REG,
					 HCLGE_RING_RX_BD_ERR_REG,
					 HCLGE_RING_TX_ADDR_L_REG,
					 HCLGE_RING_TX_ADDR_H_REG,
					 HCLGE_RING_TX_BD_NUM_REG,
					 HCLGE_RING_TX_PRIORITY_REG,
					 HCLGE_RING_TX_TC_REG,
					 HCLGE_RING_TX_MERGE_EN_REG,
					 HCLGE_RING_TX_TAIL_REG,
					 HCLGE_RING_TX_HEAD_REG,
					 HCLGE_RING_TX_FBD_NUM_REG,
					 HCLGE_RING_TX_OFFSET_REG,
					 HCLGE_RING_TX_EBD_NUM_REG,
					 HCLGE_RING_TX_EBD_OFFSET_REG,
					 HCLGE_RING_TX_BD_ERR_REG,
					 HCLGE_RING_EN_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
					     HCLGE_TQP_INTR_GL0_REG,
					     HCLGE_TQP_INTR_GL1_REG,
					     HCLGE_TQP_INTR_GL2_REG,
					     HCLGE_TQP_INTR_RL_REG};

static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"App    Loopback test",
	"Serdes serial Loopback test",
	"Serdes parallel Loopback test",
	"Phy    Loopback test"
};

static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
	{"mac_rx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
	{"mac_tx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};

static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(ETH_P_LLDP),
		.mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
		.i_port_bitmap = 0x1,
	},
};
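
/* Default RSS hash key: these 40 bytes are the well-known example Toeplitz
 * key from Microsoft's RSS specification, widely reused as a default by
 * NIC drivers.
 */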
static const u8 hclge_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};

static const u32 hclge_dfx_bd_offset_list[] = {
	HCLGE_DFX_BIOS_BD_OFFSET,
	HCLGE_DFX_SSU_0_BD_OFFSET,
	HCLGE_DFX_SSU_1_BD_OFFSET,
	HCLGE_DFX_IGU_BD_OFFSET,
	HCLGE_DFX_RPU_0_BD_OFFSET,
	HCLGE_DFX_RPU_1_BD_OFFSET,
	HCLGE_DFX_NCSI_BD_OFFSET,
	HCLGE_DFX_RTC_BD_OFFSET,
	HCLGE_DFX_PPP_BD_OFFSET,
	HCLGE_DFX_RCB_BD_OFFSET,
	HCLGE_DFX_TQP_BD_OFFSET,
	HCLGE_DFX_SSU_2_BD_OFFSET
};

static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
	HCLGE_OPC_DFX_BIOS_COMMON_REG,
	HCLGE_OPC_DFX_SSU_REG_0,
	HCLGE_OPC_DFX_SSU_REG_1,
	HCLGE_OPC_DFX_IGU_EGU_REG,
	HCLGE_OPC_DFX_RPU_REG_0,
	HCLGE_OPC_DFX_RPU_REG_1,
	HCLGE_OPC_DFX_NCSI_REG,
	HCLGE_OPC_DFX_RTC_REG,
	HCLGE_OPC_DFX_PPP_REG,
	HCLGE_OPC_DFX_RCB_REG,
	HCLGE_OPC_DFX_TQP_REG,
	HCLGE_OPC_DFX_SSU_REG_2
};

static const struct key_info meta_data_key_info[] = {
	{ PACKET_TYPE_ID, 6 },
	{ IP_FRAGEMENT, 1 },
	{ ROCE_TYPE, 1 },
	{ NEXT_KEY, 5 },
	{ VLAN_NUMBER, 2 },
	{ SRC_VPORT, 12 },
	{ DST_VPORT, 12 },
	{ TUNNEL_PACKET, 1 },
};

static const struct key_info tuple_key_info[] = {
	{ OUTER_DST_MAC, 48, KEY_OPT_MAC, -1, -1 },
	{ OUTER_SRC_MAC, 48, KEY_OPT_MAC, -1, -1 },
	{ OUTER_VLAN_TAG_FST, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_ETH_TYPE, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_L2_RSV, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_IP_TOS, 8, KEY_OPT_U8, -1, -1 },
	{ OUTER_IP_PROTO, 8, KEY_OPT_U8, -1, -1 },
	{ OUTER_SRC_IP, 32, KEY_OPT_IP, -1, -1 },
	{ OUTER_DST_IP, 32, KEY_OPT_IP, -1, -1 },
	{ OUTER_L3_RSV, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_SRC_PORT, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_DST_PORT, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 },
	{ OUTER_TUN_VNI, 24, KEY_OPT_VNI, -1, -1 },
	{ OUTER_TUN_FLOW_ID, 8, KEY_OPT_U8, -1, -1 },
	{ INNER_DST_MAC, 48, KEY_OPT_MAC,
	  offsetof(struct hclge_fd_rule, tuples.dst_mac),
	  offsetof(struct hclge_fd_rule, tuples_mask.dst_mac) },
	{ INNER_SRC_MAC, 48, KEY_OPT_MAC,
	  offsetof(struct hclge_fd_rule, tuples.src_mac),
	  offsetof(struct hclge_fd_rule, tuples_mask.src_mac) },
	{ INNER_VLAN_TAG_FST, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.vlan_tag1),
	  offsetof(struct hclge_fd_rule, tuples_mask.vlan_tag1) },
	{ INNER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
	{ INNER_ETH_TYPE, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.ether_proto),
	  offsetof(struct hclge_fd_rule, tuples_mask.ether_proto) },
	{ INNER_L2_RSV, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.l2_user_def),
	  offsetof(struct hclge_fd_rule, tuples_mask.l2_user_def) },
	{ INNER_IP_TOS, 8, KEY_OPT_U8,
	  offsetof(struct hclge_fd_rule, tuples.ip_tos),
	  offsetof(struct hclge_fd_rule, tuples_mask.ip_tos) },
	{ INNER_IP_PROTO, 8, KEY_OPT_U8,
	  offsetof(struct hclge_fd_rule, tuples.ip_proto),
	  offsetof(struct hclge_fd_rule, tuples_mask.ip_proto) },
	{ INNER_SRC_IP, 32, KEY_OPT_IP,
	  offsetof(struct hclge_fd_rule, tuples.src_ip),
	  offsetof(struct hclge_fd_rule, tuples_mask.src_ip) },
	{ INNER_DST_IP, 32, KEY_OPT_IP,
	  offsetof(struct hclge_fd_rule, tuples.dst_ip),
	  offsetof(struct hclge_fd_rule, tuples_mask.dst_ip) },
	{ INNER_L3_RSV, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.l3_user_def),
	  offsetof(struct hclge_fd_rule, tuples_mask.l3_user_def) },
	{ INNER_SRC_PORT, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.src_port),
	  offsetof(struct hclge_fd_rule, tuples_mask.src_port) },
	{ INNER_DST_PORT, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.dst_port),
	  offsetof(struct hclge_fd_rule, tuples_mask.dst_port) },
	{ INNER_L4_RSV, 32, KEY_OPT_LE32,
	  offsetof(struct hclge_fd_rule, tuples.l4_user_def),
	  offsetof(struct hclge_fd_rule, tuples_mask.l4_user_def) },
};

static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21

	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		/* for special opcode 0032, only the first desc has the head */
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	return 0;
}

static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
{
	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc *desc;
	__le64 *desc_data;
	u32 i, k, n;
	int ret;

	/* This may be called inside atomic sections,
	 * so GFP_ATOMIC is more suitable here
	 */
	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
	if (ret) {
		kfree(desc);
		return ret;
	}

	for (i = 0; i < desc_num; i++) {
		/* for special opcode 0034, only the first desc has the head */
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	kfree(desc);

	return 0;
}

static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
{
	struct hclge_desc desc;
	__le32 *desc_data;
	u32 reg_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	desc_data = (__le32 *)(&desc.data[0]);
	reg_num = le32_to_cpu(*desc_data);
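
	/* Stats are returned as 64-bit words: the first BD keeps the command
	 * head and has room for 3 words in its data area, while each
	 * following BD is reused whole and carries 4 words (matching
	 * HCLGE_RD_FIRST_STATS_NUM and HCLGE_RD_OTHER_STATS_NUM above).
	 * The line below is therefore 1 + DIV_ROUND_UP(reg_num - 3, 4).
	 */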
	*desc_num = 1 + ((reg_num - 3) >> 2) +
		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);

	return 0;
}

static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
	u32 desc_num;
	int ret;

	ret = hclge_mac_query_reg_num(hdev, &desc_num);
	/* The firmware supports the new statistics acquisition method */
	if (!ret)
		ret = hclge_mac_update_stats_complete(hdev, desc_num);
	else if (ret == -EOPNOTSUPP)
		ret = hclge_mac_update_stats_defective(hdev);
	else
		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");

	return ret;
}

static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
					   true);

		desc[0].data[0] = cpu_to_le32(tqp->index);
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATS,
					   true);

		desc[0].data[0] = cpu_to_le32(tqp->index);
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}

static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	/* each tqp has one TX queue and one RX queue */
	return kinfo->num_tqps * 2;
}

static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}

static u64 *hclge_comm_get_stats(const void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	int i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	int i;

	if (stringset != ETH_SS_STATS)
		return (u8 *)buff;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}

static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status)
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);
}

static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int status;

	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}

static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK | \
		HNAE3_SUPPORT_PHY_LOOPBACK | \
		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK | \
		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only GE mode supports it
	 * serdes: all mac modes support it, including GE/XGE/LGE/CGE
	 * phy: only supported when a phy device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
		}

		count += 2;
		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;

		if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
		     hdev->hw.mac.phydev->drv->set_loopback) ||
		    hnae3_dev_phy_imp_supported(hdev)) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
		}
	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}

static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
			      u8 *data)
{
	u8 *p = (char *)data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
					   size, p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}

static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string), data);
	p = hclge_tqps_get_stats(handle, p);
}

static void hclge_get_mac_stat(struct hnae3_handle *handle,
			       struct hns3_mac_stats *mac_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_update_stats(handle, NULL);

	mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
	mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
}

static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
#define HCLGE_MAC_ID_MASK 0xF

	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
	return 0;
}

static int hclge_query_function_status(struct hclge_dev *hdev)
{
#define HCLGE_QUERY_MAX_CNT 5

	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n", ret);
			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < HCLGE_QUERY_MAX_CNT);

	return hclge_parse_func_status(hdev, req);
}

static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = le16_to_cpu(req->tqp_num) +
			 le16_to_cpu(req->ext_tqp_num);
	hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
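
	/* Note: the firmware reports these buffer sizes in units of
	 * (1 << HCLGE_BUF_UNIT_S) bytes; the shifts above and below
	 * convert them to bytes.
	 */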
	if (req->tx_buf_size)
		hdev->tx_buf_size =
			le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (req->dv_buf_size)
		hdev->dv_buf_size =
			le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->dv_buf_size = HCLGE_DEFAULT_DV;

	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

	hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
		dev_err(&hdev->pdev->dev,
			"only %u msi resources available, not enough for pf(min:2).\n",
			hdev->num_nic_msi);
		return -EINVAL;
	}

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->num_roce_msi =
			le16_to_cpu(req->pf_intr_vector_number_roce);

		/* PF should have NIC vectors and RoCE vectors,
		 * NIC vectors are queued before RoCE vectors.
		 */
		hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
	} else {
		hdev->num_msi = hdev->num_nic_msi;
	}

	return 0;
}

static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
{
	switch (speed_cmd) {
	case HCLGE_FW_MAC_SPEED_10M:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case HCLGE_FW_MAC_SPEED_100M:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case HCLGE_FW_MAC_SPEED_1G:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case HCLGE_FW_MAC_SPEED_10G:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case HCLGE_FW_MAC_SPEED_25G:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case HCLGE_FW_MAC_SPEED_40G:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case HCLGE_FW_MAC_SPEED_50G:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case HCLGE_FW_MAC_SPEED_100G:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	case HCLGE_FW_MAC_SPEED_200G:
		*speed = HCLGE_MAC_SPEED_200G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static const struct hclge_speed_bit_map speed_bit_map[] = {
	{HCLGE_MAC_SPEED_10M, HCLGE_SUPPORT_10M_BIT},
	{HCLGE_MAC_SPEED_100M, HCLGE_SUPPORT_100M_BIT},
	{HCLGE_MAC_SPEED_1G, HCLGE_SUPPORT_1G_BIT},
	{HCLGE_MAC_SPEED_10G, HCLGE_SUPPORT_10G_BIT},
	{HCLGE_MAC_SPEED_25G, HCLGE_SUPPORT_25G_BIT},
	{HCLGE_MAC_SPEED_40G, HCLGE_SUPPORT_40G_BIT},
	{HCLGE_MAC_SPEED_50G, HCLGE_SUPPORT_50G_BIT},
	{HCLGE_MAC_SPEED_100G, HCLGE_SUPPORT_100G_BIT},
	{HCLGE_MAC_SPEED_200G, HCLGE_SUPPORT_200G_BIT},
};

static int hclge_get_speed_bit(u32 speed, u32 *speed_bit)
{
	u16 i;

	for (i = 0; i < ARRAY_SIZE(speed_bit_map); i++) {
		if (speed == speed_bit_map[i].speed) {
			*speed_bit = speed_bit_map[i].speed_bit;
			return 0;
		}
	}

	return -EINVAL;
}

static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 speed_ability = hdev->hw.mac.speed_ability;
	u32 speed_bit = 0;
	int ret;

	ret = hclge_get_speed_bit(speed, &speed_bit);
	if (ret)
		return ret;

	if (speed_bit & speed_ability)
		return 0;

	return -EINVAL;
}

static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(
			ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
			mac->supported);
}

static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_fec(struct hclge_mac *mac)
{
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);

	switch (mac->speed) {
	case HCLGE_MAC_SPEED_10G:
	case HCLGE_MAC_SPEED_40G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_25G:
	case HCLGE_MAC_SPEED_50G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
			BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_100G:
	case HCLGE_MAC_SPEED_200G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
		break;
	default:
		mac->fec_ability = 0;
		break;
	}
}

static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u16 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
				 mac->supported);

	hclge_convert_setting_sr(mac, speed_ability);
	hclge_convert_setting_lr(mac, speed_ability);
	hclge_convert_setting_cr(mac, speed_ability);
	if (hnae3_dev_fec_supported(hdev))
		hclge_convert_setting_fec(mac);

	if (hnae3_dev_pause_supported(hdev))
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);

	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
					    u16 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_convert_setting_kr(mac, speed_ability);
	if (hnae3_dev_fec_supported(hdev))
		hclge_convert_setting_fec(mac);

	if (hnae3_dev_pause_supported(hdev))
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);

	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
					 u16 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	/* default to support all speeds for GE ports */
	if (!speed_ability)
		speed_ability = HCLGE_SUPPORT_GE;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
				 supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
				 supported);
	}

	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
	}

	if (hnae3_dev_pause_supported(hdev)) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
	}

	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
}

static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
		hclge_parse_fiber_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
		hclge_parse_copper_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
		hclge_parse_backplane_link_mode(hdev, speed_ability);
}

static u32 hclge_get_max_speed(u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		return HCLGE_MAC_SPEED_200G;

	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		return HCLGE_MAC_SPEED_100G;

	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		return HCLGE_MAC_SPEED_50G;

	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		return HCLGE_MAC_SPEED_40G;

	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		return HCLGE_MAC_SPEED_25G;

	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		return HCLGE_MAC_SPEED_10G;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		return HCLGE_MAC_SPEED_1G;

	if (speed_ability & HCLGE_SUPPORT_100M_BIT)
		return HCLGE_MAC_SPEED_100M;

	if (speed_ability & HCLGE_SUPPORT_10M_BIT)
		return HCLGE_MAC_SPEED_10M;

	return HCLGE_MAC_SPEED_1G;
}

static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
#define HCLGE_TX_SPARE_SIZE_UNIT	4096
#define SPEED_ABILITY_EXT_SHIFT		8

	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u16 speed_ability_ext;
	u64 mac_addr_tmp;
	unsigned int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
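	/* param[2] supplies the low 32 bits of the MAC address and param[3]
	 * the high 16 bits; the two-step shift above is equivalent to << 32
	 * (mac_addr_tmp_high is u64), presumably written this way to avoid
	 * shift-count warnings on 32-bit builds.
	 */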

	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					       HCLGE_CFG_RSS_SIZE_M,
					       HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
	speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
					    HCLGE_CFG_SPEED_ABILITY_EXT_M,
					    HCLGE_CFG_SPEED_ABILITY_EXT_S);
	cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;

	cfg->vlan_fliter_cap = hnae3_get_field(__le32_to_cpu(req->param[1]),
					       HCLGE_CFG_VLAN_FLTR_CAP_M,
					       HCLGE_CFG_VLAN_FLTR_CAP_S);

	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);

	cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
					       HCLGE_CFG_PF_RSS_SIZE_M,
					       HCLGE_CFG_PF_RSS_SIZE_S);

	/* HCLGE_CFG_PF_RSS_SIZE_M holds the PF max rss size as a power-of-2
	 * exponent rather than the value itself, which is more flexible for
	 * future changes and expansions. A zero PF field does not make
	 * sense, so in that case the PF falls back to the same max rss size
	 * as the VF field: HCLGE_CFG_RSS_SIZE_S.
	 */
	cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
			       1U << cfg->pf_rss_size_max :
			       cfg->vf_rss_size_max;

	/* The unit of the tx spare buffer size queried from the
	 * configuration file is HCLGE_TX_SPARE_SIZE_UNIT (4096) bytes, so a
	 * conversion is needed here.
	 */
	cfg->tx_spare_buf_size = hnae3_get_field(__le32_to_cpu(req->param[2]),
						 HCLGE_CFG_TX_SPARE_BUF_SIZE_M,
						 HCLGE_CFG_TX_SPARE_BUF_SIZE_S);
	cfg->tx_spare_buf_size *= HCLGE_TX_SPARE_SIZE_UNIT;
}

/* hclge_get_cfg: query the static parameter from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be fetched
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	unsigned int i;
	int ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* the length field is expressed in units of 4 bytes when
		 * sent to hardware
		 */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}

static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
{
#define HCLGE_MAX_NON_TSO_BD_NUM	8U

	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);

	ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
	ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
	ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
	ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
	ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
	ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
	ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
	ae_dev->dev_specs.umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}

static void hclge_parse_dev_specs(struct hclge_dev *hdev,
				  struct hclge_desc *desc)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclge_dev_specs_0_cmd *req0;
	struct hclge_dev_specs_1_cmd *req1;

	req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
	req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;

	ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
	ae_dev->dev_specs.rss_ind_tbl_size =
		le16_to_cpu(req0->rss_ind_tbl_size);
	ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
	ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
	ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
	ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
	ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
	ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
	ae_dev->dev_specs.umv_size = le16_to_cpu(req1->umv_size);
	ae_dev->dev_specs.mc_mac_size = le16_to_cpu(req1->mc_mac_size);
}

static void hclge_check_dev_specs(struct hclge_dev *hdev)
{
	struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;

	if (!dev_specs->max_non_tso_bd_num)
		dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
	if (!dev_specs->rss_ind_tbl_size)
		dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
	if (!dev_specs->rss_key_size)
		dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
	if (!dev_specs->max_tm_rate)
		dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
	if (!dev_specs->max_qset_num)
		dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
	if (!dev_specs->max_int_gl)
		dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
	if (!dev_specs->max_frm_size)
		dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
	if (!dev_specs->umv_size)
		dev_specs->umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}

static int hclge_query_dev_specs(struct hclge_dev *hdev)
{
	struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
	int ret;
	int i;

	/* set default specifications as devices lower than version V3 do not
	 * support querying specifications from firmware.
	 */
	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
		hclge_set_default_dev_specs(hdev);
		return 0;
	}

	for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
					   true);
		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	}
	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
	if (ret)
		return ret;

	hclge_parse_dev_specs(hdev, desc);
	hclge_check_dev_specs(hdev);

	return 0;
}

static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	return hclge_query_pf_resource(hdev);
}

static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
{
#define HCLGE_MIN_TX_DESC	64
#define HCLGE_MIN_RX_DESC	64

	if (!is_kdump_kernel())
		return;

	dev_info(&hdev->pdev->dev,
		 "Running kdump kernel. Using minimal resources\n");

	/* minimum number of queue pairs equals the number of vports */
	hdev->num_tqps = hdev->num_req_vfs + 1;
	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
}

static int hclge_configure(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	const struct cpumask *cpumask = cpu_online_mask;
	struct hclge_cfg cfg;
	unsigned int i;
	int node, ret;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret)
		return ret;

	hdev->base_tqp_pid = 0;
	hdev->vf_rss_size_max = cfg.vf_rss_size_max;
	hdev->pf_rss_size_max = cfg.pf_rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_tx_desc = cfg.tqp_desc_num;
	hdev->num_rx_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;
	if (cfg.umv_space)
		hdev->wanted_umv_size = cfg.umv_space;
	else
		hdev->wanted_umv_size = hdev->ae_dev->dev_specs.umv_size;
	hdev->tx_spare_buf_size = cfg.tx_spare_buf_size;
	hdev->gro_en = true;
	if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF)
		set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);

	if (hnae3_dev_fd_supported(hdev)) {
		hdev->fd_en = true;
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
	}

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
			cfg.default_speed, ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = 1;

	/* Currently non-contiguous TCs are not supported */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	hclge_init_kdump_kernel_config(hdev);

	/* Set the affinity based on numa node */
	node = dev_to_node(&hdev->pdev->dev);
	if (node != NUMA_NO_NODE)
		cpumask = cpumask_of_node(node);

	cpumask_copy(&hdev->affinity_mask, cpumask);

	return ret;
}

static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
			    u16 tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
	req->tso_mss_min = cpu_to_le16(tso_mss_min);
	req->tso_mss_max = cpu_to_le16(tso_mss_max);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_config_gro(struct hclge_dev *hdev)
{
	struct hclge_cfg_gro_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
	req = (struct hclge_cfg_gro_status_cmd *)desc.data;

	req->gro_en = hdev->gro_en ? 1 : 0;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"GRO hardware config cmd failed, ret = %d\n", ret);

	return ret;
}

static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;

		/* need an extended offset to configure queues >=
		 * HCLGE_TQP_MAX_SIZE_DEV_V2
		 */
		if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
			tqp->q.io_base = hdev->hw.io_base +
					 HCLGE_TQP_REG_OFFSET +
					 i * HCLGE_TQP_REG_SIZE;
		else
			tqp->q.io_base = hdev->hw.io_base +
					 HCLGE_TQP_REG_OFFSET +
					 HCLGE_TQP_EXT_REG_OFFSET +
					 (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
					 HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
	if (!is_pf)
		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

	return ret;
}

static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
			kinfo->tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = alloced;
	kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
				vport->alloc_tqps / hdev->tm_info.num_tc);

	/* ensure a one-to-one mapping between irq and queue by default */
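	/* (vector0 is dedicated to misc events such as mailbox and reset,
	 * hence the num_nic_msi - 1 below.)
	 */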
	kinfo->rss_size = min_t(u16, kinfo->rss_size,
				(hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);

	return 0;
}

static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
			    u16 num_tx_desc, u16 num_rx_desc)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;

	kinfo->num_tx_desc = num_tx_desc;
	kinfo->num_rx_desc = num_rx_desc;

	kinfo->rx_buf_len = hdev->rx_buf_len;
	kinfo->tx_spare_buf_size = hdev->tx_spare_buf_size;

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, num_tqps);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);

	return ret;
}

static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < vport->alloc_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}
1799 struct hclge_vport *vport = hdev->vport;
1802 num_vport = hdev->num_req_vfs + 1;
1803 for (i = 0; i < num_vport; i++) {
1806 ret = hclge_map_tqp_to_vport(hdev, vport);

static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;
	nic->kinfo.io_base = hdev->hw.io_base;

	ret = hclge_knic_setup(vport, num_tqps,
			       hdev->num_tx_desc, hdev->num_rx_desc);
	if (ret)
		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);

	return ret;
}

static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for the main NIC of the PF */
	num_vport = hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
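	/* vport 0 (the PF's own NIC) absorbs the remainder of the division,
	 * so no TQP is left unassigned.
	 */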

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;
		vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
		vport->rxvlan_cfg.rx_vlan_offload_en = true;
		vport->req_vlan_fltr_en = true;
		INIT_LIST_HEAD(&vport->vlan_list);
		INIT_LIST_HEAD(&vport->uc_mac_list);
		INIT_LIST_HEAD(&vport->mc_mac_list);
		spin_lock_init(&vport->mac_list_lock);

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}

static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);

	return ret;
}

static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);

	return ret;
}

static u32 hclge_get_tc_num(struct hclge_dev *hdev)
{
	unsigned int i;
	u32 cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}

/* Get the number of PFC-enabled TCs that have a private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	unsigned int i;
	int cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

/* Get the number of PFC-disabled TCs that have a private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	unsigned int i;
	int cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}

static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}
2011 static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
2012 struct hclge_pkt_buf_alloc *buf_alloc,
2015 u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
2016 u32 tc_num = hclge_get_tc_num(hdev);
2017 u32 shared_buf, aligned_mps;
2021 aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2023 if (hnae3_dev_dcb_supported(hdev))
2024 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
2027 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
2028 + hdev->dv_buf_size;
2030 shared_buf_tc = tc_num * aligned_mps + aligned_mps;
2031 shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
2032 HCLGE_BUF_SIZE_UNIT);
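/* Worked example under assumed values: with 4 enabled TCs and
 * aligned_mps = 1536 (mps = 1500 rounded up to 256 bytes),
 * shared_buf_tc = 4 * 1536 + 1536 = 7680; shared_std is then the larger
 * of shared_buf_min and 7680, rounded up to a 256-byte boundary.
 */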
2034 rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
2035 if (rx_all < rx_priv + shared_std)
2038 shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
2039 buf_alloc->s_buf.buf_size = shared_buf;
2040 if (hnae3_dev_dcb_supported(hdev)) {
2041 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
2042 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
2043 - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
2044 HCLGE_BUF_SIZE_UNIT);
2046 buf_alloc->s_buf.self.high = aligned_mps +
2047 HCLGE_NON_DCB_ADDITIONAL_BUF;
2048 buf_alloc->s_buf.self.low = aligned_mps;
2051 if (hnae3_dev_dcb_supported(hdev)) {
2052 hi_thrd = shared_buf - hdev->dv_buf_size;
2054 if (tc_num <= NEED_RESERVE_TC_NUM)
2055 hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
2059 hi_thrd = hi_thrd / tc_num;
2061 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
2062 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
2063 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
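/* For illustration, assuming shared_buf = 40960, dv_buf_size = 8192,
 * tc_num = 4 and aligned_mps = 1536: hi_thrd = (40960 - 8192) / 4 = 8192,
 * which is kept above 2 * 1536 and is already 256-byte aligned, and
 * lo_thrd = 8192 - 1536 / 2 = 7424.
 */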
2065 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
2066 lo_thrd = aligned_mps;
2069 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2070 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
2071 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
2077 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
2078 struct hclge_pkt_buf_alloc *buf_alloc)
2082 total_size = hdev->pkt_buf_size;
2084 /* alloc tx buffer for all enabled tc */
2085 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2086 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2088 if (hdev->hw_tc_map & BIT(i)) {
2089 if (total_size < hdev->tx_buf_size)
2092 priv->tx_buf_size = hdev->tx_buf_size;
2094 priv->tx_buf_size = 0;
2097 total_size -= priv->tx_buf_size;
2103 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
2104 struct hclge_pkt_buf_alloc *buf_alloc)
2106 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2107 u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2110 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2111 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2118 if (!(hdev->hw_tc_map & BIT(i)))
2123 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
2124 priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
2125 priv->wl.high = roundup(priv->wl.low + aligned_mps,
2126 HCLGE_BUF_SIZE_UNIT);
2129 priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
2133 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
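/* The private buffer covers the waterline plus headroom; e.g. with
 * wl.high = 3072 and a hypothetical dv_buf_size = 8192,
 * buf_size = 3072 + 8192 = 11264.
 */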
2136 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2139 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
2140 struct hclge_pkt_buf_alloc *buf_alloc)
2142 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2143 int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
2146 /* clear the TCs starting from the last one */
2147 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2148 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2149 unsigned int mask = BIT((unsigned int)i);
2151 if (hdev->hw_tc_map & mask &&
2152 !(hdev->tm_info.hw_pfc_map & mask)) {
2153 /* Clear the private buffer of TCs with PFC disabled */
2161 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2162 no_pfc_priv_num == 0)
2166 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2169 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2170 struct hclge_pkt_buf_alloc *buf_alloc)
2172 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2173 int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2176 /* clear the TCs starting from the last one */
2177 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2178 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2179 unsigned int mask = BIT((unsigned int)i);
2181 if (hdev->hw_tc_map & mask &&
2182 hdev->tm_info.hw_pfc_map & mask) {
2183 /* Reduce the number of PFC-enabled TCs with a private buffer */
2191 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2196 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2199 static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2200 struct hclge_pkt_buf_alloc *buf_alloc)
2202 #define COMPENSATE_BUFFER 0x3C00
2203 #define COMPENSATE_HALF_MPS_NUM 5
2204 #define PRIV_WL_GAP 0x1800
2206 u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2207 u32 tc_num = hclge_get_tc_num(hdev);
2208 u32 half_mps = hdev->mps >> 1;
2213 rx_priv = rx_priv / tc_num;
2215 if (tc_num <= NEED_RESERVE_TC_NUM)
2216 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2218 min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2219 COMPENSATE_HALF_MPS_NUM * half_mps;
2220 min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2221 rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
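/* Example with assumed values: dv_buf_size = 8192 and half_mps = 750 give
 * min_rx_priv = 8192 + 0x3C00 + 5 * 750 = 27302, rounded up to 27392;
 * each TC needs at least this much private room, otherwise the
 * private-only scheme is abandoned.
 */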
2222 if (rx_priv < min_rx_priv)
2225 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2226 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2233 if (!(hdev->hw_tc_map & BIT(i)))
2237 priv->buf_size = rx_priv;
2238 priv->wl.high = rx_priv - hdev->dv_buf_size;
2239 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2242 buf_alloc->s_buf.buf_size = 0;
2247 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2248 * @hdev: pointer to struct hclge_dev
2249 * @buf_alloc: pointer to buffer calculation data
2250 * @return: 0: calculation successful, negative: failure
2252 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2253 struct hclge_pkt_buf_alloc *buf_alloc)
2255 /* When DCB is not supported, rx private buffer is not allocated. */
2256 if (!hnae3_dev_dcb_supported(hdev)) {
2257 u32 rx_all = hdev->pkt_buf_size;
2259 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2260 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2266 if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2269 if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2272 /* try to decrease the buffer size */
2273 if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2276 if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2279 if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2285 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2286 struct hclge_pkt_buf_alloc *buf_alloc)
2288 struct hclge_rx_priv_buff_cmd *req;
2289 struct hclge_desc desc;
2293 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2294 req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2296 /* Alloc private buffers for the TCs */
2297 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2298 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2301 cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2303 cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2307 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2308 (1 << HCLGE_TC0_PRI_BUF_EN_B));
2310 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2312 dev_err(&hdev->pdev->dev,
2313 "rx private buffer alloc cmd failed %d\n", ret);
2318 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2319 struct hclge_pkt_buf_alloc *buf_alloc)
2321 struct hclge_rx_priv_wl_buf *req;
2322 struct hclge_priv_buf *priv;
2323 struct hclge_desc desc[2];
2327 for (i = 0; i < 2; i++) {
2328 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2330 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2332 /* The first descriptor sets the NEXT bit to 1 */
2334 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2336 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2338 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2339 u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2341 priv = &buf_alloc->priv_buf[idx];
2342 req->tc_wl[j].high =
2343 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2344 req->tc_wl[j].high |=
2345 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2347 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2348 req->tc_wl[j].low |=
2349 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2353 /* Send 2 descriptors at one time */
2354 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2356 dev_err(&hdev->pdev->dev,
2357 "rx private waterline config cmd failed %d\n",
2362 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2363 struct hclge_pkt_buf_alloc *buf_alloc)
2365 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2366 struct hclge_rx_com_thrd *req;
2367 struct hclge_desc desc[2];
2368 struct hclge_tc_thrd *tc;
2372 for (i = 0; i < 2; i++) {
2373 hclge_cmd_setup_basic_desc(&desc[i],
2374 HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2375 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2377 /* The first descriptor sets the NEXT bit to 1 */
2379 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2381 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2383 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2384 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2386 req->com_thrd[j].high =
2387 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2388 req->com_thrd[j].high |=
2389 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2390 req->com_thrd[j].low =
2391 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2392 req->com_thrd[j].low |=
2393 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2397 /* Send 2 descriptors at one time */
2398 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2400 dev_err(&hdev->pdev->dev,
2401 "common threshold config cmd failed %d\n", ret);
2405 static int hclge_common_wl_config(struct hclge_dev *hdev,
2406 struct hclge_pkt_buf_alloc *buf_alloc)
2408 struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2409 struct hclge_rx_com_wl *req;
2410 struct hclge_desc desc;
2413 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2415 req = (struct hclge_rx_com_wl *)desc.data;
2416 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2417 req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2419 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2420 req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2422 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2424 dev_err(&hdev->pdev->dev,
2425 "common waterline config cmd failed %d\n", ret);
2430 int hclge_buffer_alloc(struct hclge_dev *hdev)
2432 struct hclge_pkt_buf_alloc *pkt_buf;
2435 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2439 ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2441 dev_err(&hdev->pdev->dev,
2442 "could not calc tx buffer size for all TCs %d\n", ret);
2446 ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2448 dev_err(&hdev->pdev->dev,
2449 "could not alloc tx buffers %d\n", ret);
2453 ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2455 dev_err(&hdev->pdev->dev,
2456 "could not calc rx priv buffer size for all TCs %d\n",
2461 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2463 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2468 if (hnae3_dev_dcb_supported(hdev)) {
2469 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2471 dev_err(&hdev->pdev->dev,
2472 "could not configure rx private waterline %d\n",
2477 ret = hclge_common_thrd_config(hdev, pkt_buf);
2479 dev_err(&hdev->pdev->dev,
2480 "could not configure common threshold %d\n",
2486 ret = hclge_common_wl_config(hdev, pkt_buf);
2488 dev_err(&hdev->pdev->dev,
2489 "could not configure common waterline %d\n", ret);
2496 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2498 struct hnae3_handle *roce = &vport->roce;
2499 struct hnae3_handle *nic = &vport->nic;
2500 struct hclge_dev *hdev = vport->back;
2502 roce->rinfo.num_vectors = vport->back->num_roce_msi;
2504 if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
2507 roce->rinfo.base_vector = hdev->roce_base_vector;
2509 roce->rinfo.netdev = nic->kinfo.netdev;
2510 roce->rinfo.roce_io_base = hdev->hw.io_base;
2511 roce->rinfo.roce_mem_base = hdev->hw.mem_base;
2513 roce->pdev = nic->pdev;
2514 roce->ae_algo = nic->ae_algo;
2515 roce->numa_node_mask = nic->numa_node_mask;
2520 static int hclge_init_msi(struct hclge_dev *hdev)
2522 struct pci_dev *pdev = hdev->pdev;
2526 vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2528 PCI_IRQ_MSI | PCI_IRQ_MSIX);
2531 "failed(%d) to allocate MSI/MSI-X vectors\n",
2535 if (vectors < hdev->num_msi)
2536 dev_warn(&hdev->pdev->dev,
2537 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2538 hdev->num_msi, vectors);
2540 hdev->num_msi = vectors;
2541 hdev->num_msi_left = vectors;
2543 hdev->base_msi_vector = pdev->irq;
2544 hdev->roce_base_vector = hdev->base_msi_vector +
2547 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2548 sizeof(u16), GFP_KERNEL);
2549 if (!hdev->vector_status) {
2550 pci_free_irq_vectors(pdev);
2554 for (i = 0; i < hdev->num_msi; i++)
2555 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2557 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2558 sizeof(int), GFP_KERNEL);
2559 if (!hdev->vector_irq) {
2560 pci_free_irq_vectors(pdev);
2567 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2569 if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2570 duplex = HCLGE_MAC_FULL;
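/* Only 10M and 100M links support half duplex; a half-duplex request at
 * any other speed (e.g. HCLGE_MAC_SPEED_1G) is silently coerced to
 * HCLGE_MAC_FULL before being written to hardware.
 */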
2575 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2578 struct hclge_config_mac_speed_dup_cmd *req;
2579 struct hclge_desc desc;
2582 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2584 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2587 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2590 case HCLGE_MAC_SPEED_10M:
2591 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2592 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_10M);
2594 case HCLGE_MAC_SPEED_100M:
2595 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2596 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_100M);
2598 case HCLGE_MAC_SPEED_1G:
2599 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2600 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_1G);
2602 case HCLGE_MAC_SPEED_10G:
2603 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2604 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_10G);
2606 case HCLGE_MAC_SPEED_25G:
2607 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2608 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_25G);
2610 case HCLGE_MAC_SPEED_40G:
2611 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2612 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_40G);
2614 case HCLGE_MAC_SPEED_50G:
2615 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2616 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_50G);
2618 case HCLGE_MAC_SPEED_100G:
2619 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2620 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_100G);
2622 case HCLGE_MAC_SPEED_200G:
2623 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2624 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_200G);
2627 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2631 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2634 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2636 dev_err(&hdev->pdev->dev,
2637 "mac speed/duplex config cmd failed %d.\n", ret);
2644 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2646 struct hclge_mac *mac = &hdev->hw.mac;
2649 duplex = hclge_check_speed_dup(duplex, speed);
2650 if (!mac->support_autoneg && mac->speed == speed &&
2651 mac->duplex == duplex)
2654 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2658 hdev->hw.mac.speed = speed;
2659 hdev->hw.mac.duplex = duplex;
2664 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2667 struct hclge_vport *vport = hclge_get_vport(handle);
2668 struct hclge_dev *hdev = vport->back;
2670 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2673 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2675 struct hclge_config_auto_neg_cmd *req;
2676 struct hclge_desc desc;
2680 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2682 req = (struct hclge_config_auto_neg_cmd *)desc.data;
2684 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2685 req->cfg_an_cmd_flag = cpu_to_le32(flag);
2687 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2689 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2695 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2697 struct hclge_vport *vport = hclge_get_vport(handle);
2698 struct hclge_dev *hdev = vport->back;
2700 if (!hdev->hw.mac.support_autoneg) {
2702 dev_err(&hdev->pdev->dev,
2703 "autoneg is not supported by current port\n");
2710 return hclge_set_autoneg_en(hdev, enable);
2713 static int hclge_get_autoneg(struct hnae3_handle *handle)
2715 struct hclge_vport *vport = hclge_get_vport(handle);
2716 struct hclge_dev *hdev = vport->back;
2717 struct phy_device *phydev = hdev->hw.mac.phydev;
2720 return phydev->autoneg;
2722 return hdev->hw.mac.autoneg;
2725 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2727 struct hclge_vport *vport = hclge_get_vport(handle);
2728 struct hclge_dev *hdev = vport->back;
2731 dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2733 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2736 return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2739 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2741 struct hclge_vport *vport = hclge_get_vport(handle);
2742 struct hclge_dev *hdev = vport->back;
2744 if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2745 return hclge_set_autoneg_en(hdev, !halt);
2750 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2752 struct hclge_config_fec_cmd *req;
2753 struct hclge_desc desc;
2756 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2758 req = (struct hclge_config_fec_cmd *)desc.data;
2759 if (fec_mode & BIT(HNAE3_FEC_AUTO))
2760 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2761 if (fec_mode & BIT(HNAE3_FEC_RS))
2762 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2763 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2764 if (fec_mode & BIT(HNAE3_FEC_BASER))
2765 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2766 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2768 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2770 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2775 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2777 struct hclge_vport *vport = hclge_get_vport(handle);
2778 struct hclge_dev *hdev = vport->back;
2779 struct hclge_mac *mac = &hdev->hw.mac;
2782 if (fec_mode && !(mac->fec_ability & fec_mode)) {
2783 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2787 ret = hclge_set_fec_hw(hdev, fec_mode);
2791 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2795 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2798 struct hclge_vport *vport = hclge_get_vport(handle);
2799 struct hclge_dev *hdev = vport->back;
2800 struct hclge_mac *mac = &hdev->hw.mac;
2803 *fec_ability = mac->fec_ability;
2805 *fec_mode = mac->fec_mode;
2808 static int hclge_mac_init(struct hclge_dev *hdev)
2810 struct hclge_mac *mac = &hdev->hw.mac;
2813 hdev->support_sfp_query = true;
2814 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2815 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2816 hdev->hw.mac.duplex);
2820 if (hdev->hw.mac.support_autoneg) {
2821 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2828 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2829 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2834 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2836 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2840 ret = hclge_set_default_loopback(hdev);
2844 ret = hclge_buffer_alloc(hdev);
2846 dev_err(&hdev->pdev->dev,
2847 "allocate buffer fail, ret=%d\n", ret);
2852 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2854 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2855 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2856 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2857 hclge_wq, &hdev->service_task, 0);
2860 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2862 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2863 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2864 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2865 hclge_wq, &hdev->service_task, 0);
2868 static void hclge_errhand_task_schedule(struct hclge_dev *hdev)
2870 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2871 !test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
2872 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2873 hclge_wq, &hdev->service_task, 0);
2876 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2878 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2879 !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2880 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2881 hclge_wq, &hdev->service_task,
2885 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
2887 struct hclge_link_status_cmd *req;
2888 struct hclge_desc desc;
2891 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2892 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2894 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2899 req = (struct hclge_link_status_cmd *)desc.data;
2900 *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2901 HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
2906 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
2908 struct phy_device *phydev = hdev->hw.mac.phydev;
2910 *link_status = HCLGE_LINK_STATUS_DOWN;
2912 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2915 if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
2918 return hclge_get_mac_link_status(hdev, link_status);
2921 static void hclge_push_link_status(struct hclge_dev *hdev)
2923 struct hclge_vport *vport;
2927 for (i = 0; i < pci_num_vf(hdev->pdev); i++) {
2928 vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM];
2930 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) ||
2931 vport->vf_info.link_state != IFLA_VF_LINK_STATE_AUTO)
2934 ret = hclge_push_vf_link_status(vport);
2936 dev_err(&hdev->pdev->dev,
2937 "failed to push link status to vf%u, ret = %d\n",
2943 static void hclge_update_link_status(struct hclge_dev *hdev)
2945 struct hnae3_handle *rhandle = &hdev->vport[0].roce;
2946 struct hnae3_handle *handle = &hdev->vport[0].nic;
2947 struct hnae3_client *rclient = hdev->roce_client;
2948 struct hnae3_client *client = hdev->nic_client;
2955 if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2958 ret = hclge_get_mac_phy_link(hdev, &state);
2960 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2964 if (state != hdev->hw.mac.link) {
2965 hdev->hw.mac.link = state;
2966 client->ops->link_status_change(handle, state);
2967 hclge_config_mac_tnl_int(hdev, state);
2968 if (rclient && rclient->ops->link_status_change)
2969 rclient->ops->link_status_change(rhandle, state);
2971 hclge_push_link_status(hdev);
2974 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2977 static void hclge_update_port_capability(struct hclge_dev *hdev,
2978 struct hclge_mac *mac)
2980 if (hnae3_dev_fec_supported(hdev))
2981 /* update fec ability by speed */
2982 hclge_convert_setting_fec(mac);
2984 /* firmware cannot identify the backplane type; the media type
2985 * read from the configuration can help handle it
2987 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2988 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2989 mac->module_type = HNAE3_MODULE_TYPE_KR;
2990 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2991 mac->module_type = HNAE3_MODULE_TYPE_TP;
2993 if (mac->support_autoneg) {
2994 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2995 linkmode_copy(mac->advertising, mac->supported);
2997 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2999 linkmode_zero(mac->advertising);
3003 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
3005 struct hclge_sfp_info_cmd *resp;
3006 struct hclge_desc desc;
3009 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
3010 resp = (struct hclge_sfp_info_cmd *)desc.data;
3011 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3012 if (ret == -EOPNOTSUPP) {
3013 dev_warn(&hdev->pdev->dev,
3014 "IMP do not support get SFP speed %d\n", ret);
3017 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
3021 *speed = le32_to_cpu(resp->speed);
3026 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
3028 struct hclge_sfp_info_cmd *resp;
3029 struct hclge_desc desc;
3032 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
3033 resp = (struct hclge_sfp_info_cmd *)desc.data;
3035 resp->query_type = QUERY_ACTIVE_SPEED;
3037 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3038 if (ret == -EOPNOTSUPP) {
3039 dev_warn(&hdev->pdev->dev,
3040 "IMP does not support get SFP info %d\n", ret);
3043 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
3047 /* In some cases, the MAC speed queried from the IMP may be 0; it
3048 * should not be written to mac->speed.
3050 if (!le32_to_cpu(resp->speed))
3053 mac->speed = le32_to_cpu(resp->speed);
3054 /* if resp->speed_ability is 0, the firmware is an old version,
3055 * so do not update these params
3057 if (resp->speed_ability) {
3058 mac->module_type = le32_to_cpu(resp->module_type);
3059 mac->speed_ability = le32_to_cpu(resp->speed_ability);
3060 mac->autoneg = resp->autoneg;
3061 mac->support_autoneg = resp->autoneg_ability;
3062 mac->speed_type = QUERY_ACTIVE_SPEED;
3063 if (!resp->active_fec)
3066 mac->fec_mode = BIT(resp->active_fec);
3068 mac->speed_type = QUERY_SFP_SPEED;
3074 static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle,
3075 struct ethtool_link_ksettings *cmd)
3077 struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3078 struct hclge_vport *vport = hclge_get_vport(handle);
3079 struct hclge_phy_link_ksetting_0_cmd *req0;
3080 struct hclge_phy_link_ksetting_1_cmd *req1;
3081 u32 supported, advertising, lp_advertising;
3082 struct hclge_dev *hdev = vport->back;
3085 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3087 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3088 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3091 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3093 dev_err(&hdev->pdev->dev,
3094 "failed to get phy link ksetting, ret = %d.\n", ret);
3098 req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3099 cmd->base.autoneg = req0->autoneg;
3100 cmd->base.speed = le32_to_cpu(req0->speed);
3101 cmd->base.duplex = req0->duplex;
3102 cmd->base.port = req0->port;
3103 cmd->base.transceiver = req0->transceiver;
3104 cmd->base.phy_address = req0->phy_address;
3105 cmd->base.eth_tp_mdix = req0->eth_tp_mdix;
3106 cmd->base.eth_tp_mdix_ctrl = req0->eth_tp_mdix_ctrl;
3107 supported = le32_to_cpu(req0->supported);
3108 advertising = le32_to_cpu(req0->advertising);
3109 lp_advertising = le32_to_cpu(req0->lp_advertising);
3110 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
3112 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
3114 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
3117 req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3118 cmd->base.master_slave_cfg = req1->master_slave_cfg;
3119 cmd->base.master_slave_state = req1->master_slave_state;
3125 hclge_set_phy_link_ksettings(struct hnae3_handle *handle,
3126 const struct ethtool_link_ksettings *cmd)
3128 struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3129 struct hclge_vport *vport = hclge_get_vport(handle);
3130 struct hclge_phy_link_ksetting_0_cmd *req0;
3131 struct hclge_phy_link_ksetting_1_cmd *req1;
3132 struct hclge_dev *hdev = vport->back;
3136 if (cmd->base.autoneg == AUTONEG_DISABLE &&
3137 ((cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) ||
3138 (cmd->base.duplex != DUPLEX_HALF &&
3139 cmd->base.duplex != DUPLEX_FULL)))
3142 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3144 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3145 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3148 req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3149 req0->autoneg = cmd->base.autoneg;
3150 req0->speed = cpu_to_le32(cmd->base.speed);
3151 req0->duplex = cmd->base.duplex;
3152 ethtool_convert_link_mode_to_legacy_u32(&advertising,
3153 cmd->link_modes.advertising);
3154 req0->advertising = cpu_to_le32(advertising);
3155 req0->eth_tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
3157 req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3158 req1->master_slave_cfg = cmd->base.master_slave_cfg;
3160 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3162 dev_err(&hdev->pdev->dev,
3163 "failed to set phy link ksettings, ret = %d.\n", ret);
3167 hdev->hw.mac.autoneg = cmd->base.autoneg;
3168 hdev->hw.mac.speed = cmd->base.speed;
3169 hdev->hw.mac.duplex = cmd->base.duplex;
3170 linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising);
3175 static int hclge_update_tp_port_info(struct hclge_dev *hdev)
3177 struct ethtool_link_ksettings cmd;
3180 if (!hnae3_dev_phy_imp_supported(hdev))
3183 ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd);
3187 hdev->hw.mac.autoneg = cmd.base.autoneg;
3188 hdev->hw.mac.speed = cmd.base.speed;
3189 hdev->hw.mac.duplex = cmd.base.duplex;
3194 static int hclge_tp_port_init(struct hclge_dev *hdev)
3196 struct ethtool_link_ksettings cmd;
3198 if (!hnae3_dev_phy_imp_supported(hdev))
3201 cmd.base.autoneg = hdev->hw.mac.autoneg;
3202 cmd.base.speed = hdev->hw.mac.speed;
3203 cmd.base.duplex = hdev->hw.mac.duplex;
3204 linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising);
3206 return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd);
3209 static int hclge_update_port_info(struct hclge_dev *hdev)
3211 struct hclge_mac *mac = &hdev->hw.mac;
3212 int speed = HCLGE_MAC_SPEED_UNKNOWN;
3215 /* get the port info from the SFP cmd if this is not a copper port */
3216 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
3217 return hclge_update_tp_port_info(hdev);
3219 /* if IMP does not support getting SFP/qSFP info, return directly */
3220 if (!hdev->support_sfp_query)
3223 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
3224 ret = hclge_get_sfp_info(hdev, mac);
3226 ret = hclge_get_sfp_speed(hdev, &speed);
3228 if (ret == -EOPNOTSUPP) {
3229 hdev->support_sfp_query = false;
3235 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
3236 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
3237 hclge_update_port_capability(hdev, mac);
3240 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
3243 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
3244 return 0; /* do nothing if no SFP */
3246 /* must configure full duplex for SFP */
3247 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
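/* Decision-flow sketch for the function above: copper ports are refreshed
 * through the PHY ksettings query (when the PHY is managed by the IMP);
 * V2 and later devices use the full SFP info query and may update the
 * port capability from the active speed, while older devices fall back
 * to the plain SFP speed query and force full duplex.
 */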
3251 static int hclge_get_status(struct hnae3_handle *handle)
3253 struct hclge_vport *vport = hclge_get_vport(handle);
3254 struct hclge_dev *hdev = vport->back;
3256 hclge_update_link_status(hdev);
3258 return hdev->hw.mac.link;
3261 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
3263 if (!pci_num_vf(hdev->pdev)) {
3264 dev_err(&hdev->pdev->dev,
3265 "SRIOV is disabled, can not get vport(%d) info.\n", vf);
3269 if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3270 dev_err(&hdev->pdev->dev,
3271 "vf id(%d) is out of range(0 <= vfid < %d)\n",
3272 vf, pci_num_vf(hdev->pdev));
3276 /* VFs start from index 1 in the vport array */
3277 vf += HCLGE_VF_VPORT_START_NUM;
3278 return &hdev->vport[vf];
3281 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3282 struct ifla_vf_info *ivf)
3284 struct hclge_vport *vport = hclge_get_vport(handle);
3285 struct hclge_dev *hdev = vport->back;
3287 vport = hclge_get_vf_vport(hdev, vf);
3292 ivf->linkstate = vport->vf_info.link_state;
3293 ivf->spoofchk = vport->vf_info.spoofchk;
3294 ivf->trusted = vport->vf_info.trusted;
3295 ivf->min_tx_rate = 0;
3296 ivf->max_tx_rate = vport->vf_info.max_tx_rate;
3297 ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3298 ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3299 ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
3300 ether_addr_copy(ivf->mac, vport->vf_info.mac);
3305 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3308 struct hclge_vport *vport = hclge_get_vport(handle);
3309 struct hclge_dev *hdev = vport->back;
3313 vport = hclge_get_vf_vport(hdev, vf);
3317 link_state_old = vport->vf_info.link_state;
3318 vport->vf_info.link_state = link_state;
3320 ret = hclge_push_vf_link_status(vport);
3322 vport->vf_info.link_state = link_state_old;
3323 dev_err(&hdev->pdev->dev,
3324 "failed to push vf%d link status, ret = %d\n", vf, ret);
3330 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
3332 u32 cmdq_src_reg, msix_src_reg, hw_err_src_reg;
3334 /* fetch the events from their corresponding regs */
3335 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
3336 msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
3337 hw_err_src_reg = hclge_read_dev(&hdev->hw,
3338 HCLGE_RAS_PF_OTHER_INT_STS_REG);
3340 /* Assumption: if reset and mailbox events happen to be reported
3341 * together, we only process the reset event in this pass and defer
3342 * the processing of the mailbox events. Since we have not cleared the
3343 * RX CMDQ event this time, we will receive another interrupt from the
3344 * H/W just for the mailbox.
3346 * check for vector0 reset event sources
3348 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
3349 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3350 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3351 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3352 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3353 hdev->rst_stats.imp_rst_cnt++;
3354 return HCLGE_VECTOR0_EVENT_RST;
3357 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
3358 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3359 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3360 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3361 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3362 hdev->rst_stats.global_rst_cnt++;
3363 return HCLGE_VECTOR0_EVENT_RST;
3366 /* check for vector0 msix event and hardware error event source */
3367 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK ||
3368 hw_err_src_reg & HCLGE_RAS_REG_ERR_MASK)
3369 return HCLGE_VECTOR0_EVENT_ERR;
3371 /* check for vector0 ptp event source */
3372 if (BIT(HCLGE_VECTOR0_REG_PTP_INT_B) & msix_src_reg) {
3373 *clearval = msix_src_reg;
3374 return HCLGE_VECTOR0_EVENT_PTP;
3377 /* check for vector0 mailbox(=CMDQ RX) event source */
3378 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3379 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3380 *clearval = cmdq_src_reg;
3381 return HCLGE_VECTOR0_EVENT_MBX;
3384 /* print other vector0 event source */
3385 dev_info(&hdev->pdev->dev,
3386 "INT status: CMDQ(%#x) HW errors(%#x) other(%#x)\n",
3387 cmdq_src_reg, hw_err_src_reg, msix_src_reg);
3389 return HCLGE_VECTOR0_EVENT_OTHER;
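/* Note the priority implied by the checks above: IMP reset beats global
 * reset, which beats MSI-X/RAS hardware errors, then PTP, then mailbox;
 * anything else is reported as HCLGE_VECTOR0_EVENT_OTHER.
 */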
3392 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3395 switch (event_type) {
3396 case HCLGE_VECTOR0_EVENT_PTP:
3397 case HCLGE_VECTOR0_EVENT_RST:
3398 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3400 case HCLGE_VECTOR0_EVENT_MBX:
3401 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3408 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3410 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3411 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3412 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3413 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3414 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3417 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3419 writel(enable ? 1 : 0, vector->addr);
3422 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3424 struct hclge_dev *hdev = data;
3425 unsigned long flags;
3429 hclge_enable_vector(&hdev->misc_vector, false);
3430 event_cause = hclge_check_event_cause(hdev, &clearval);
3432 /* vector 0 interrupt is shared with reset and mailbox source events. */
3433 switch (event_cause) {
3434 case HCLGE_VECTOR0_EVENT_ERR:
3435 hclge_errhand_task_schedule(hdev);
3437 case HCLGE_VECTOR0_EVENT_RST:
3438 hclge_reset_task_schedule(hdev);
3440 case HCLGE_VECTOR0_EVENT_PTP:
3441 spin_lock_irqsave(&hdev->ptp->lock, flags);
3442 hclge_ptp_clean_tx_hwts(hdev);
3443 spin_unlock_irqrestore(&hdev->ptp->lock, flags);
3445 case HCLGE_VECTOR0_EVENT_MBX:
3446 /* If we are here then,
3447 * 1. either we are not handling any mbx task and we are not
* scheduled for one, or
3450 * 2. we could be handling a mbx task but nothing more is
* scheduled.
3452 * In both cases, we should schedule the mbx task as there are more
3453 * mbx messages reported by this interrupt.
3455 hclge_mbx_task_schedule(hdev);
3458 dev_warn(&hdev->pdev->dev,
3459 "received unknown or unhandled event of vector0\n");
3463 hclge_clear_event_cause(hdev, event_cause, clearval);
3465 /* Enable interrupt if it is not caused by reset event or error event */
3466 if (event_cause == HCLGE_VECTOR0_EVENT_PTP ||
3467 event_cause == HCLGE_VECTOR0_EVENT_MBX ||
3468 event_cause == HCLGE_VECTOR0_EVENT_OTHER)
3469 hclge_enable_vector(&hdev->misc_vector, true);
3474 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3476 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3477 dev_warn(&hdev->pdev->dev,
3478 "vector(vector_id %d) has been freed.\n", vector_id);
3482 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3483 hdev->num_msi_left += 1;
3484 hdev->num_msi_used -= 1;
3487 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3489 struct hclge_misc_vector *vector = &hdev->misc_vector;
3491 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3493 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3494 hdev->vector_status[0] = 0;
3496 hdev->num_msi_left -= 1;
3497 hdev->num_msi_used += 1;
3500 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3501 const cpumask_t *mask)
3503 struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3506 cpumask_copy(&hdev->affinity_mask, mask);
3509 static void hclge_irq_affinity_release(struct kref *ref)
3513 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3515 irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3516 &hdev->affinity_mask);
3518 hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3519 hdev->affinity_notify.release = hclge_irq_affinity_release;
3520 irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3521 &hdev->affinity_notify);
3524 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3526 irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3527 irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3530 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3534 hclge_get_misc_vector(hdev);
3536 /* this would be explicitly freed in the end */
3537 snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3538 HCLGE_NAME, pci_name(hdev->pdev));
3539 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3540 0, hdev->misc_vector.name, hdev);
3542 hclge_free_vector(hdev, 0);
3543 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3544 hdev->misc_vector.vector_irq);
3550 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3552 free_irq(hdev->misc_vector.vector_irq, hdev);
3553 hclge_free_vector(hdev, 0);
3556 int hclge_notify_client(struct hclge_dev *hdev,
3557 enum hnae3_reset_notify_type type)
3559 struct hnae3_handle *handle = &hdev->vport[0].nic;
3560 struct hnae3_client *client = hdev->nic_client;
3563 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3566 if (!client->ops->reset_notify)
3569 ret = client->ops->reset_notify(handle, type);
3571 dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
3577 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3578 enum hnae3_reset_notify_type type)
3580 struct hnae3_handle *handle = &hdev->vport[0].roce;
3581 struct hnae3_client *client = hdev->roce_client;
3584 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3587 if (!client->ops->reset_notify)
3590 ret = client->ops->reset_notify(handle, type);
3592 dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)",
3598 static int hclge_reset_wait(struct hclge_dev *hdev)
3600 #define HCLGE_RESET_WAIT_MS 100
3601 #define HCLGE_RESET_WAIT_CNT 350
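/* Polling every HCLGE_RESET_WAIT_MS for at most HCLGE_RESET_WAIT_CNT
 * iterations bounds the wait at 350 * 100 ms = 35 seconds.
 */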
3603 u32 val, reg, reg_bit;
3606 switch (hdev->reset_type) {
3607 case HNAE3_IMP_RESET:
3608 reg = HCLGE_GLOBAL_RESET_REG;
3609 reg_bit = HCLGE_IMP_RESET_BIT;
3611 case HNAE3_GLOBAL_RESET:
3612 reg = HCLGE_GLOBAL_RESET_REG;
3613 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3615 case HNAE3_FUNC_RESET:
3616 reg = HCLGE_FUN_RST_ING;
3617 reg_bit = HCLGE_FUN_RST_ING_B;
3620 dev_err(&hdev->pdev->dev,
3621 "Wait for unsupported reset type: %d\n",
3626 val = hclge_read_dev(&hdev->hw, reg);
3627 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3628 msleep(HCLGE_RESET_WAIT_MS);
3629 val = hclge_read_dev(&hdev->hw, reg);
3633 if (cnt >= HCLGE_RESET_WAIT_CNT) {
3634 dev_warn(&hdev->pdev->dev,
3635 "Wait for reset timeout: %d\n", hdev->reset_type);
3642 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3644 struct hclge_vf_rst_cmd *req;
3645 struct hclge_desc desc;
3647 req = (struct hclge_vf_rst_cmd *)desc.data;
3648 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3649 req->dest_vfid = func_id;
3654 return hclge_cmd_send(&hdev->hw, &desc, 1);
3657 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3661 for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++) {
3662 struct hclge_vport *vport = &hdev->vport[i];
3665 /* Send cmd to set/clear VF's FUNC_RST_ING */
3666 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3668 dev_err(&hdev->pdev->dev,
3669 "set vf(%u) rst failed %d!\n",
3670 vport->vport_id - HCLGE_VF_VPORT_START_NUM,
3675 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3678 /* Inform VF to process the reset.
3679 * hclge_inform_reset_assert_to_vf may fail if the VF
3680 * driver is not loaded.
3682 ret = hclge_inform_reset_assert_to_vf(vport);
3684 dev_warn(&hdev->pdev->dev,
3685 "inform reset to vf(%u) failed %d!\n",
3686 vport->vport_id - HCLGE_VF_VPORT_START_NUM,
3693 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3695 if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3696 test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3697 test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3700 hclge_mbx_handler(hdev);
3702 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3705 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3707 struct hclge_pf_rst_sync_cmd *req;
3708 struct hclge_desc desc;
3712 req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3713 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3716 /* VFs need to down their netdev via mbx during a PF or FLR reset */
3717 hclge_mailbox_service_task(hdev);
3719 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3720 /* for compatibility with old firmware, wait
3721 * 100 ms for the VF to stop IO
3723 if (ret == -EOPNOTSUPP) {
3724 msleep(HCLGE_RESET_SYNC_TIME);
3727 dev_warn(&hdev->pdev->dev, "sync with VF failed %d!\n",
3730 } else if (req->all_vf_ready) {
3733 msleep(HCLGE_PF_RESET_SYNC_TIME);
3734 hclge_cmd_reuse_desc(&desc, true);
3735 } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3737 dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
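/* Each iteration sleeps HCLGE_PF_RESET_SYNC_TIME ms, so the total wait
 * for slow VFs is bounded at roughly
 * HCLGE_PF_RESET_SYNC_TIME * HCLGE_PF_RESET_SYNC_CNT ms.
 */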
3740 void hclge_report_hw_error(struct hclge_dev *hdev,
3741 enum hnae3_hw_error_type type)
3743 struct hnae3_client *client = hdev->nic_client;
3745 if (!client || !client->ops->process_hw_error ||
3746 !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3749 client->ops->process_hw_error(&hdev->vport[0].nic, type);
3752 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3756 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3757 if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3758 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3759 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3760 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3763 if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3764 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3765 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3766 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3770 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3772 struct hclge_desc desc;
3773 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3776 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3777 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3778 req->fun_reset_vfid = func_id;
3780 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3782 dev_err(&hdev->pdev->dev,
3783 "send function reset cmd fail, status =%d\n", ret);
3788 static void hclge_do_reset(struct hclge_dev *hdev)
3790 struct hnae3_handle *handle = &hdev->vport[0].nic;
3791 struct pci_dev *pdev = hdev->pdev;
3794 if (hclge_get_hw_reset_stat(handle)) {
3795 dev_info(&pdev->dev, "hardware reset not finished\n");
3796 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3797 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3798 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3802 switch (hdev->reset_type) {
3803 case HNAE3_IMP_RESET:
3804 dev_info(&pdev->dev, "IMP reset requested\n");
3805 val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3806 hnae3_set_bit(val, HCLGE_TRIGGER_IMP_RESET_B, 1);
3807 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, val);
3809 case HNAE3_GLOBAL_RESET:
3810 dev_info(&pdev->dev, "global reset requested\n");
3811 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3812 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3813 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3815 case HNAE3_FUNC_RESET:
3816 dev_info(&pdev->dev, "PF reset requested\n");
3817 /* schedule again to check later */
3818 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3819 hclge_reset_task_schedule(hdev);
3822 dev_warn(&pdev->dev,
3823 "unsupported reset type: %d\n", hdev->reset_type);
3828 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3829 unsigned long *addr)
3831 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3832 struct hclge_dev *hdev = ae_dev->priv;
3834 /* return the highest priority reset level amongst all */
3835 if (test_bit(HNAE3_IMP_RESET, addr)) {
3836 rst_level = HNAE3_IMP_RESET;
3837 clear_bit(HNAE3_IMP_RESET, addr);
3838 clear_bit(HNAE3_GLOBAL_RESET, addr);
3839 clear_bit(HNAE3_FUNC_RESET, addr);
3840 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3841 rst_level = HNAE3_GLOBAL_RESET;
3842 clear_bit(HNAE3_GLOBAL_RESET, addr);
3843 clear_bit(HNAE3_FUNC_RESET, addr);
3844 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3845 rst_level = HNAE3_FUNC_RESET;
3846 clear_bit(HNAE3_FUNC_RESET, addr);
3847 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3848 rst_level = HNAE3_FLR_RESET;
3849 clear_bit(HNAE3_FLR_RESET, addr);
3852 if (hdev->reset_type != HNAE3_NONE_RESET &&
3853 rst_level < hdev->reset_type)
3854 return HNAE3_NONE_RESET;
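/* For example, if both HNAE3_GLOBAL_RESET and HNAE3_FUNC_RESET are
 * pending, HNAE3_GLOBAL_RESET is returned and the function-level bit is
 * cleared as well, folding the lower-level request into the higher one.
 */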
3859 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3863 switch (hdev->reset_type) {
3864 case HNAE3_IMP_RESET:
3865 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3867 case HNAE3_GLOBAL_RESET:
3868 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3877 /* For revision 0x20, the reset interrupt source
3878 * can only be cleared after the hardware reset is done
3880 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
3881 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3884 hclge_enable_vector(&hdev->misc_vector, true);
3887 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3891 reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3893 reg_val |= HCLGE_NIC_SW_RST_RDY;
3895 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3897 hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3900 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3904 ret = hclge_set_all_vf_rst(hdev, true);
3908 hclge_func_reset_sync_vf(hdev);
3913 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3918 switch (hdev->reset_type) {
3919 case HNAE3_FUNC_RESET:
3920 ret = hclge_func_reset_notify_vf(hdev);
3924 ret = hclge_func_reset_cmd(hdev, 0);
3926 dev_err(&hdev->pdev->dev,
3927 "asserting function reset fail %d!\n", ret);
3931 /* After performing a PF reset, it is not necessary to do any
3932 * mailbox handling or send any command to firmware, because
3933 * any mailbox handling or command to firmware is only valid
3934 * after hclge_cmd_init is called.
3936 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3937 hdev->rst_stats.pf_rst_cnt++;
3939 case HNAE3_FLR_RESET:
3940 ret = hclge_func_reset_notify_vf(hdev);
3944 case HNAE3_IMP_RESET:
3945 hclge_handle_imp_error(hdev);
3946 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3947 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3948 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3954 /* inform hardware that preparatory work is done */
3955 msleep(HCLGE_RESET_SYNC_TIME);
3956 hclge_reset_handshake(hdev, true);
3957 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3962 static void hclge_show_rst_info(struct hclge_dev *hdev)
3966 buf = kzalloc(HCLGE_DBG_RESET_INFO_LEN, GFP_KERNEL);
3970 hclge_dbg_dump_rst_info(hdev, buf, HCLGE_DBG_RESET_INFO_LEN);
3972 dev_info(&hdev->pdev->dev, "dump reset info:\n%s", buf);
3977 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3979 #define MAX_RESET_FAIL_CNT 5
3981 if (hdev->reset_pending) {
3982 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3983 hdev->reset_pending);
3985 } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3986 HCLGE_RESET_INT_M) {
3987 dev_info(&hdev->pdev->dev,
3988 "reset failed because new reset interrupt\n");
3989 hclge_clear_reset_cause(hdev);
3991 } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3992 hdev->rst_stats.reset_fail_cnt++;
3993 set_bit(hdev->reset_type, &hdev->reset_pending);
3994 dev_info(&hdev->pdev->dev,
3995 "re-schedule reset task(%u)\n",
3996 hdev->rst_stats.reset_fail_cnt);
4000 hclge_clear_reset_cause(hdev);
4002 /* recover the handshake status when reset fails */
4003 hclge_reset_handshake(hdev, true);
4005 dev_err(&hdev->pdev->dev, "Reset failed!\n");
4007 hclge_show_rst_info(hdev);
4009 set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4014 static void hclge_update_reset_level(struct hclge_dev *hdev)
4016 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4017 enum hnae3_reset_type reset_level;
4019 /* reset request will not be set during reset, so clear
4020 * pending reset request to avoid unnecessary reset
4021 * caused by the same reason.
4023 hclge_get_reset_level(ae_dev, &hdev->reset_request);
4025 /* if default_reset_request holds a higher-level reset request, it
4026 * should be handled as soon as possible, since some errors need this
4027 * kind of reset to be fixed.
4029 reset_level = hclge_get_reset_level(ae_dev,
4030 &hdev->default_reset_request);
4031 if (reset_level != HNAE3_NONE_RESET)
4032 set_bit(reset_level, &hdev->reset_request);
4035 static int hclge_set_rst_done(struct hclge_dev *hdev)
4037 struct hclge_pf_rst_done_cmd *req;
4038 struct hclge_desc desc;
4041 req = (struct hclge_pf_rst_done_cmd *)desc.data;
4042 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
4043 req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
4045 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4046 /* To be compatible with the old firmware, which does not support
4047 * command HCLGE_OPC_PF_RST_DONE, just print a warning and return
* success.
4050 if (ret == -EOPNOTSUPP) {
4051 dev_warn(&hdev->pdev->dev,
4052 "current firmware does not support command(0x%x)!\n",
4053 HCLGE_OPC_PF_RST_DONE);
4056 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
4063 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
4067 switch (hdev->reset_type) {
4068 case HNAE3_FUNC_RESET:
4069 case HNAE3_FLR_RESET:
4070 ret = hclge_set_all_vf_rst(hdev, false);
4072 case HNAE3_GLOBAL_RESET:
4073 case HNAE3_IMP_RESET:
4074 ret = hclge_set_rst_done(hdev);
4080 /* clear the handshake status after re-initialization is done */
4081 hclge_reset_handshake(hdev, false);
4086 static int hclge_reset_stack(struct hclge_dev *hdev)
4090 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
4094 ret = hclge_reset_ae_dev(hdev->ae_dev);
4098 return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
4101 static int hclge_reset_prepare(struct hclge_dev *hdev)
4105 hdev->rst_stats.reset_cnt++;
4106 /* perform reset of the stack & ae device for a client */
4107 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
4112 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
4117 return hclge_reset_prepare_wait(hdev);
4120 static int hclge_reset_rebuild(struct hclge_dev *hdev)
4124 hdev->rst_stats.hw_reset_done_cnt++;
4126 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
4131 ret = hclge_reset_stack(hdev);
4136 hclge_clear_reset_cause(hdev);
4138 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
4139 /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
* times
*/
4143 hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
4146 ret = hclge_reset_prepare_up(hdev);
4151 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
4156 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
4160 hdev->last_reset_time = jiffies;
4161 hdev->rst_stats.reset_fail_cnt = 0;
4162 hdev->rst_stats.reset_done_cnt++;
4163 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4165 hclge_update_reset_level(hdev);
4170 static void hclge_reset(struct hclge_dev *hdev)
4172 if (hclge_reset_prepare(hdev))
4175 if (hclge_reset_wait(hdev))
4178 if (hclge_reset_rebuild(hdev))
4184 if (hclge_reset_err_handle(hdev))
4185 hclge_reset_task_schedule(hdev);
4188 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
4190 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
4191 struct hclge_dev *hdev = ae_dev->priv;
4193 /* We might end up getting called broadly because of the 2 cases below:
4194 * 1. A recoverable error was conveyed through APEI and the only way to
4195 * bring normalcy is to reset.
4196 * 2. A new reset request from the stack due to timeout
4198 * Check if this is a new reset request and we are not here just because
4199 * the last reset attempt did not succeed and the watchdog hit us again.
4200 * We will know this if the last reset request did not occur very
4201 * recently (watchdog timer = 5*HZ, let us check after a sufficiently
4202 * large time, say 4*5*HZ). In case of a new request we reset the
4203 * "reset level" to PF reset. And if it is a repeat reset request of
4204 * the most recent one then we want to make sure we throttle the reset
4205 * request. Therefore, we will not allow it again before 3*HZ has passed.
4208 if (time_before(jiffies, (hdev->last_reset_time +
4209 HCLGE_RESET_INTERVAL))) {
4210 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
4214 if (hdev->default_reset_request) {
4216 hclge_get_reset_level(ae_dev,
4217 &hdev->default_reset_request);
4218 } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
4219 hdev->reset_level = HNAE3_FUNC_RESET;
4222 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
4225 /* request reset & schedule reset task */
4226 set_bit(hdev->reset_level, &hdev->reset_request);
4227 hclge_reset_task_schedule(hdev);
4229 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
4230 hdev->reset_level++;
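/* Escalation sketch: each handled reset event bumps reset_level one step
 * (capped at HNAE3_GLOBAL_RESET), so a quickly repeated request
 * escalates; a request arriving after a long quiet period starts again
 * from HNAE3_FUNC_RESET, as set above.
 */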
4233 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
4234 enum hnae3_reset_type rst_type)
4236 struct hclge_dev *hdev = ae_dev->priv;
4238 set_bit(rst_type, &hdev->default_reset_request);
4241 static void hclge_reset_timer(struct timer_list *t)
4243 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
4245 /* if default_reset_request has no value, it means that this reset
4246 * request has already been handled, so just return here
4248 if (!hdev->default_reset_request)
4251 dev_info(&hdev->pdev->dev,
4252 "triggering reset in reset timer\n");
4253 hclge_reset_event(hdev->pdev, NULL);
4256 static void hclge_reset_subtask(struct hclge_dev *hdev)
4258 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4260 /* check if there is any ongoing reset in the hardware. This status can
4261 * be checked from reset_pending. If there is then, we need to wait for
4262 * hardware to complete reset.
4263 * a. If we are able to figure out in reasonable time that the hardware
4264 * has fully reset, then we can proceed with driver and client reset.
4266 * b. else, we can come back later to check this status, so re-sched now.
4269 hdev->last_reset_time = jiffies;
4270 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
4271 if (hdev->reset_type != HNAE3_NONE_RESET)
4274 /* check if we got any *new* reset requests to be honored */
4275 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
4276 if (hdev->reset_type != HNAE3_NONE_RESET)
4277 hclge_do_reset(hdev);
4279 hdev->reset_type = HNAE3_NONE_RESET;
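/* Note the ordering above: pending hardware resets (reset_pending) are
 * serviced before new requests from the stack (reset_request), so a
 * reset already running in hardware is never preempted by a new,
 * possibly lower-level, request.
 */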
4282 static void hclge_handle_err_reset_request(struct hclge_dev *hdev)
4284 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4285 enum hnae3_reset_type reset_type;
4287 if (ae_dev->hw_err_reset_req) {
4288 reset_type = hclge_get_reset_level(ae_dev,
4289 &ae_dev->hw_err_reset_req);
4290 hclge_set_def_reset_request(ae_dev, reset_type);
4293 if (hdev->default_reset_request && ae_dev->ops->reset_event)
4294 ae_dev->ops->reset_event(hdev->pdev, NULL);
4296 /* enable interrupt after error handling complete */
4297 hclge_enable_vector(&hdev->misc_vector, true);
4300 static void hclge_handle_err_recovery(struct hclge_dev *hdev)
4302 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4304 ae_dev->hw_err_reset_req = 0;
4306 if (hclge_find_error_source(hdev)) {
4307 hclge_handle_error_info_log(ae_dev);
4308 hclge_handle_mac_tnl(hdev);
4311 hclge_handle_err_reset_request(hdev);
4314 static void hclge_misc_err_recovery(struct hclge_dev *hdev)
4316 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4317 struct device *dev = &hdev->pdev->dev;
4320 msix_sts_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
4321 if (msix_sts_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
4322 if (hclge_handle_hw_msix_error
4323 (hdev, &hdev->default_reset_request))
4324 dev_info(dev, "received msix interrupt 0x%x\n",
4328 hclge_handle_hw_ras_error(ae_dev);
4330 hclge_handle_err_reset_request(hdev);
4333 static void hclge_errhand_service_task(struct hclge_dev *hdev)
4335 if (!test_and_clear_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
4338 if (hnae3_dev_ras_imp_supported(hdev))
4339 hclge_handle_err_recovery(hdev);
4341 hclge_misc_err_recovery(hdev);
4344 static void hclge_reset_service_task(struct hclge_dev *hdev)
4346 if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
4349 down(&hdev->reset_sem);
4350 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4352 hclge_reset_subtask(hdev);
4354 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4355 up(&hdev->reset_sem);
4358 static void hclge_update_vport_alive(struct hclge_dev *hdev)
/* start from vport 1, since the PF (vport 0) is always alive */
4363 for (i = 1; i < hdev->num_alloc_vport; i++) {
4364 struct hclge_vport *vport = &hdev->vport[i];
4366 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
4367 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
/* If the VF is not alive, reset its MPS to the default value */
4370 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4371 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
4375 static void hclge_periodic_service_task(struct hclge_dev *hdev)
4377 unsigned long delta = round_jiffies_relative(HZ);
4379 if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
4382 /* Always handle the link updating to make sure link state is
4383 * updated when it is triggered by mbx.
4385 hclge_update_link_status(hdev);
4386 hclge_sync_mac_table(hdev);
4387 hclge_sync_promisc_mode(hdev);
4388 hclge_sync_fd_table(hdev);
4390 if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4391 delta = jiffies - hdev->last_serv_processed;
4393 if (delta < round_jiffies_relative(HZ)) {
4394 delta = round_jiffies_relative(HZ) - delta;
4399 hdev->serv_processed_cnt++;
4400 hclge_update_vport_alive(hdev);
4402 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4403 hdev->last_serv_processed = jiffies;
4407 if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4408 hclge_update_stats_for_all(hdev);
4410 hclge_update_port_info(hdev);
4411 hclge_sync_vlan_filter(hdev);
4413 if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4414 hclge_rfs_filter_expire(hdev);
4416 hdev->last_serv_processed = jiffies;
4419 hclge_task_schedule(hdev, delta);
4422 static void hclge_ptp_service_task(struct hclge_dev *hdev)
4424 unsigned long flags;
4426 if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state) ||
4427 !test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state) ||
4428 !time_is_before_jiffies(hdev->ptp->tx_start + HZ))
/* prevent racing with the irq handler */
4432 spin_lock_irqsave(&hdev->ptp->lock, flags);
4434 /* check HCLGE_STATE_PTP_TX_HANDLING here again, since the irq
4435 * handler may handle it just before spin_lock_irqsave().
4437 if (test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state))
4438 hclge_ptp_clean_tx_hwts(hdev);
4440 spin_unlock_irqrestore(&hdev->ptp->lock, flags);
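/* The flow above is the classic check/lock/re-check pattern; a minimal
 * sketch (FLAG, lock and do_work() are placeholders, not driver
 * symbols):
 *
 *	if (!test_bit(FLAG, &state))		// cheap unlocked check
 *		return;
 *	spin_lock_irqsave(&lock, flags);
 *	if (test_bit(FLAG, &state))		// re-check under the lock
 *		do_work();			// e.g. clean tx hwts
 *	spin_unlock_irqrestore(&lock, flags);
 */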
4443 static void hclge_service_task(struct work_struct *work)
4445 struct hclge_dev *hdev =
4446 container_of(work, struct hclge_dev, service_task.work);
4448 hclge_errhand_service_task(hdev);
4449 hclge_reset_service_task(hdev);
4450 hclge_ptp_service_task(hdev);
4451 hclge_mailbox_service_task(hdev);
4452 hclge_periodic_service_task(hdev);
/* Handle error recovery, reset and mbx again in case the periodic task
 * delays the handling by calling hclge_task_schedule() in
 * hclge_periodic_service_task().
 */
4458 hclge_errhand_service_task(hdev);
4459 hclge_reset_service_task(hdev);
4460 hclge_mailbox_service_task(hdev);
4463 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4465 /* VF handle has no client */
4466 if (!handle->client)
4467 return container_of(handle, struct hclge_vport, nic);
4468 else if (handle->client->type == HNAE3_CLIENT_ROCE)
4469 return container_of(handle, struct hclge_vport, roce);
4471 return container_of(handle, struct hclge_vport, nic);
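/* hclge_get_vport() relies on 'nic' and 'roce' being embedded members
 * of struct hclge_vport, so container_of() can recover the enclosing
 * vport from either handle. A sketch of the layout (see hclge_main.h
 * for the real definition):
 *
 *	struct hclge_vport {
 *		...
 *		struct hnae3_handle nic;
 *		struct hnae3_handle roce;
 *	};
 */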
4474 static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
4475 struct hnae3_vector_info *vector_info)
4477 #define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 64
4479 vector_info->vector = pci_irq_vector(hdev->pdev, idx);
/* an extended offset is needed to config vectors whose index >= 64 */
4482 if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
4483 vector_info->io_addr = hdev->hw.io_base +
4484 HCLGE_VECTOR_REG_BASE +
4485 (idx - 1) * HCLGE_VECTOR_REG_OFFSET;
4487 vector_info->io_addr = hdev->hw.io_base +
4488 HCLGE_VECTOR_EXT_REG_BASE +
4489 (idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4490 HCLGE_VECTOR_REG_OFFSET_H +
4491 (idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4492 HCLGE_VECTOR_REG_OFFSET;
4494 hdev->vector_status[idx] = hdev->vport[0].vport_id;
4495 hdev->vector_irq[idx] = vector_info->vector;
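/* Worked example for the extended layout above: with
 * HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 == 64, vector idx 70 gives
 * (idx - 1) == 69, so io_addr = HCLGE_VECTOR_EXT_REG_BASE +
 * 1 * HCLGE_VECTOR_REG_OFFSET_H (the row for idx 65..128) +
 * 5 * HCLGE_VECTOR_REG_OFFSET (the offset within that row).
 */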
4498 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4499 struct hnae3_vector_info *vector_info)
4501 struct hclge_vport *vport = hclge_get_vport(handle);
4502 struct hnae3_vector_info *vector = vector_info;
4503 struct hclge_dev *hdev = vport->back;
4508 vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4509 vector_num = min(hdev->num_msi_left, vector_num);
4511 for (j = 0; j < vector_num; j++) {
4512 while (++i < hdev->num_nic_msi) {
4513 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4514 hclge_get_vector_info(hdev, i, vector);
4522 hdev->num_msi_left -= alloc;
4523 hdev->num_msi_used += alloc;
4528 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4532 for (i = 0; i < hdev->num_msi; i++)
4533 if (vector == hdev->vector_irq[i])
4539 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4541 struct hclge_vport *vport = hclge_get_vport(handle);
4542 struct hclge_dev *hdev = vport->back;
4545 vector_id = hclge_get_vector_index(hdev, vector);
4546 if (vector_id < 0) {
4547 dev_err(&hdev->pdev->dev,
4548 "Get vector index fail. vector = %d\n", vector);
4552 hclge_free_vector(hdev, vector_id);
4557 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4559 return HCLGE_RSS_KEY_SIZE;
4562 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4563 const u8 hfunc, const u8 *key)
4565 struct hclge_rss_config_cmd *req;
4566 unsigned int key_offset = 0;
4567 struct hclge_desc desc;
4572 key_counts = HCLGE_RSS_KEY_SIZE;
4573 req = (struct hclge_rss_config_cmd *)desc.data;
4575 while (key_counts) {
4576 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4579 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4580 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4582 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4583 memcpy(req->hash_key,
4584 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4586 key_counts -= key_size;
4588 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4590 dev_err(&hdev->pdev->dev,
4591 "Configure RSS config fail, status = %d\n",
4599 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
4601 struct hclge_rss_indirection_table_cmd *req;
4602 struct hclge_desc desc;
4603 int rss_cfg_tbl_num;
4611 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4612 rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
4613 HCLGE_RSS_CFG_TBL_SIZE;
4615 for (i = 0; i < rss_cfg_tbl_num; i++) {
4616 hclge_cmd_setup_basic_desc
4617 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4619 req->start_table_index =
4620 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4621 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4622 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) {
4623 qid = indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4624 req->rss_qid_l[j] = qid & 0xff;
4626 j * HCLGE_RSS_CFG_TBL_BW_H / BITS_PER_BYTE;
4627 rss_msb_val = (qid >> HCLGE_RSS_CFG_TBL_BW_L & 0x1) <<
4628 (j * HCLGE_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);
4629 req->rss_qid_h[rss_msb_oft] |= rss_msb_val;
4631 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4633 dev_err(&hdev->pdev->dev,
4634 "Configure rss indir table fail,status = %d\n",
4642 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4643 u16 *tc_size, u16 *tc_offset)
4645 struct hclge_rss_tc_mode_cmd *req;
4646 struct hclge_desc desc;
4650 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4651 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4653 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4656 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4657 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4658 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4659 hnae3_set_bit(mode, HCLGE_RSS_TC_SIZE_MSB_B,
4660 tc_size[i] >> HCLGE_RSS_TC_SIZE_MSB_OFFSET & 0x1);
4661 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4662 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4664 req->rss_tc_mode[i] = cpu_to_le16(mode);
4667 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4669 dev_err(&hdev->pdev->dev,
4670 "Configure rss tc mode fail, status = %d\n", ret);
4675 static void hclge_get_rss_type(struct hclge_vport *vport)
4677 if (vport->rss_tuple_sets.ipv4_tcp_en ||
4678 vport->rss_tuple_sets.ipv4_udp_en ||
4679 vport->rss_tuple_sets.ipv4_sctp_en ||
4680 vport->rss_tuple_sets.ipv6_tcp_en ||
4681 vport->rss_tuple_sets.ipv6_udp_en ||
4682 vport->rss_tuple_sets.ipv6_sctp_en)
4683 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4684 else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4685 vport->rss_tuple_sets.ipv6_fragment_en)
4686 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4688 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4691 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4693 struct hclge_rss_input_tuple_cmd *req;
4694 struct hclge_desc desc;
4697 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4699 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
/* Get the tuple cfg from the PF */
4702 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4703 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4704 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4705 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4706 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4707 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4708 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4709 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4710 hclge_get_rss_type(&hdev->vport[0]);
4711 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4713 dev_err(&hdev->pdev->dev,
4714 "Configure rss input fail, status = %d\n", ret);
4718 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4721 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4722 struct hclge_vport *vport = hclge_get_vport(handle);
4725 /* Get hash algorithm */
4727 switch (vport->rss_algo) {
4728 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4729 *hfunc = ETH_RSS_HASH_TOP;
4731 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4732 *hfunc = ETH_RSS_HASH_XOR;
4735 *hfunc = ETH_RSS_HASH_UNKNOWN;
4740 /* Get the RSS Key required by the user */
4742 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4744 /* Get indirect table */
4746 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4747 indir[i] = vport->rss_indirection_tbl[i];
4752 static int hclge_parse_rss_hfunc(struct hclge_vport *vport, const u8 hfunc,
4756 case ETH_RSS_HASH_TOP:
4757 *hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4759 case ETH_RSS_HASH_XOR:
4760 *hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4762 case ETH_RSS_HASH_NO_CHANGE:
4763 *hash_algo = vport->rss_algo;
4770 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4771 const u8 *key, const u8 hfunc)
4773 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4774 struct hclge_vport *vport = hclge_get_vport(handle);
4775 struct hclge_dev *hdev = vport->back;
4779 ret = hclge_parse_rss_hfunc(vport, hfunc, &hash_algo);
4781 dev_err(&hdev->pdev->dev, "invalid hfunc type %u\n", hfunc);
/* Set the RSS Hash Key if specified by the user */
4787 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
/* Update the shadow RSS key with the user specified key */
4792 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4794 ret = hclge_set_rss_algo_key(hdev, hash_algo,
4795 vport->rss_hash_key);
4799 vport->rss_algo = hash_algo;
4801 /* Update the shadow RSS table with user specified qids */
4802 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4803 vport->rss_indirection_tbl[i] = indir[i];
4805 /* Update the hardware */
4806 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4809 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4811 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4813 if (nfc->data & RXH_L4_B_2_3)
4814 hash_sets |= HCLGE_D_PORT_BIT;
4816 hash_sets &= ~HCLGE_D_PORT_BIT;
4818 if (nfc->data & RXH_IP_SRC)
4819 hash_sets |= HCLGE_S_IP_BIT;
4821 hash_sets &= ~HCLGE_S_IP_BIT;
4823 if (nfc->data & RXH_IP_DST)
4824 hash_sets |= HCLGE_D_IP_BIT;
4826 hash_sets &= ~HCLGE_D_IP_BIT;
4828 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4829 hash_sets |= HCLGE_V_TAG_BIT;
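/* For example, "ethtool -N <dev> rx-flow-hash tcp4 sdfn" sets
 * RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3, which the
 * helper above converts to HCLGE_S_IP_BIT | HCLGE_D_IP_BIT |
 * HCLGE_S_PORT_BIT | HCLGE_D_PORT_BIT.
 */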
4834 static int hclge_init_rss_tuple_cmd(struct hclge_vport *vport,
4835 struct ethtool_rxnfc *nfc,
4836 struct hclge_rss_input_tuple_cmd *req)
4838 struct hclge_dev *hdev = vport->back;
4841 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4842 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4843 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4844 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4845 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4846 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4847 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4848 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4850 tuple_sets = hclge_get_rss_hash_bits(nfc);
4851 switch (nfc->flow_type) {
4853 req->ipv4_tcp_en = tuple_sets;
4856 req->ipv6_tcp_en = tuple_sets;
4859 req->ipv4_udp_en = tuple_sets;
4862 req->ipv6_udp_en = tuple_sets;
4865 req->ipv4_sctp_en = tuple_sets;
4868 if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
4869 (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
4872 req->ipv6_sctp_en = tuple_sets;
4875 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4878 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4887 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4888 struct ethtool_rxnfc *nfc)
4890 struct hclge_vport *vport = hclge_get_vport(handle);
4891 struct hclge_dev *hdev = vport->back;
4892 struct hclge_rss_input_tuple_cmd *req;
4893 struct hclge_desc desc;
4896 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4897 RXH_L4_B_0_1 | RXH_L4_B_2_3))
4900 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4901 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4903 ret = hclge_init_rss_tuple_cmd(vport, nfc, req);
4905 dev_err(&hdev->pdev->dev,
4906 "failed to init rss tuple cmd, ret = %d\n", ret);
4910 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4912 dev_err(&hdev->pdev->dev,
4913 "Set rss tuple fail, status = %d\n", ret);
4917 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4918 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4919 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4920 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4921 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4922 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4923 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4924 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4925 hclge_get_rss_type(vport);
4929 static int hclge_get_vport_rss_tuple(struct hclge_vport *vport, int flow_type,
4932 switch (flow_type) {
4934 *tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4937 *tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4940 *tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4943 *tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4946 *tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4949 *tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4953 *tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4962 static u64 hclge_convert_rss_tuple(u8 tuple_sets)
4966 if (tuple_sets & HCLGE_D_PORT_BIT)
4967 tuple_data |= RXH_L4_B_2_3;
4968 if (tuple_sets & HCLGE_S_PORT_BIT)
4969 tuple_data |= RXH_L4_B_0_1;
4970 if (tuple_sets & HCLGE_D_IP_BIT)
4971 tuple_data |= RXH_IP_DST;
4972 if (tuple_sets & HCLGE_S_IP_BIT)
4973 tuple_data |= RXH_IP_SRC;
4978 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4979 struct ethtool_rxnfc *nfc)
4981 struct hclge_vport *vport = hclge_get_vport(handle);
4987 ret = hclge_get_vport_rss_tuple(vport, nfc->flow_type, &tuple_sets);
4988 if (ret || !tuple_sets)
4991 nfc->data = hclge_convert_rss_tuple(tuple_sets);
4996 static int hclge_get_tc_size(struct hnae3_handle *handle)
4998 struct hclge_vport *vport = hclge_get_vport(handle);
4999 struct hclge_dev *hdev = vport->back;
5001 return hdev->pf_rss_size_max;
5004 static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
5006 struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
5007 struct hclge_vport *vport = hdev->vport;
5008 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
5009 u16 tc_valid[HCLGE_MAX_TC_NUM] = {0};
5010 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
5011 struct hnae3_tc_info *tc_info;
5016 tc_info = &vport->nic.kinfo.tc_info;
5017 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
5018 rss_size = tc_info->tqp_count[i];
5021 if (!(hdev->hw_tc_map & BIT(i)))
/* The tc_size written to hardware is the log2 of the roundup power of
 * two of rss_size; the actual queue size is limited by the indirection
 * table.
 */
5028 if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size ||
5030 dev_err(&hdev->pdev->dev,
5031 "Configure rss tc size failed, invalid TC_SIZE = %u\n",
5036 roundup_size = roundup_pow_of_two(rss_size);
5037 roundup_size = ilog2(roundup_size);
5040 tc_size[i] = roundup_size;
5041 tc_offset[i] = tc_info->tqp_offset[i];
5044 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
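/* Example of the tc_size computation above: rss_size = 12 rounds up to
 * 16, so tc_size = ilog2(16) = 4; hardware then hashes into 2^4 = 16
 * slots, while the actual queue spread stays bounded by the indirection
 * table contents.
 */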
5047 int hclge_rss_init_hw(struct hclge_dev *hdev)
5049 struct hclge_vport *vport = hdev->vport;
5050 u16 *rss_indir = vport[0].rss_indirection_tbl;
5051 u8 *key = vport[0].rss_hash_key;
5052 u8 hfunc = vport[0].rss_algo;
5055 ret = hclge_set_rss_indir_table(hdev, rss_indir);
5059 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
5063 ret = hclge_set_rss_input_tuple(hdev);
5067 return hclge_init_rss_tc_mode(hdev);
5070 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
5072 struct hclge_vport *vport = &hdev->vport[0];
5075 for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
5076 vport->rss_indirection_tbl[i] = i % vport->alloc_rss_size;
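/* e.g. with alloc_rss_size = 8 the default table reads 0,1,...,7,0,1,...
 * so traffic is spread evenly across the allocated queues until the
 * user overrides the table via ethtool -X.
 */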
5079 static int hclge_rss_init_cfg(struct hclge_dev *hdev)
5081 u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
5082 int rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
5083 struct hclge_vport *vport = &hdev->vport[0];
5086 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
5087 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
5089 vport->rss_tuple_sets.ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5090 vport->rss_tuple_sets.ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5091 vport->rss_tuple_sets.ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
5092 vport->rss_tuple_sets.ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5093 vport->rss_tuple_sets.ipv6_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5094 vport->rss_tuple_sets.ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5095 vport->rss_tuple_sets.ipv6_sctp_en =
5096 hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
5097 HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
5098 HCLGE_RSS_INPUT_TUPLE_SCTP;
5099 vport->rss_tuple_sets.ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5101 vport->rss_algo = rss_algo;
5103 rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
5104 sizeof(*rss_ind_tbl), GFP_KERNEL);
5108 vport->rss_indirection_tbl = rss_ind_tbl;
5109 memcpy(vport->rss_hash_key, hclge_hash_key, HCLGE_RSS_KEY_SIZE);
5111 hclge_rss_indir_init_cfg(hdev);
5116 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
5117 int vector_id, bool en,
5118 struct hnae3_ring_chain_node *ring_chain)
5120 struct hclge_dev *hdev = vport->back;
5121 struct hnae3_ring_chain_node *node;
5122 struct hclge_desc desc;
5123 struct hclge_ctrl_vector_chain_cmd *req =
5124 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
5125 enum hclge_cmd_status status;
5126 enum hclge_opcode_type op;
5127 u16 tqp_type_and_id;
5130 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
5131 hclge_cmd_setup_basic_desc(&desc, op, false);
5132 req->int_vector_id_l = hnae3_get_field(vector_id,
5133 HCLGE_VECTOR_ID_L_M,
5134 HCLGE_VECTOR_ID_L_S);
5135 req->int_vector_id_h = hnae3_get_field(vector_id,
5136 HCLGE_VECTOR_ID_H_M,
5137 HCLGE_VECTOR_ID_H_S);
5140 for (node = ring_chain; node; node = node->next) {
5141 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
5142 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
5144 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
5145 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
5146 HCLGE_TQP_ID_S, node->tqp_index);
5147 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
5149 hnae3_get_field(node->int_gl_idx,
5150 HNAE3_RING_GL_IDX_M,
5151 HNAE3_RING_GL_IDX_S));
5152 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
5153 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
5154 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
5155 req->vfid = vport->vport_id;
5157 status = hclge_cmd_send(&hdev->hw, &desc, 1);
5159 dev_err(&hdev->pdev->dev,
5160 "Map TQP fail, status is %d.\n",
5166 hclge_cmd_setup_basic_desc(&desc,
5169 req->int_vector_id_l =
5170 hnae3_get_field(vector_id,
5171 HCLGE_VECTOR_ID_L_M,
5172 HCLGE_VECTOR_ID_L_S);
5173 req->int_vector_id_h =
5174 hnae3_get_field(vector_id,
5175 HCLGE_VECTOR_ID_H_M,
5176 HCLGE_VECTOR_ID_H_S);
5181 req->int_cause_num = i;
5182 req->vfid = vport->vport_id;
5183 status = hclge_cmd_send(&hdev->hw, &desc, 1);
5185 dev_err(&hdev->pdev->dev,
5186 "Map TQP fail, status is %d.\n", status);
5194 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
5195 struct hnae3_ring_chain_node *ring_chain)
5197 struct hclge_vport *vport = hclge_get_vport(handle);
5198 struct hclge_dev *hdev = vport->back;
5201 vector_id = hclge_get_vector_index(hdev, vector);
5202 if (vector_id < 0) {
5203 dev_err(&hdev->pdev->dev,
5204 "failed to get vector index. vector=%d\n", vector);
5208 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
5211 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
5212 struct hnae3_ring_chain_node *ring_chain)
5214 struct hclge_vport *vport = hclge_get_vport(handle);
5215 struct hclge_dev *hdev = vport->back;
5218 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
5221 vector_id = hclge_get_vector_index(hdev, vector);
5222 if (vector_id < 0) {
5223 dev_err(&handle->pdev->dev,
5224 "Get vector index fail. ret =%d\n", vector_id);
5228 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
5230 dev_err(&handle->pdev->dev,
5231 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
5237 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id,
5238 bool en_uc, bool en_mc, bool en_bc)
5240 struct hclge_vport *vport = &hdev->vport[vf_id];
5241 struct hnae3_handle *handle = &vport->nic;
5242 struct hclge_promisc_cfg_cmd *req;
5243 struct hclge_desc desc;
5244 bool uc_tx_en = en_uc;
5248 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
5250 req = (struct hclge_promisc_cfg_cmd *)desc.data;
5253 if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags))
5256 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0);
5257 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0);
5258 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0);
5259 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0);
5260 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0);
5261 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0);
5262 req->extend_promisc = promisc_cfg;
5264 /* to be compatible with DEVICE_VERSION_V1/2 */
5266 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0);
5267 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0);
5268 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 1 : 0);
5269 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1);
5270 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1);
5271 req->promisc = promisc_cfg;
5273 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5275 dev_err(&hdev->pdev->dev,
5276 "failed to set vport %u promisc mode, ret = %d.\n",
5282 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
5283 bool en_mc_pmc, bool en_bc_pmc)
5285 return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id,
5286 en_uc_pmc, en_mc_pmc, en_bc_pmc);
5289 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
5292 struct hclge_vport *vport = hclge_get_vport(handle);
5293 struct hclge_dev *hdev = vport->back;
5294 bool en_bc_pmc = true;
/* For devices whose version is below V2, the vlan filter is always
 * bypassed when broadcast promisc is enabled. So broadcast promisc
 * should stay disabled until the user enables promisc mode.
 */
5300 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
5301 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
5303 return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
5307 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
5309 struct hclge_vport *vport = hclge_get_vport(handle);
5311 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
5314 static void hclge_sync_fd_state(struct hclge_dev *hdev)
5316 if (hlist_empty(&hdev->fd_rule_list))
5317 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5320 static void hclge_fd_inc_rule_cnt(struct hclge_dev *hdev, u16 location)
5322 if (!test_bit(location, hdev->fd_bmap)) {
5323 set_bit(location, hdev->fd_bmap);
5324 hdev->hclge_fd_rule_num++;
5328 static void hclge_fd_dec_rule_cnt(struct hclge_dev *hdev, u16 location)
5330 if (test_bit(location, hdev->fd_bmap)) {
5331 clear_bit(location, hdev->fd_bmap);
5332 hdev->hclge_fd_rule_num--;
5336 static void hclge_fd_free_node(struct hclge_dev *hdev,
5337 struct hclge_fd_rule *rule)
5339 hlist_del(&rule->rule_node);
5341 hclge_sync_fd_state(hdev);
5344 static void hclge_update_fd_rule_node(struct hclge_dev *hdev,
5345 struct hclge_fd_rule *old_rule,
5346 struct hclge_fd_rule *new_rule,
5347 enum HCLGE_FD_NODE_STATE state)
5350 case HCLGE_FD_TO_ADD:
5351 case HCLGE_FD_ACTIVE:
/* 1) if the new state is TO_ADD, just replace the old rule with the
 *    same location, no matter its state, because the new rule will be
 *    configured to the hardware.
 * 2) if the new state is ACTIVE, it means the new rule has already
 *    been configured to the hardware, so just replace the old rule
 *    node with the same location.
 * 3) neither case adds a new node to the list, so there is no need to
 *    update the rule count or fd_bmap.
 */
5361 new_rule->rule_node.next = old_rule->rule_node.next;
5362 new_rule->rule_node.pprev = old_rule->rule_node.pprev;
5363 memcpy(old_rule, new_rule, sizeof(*old_rule));
5366 case HCLGE_FD_DELETED:
5367 hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5368 hclge_fd_free_node(hdev, old_rule);
5370 case HCLGE_FD_TO_DEL:
/* if the new request is TO_DEL and an old rule exists:
 * 1) if the old rule's state is TO_DEL, do nothing, because rules are
 *    deleted by location and the rest of the rule content does not
 *    matter.
 * 2) if the old rule's state is ACTIVE, change it to TO_DEL, so the
 *    rule will be deleted when the periodic task is scheduled.
 * 3) if the old rule's state is TO_ADD, the rule hasn't been added to
 *    the hardware yet, so just delete the rule node from fd_rule_list
 *    directly.
 */
5382 if (old_rule->state == HCLGE_FD_TO_ADD) {
5383 hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5384 hclge_fd_free_node(hdev, old_rule);
5387 old_rule->state = HCLGE_FD_TO_DEL;
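/* Summary of the rule state machine handled above:
 *
 *	TO_ADD  --written to hw--------->  ACTIVE
 *	ACTIVE  --user delete----------->  TO_DEL
 *	TO_DEL  --removed from hw------->  DELETED (node freed)
 *	TO_ADD  --user delete----------->  node freed directly
 */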
5392 static struct hclge_fd_rule *hclge_find_fd_rule(struct hlist_head *hlist,
5394 struct hclge_fd_rule **parent)
5396 struct hclge_fd_rule *rule;
5397 struct hlist_node *node;
5399 hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
5400 if (rule->location == location)
5402 else if (rule->location > location)
/* record the parent node, used to keep the nodes in fd_rule_list
 * sorted in ascending order
 */
/* insert fd rule node in ascending order according to rule->location */
5414 static void hclge_fd_insert_rule_node(struct hlist_head *hlist,
5415 struct hclge_fd_rule *rule,
5416 struct hclge_fd_rule *parent)
5418 INIT_HLIST_NODE(&rule->rule_node);
5421 hlist_add_behind(&rule->rule_node, &parent->rule_node);
5423 hlist_add_head(&rule->rule_node, hlist);
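/* Keeping fd_rule_list sorted by location lets hclge_find_fd_rule()
 * stop early once rule->location is passed, and makes 'parent' the
 * right anchor for hlist_add_behind() when the rule is not found.
 */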
5426 static int hclge_fd_set_user_def_cmd(struct hclge_dev *hdev,
5427 struct hclge_fd_user_def_cfg *cfg)
5429 struct hclge_fd_user_def_cfg_cmd *req;
5430 struct hclge_desc desc;
5434 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_USER_DEF_OP, false);
5436 req = (struct hclge_fd_user_def_cfg_cmd *)desc.data;
5438 hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[0].ref_cnt > 0);
5439 hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5440 HCLGE_FD_USER_DEF_OFT_S, cfg[0].offset);
5441 req->ol2_cfg = cpu_to_le16(data);
5444 hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[1].ref_cnt > 0);
5445 hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5446 HCLGE_FD_USER_DEF_OFT_S, cfg[1].offset);
5447 req->ol3_cfg = cpu_to_le16(data);
5450 hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[2].ref_cnt > 0);
5451 hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5452 HCLGE_FD_USER_DEF_OFT_S, cfg[2].offset);
5453 req->ol4_cfg = cpu_to_le16(data);
5455 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5457 dev_err(&hdev->pdev->dev,
5458 "failed to set fd user def data, ret= %d\n", ret);
5462 static void hclge_sync_fd_user_def_cfg(struct hclge_dev *hdev, bool locked)
5466 if (!test_and_clear_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state))
5470 spin_lock_bh(&hdev->fd_rule_lock);
5472 ret = hclge_fd_set_user_def_cmd(hdev, hdev->fd_cfg.user_def_cfg);
5474 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5477 spin_unlock_bh(&hdev->fd_rule_lock);
5480 static int hclge_fd_check_user_def_refcnt(struct hclge_dev *hdev,
5481 struct hclge_fd_rule *rule)
5483 struct hlist_head *hlist = &hdev->fd_rule_list;
5484 struct hclge_fd_rule *fd_rule, *parent = NULL;
5485 struct hclge_fd_user_def_info *info, *old_info;
5486 struct hclge_fd_user_def_cfg *cfg;
5488 if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5489 rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
/* valid layers start from 1, so minus 1 is needed to index the cfg */
5493 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5494 info = &rule->ep.user_def;
5496 if (!cfg->ref_cnt || cfg->offset == info->offset)
5499 if (cfg->ref_cnt > 1)
5502 fd_rule = hclge_find_fd_rule(hlist, rule->location, &parent);
5504 old_info = &fd_rule->ep.user_def;
5505 if (info->layer == old_info->layer)
5510 dev_err(&hdev->pdev->dev,
5511 "No available offset for layer%d fd rule, each layer only support one user def offset.\n",
5516 static void hclge_fd_inc_user_def_refcnt(struct hclge_dev *hdev,
5517 struct hclge_fd_rule *rule)
5519 struct hclge_fd_user_def_cfg *cfg;
5521 if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5522 rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5525 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5526 if (!cfg->ref_cnt) {
5527 cfg->offset = rule->ep.user_def.offset;
5528 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5533 static void hclge_fd_dec_user_def_refcnt(struct hclge_dev *hdev,
5534 struct hclge_fd_rule *rule)
5536 struct hclge_fd_user_def_cfg *cfg;
5538 if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5539 rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5542 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5547 if (!cfg->ref_cnt) {
5549 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5553 static void hclge_update_fd_list(struct hclge_dev *hdev,
5554 enum HCLGE_FD_NODE_STATE state, u16 location,
5555 struct hclge_fd_rule *new_rule)
5557 struct hlist_head *hlist = &hdev->fd_rule_list;
5558 struct hclge_fd_rule *fd_rule, *parent = NULL;
5560 fd_rule = hclge_find_fd_rule(hlist, location, &parent);
5562 hclge_fd_dec_user_def_refcnt(hdev, fd_rule);
5563 if (state == HCLGE_FD_ACTIVE)
5564 hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5565 hclge_sync_fd_user_def_cfg(hdev, true);
5567 hclge_update_fd_rule_node(hdev, fd_rule, new_rule, state);
/* it's unlikely to fail here, because we have checked that the rule
 * exists before.
 */
5574 if (unlikely(state == HCLGE_FD_TO_DEL || state == HCLGE_FD_DELETED)) {
5575 dev_warn(&hdev->pdev->dev,
5576 "failed to delete fd rule %u, it's inexistent\n",
5581 hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5582 hclge_sync_fd_user_def_cfg(hdev, true);
5584 hclge_fd_insert_rule_node(hlist, new_rule, parent);
5585 hclge_fd_inc_rule_cnt(hdev, new_rule->location);
5587 if (state == HCLGE_FD_TO_ADD) {
5588 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
5589 hclge_task_schedule(hdev, 0);
5593 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
5595 struct hclge_get_fd_mode_cmd *req;
5596 struct hclge_desc desc;
5599 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
5601 req = (struct hclge_get_fd_mode_cmd *)desc.data;
5603 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5605 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
5609 *fd_mode = req->mode;
5614 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
5615 u32 *stage1_entry_num,
5616 u32 *stage2_entry_num,
5617 u16 *stage1_counter_num,
5618 u16 *stage2_counter_num)
5620 struct hclge_get_fd_allocation_cmd *req;
5621 struct hclge_desc desc;
5624 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
5626 req = (struct hclge_get_fd_allocation_cmd *)desc.data;
5628 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5630 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
5635 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
5636 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
5637 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
5638 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
5643 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
5644 enum HCLGE_FD_STAGE stage_num)
5646 struct hclge_set_fd_key_config_cmd *req;
5647 struct hclge_fd_key_cfg *stage;
5648 struct hclge_desc desc;
5651 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
5653 req = (struct hclge_set_fd_key_config_cmd *)desc.data;
5654 stage = &hdev->fd_cfg.key_cfg[stage_num];
5655 req->stage = stage_num;
5656 req->key_select = stage->key_sel;
5657 req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
5658 req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
5659 req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
5660 req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
5661 req->tuple_mask = cpu_to_le32(~stage->tuple_active);
5662 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
5664 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5666 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
5671 static void hclge_fd_disable_user_def(struct hclge_dev *hdev)
5673 struct hclge_fd_user_def_cfg *cfg = hdev->fd_cfg.user_def_cfg;
5675 spin_lock_bh(&hdev->fd_rule_lock);
5676 memset(cfg, 0, sizeof(hdev->fd_cfg.user_def_cfg));
5677 spin_unlock_bh(&hdev->fd_rule_lock);
5679 hclge_fd_set_user_def_cmd(hdev, cfg);
5682 static int hclge_init_fd_config(struct hclge_dev *hdev)
5684 #define LOW_2_WORDS 0x03
5685 struct hclge_fd_key_cfg *key_cfg;
5688 if (!hnae3_dev_fd_supported(hdev))
5691 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
5695 switch (hdev->fd_cfg.fd_mode) {
5696 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
5697 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
5699 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
5700 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
5703 dev_err(&hdev->pdev->dev,
5704 "Unsupported flow director mode %u\n",
5705 hdev->fd_cfg.fd_mode);
5709 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
5710 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
5711 key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
5712 key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
5713 key_cfg->outer_sipv6_word_en = 0;
5714 key_cfg->outer_dipv6_word_en = 0;
5716 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
5717 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
5718 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5719 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
/* If the max 400-bit key is used, tuples for ether type can also be supported */
5722 if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5723 key_cfg->tuple_active |=
5724 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
5725 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
5726 key_cfg->tuple_active |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
/* roce_type is used to filter roce frames
 * dst_vport is used to specify the rule
 */
5732 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
5734 ret = hclge_get_fd_allocation(hdev,
5735 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
5736 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
5737 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
5738 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
5742 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
5745 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
5746 int loc, u8 *key, bool is_add)
5748 struct hclge_fd_tcam_config_1_cmd *req1;
5749 struct hclge_fd_tcam_config_2_cmd *req2;
5750 struct hclge_fd_tcam_config_3_cmd *req3;
5751 struct hclge_desc desc[3];
5754 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
5755 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5756 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
5757 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5758 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
5760 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
5761 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
5762 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
5764 req1->stage = stage;
5765 req1->xy_sel = sel_x ? 1 : 0;
5766 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
5767 req1->index = cpu_to_le32(loc);
5768 req1->entry_vld = sel_x ? is_add : 0;
5771 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
5772 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
5773 sizeof(req2->tcam_data));
5774 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
5775 sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
5778 ret = hclge_cmd_send(&hdev->hw, desc, 3);
5780 dev_err(&hdev->pdev->dev,
5781 "config tcam key fail, ret=%d\n",
5787 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5788 struct hclge_fd_ad_data *action)
5790 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
5791 struct hclge_fd_ad_config_cmd *req;
5792 struct hclge_desc desc;
5796 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5798 req = (struct hclge_fd_ad_config_cmd *)desc.data;
5799 req->index = cpu_to_le32(loc);
5802 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5803 action->write_rule_id_to_bd);
5804 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5806 if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
5807 hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B,
5808 action->override_tc);
5809 hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M,
5810 HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size);
5813 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5814 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5815 action->forward_to_direct_queue);
5816 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5818 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5819 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5820 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5821 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5822 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5823 action->counter_id);
5825 req->ad_data = cpu_to_le64(ad_data);
5826 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5828 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5833 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5834 struct hclge_fd_rule *rule)
5836 int offset, moffset, ip_offset;
5837 enum HCLGE_FD_KEY_OPT key_opt;
5838 u16 tmp_x_s, tmp_y_s;
5839 u32 tmp_x_l, tmp_y_l;
5843 if (rule->unused_tuple & BIT(tuple_bit))
5846 key_opt = tuple_key_info[tuple_bit].key_opt;
5847 offset = tuple_key_info[tuple_bit].offset;
5848 moffset = tuple_key_info[tuple_bit].moffset;
5852 calc_x(*key_x, p[offset], p[moffset]);
5853 calc_y(*key_y, p[offset], p[moffset]);
5857 calc_x(tmp_x_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5858 calc_y(tmp_y_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5859 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5860 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5864 calc_x(tmp_x_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5865 calc_y(tmp_y_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5866 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5867 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5871 for (i = 0; i < ETH_ALEN; i++) {
5872 calc_x(key_x[ETH_ALEN - 1 - i], p[offset + i],
5874 calc_y(key_y[ETH_ALEN - 1 - i], p[offset + i],
5880 ip_offset = IPV4_INDEX * sizeof(u32);
5881 calc_x(tmp_x_l, *(u32 *)(&p[offset + ip_offset]),
5882 *(u32 *)(&p[moffset + ip_offset]));
5883 calc_y(tmp_y_l, *(u32 *)(&p[offset + ip_offset]),
5884 *(u32 *)(&p[moffset + ip_offset]));
5885 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5886 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
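/* Each tuple above is emitted as an x/y pattern. In x/y TCAM encoding
 * every key bit is stored as a pair so that, besides match-0 and
 * match-1, a masked-out bit can encode "don't care"; the calc_x() and
 * calc_y() macros (defined earlier in this file) derive the two
 * patterns from a value and its mask.
 */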
5894 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5895 u8 vf_id, u8 network_port_id)
5897 u32 port_number = 0;
5899 if (port_type == HOST_PORT) {
5900 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5902 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5904 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5906 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5907 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5908 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5914 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5915 __le32 *key_x, __le32 *key_y,
5916 struct hclge_fd_rule *rule)
5918 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5919 u8 cur_pos = 0, tuple_size, shift_bits;
5922 for (i = 0; i < MAX_META_DATA; i++) {
5923 tuple_size = meta_data_key_info[i].key_length;
5924 tuple_bit = key_cfg->meta_data_active & BIT(i);
5926 switch (tuple_bit) {
5927 case BIT(ROCE_TYPE):
5928 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5929 cur_pos += tuple_size;
5931 case BIT(DST_VPORT):
5932 port_number = hclge_get_port_number(HOST_PORT, 0,
5934 hnae3_set_field(meta_data,
5935 GENMASK(cur_pos + tuple_size, cur_pos),
5936 cur_pos, port_number);
5937 cur_pos += tuple_size;
5944 calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5945 calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5946 shift_bits = sizeof(meta_data) * 8 - cur_pos;
5948 *key_x = cpu_to_le32(tmp_x << shift_bits);
5949 *key_y = cpu_to_le32(tmp_y << shift_bits);
/* A complete key consists of the meta data key and the tuple key.
 * The meta data key is stored in the MSB region, the tuple key in the
 * LSB region, and unused bits are filled with 0.
 */
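/* For example, assuming the 400-bit (50-byte) key mode and 32 bits of
 * meta data (MAX_META_DATA_LENGTH), the meta data words occupy the top
 * 4 bytes of key_x/key_y while the tuple bytes fill the buffer from
 * offset 0 upward; any gap in between stays zero.
 */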
5956 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5957 struct hclge_fd_rule *rule)
5959 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5960 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5961 u8 *cur_key_x, *cur_key_y;
5962 u8 meta_data_region;
5967 memset(key_x, 0, sizeof(key_x));
5968 memset(key_y, 0, sizeof(key_y));
5972 for (i = 0; i < MAX_TUPLE; i++) {
5975 tuple_size = tuple_key_info[i].key_length / 8;
5976 if (!(key_cfg->tuple_active & BIT(i)))
5979 tuple_valid = hclge_fd_convert_tuple(i, cur_key_x,
5982 cur_key_x += tuple_size;
5983 cur_key_y += tuple_size;
5987 meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5988 MAX_META_DATA_LENGTH / 8;
5990 hclge_fd_convert_meta_data(key_cfg,
5991 (__le32 *)(key_x + meta_data_region),
5992 (__le32 *)(key_y + meta_data_region),
5995 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5998 dev_err(&hdev->pdev->dev,
5999 "fd key_y config fail, loc=%u, ret=%d\n",
6000 rule->queue_id, ret);
6004 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
6007 dev_err(&hdev->pdev->dev,
6008 "fd key_x config fail, loc=%u, ret=%d\n",
6009 rule->queue_id, ret);
6013 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
6014 struct hclge_fd_rule *rule)
6016 struct hclge_vport *vport = hdev->vport;
6017 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
6018 struct hclge_fd_ad_data ad_data;
6020 memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data));
6021 ad_data.ad_id = rule->location;
6023 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6024 ad_data.drop_packet = true;
6025 } else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) {
6026 ad_data.override_tc = true;
6028 kinfo->tc_info.tqp_offset[rule->cls_flower.tc];
6030 ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]);
6032 ad_data.forward_to_direct_queue = true;
6033 ad_data.queue_id = rule->queue_id;
6036 if (hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1]) {
6037 ad_data.use_counter = true;
6038 ad_data.counter_id = rule->vf_id %
6039 hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1];
6041 ad_data.use_counter = false;
6042 ad_data.counter_id = 0;
6045 ad_data.use_next_stage = false;
6046 ad_data.next_input_key = 0;
6048 ad_data.write_rule_id_to_bd = true;
6049 ad_data.rule_id = rule->location;
6051 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
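/* Roughly, the action resolution above maps: an ethtool rule whose
 * ring_cookie is RX_CLS_FLOW_DISC becomes drop_packet; a tc flower
 * rule selecting a tc sets override_tc with that tc's tqp offset and
 * size; anything else is steered directly to rule->queue_id.
 */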
6054 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
6057 if (!spec || !unused_tuple)
6060 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
6063 *unused_tuple |= BIT(INNER_SRC_IP);
6066 *unused_tuple |= BIT(INNER_DST_IP);
6069 *unused_tuple |= BIT(INNER_SRC_PORT);
6072 *unused_tuple |= BIT(INNER_DST_PORT);
6075 *unused_tuple |= BIT(INNER_IP_TOS);
6080 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
6083 if (!spec || !unused_tuple)
6086 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6087 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
6090 *unused_tuple |= BIT(INNER_SRC_IP);
6093 *unused_tuple |= BIT(INNER_DST_IP);
6096 *unused_tuple |= BIT(INNER_IP_TOS);
6099 *unused_tuple |= BIT(INNER_IP_PROTO);
6101 if (spec->l4_4_bytes)
6104 if (spec->ip_ver != ETH_RX_NFC_IP4)
6110 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
6113 if (!spec || !unused_tuple)
6116 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
/* check whether the src/dst ip addresses are used */
6119 if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
6120 *unused_tuple |= BIT(INNER_SRC_IP);
6122 if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
6123 *unused_tuple |= BIT(INNER_DST_IP);
6126 *unused_tuple |= BIT(INNER_SRC_PORT);
6129 *unused_tuple |= BIT(INNER_DST_PORT);
6132 *unused_tuple |= BIT(INNER_IP_TOS);
6137 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
6140 if (!spec || !unused_tuple)
6143 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6144 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
/* check whether the src/dst ip addresses are used */
6147 if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
6148 *unused_tuple |= BIT(INNER_SRC_IP);
6150 if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
6151 *unused_tuple |= BIT(INNER_DST_IP);
6153 if (!spec->l4_proto)
6154 *unused_tuple |= BIT(INNER_IP_PROTO);
6157 *unused_tuple |= BIT(INNER_IP_TOS);
6159 if (spec->l4_4_bytes)
6165 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
6167 if (!spec || !unused_tuple)
6170 *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
6171 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
6172 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
6174 if (is_zero_ether_addr(spec->h_source))
6175 *unused_tuple |= BIT(INNER_SRC_MAC);
6177 if (is_zero_ether_addr(spec->h_dest))
6178 *unused_tuple |= BIT(INNER_DST_MAC);
6181 *unused_tuple |= BIT(INNER_ETH_TYPE);
6186 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
6187 struct ethtool_rx_flow_spec *fs,
6190 if (fs->flow_type & FLOW_EXT) {
6191 if (fs->h_ext.vlan_etype) {
6192 dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
6196 if (!fs->h_ext.vlan_tci)
6197 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6199 if (fs->m_ext.vlan_tci &&
6200 be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
6201 dev_err(&hdev->pdev->dev,
6202 "failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n",
6203 ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
6207 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6210 if (fs->flow_type & FLOW_MAC_EXT) {
6211 if (hdev->fd_cfg.fd_mode !=
6212 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6213 dev_err(&hdev->pdev->dev,
6214 "FLOW_MAC_EXT is not supported in current fd mode!\n");
6218 if (is_zero_ether_addr(fs->h_ext.h_dest))
6219 *unused_tuple |= BIT(INNER_DST_MAC);
6221 *unused_tuple &= ~BIT(INNER_DST_MAC);
6227 static int hclge_fd_get_user_def_layer(u32 flow_type, u32 *unused_tuple,
6228 struct hclge_fd_user_def_info *info)
6230 switch (flow_type) {
6232 info->layer = HCLGE_FD_USER_DEF_L2;
6233 *unused_tuple &= ~BIT(INNER_L2_RSV);
6236 case IPV6_USER_FLOW:
6237 info->layer = HCLGE_FD_USER_DEF_L3;
6238 *unused_tuple &= ~BIT(INNER_L3_RSV);
6244 info->layer = HCLGE_FD_USER_DEF_L4;
6245 *unused_tuple &= ~BIT(INNER_L4_RSV);
6254 static bool hclge_fd_is_user_def_all_masked(struct ethtool_rx_flow_spec *fs)
6256 return be32_to_cpu(fs->m_ext.data[1] | fs->m_ext.data[0]) == 0;
6259 static int hclge_fd_parse_user_def_field(struct hclge_dev *hdev,
6260 struct ethtool_rx_flow_spec *fs,
6262 struct hclge_fd_user_def_info *info)
6264 u32 tuple_active = hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1].tuple_active;
6265 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6266 u16 data, offset, data_mask, offset_mask;
6269 info->layer = HCLGE_FD_USER_DEF_NONE;
6270 *unused_tuple |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
6272 if (!(fs->flow_type & FLOW_EXT) || hclge_fd_is_user_def_all_masked(fs))
/* the user-def data from ethtool is a 64-bit value; bits 0~15 carry
 * the data and bits 32~47 carry the offset.
 */
6278 data = be32_to_cpu(fs->h_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6279 data_mask = be32_to_cpu(fs->m_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6280 offset = be32_to_cpu(fs->h_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6281 offset_mask = be32_to_cpu(fs->m_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
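/* e.g. a rule created with "ethtool -N <dev> flow-type ... user-def
 * 0x00120000abcd" asks to match data 0xabcd (bits 0~15) at offset 0x12
 * (bits 32~47); h_ext.data[0] holds the upper 32 bits and data[1] the
 * lower 32 bits.
 */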
6283 if (!(tuple_active & HCLGE_FD_TUPLE_USER_DEF_TUPLES)) {
6284 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
6288 if (offset > HCLGE_FD_MAX_USER_DEF_OFFSET) {
6289 dev_err(&hdev->pdev->dev,
6290 "user-def offset[%u] should be no more than %u\n",
6291 offset, HCLGE_FD_MAX_USER_DEF_OFFSET);
6295 if (offset_mask != HCLGE_FD_USER_DEF_OFFSET_UNMASK) {
6296 dev_err(&hdev->pdev->dev, "user-def offset can't be masked\n");
6300 ret = hclge_fd_get_user_def_layer(flow_type, unused_tuple, info);
6302 dev_err(&hdev->pdev->dev,
6303 "unsupported flow type for user-def bytes, ret = %d\n",
6309 info->data_mask = data_mask;
6310 info->offset = offset;
6315 static int hclge_fd_check_spec(struct hclge_dev *hdev,
6316 struct ethtool_rx_flow_spec *fs,
6318 struct hclge_fd_user_def_info *info)
6323 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6324 dev_err(&hdev->pdev->dev,
6325 "failed to config fd rules, invalid rule location: %u, max is %u\n.",
6327 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
6331 ret = hclge_fd_parse_user_def_field(hdev, fs, unused_tuple, info);
6335 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6336 switch (flow_type) {
6340 ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
6344 ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
6350 ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
6353 case IPV6_USER_FLOW:
6354 ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
6358 if (hdev->fd_cfg.fd_mode !=
6359 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6360 dev_err(&hdev->pdev->dev,
6361 "ETHER_FLOW is not supported in current fd mode!\n");
6365 ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
6369 dev_err(&hdev->pdev->dev,
6370 "unsupported protocol type, protocol type = %#x\n",
6376 dev_err(&hdev->pdev->dev,
6377 "failed to check flow union tuple, ret = %d\n",
6382 return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
6385 static void hclge_fd_get_tcpip4_tuple(struct hclge_dev *hdev,
6386 struct ethtool_rx_flow_spec *fs,
6387 struct hclge_fd_rule *rule, u8 ip_proto)
6389 rule->tuples.src_ip[IPV4_INDEX] =
6390 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
6391 rule->tuples_mask.src_ip[IPV4_INDEX] =
6392 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
6394 rule->tuples.dst_ip[IPV4_INDEX] =
6395 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
6396 rule->tuples_mask.dst_ip[IPV4_INDEX] =
6397 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
6399 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
6400 rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
6402 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
6403 rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
6405 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
6406 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
6408 rule->tuples.ether_proto = ETH_P_IP;
6409 rule->tuples_mask.ether_proto = 0xFFFF;
6411 rule->tuples.ip_proto = ip_proto;
6412 rule->tuples_mask.ip_proto = 0xFF;
6415 static void hclge_fd_get_ip4_tuple(struct hclge_dev *hdev,
6416 struct ethtool_rx_flow_spec *fs,
6417 struct hclge_fd_rule *rule)
6419 rule->tuples.src_ip[IPV4_INDEX] =
6420 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
6421 rule->tuples_mask.src_ip[IPV4_INDEX] =
6422 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
6424 rule->tuples.dst_ip[IPV4_INDEX] =
6425 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
6426 rule->tuples_mask.dst_ip[IPV4_INDEX] =
6427 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
6429 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
6430 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
6432 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
6433 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
6435 rule->tuples.ether_proto = ETH_P_IP;
6436 rule->tuples_mask.ether_proto = 0xFFFF;
6439 static void hclge_fd_get_tcpip6_tuple(struct hclge_dev *hdev,
6440 struct ethtool_rx_flow_spec *fs,
6441 struct hclge_fd_rule *rule, u8 ip_proto)
6443 be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.tcp_ip6_spec.ip6src,
6445 be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.tcp_ip6_spec.ip6src,
6448 be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.tcp_ip6_spec.ip6dst,
6450 be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.tcp_ip6_spec.ip6dst,
6453 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
6454 rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
6456 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
6457 rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
6459 rule->tuples.ether_proto = ETH_P_IPV6;
6460 rule->tuples_mask.ether_proto = 0xFFFF;
6462 rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
6463 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;
6465 rule->tuples.ip_proto = ip_proto;
6466 rule->tuples_mask.ip_proto = 0xFF;
6469 static void hclge_fd_get_ip6_tuple(struct hclge_dev *hdev,
6470 struct ethtool_rx_flow_spec *fs,
6471 struct hclge_fd_rule *rule)
6473 be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.usr_ip6_spec.ip6src,
6475 be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.usr_ip6_spec.ip6src,
6478 be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.usr_ip6_spec.ip6dst,
6480 be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.usr_ip6_spec.ip6dst,
6483 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
6484 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
6486 rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
6487 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;
6489 rule->tuples.ether_proto = ETH_P_IPV6;
6490 rule->tuples_mask.ether_proto = 0xFFFF;
6493 static void hclge_fd_get_ether_tuple(struct hclge_dev *hdev,
6494 struct ethtool_rx_flow_spec *fs,
6495 struct hclge_fd_rule *rule)
6497 ether_addr_copy(rule->tuples.src_mac, fs->h_u.ether_spec.h_source);
6498 ether_addr_copy(rule->tuples_mask.src_mac, fs->m_u.ether_spec.h_source);
6500 ether_addr_copy(rule->tuples.dst_mac, fs->h_u.ether_spec.h_dest);
6501 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_u.ether_spec.h_dest);
6503 rule->tuples.ether_proto = be16_to_cpu(fs->h_u.ether_spec.h_proto);
6504 rule->tuples_mask.ether_proto = be16_to_cpu(fs->m_u.ether_spec.h_proto);
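/* Copy the parsed user-defined field into the per-layer tuple. For the
 * L4 payload case the 16-bit data occupies the high half of the 32-bit
 * l4_user_def field, hence the shift by 16 below.
 */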
6507 static void hclge_fd_get_user_def_tuple(struct hclge_fd_user_def_info *info,
6508 struct hclge_fd_rule *rule)
6510 switch (info->layer) {
6511 case HCLGE_FD_USER_DEF_L2:
6512 rule->tuples.l2_user_def = info->data;
6513 rule->tuples_mask.l2_user_def = info->data_mask;
6515 case HCLGE_FD_USER_DEF_L3:
6516 rule->tuples.l3_user_def = info->data;
6517 rule->tuples_mask.l3_user_def = info->data_mask;
6519 case HCLGE_FD_USER_DEF_L4:
6520 rule->tuples.l4_user_def = (u32)info->data << 16;
6521 rule->tuples_mask.l4_user_def = (u32)info->data_mask << 16;
6527 rule->ep.user_def = *info;
6530 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
6531 struct ethtool_rx_flow_spec *fs,
6532 struct hclge_fd_rule *rule,
6533 struct hclge_fd_user_def_info *info)
6535 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6537 switch (flow_type) {
6539 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_SCTP);
6542 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_TCP);
6545 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_UDP);
6548 hclge_fd_get_ip4_tuple(hdev, fs, rule);
6551 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_SCTP);
6554 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_TCP);
6557 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_UDP);
6559 case IPV6_USER_FLOW:
6560 hclge_fd_get_ip6_tuple(hdev, fs, rule);
6563 hclge_fd_get_ether_tuple(hdev, fs, rule);
6569 if (fs->flow_type & FLOW_EXT) {
6570 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
6571 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
6572 hclge_fd_get_user_def_tuple(info, rule);
6575 if (fs->flow_type & FLOW_MAC_EXT) {
6576 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
6577 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
6583 static int hclge_fd_config_rule(struct hclge_dev *hdev,
6584 struct hclge_fd_rule *rule)
6588 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6592 return hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
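/* Common insert path for flow director rules. Rules can come from
 * ethtool (EP), tc flower or aRFS, and these sources are mutually
 * exclusive: a new rule is rejected while a different rule type is
 * active. Existing aRFS rules are cleared before the new rule is
 * written to hardware and recorded under fd_rule_lock.
 */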
6595 static int hclge_add_fd_entry_common(struct hclge_dev *hdev,
6596 struct hclge_fd_rule *rule)
6600 spin_lock_bh(&hdev->fd_rule_lock);
6602 if (hdev->fd_active_type != rule->rule_type &&
6603 (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6604 hdev->fd_active_type == HCLGE_FD_EP_ACTIVE)) {
6605 dev_err(&hdev->pdev->dev,
6606 "mode conflict(new type %d, active type %d), please delete existent rules first\n",
6607 rule->rule_type, hdev->fd_active_type);
6608 spin_unlock_bh(&hdev->fd_rule_lock);
6612 ret = hclge_fd_check_user_def_refcnt(hdev, rule);
6616 ret = hclge_clear_arfs_rules(hdev);
6620 ret = hclge_fd_config_rule(hdev, rule);
6624 rule->state = HCLGE_FD_ACTIVE;
6625 hdev->fd_active_type = rule->rule_type;
6626 hclge_update_fd_list(hdev, rule->state, rule->location, rule);
6629 spin_unlock_bh(&hdev->fd_rule_lock);
6633 static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
6635 struct hclge_vport *vport = hclge_get_vport(handle);
6636 struct hclge_dev *hdev = vport->back;
6638 return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
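/* Decode the ethtool ring_cookie: RX_CLS_FLOW_DISC selects the drop
 * action; otherwise the cookie packs a vf id (0 means the PF itself)
 * and a queue id, extracted with the ethtool_get_flow_spec_ring*()
 * helpers, and the action is to steer to the selected queue.
 */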
6641 static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
6642 u16 *vport_id, u8 *action, u16 *queue_id)
6644 struct hclge_vport *vport = hdev->vport;
6646 if (ring_cookie == RX_CLS_FLOW_DISC) {
6647 *action = HCLGE_FD_ACTION_DROP_PACKET;
6649 u32 ring = ethtool_get_flow_spec_ring(ring_cookie);
6650 u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
6653 /* To stay consistent with the user's configuration, subtract 1 when
6654  * printing 'vf', because the vf id from ethtool is the vf number plus 1.
6655  */
6656 if (vf > hdev->num_req_vfs) {
6657 dev_err(&hdev->pdev->dev,
6658 "Error: vf id (%u) should be less than %u\n",
6659 vf - 1, hdev->num_req_vfs);
6663 *vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
6664 tqps = hdev->vport[vf].nic.kinfo.num_tqps;
6667 dev_err(&hdev->pdev->dev,
6668 "Error: queue id (%u) > max tqp num (%u)\n",
6673 *action = HCLGE_FD_ACTION_SELECT_QUEUE;
6680 static int hclge_add_fd_entry(struct hnae3_handle *handle,
6681 struct ethtool_rxnfc *cmd)
6683 struct hclge_vport *vport = hclge_get_vport(handle);
6684 struct hclge_dev *hdev = vport->back;
6685 struct hclge_fd_user_def_info info;
6686 u16 dst_vport_id = 0, q_index = 0;
6687 struct ethtool_rx_flow_spec *fs;
6688 struct hclge_fd_rule *rule;
6693 if (!hnae3_dev_fd_supported(hdev)) {
6694 dev_err(&hdev->pdev->dev,
6695 "flow table director is not supported\n");
6700 dev_err(&hdev->pdev->dev,
6701 "please enable flow director first\n");
6705 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6707 ret = hclge_fd_check_spec(hdev, fs, &unused, &info);
6711 ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &dst_vport_id,
6716 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
6720 ret = hclge_fd_get_tuple(hdev, fs, rule, &info);
6726 rule->flow_type = fs->flow_type;
6727 rule->location = fs->location;
6728 rule->unused_tuple = unused;
6729 rule->vf_id = dst_vport_id;
6730 rule->queue_id = q_index;
6731 rule->action = action;
6732 rule->rule_type = HCLGE_FD_EP_ACTIVE;
6734 ret = hclge_add_fd_entry_common(hdev, rule);
6741 static int hclge_del_fd_entry(struct hnae3_handle *handle,
6742 struct ethtool_rxnfc *cmd)
6744 struct hclge_vport *vport = hclge_get_vport(handle);
6745 struct hclge_dev *hdev = vport->back;
6746 struct ethtool_rx_flow_spec *fs;
6749 if (!hnae3_dev_fd_supported(hdev))
6752 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6754 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6757 spin_lock_bh(&hdev->fd_rule_lock);
6758 if (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6759 !test_bit(fs->location, hdev->fd_bmap)) {
6760 dev_err(&hdev->pdev->dev,
6761 "Delete fail, rule %u is inexistent\n", fs->location);
6762 spin_unlock_bh(&hdev->fd_rule_lock);
6766 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
6771 hclge_update_fd_list(hdev, HCLGE_FD_DELETED, fs->location, NULL);
6774 spin_unlock_bh(&hdev->fd_rule_lock);
6778 static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev,
6781 struct hclge_fd_rule *rule;
6782 struct hlist_node *node;
6785 if (!hnae3_dev_fd_supported(hdev))
6788 spin_lock_bh(&hdev->fd_rule_lock);
6790 for_each_set_bit(location, hdev->fd_bmap,
6791 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6792 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
6796 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
6798 hlist_del(&rule->rule_node);
6801 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
6802 hdev->hclge_fd_rule_num = 0;
6803 bitmap_zero(hdev->fd_bmap,
6804 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6807 spin_unlock_bh(&hdev->fd_rule_lock);
6810 static void hclge_del_all_fd_entries(struct hclge_dev *hdev)
6812 hclge_clear_fd_rules_in_list(hdev, true);
6813 hclge_fd_disable_user_def(hdev);
6816 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
6818 struct hclge_vport *vport = hclge_get_vport(handle);
6819 struct hclge_dev *hdev = vport->back;
6820 struct hclge_fd_rule *rule;
6821 struct hlist_node *node;
6823 /* Return ok here, because reset error handling will check this
6824  * return value. If error is returned here, the reset process will
6825  * fail.
6826  */
6827 if (!hnae3_dev_fd_supported(hdev))
6830 /* if fd is disabled, there is no need to restore it after reset */
6834 spin_lock_bh(&hdev->fd_rule_lock);
6835 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6836 if (rule->state == HCLGE_FD_ACTIVE)
6837 rule->state = HCLGE_FD_TO_ADD;
6839 spin_unlock_bh(&hdev->fd_rule_lock);
6840 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
6845 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
6846 struct ethtool_rxnfc *cmd)
6848 struct hclge_vport *vport = hclge_get_vport(handle);
6849 struct hclge_dev *hdev = vport->back;
6851 if (!hnae3_dev_fd_supported(hdev) || hclge_is_cls_flower_active(handle))
6854 cmd->rule_cnt = hdev->hclge_fd_rule_num;
6855 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
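/* The hclge_fd_get_*_info() helpers below are the reverse of the
 * hclge_fd_get_*_tuple() parsers: they translate a stored rule back
 * into an ethtool spec, reporting a zero mask for any tuple recorded
 * as unused.
 */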
6860 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
6861 struct ethtool_tcpip4_spec *spec,
6862 struct ethtool_tcpip4_spec *spec_mask)
6864 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6865 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6866 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6868 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6869 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6870 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6872 spec->psrc = cpu_to_be16(rule->tuples.src_port);
6873 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6874 0 : cpu_to_be16(rule->tuples_mask.src_port);
6876 spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6877 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6878 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6880 spec->tos = rule->tuples.ip_tos;
6881 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6882 0 : rule->tuples_mask.ip_tos;
6885 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
6886 struct ethtool_usrip4_spec *spec,
6887 struct ethtool_usrip4_spec *spec_mask)
6889 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6890 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6891 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6893 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6894 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6895 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6897 spec->tos = rule->tuples.ip_tos;
6898 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6899 0 : rule->tuples_mask.ip_tos;
6901 spec->proto = rule->tuples.ip_proto;
6902 spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6903 0 : rule->tuples_mask.ip_proto;
6905 spec->ip_ver = ETH_RX_NFC_IP4;
6908 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6909 struct ethtool_tcpip6_spec *spec,
6910 struct ethtool_tcpip6_spec *spec_mask)
6912 cpu_to_be32_array(spec->ip6src,
6913 rule->tuples.src_ip, IPV6_SIZE);
6914 cpu_to_be32_array(spec->ip6dst,
6915 rule->tuples.dst_ip, IPV6_SIZE);
6916 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6917 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6919 cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6922 if (rule->unused_tuple & BIT(INNER_DST_IP))
6923 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6925 cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6928 spec->tclass = rule->tuples.ip_tos;
6929 spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6930 0 : rule->tuples_mask.ip_tos;
6932 spec->psrc = cpu_to_be16(rule->tuples.src_port);
6933 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6934 0 : cpu_to_be16(rule->tuples_mask.src_port);
6936 spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6937 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6938 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6941 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6942 struct ethtool_usrip6_spec *spec,
6943 struct ethtool_usrip6_spec *spec_mask)
6945 cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6946 cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6947 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6948 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6950 cpu_to_be32_array(spec_mask->ip6src,
6951 rule->tuples_mask.src_ip, IPV6_SIZE);
6953 if (rule->unused_tuple & BIT(INNER_DST_IP))
6954 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6956 cpu_to_be32_array(spec_mask->ip6dst,
6957 rule->tuples_mask.dst_ip, IPV6_SIZE);
6959 spec->tclass = rule->tuples.ip_tos;
6960 spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6961 0 : rule->tuples_mask.ip_tos;
6963 spec->l4_proto = rule->tuples.ip_proto;
6964 spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6965 0 : rule->tuples_mask.ip_proto;
6968 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6969 struct ethhdr *spec,
6970 struct ethhdr *spec_mask)
6972 ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6973 ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6975 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6976 eth_zero_addr(spec_mask->h_source);
6978 ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6980 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6981 eth_zero_addr(spec_mask->h_dest);
6983 ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6985 spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6986 spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6987 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6990 static void hclge_fd_get_user_def_info(struct ethtool_rx_flow_spec *fs,
6991 struct hclge_fd_rule *rule)
6993 if ((rule->unused_tuple & HCLGE_FD_TUPLE_USER_DEF_TUPLES) ==
6994 HCLGE_FD_TUPLE_USER_DEF_TUPLES) {
6995 fs->h_ext.data[0] = 0;
6996 fs->h_ext.data[1] = 0;
6997 fs->m_ext.data[0] = 0;
6998 fs->m_ext.data[1] = 0;
7000 fs->h_ext.data[0] = cpu_to_be32(rule->ep.user_def.offset);
7001 fs->h_ext.data[1] = cpu_to_be32(rule->ep.user_def.data);
7002 fs->m_ext.data[0] =
7003 cpu_to_be32(HCLGE_FD_USER_DEF_OFFSET_UNMASK);
7004 fs->m_ext.data[1] = cpu_to_be32(rule->ep.user_def.data_mask);
7008 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
7009 struct hclge_fd_rule *rule)
7011 if (fs->flow_type & FLOW_EXT) {
7012 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
7013 fs->m_ext.vlan_tci =
7014 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
7015 0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
7017 hclge_fd_get_user_def_info(fs, rule);
7020 if (fs->flow_type & FLOW_MAC_EXT) {
7021 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
7022 if (rule->unused_tuple & BIT(INNER_DST_MAC))
7023 eth_zero_addr(fs->m_u.ether_spec.h_dest);
7025 ether_addr_copy(fs->m_u.ether_spec.h_dest,
7026 rule->tuples_mask.dst_mac);
7030 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
7031 struct ethtool_rxnfc *cmd)
7033 struct hclge_vport *vport = hclge_get_vport(handle);
7034 struct hclge_fd_rule *rule = NULL;
7035 struct hclge_dev *hdev = vport->back;
7036 struct ethtool_rx_flow_spec *fs;
7037 struct hlist_node *node2;
7039 if (!hnae3_dev_fd_supported(hdev))
7042 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
7044 spin_lock_bh(&hdev->fd_rule_lock);
7046 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
7047 if (rule->location >= fs->location)
7051 if (!rule || fs->location != rule->location) {
7052 spin_unlock_bh(&hdev->fd_rule_lock);
7057 fs->flow_type = rule->flow_type;
7058 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
7062 hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
7063 &fs->m_u.tcp_ip4_spec);
7066 hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
7067 &fs->m_u.usr_ip4_spec);
7072 hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
7073 &fs->m_u.tcp_ip6_spec);
7075 case IPV6_USER_FLOW:
7076 hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
7077 &fs->m_u.usr_ip6_spec);
7079 /* The flow type of the fd rule has been checked before it was added
7080  * to the rule list. As all other flow types have been handled above,
7081  * it must be ETHER_FLOW in the default case.
7082  */
7083 default:
7084 hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
7085 &fs->m_u.ether_spec);
7089 hclge_fd_get_ext_info(fs, rule);
7091 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
7092 fs->ring_cookie = RX_CLS_FLOW_DISC;
7096 fs->ring_cookie = rule->queue_id;
7097 vf_id = rule->vf_id;
7098 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
7099 fs->ring_cookie |= vf_id;
7102 spin_unlock_bh(&hdev->fd_rule_lock);
7107 static int hclge_get_all_rules(struct hnae3_handle *handle,
7108 struct ethtool_rxnfc *cmd, u32 *rule_locs)
7110 struct hclge_vport *vport = hclge_get_vport(handle);
7111 struct hclge_dev *hdev = vport->back;
7112 struct hclge_fd_rule *rule;
7113 struct hlist_node *node2;
7116 if (!hnae3_dev_fd_supported(hdev))
7119 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
7121 spin_lock_bh(&hdev->fd_rule_lock);
7122 hlist_for_each_entry_safe(rule, node2,
7123 &hdev->fd_rule_list, rule_node) {
7124 if (cnt == cmd->rule_cnt) {
7125 spin_unlock_bh(&hdev->fd_rule_lock);
7129 if (rule->state == HCLGE_FD_TO_DEL)
7132 rule_locs[cnt] = rule->location;
7136 spin_unlock_bh(&hdev->fd_rule_lock);
7138 cmd->rule_cnt = cnt;
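/* Extract the match tuples aRFS cares about from the dissected flow
 * keys. IPv4 addresses are stored at the last index of the IPv6-sized
 * address array so both families share one representation.
 */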
7143 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
7144 struct hclge_fd_rule_tuples *tuples)
7146 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
7147 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
7149 tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
7150 tuples->ip_proto = fkeys->basic.ip_proto;
7151 tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
7153 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
7154 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
7155 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
7159 for (i = 0; i < IPV6_SIZE; i++) {
7160 tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
7161 tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
7166 /* traverse all rules, check whether an existing rule has the same tuples */
7167 static struct hclge_fd_rule *
7168 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
7169 const struct hclge_fd_rule_tuples *tuples)
7171 struct hclge_fd_rule *rule = NULL;
7172 struct hlist_node *node;
7174 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7175 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
7182 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
7183 struct hclge_fd_rule *rule)
7185 rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
7186 BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
7187 BIT(INNER_SRC_PORT);
7190 rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
7191 rule->state = HCLGE_FD_TO_ADD;
7192 if (tuples->ether_proto == ETH_P_IP) {
7193 if (tuples->ip_proto == IPPROTO_TCP)
7194 rule->flow_type = TCP_V4_FLOW;
7196 rule->flow_type = UDP_V4_FLOW;
7198 if (tuples->ip_proto == IPPROTO_TCP)
7199 rule->flow_type = TCP_V6_FLOW;
7201 rule->flow_type = UDP_V6_FLOW;
7203 memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
7204 memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
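/* aRFS entry point: look up a rule with the same tuples; if none
 * exists, allocate a free location and queue a new rule, otherwise
 * retarget the existing rule to the new queue. The hardware write is
 * deferred to the periodic service task.
 */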
7207 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
7208 u16 flow_id, struct flow_keys *fkeys)
7210 struct hclge_vport *vport = hclge_get_vport(handle);
7211 struct hclge_fd_rule_tuples new_tuples = {};
7212 struct hclge_dev *hdev = vport->back;
7213 struct hclge_fd_rule *rule;
7216 if (!hnae3_dev_fd_supported(hdev))
7219 /* when there is already an fd rule added by the user,
7220  * arfs should not work
7221  */
7222 spin_lock_bh(&hdev->fd_rule_lock);
7223 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE &&
7224 hdev->fd_active_type != HCLGE_FD_RULE_NONE) {
7225 spin_unlock_bh(&hdev->fd_rule_lock);
7229 hclge_fd_get_flow_tuples(fkeys, &new_tuples);
7231 /* check whether a flow director filter already exists for this flow:
7232  * if not, create a new filter for it;
7233  * if a filter exists with a different queue id, modify the filter;
7234  * if a filter exists with the same queue id, do nothing
7235  */
7236 rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
7238 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
7239 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7240 spin_unlock_bh(&hdev->fd_rule_lock);
7244 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
7246 spin_unlock_bh(&hdev->fd_rule_lock);
7250 rule->location = bit_id;
7251 rule->arfs.flow_id = flow_id;
7252 rule->queue_id = queue_id;
7253 hclge_fd_build_arfs_rule(&new_tuples, rule);
7254 hclge_update_fd_list(hdev, rule->state, rule->location, rule);
7255 hdev->fd_active_type = HCLGE_FD_ARFS_ACTIVE;
7256 } else if (rule->queue_id != queue_id) {
7257 rule->queue_id = queue_id;
7258 rule->state = HCLGE_FD_TO_ADD;
7259 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7260 hclge_task_schedule(hdev, 0);
7262 spin_unlock_bh(&hdev->fd_rule_lock);
7263 return rule->location;
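/* Periodic aRFS expiry: ask the core (rps_may_expire_flow) whether each
 * active aRFS rule is still in use and mark idle ones TO_DEL; the
 * service task later removes them from hardware.
 */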
7266 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
7268 #ifdef CONFIG_RFS_ACCEL
7269 struct hnae3_handle *handle = &hdev->vport[0].nic;
7270 struct hclge_fd_rule *rule;
7271 struct hlist_node *node;
7273 spin_lock_bh(&hdev->fd_rule_lock);
7274 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
7275 spin_unlock_bh(&hdev->fd_rule_lock);
7278 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7279 if (rule->state != HCLGE_FD_ACTIVE)
7281 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
7282 rule->arfs.flow_id, rule->location)) {
7283 rule->state = HCLGE_FD_TO_DEL;
7284 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7287 spin_unlock_bh(&hdev->fd_rule_lock);
7291 /* the caller must hold fd_rule_lock */
7292 static int hclge_clear_arfs_rules(struct hclge_dev *hdev)
7294 #ifdef CONFIG_RFS_ACCEL
7295 struct hclge_fd_rule *rule;
7296 struct hlist_node *node;
7299 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE)
7302 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7303 switch (rule->state) {
7304 case HCLGE_FD_TO_DEL:
7305 case HCLGE_FD_ACTIVE:
7306 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7307 rule->location, NULL, false);
7311 case HCLGE_FD_TO_ADD:
7312 hclge_fd_dec_rule_cnt(hdev, rule->location);
7313 hlist_del(&rule->rule_node);
7320 hclge_sync_fd_state(hdev);
7326 static void hclge_get_cls_key_basic(const struct flow_rule *flow,
7327 struct hclge_fd_rule *rule)
7329 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) {
7330 struct flow_match_basic match;
7331 u16 ethtype_key, ethtype_mask;
7333 flow_rule_match_basic(flow, &match);
7334 ethtype_key = ntohs(match.key->n_proto);
7335 ethtype_mask = ntohs(match.mask->n_proto);
7337 if (ethtype_key == ETH_P_ALL) {
7341 rule->tuples.ether_proto = ethtype_key;
7342 rule->tuples_mask.ether_proto = ethtype_mask;
7343 rule->tuples.ip_proto = match.key->ip_proto;
7344 rule->tuples_mask.ip_proto = match.mask->ip_proto;
7346 rule->unused_tuple |= BIT(INNER_IP_PROTO);
7347 rule->unused_tuple |= BIT(INNER_ETH_TYPE);
7351 static void hclge_get_cls_key_mac(const struct flow_rule *flow,
7352 struct hclge_fd_rule *rule)
7354 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
7355 struct flow_match_eth_addrs match;
7357 flow_rule_match_eth_addrs(flow, &match);
7358 ether_addr_copy(rule->tuples.dst_mac, match.key->dst);
7359 ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst);
7360 ether_addr_copy(rule->tuples.src_mac, match.key->src);
7361 ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src);
7363 rule->unused_tuple |= BIT(INNER_DST_MAC);
7364 rule->unused_tuple |= BIT(INNER_SRC_MAC);
7368 static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
7369 struct hclge_fd_rule *rule)
7371 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
7372 struct flow_match_vlan match;
7374 flow_rule_match_vlan(flow, &match);
7375 rule->tuples.vlan_tag1 = match.key->vlan_id |
7376 (match.key->vlan_priority << VLAN_PRIO_SHIFT);
7377 rule->tuples_mask.vlan_tag1 = match.mask->vlan_id |
7378 (match.mask->vlan_priority << VLAN_PRIO_SHIFT);
7380 rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST);
7384 static void hclge_get_cls_key_ip(const struct flow_rule *flow,
7385 struct hclge_fd_rule *rule)
7389 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) {
7390 struct flow_match_control match;
7392 flow_rule_match_control(flow, &match);
7393 addr_type = match.key->addr_type;
7396 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
7397 struct flow_match_ipv4_addrs match;
7399 flow_rule_match_ipv4_addrs(flow, &match);
7400 rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src);
7401 rule->tuples_mask.src_ip[IPV4_INDEX] =
7402 be32_to_cpu(match.mask->src);
7403 rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst);
7404 rule->tuples_mask.dst_ip[IPV4_INDEX] =
7405 be32_to_cpu(match.mask->dst);
7406 } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
7407 struct flow_match_ipv6_addrs match;
7409 flow_rule_match_ipv6_addrs(flow, &match);
7410 be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
7412 be32_to_cpu_array(rule->tuples_mask.src_ip,
7413 match.mask->src.s6_addr32, IPV6_SIZE);
7414 be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
7416 be32_to_cpu_array(rule->tuples_mask.dst_ip,
7417 match.mask->dst.s6_addr32, IPV6_SIZE);
7419 rule->unused_tuple |= BIT(INNER_SRC_IP);
7420 rule->unused_tuple |= BIT(INNER_DST_IP);
7424 static void hclge_get_cls_key_port(const struct flow_rule *flow,
7425 struct hclge_fd_rule *rule)
7427 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
7428 struct flow_match_ports match;
7430 flow_rule_match_ports(flow, &match);
7432 rule->tuples.src_port = be16_to_cpu(match.key->src);
7433 rule->tuples_mask.src_port = be16_to_cpu(match.mask->src);
7434 rule->tuples.dst_port = be16_to_cpu(match.key->dst);
7435 rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst);
7437 rule->unused_tuple |= BIT(INNER_SRC_PORT);
7438 rule->unused_tuple |= BIT(INNER_DST_PORT);
7442 static int hclge_parse_cls_flower(struct hclge_dev *hdev,
7443 struct flow_cls_offload *cls_flower,
7444 struct hclge_fd_rule *rule)
7446 struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
7447 struct flow_dissector *dissector = flow->match.dissector;
7449 if (dissector->used_keys &
7450 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
7451 BIT(FLOW_DISSECTOR_KEY_BASIC) |
7452 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
7453 BIT(FLOW_DISSECTOR_KEY_VLAN) |
7454 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
7455 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
7456 BIT(FLOW_DISSECTOR_KEY_PORTS))) {
7457 dev_err(&hdev->pdev->dev, "unsupported key set: %#x\n",
7458 dissector->used_keys);
7462 hclge_get_cls_key_basic(flow, rule);
7463 hclge_get_cls_key_mac(flow, rule);
7464 hclge_get_cls_key_vlan(flow, rule);
7465 hclge_get_cls_key_ip(flow, rule);
7466 hclge_get_cls_key_port(flow, rule);
7471 static int hclge_check_cls_flower(struct hclge_dev *hdev,
7472 struct flow_cls_offload *cls_flower, int tc)
7474 u32 prio = cls_flower->common.prio;
7476 if (tc < 0 || tc > hdev->tc_max) {
7477 dev_err(&hdev->pdev->dev, "invalid traffic class\n");
7482 prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7483 dev_err(&hdev->pdev->dev,
7484 "prio %u should be in range[1, %u]\n",
7485 prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
7489 if (test_bit(prio - 1, hdev->fd_bmap)) {
7490 dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio);
7496 static int hclge_add_cls_flower(struct hnae3_handle *handle,
7497 struct flow_cls_offload *cls_flower,
7500 struct hclge_vport *vport = hclge_get_vport(handle);
7501 struct hclge_dev *hdev = vport->back;
7502 struct hclge_fd_rule *rule;
7505 ret = hclge_check_cls_flower(hdev, cls_flower, tc);
7507 dev_err(&hdev->pdev->dev,
7508 "failed to check cls flower params, ret = %d\n", ret);
7512 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
7516 ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
7522 rule->action = HCLGE_FD_ACTION_SELECT_TC;
7523 rule->cls_flower.tc = tc;
7524 rule->location = cls_flower->common.prio - 1;
7526 rule->cls_flower.cookie = cls_flower->cookie;
7527 rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE;
7529 ret = hclge_add_fd_entry_common(hdev, rule);
7536 static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev,
7537 unsigned long cookie)
7539 struct hclge_fd_rule *rule;
7540 struct hlist_node *node;
7542 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7543 if (rule->cls_flower.cookie == cookie)
7550 static int hclge_del_cls_flower(struct hnae3_handle *handle,
7551 struct flow_cls_offload *cls_flower)
7553 struct hclge_vport *vport = hclge_get_vport(handle);
7554 struct hclge_dev *hdev = vport->back;
7555 struct hclge_fd_rule *rule;
7558 spin_lock_bh(&hdev->fd_rule_lock);
7560 rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
7562 spin_unlock_bh(&hdev->fd_rule_lock);
7566 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
7569 spin_unlock_bh(&hdev->fd_rule_lock);
7573 hclge_update_fd_list(hdev, HCLGE_FD_DELETED, rule->location, NULL);
7574 spin_unlock_bh(&hdev->fd_rule_lock);
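/* Service-task worker: replay list entries to hardware, programming
 * rules marked TO_ADD and deleting rules marked TO_DEL. On any command
 * failure the HCLGE_STATE_FD_TBL_CHANGED bit is set again so the sync
 * is retried on a later pass.
 */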
7579 static void hclge_sync_fd_list(struct hclge_dev *hdev, struct hlist_head *hlist)
7581 struct hclge_fd_rule *rule;
7582 struct hlist_node *node;
7585 if (!test_and_clear_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state))
7588 spin_lock_bh(&hdev->fd_rule_lock);
7590 hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
7591 switch (rule->state) {
7592 case HCLGE_FD_TO_ADD:
7593 ret = hclge_fd_config_rule(hdev, rule);
7596 rule->state = HCLGE_FD_ACTIVE;
7598 case HCLGE_FD_TO_DEL:
7599 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7600 rule->location, NULL, false);
7603 hclge_fd_dec_rule_cnt(hdev, rule->location);
7604 hclge_fd_free_node(hdev, rule);
7613 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7615 spin_unlock_bh(&hdev->fd_rule_lock);
7618 static void hclge_sync_fd_table(struct hclge_dev *hdev)
7620 if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) {
7621 bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
7623 hclge_clear_fd_rules_in_list(hdev, clear_list);
7626 hclge_sync_fd_user_def_cfg(hdev, false);
7628 hclge_sync_fd_list(hdev, &hdev->fd_rule_list);
7631 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
7633 struct hclge_vport *vport = hclge_get_vport(handle);
7634 struct hclge_dev *hdev = vport->back;
7636 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
7637 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
7640 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
7642 struct hclge_vport *vport = hclge_get_vport(handle);
7643 struct hclge_dev *hdev = vport->back;
7645 return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
7648 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
7650 struct hclge_vport *vport = hclge_get_vport(handle);
7651 struct hclge_dev *hdev = vport->back;
7653 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
7656 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
7658 struct hclge_vport *vport = hclge_get_vport(handle);
7659 struct hclge_dev *hdev = vport->back;
7661 return hdev->rst_stats.hw_reset_done_cnt;
7664 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
7666 struct hclge_vport *vport = hclge_get_vport(handle);
7667 struct hclge_dev *hdev = vport->back;
7669 hdev->fd_en = enable;
7672 set_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state);
7674 hclge_restore_fd_entries(handle);
7676 hclge_task_schedule(hdev, 0);
7679 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
7681 struct hclge_desc desc;
7682 struct hclge_config_mac_mode_cmd *req =
7683 (struct hclge_config_mac_mode_cmd *)desc.data;
7687 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
7690 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
7691 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
7692 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
7693 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
7694 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
7695 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
7696 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
7697 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
7698 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
7699 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
7702 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7704 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7706 dev_err(&hdev->pdev->dev,
7707 "mac enable fail, ret =%d.\n", ret);
7710 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
7711 u8 switch_param, u8 param_mask)
7713 struct hclge_mac_vlan_switch_cmd *req;
7714 struct hclge_desc desc;
7718 func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
7719 req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
7721 /* read current config parameter */
7722 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
7724 req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
7725 req->func_id = cpu_to_le32(func_id);
7727 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7729 dev_err(&hdev->pdev->dev,
7730 "read mac vlan switch parameter fail, ret = %d\n", ret);
7734 /* modify and write new config parameter */
7735 hclge_cmd_reuse_desc(&desc, false);
7736 req->switch_param = (req->switch_param & param_mask) | switch_param;
7737 req->param_mask = param_mask;
7739 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7741 dev_err(&hdev->pdev->dev,
7742 "set mac vlan switch parameter fail, ret = %d\n", ret);
7746 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
7749 #define HCLGE_PHY_LINK_STATUS_NUM 200
7751 struct phy_device *phydev = hdev->hw.mac.phydev;
7756 ret = phy_read_status(phydev);
7758 dev_err(&hdev->pdev->dev,
7759 "phy update link status fail, ret = %d\n", ret);
7763 if (phydev->link == link_ret)
7766 msleep(HCLGE_LINK_STATUS_MS);
7767 } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
7770 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
7772 #define HCLGE_MAC_LINK_STATUS_NUM 100
7779 ret = hclge_get_mac_link_status(hdev, &link_status);
7782 if (link_status == link_ret)
7785 msleep(HCLGE_LINK_STATUS_MS);
7786 } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
7790 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
7795 link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
7798 hclge_phy_link_status_wait(hdev, link_ret);
7800 return hclge_mac_link_status_wait(hdev, link_ret);
7803 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
7805 struct hclge_config_mac_mode_cmd *req;
7806 struct hclge_desc desc;
7810 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
7811 /* 1 Read out the MAC mode config at first */
7812 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
7813 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7815 dev_err(&hdev->pdev->dev,
7816 "mac loopback get fail, ret =%d.\n", ret);
7820 /* 2 Then setup the loopback flag */
7821 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
7822 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
7824 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7826 /* 3 Config mac work mode with loopback flag
7827  * and its original configuration parameters
7828  */
7829 hclge_cmd_reuse_desc(&desc, false);
7830 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7832 dev_err(&hdev->pdev->dev,
7833 "mac loopback set fail, ret =%d.\n", ret);
7837 static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
7838 enum hnae3_loop loop_mode)
7840 #define HCLGE_COMMON_LB_RETRY_MS 10
7841 #define HCLGE_COMMON_LB_RETRY_NUM 100
7843 struct hclge_common_lb_cmd *req;
7844 struct hclge_desc desc;
7848 req = (struct hclge_common_lb_cmd *)desc.data;
7849 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false);
7851 switch (loop_mode) {
7852 case HNAE3_LOOP_SERIAL_SERDES:
7853 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
7855 case HNAE3_LOOP_PARALLEL_SERDES:
7856 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
7858 case HNAE3_LOOP_PHY:
7859 loop_mode_b = HCLGE_CMD_GE_PHY_INNER_LOOP_B;
7862 dev_err(&hdev->pdev->dev,
7863 "unsupported common loopback mode %d\n", loop_mode);
7868 req->enable = loop_mode_b;
7869 req->mask = loop_mode_b;
7871 req->mask = loop_mode_b;
7874 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7876 dev_err(&hdev->pdev->dev,
7877 "common loopback set fail, ret = %d\n", ret);
7882 msleep(HCLGE_COMMON_LB_RETRY_MS);
7883 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK,
7885 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7887 dev_err(&hdev->pdev->dev,
7888 "common loopback get, ret = %d\n", ret);
7891 } while (++i < HCLGE_COMMON_LB_RETRY_NUM &&
7892 !(req->result & HCLGE_CMD_COMMON_LB_DONE_B));
7894 if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) {
7895 dev_err(&hdev->pdev->dev, "common loopback set timeout\n");
7897 } else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) {
7898 dev_err(&hdev->pdev->dev, "common loopback set failed in fw\n");
7904 static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en,
7905 enum hnae3_loop loop_mode)
7909 ret = hclge_cfg_common_loopback(hdev, en, loop_mode);
7913 hclge_cfg_mac_mode(hdev, en);
7915 ret = hclge_mac_phy_link_status_wait(hdev, en, false);
7917 dev_err(&hdev->pdev->dev,
7918 "serdes loopback config mac mode timeout\n");
7923 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
7924 struct phy_device *phydev)
7928 if (!phydev->suspended) {
7929 ret = phy_suspend(phydev);
7934 ret = phy_resume(phydev);
7938 return phy_loopback(phydev, true);
7941 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
7942 struct phy_device *phydev)
7946 ret = phy_loopback(phydev, false);
7950 return phy_suspend(phydev);
7953 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
7955 struct phy_device *phydev = hdev->hw.mac.phydev;
7959 if (hnae3_dev_phy_imp_supported(hdev))
7960 return hclge_set_common_loopback(hdev, en,
7966 ret = hclge_enable_phy_loopback(hdev, phydev);
7968 ret = hclge_disable_phy_loopback(hdev, phydev);
7970 dev_err(&hdev->pdev->dev,
7971 "set phy loopback fail, ret = %d\n", ret);
7975 hclge_cfg_mac_mode(hdev, en);
7977 ret = hclge_mac_phy_link_status_wait(hdev, en, true);
7979 dev_err(&hdev->pdev->dev,
7980 "phy loopback config mac mode timeout\n");
7985 static int hclge_tqp_enable_cmd_send(struct hclge_dev *hdev, u16 tqp_id,
7986 u16 stream_id, bool enable)
7988 struct hclge_desc desc;
7989 struct hclge_cfg_com_tqp_queue_cmd *req =
7990 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
7992 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
7993 req->tqp_id = cpu_to_le16(tqp_id);
7994 req->stream_id = cpu_to_le16(stream_id);
7996 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
7998 return hclge_cmd_send(&hdev->hw, &desc, 1);
8001 static int hclge_tqp_enable(struct hnae3_handle *handle, bool enable)
8003 struct hclge_vport *vport = hclge_get_vport(handle);
8004 struct hclge_dev *hdev = vport->back;
8008 for (i = 0; i < handle->kinfo.num_tqps; i++) {
8009 ret = hclge_tqp_enable_cmd_send(hdev, i, 0, enable);
8016 static int hclge_set_loopback(struct hnae3_handle *handle,
8017 enum hnae3_loop loop_mode, bool en)
8019 struct hclge_vport *vport = hclge_get_vport(handle);
8020 struct hclge_dev *hdev = vport->back;
8023 /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
8024 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
8025 * the same, the packets are looped back in the SSU. If SSU loopback
8026 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
8028 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
8029 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
8031 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
8032 HCLGE_SWITCH_ALW_LPBK_MASK);
8037 switch (loop_mode) {
8038 case HNAE3_LOOP_APP:
8039 ret = hclge_set_app_loopback(hdev, en);
8041 case HNAE3_LOOP_SERIAL_SERDES:
8042 case HNAE3_LOOP_PARALLEL_SERDES:
8043 ret = hclge_set_common_loopback(hdev, en, loop_mode);
8045 case HNAE3_LOOP_PHY:
8046 ret = hclge_set_phy_loopback(hdev, en);
8050 dev_err(&hdev->pdev->dev,
8051 "loop_mode %d is not supported\n", loop_mode);
8058 ret = hclge_tqp_enable(handle, en);
8060 dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n",
8061 en ? "enable" : "disable", ret);
8066 static int hclge_set_default_loopback(struct hclge_dev *hdev)
8070 ret = hclge_set_app_loopback(hdev, false);
8074 ret = hclge_cfg_common_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
8078 return hclge_cfg_common_loopback(hdev, false,
8079 HNAE3_LOOP_PARALLEL_SERDES);
8082 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
8084 struct hclge_vport *vport = hclge_get_vport(handle);
8085 struct hnae3_knic_private_info *kinfo;
8086 struct hnae3_queue *queue;
8087 struct hclge_tqp *tqp;
8090 kinfo = &vport->nic.kinfo;
8091 for (i = 0; i < kinfo->num_tqps; i++) {
8092 queue = handle->kinfo.tqp[i];
8093 tqp = container_of(queue, struct hclge_tqp, q);
8094 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
8098 static void hclge_flush_link_update(struct hclge_dev *hdev)
8100 #define HCLGE_FLUSH_LINK_TIMEOUT 100000
8102 unsigned long last = hdev->serv_processed_cnt;
8105 while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
8106 i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
8107 last == hdev->serv_processed_cnt)
8111 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
8113 struct hclge_vport *vport = hclge_get_vport(handle);
8114 struct hclge_dev *hdev = vport->back;
8117 hclge_task_schedule(hdev, 0);
8119 /* Set the DOWN flag here to disable link updating */
8120 set_bit(HCLGE_STATE_DOWN, &hdev->state);
8122 /* flush memory to make sure DOWN is seen by service task */
8123 smp_mb__before_atomic();
8124 hclge_flush_link_update(hdev);
8128 static int hclge_ae_start(struct hnae3_handle *handle)
8130 struct hclge_vport *vport = hclge_get_vport(handle);
8131 struct hclge_dev *hdev = vport->back;
8134 hclge_cfg_mac_mode(hdev, true);
8135 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
8136 hdev->hw.mac.link = 0;
8138 /* reset tqp stats */
8139 hclge_reset_tqp_stats(handle);
8141 hclge_mac_start_phy(hdev);
8146 static void hclge_ae_stop(struct hnae3_handle *handle)
8148 struct hclge_vport *vport = hclge_get_vport(handle);
8149 struct hclge_dev *hdev = vport->back;
8151 set_bit(HCLGE_STATE_DOWN, &hdev->state);
8152 spin_lock_bh(&hdev->fd_rule_lock);
8153 hclge_clear_arfs_rules(hdev);
8154 spin_unlock_bh(&hdev->fd_rule_lock);
8156 /* If it is not a PF reset or FLR, the firmware will disable the MAC,
8157  * so we only need to stop the phy here.
8158  */
8159 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
8160 hdev->reset_type != HNAE3_FUNC_RESET &&
8161 hdev->reset_type != HNAE3_FLR_RESET) {
8162 hclge_mac_stop_phy(hdev);
8163 hclge_update_link_status(hdev);
8167 hclge_reset_tqp(handle);
8169 hclge_config_mac_tnl_int(hdev, false);
8172 hclge_cfg_mac_mode(hdev, false);
8174 hclge_mac_stop_phy(hdev);
8176 /* reset tqp stats */
8177 hclge_reset_tqp_stats(handle);
8178 hclge_update_link_status(hdev);
8181 int hclge_vport_start(struct hclge_vport *vport)
8183 struct hclge_dev *hdev = vport->back;
8185 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8186 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
8187 vport->last_active_jiffies = jiffies;
8189 if (test_bit(vport->vport_id, hdev->vport_config_block)) {
8190 if (vport->vport_id) {
8191 hclge_restore_mac_table_common(vport);
8192 hclge_restore_vport_vlan_table(vport);
8194 hclge_restore_hw_table(hdev);
8198 clear_bit(vport->vport_id, hdev->vport_config_block);
8203 void hclge_vport_stop(struct hclge_vport *vport)
8205 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8208 static int hclge_client_start(struct hnae3_handle *handle)
8210 struct hclge_vport *vport = hclge_get_vport(handle);
8212 return hclge_vport_start(vport);
8215 static void hclge_client_stop(struct hnae3_handle *handle)
8217 struct hclge_vport *vport = hclge_get_vport(handle);
8219 hclge_vport_stop(vport);
8222 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
8223 u16 cmdq_resp, u8 resp_code,
8224 enum hclge_mac_vlan_tbl_opcode op)
8226 struct hclge_dev *hdev = vport->back;
8229 dev_err(&hdev->pdev->dev,
8230 "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
8235 if (op == HCLGE_MAC_VLAN_ADD) {
8236 if (!resp_code || resp_code == 1)
8238 else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
8239 resp_code == HCLGE_ADD_MC_OVERFLOW)
8242 dev_err(&hdev->pdev->dev,
8243 "add mac addr failed for undefined, code=%u.\n",
8246 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
8249 } else if (resp_code == 1) {
8250 dev_dbg(&hdev->pdev->dev,
8251 "remove mac addr failed for miss.\n");
8255 dev_err(&hdev->pdev->dev,
8256 "remove mac addr failed for undefined, code=%u.\n",
8259 } else if (op == HCLGE_MAC_VLAN_LKUP) {
8262 } else if (resp_code == 1) {
8263 dev_dbg(&hdev->pdev->dev,
8264 "lookup mac addr failed for miss.\n");
8268 dev_err(&hdev->pdev->dev,
8269 "lookup mac addr failed for undefined, code=%u.\n",
8274 dev_err(&hdev->pdev->dev,
8275 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
8280 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
8282 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
8284 unsigned int word_num;
8285 unsigned int bit_num;
8287 if (vfid > 255 || vfid < 0)
8290 if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
8291 word_num = vfid / 32;
8292 bit_num = vfid % 32;
8294 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8296 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
8298 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
8299 bit_num = vfid % 32;
8301 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8303 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
8309 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
8311 #define HCLGE_DESC_NUMBER 3
8312 #define HCLGE_FUNC_NUMBER_PER_DESC 6
8315 for (i = 1; i < HCLGE_DESC_NUMBER; i++)
8316 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
8317 if (desc[i].data[j])
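/* Pack the 6-byte MAC address into the register layout used by the mac
 * vlan table: bytes 0-3 little-endian into mac_addr_hi32 and bytes 4-5
 * into mac_addr_lo16, then flag the entry valid (and multicast when
 * requested).
 */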
8323 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
8324 const u8 *addr, bool is_mc)
8326 const unsigned char *mac_addr = addr;
8327 u32 high_val = (mac_addr[3] << 24) | (mac_addr[2] << 16) |
8328 (mac_addr[1] << 8) | mac_addr[0];
8329 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
8331 hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8333 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
8334 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8337 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
8338 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
8341 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
8342 struct hclge_mac_vlan_tbl_entry_cmd *req)
8344 struct hclge_dev *hdev = vport->back;
8345 struct hclge_desc desc;
8350 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
8352 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8354 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8356 dev_err(&hdev->pdev->dev,
8357 "del mac addr failed for cmd_send, ret =%d.\n",
8361 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8362 retval = le16_to_cpu(desc.retval);
8364 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8365 HCLGE_MAC_VLAN_REMOVE);
8368 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
8369 struct hclge_mac_vlan_tbl_entry_cmd *req,
8370 struct hclge_desc *desc,
8373 struct hclge_dev *hdev = vport->back;
8378 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
8380 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8381 memcpy(desc[0].data,
8383 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8384 hclge_cmd_setup_basic_desc(&desc[1],
8385 HCLGE_OPC_MAC_VLAN_ADD,
8387 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8388 hclge_cmd_setup_basic_desc(&desc[2],
8389 HCLGE_OPC_MAC_VLAN_ADD,
8391 ret = hclge_cmd_send(&hdev->hw, desc, 3);
8393 memcpy(desc[0].data,
8395 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8396 ret = hclge_cmd_send(&hdev->hw, desc, 1);
8399 dev_err(&hdev->pdev->dev,
8400 "lookup mac addr failed for cmd_send, ret =%d.\n",
8404 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
8405 retval = le16_to_cpu(desc[0].retval);
8407 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8408 HCLGE_MAC_VLAN_LKUP);
8411 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
8412 struct hclge_mac_vlan_tbl_entry_cmd *req,
8413 struct hclge_desc *mc_desc)
8415 struct hclge_dev *hdev = vport->back;
8422 struct hclge_desc desc;
8424 hclge_cmd_setup_basic_desc(&desc,
8425 HCLGE_OPC_MAC_VLAN_ADD,
8427 memcpy(desc.data, req,
8428 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8429 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8430 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8431 retval = le16_to_cpu(desc.retval);
8433 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8435 HCLGE_MAC_VLAN_ADD);
8437 hclge_cmd_reuse_desc(&mc_desc[0], false);
8438 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8439 hclge_cmd_reuse_desc(&mc_desc[1], false);
8440 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8441 hclge_cmd_reuse_desc(&mc_desc[2], false);
8442 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
8443 memcpy(mc_desc[0].data, req,
8444 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8445 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
8446 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
8447 retval = le16_to_cpu(mc_desc[0].retval);
8449 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8451 HCLGE_MAC_VLAN_ADD);
8455 dev_err(&hdev->pdev->dev,
8456 "add mac addr failed for cmd_send, ret =%d.\n",
8464 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
8465 u16 *allocated_size)
8467 struct hclge_umv_spc_alc_cmd *req;
8468 struct hclge_desc desc;
8471 req = (struct hclge_umv_spc_alc_cmd *)desc.data;
8472 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
8474 req->space_size = cpu_to_le32(space_size);
8476 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8478 dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
8483 *allocated_size = le32_to_cpu(desc.data[1]);
8488 static int hclge_init_umv_space(struct hclge_dev *hdev)
8490 u16 allocated_size = 0;
8493 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
8497 if (allocated_size < hdev->wanted_umv_size)
8498 dev_warn(&hdev->pdev->dev,
8499 "failed to alloc umv space, want %u, get %u\n",
8500 hdev->wanted_umv_size, allocated_size);
8502 hdev->max_umv_size = allocated_size;
8503 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
8504 hdev->share_umv_size = hdev->priv_umv_size +
8505 hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8507 if (hdev->ae_dev->dev_specs.mc_mac_size)
8508 set_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, hdev->ae_dev->caps);
8513 static void hclge_reset_umv_space(struct hclge_dev *hdev)
8515 struct hclge_vport *vport;
8518 for (i = 0; i < hdev->num_alloc_vport; i++) {
8519 vport = &hdev->vport[i];
8520 vport->used_umv_num = 0;
8523 mutex_lock(&hdev->vport_lock);
8524 hdev->share_umv_size = hdev->priv_umv_size +
8525 hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8526 mutex_unlock(&hdev->vport_lock);
8528 hdev->used_mc_mac_num = 0;
8531 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
8533 struct hclge_dev *hdev = vport->back;
8537 mutex_lock(&hdev->vport_lock);
8539 is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
8540 hdev->share_umv_size == 0);
8543 mutex_unlock(&hdev->vport_lock);
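/* Account one unicast mac entry against the vport's private UMV quota
 * first, then against the shared pool once the quota is exhausted;
 * freeing reverses the same order. Callers take vport_lock where
 * concurrent updates matter.
 */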
8548 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
8550 struct hclge_dev *hdev = vport->back;
8553 if (vport->used_umv_num > hdev->priv_umv_size)
8554 hdev->share_umv_size++;
8556 if (vport->used_umv_num > 0)
8557 vport->used_umv_num--;
8559 if (vport->used_umv_num >= hdev->priv_umv_size &&
8560 hdev->share_umv_size > 0)
8561 hdev->share_umv_size--;
8562 vport->used_umv_num++;
8566 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
8569 struct hclge_mac_node *mac_node, *tmp;
8571 list_for_each_entry_safe(mac_node, tmp, list, node)
8572 if (ether_addr_equal(mac_addr, mac_node->mac_addr))
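/* Merge a newly requested state into an existing mac list node: a
 * TO_DEL request on a node still pending TO_ADD just drops the node,
 * while a TO_ADD request on a node pending TO_DEL restores it to
 * ACTIVE, since the address is still present in hardware.
 */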
8578 static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
8579 enum HCLGE_MAC_NODE_STATE state)
8582 /* from set_rx_mode or tmp_add_list */
8583 case HCLGE_MAC_TO_ADD:
8584 if (mac_node->state == HCLGE_MAC_TO_DEL)
8585 mac_node->state = HCLGE_MAC_ACTIVE;
8587 /* only from set_rx_mode */
8588 case HCLGE_MAC_TO_DEL:
8589 if (mac_node->state == HCLGE_MAC_TO_ADD) {
8590 list_del(&mac_node->node);
8593 mac_node->state = HCLGE_MAC_TO_DEL;
8596 /* only from tmp_add_list, the mac_node->state won't be
8597  * ACTIVE.
8598  */
8599 case HCLGE_MAC_ACTIVE:
8600 if (mac_node->state == HCLGE_MAC_TO_ADD)
8601 mac_node->state = HCLGE_MAC_ACTIVE;
8607 int hclge_update_mac_list(struct hclge_vport *vport,
8608 enum HCLGE_MAC_NODE_STATE state,
8609 enum HCLGE_MAC_ADDR_TYPE mac_type,
8610 const unsigned char *addr)
8612 struct hclge_dev *hdev = vport->back;
8613 struct hclge_mac_node *mac_node;
8614 struct list_head *list;
8616 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8617 &vport->uc_mac_list : &vport->mc_mac_list;
8619 spin_lock_bh(&vport->mac_list_lock);
8621 /* if the mac addr is already in the mac list, there is no need to add
8622  * a new one; just update the mac addr state: convert it to a new
8623  * state, remove it, or do nothing.
8624  */
8625 mac_node = hclge_find_mac_node(list, addr);
8627 hclge_update_mac_node(mac_node, state);
8628 spin_unlock_bh(&vport->mac_list_lock);
8629 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8633 /* if this address was never added, there is nothing to delete */
8634 if (state == HCLGE_MAC_TO_DEL) {
8635 spin_unlock_bh(&vport->mac_list_lock);
8636 dev_err(&hdev->pdev->dev,
8637 "failed to delete address %pM from mac list\n",
8642 mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
8644 spin_unlock_bh(&vport->mac_list_lock);
8648 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8650 mac_node->state = state;
8651 ether_addr_copy(mac_node->mac_addr, addr);
8652 list_add_tail(&mac_node->node, list);
8654 spin_unlock_bh(&vport->mac_list_lock);
8659 static int hclge_add_uc_addr(struct hnae3_handle *handle,
8660 const unsigned char *addr)
8662 struct hclge_vport *vport = hclge_get_vport(handle);
8664 return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
8668 int hclge_add_uc_addr_common(struct hclge_vport *vport,
8669 const unsigned char *addr)
8671 struct hclge_dev *hdev = vport->back;
8672 struct hclge_mac_vlan_tbl_entry_cmd req;
8673 struct hclge_desc desc;
8674 u16 egress_port = 0;
8677 /* mac addr check */
8678 if (is_zero_ether_addr(addr) ||
8679 is_broadcast_ether_addr(addr) ||
8680 is_multicast_ether_addr(addr)) {
8681 dev_err(&hdev->pdev->dev,
8682 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
8683 addr, is_zero_ether_addr(addr),
8684 is_broadcast_ether_addr(addr),
8685 is_multicast_ether_addr(addr));
8689 memset(&req, 0, sizeof(req));
8691 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8692 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8694 req.egress_port = cpu_to_le16(egress_port);
8696 hclge_prepare_mac_addr(&req, addr, false);
8698 /* Lookup the mac address in the mac_vlan table, and add
8699  * it if the entry does not exist. Duplicate unicast entries
8700  * are not allowed in the mac vlan table.
8701  */
8702 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
8703 if (ret == -ENOENT) {
8704 mutex_lock(&hdev->vport_lock);
8705 if (!hclge_is_umv_space_full(vport, false)) {
8706 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
8708 hclge_update_umv_space(vport, false);
8709 mutex_unlock(&hdev->vport_lock);
8712 mutex_unlock(&hdev->vport_lock);
8714 if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
8715 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
8716 hdev->priv_umv_size);
8721 /* check if we just hit a duplicate */
8723 dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
8724 vport->vport_id, addr);
8728 dev_err(&hdev->pdev->dev,
8729 "PF failed to add unicast entry(%pM) in the MAC table\n",
8735 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
8736 const unsigned char *addr)
8738 struct hclge_vport *vport = hclge_get_vport(handle);
8740 return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
8744 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
8745 const unsigned char *addr)
8747 struct hclge_dev *hdev = vport->back;
8748 struct hclge_mac_vlan_tbl_entry_cmd req;
8751 /* mac addr check */
8752 if (is_zero_ether_addr(addr) ||
8753 is_broadcast_ether_addr(addr) ||
8754 is_multicast_ether_addr(addr)) {
8755 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
8760 memset(&req, 0, sizeof(req));
8761 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
8762 hclge_prepare_mac_addr(&req, addr, false);
8763 ret = hclge_remove_mac_vlan_tbl(vport, &req);
8765 mutex_lock(&hdev->vport_lock);
8766 hclge_update_umv_space(vport, true);
8767 mutex_unlock(&hdev->vport_lock);
8768 } else if (ret == -ENOENT) {
8775 static int hclge_add_mc_addr(struct hnae3_handle *handle,
8776 const unsigned char *addr)
8778 struct hclge_vport *vport = hclge_get_vport(handle);
8780 return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
8784 int hclge_add_mc_addr_common(struct hclge_vport *vport,
8785 const unsigned char *addr)
8787 struct hclge_dev *hdev = vport->back;
8788 struct hclge_mac_vlan_tbl_entry_cmd req;
8789 struct hclge_desc desc[3];
8790 bool is_new_addr = false;
8793 /* mac addr check */
8794 if (!is_multicast_ether_addr(addr)) {
8795 dev_err(&hdev->pdev->dev,
8796 "Add mc mac err! invalid mac:%pM.\n",
8800 memset(&req, 0, sizeof(req));
8801 hclge_prepare_mac_addr(&req, addr, true);
8802 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8804 if (hnae3_ae_dev_mc_mac_mng_supported(hdev->ae_dev) &&
8805 hdev->used_mc_mac_num >=
8806 hdev->ae_dev->dev_specs.mc_mac_size)
8811 /* This mac addr does not exist, add a new entry for it */
8812 memset(desc[0].data, 0, sizeof(desc[0].data));
8813 memset(desc[1].data, 0, sizeof(desc[0].data));
8814 memset(desc[2].data, 0, sizeof(desc[0].data));
8816 status = hclge_update_desc_vfid(desc, vport->vport_id, false);
8819 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8820 if (status == -ENOSPC)
8822 else if (!status && is_new_addr)
8823 hdev->used_mc_mac_num++;
8828 /* if the table has already overflowed, do not print each time */
8829 if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
8830 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
8834 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
8835 const unsigned char *addr)
8837 struct hclge_vport *vport = hclge_get_vport(handle);
8839 return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
8843 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
8844 const unsigned char *addr)
8846 struct hclge_dev *hdev = vport->back;
8847 struct hclge_mac_vlan_tbl_entry_cmd req;
8848 enum hclge_cmd_status status;
8849 struct hclge_desc desc[3];
8851 /* mac addr check */
8852 if (!is_multicast_ether_addr(addr)) {
8853 dev_dbg(&hdev->pdev->dev,
8854 "Remove mc mac err! invalid mac:%pM.\n",
8859 memset(&req, 0, sizeof(req));
8860 hclge_prepare_mac_addr(&req, addr, true);
8861 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8863 /* This mac addr exists, remove this handle's VFID from it */
8864 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
8868 if (hclge_is_all_function_id_zero(desc)) {
8869 /* All the vfids are zero, so delete this entry */
8870 status = hclge_remove_mac_vlan_tbl(vport, &req);
8872 hdev->used_mc_mac_num--;
8874 /* Not all the vfids are zero, update the vfid */
8875 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8877 } else if (status == -ENOENT) {
8884 static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
8885 struct list_head *list,
8886 int (*sync)(struct hclge_vport *,
8887 const unsigned char *))
8889 struct hclge_mac_node *mac_node, *tmp;
8892 list_for_each_entry_safe(mac_node, tmp, list, node) {
8893 ret = sync(vport, mac_node->mac_addr);
8895 mac_node->state = HCLGE_MAC_ACTIVE;
8897 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8904 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
8905 struct list_head *list,
8906 int (*unsync)(struct hclge_vport *,
8907 const unsigned char *))
8909 struct hclge_mac_node *mac_node, *tmp;
8912 list_for_each_entry_safe(mac_node, tmp, list, node) {
8913 ret = unsync(vport, mac_node->mac_addr);
8914 if (!ret || ret == -ENOENT) {
8915 list_del(&mac_node->node);
8918 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8925 static bool hclge_sync_from_add_list(struct list_head *add_list,
8926 struct list_head *mac_list)
8928 struct hclge_mac_node *mac_node, *tmp, *new_node;
8929 bool all_added = true;
8931 list_for_each_entry_safe(mac_node, tmp, add_list, node) {
8932 if (mac_node->state == HCLGE_MAC_TO_ADD)
8935 /* if the mac address from tmp_add_list is not in the
8936 * uc/mc_mac_list, it means a TO_DEL request was received
8937 * during the time window of adding the mac address into the
8938 * mac table. If the mac_node state is ACTIVE, change it to
8939 * TO_DEL so it will be removed next time; otherwise it must
8940 * be TO_ADD, meaning this address has not been added into
8941 * the mac table yet, so just remove the mac node.
8943 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8945 hclge_update_mac_node(new_node, mac_node->state);
8946 list_del(&mac_node->node);
8948 } else if (mac_node->state == HCLGE_MAC_ACTIVE) {
8949 mac_node->state = HCLGE_MAC_TO_DEL;
8950 list_move_tail(&mac_node->node, mac_list);
8952 list_del(&mac_node->node);
8960 static void hclge_sync_from_del_list(struct list_head *del_list,
8961 struct list_head *mac_list)
8963 struct hclge_mac_node *mac_node, *tmp, *new_node;
8965 list_for_each_entry_safe(mac_node, tmp, del_list, node) {
8966 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8968 /* If the mac addr exists in the mac list, it means
8969 * a new TO_ADD request was received during the time
8970 * window of configuring the mac address. Since the
8971 * mac node state is TO_ADD and the address is already
8972 * in the hardware (because the delete failed), we just
8973 * need to change the mac node state to ACTIVE.
8975 new_node->state = HCLGE_MAC_ACTIVE;
8976 list_del(&mac_node->node);
8979 list_move_tail(&mac_node->node, mac_list);
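
/* A concrete scenario for the two merge helpers above (illustrative):
 * address X is queued as TO_ADD by set_rx_mode. The sync task copies X
 * into tmp_add_list, drops the lock and writes X to hardware, so the
 * copy becomes ACTIVE. If the user removes X in that window, the TO_DEL
 * request frees the TO_ADD node from the vport list; the merge then
 * finds no matching node, flips the ACTIVE copy to TO_DEL and moves it
 * into the vport list, and the next sync pass deletes X from hardware.
 */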
8984 static void hclge_update_overflow_flags(struct hclge_vport *vport,
8985 enum HCLGE_MAC_ADDR_TYPE mac_type,
8988 if (mac_type == HCLGE_MAC_ADDR_UC) {
8990 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
8992 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
8995 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
8997 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
9001 static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
9002 enum HCLGE_MAC_ADDR_TYPE mac_type)
9004 struct hclge_mac_node *mac_node, *tmp, *new_node;
9005 struct list_head tmp_add_list, tmp_del_list;
9006 struct list_head *list;
9009 INIT_LIST_HEAD(&tmp_add_list);
9010 INIT_LIST_HEAD(&tmp_del_list);
9012 /* move the mac addrs to the tmp_add_list and tmp_del_list, so
9013 * we can add/delete these mac addrs outside the spin lock
9015 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
9016 &vport->uc_mac_list : &vport->mc_mac_list;
9018 spin_lock_bh(&vport->mac_list_lock);
9020 list_for_each_entry_safe(mac_node, tmp, list, node) {
9021 switch (mac_node->state) {
9022 case HCLGE_MAC_TO_DEL:
9023 list_move_tail(&mac_node->node, &tmp_del_list);
9025 case HCLGE_MAC_TO_ADD:
9026 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
9029 ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
9030 new_node->state = mac_node->state;
9031 list_add_tail(&new_node->node, &tmp_add_list);
9039 spin_unlock_bh(&vport->mac_list_lock);
9041 /* delete first, in order to get max mac table space for adding */
9042 if (mac_type == HCLGE_MAC_ADDR_UC) {
9043 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9044 hclge_rm_uc_addr_common);
9045 hclge_sync_vport_mac_list(vport, &tmp_add_list,
9046 hclge_add_uc_addr_common);
9048 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9049 hclge_rm_mc_addr_common);
9050 hclge_sync_vport_mac_list(vport, &tmp_add_list,
9051 hclge_add_mc_addr_common);
9054 /* if some mac addresses failed to be added/deleted, move them back
9055 * to the mac_list and retry next time.
9057 spin_lock_bh(&vport->mac_list_lock);
9059 hclge_sync_from_del_list(&tmp_del_list, list);
9060 all_added = hclge_sync_from_add_list(&tmp_add_list, list);
9062 spin_unlock_bh(&vport->mac_list_lock);
9064 hclge_update_overflow_flags(vport, mac_type, all_added);
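
/* The function above is an instance of a "snapshot under lock, apply
 * outside lock, merge back" pattern. A minimal sketch of the skeleton
 * (all names illustrative only):
 *
 *	spin_lock_bh(&lock);
 *	splice_requests(&main_list, &tmp_add, &tmp_del);
 *	spin_unlock_bh(&lock);
 *
 *	apply_to_hw(&tmp_del);
 *	apply_to_hw(&tmp_add);
 *
 *	spin_lock_bh(&lock);
 *	merge_results(&tmp_add, &tmp_del, &main_list);
 *	spin_unlock_bh(&lock);
 *
 * Deleting before adding matters when the table is near the UMV limit,
 * since it maximizes the free space available for pending additions.
 */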
9067 static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
9069 struct hclge_dev *hdev = vport->back;
9071 if (test_bit(vport->vport_id, hdev->vport_config_block))
9074 if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
9080 static void hclge_sync_mac_table(struct hclge_dev *hdev)
9084 for (i = 0; i < hdev->num_alloc_vport; i++) {
9085 struct hclge_vport *vport = &hdev->vport[i];
9087 if (!hclge_need_sync_mac_table(vport))
9090 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
9091 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
9095 static void hclge_build_del_list(struct list_head *list,
9097 struct list_head *tmp_del_list)
9099 struct hclge_mac_node *mac_cfg, *tmp;
9101 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
9102 switch (mac_cfg->state) {
9103 case HCLGE_MAC_TO_DEL:
9104 case HCLGE_MAC_ACTIVE:
9105 list_move_tail(&mac_cfg->node, tmp_del_list);
9107 case HCLGE_MAC_TO_ADD:
9109 list_del(&mac_cfg->node);
9117 static void hclge_unsync_del_list(struct hclge_vport *vport,
9118 int (*unsync)(struct hclge_vport *vport,
9119 const unsigned char *addr),
9121 struct list_head *tmp_del_list)
9123 struct hclge_mac_node *mac_cfg, *tmp;
9126 list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) {
9127 ret = unsync(vport, mac_cfg->mac_addr);
9128 if (!ret || ret == -ENOENT) {
9129 /* clear all mac addrs from hardware, but keep these
9130 * mac addrs in the mac list, and restore them after
9131 * the vf reset finishes.
9134 mac_cfg->state == HCLGE_MAC_ACTIVE) {
9135 mac_cfg->state = HCLGE_MAC_TO_ADD;
9137 list_del(&mac_cfg->node);
9140 } else if (is_del_list) {
9141 mac_cfg->state = HCLGE_MAC_TO_DEL;
9146 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
9147 enum HCLGE_MAC_ADDR_TYPE mac_type)
9149 int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
9150 struct hclge_dev *hdev = vport->back;
9151 struct list_head tmp_del_list, *list;
9153 if (mac_type == HCLGE_MAC_ADDR_UC) {
9154 list = &vport->uc_mac_list;
9155 unsync = hclge_rm_uc_addr_common;
9157 list = &vport->mc_mac_list;
9158 unsync = hclge_rm_mc_addr_common;
9161 INIT_LIST_HEAD(&tmp_del_list);
9164 set_bit(vport->vport_id, hdev->vport_config_block);
9166 spin_lock_bh(&vport->mac_list_lock);
9168 hclge_build_del_list(list, is_del_list, &tmp_del_list);
9170 spin_unlock_bh(&vport->mac_list_lock);
9172 hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);
9174 spin_lock_bh(&vport->mac_list_lock);
9176 hclge_sync_from_del_list(&tmp_del_list, list);
9178 spin_unlock_bh(&vport->mac_list_lock);
9181 /* remove all mac addresses when uninitializing */
9182 static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
9183 enum HCLGE_MAC_ADDR_TYPE mac_type)
9185 struct hclge_mac_node *mac_node, *tmp;
9186 struct hclge_dev *hdev = vport->back;
9187 struct list_head tmp_del_list, *list;
9189 INIT_LIST_HEAD(&tmp_del_list);
9191 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
9192 &vport->uc_mac_list : &vport->mc_mac_list;
9194 spin_lock_bh(&vport->mac_list_lock);
9196 list_for_each_entry_safe(mac_node, tmp, list, node) {
9197 switch (mac_node->state) {
9198 case HCLGE_MAC_TO_DEL:
9199 case HCLGE_MAC_ACTIVE:
9200 list_move_tail(&mac_node->node, &tmp_del_list);
9202 case HCLGE_MAC_TO_ADD:
9203 list_del(&mac_node->node);
9209 spin_unlock_bh(&vport->mac_list_lock);
9211 if (mac_type == HCLGE_MAC_ADDR_UC)
9212 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9213 hclge_rm_uc_addr_common);
9215 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9216 hclge_rm_mc_addr_common);
9218 if (!list_empty(&tmp_del_list))
9219 dev_warn(&hdev->pdev->dev,
9220 "uninit %s mac list for vport %u not completely.\n",
9221 mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
9224 list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
9225 list_del(&mac_node->node);
9230 static void hclge_uninit_mac_table(struct hclge_dev *hdev)
9232 struct hclge_vport *vport;
9235 for (i = 0; i < hdev->num_alloc_vport; i++) {
9236 vport = &hdev->vport[i];
9237 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
9238 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
9242 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
9243 u16 cmdq_resp, u8 resp_code)
9245 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0
9246 #define HCLGE_ETHERTYPE_ALREADY_ADD 1
9247 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
9248 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3
9253 dev_err(&hdev->pdev->dev,
9254 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
9259 switch (resp_code) {
9260 case HCLGE_ETHERTYPE_SUCCESS_ADD:
9261 case HCLGE_ETHERTYPE_ALREADY_ADD:
9264 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
9265 dev_err(&hdev->pdev->dev,
9266 "add mac ethertype failed for manager table overflow.\n");
9267 return_status = -EIO;
9269 case HCLGE_ETHERTYPE_KEY_CONFLICT:
9270 dev_err(&hdev->pdev->dev,
9271 "add mac ethertype failed for key conflict.\n");
9272 return_status = -EIO;
9275 dev_err(&hdev->pdev->dev,
9276 "add mac ethertype failed for undefined, code=%u.\n",
9278 return_status = -EIO;
9281 return return_status;
9284 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
9287 struct hclge_mac_vlan_tbl_entry_cmd req;
9288 struct hclge_dev *hdev = vport->back;
9289 struct hclge_desc desc;
9290 u16 egress_port = 0;
9293 if (is_zero_ether_addr(mac_addr))
9296 memset(&req, 0, sizeof(req));
9297 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
9298 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
9299 req.egress_port = cpu_to_le16(egress_port);
9300 hclge_prepare_mac_addr(&req, mac_addr, false);
9302 if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
9305 vf_idx += HCLGE_VF_VPORT_START_NUM;
9306 for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++)
9308 ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
9314 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
9317 struct hclge_vport *vport = hclge_get_vport(handle);
9318 struct hclge_dev *hdev = vport->back;
9320 vport = hclge_get_vf_vport(hdev, vf);
9324 if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
9325 dev_info(&hdev->pdev->dev,
9326 "Specified MAC(=%pM) is same as before, no change committed!\n",
9331 if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
9332 dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
9337 ether_addr_copy(vport->vf_info.mac, mac_addr);
9339 if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
9340 dev_info(&hdev->pdev->dev,
9341 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
9343 return hclge_inform_reset_assert_to_vf(vport);
9346 dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
9351 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
9352 const struct hclge_mac_mgr_tbl_entry_cmd *req)
9354 struct hclge_desc desc;
9359 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
9360 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
9362 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9364 dev_err(&hdev->pdev->dev,
9365 "add mac ethertype failed for cmd_send, ret =%d.\n",
9370 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
9371 retval = le16_to_cpu(desc.retval);
9373 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
9376 static int init_mgr_tbl(struct hclge_dev *hdev)
9381 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
9382 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
9384 dev_err(&hdev->pdev->dev,
9385 "add mac ethertype failed, ret =%d.\n",
9394 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
9396 struct hclge_vport *vport = hclge_get_vport(handle);
9397 struct hclge_dev *hdev = vport->back;
9399 ether_addr_copy(p, hdev->hw.mac.mac_addr);
9402 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
9403 const u8 *old_addr, const u8 *new_addr)
9405 struct list_head *list = &vport->uc_mac_list;
9406 struct hclge_mac_node *old_node, *new_node;
9408 new_node = hclge_find_mac_node(list, new_addr);
9410 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
9414 new_node->state = HCLGE_MAC_TO_ADD;
9415 ether_addr_copy(new_node->mac_addr, new_addr);
9416 list_add(&new_node->node, list);
9418 if (new_node->state == HCLGE_MAC_TO_DEL)
9419 new_node->state = HCLGE_MAC_ACTIVE;
9421 /* make sure the new addr is at the list head, so the dev
9422 * addr is not skipped on re-adding into the mac table due to
9423 * the umv space limitation after a global/imp reset, which
9424 * clears the mac table in hardware.
9426 list_move(&new_node->node, list);
9429 if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
9430 old_node = hclge_find_mac_node(list, old_addr);
9432 if (old_node->state == HCLGE_MAC_TO_ADD) {
9433 list_del(&old_node->node);
9436 old_node->state = HCLGE_MAC_TO_DEL;
9441 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
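
/* Keeping the device address at the list head also means the sync task
 * retries it first after a reset, before other unicast entries can use
 * up the remaining UMV space.
 */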
9446 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
9449 const unsigned char *new_addr = (const unsigned char *)p;
9450 struct hclge_vport *vport = hclge_get_vport(handle);
9451 struct hclge_dev *hdev = vport->back;
9452 unsigned char *old_addr = NULL;
9455 /* mac addr check */
9456 if (is_zero_ether_addr(new_addr) ||
9457 is_broadcast_ether_addr(new_addr) ||
9458 is_multicast_ether_addr(new_addr)) {
9459 dev_err(&hdev->pdev->dev,
9460 "change uc mac err! invalid mac: %pM.\n",
9465 ret = hclge_pause_addr_cfg(hdev, new_addr);
9467 dev_err(&hdev->pdev->dev,
9468 "failed to configure mac pause address, ret = %d\n",
9474 old_addr = hdev->hw.mac.mac_addr;
9476 spin_lock_bh(&vport->mac_list_lock);
9477 ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
9479 dev_err(&hdev->pdev->dev,
9480 "failed to change the mac addr:%pM, ret = %d\n",
9482 spin_unlock_bh(&vport->mac_list_lock);
9485 hclge_pause_addr_cfg(hdev, old_addr);
9489 /* we must update the dev addr under spin lock protection, to prevent
9490 * the dev addr from being removed by the set_rx_mode path.
9492 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
9493 spin_unlock_bh(&vport->mac_list_lock);
9495 hclge_task_schedule(hdev, 0);
9500 static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd)
9502 struct mii_ioctl_data *data = if_mii(ifr);
9504 if (!hnae3_dev_phy_imp_supported(hdev))
9509 data->phy_id = hdev->hw.mac.phy_addr;
9510 /* this command reads phy id and register at the same time */
9513 data->val_out = hclge_read_phy_reg(hdev, data->reg_num);
9517 return hclge_write_phy_reg(hdev, data->reg_num, data->val_in);
9523 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
9526 struct hclge_vport *vport = hclge_get_vport(handle);
9527 struct hclge_dev *hdev = vport->back;
9531 return hclge_ptp_get_cfg(hdev, ifr);
9533 return hclge_ptp_set_cfg(hdev, ifr);
9535 if (!hdev->hw.mac.phydev)
9536 return hclge_mii_ioctl(hdev, ifr, cmd);
9539 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
9542 static int hclge_set_port_vlan_filter_bypass(struct hclge_dev *hdev, u8 vf_id,
9545 struct hclge_port_vlan_filter_bypass_cmd *req;
9546 struct hclge_desc desc;
9549 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, false);
9550 req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data;
9552 hnae3_set_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B,
9555 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9557 dev_err(&hdev->pdev->dev,
9558 "failed to set vport%u port vlan filter bypass state, ret = %d.\n",
9564 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
9565 u8 fe_type, bool filter_en, u8 vf_id)
9567 struct hclge_vlan_filter_ctrl_cmd *req;
9568 struct hclge_desc desc;
9571 /* read current vlan filter parameter */
9572 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
9573 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
9574 req->vlan_type = vlan_type;
9577 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9579 dev_err(&hdev->pdev->dev,
9580 "failed to get vlan filter config, ret = %d.\n", ret);
9584 /* modify and write new config parameter */
9585 hclge_cmd_reuse_desc(&desc, false);
9586 req->vlan_fe = filter_en ?
9587 (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
9589 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9591 dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
9597 static int hclge_set_vport_vlan_filter(struct hclge_vport *vport, bool enable)
9599 struct hclge_dev *hdev = vport->back;
9600 struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
9603 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
9604 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9605 HCLGE_FILTER_FE_EGRESS_V1_B,
9606 enable, vport->vport_id);
9608 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9609 HCLGE_FILTER_FE_EGRESS, enable,
9614 if (test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps)) {
9615 ret = hclge_set_port_vlan_filter_bypass(hdev, vport->vport_id,
9617 } else if (!vport->vport_id) {
9618 if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
9621 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9622 HCLGE_FILTER_FE_INGRESS,
9629 static bool hclge_need_enable_vport_vlan_filter(struct hclge_vport *vport)
9631 struct hnae3_handle *handle = &vport->nic;
9632 struct hclge_vport_vlan_cfg *vlan, *tmp;
9633 struct hclge_dev *hdev = vport->back;
9635 if (vport->vport_id) {
9636 if (vport->port_base_vlan_cfg.state !=
9637 HNAE3_PORT_BASE_VLAN_DISABLE)
9640 if (vport->vf_info.trusted && vport->vf_info.request_uc_en)
9642 } else if (handle->netdev_flags & HNAE3_USER_UPE) {
9646 if (!vport->req_vlan_fltr_en)
9649 /* for compatibility with former devices, always enable the vlan filter */
9650 if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
9653 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
9654 if (vlan->vlan_id != 0)
9660 int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en)
9662 struct hclge_dev *hdev = vport->back;
9666 mutex_lock(&hdev->vport_lock);
9668 vport->req_vlan_fltr_en = request_en;
9670 need_en = hclge_need_enable_vport_vlan_filter(vport);
9671 if (need_en == vport->cur_vlan_fltr_en) {
9672 mutex_unlock(&hdev->vport_lock);
9676 ret = hclge_set_vport_vlan_filter(vport, need_en);
9678 mutex_unlock(&hdev->vport_lock);
9682 vport->cur_vlan_fltr_en = need_en;
9684 mutex_unlock(&hdev->vport_lock);
9689 static int hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
9691 struct hclge_vport *vport = hclge_get_vport(handle);
9693 return hclge_enable_vport_vlan_filter(vport, enable);
9696 static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
9697 bool is_kill, u16 vlan,
9698 struct hclge_desc *desc)
9700 struct hclge_vlan_filter_vf_cfg_cmd *req0;
9701 struct hclge_vlan_filter_vf_cfg_cmd *req1;
9706 hclge_cmd_setup_basic_desc(&desc[0],
9707 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9708 hclge_cmd_setup_basic_desc(&desc[1],
9709 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9711 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9713 vf_byte_off = vfid / 8;
9714 vf_byte_val = 1 << (vfid % 8);
9716 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9717 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
9719 req0->vlan_id = cpu_to_le16(vlan);
9720 req0->vlan_cfg = is_kill;
9722 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
9723 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
9725 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
9727 ret = hclge_cmd_send(&hdev->hw, desc, 2);
9729 dev_err(&hdev->pdev->dev,
9730 "Send vf vlan command fail, ret =%d.\n",
9738 static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid,
9739 bool is_kill, struct hclge_desc *desc)
9741 struct hclge_vlan_filter_vf_cfg_cmd *req;
9743 req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9746 #define HCLGE_VF_VLAN_NO_ENTRY 2
9747 if (!req->resp_code || req->resp_code == 1)
9750 if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
9751 set_bit(vfid, hdev->vf_vlan_full);
9752 dev_warn(&hdev->pdev->dev,
9753 "vf vlan table is full, vf vlan filter is disabled\n");
9757 dev_err(&hdev->pdev->dev,
9758 "Add vf vlan filter fail, ret =%u.\n",
9761 #define HCLGE_VF_VLAN_DEL_NO_FOUND 1
9762 if (!req->resp_code)
9765 /* the vf vlan filter is disabled when the vf vlan table is full, so
9766 * the new vlan id will not have been added into the vf vlan table.
9767 * Just return 0 without warning, to avoid massively verbose logs
9768 * on unload.
9770 if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
9773 dev_err(&hdev->pdev->dev,
9774 "Kill vf vlan filter fail, ret =%u.\n",
9781 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
9782 bool is_kill, u16 vlan)
9784 struct hclge_vport *vport = &hdev->vport[vfid];
9785 struct hclge_desc desc[2];
9788 /* if the vf vlan table is full, firmware closes the vf vlan filter,
9789 * so it is neither possible nor necessary to add a new vlan id to it.
9790 * If spoof check is enabled while the vf vlan table is full, adding a
9791 * new vlan must fail, since tx packets with that vlan id get dropped.
9793 if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
9794 if (vport->vf_info.spoofchk && vlan) {
9795 dev_err(&hdev->pdev->dev,
9796 "Can't add vlan due to spoof check is on and vf vlan table is full\n");
9802 ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc);
9806 return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc);
9809 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
9810 u16 vlan_id, bool is_kill)
9812 struct hclge_vlan_filter_pf_cfg_cmd *req;
9813 struct hclge_desc desc;
9814 u8 vlan_offset_byte_val;
9815 u8 vlan_offset_byte;
9819 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
9821 vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
9822 vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
9823 HCLGE_VLAN_BYTE_SIZE;
9824 vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
9826 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
9827 req->vlan_offset = vlan_offset_160;
9828 req->vlan_cfg = is_kill;
9829 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
9831 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9833 dev_err(&hdev->pdev->dev,
9834 "port vlan command, send fail, ret =%d.\n", ret);
9838 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
9839 u16 vport_id, u16 vlan_id,
9842 u16 vport_idx, vport_num = 0;
9845 if (is_kill && !vlan_id)
9848 if (vlan_id >= VLAN_N_VID)
9851 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id);
9853 dev_err(&hdev->pdev->dev,
9854 "Set %u vport vlan filter config fail, ret =%d.\n",
9859 /* vlan 0 may be added twice when 8021q module is enabled */
9860 if (!is_kill && !vlan_id &&
9861 test_bit(vport_id, hdev->vlan_table[vlan_id]))
9864 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
9865 dev_err(&hdev->pdev->dev,
9866 "Add port vlan failed, vport %u is already in vlan %u\n",
9872 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
9873 dev_err(&hdev->pdev->dev,
9874 "Delete port vlan failed, vport %u is not in vlan %u\n",
9879 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
9882 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
9883 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
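
/* The vlan_table bitmap above acts as a per-vlan reference count over
 * vports: the shared port-level filter entry is only touched when the
 * first vport joins a vlan (vport_num == 1 after adding) or the last
 * one leaves it (vport_num == 0 after killing).
 */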
9889 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
9891 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
9892 struct hclge_vport_vtag_tx_cfg_cmd *req;
9893 struct hclge_dev *hdev = vport->back;
9894 struct hclge_desc desc;
9898 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
9900 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
9901 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
9902 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
9903 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
9904 vcfg->accept_tag1 ? 1 : 0);
9905 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
9906 vcfg->accept_untag1 ? 1 : 0);
9907 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
9908 vcfg->accept_tag2 ? 1 : 0);
9909 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
9910 vcfg->accept_untag2 ? 1 : 0);
9911 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
9912 vcfg->insert_tag1_en ? 1 : 0);
9913 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
9914 vcfg->insert_tag2_en ? 1 : 0);
9915 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B,
9916 vcfg->tag_shift_mode_en ? 1 : 0);
9917 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
9919 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9920 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9921 HCLGE_VF_NUM_PER_BYTE;
9922 req->vf_bitmap[bmap_index] =
9923 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9925 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9927 dev_err(&hdev->pdev->dev,
9928 "Send port txvlan cfg command fail, ret =%d\n",
9934 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
9936 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
9937 struct hclge_vport_vtag_rx_cfg_cmd *req;
9938 struct hclge_dev *hdev = vport->back;
9939 struct hclge_desc desc;
9943 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
9945 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
9946 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
9947 vcfg->strip_tag1_en ? 1 : 0);
9948 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
9949 vcfg->strip_tag2_en ? 1 : 0);
9950 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
9951 vcfg->vlan1_vlan_prionly ? 1 : 0);
9952 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
9953 vcfg->vlan2_vlan_prionly ? 1 : 0);
9954 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B,
9955 vcfg->strip_tag1_discard_en ? 1 : 0);
9956 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B,
9957 vcfg->strip_tag2_discard_en ? 1 : 0);
9959 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9960 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9961 HCLGE_VF_NUM_PER_BYTE;
9962 req->vf_bitmap[bmap_index] =
9963 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9965 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9967 dev_err(&hdev->pdev->dev,
9968 "Send port rxvlan cfg command fail, ret =%d\n",
9974 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
9975 u16 port_base_vlan_state,
9976 u16 vlan_tag, u8 qos)
9980 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9981 vport->txvlan_cfg.accept_tag1 = true;
9982 vport->txvlan_cfg.insert_tag1_en = false;
9983 vport->txvlan_cfg.default_tag1 = 0;
9985 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);
9987 vport->txvlan_cfg.accept_tag1 =
9988 ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3;
9989 vport->txvlan_cfg.insert_tag1_en = true;
9990 vport->txvlan_cfg.default_tag1 = (qos << VLAN_PRIO_SHIFT) |
9994 vport->txvlan_cfg.accept_untag1 = true;
9996 /* accept_tag2 and accept_untag2 are not supported on
9997 * pdev revision(0x20); newer revisions support them, but
9998 * these two fields cannot be configured by the user.
10000 vport->txvlan_cfg.accept_tag2 = true;
10001 vport->txvlan_cfg.accept_untag2 = true;
10002 vport->txvlan_cfg.insert_tag2_en = false;
10003 vport->txvlan_cfg.default_tag2 = 0;
10004 vport->txvlan_cfg.tag_shift_mode_en = true;
10006 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10007 vport->rxvlan_cfg.strip_tag1_en = false;
10008 vport->rxvlan_cfg.strip_tag2_en =
10009 vport->rxvlan_cfg.rx_vlan_offload_en;
10010 vport->rxvlan_cfg.strip_tag2_discard_en = false;
10012 vport->rxvlan_cfg.strip_tag1_en =
10013 vport->rxvlan_cfg.rx_vlan_offload_en;
10014 vport->rxvlan_cfg.strip_tag2_en = true;
10015 vport->rxvlan_cfg.strip_tag2_discard_en = true;
10018 vport->rxvlan_cfg.strip_tag1_discard_en = false;
10019 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
10020 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
10022 ret = hclge_set_vlan_tx_offload_cfg(vport);
10026 return hclge_set_vlan_rx_offload_cfg(vport);
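
/* Net effect of the tx/rx settings above, per port-base-vlan state
 * (tag1 is the outer tag, tag2 the inner):
 *
 *	state		tx insert tag1	rx strip tag1	rx strip tag2
 *	DISABLE		no		no		rx offload flag
 *	ENABLE/MODIFY	yes (qos|pvid)	rx offload flag	yes, discarded
 *
 * So with a port base vlan active, the user-visible rx vlan offload
 * toggle moves over to tag1, while tag2 is always stripped and dropped.
 */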
10029 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
10031 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
10032 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
10033 struct hclge_desc desc;
10036 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
10037 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
10038 rx_req->ot_fst_vlan_type =
10039 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
10040 rx_req->ot_sec_vlan_type =
10041 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
10042 rx_req->in_fst_vlan_type =
10043 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
10044 rx_req->in_sec_vlan_type =
10045 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
10047 status = hclge_cmd_send(&hdev->hw, &desc, 1);
10049 dev_err(&hdev->pdev->dev,
10050 "Send rxvlan protocol type command fail, ret =%d\n",
10055 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
10057 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
10058 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
10059 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
10061 status = hclge_cmd_send(&hdev->hw, &desc, 1);
10063 dev_err(&hdev->pdev->dev,
10064 "Send txvlan protocol type command fail, ret =%d\n",
10070 static int hclge_init_vlan_config(struct hclge_dev *hdev)
10072 #define HCLGE_DEF_VLAN_TYPE 0x8100
10074 struct hnae3_handle *handle = &hdev->vport[0].nic;
10075 struct hclge_vport *vport;
10079 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
10080 /* for revision 0x21, vf vlan filter is per function */
10081 for (i = 0; i < hdev->num_alloc_vport; i++) {
10082 vport = &hdev->vport[i];
10083 ret = hclge_set_vlan_filter_ctrl(hdev,
10084 HCLGE_FILTER_TYPE_VF,
10085 HCLGE_FILTER_FE_EGRESS,
10090 vport->cur_vlan_fltr_en = true;
10093 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
10094 HCLGE_FILTER_FE_INGRESS, true,
10099 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
10100 HCLGE_FILTER_FE_EGRESS_V1_B,
10106 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
10107 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
10108 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
10109 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
10110 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
10111 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
10113 ret = hclge_set_vlan_protocol_type(hdev);
10117 for (i = 0; i < hdev->num_alloc_vport; i++) {
10121 vport = &hdev->vport[i];
10122 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
10123 qos = vport->port_base_vlan_cfg.vlan_info.qos;
10125 ret = hclge_vlan_offload_cfg(vport,
10126 vport->port_base_vlan_cfg.state,
10132 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
10135 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
10136 bool writen_to_tbl)
10138 struct hclge_vport_vlan_cfg *vlan, *tmp;
10140 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
10141 if (vlan->vlan_id == vlan_id)
10144 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
10148 vlan->hd_tbl_status = writen_to_tbl;
10149 vlan->vlan_id = vlan_id;
10151 list_add_tail(&vlan->node, &vport->vlan_list);
10154 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
10156 struct hclge_vport_vlan_cfg *vlan, *tmp;
10157 struct hclge_dev *hdev = vport->back;
10160 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10161 if (!vlan->hd_tbl_status) {
10162 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10164 vlan->vlan_id, false);
10166 dev_err(&hdev->pdev->dev,
10167 "restore vport vlan list failed, ret=%d\n",
10172 vlan->hd_tbl_status = true;
10178 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
10181 struct hclge_vport_vlan_cfg *vlan, *tmp;
10182 struct hclge_dev *hdev = vport->back;
10184 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10185 if (vlan->vlan_id == vlan_id) {
10186 if (is_write_tbl && vlan->hd_tbl_status)
10187 hclge_set_vlan_filter_hw(hdev,
10188 htons(ETH_P_8021Q),
10193 list_del(&vlan->node);
10200 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
10202 struct hclge_vport_vlan_cfg *vlan, *tmp;
10203 struct hclge_dev *hdev = vport->back;
10205 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10206 if (vlan->hd_tbl_status)
10207 hclge_set_vlan_filter_hw(hdev,
10208 htons(ETH_P_8021Q),
10213 vlan->hd_tbl_status = false;
10215 list_del(&vlan->node);
10219 clear_bit(vport->vport_id, hdev->vf_vlan_full);
10222 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
10224 struct hclge_vport_vlan_cfg *vlan, *tmp;
10225 struct hclge_vport *vport;
10228 for (i = 0; i < hdev->num_alloc_vport; i++) {
10229 vport = &hdev->vport[i];
10230 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10231 list_del(&vlan->node);
10237 void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
10239 struct hclge_vport_vlan_cfg *vlan, *tmp;
10240 struct hclge_dev *hdev = vport->back;
10246 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
10247 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
10248 state = vport->port_base_vlan_cfg.state;
10250 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
10251 clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
10252 hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
10253 vport->vport_id, vlan_id,
10258 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10259 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10261 vlan->vlan_id, false);
10264 vlan->hd_tbl_status = true;
10268 /* For global reset and imp reset, hardware will clear the mac table,
10269 * so we change the mac address state from ACTIVE to TO_ADD, so that
10270 * it can be restored by the service task after the reset completes.
10271 * Furthermore, mac addresses with state TO_DEL or DEL_FAIL do not need
10272 * to be restored after reset, so just remove those nodes from mac_list.
10273 */
10274 static void hclge_mac_node_convert_for_reset(struct list_head *list)
10276 struct hclge_mac_node *mac_node, *tmp;
10278 list_for_each_entry_safe(mac_node, tmp, list, node) {
10279 if (mac_node->state == HCLGE_MAC_ACTIVE) {
10280 mac_node->state = HCLGE_MAC_TO_ADD;
10281 } else if (mac_node->state == HCLGE_MAC_TO_DEL) {
10282 list_del(&mac_node->node);
10288 void hclge_restore_mac_table_common(struct hclge_vport *vport)
10290 spin_lock_bh(&vport->mac_list_lock);
10292 hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
10293 hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
10294 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
10296 spin_unlock_bh(&vport->mac_list_lock);
10299 static void hclge_restore_hw_table(struct hclge_dev *hdev)
10301 struct hclge_vport *vport = &hdev->vport[0];
10302 struct hnae3_handle *handle = &vport->nic;
10304 hclge_restore_mac_table_common(vport);
10305 hclge_restore_vport_vlan_table(vport);
10306 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
10307 hclge_restore_fd_entries(handle);
10310 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
10312 struct hclge_vport *vport = hclge_get_vport(handle);
10314 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10315 vport->rxvlan_cfg.strip_tag1_en = false;
10316 vport->rxvlan_cfg.strip_tag2_en = enable;
10317 vport->rxvlan_cfg.strip_tag2_discard_en = false;
10319 vport->rxvlan_cfg.strip_tag1_en = enable;
10320 vport->rxvlan_cfg.strip_tag2_en = true;
10321 vport->rxvlan_cfg.strip_tag2_discard_en = true;
10324 vport->rxvlan_cfg.strip_tag1_discard_en = false;
10325 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
10326 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
10327 vport->rxvlan_cfg.rx_vlan_offload_en = enable;
10329 return hclge_set_vlan_rx_offload_cfg(vport);
10332 static void hclge_set_vport_vlan_fltr_change(struct hclge_vport *vport)
10334 struct hclge_dev *hdev = vport->back;
10336 if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
10337 set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, &vport->state);
10340 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
10341 u16 port_base_vlan_state,
10342 struct hclge_vlan_info *new_info,
10343 struct hclge_vlan_info *old_info)
10345 struct hclge_dev *hdev = vport->back;
10348 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
10349 hclge_rm_vport_all_vlan_table(vport, false);
10350 /* force clear VLAN 0 */
10351 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, true, 0);
10354 return hclge_set_vlan_filter_hw(hdev,
10355 htons(new_info->vlan_proto),
10357 new_info->vlan_tag,
10361 /* force add VLAN 0 */
10362 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, false, 0);
10366 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
10367 vport->vport_id, old_info->vlan_tag,
10372 return hclge_add_vport_all_vlan_table(vport);
10375 static bool hclge_need_update_vlan_filter(const struct hclge_vlan_info *new_cfg,
10376 const struct hclge_vlan_info *old_cfg)
10378 if (new_cfg->vlan_tag != old_cfg->vlan_tag)
10381 if (new_cfg->vlan_tag == 0 && (new_cfg->qos == 0 || old_cfg->qos == 0))
10387 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
10388 struct hclge_vlan_info *vlan_info)
10390 struct hnae3_handle *nic = &vport->nic;
10391 struct hclge_vlan_info *old_vlan_info;
10392 struct hclge_dev *hdev = vport->back;
10395 old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10397 ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag,
10402 if (!hclge_need_update_vlan_filter(vlan_info, old_vlan_info))
10405 if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
10406 /* add new VLAN tag */
10407 ret = hclge_set_vlan_filter_hw(hdev,
10408 htons(vlan_info->vlan_proto),
10410 vlan_info->vlan_tag,
10415 /* remove old VLAN tag */
10416 if (old_vlan_info->vlan_tag == 0)
10417 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id,
10420 ret = hclge_set_vlan_filter_hw(hdev,
10421 htons(ETH_P_8021Q),
10423 old_vlan_info->vlan_tag,
10426 dev_err(&hdev->pdev->dev,
10427 "failed to clear vport%u port base vlan %u, ret = %d.\n",
10428 vport->vport_id, old_vlan_info->vlan_tag, ret);
10435 ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
10441 vport->port_base_vlan_cfg.state = state;
10442 if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
10443 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
10445 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
10447 vport->port_base_vlan_cfg.vlan_info = *vlan_info;
10448 hclge_set_vport_vlan_fltr_change(vport);
10453 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
10454 enum hnae3_port_base_vlan_state state,
10457 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10459 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10461 return HNAE3_PORT_BASE_VLAN_ENABLE;
10465 return HNAE3_PORT_BASE_VLAN_DISABLE;
10467 if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan &&
10468 vport->port_base_vlan_cfg.vlan_info.qos == qos)
10469 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10471 return HNAE3_PORT_BASE_VLAN_MODIFY;
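
/* Decision table for the helper above (current state x requested vlan):
 *
 *	current state	vlan == 0	vlan same	vlan new
 *	DISABLE		NOCHANGE	ENABLE		ENABLE
 *	ENABLE		DISABLE		NOCHANGE	MODIFY
 *
 * "same" means both the vlan tag and the qos match the stored
 * configuration, so a qos-only change on the same tag yields MODIFY.
 */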
10474 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
10475 u16 vlan, u8 qos, __be16 proto)
10477 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
10478 struct hclge_vport *vport = hclge_get_vport(handle);
10479 struct hclge_dev *hdev = vport->back;
10480 struct hclge_vlan_info vlan_info;
10484 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10485 return -EOPNOTSUPP;
10487 vport = hclge_get_vf_vport(hdev, vfid);
10491 /* qos is a 3-bit value, so it cannot be bigger than 7 */
10492 if (vlan > VLAN_N_VID - 1 || qos > 7)
10494 if (proto != htons(ETH_P_8021Q))
10495 return -EPROTONOSUPPORT;
10497 state = hclge_get_port_base_vlan_state(vport,
10498 vport->port_base_vlan_cfg.state,
10500 if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
10503 vlan_info.vlan_tag = vlan;
10504 vlan_info.qos = qos;
10505 vlan_info.vlan_proto = ntohs(proto);
10507 ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
10509 dev_err(&hdev->pdev->dev,
10510 "failed to update port base vlan for vf %d, ret = %d\n",
10515 /* for DEVICE_VERSION_V3, vf doesn't need to know about the port based
10516 * vlan state change here, so only notify the vf on older versions.
10517 */
10518 if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
10519 test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
10520 hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
10521 vport->vport_id, state,
10527 static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
10529 struct hclge_vlan_info *vlan_info;
10530 struct hclge_vport *vport;
10534 /* clear the port base vlan for all vfs */
10535 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10536 vport = &hdev->vport[vf];
10537 vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10539 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10541 vlan_info->vlan_tag, true);
10543 dev_err(&hdev->pdev->dev,
10544 "failed to clear vf vlan for vf%d, ret = %d\n",
10545 vf - HCLGE_VF_VPORT_START_NUM, ret);
10549 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
10550 u16 vlan_id, bool is_kill)
10552 struct hclge_vport *vport = hclge_get_vport(handle);
10553 struct hclge_dev *hdev = vport->back;
10554 bool writen_to_tbl = false;
10557 /* When the device is resetting or reset has failed, firmware is unable
10558 * to handle the mailbox. Just record the vlan id, and remove it after
10559 * the reset finishes.
10560 */
10561 if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10562 test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
10563 set_bit(vlan_id, vport->vlan_del_fail_bmap);
10567 /* when port base vlan is enabled, we use the port base vlan as the
10568 * vlan filter entry. In this case, we don't update the vlan filter
10569 * table when the user adds or removes a vlan; we just update the
10570 * vport vlan list. The vlan ids in the vlan list are not written to
10571 * the vlan filter table until port base vlan is disabled.
10572 */
10573 if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10574 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
10576 writen_to_tbl = true;
10581 hclge_rm_vport_vlan_table(vport, vlan_id, false);
10583 hclge_add_vport_vlan_table(vport, vlan_id,
10585 } else if (is_kill) {
10586 /* when removing the hw vlan filter failed, record the vlan id and
10587 * try to remove it from hw later, to keep hw consistent with the
10588 * vport vlan list.
10589 */
10590 set_bit(vlan_id, vport->vlan_del_fail_bmap);
10593 hclge_set_vport_vlan_fltr_change(vport);
10598 static void hclge_sync_vlan_fltr_state(struct hclge_dev *hdev)
10600 struct hclge_vport *vport;
10604 for (i = 0; i < hdev->num_alloc_vport; i++) {
10605 vport = &hdev->vport[i];
10606 if (!test_and_clear_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
10610 ret = hclge_enable_vport_vlan_filter(vport,
10611 vport->req_vlan_fltr_en);
10613 dev_err(&hdev->pdev->dev,
10614 "failed to sync vlan filter state for vport%u, ret = %d\n",
10615 vport->vport_id, ret);
10616 set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
10623 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
10625 #define HCLGE_MAX_SYNC_COUNT 60
10627 int i, ret, sync_cnt = 0;
10630 /* start from vport 1 for PF is always alive */
10631 for (i = 0; i < hdev->num_alloc_vport; i++) {
10632 struct hclge_vport *vport = &hdev->vport[i];
10634 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10636 while (vlan_id != VLAN_N_VID) {
10637 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10638 vport->vport_id, vlan_id,
10640 if (ret && ret != -EINVAL)
10643 clear_bit(vlan_id, vport->vlan_del_fail_bmap);
10644 hclge_rm_vport_vlan_table(vport, vlan_id, false);
10645 hclge_set_vport_vlan_fltr_change(vport);
10648 if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
10651 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10656 hclge_sync_vlan_fltr_state(hdev);
10659 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
10661 struct hclge_config_max_frm_size_cmd *req;
10662 struct hclge_desc desc;
10664 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
10666 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
10667 req->max_frm_size = cpu_to_le16(new_mps);
10668 req->min_frm_size = HCLGE_MAC_MIN_FRAME;
10670 return hclge_cmd_send(&hdev->hw, &desc, 1);
10673 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
10675 struct hclge_vport *vport = hclge_get_vport(handle);
10677 return hclge_set_vport_mtu(vport, new_mtu);
10680 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
10682 struct hclge_dev *hdev = vport->back;
10683 int i, max_frm_size, ret;
10685 /* HW supports 2 layers of vlan */
10686 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
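	/* e.g. new_mtu = 1500 gives 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN)
	 * + 2 * 4 (VLAN_HLEN, double-tagged) = 1526 bytes on the wire
	 */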
10687 if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
10688 max_frm_size > hdev->ae_dev->dev_specs.max_frm_size)
10691 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
10692 mutex_lock(&hdev->vport_lock);
10693 /* VF's mps must fit within hdev->mps */
10694 if (vport->vport_id && max_frm_size > hdev->mps) {
10695 mutex_unlock(&hdev->vport_lock);
10697 } else if (vport->vport_id) {
10698 vport->mps = max_frm_size;
10699 mutex_unlock(&hdev->vport_lock);
10703 /* PF's mps must be greater than the VF's mps */
10704 for (i = 1; i < hdev->num_alloc_vport; i++)
10705 if (max_frm_size < hdev->vport[i].mps) {
10706 mutex_unlock(&hdev->vport_lock);
10710 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
10712 ret = hclge_set_mac_mtu(hdev, max_frm_size);
10714 dev_err(&hdev->pdev->dev,
10715 "Change mtu fail, ret =%d\n", ret);
10719 hdev->mps = max_frm_size;
10720 vport->mps = max_frm_size;
10722 ret = hclge_buffer_alloc(hdev);
10724 dev_err(&hdev->pdev->dev,
10725 "Allocate buffer fail, ret =%d\n", ret);
10728 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
10729 mutex_unlock(&hdev->vport_lock);
10733 static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id,
10736 struct hclge_reset_tqp_queue_cmd *req;
10737 struct hclge_desc desc;
10740 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
10742 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10743 req->tqp_id = cpu_to_le16(queue_id);
10745 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
10747 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10749 dev_err(&hdev->pdev->dev,
10750 "Send tqp reset cmd error, status =%d\n", ret);
10757 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id,
10760 struct hclge_reset_tqp_queue_cmd *req;
10761 struct hclge_desc desc;
10764 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
10766 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10767 req->tqp_id = cpu_to_le16(queue_id);
10769 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10771 dev_err(&hdev->pdev->dev,
10772 "Get reset status error, status =%d\n", ret);
10776 *reset_status = hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
10781 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
10783 struct hnae3_queue *queue;
10784 struct hclge_tqp *tqp;
10786 queue = handle->kinfo.tqp[queue_id];
10787 tqp = container_of(queue, struct hclge_tqp, q);
10792 static int hclge_reset_tqp_cmd(struct hnae3_handle *handle)
10794 struct hclge_vport *vport = hclge_get_vport(handle);
10795 struct hclge_dev *hdev = vport->back;
10796 u16 reset_try_times = 0;
10802 for (i = 0; i < handle->kinfo.num_tqps; i++) {
10803 queue_gid = hclge_covert_handle_qid_global(handle, i);
10804 ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, true);
10806 dev_err(&hdev->pdev->dev,
10807 "failed to send reset tqp cmd, ret = %d\n",
10812 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
10813 ret = hclge_get_reset_status(hdev, queue_gid,
10821 /* Wait for tqp hw reset */
10822 usleep_range(1000, 1200);
10825 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
10826 dev_err(&hdev->pdev->dev,
10827 "wait for tqp hw reset timeout\n");
10831 ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, false);
10833 dev_err(&hdev->pdev->dev,
10834 "failed to deassert soft reset, ret = %d\n",
10838 reset_try_times = 0;
10843 static int hclge_reset_rcb(struct hnae3_handle *handle)
10845 #define HCLGE_RESET_RCB_NOT_SUPPORT 0U
10846 #define HCLGE_RESET_RCB_SUCCESS 1U
10848 struct hclge_vport *vport = hclge_get_vport(handle);
10849 struct hclge_dev *hdev = vport->back;
10850 struct hclge_reset_cmd *req;
10851 struct hclge_desc desc;
10856 queue_gid = hclge_covert_handle_qid_global(handle, 0);
10858 req = (struct hclge_reset_cmd *)desc.data;
10859 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
10860 hnae3_set_bit(req->fun_reset_rcb, HCLGE_CFG_RESET_RCB_B, 1);
10861 req->fun_reset_rcb_vqid_start = cpu_to_le16(queue_gid);
10862 req->fun_reset_rcb_vqid_num = cpu_to_le16(handle->kinfo.num_tqps);
10864 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10866 dev_err(&hdev->pdev->dev,
10867 "failed to send rcb reset cmd, ret = %d\n", ret);
10871 return_status = req->fun_reset_rcb_return_status;
10872 if (return_status == HCLGE_RESET_RCB_SUCCESS)
10875 if (return_status != HCLGE_RESET_RCB_NOT_SUPPORT) {
10876 dev_err(&hdev->pdev->dev, "failed to reset rcb, ret = %u\n",
10881 /* if the reset rcb cmd is unsupported, fall back to sending the reset
10882 * tqp cmd to reset all tqps
10884 return hclge_reset_tqp_cmd(handle);
10887 int hclge_reset_tqp(struct hnae3_handle *handle)
10889 struct hclge_vport *vport = hclge_get_vport(handle);
10890 struct hclge_dev *hdev = vport->back;
10893 /* only need to disable PF's tqp */
10894 if (!vport->vport_id) {
10895 ret = hclge_tqp_enable(handle, false);
10897 dev_err(&hdev->pdev->dev,
10898 "failed to disable tqp, ret = %d\n", ret);
10903 return hclge_reset_rcb(handle);
10906 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
10908 struct hclge_vport *vport = hclge_get_vport(handle);
10909 struct hclge_dev *hdev = vport->back;
10911 return hdev->fw_version;
10914 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10916 struct phy_device *phydev = hdev->hw.mac.phydev;
10921 phy_set_asym_pause(phydev, rx_en, tx_en);
10924 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10928 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
10931 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
10933 dev_err(&hdev->pdev->dev,
10934 "configure pauseparam error, ret = %d.\n", ret);
10939 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
10941 struct phy_device *phydev = hdev->hw.mac.phydev;
10942 u16 remote_advertising = 0;
10943 u16 local_advertising;
10944 u32 rx_pause, tx_pause;
10947 if (!phydev->link || !phydev->autoneg)
10950 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
10953 remote_advertising = LPA_PAUSE_CAP;
10955 if (phydev->asym_pause)
10956 remote_advertising |= LPA_PAUSE_ASYM;
10958 flowctl = mii_resolve_flowctrl_fdx(local_advertising,
10959 remote_advertising);
10960 tx_pause = flowctl & FLOW_CTRL_TX;
10961 rx_pause = flowctl & FLOW_CTRL_RX;
10963 if (phydev->duplex == HCLGE_MAC_HALF) {
10968 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
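
/* A standalone illustration of the resolution above: with the local
 * side advertising ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM and the
 * link partner advertising LPA_PAUSE_CAP, mii_resolve_flowctrl_fdx()
 * returns FLOW_CTRL_TX | FLOW_CTRL_RX, i.e. symmetric pause.
 */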
10971 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
10972 u32 *rx_en, u32 *tx_en)
10974 struct hclge_vport *vport = hclge_get_vport(handle);
10975 struct hclge_dev *hdev = vport->back;
10976 u8 media_type = hdev->hw.mac.media_type;
10978 *auto_neg = (media_type == HNAE3_MEDIA_TYPE_COPPER) ?
10979 hclge_get_autoneg(handle) : 0;
10981 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10987 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
10990 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
10993 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
11002 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
11003 u32 rx_en, u32 tx_en)
11005 if (rx_en && tx_en)
11006 hdev->fc_mode_last_time = HCLGE_FC_FULL;
11007 else if (rx_en && !tx_en)
11008 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
11009 else if (!rx_en && tx_en)
11010 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
11012 hdev->fc_mode_last_time = HCLGE_FC_NONE;
11014 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
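/* The rx_en/tx_en pair recorded above maps onto the flow-control modes as
 * follows (a summary of the branches above, not new behaviour):
 *
 *	rx_en  tx_en  ->  fc_mode
 *	  1      1        HCLGE_FC_FULL
 *	  1      0        HCLGE_FC_RX_PAUSE
 *	  0      1        HCLGE_FC_TX_PAUSE
 *	  0      0        HCLGE_FC_NONE
 */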
11017 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
11018 u32 rx_en, u32 tx_en)
11020 struct hclge_vport *vport = hclge_get_vport(handle);
11021 struct hclge_dev *hdev = vport->back;
11022 struct phy_device *phydev = hdev->hw.mac.phydev;
11025 if (phydev || hnae3_dev_phy_imp_supported(hdev)) {
11026 fc_autoneg = hclge_get_autoneg(handle);
11027 if (auto_neg != fc_autoneg) {
11028 dev_info(&hdev->pdev->dev,
11029 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
11030 return -EOPNOTSUPP;
11034 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
11035 dev_info(&hdev->pdev->dev,
11036 "Priority flow control enabled. Cannot set link flow control.\n");
11037 return -EOPNOTSUPP;
11040 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
11042 hclge_record_user_pauseparam(hdev, rx_en, tx_en);
11044 if (!auto_neg || hnae3_dev_phy_imp_supported(hdev))
11045 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
11048 return phy_start_aneg(phydev);
11050 return -EOPNOTSUPP;
11053 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
11054 u8 *auto_neg, u32 *speed, u8 *duplex)
11056 struct hclge_vport *vport = hclge_get_vport(handle);
11057 struct hclge_dev *hdev = vport->back;
11060 *speed = hdev->hw.mac.speed;
11062 *duplex = hdev->hw.mac.duplex;
11064 *auto_neg = hdev->hw.mac.autoneg;
11067 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
11070 struct hclge_vport *vport = hclge_get_vport(handle);
11071 struct hclge_dev *hdev = vport->back;
11073 /* When the nic is down, the service task is not running and does not
11074 * update the port information every second. Query the port information
11075 * before returning the media type to ensure the media info is correct.
11077 hclge_update_port_info(hdev);
11080 *media_type = hdev->hw.mac.media_type;
11083 *module_type = hdev->hw.mac.module_type;
11086 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
11087 u8 *tp_mdix_ctrl, u8 *tp_mdix)
11089 struct hclge_vport *vport = hclge_get_vport(handle);
11090 struct hclge_dev *hdev = vport->back;
11091 struct phy_device *phydev = hdev->hw.mac.phydev;
11092 int mdix_ctrl, mdix, is_resolved;
11093 unsigned int retval;
11096 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
11097 *tp_mdix = ETH_TP_MDI_INVALID;
11101 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
11103 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
11104 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
11105 HCLGE_PHY_MDIX_CTRL_S);
11107 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
11108 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
11109 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
11111 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
11113 switch (mdix_ctrl) {
11115 *tp_mdix_ctrl = ETH_TP_MDI;
11118 *tp_mdix_ctrl = ETH_TP_MDI_X;
11121 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
11124 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
11129 *tp_mdix = ETH_TP_MDI_INVALID;
11131 *tp_mdix = ETH_TP_MDI_X;
11133 *tp_mdix = ETH_TP_MDI;
11136 static void hclge_info_show(struct hclge_dev *hdev)
11138 struct device *dev = &hdev->pdev->dev;
11140 dev_info(dev, "PF info begin:\n");
11142 dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
11143 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
11144 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
11145 dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
11146 dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
11147 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
11148 dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
11149 dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
11150 dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
11151 dev_info(dev, "This is %s PF\n",
11152 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
11153 dev_info(dev, "DCB %s\n",
11154 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
11155 dev_info(dev, "MQPRIO %s\n",
11156 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
11157 dev_info(dev, "Default tx spare buffer size: %u\n",
11158 hdev->tx_spare_buf_size);
11160 dev_info(dev, "PF info end.\n");
11163 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
11164 struct hclge_vport *vport)
11166 struct hnae3_client *client = vport->nic.client;
11167 struct hclge_dev *hdev = ae_dev->priv;
11168 int rst_cnt = hdev->rst_stats.reset_cnt;
11171 ret = client->ops->init_instance(&vport->nic);
11175 set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11176 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
11177 rst_cnt != hdev->rst_stats.reset_cnt) {
11182 /* Enable nic hw error interrupts */
11183 ret = hclge_config_nic_hw_error(hdev, true);
11185 dev_err(&ae_dev->pdev->dev,
11186 "fail(%d) to enable hw error interrupts\n", ret);
11190 hnae3_set_client_init_flag(client, ae_dev, 1);
11192 if (netif_msg_drv(&hdev->vport->nic))
11193 hclge_info_show(hdev);
11198 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11199 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11200 msleep(HCLGE_WAIT_RESET_DONE);
11202 client->ops->uninit_instance(&vport->nic, 0);
11207 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
11208 struct hclge_vport *vport)
11210 struct hclge_dev *hdev = ae_dev->priv;
11211 struct hnae3_client *client;
11215 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
11219 client = hdev->roce_client;
11220 ret = hclge_init_roce_base_info(vport);
11224 rst_cnt = hdev->rst_stats.reset_cnt;
11225 ret = client->ops->init_instance(&vport->roce);
11229 set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11230 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
11231 rst_cnt != hdev->rst_stats.reset_cnt) {
11233 goto init_roce_err;
11236 /* Enable roce ras interrupts */
11237 ret = hclge_config_rocee_ras_interrupt(hdev, true);
11239 dev_err(&ae_dev->pdev->dev,
11240 "fail(%d) to enable roce ras interrupts\n", ret);
11241 goto init_roce_err;
11244 hnae3_set_client_init_flag(client, ae_dev, 1);
11249 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11250 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11251 msleep(HCLGE_WAIT_RESET_DONE);
11253 hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
11258 static int hclge_init_client_instance(struct hnae3_client *client,
11259 struct hnae3_ae_dev *ae_dev)
11261 struct hclge_dev *hdev = ae_dev->priv;
11262 struct hclge_vport *vport = &hdev->vport[0];
11265 switch (client->type) {
11266 case HNAE3_CLIENT_KNIC:
11267 hdev->nic_client = client;
11268 vport->nic.client = client;
11269 ret = hclge_init_nic_client_instance(ae_dev, vport);
11273 ret = hclge_init_roce_client_instance(ae_dev, vport);
11278 case HNAE3_CLIENT_ROCE:
11279 if (hnae3_dev_roce_supported(hdev)) {
11280 hdev->roce_client = client;
11281 vport->roce.client = client;
11284 ret = hclge_init_roce_client_instance(ae_dev, vport);
11296 hdev->nic_client = NULL;
11297 vport->nic.client = NULL;
11300 hdev->roce_client = NULL;
11301 vport->roce.client = NULL;
11305 static void hclge_uninit_client_instance(struct hnae3_client *client,
11306 struct hnae3_ae_dev *ae_dev)
11308 struct hclge_dev *hdev = ae_dev->priv;
11309 struct hclge_vport *vport = &hdev->vport[0];
11311 if (hdev->roce_client) {
11312 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11313 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11314 msleep(HCLGE_WAIT_RESET_DONE);
11316 hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
11317 hdev->roce_client = NULL;
11318 vport->roce.client = NULL;
11320 if (client->type == HNAE3_CLIENT_ROCE)
11322 if (hdev->nic_client && client->ops->uninit_instance) {
11323 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11324 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11325 msleep(HCLGE_WAIT_RESET_DONE);
11327 client->ops->uninit_instance(&vport->nic, 0);
11328 hdev->nic_client = NULL;
11329 vport->nic.client = NULL;
11333 static int hclge_dev_mem_map(struct hclge_dev *hdev)
11335 #define HCLGE_MEM_BAR 4
11337 struct pci_dev *pdev = hdev->pdev;
11338 struct hclge_hw *hw = &hdev->hw;
11340 /* for devices that do not have device memory, return directly */
11341 if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
11344 hw->mem_base = devm_ioremap_wc(&pdev->dev,
11345 pci_resource_start(pdev, HCLGE_MEM_BAR),
11346 pci_resource_len(pdev, HCLGE_MEM_BAR));
11347 if (!hw->mem_base) {
11348 dev_err(&pdev->dev, "failed to map device memory\n");
11355 static int hclge_pci_init(struct hclge_dev *hdev)
11357 struct pci_dev *pdev = hdev->pdev;
11358 struct hclge_hw *hw;
11361 ret = pci_enable_device(pdev);
11363 dev_err(&pdev->dev, "failed to enable PCI device\n");
11367 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
11369 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
11371 dev_err(&pdev->dev,
11372 "can't set consistent PCI DMA");
11373 goto err_disable_device;
11375 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
11378 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
11380 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
11381 goto err_disable_device;
11384 pci_set_master(pdev);
11386 hw->io_base = pcim_iomap(pdev, 2, 0);
11387 if (!hw->io_base) {
11388 dev_err(&pdev->dev, "Can't map configuration register space\n");
11390 goto err_clr_master;
11393 ret = hclge_dev_mem_map(hdev);
11395 goto err_unmap_io_base;
11397 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
11402 pcim_iounmap(pdev, hdev->hw.io_base);
11404 pci_clear_master(pdev);
11405 pci_release_regions(pdev);
11406 err_disable_device:
11407 pci_disable_device(pdev);
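/* Illustrative sketch (not from this driver): the DMA-mask fallback used in
 * hclge_pci_init() above. A 64-bit mask is preferred; when the platform
 * cannot provide one, the driver retries with 32 bits and warns, since
 * bounce buffering may then be required. The -EIO error code here is an
 * assumption for the sketch.
 *
 *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
 *		if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
 *			return -EIO;	// no usable DMA mask at all
 *		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
 *	}
 */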
11412 static void hclge_pci_uninit(struct hclge_dev *hdev)
11414 struct pci_dev *pdev = hdev->pdev;
11416 if (hdev->hw.mem_base)
11417 devm_iounmap(&pdev->dev, hdev->hw.mem_base);
11419 pcim_iounmap(pdev, hdev->hw.io_base);
11420 pci_free_irq_vectors(pdev);
11421 pci_clear_master(pdev);
11422 pci_release_mem_regions(pdev);
11423 pci_disable_device(pdev);
11426 static void hclge_state_init(struct hclge_dev *hdev)
11428 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
11429 set_bit(HCLGE_STATE_DOWN, &hdev->state);
11430 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
11431 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11432 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
11433 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
11434 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
11437 static void hclge_state_uninit(struct hclge_dev *hdev)
11439 set_bit(HCLGE_STATE_DOWN, &hdev->state);
11440 set_bit(HCLGE_STATE_REMOVING, &hdev->state);
11442 if (hdev->reset_timer.function)
11443 del_timer_sync(&hdev->reset_timer);
11444 if (hdev->service_task.work.func)
11445 cancel_delayed_work_sync(&hdev->service_task);
11448 static void hclge_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
11449 enum hnae3_reset_type rst_type)
11451 #define HCLGE_RESET_RETRY_WAIT_MS 500
11452 #define HCLGE_RESET_RETRY_CNT 5
11454 struct hclge_dev *hdev = ae_dev->priv;
11459 down(&hdev->reset_sem);
11460 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11461 hdev->reset_type = rst_type;
11462 ret = hclge_reset_prepare(hdev);
11463 if (ret || hdev->reset_pending) {
11464 dev_err(&hdev->pdev->dev, "fail to prepare to reset, ret=%d\n",
11466 if (hdev->reset_pending ||
11467 retry_cnt++ < HCLGE_RESET_RETRY_CNT) {
11468 dev_err(&hdev->pdev->dev,
11469 "reset_pending:0x%lx, retry_cnt:%d\n",
11470 hdev->reset_pending, retry_cnt);
11471 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11472 up(&hdev->reset_sem);
11473 msleep(HCLGE_RESET_RETRY_WAIT_MS);
11478 /* disable misc vector before reset done */
11479 hclge_enable_vector(&hdev->misc_vector, false);
11480 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
11482 if (hdev->reset_type == HNAE3_FLR_RESET)
11483 hdev->rst_stats.flr_rst_cnt++;
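/* Illustrative sketch (not from this driver): the retry discipline used in
 * hclge_reset_prepare_general() above. Preparation runs under reset_sem;
 * on failure the semaphore is released before sleeping so that a concurrent
 * reset can make progress, then the sequence is retried up to
 * HCLGE_RESET_RETRY_CNT times. The helper name is hypothetical.
 *
 *	retry:
 *		down(&hdev->reset_sem);
 *		ret = prepare_reset(hdev);
 *		if (ret && retry_cnt++ < HCLGE_RESET_RETRY_CNT) {
 *			up(&hdev->reset_sem);		// let others run
 *			msleep(HCLGE_RESET_RETRY_WAIT_MS);
 *			goto retry;
 *		}
 */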
11486 static void hclge_reset_done(struct hnae3_ae_dev *ae_dev)
11488 struct hclge_dev *hdev = ae_dev->priv;
11491 hclge_enable_vector(&hdev->misc_vector, true);
11493 ret = hclge_reset_rebuild(hdev);
11495 dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
11497 hdev->reset_type = HNAE3_NONE_RESET;
11498 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11499 up(&hdev->reset_sem);
11502 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
11506 for (i = 0; i < hdev->num_alloc_vport; i++) {
11507 struct hclge_vport *vport = &hdev->vport[i];
11510 /* Send cmd to clear vport's FUNC_RST_ING */
11511 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
11513 dev_warn(&hdev->pdev->dev,
11514 "clear vport(%u) rst failed %d!\n",
11515 vport->vport_id, ret);
11519 static int hclge_clear_hw_resource(struct hclge_dev *hdev)
11521 struct hclge_desc desc;
11524 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_HW_RESOURCE, false);
11526 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11527 /* This new command is only supported by new firmware; it will
11528 * fail with older firmware. The error value -EOPNOTSUPP can only be
11529 * returned by older firmware running this command, so to keep the
11530 * code backward compatible we override this value and return success.
11533 if (ret && ret != -EOPNOTSUPP) {
11534 dev_err(&hdev->pdev->dev,
11535 "failed to clear hw resource, ret = %d\n", ret);
11541 static void hclge_init_rxd_adv_layout(struct hclge_dev *hdev)
11543 if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
11544 hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 1);
11547 static void hclge_uninit_rxd_adv_layout(struct hclge_dev *hdev)
11549 if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
11550 hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 0);
11553 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
11555 struct pci_dev *pdev = ae_dev->pdev;
11556 struct hclge_dev *hdev;
11559 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
11564 hdev->ae_dev = ae_dev;
11565 hdev->reset_type = HNAE3_NONE_RESET;
11566 hdev->reset_level = HNAE3_FUNC_RESET;
11567 ae_dev->priv = hdev;
11569 /* HW supports 2 layers of VLAN */
11570 hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
11572 mutex_init(&hdev->vport_lock);
11573 spin_lock_init(&hdev->fd_rule_lock);
11574 sema_init(&hdev->reset_sem, 1);
11576 ret = hclge_pci_init(hdev);
11580 ret = hclge_devlink_init(hdev);
11582 goto err_pci_uninit;
11584 /* Initialize the firmware command queue */
11585 ret = hclge_cmd_queue_init(hdev);
11587 goto err_devlink_uninit;
11589 /* Initialize the firmware command layer */
11590 ret = hclge_cmd_init(hdev);
11592 goto err_cmd_uninit;
11594 ret = hclge_clear_hw_resource(hdev);
11596 goto err_cmd_uninit;
11598 ret = hclge_get_cap(hdev);
11600 goto err_cmd_uninit;
11602 ret = hclge_query_dev_specs(hdev);
11604 dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
11606 goto err_cmd_uninit;
11609 ret = hclge_configure(hdev);
11611 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
11612 goto err_cmd_uninit;
11615 ret = hclge_init_msi(hdev);
11617 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
11618 goto err_cmd_uninit;
11621 ret = hclge_misc_irq_init(hdev);
11623 goto err_msi_uninit;
11625 ret = hclge_alloc_tqps(hdev);
11627 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
11628 goto err_msi_irq_uninit;
11631 ret = hclge_alloc_vport(hdev);
11633 goto err_msi_irq_uninit;
11635 ret = hclge_map_tqp(hdev);
11637 goto err_msi_irq_uninit;
11639 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER &&
11640 !hnae3_dev_phy_imp_supported(hdev)) {
11641 ret = hclge_mac_mdio_config(hdev);
11643 goto err_msi_irq_uninit;
11646 ret = hclge_init_umv_space(hdev);
11648 goto err_mdiobus_unreg;
11650 ret = hclge_mac_init(hdev);
11652 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11653 goto err_mdiobus_unreg;
11656 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11658 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11659 goto err_mdiobus_unreg;
11662 ret = hclge_config_gro(hdev);
11664 goto err_mdiobus_unreg;
11666 ret = hclge_init_vlan_config(hdev);
11668 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
11669 goto err_mdiobus_unreg;
11672 ret = hclge_tm_schd_init(hdev);
11674 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
11675 goto err_mdiobus_unreg;
11678 ret = hclge_rss_init_cfg(hdev);
11680 dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
11681 goto err_mdiobus_unreg;
11684 ret = hclge_rss_init_hw(hdev);
11686 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
11687 goto err_mdiobus_unreg;
11690 ret = init_mgr_tbl(hdev);
11692 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
11693 goto err_mdiobus_unreg;
11696 ret = hclge_init_fd_config(hdev);
11698 dev_err(&pdev->dev,
11699 "fd table init fail, ret=%d\n", ret);
11700 goto err_mdiobus_unreg;
11703 ret = hclge_ptp_init(hdev);
11705 goto err_mdiobus_unreg;
11707 INIT_KFIFO(hdev->mac_tnl_log);
11709 hclge_dcb_ops_set(hdev);
11711 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
11712 INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
11714 /* Set up affinity after the service timer setup, because add_timer_on
11715 * is called in the affinity notifier.
11717 hclge_misc_affinity_setup(hdev);
11719 hclge_clear_all_event_cause(hdev);
11720 hclge_clear_resetting_state(hdev);
11722 /* Log and clear the hw errors that have already occurred */
11723 if (hnae3_dev_ras_imp_supported(hdev))
11724 hclge_handle_occurred_error(hdev);
11726 hclge_handle_all_hns_hw_errors(ae_dev);
11728 /* request a delayed reset for error recovery, because an immediate
11729 * global reset on a PF would affect pending initialization of other PFs
11731 if (ae_dev->hw_err_reset_req) {
11732 enum hnae3_reset_type reset_level;
11734 reset_level = hclge_get_reset_level(ae_dev,
11735 &ae_dev->hw_err_reset_req);
11736 hclge_set_def_reset_request(ae_dev, reset_level);
11737 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
11740 hclge_init_rxd_adv_layout(hdev);
11742 /* Enable MISC vector(vector0) */
11743 hclge_enable_vector(&hdev->misc_vector, true);
11745 hclge_state_init(hdev);
11746 hdev->last_reset_time = jiffies;
11748 dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
11749 HCLGE_DRIVER_NAME);
11751 hclge_task_schedule(hdev, round_jiffies_relative(HZ));
11756 if (hdev->hw.mac.phydev)
11757 mdiobus_unregister(hdev->hw.mac.mdio_bus);
11758 err_msi_irq_uninit:
11759 hclge_misc_irq_uninit(hdev);
11761 pci_free_irq_vectors(pdev);
11763 hclge_cmd_uninit(hdev);
11764 err_devlink_uninit:
11765 hclge_devlink_uninit(hdev);
11767 pcim_iounmap(pdev, hdev->hw.io_base);
11768 pci_clear_master(pdev);
11769 pci_release_regions(pdev);
11770 pci_disable_device(pdev);
11772 mutex_destroy(&hdev->vport_lock);
11776 static void hclge_stats_clear(struct hclge_dev *hdev)
11778 memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
11781 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11783 return hclge_config_switch_param(hdev, vf, enable,
11784 HCLGE_SWITCH_ANTI_SPOOF_MASK);
11787 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11789 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
11790 HCLGE_FILTER_FE_NIC_INGRESS_B,
11794 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
11798 ret = hclge_set_mac_spoofchk(hdev, vf, enable);
11800 dev_err(&hdev->pdev->dev,
11801 "Set vf %d mac spoof check %s failed, ret=%d\n",
11802 vf, enable ? "on" : "off", ret);
11806 ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
11808 dev_err(&hdev->pdev->dev,
11809 "Set vf %d vlan spoof check %s failed, ret=%d\n",
11810 vf, enable ? "on" : "off", ret);
11815 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
11818 struct hclge_vport *vport = hclge_get_vport(handle);
11819 struct hclge_dev *hdev = vport->back;
11820 u32 new_spoofchk = enable ? 1 : 0;
11823 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11824 return -EOPNOTSUPP;
11826 vport = hclge_get_vf_vport(hdev, vf);
11830 if (vport->vf_info.spoofchk == new_spoofchk)
11833 if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
11834 dev_warn(&hdev->pdev->dev,
11835 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
11837 else if (enable && hclge_is_umv_space_full(vport, true))
11838 dev_warn(&hdev->pdev->dev,
11839 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
11842 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
11846 vport->vf_info.spoofchk = new_spoofchk;
11850 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
11852 struct hclge_vport *vport = hdev->vport;
11856 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11859 /* resume the vf spoof check state after reset */
11860 for (i = 0; i < hdev->num_alloc_vport; i++) {
11861 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
11862 vport->vf_info.spoofchk);
11872 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
11874 struct hclge_vport *vport = hclge_get_vport(handle);
11875 struct hclge_dev *hdev = vport->back;
11876 u32 new_trusted = enable ? 1 : 0;
11878 vport = hclge_get_vf_vport(hdev, vf);
11882 if (vport->vf_info.trusted == new_trusted)
11885 vport->vf_info.trusted = new_trusted;
11886 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
11887 hclge_task_schedule(hdev, 0);
11892 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
11897 /* reset vf rate to default value */
11898 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
11899 struct hclge_vport *vport = &hdev->vport[vf];
11901 vport->vf_info.max_tx_rate = 0;
11902 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
11904 dev_err(&hdev->pdev->dev,
11905 "vf%d failed to reset to default, ret=%d\n",
11906 vf - HCLGE_VF_VPORT_START_NUM, ret);
11910 static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
11911 int min_tx_rate, int max_tx_rate)
11913 if (min_tx_rate != 0 ||
11914 max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
11915 dev_err(&hdev->pdev->dev,
11916 "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
11917 min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
11924 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
11925 int min_tx_rate, int max_tx_rate, bool force)
11927 struct hclge_vport *vport = hclge_get_vport(handle);
11928 struct hclge_dev *hdev = vport->back;
11931 ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate);
11935 vport = hclge_get_vf_vport(hdev, vf);
11939 if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
11942 ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
11946 vport->vf_info.max_tx_rate = max_tx_rate;
11951 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
11953 struct hnae3_handle *handle = &hdev->vport->nic;
11954 struct hclge_vport *vport;
11958 /* resume the vf max_tx_rate after reset */
11959 for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
11960 vport = hclge_get_vf_vport(hdev, vf);
11964 /* zero means max rate; after reset, the firmware has already set it
11965 * to max rate, so just continue.
11967 if (!vport->vf_info.max_tx_rate)
11970 ret = hclge_set_vf_rate(handle, vf, 0,
11971 vport->vf_info.max_tx_rate, true);
11973 dev_err(&hdev->pdev->dev,
11974 "vf%d failed to resume tx_rate:%u, ret=%d\n",
11975 vf, vport->vf_info.max_tx_rate, ret);
11983 static void hclge_reset_vport_state(struct hclge_dev *hdev)
11985 struct hclge_vport *vport = hdev->vport;
11988 for (i = 0; i < hdev->num_alloc_vport; i++) {
11989 hclge_vport_stop(vport);
11994 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
11996 struct hclge_dev *hdev = ae_dev->priv;
11997 struct pci_dev *pdev = ae_dev->pdev;
12000 set_bit(HCLGE_STATE_DOWN, &hdev->state);
12002 hclge_stats_clear(hdev);
12003 /* NOTE: a PF reset does not need to clear or restore the PF and VF
12004 * table entries, so the tables in memory should not be cleared here.
12006 if (hdev->reset_type == HNAE3_IMP_RESET ||
12007 hdev->reset_type == HNAE3_GLOBAL_RESET) {
12008 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
12009 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
12010 bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
12011 hclge_reset_umv_space(hdev);
12014 ret = hclge_cmd_init(hdev);
12016 dev_err(&pdev->dev, "Cmd queue init failed\n");
12020 ret = hclge_map_tqp(hdev);
12022 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
12026 ret = hclge_mac_init(hdev);
12028 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
12032 ret = hclge_tp_port_init(hdev);
12034 dev_err(&pdev->dev, "failed to init tp port, ret = %d\n",
12039 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
12041 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
12045 ret = hclge_config_gro(hdev);
12049 ret = hclge_init_vlan_config(hdev);
12051 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
12055 ret = hclge_tm_init_hw(hdev, true);
12057 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
12061 ret = hclge_rss_init_hw(hdev);
12063 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
12067 ret = init_mgr_tbl(hdev);
12069 dev_err(&pdev->dev,
12070 "failed to reinit manager table, ret = %d\n", ret);
12074 ret = hclge_init_fd_config(hdev);
12076 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
12080 ret = hclge_ptp_init(hdev);
12084 /* Log and clear the hw errors that have already occurred */
12085 if (hnae3_dev_ras_imp_supported(hdev))
12086 hclge_handle_occurred_error(hdev);
12088 hclge_handle_all_hns_hw_errors(ae_dev);
12090 /* Re-enable the hw error interrupts because
12091 * the interrupts get disabled on global reset.
12093 ret = hclge_config_nic_hw_error(hdev, true);
12095 dev_err(&pdev->dev,
12096 "fail(%d) to re-enable NIC hw error interrupts\n",
12101 if (hdev->roce_client) {
12102 ret = hclge_config_rocee_ras_interrupt(hdev, true);
12104 dev_err(&pdev->dev,
12105 "fail(%d) to re-enable roce ras interrupts\n",
12111 hclge_reset_vport_state(hdev);
12112 ret = hclge_reset_vport_spoofchk(hdev);
12116 ret = hclge_resume_vf_rate(hdev);
12120 hclge_init_rxd_adv_layout(hdev);
12122 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
12123 HCLGE_DRIVER_NAME);
12128 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
12130 struct hclge_dev *hdev = ae_dev->priv;
12131 struct hclge_mac *mac = &hdev->hw.mac;
12133 hclge_reset_vf_rate(hdev);
12134 hclge_clear_vf_vlan(hdev);
12135 hclge_misc_affinity_teardown(hdev);
12136 hclge_state_uninit(hdev);
12137 hclge_ptp_uninit(hdev);
12138 hclge_uninit_rxd_adv_layout(hdev);
12139 hclge_uninit_mac_table(hdev);
12140 hclge_del_all_fd_entries(hdev);
12143 mdiobus_unregister(mac->mdio_bus);
12145 /* Disable MISC vector(vector0) */
12146 hclge_enable_vector(&hdev->misc_vector, false);
12147 synchronize_irq(hdev->misc_vector.vector_irq);
12149 /* Disable all hw interrupts */
12150 hclge_config_mac_tnl_int(hdev, false);
12151 hclge_config_nic_hw_error(hdev, false);
12152 hclge_config_rocee_ras_interrupt(hdev, false);
12154 hclge_cmd_uninit(hdev);
12155 hclge_misc_irq_uninit(hdev);
12156 hclge_devlink_uninit(hdev);
12157 hclge_pci_uninit(hdev);
12158 mutex_destroy(&hdev->vport_lock);
12159 hclge_uninit_vport_vlan_table(hdev);
12160 ae_dev->priv = NULL;
12163 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
12165 struct hclge_vport *vport = hclge_get_vport(handle);
12166 struct hclge_dev *hdev = vport->back;
12168 return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps);
12171 static void hclge_get_channels(struct hnae3_handle *handle,
12172 struct ethtool_channels *ch)
12174 ch->max_combined = hclge_get_max_channels(handle);
12175 ch->other_count = 1;
12177 ch->combined_count = handle->kinfo.rss_size;
12180 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
12181 u16 *alloc_tqps, u16 *max_rss_size)
12183 struct hclge_vport *vport = hclge_get_vport(handle);
12184 struct hclge_dev *hdev = vport->back;
12186 *alloc_tqps = vport->alloc_tqps;
12187 *max_rss_size = hdev->pf_rss_size_max;
12190 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
12191 bool rxfh_configured)
12193 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
12194 struct hclge_vport *vport = hclge_get_vport(handle);
12195 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
12196 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
12197 struct hclge_dev *hdev = vport->back;
12198 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
12199 u16 cur_rss_size = kinfo->rss_size;
12200 u16 cur_tqps = kinfo->num_tqps;
12201 u16 tc_valid[HCLGE_MAX_TC_NUM];
12207 kinfo->req_rss_size = new_tqps_num;
12209 ret = hclge_tm_vport_map_update(hdev);
12211 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
12215 roundup_size = roundup_pow_of_two(kinfo->rss_size);
12216 roundup_size = ilog2(roundup_size);
12217 /* Set the RSS TC mode according to the new RSS size */
12218 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
12221 if (!(hdev->hw_tc_map & BIT(i)))
12225 tc_size[i] = roundup_size;
12226 tc_offset[i] = kinfo->rss_size * i;
12228 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
12232 /* RSS indirection table has been configured by user */
12233 if (rxfh_configured)
12236 /* Reinitialize the RSS indirection table according to the new RSS size */
12237 rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32),
12242 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
12243 rss_indir[i] = i % kinfo->rss_size;
12245 ret = hclge_set_rss(handle, rss_indir, NULL, 0);
12247 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
12254 dev_info(&hdev->pdev->dev,
12255 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
12256 cur_rss_size, kinfo->rss_size,
12257 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);
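/* Worked example (illustrative) for the RSS TC sizing above: with
 * kinfo->rss_size = 6 and two TCs enabled, roundup_pow_of_two(6) = 8 and
 * ilog2(8) = 3, so each enabled TC is programmed with tc_size = 3 (a 2^3
 * queue span) while the offsets remain multiples of the real rss_size:
 *
 *	tc_size[0] = 3;		tc_offset[0] = 0;	// 6 * 0
 *	tc_size[1] = 3;		tc_offset[1] = 6;	// 6 * 1
 */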
12262 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
12263 u32 *regs_num_64_bit)
12265 struct hclge_desc desc;
12269 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
12270 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12272 dev_err(&hdev->pdev->dev,
12273 "Query register number cmd failed, ret = %d.\n", ret);
12277 *regs_num_32_bit = le32_to_cpu(desc.data[0]);
12278 *regs_num_64_bit = le32_to_cpu(desc.data[1]);
12280 total_num = *regs_num_32_bit + *regs_num_64_bit;
12287 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
12290 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
12291 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
12293 struct hclge_desc *desc;
12294 u32 *reg_val = data;
12304 nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
12305 cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
12306 HCLGE_32_BIT_REG_RTN_DATANUM);
12307 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
12311 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
12312 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
12314 dev_err(&hdev->pdev->dev,
12315 "Query 32 bit register cmd failed, ret = %d.\n", ret);
12320 for (i = 0; i < cmd_num; i++) {
12322 desc_data = (__le32 *)(&desc[i].data[0]);
12323 n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
12325 desc_data = (__le32 *)(&desc[i]);
12326 n = HCLGE_32_BIT_REG_RTN_DATANUM;
12328 for (k = 0; k < n; k++) {
12329 *reg_val++ = le32_to_cpu(*desc_data++);
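/* Worked example (illustrative) for the descriptor math above: each
 * descriptor returns HCLGE_32_BIT_REG_RTN_DATANUM = 8 words, but the first
 * one spends HCLGE_32_BIT_DESC_NODATA_LEN = 2 words on a header. Reading
 * 30 registers therefore needs DIV_ROUND_UP(30 + 2, 8) = 4 descriptors:
 * 6 data words from the first, then 8 + 8 + 8 from the remaining three.
 */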
12341 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
12344 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
12345 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
12347 struct hclge_desc *desc;
12348 u64 *reg_val = data;
12358 nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
12359 cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
12360 HCLGE_64_BIT_REG_RTN_DATANUM);
12361 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
12365 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
12366 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
12368 dev_err(&hdev->pdev->dev,
12369 "Query 64 bit register cmd failed, ret = %d.\n", ret);
12374 for (i = 0; i < cmd_num; i++) {
12376 desc_data = (__le64 *)(&desc[i].data[0]);
12377 n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
12379 desc_data = (__le64 *)(&desc[i]);
12380 n = HCLGE_64_BIT_REG_RTN_DATANUM;
12382 for (k = 0; k < n; k++) {
12383 *reg_val++ = le64_to_cpu(*desc_data++);
12395 #define MAX_SEPARATE_NUM 4
12396 #define SEPARATOR_VALUE 0xFDFCFBFA
12397 #define REG_NUM_PER_LINE 4
12398 #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
12399 #define REG_SEPARATOR_LINE 1
12400 #define REG_NUM_REMAIN_MASK 3
12402 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
12406 /* initialize all command BDs except the last one */
12407 for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
12408 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
12410 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12413 /* initialize the last command BD */
12414 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
12416 return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
12419 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
12423 u32 entries_per_desc, desc_index, index, offset, i;
12424 struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
12427 ret = hclge_query_bd_num_cmd_send(hdev, desc);
12429 dev_err(&hdev->pdev->dev,
12430 "Get dfx bd num fail, status is %d.\n", ret);
12434 entries_per_desc = ARRAY_SIZE(desc[0].data);
12435 for (i = 0; i < type_num; i++) {
12436 offset = hclge_dfx_bd_offset_list[i];
12437 index = offset % entries_per_desc;
12438 desc_index = offset / entries_per_desc;
12439 bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
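/* Worked example (illustrative) for the indexing above: each descriptor
 * carries ARRAY_SIZE(desc[0].data) = 6 u32 entries, so BD-number offset 9
 * (HCLGE_DFX_PPP_BD_OFFSET) lives in descriptor 9 / 6 = 1 at index
 * 9 % 6 = 3.
 */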
12445 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
12446 struct hclge_desc *desc_src, int bd_num,
12447 enum hclge_opcode_type cmd)
12449 struct hclge_desc *desc = desc_src;
12452 hclge_cmd_setup_basic_desc(desc, cmd, true);
12453 for (i = 0; i < bd_num - 1; i++) {
12454 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12456 hclge_cmd_setup_basic_desc(desc, cmd, true);
12460 ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
12462 dev_err(&hdev->pdev->dev,
12463 "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
12469 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
12472 int entries_per_desc, reg_num, separator_num, desc_index, index, i;
12473 struct hclge_desc *desc = desc_src;
12476 entries_per_desc = ARRAY_SIZE(desc->data);
12477 reg_num = entries_per_desc * bd_num;
12478 separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
12479 for (i = 0; i < reg_num; i++) {
12480 index = i % entries_per_desc;
12481 desc_index = i / entries_per_desc;
12482 *reg++ = le32_to_cpu(desc[desc_index].data[index]);
12484 for (i = 0; i < separator_num; i++)
12485 *reg++ = SEPARATOR_VALUE;
12487 return reg_num + separator_num;
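/* Worked example (illustrative) for the padding above: the dump is padded
 * so that every REG_NUM_PER_LINE = 4 word line is complete. With
 * reg_num = 10, separator_num = 4 - (10 & 3) = 2, so two SEPARATOR_VALUE
 * words are appended and 12 words (three full lines) are returned; when
 * reg_num is already a multiple of 4, a whole separator line is added.
 */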
12490 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
12492 u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
12493 int data_len_per_desc, bd_num, i;
12498 bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
12502 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
12504 dev_err(&hdev->pdev->dev,
12505 "Get dfx reg bd num fail, status is %d.\n", ret);
12509 data_len_per_desc = sizeof_field(struct hclge_desc, data);
12511 for (i = 0; i < dfx_reg_type_num; i++) {
12512 bd_num = bd_num_list[i];
12513 data_len = data_len_per_desc * bd_num;
12514 *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
12518 kfree(bd_num_list);
12522 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
12524 u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
12525 int bd_num, bd_num_max, buf_len, i;
12526 struct hclge_desc *desc_src;
12531 bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
12535 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
12537 dev_err(&hdev->pdev->dev,
12538 "Get dfx reg bd num fail, status is %d.\n", ret);
12542 bd_num_max = bd_num_list[0];
12543 for (i = 1; i < dfx_reg_type_num; i++)
12544 bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
12546 buf_len = sizeof(*desc_src) * bd_num_max;
12547 desc_src = kzalloc(buf_len, GFP_KERNEL);
12553 for (i = 0; i < dfx_reg_type_num; i++) {
12554 bd_num = bd_num_list[i];
12555 ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
12556 hclge_dfx_reg_opcode_list[i]);
12558 dev_err(&hdev->pdev->dev,
12559 "Get dfx reg fail, status is %d.\n", ret);
12563 reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
12568 kfree(bd_num_list);
12572 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
12573 struct hnae3_knic_private_info *kinfo)
12575 #define HCLGE_RING_REG_OFFSET 0x200
12576 #define HCLGE_RING_INT_REG_OFFSET 0x4
12578 int i, j, reg_num, separator_num;
12582 /* fetch per-PF register values from the PF PCIe register space */
12583 reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
12584 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12585 for (i = 0; i < reg_num; i++)
12586 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
12587 for (i = 0; i < separator_num; i++)
12588 *reg++ = SEPARATOR_VALUE;
12589 data_num_sum = reg_num + separator_num;
12591 reg_num = ARRAY_SIZE(common_reg_addr_list);
12592 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12593 for (i = 0; i < reg_num; i++)
12594 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
12595 for (i = 0; i < separator_num; i++)
12596 *reg++ = SEPARATOR_VALUE;
12597 data_num_sum += reg_num + separator_num;
12599 reg_num = ARRAY_SIZE(ring_reg_addr_list);
12600 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12601 for (j = 0; j < kinfo->num_tqps; j++) {
12602 for (i = 0; i < reg_num; i++)
12603 *reg++ = hclge_read_dev(&hdev->hw,
12604 ring_reg_addr_list[i] +
12605 HCLGE_RING_REG_OFFSET * j);
12606 for (i = 0; i < separator_num; i++)
12607 *reg++ = SEPARATOR_VALUE;
12609 data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
12611 reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
12612 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12613 for (j = 0; j < hdev->num_msi_used - 1; j++) {
12614 for (i = 0; i < reg_num; i++)
12615 *reg++ = hclge_read_dev(&hdev->hw,
12616 tqp_intr_reg_addr_list[i] +
12617 HCLGE_RING_INT_REG_OFFSET * j);
12618 for (i = 0; i < separator_num; i++)
12619 *reg++ = SEPARATOR_VALUE;
12621 data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
12623 return data_num_sum;
12626 static int hclge_get_regs_len(struct hnae3_handle *handle)
12628 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
12629 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
12630 struct hclge_vport *vport = hclge_get_vport(handle);
12631 struct hclge_dev *hdev = vport->back;
12632 int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
12633 int regs_lines_32_bit, regs_lines_64_bit;
12636 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
12638 dev_err(&hdev->pdev->dev,
12639 "Get register number failed, ret = %d.\n", ret);
12643 ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
12645 dev_err(&hdev->pdev->dev,
12646 "Get dfx reg len failed, ret = %d.\n", ret);
12650 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
12651 REG_SEPARATOR_LINE;
12652 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
12653 REG_SEPARATOR_LINE;
12654 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
12655 REG_SEPARATOR_LINE;
12656 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
12657 REG_SEPARATOR_LINE;
12658 regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
12659 REG_SEPARATOR_LINE;
12660 regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
12661 REG_SEPARATOR_LINE;
12663 return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
12664 tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
12665 regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
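/* Worked example (illustrative, with a hypothetical 10-entry register
 * list): sizeof(u32) * 10 = 40 bytes make 40 / REG_LEN_PER_LINE = 2 full
 * 16-byte lines, and the added REG_SEPARATOR_LINE absorbs the final
 * partial line, giving 3 lines in total; this matches the 10 register
 * words plus 2 separator words actually emitted by hclge_fetch_pf_reg().
 * The line total is converted back to bytes by multiplying by
 * REG_LEN_PER_LINE before dfx_regs_len is added.
 */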
12668 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
12671 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
12672 struct hclge_vport *vport = hclge_get_vport(handle);
12673 struct hclge_dev *hdev = vport->back;
12674 u32 regs_num_32_bit, regs_num_64_bit;
12675 int i, reg_num, separator_num, ret;
12678 *version = hdev->fw_version;
12680 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
12682 dev_err(&hdev->pdev->dev,
12683 "Get register number failed, ret = %d.\n", ret);
12687 reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
12689 ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
12691 dev_err(&hdev->pdev->dev,
12692 "Get 32 bit register failed, ret = %d.\n", ret);
12695 reg_num = regs_num_32_bit;
12697 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12698 for (i = 0; i < separator_num; i++)
12699 *reg++ = SEPARATOR_VALUE;
12701 ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
12703 dev_err(&hdev->pdev->dev,
12704 "Get 64 bit register failed, ret = %d.\n", ret);
12707 reg_num = regs_num_64_bit * 2;
12709 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12710 for (i = 0; i < separator_num; i++)
12711 *reg++ = SEPARATOR_VALUE;
12713 ret = hclge_get_dfx_reg(hdev, reg);
12715 dev_err(&hdev->pdev->dev,
12716 "Get dfx register failed, ret = %d.\n", ret);
12719 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
12721 struct hclge_set_led_state_cmd *req;
12722 struct hclge_desc desc;
12725 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
12727 req = (struct hclge_set_led_state_cmd *)desc.data;
12728 hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
12729 HCLGE_LED_LOCATE_STATE_S, locate_led_status);
12731 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12733 dev_err(&hdev->pdev->dev,
12734 "Send set led state cmd error, ret =%d\n", ret);
12739 enum hclge_led_status {
12742 HCLGE_LED_NO_CHANGE = 0xFF,
12745 static int hclge_set_led_id(struct hnae3_handle *handle,
12746 enum ethtool_phys_id_state status)
12748 struct hclge_vport *vport = hclge_get_vport(handle);
12749 struct hclge_dev *hdev = vport->back;
12752 case ETHTOOL_ID_ACTIVE:
12753 return hclge_set_led_status(hdev, HCLGE_LED_ON);
12754 case ETHTOOL_ID_INACTIVE:
12755 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
12761 static void hclge_get_link_mode(struct hnae3_handle *handle,
12762 unsigned long *supported,
12763 unsigned long *advertising)
12765 unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
12766 struct hclge_vport *vport = hclge_get_vport(handle);
12767 struct hclge_dev *hdev = vport->back;
12768 unsigned int idx = 0;
12770 for (; idx < size; idx++) {
12771 supported[idx] = hdev->hw.mac.supported[idx];
12772 advertising[idx] = hdev->hw.mac.advertising[idx];
12776 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
12778 struct hclge_vport *vport = hclge_get_vport(handle);
12779 struct hclge_dev *hdev = vport->back;
12780 bool gro_en_old = hdev->gro_en;
12783 hdev->gro_en = enable;
12784 ret = hclge_config_gro(hdev);
12786 hdev->gro_en = gro_en_old;
12791 static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
12793 struct hclge_vport *vport = &hdev->vport[0];
12794 struct hnae3_handle *handle = &vport->nic;
12799 if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
12800 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
12801 vport->last_promisc_flags = vport->overflow_promisc_flags;
12804 if (test_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state)) {
12805 tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
12806 ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
12807 tmp_flags & HNAE3_MPE);
12809 clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12811 set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
12816 for (i = 1; i < hdev->num_alloc_vport; i++) {
12817 bool uc_en = false;
12818 bool mc_en = false;
12821 vport = &hdev->vport[i];
12823 if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12827 if (vport->vf_info.trusted) {
12828 uc_en = vport->vf_info.request_uc_en > 0;
12829 mc_en = vport->vf_info.request_mc_en > 0;
12831 bc_en = vport->vf_info.request_bc_en > 0;
12833 ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en,
12836 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12840 hclge_set_vport_vlan_fltr_change(vport);
12844 static bool hclge_module_existed(struct hclge_dev *hdev)
12846 struct hclge_desc desc;
12850 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
12851 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12853 dev_err(&hdev->pdev->dev,
12854 "failed to get SFP exist state, ret = %d\n", ret);
12858 existed = le32_to_cpu(desc.data[0]);
12860 return existed != 0;
12863 /* one read needs 6 BDs (140 bytes in total);
12864 * return the number of bytes actually read, 0 means the read failed.
12866 static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
12869 struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
12870 struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
12876 /* setup all 6 bds to read module eeprom info. */
12877 for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12878 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
12881 /* bd0~bd4 need next flag */
12882 if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
12883 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12886 /* set up bd0; this bd contains the offset and read length. */
12887 sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
12888 sfp_info_bd0->offset = cpu_to_le16((u16)offset);
12889 read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
12890 sfp_info_bd0->read_len = cpu_to_le16(read_len);
12892 ret = hclge_cmd_send(&hdev->hw, desc, i);
12894 dev_err(&hdev->pdev->dev,
12895 "failed to get SFP eeprom info, ret = %d\n", ret);
12899 /* copy sfp info from bd0 to out buffer. */
12900 copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
12901 memcpy(data, sfp_info_bd0->data, copy_len);
12902 read_len = copy_len;
12904 /* copy sfp info from bd1~bd5 to out buffer if needed. */
12905 for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12906 if (read_len >= len)
12909 copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
12910 memcpy(data + read_len, desc[i].data, copy_len);
12911 read_len += copy_len;
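/* Worked example (illustrative, assuming bd0 carries 20 data bytes and
 * bd1~bd5 carry 24 each, which matches the 140-byte total noted above):
 * a 60-byte request is satisfied as
 *
 *	copy 20 bytes from bd0	-> read_len = 20
 *	copy 24 bytes from bd1	-> read_len = 44
 *	copy 16 bytes from bd2	-> read_len = 60	(done)
 *
 * so the caller always sees one contiguous buffer regardless of how the
 * firmware split the data across descriptors.
 */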
12917 static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
12920 struct hclge_vport *vport = hclge_get_vport(handle);
12921 struct hclge_dev *hdev = vport->back;
12925 if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
12926 return -EOPNOTSUPP;
12928 if (!hclge_module_existed(hdev))
12931 while (read_len < len) {
12932 data_len = hclge_get_sfp_eeprom_info(hdev,
12939 read_len += data_len;
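/* Illustrative sketch (not from this driver): the chunking loop above in
 * caller terms: repeated reads of at most 140 bytes advance through the
 * module EEPROM until the requested length is satisfied; a zero-length
 * result signals failure. read_chunk() is a hypothetical stand-in.
 *
 *	while (read_len < len) {
 *		n = read_chunk(offset + read_len, len - read_len,
 *			       data + read_len);
 *		if (!n)
 *			return -EIO;
 *		read_len += n;
 *	}
 */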
12945 static int hclge_get_link_diagnosis_info(struct hnae3_handle *handle,
12948 struct hclge_vport *vport = hclge_get_vport(handle);
12949 struct hclge_dev *hdev = vport->back;
12950 struct hclge_desc desc;
12953 if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2)
12954 return -EOPNOTSUPP;
12956 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_DIAGNOSIS, true);
12957 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12959 dev_err(&hdev->pdev->dev,
12960 "failed to query link diagnosis info, ret = %d\n", ret);
12964 *status_code = le32_to_cpu(desc.data[0]);
12968 static const struct hnae3_ae_ops hclge_ops = {
12969 .init_ae_dev = hclge_init_ae_dev,
12970 .uninit_ae_dev = hclge_uninit_ae_dev,
12971 .reset_prepare = hclge_reset_prepare_general,
12972 .reset_done = hclge_reset_done,
12973 .init_client_instance = hclge_init_client_instance,
12974 .uninit_client_instance = hclge_uninit_client_instance,
12975 .map_ring_to_vector = hclge_map_ring_to_vector,
12976 .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
12977 .get_vector = hclge_get_vector,
12978 .put_vector = hclge_put_vector,
12979 .set_promisc_mode = hclge_set_promisc_mode,
12980 .request_update_promisc_mode = hclge_request_update_promisc_mode,
12981 .set_loopback = hclge_set_loopback,
12982 .start = hclge_ae_start,
12983 .stop = hclge_ae_stop,
12984 .client_start = hclge_client_start,
12985 .client_stop = hclge_client_stop,
12986 .get_status = hclge_get_status,
12987 .get_ksettings_an_result = hclge_get_ksettings_an_result,
12988 .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
12989 .get_media_type = hclge_get_media_type,
12990 .check_port_speed = hclge_check_port_speed,
12991 .get_fec = hclge_get_fec,
12992 .set_fec = hclge_set_fec,
12993 .get_rss_key_size = hclge_get_rss_key_size,
12994 .get_rss = hclge_get_rss,
12995 .set_rss = hclge_set_rss,
12996 .set_rss_tuple = hclge_set_rss_tuple,
12997 .get_rss_tuple = hclge_get_rss_tuple,
12998 .get_tc_size = hclge_get_tc_size,
12999 .get_mac_addr = hclge_get_mac_addr,
13000 .set_mac_addr = hclge_set_mac_addr,
13001 .do_ioctl = hclge_do_ioctl,
13002 .add_uc_addr = hclge_add_uc_addr,
13003 .rm_uc_addr = hclge_rm_uc_addr,
13004 .add_mc_addr = hclge_add_mc_addr,
13005 .rm_mc_addr = hclge_rm_mc_addr,
13006 .set_autoneg = hclge_set_autoneg,
13007 .get_autoneg = hclge_get_autoneg,
13008 .restart_autoneg = hclge_restart_autoneg,
13009 .halt_autoneg = hclge_halt_autoneg,
13010 .get_pauseparam = hclge_get_pauseparam,
13011 .set_pauseparam = hclge_set_pauseparam,
13012 .set_mtu = hclge_set_mtu,
13013 .reset_queue = hclge_reset_tqp,
13014 .get_stats = hclge_get_stats,
13015 .get_mac_stats = hclge_get_mac_stat,
13016 .update_stats = hclge_update_stats,
13017 .get_strings = hclge_get_strings,
13018 .get_sset_count = hclge_get_sset_count,
13019 .get_fw_version = hclge_get_fw_version,
13020 .get_mdix_mode = hclge_get_mdix_mode,
13021 .enable_vlan_filter = hclge_enable_vlan_filter,
13022 .set_vlan_filter = hclge_set_vlan_filter,
13023 .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
13024 .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
13025 .reset_event = hclge_reset_event,
13026 .get_reset_level = hclge_get_reset_level,
13027 .set_default_reset_request = hclge_set_def_reset_request,
13028 .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
13029 .set_channels = hclge_set_channels,
13030 .get_channels = hclge_get_channels,
13031 .get_regs_len = hclge_get_regs_len,
13032 .get_regs = hclge_get_regs,
13033 .set_led_id = hclge_set_led_id,
13034 .get_link_mode = hclge_get_link_mode,
13035 .add_fd_entry = hclge_add_fd_entry,
13036 .del_fd_entry = hclge_del_fd_entry,
13037 .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
13038 .get_fd_rule_info = hclge_get_fd_rule_info,
13039 .get_fd_all_rules = hclge_get_all_rules,
13040 .enable_fd = hclge_enable_fd,
13041 .add_arfs_entry = hclge_add_fd_entry_by_arfs,
13042 .dbg_read_cmd = hclge_dbg_read_cmd,
13043 .handle_hw_ras_error = hclge_handle_hw_ras_error,
13044 .get_hw_reset_stat = hclge_get_hw_reset_stat,
13045 .ae_dev_resetting = hclge_ae_dev_resetting,
13046 .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
13047 .set_gro_en = hclge_gro_en,
13048 .get_global_queue_id = hclge_covert_handle_qid_global,
13049 .set_timer_task = hclge_set_timer_task,
13050 .mac_connect_phy = hclge_mac_connect_phy,
13051 .mac_disconnect_phy = hclge_mac_disconnect_phy,
13052 .get_vf_config = hclge_get_vf_config,
13053 .set_vf_link_state = hclge_set_vf_link_state,
13054 .set_vf_spoofchk = hclge_set_vf_spoofchk,
13055 .set_vf_trust = hclge_set_vf_trust,
13056 .set_vf_rate = hclge_set_vf_rate,
13057 .set_vf_mac = hclge_set_vf_mac,
13058 .get_module_eeprom = hclge_get_module_eeprom,
13059 .get_cmdq_stat = hclge_get_cmdq_stat,
13060 .add_cls_flower = hclge_add_cls_flower,
13061 .del_cls_flower = hclge_del_cls_flower,
13062 .cls_flower_active = hclge_is_cls_flower_active,
13063 .get_phy_link_ksettings = hclge_get_phy_link_ksettings,
13064 .set_phy_link_ksettings = hclge_set_phy_link_ksettings,
13065 .set_tx_hwts_info = hclge_ptp_set_tx_info,
13066 .get_rx_hwts = hclge_ptp_get_rx_hwts,
13067 .get_ts_info = hclge_ptp_get_ts_info,
13068 .get_link_diagnosis_info = hclge_get_link_diagnosis_info,
13071 static struct hnae3_ae_algo ae_algo = {
13073 .pdev_id_table = ae_algo_pci_tbl,
13076 static int hclge_init(void)
13078 pr_info("%s is initializing\n", HCLGE_NAME);
13080 hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
13082 pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
13086 hnae3_register_ae_algo(&ae_algo);
13091 static void hclge_exit(void)
13093 hnae3_unregister_ae_algo(&ae_algo);
13094 destroy_workqueue(hclge_wq);
13096 module_init(hclge_init);
13097 module_exit(hclge_exit);
13099 MODULE_LICENSE("GPL");
13100 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
13101 MODULE_DESCRIPTION("HCLGE Driver");
13102 MODULE_VERSION(HCLGE_MOD_VERSION);