// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"

#define HCLGE_NAME			"hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))

#define HCLGE_BUF_SIZE_UNIT	256
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
			       u16 *allocated_size, bool is_alloc);

static struct hnae3_ae_algo ae_algo;
static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
					 HCLGE_CMDQ_TX_ADDR_H_REG,
					 HCLGE_CMDQ_TX_DEPTH_REG,
					 HCLGE_CMDQ_TX_TAIL_REG,
					 HCLGE_CMDQ_TX_HEAD_REG,
					 HCLGE_CMDQ_RX_ADDR_L_REG,
					 HCLGE_CMDQ_RX_ADDR_H_REG,
					 HCLGE_CMDQ_RX_DEPTH_REG,
					 HCLGE_CMDQ_RX_TAIL_REG,
					 HCLGE_CMDQ_RX_HEAD_REG,
					 HCLGE_VECTOR0_CMDQ_SRC_REG,
					 HCLGE_CMDQ_INTR_STS_REG,
					 HCLGE_CMDQ_INTR_EN_REG,
					 HCLGE_CMDQ_INTR_GEN_REG};
static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
					   HCLGE_VECTOR0_OTER_EN_REG,
					   HCLGE_MISC_RESET_STS_REG,
					   HCLGE_MISC_VECTOR_INT_STS,
					   HCLGE_GLOBAL_RESET_REG,
					   HCLGE_FUN_RST_ING,
					   HCLGE_GBL_RST_STATUS_REG};

static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
					 HCLGE_RING_RX_ADDR_H_REG,
					 HCLGE_RING_RX_BD_NUM_REG,
					 HCLGE_RING_RX_BD_LENGTH_REG,
					 HCLGE_RING_RX_MERGE_EN_REG,
					 HCLGE_RING_RX_TAIL_REG,
					 HCLGE_RING_RX_HEAD_REG,
					 HCLGE_RING_RX_FBD_NUM_REG,
					 HCLGE_RING_RX_OFFSET_REG,
					 HCLGE_RING_RX_FBD_OFFSET_REG,
					 HCLGE_RING_RX_STASH_REG,
					 HCLGE_RING_RX_BD_ERR_REG,
					 HCLGE_RING_TX_ADDR_L_REG,
					 HCLGE_RING_TX_ADDR_H_REG,
					 HCLGE_RING_TX_BD_NUM_REG,
					 HCLGE_RING_TX_PRIORITY_REG,
					 HCLGE_RING_TX_TC_REG,
					 HCLGE_RING_TX_MERGE_EN_REG,
					 HCLGE_RING_TX_TAIL_REG,
					 HCLGE_RING_TX_HEAD_REG,
					 HCLGE_RING_TX_FBD_NUM_REG,
					 HCLGE_RING_TX_OFFSET_REG,
					 HCLGE_RING_TX_EBD_NUM_REG,
					 HCLGE_RING_TX_EBD_OFFSET_REG,
					 HCLGE_RING_TX_BD_ERR_REG,
					 HCLGE_RING_EN_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
					     HCLGE_TQP_INTR_GL0_REG,
					     HCLGE_TQP_INTR_GL1_REG,
					     HCLGE_TQP_INTR_GL2_REG,
					     HCLGE_TQP_INTR_RL_REG};

static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"App      Loopback test",
	"Serdes   serial Loopback test",
	"Serdes   parallel Loopback test",
	"Phy      Loopback test"
};
static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
	{"mac_rx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
	{"mac_tx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};
static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
		.mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
		.mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
		.i_port_bitmap = 0x1,
	},
};

static const u8 hclge_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};
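/* Legacy MAC statistics read path: firmware that only implements the
 * fixed-length query (special opcode 0032) returns the counters in a run
 * of HCLGE_MAC_CMD_NUM descriptors. The first descriptor carries the
 * command head, so it holds fewer 64-bit counters than the rest.
 */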
static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21

	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);

		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		/* for special opcode 0032, only the first desc has the head */
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	return 0;
}
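/* Newer MAC statistics read path (special opcode 0034): the descriptor
 * count is queried from firmware beforehand, so the statistics set can
 * grow without a driver change.
 */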
static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
{
	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc *desc;
	__le64 *desc_data;
	u16 i, k, n;
	int ret;

	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;
	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
	if (ret) {
		kfree(desc);
		return ret;
	}

	for (i = 0; i < desc_num; i++) {
		/* for special opcode 0034, only the first desc has the head */
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	kfree(desc);

	return 0;
}
static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
{
	struct hclge_desc desc;
	__le32 *desc_data;
	u32 reg_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	desc_data = (__le32 *)(&desc.data[0]);
	reg_num = le32_to_cpu(*desc_data);

	*desc_num = 1 + ((reg_num - 3) >> 2) +
		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);

	return 0;
}
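/* Query the MAC statistics: prefer the register-number based method and
 * fall back to the legacy fixed-length read when firmware answers the
 * register-number query with -EOPNOTSUPP.
 */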
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
	u32 desc_num;
	int ret;

	ret = hclge_mac_query_reg_num(hdev, &desc_num);

	/* The firmware supports the new statistics acquisition method */
	if (!ret)
		ret = hclge_mac_update_stats_complete(hdev, desc_num);
	else if (ret == -EOPNOTSUPP)
		ret = hclge_mac_update_stats_defective(hdev);
	else
		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");

	return ret;
}
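/* Read the per-queue RX/TX packet counters, one command descriptor per
 * TQP, and accumulate them into the software tqp_stats running sums.
 */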
static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_RX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}
static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	/* each TQP contributes one TX and one RX counter */
	return kinfo->num_tqps * (2);
}

static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i = 0;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
			struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
			struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}

static u64 *hclge_comm_get_stats(void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}
static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status)
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);
}

static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int status;

	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}
static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
		HNAE3_SUPPORT_PHY_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only GE mode supports it
	 * serdes: all mac modes support it, including GE/XGE/LGE/CGE
	 * phy: only supported when a phy device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->pdev->revision >= 0x21 ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
		}

		count += 2;
		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}
static void hclge_get_strings(struct hnae3_handle *handle,
			      u32 stringset,
			      u8 *data)
{
	u8 *p = (char *)data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_mac_stats_string,
					   size,
					   p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_APP],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}

static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
				 g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string),
				 data);
	p = hclge_tqps_get_stats(handle, p);
}

static void hclge_get_mac_pause_stat(struct hnae3_handle *handle, u64 *tx_cnt,
				     u64 *rx_cnt)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*tx_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
	*rx_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
}
static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	return 0;
}
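/* Poll the function status until firmware reports the PF state as done
 * (i.e. PF reset finished), retrying up to 5 times with a 1-2 ms sleep
 * between attempts.
 */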
static int hclge_query_function_status(struct hclge_dev *hdev)
{
	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n",
				ret);

			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < 5);

	ret = hclge_parse_func_status(hdev, req);

	return ret;
}
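/* Read the PF resources from firmware: TQP count, packet/TX/DV buffer
 * sizes and the MSI-X layout. With RoCE supported, the NIC vectors are
 * laid out before the RoCE vectors, so num_msi covers both ranges.
 */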
static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (req->tx_buf_size)
		hdev->tx_buf_size =
			__le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (req->dv_buf_size)
		hdev->dv_buf_size =
			__le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->dv_buf_size = HCLGE_DEFAULT_DV;

	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		/* PF should have NIC vectors and Roce vectors,
		 * NIC vectors are queued before Roce vectors.
		 */
		hdev->num_msi = hdev->num_roce_msi +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
	}

	return 0;
}
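/* Translate the firmware speed code into an HCLGE_MAC_SPEED_* value.
 * The mapping (6 -> 10M, 7 -> 100M, 0 -> 1G, 1 -> 10G, 2 -> 25G,
 * 3 -> 40G, 4 -> 50G, 5 -> 100G) mirrors hclge_cfg_mac_speed_dup_hw()
 * below.
 */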
static int hclge_parse_speed(int speed_cmd, int *speed)
{
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u8 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
				 supported);

	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
}

static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
					 u8 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	/* default to support all speed for GE port */
	if (!speed_ability)
		speed_ability = HCLGE_SUPPORT_GE;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
				 supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
				 supported);
	}

	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
	}

	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
}

static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
		hclge_parse_fiber_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
		hclge_parse_copper_link_mode(hdev, speed_ability);
}
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u64 mac_addr_tmp;
	int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					      HCLGE_CFG_VMDQ_M,
					      HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_RSS_SIZE_M,
					    HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);
	if (!cfg->umv_space)
		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}
/* hclge_get_cfg: query the static parameter from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	int i, ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* Len should be divided by 4 bytes when sent to hardware */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}

static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	ret = hclge_query_pf_resource(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);

	return ret;
}

static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
{
#define HCLGE_MIN_TX_DESC	64
#define HCLGE_MIN_RX_DESC	64

	if (!is_kdump_kernel())
		return;

	dev_info(&hdev->pdev->dev,
		 "Running kdump kernel. Using minimal resources\n");

	/* minimal number of queue pairs equals the number of vports */
	hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
}
static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	int ret, i;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
		return ret;
	}

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->rss_size_max = cfg.rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_tx_desc = cfg.tqp_desc_num;
	hdev->num_rx_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;
	hdev->wanted_umv_size = cfg.umv_space;

	if (hnae3_dev_fd_supported(hdev))
		hdev->fd_en = true;

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = 1;

	/* Currently does not support non-contiguous TCs */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	hclge_init_kdump_kernel_config(hdev);

	return ret;
}
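/* Program the TSO MSS range into hardware. Note that both bounds are
 * packed with the HCLGE_TSO_MSS_MIN_M/S helpers; this presumably works
 * because the min and max fields share the same bit layout within their
 * respective 16-bit words.
 */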
static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
			    int tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;
	u16 tso_mss;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_min);
	req->tso_mss_min = cpu_to_le16(tso_mss);

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_max);
	req->tso_mss_max = cpu_to_le16(tso_mss);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_config_gro(struct hclge_dev *hdev, bool en)
{
	struct hclge_cfg_gro_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
	req = (struct hclge_cfg_gro_status_cmd *)desc.data;

	req->gro_en = cpu_to_le16(en ? 1 : 0);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"GRO hardware config cmd failed, ret = %d\n", ret);

	return ret;
}
static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
			i * HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
			1 << HCLGE_TQP_MAP_EN_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

	return ret;
}
static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
			kinfo->tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = alloced;
	kinfo->rss_size = min_t(u16, hdev->rss_size_max,
				vport->alloc_tqps / hdev->tm_info.num_tc);

	return 0;
}

static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
			    u16 num_tx_desc, u16 num_rx_desc)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;

	kinfo->num_tx_desc = num_tx_desc;
	kinfo->num_rx_desc = num_rx_desc;

	kinfo->rx_buf_len = hdev->rx_buf_len;

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, num_tqps);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);

	return ret;
}
static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < vport->alloc_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++) {
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
{
	/* this would be initialized later */
}

static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
		ret = hclge_knic_setup(vport, num_tqps,
				       hdev->num_tx_desc, hdev->num_rx_desc);
		if (ret) {
			dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
				ret);
			return ret;
		}
	} else {
		hclge_unic_setup(vport, num_tqps);
	}

	return 0;
}
static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for main NIC of PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;
		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
		vport->rxvlan_cfg.rx_vlan_offload_en = true;
		INIT_LIST_HEAD(&vport->vlan_list);
		INIT_LIST_HEAD(&vport->uc_mac_list);
		INIT_LIST_HEAD(&vport->mc_mac_list);

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}
static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is allocated in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);

	return ret;
}

static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);

	return ret;
}

static u32 hclge_get_tc_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}

/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}

static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}
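/* Check whether rx_all can hold all enabled private buffers plus a big
 * enough shared buffer; if so, size the shared buffer and fill in the
 * self and per-TC high/low waterlines. The shared buffer must cover at
 * least 2 * MPS + DV on DCB hardware (MPS + extra + DV otherwise),
 * with MPS rounded up to HCLGE_BUF_SIZE_UNIT.
 */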
static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
	u32 tc_num = hclge_get_tc_num(hdev);
	u32 shared_buf, aligned_mps;
	u32 rx_priv;
	int i;

	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = 2 * aligned_mps + hdev->dv_buf_size;
	else
		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
				 + hdev->dv_buf_size;

	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
			     HCLGE_BUF_SIZE_UNIT);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all < rx_priv + shared_std)
		return false;

	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
	buf_alloc->s_buf.buf_size = shared_buf;
	if (hnae3_dev_dcb_supported(hdev)) {
		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
			- roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
	} else {
		buf_alloc->s_buf.self.high = aligned_mps +
					     HCLGE_NON_DCB_ADDITIONAL_BUF;
		buf_alloc->s_buf.self.low = aligned_mps;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		if (tc_num)
			hi_thrd = (shared_buf - hdev->dv_buf_size) / tc_num;
		else
			hi_thrd = shared_buf - hdev->dv_buf_size;

		hi_thrd = max_t(u32, hi_thrd, 2 * aligned_mps);
		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
		lo_thrd = hi_thrd - aligned_mps / 2;
	} else {
		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
		lo_thrd = aligned_mps;
	}

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
	}

	return true;
}
static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i)) {
			if (total_size < hdev->tx_buf_size)
				return -ENOMEM;

			priv->tx_buf_size = hdev->tx_buf_size;
		} else {
			priv->tx_buf_size = 0;
		}

		total_size -= priv->tx_buf_size;
	}

	return 0;
}

static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = max ? aligned_mps : 256;
			priv->wl.high = roundup(priv->wl.low + aligned_mps,
						HCLGE_BUF_SIZE_UNIT);
		} else {
			priv->wl.low = 0;
			priv->wl.high = max ? (aligned_mps * 2) : aligned_mps;
		}

		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
					  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i))) {
			/* Clear the no pfc TC private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
					struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i)) {
			/* Reduce the number of pfc TC with private buffer */
			priv->wl.low = 0;
			priv->enable = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}
/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculation successful, negative: fail
 */
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	/* When DCB is not supported, rx private buffer is not allocated. */
	if (!hnae3_dev_dcb_supported(hdev)) {
		u32 rx_all = hdev->pkt_buf_size;

		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
		return 0;

	/* try to decrease the buffer size */
	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
		return 0;

	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	return -ENOMEM;
}
static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_buff_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
	req = (struct hclge_rx_priv_buff_cmd *)desc.data;

	/* Alloc private buffer TCs */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		req->buf_num[i] =
			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
		req->buf_num[i] |=
			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
	}

	req->shared_buf =
		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
			    (1 << HCLGE_TC0_PRI_BUF_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private buffer alloc cmd failed %d\n", ret);

	return ret;
}
static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_wl_buf *req;
	struct hclge_priv_buf *priv;
	struct hclge_desc desc[2];
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
					   false);
		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;

		/* The first descriptor set the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;

			priv = &buf_alloc->priv_buf[idx];
			req->tc_wl[j].high =
				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->tc_wl[j].low =
				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private waterline config cmd failed %d\n",
			ret);
	return ret;
}
static int hclge_common_thrd_config(struct hclge_dev *hdev,
				    struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
	struct hclge_rx_com_thrd *req;
	struct hclge_desc desc[2];
	struct hclge_tc_thrd *tc;
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i],
					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
		req = (struct hclge_rx_com_thrd *)&desc[i].data;

		/* The first descriptor set the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];

			req->com_thrd[j].high =
				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->com_thrd[j].low =
				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common threshold config cmd failed %d\n", ret);
	return ret;
}
static int hclge_common_wl_config(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
	struct hclge_rx_com_wl *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);

	req = (struct hclge_rx_com_wl *)desc.data;
	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
	req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
	req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common waterline config cmd failed %d\n", ret);

	return ret;
}
int hclge_buffer_alloc(struct hclge_dev *hdev)
{
	struct hclge_pkt_buf_alloc *pkt_buf;
	int ret;

	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
	if (!pkt_buf)
		return -ENOMEM;

	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc tx buffer size for all TCs %d\n", ret);
		goto out;
	}

	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not alloc tx buffers %d\n", ret);
		goto out;
	}

	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc rx priv buffer size for all TCs %d\n",
			ret);
		goto out;
	}

	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
			ret);
		goto out;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure rx private waterline %d\n",
				ret);
			goto out;
		}

		ret = hclge_common_thrd_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure common threshold %d\n",
				ret);
			goto out;
		}
	}

	ret = hclge_common_wl_config(hdev, pkt_buf);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"could not configure common waterline %d\n", ret);

out:
	kfree(pkt_buf);
	return ret;
}
static int hclge_init_roce_base_info(struct hclge_vport *vport)
{
	struct hnae3_handle *roce = &vport->roce;
	struct hnae3_handle *nic = &vport->nic;

	roce->rinfo.num_vectors = vport->back->num_roce_msi;

	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
	    vport->back->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = vport->back->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = vport->back->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}

static int hclge_init_msi(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
					PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;
	hdev->base_msi_vector = pdev->irq;
	hdev->roce_base_vector = hdev->base_msi_vector +
				 hdev->roce_base_msix_offset;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGE_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}
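/* Only 10M and 100M links may run at half duplex; force full duplex
 * for every other speed.
 */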
static u8 hclge_check_speed_dup(u8 duplex, int speed)
{
	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
		duplex = HCLGE_MAC_FULL;

	return duplex;
}

static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
				      u8 duplex)
{
	struct hclge_config_mac_speed_dup_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);

	hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 6);
		break;
	case HCLGE_MAC_SPEED_100M:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 7);
		break;
	case HCLGE_MAC_SPEED_1G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 0);
		break;
	case HCLGE_MAC_SPEED_10G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 1);
		break;
	case HCLGE_MAC_SPEED_25G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 2);
		break;
	case HCLGE_MAC_SPEED_40G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 3);
		break;
	case HCLGE_MAC_SPEED_50G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 4);
		break;
	case HCLGE_MAC_SPEED_100G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 5);
		break;
	default:
		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
		return -EINVAL;
	}

	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
		      1);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac speed/duplex config cmd failed %d.\n", ret);
		return ret;
	}

	return 0;
}
int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
{
	int ret;

	duplex = hclge_check_speed_dup(duplex, speed);
	if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
		return 0;

	ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
	if (ret)
		return ret;

	hdev->hw.mac.speed = speed;
	hdev->hw.mac.duplex = duplex;

	return 0;
}

static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
				     u8 duplex)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
}

static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
{
	struct hclge_config_auto_neg_cmd *req;
	struct hclge_desc desc;
	u32 flag = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);

	req = (struct hclge_config_auto_neg_cmd *)desc.data;
	hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
	req->cfg_an_cmd_flag = cpu_to_le32(flag);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
			ret);

	return ret;
}

static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_set_autoneg_en(hdev, enable);
}

static int hclge_get_autoneg(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;

	if (phydev)
		return phydev->autoneg;

	return hdev->hw.mac.autoneg;
}
static int hclge_mac_init(struct hclge_dev *hdev)
{
	struct hclge_mac *mac = &hdev->hw.mac;
	int ret;

	hdev->support_sfp_query = true;
	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
					 hdev->hw.mac.duplex);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Config mac speed dup fail ret=%d\n", ret);
		return ret;
	}

	mac->link = 0;

	ret = hclge_set_mac_mtu(hdev, hdev->mps);
	if (ret) {
		dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
		return ret;
	}

	ret = hclge_buffer_alloc(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"allocate buffer fail, ret=%d\n", ret);

	return ret;
}
static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
{
	if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
	    !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->mbx_service_task);
}

static void hclge_reset_task_schedule(struct hclge_dev *hdev)
{
	if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->rst_service_task);
}

static void hclge_task_schedule(struct hclge_dev *hdev)
{
	if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
	    !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
	    !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
		(void)schedule_work(&hdev->service_task);
}

static int hclge_get_mac_link_status(struct hclge_dev *hdev)
{
	struct hclge_link_status_cmd *req;
	struct hclge_desc desc;
	int link_status;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
			ret);
		return ret;
	}

	req = (struct hclge_link_status_cmd *)desc.data;
	link_status = req->status & HCLGE_LINK_STATUS_UP_M;

	return !!link_status;
}

static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
{
	int mac_state;
	int link_stat;

	if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
		return 0;

	mac_state = hclge_get_mac_link_status(hdev);

	if (hdev->hw.mac.phydev) {
		if (hdev->hw.mac.phydev->state == PHY_RUNNING)
			link_stat = mac_state &
				hdev->hw.mac.phydev->link;
		else
			link_stat = 0;
	} else {
		link_stat = mac_state;
	}

	return !!link_stat;
}
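/* Propagate a link state change to the NIC client and, when present,
 * to the RoCE client of every vport.
 */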
static void hclge_update_link_status(struct hclge_dev *hdev)
{
	struct hnae3_client *rclient = hdev->roce_client;
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *rhandle;
	struct hnae3_handle *handle;
	int state;
	int i;

	if (!client)
		return;
	state = hclge_get_mac_phy_link(hdev);
	if (state != hdev->hw.mac.link) {
		for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
			handle = &hdev->vport[i].nic;
			client->ops->link_status_change(handle, state);
			rhandle = &hdev->vport[i].roce;
			if (rclient && rclient->ops->link_status_change)
				rclient->ops->link_status_change(rhandle,
								 state);
		}
		hdev->hw.mac.link = state;
	}
}
static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
{
	struct hclge_sfp_speed_cmd *resp = NULL;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SFP_GET_SPEED, true);
	resp = (struct hclge_sfp_speed_cmd *)desc.data;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret == -EOPNOTSUPP) {
		dev_warn(&hdev->pdev->dev,
			 "IMP does not support getting SFP speed %d\n", ret);
		return ret;
	} else if (ret) {
		dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
		return ret;
	}

	*speed = resp->sfp_speed;

	return 0;
}

static int hclge_update_speed_duplex(struct hclge_dev *hdev)
{
	struct hclge_mac mac = hdev->hw.mac;
	u32 speed;
	int ret;

	/* get the speed from the SFP cmd when no PHY is attached */
	if (mac.phydev)
		return 0;

	/* if IMP does not support get SFP/qSFP speed, return directly */
	if (!hdev->support_sfp_query)
		return 0;

	ret = hclge_get_sfp_speed(hdev, &speed);
	if (ret == -EOPNOTSUPP) {
		hdev->support_sfp_query = false;
		return ret;
	} else if (ret) {
		return ret;
	}

	if (speed == HCLGE_MAC_SPEED_UNKNOWN)
		return 0; /* do nothing if no SFP */

	/* must config full duplex for SFP */
	return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
}

static int hclge_update_speed_duplex_h(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_update_speed_duplex(hdev);
}

static int hclge_get_status(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_update_link_status(hdev);

	return hdev->hw.mac.link;
}

static void hclge_service_timer(struct timer_list *t)
{
	struct hclge_dev *hdev = from_timer(hdev, t, service_timer);

	mod_timer(&hdev->service_timer, jiffies + HZ);
	hdev->hw_stats.stats_timer++;
	hclge_task_schedule(hdev);
}

static void hclge_service_complete(struct hclge_dev *hdev)
{
	WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));

	/* Flush memory before next watchdog */
	smp_mb__before_atomic();
	clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
}

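/* Vector 0 is shared by several event sources. hclge_check_event_cause()
 * decodes them in strict priority order: reset events first (IMP, then
 * global, then core), then MSI-X hardware errors, then mailbox (CMDQ RX)
 * events. A lower-priority event left pending simply raises the interrupt
 * again once the vector is re-enabled.
 */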
static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
{
	u32 rst_src_reg, cmdq_src_reg, msix_src_reg;

	/* fetch the events from their corresponding regs */
	rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
	msix_src_reg = hclge_read_dev(&hdev->hw,
				      HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);

	/* Assumption: If by any chance reset and mailbox events are reported
	 * together then we will only process the reset event in this go and
	 * will defer the processing of the mailbox events. Since we have not
	 * cleared the RX CMDQ event this time, we will receive another
	 * interrupt from H/W just for the mailbox.
	 */

	/* check for vector0 reset event sources */
	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
		hdev->rst_stats.imp_rst_cnt++;
		return HCLGE_VECTOR0_EVENT_RST;
	}

	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
		hdev->rst_stats.global_rst_cnt++;
		return HCLGE_VECTOR0_EVENT_RST;
	}

	if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
		dev_info(&hdev->pdev->dev, "core reset interrupt\n");
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
		*clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
		hdev->rst_stats.core_rst_cnt++;
		return HCLGE_VECTOR0_EVENT_RST;
	}

	/* check for vector0 msix event source */
	if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
		dev_dbg(&hdev->pdev->dev, "received event 0x%x\n",
			msix_src_reg);
		return HCLGE_VECTOR0_EVENT_ERR;
	}

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
		*clearval = cmdq_src_reg;
		return HCLGE_VECTOR0_EVENT_MBX;
	}

	/* print other vector0 event source */
	dev_dbg(&hdev->pdev->dev, "cmdq_src_reg:0x%x, msix_src_reg:0x%x\n",
		cmdq_src_reg, msix_src_reg);

	return HCLGE_VECTOR0_EVENT_OTHER;
}

static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
				    u32 regclr)
{
	switch (event_type) {
	case HCLGE_VECTOR0_EVENT_RST:
		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
		break;
	case HCLGE_VECTOR0_EVENT_MBX:
		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
		break;
	default:
		break;
	}
}

static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
{
	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
}

static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
{
	writel(enable ? 1 : 0, vector->addr);
}

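/* Note that the handler below re-enables the misc vector only for mailbox
 * events. For reset events the vector deliberately stays masked: the reset
 * source is cleared, and the vector re-enabled, only once the reset has
 * been handled (see hclge_clear_reset_cause() and the UNKNOWN reset path
 * in hclge_get_reset_level()).
 */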
static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
{
	struct hclge_dev *hdev = data;
	u32 event_cause;
	u32 clearval;

	hclge_enable_vector(&hdev->misc_vector, false);
	event_cause = hclge_check_event_cause(hdev, &clearval);

	/* vector 0 interrupt is shared with reset and mailbox source events. */
	switch (event_cause) {
	case HCLGE_VECTOR0_EVENT_ERR:
		/* we do not know what type of reset is required now. This could
		 * only be decided after we fetch the type of errors which
		 * caused this event. Therefore, we will do below for now:
		 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
		 *    have deferred the type of reset to be used.
		 * 2. Schedule the reset service task.
		 * 3. When the service task receives HNAE3_UNKNOWN_RESET type
		 *    it will fetch the correct type of reset. This would be
		 *    done by first decoding the types of errors.
		 */
		set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
		/* fall through */
	case HCLGE_VECTOR0_EVENT_RST:
		hclge_reset_task_schedule(hdev);
		break;
	case HCLGE_VECTOR0_EVENT_MBX:
		/* If we are here then,
		 * 1. Either we are not handling any mbx task and we are not
		 *    scheduled as well
		 * 2. We could be handling a mbx task but nothing more is
		 *    scheduled.
		 * In both cases, we should schedule the mbx task as there are
		 * more mbx messages reported by this interrupt.
		 */
		hclge_mbx_task_schedule(hdev);
		break;
	default:
		dev_warn(&hdev->pdev->dev,
			 "received unknown or unhandled event of vector0\n");
		break;
	}

	/* clear the source of interrupt if it is not caused by reset */
	if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
		hclge_clear_event_cause(hdev, event_cause, clearval);
		hclge_enable_vector(&hdev->misc_vector, true);
	}

	return IRQ_HANDLED;
}

static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
{
	if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
		dev_warn(&hdev->pdev->dev,
			 "vector(vector_id %d) has been freed.\n", vector_id);
		return;
	}

	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}

static void hclge_get_misc_vector(struct hclge_dev *hdev)
{
	struct hclge_misc_vector *vector = &hdev->misc_vector;

	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);

	vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
	hdev->vector_status[0] = 0;

	hdev->num_msi_left -= 1;
	hdev->num_msi_used += 1;
}

static int hclge_misc_irq_init(struct hclge_dev *hdev)
{
	int ret;

	hclge_get_misc_vector(hdev);

	/* this would be explicitly freed in the end */
	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
			  0, "hclge_misc", hdev);
	if (ret) {
		hclge_free_vector(hdev, 0);
		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
			hdev->misc_vector.vector_irq);
	}

	return ret;
}

static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
{
	free_irq(hdev->misc_vector.vector_irq, hdev);
	hclge_free_vector(hdev, 0);
}

int hclge_notify_client(struct hclge_dev *hdev,
			enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->nic_client;
	u16 i;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		struct hnae3_handle *handle = &hdev->vport[i].nic;
		int ret;

		ret = client->ops->reset_notify(handle, type);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"notify nic client failed %d(%d)\n", type, ret);
			return ret;
		}
	}

	return 0;
}

static int hclge_notify_roce_client(struct hclge_dev *hdev,
				    enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->roce_client;
	int ret = 0;
	u16 i;

	if (!client)
		return 0;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		struct hnae3_handle *handle = &hdev->vport[i].roce;

		ret = client->ops->reset_notify(handle, type);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"notify roce client failed %d(%d)",
				type, ret);
			return ret;
		}
	}

	return ret;
}

static int hclge_reset_wait(struct hclge_dev *hdev)
{
#define HCLGE_RESET_WAIT_MS	100
#define HCLGE_RESET_WAIT_CNT	200
	u32 val, reg, reg_bit;
	u32 cnt = 0;

	switch (hdev->reset_type) {
	case HNAE3_IMP_RESET:
		reg = HCLGE_GLOBAL_RESET_REG;
		reg_bit = HCLGE_IMP_RESET_BIT;
		break;
	case HNAE3_GLOBAL_RESET:
		reg = HCLGE_GLOBAL_RESET_REG;
		reg_bit = HCLGE_GLOBAL_RESET_BIT;
		break;
	case HNAE3_CORE_RESET:
		reg = HCLGE_GLOBAL_RESET_REG;
		reg_bit = HCLGE_CORE_RESET_BIT;
		break;
	case HNAE3_FUNC_RESET:
		reg = HCLGE_FUN_RST_ING;
		reg_bit = HCLGE_FUN_RST_ING_B;
		break;
	case HNAE3_FLR_RESET:
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"Wait for unsupported reset type: %d\n",
			hdev->reset_type);
		return -EINVAL;
	}

	if (hdev->reset_type == HNAE3_FLR_RESET) {
		while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
		       cnt++ < HCLGE_RESET_WAIT_CNT)
			msleep(HCLGE_RESET_WAIT_MS);

		if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
			dev_err(&hdev->pdev->dev,
				"flr wait timeout: %d\n", cnt);
			return -EBUSY;
		}

		return 0;
	}

	val = hclge_read_dev(&hdev->hw, reg);
	while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
		msleep(HCLGE_RESET_WAIT_MS);
		val = hclge_read_dev(&hdev->hw, reg);
		cnt++;
	}

	if (cnt >= HCLGE_RESET_WAIT_CNT) {
		dev_warn(&hdev->pdev->dev,
			 "Wait for reset timeout: %d\n", hdev->reset_type);
		return -EBUSY;
	}

	return 0;
}

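/* The polling budget above is HCLGE_RESET_WAIT_CNT * HCLGE_RESET_WAIT_MS,
 * i.e. 200 * 100 ms = 20 seconds, before the wait is reported as a timeout
 * and the reset error path takes over.
 */
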
static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
{
	struct hclge_vf_rst_cmd *req;
	struct hclge_desc desc;

	req = (struct hclge_vf_rst_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
	req->dest_vfid = func_id;

	if (reset)
		req->vf_rst = 0x1;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
{
	int i;

	for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
		struct hclge_vport *vport = &hdev->vport[i];
		int ret;

		/* Send cmd to set/clear VF's FUNC_RST_ING */
		ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"set vf(%d) rst failed %d!\n",
				vport->vport_id, ret);
			return ret;
		}

		if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
			continue;

		/* Inform VF to process the reset.
		 * hclge_inform_reset_assert_to_vf may fail if VF
		 * driver is not loaded.
		 */
		ret = hclge_inform_reset_assert_to_vf(vport);
		if (ret)
			dev_warn(&hdev->pdev->dev,
				 "inform reset to vf(%d) failed %d!\n",
				 vport->vport_id, ret);
	}

	return 0;
}

int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
{
	struct hclge_desc desc;
	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
	hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
	req->fun_reset_vfid = func_id;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"send function reset cmd fail, status =%d\n", ret);

	return ret;
}

static void hclge_do_reset(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle = &hdev->vport[0].nic;
	struct pci_dev *pdev = hdev->pdev;
	u32 val;

	if (hclge_get_hw_reset_stat(handle)) {
		dev_info(&pdev->dev, "Hardware reset not finished\n");
		dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
			 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
			 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
		return;
	}

	switch (hdev->reset_type) {
	case HNAE3_GLOBAL_RESET:
		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
		hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
		dev_info(&pdev->dev, "Global Reset requested\n");
		break;
	case HNAE3_CORE_RESET:
		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
		hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
		dev_info(&pdev->dev, "Core Reset requested\n");
		break;
	case HNAE3_FUNC_RESET:
		dev_info(&pdev->dev, "PF Reset requested\n");
		/* schedule again to check later */
		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
		hclge_reset_task_schedule(hdev);
		break;
	case HNAE3_FLR_RESET:
		dev_info(&pdev->dev, "FLR requested\n");
		/* schedule again to check later */
		set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
		hclge_reset_task_schedule(hdev);
		break;
	default:
		dev_warn(&pdev->dev,
			 "Unsupported reset type: %d\n", hdev->reset_type);
		break;
	}
}

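/* Reset levels are strictly ordered: IMP > global > core > function > FLR.
 * The helper below picks the highest pending level and clears every level
 * below it, so one big reset subsumes the smaller ones and the hardware is
 * never reset twice for the same fault.
 */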
static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
						   unsigned long *addr)
{
	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;

	/* first, resolve any unknown reset type to the known type(s) */
	if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
		/* we will intentionally ignore any errors from this function
		 * as we will end up in *some* reset request in any case
		 */
		hclge_handle_hw_msix_error(hdev, addr);
		clear_bit(HNAE3_UNKNOWN_RESET, addr);
		/* We deferred the clearing of the error event which caused the
		 * interrupt since it was not possible to do that in interrupt
		 * context (and this is the reason we introduced the new
		 * UNKNOWN reset type). Now that the errors have been handled
		 * and cleared in hardware, we can safely enable interrupts.
		 * This is an exception to the norm.
		 */
		hclge_enable_vector(&hdev->misc_vector, true);
	}

	/* return the highest priority reset level amongst all */
	if (test_bit(HNAE3_IMP_RESET, addr)) {
		rst_level = HNAE3_IMP_RESET;
		clear_bit(HNAE3_IMP_RESET, addr);
		clear_bit(HNAE3_GLOBAL_RESET, addr);
		clear_bit(HNAE3_CORE_RESET, addr);
		clear_bit(HNAE3_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
		rst_level = HNAE3_GLOBAL_RESET;
		clear_bit(HNAE3_GLOBAL_RESET, addr);
		clear_bit(HNAE3_CORE_RESET, addr);
		clear_bit(HNAE3_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_CORE_RESET, addr)) {
		rst_level = HNAE3_CORE_RESET;
		clear_bit(HNAE3_CORE_RESET, addr);
		clear_bit(HNAE3_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
		rst_level = HNAE3_FUNC_RESET;
		clear_bit(HNAE3_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
		rst_level = HNAE3_FLR_RESET;
		clear_bit(HNAE3_FLR_RESET, addr);
	}

	if (hdev->reset_type != HNAE3_NONE_RESET &&
	    rst_level < hdev->reset_type)
		return HNAE3_NONE_RESET;

	return rst_level;
}

static void hclge_clear_reset_cause(struct hclge_dev *hdev)
{
	u32 clearval = 0;

	switch (hdev->reset_type) {
	case HNAE3_IMP_RESET:
		clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
		break;
	case HNAE3_GLOBAL_RESET:
		clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
		break;
	case HNAE3_CORE_RESET:
		clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
		break;
	default:
		break;
	}

	if (!clearval)
		return;

	hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
	hclge_enable_vector(&hdev->misc_vector, true);
}

static int hclge_reset_prepare_down(struct hclge_dev *hdev)
{
	int ret = 0;

	switch (hdev->reset_type) {
	case HNAE3_FUNC_RESET:
		/* fall through */
	case HNAE3_FLR_RESET:
		ret = hclge_set_all_vf_rst(hdev, true);
		break;
	default:
		break;
	}

	return ret;
}

static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
{
	u32 reg_val;
	int ret = 0;

	switch (hdev->reset_type) {
	case HNAE3_FUNC_RESET:
		/* There is no mechanism for PF to know if VF has stopped IO
		 * for now, just wait 100 ms for VF to stop IO
		 */
		msleep(100);
		ret = hclge_func_reset_cmd(hdev, 0);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"asserting function reset fail %d!\n", ret);
			return ret;
		}

		/* After performing pf reset, it is not necessary to do the
		 * mailbox handling or send any command to firmware, because
		 * any mailbox handling or command to firmware is only valid
		 * after hclge_cmd_init is called.
		 */
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		hdev->rst_stats.pf_rst_cnt++;
		break;
	case HNAE3_FLR_RESET:
		/* There is no mechanism for PF to know if VF has stopped IO
		 * for now, just wait 100 ms for VF to stop IO
		 */
		msleep(100);
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
		hdev->rst_stats.flr_rst_cnt++;
		break;
	case HNAE3_IMP_RESET:
		reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
				BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
		break;
	default:
		break;
	}

	dev_info(&hdev->pdev->dev, "prepare wait ok\n");

	return ret;
}

static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
{
#define MAX_RESET_FAIL_CNT 5
#define RESET_UPGRADE_DELAY_SEC 10

	if (hdev->reset_pending) {
		dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
			 hdev->reset_pending);
		return true;
	} else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
		   (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
		    BIT(HCLGE_IMP_RESET_BIT))) {
		dev_info(&hdev->pdev->dev,
			 "reset failed because IMP Reset is pending\n");
		hclge_clear_reset_cause(hdev);
		return false;
	} else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
		hdev->reset_fail_cnt++;
		if (is_timeout) {
			set_bit(hdev->reset_type, &hdev->reset_pending);
			dev_info(&hdev->pdev->dev,
				 "re-schedule to wait for hw reset done\n");
			return true;
		}

		dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
		hclge_clear_reset_cause(hdev);
		mod_timer(&hdev->reset_timer,
			  jiffies + RESET_UPGRADE_DELAY_SEC * HZ);

		return false;
	}

	hclge_clear_reset_cause(hdev);
	dev_err(&hdev->pdev->dev, "Reset fail!\n");
	return false;
}

static int hclge_reset_prepare_up(struct hclge_dev *hdev)
{
	int ret = 0;

	switch (hdev->reset_type) {
	case HNAE3_FUNC_RESET:
		/* fall through */
	case HNAE3_FLR_RESET:
		ret = hclge_set_all_vf_rst(hdev, false);
		break;
	default:
		break;
	}

	return ret;
}

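/* hclge_reset() drives the full recovery sequence in order: notify the
 * RoCE and NIC clients DOWN, assert the reset and wait for hardware to
 * finish, notify UNINIT, re-initialize the ae device, then walk the
 * clients back up through INIT, RESTORE and UP. Any failure drops into
 * hclge_reset_err_handle(), which may re-schedule or escalate the reset.
 */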
static void hclge_reset(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	bool is_timeout = false;
	int ret;

	/* Initialize ae_dev reset status as well, in case enet layer wants to
	 * know if device is undergoing reset
	 */
	ae_dev->reset_type = hdev->reset_type;
	hdev->rst_stats.reset_cnt++;
	/* perform reset of the stack & ae device for a client */
	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		goto err_reset;
	ret = hclge_reset_prepare_down(hdev);
	if (ret)
		goto err_reset;

	rtnl_lock();
	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		goto err_reset_lock;
	rtnl_unlock();

	ret = hclge_reset_prepare_wait(hdev);
	if (ret)
		goto err_reset;

	if (hclge_reset_wait(hdev)) {
		is_timeout = true;
		goto err_reset;
	}

	hdev->rst_stats.hw_reset_done_cnt++;

	ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		goto err_reset;

	rtnl_lock();
	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		goto err_reset_lock;
	ret = hclge_reset_ae_dev(hdev->ae_dev);
	if (ret)
		goto err_reset_lock;
	ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
	if (ret)
		goto err_reset_lock;
	ret = hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
	if (ret)
		goto err_reset_lock;

	hclge_clear_reset_cause(hdev);

	ret = hclge_reset_prepare_up(hdev);
	if (ret)
		goto err_reset_lock;
	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
	if (ret)
		goto err_reset_lock;
	rtnl_unlock();

	ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
	if (ret)
		goto err_reset;
	ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
	if (ret)
		goto err_reset;

	hdev->last_reset_time = jiffies;
	hdev->reset_fail_cnt = 0;
	hdev->rst_stats.reset_done_cnt++;
	ae_dev->reset_type = HNAE3_NONE_RESET;
	del_timer(&hdev->reset_timer);

	return;

err_reset_lock:
	rtnl_unlock();
err_reset:
	if (hclge_reset_err_handle(hdev, is_timeout))
		hclge_reset_task_schedule(hdev);
}

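/* Reset requests are throttled by hclge_reset_event() below: a request
 * arriving within 3 seconds of the last reset is ignored, and only a
 * request arriving more than 20 seconds (4 * 5 * HZ) after the last one is
 * treated as a genuinely new event and restarted at PF (function) reset
 * level. Repeated requests escalate via reset_level++ up to global reset.
 */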
static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
	struct hclge_dev *hdev = ae_dev->priv;

	/* We might end up getting called broadly because of 2 below cases:
	 * 1. Recoverable error was conveyed through APEI and only way to bring
	 *    normalcy is to reset.
	 * 2. A new reset request from the stack due to timeout
	 *
	 * For the first case, the error event might not have an ae handle
	 * available. Check if this is a new reset request and we are not here
	 * just because the last reset attempt did not succeed and the watchdog
	 * hit us again. We will know this if the last reset request did not
	 * occur very recently (watchdog timer = 5*HZ, let us check after a
	 * sufficiently large time, say 4*5*HZ). In case of a new request we
	 * reset the "reset level" to PF reset. And if it is a repeat reset
	 * request of the most recent one then we want to make sure we throttle
	 * the reset request. Therefore, we will not allow it again before
	 * 3*HZ times.
	 */
	if (!handle)
		handle = &hdev->vport[0].nic;

	if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ)))
		return;
	else if (hdev->default_reset_request)
		hdev->reset_level =
			hclge_get_reset_level(hdev,
					      &hdev->default_reset_request);
	else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
		hdev->reset_level = HNAE3_FUNC_RESET;

	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
		 hdev->reset_level);

	/* request reset & schedule reset task */
	set_bit(hdev->reset_level, &hdev->reset_request);
	hclge_reset_task_schedule(hdev);

	if (hdev->reset_level < HNAE3_GLOBAL_RESET)
		hdev->reset_level++;
}

static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
					enum hnae3_reset_type rst_type)
{
	struct hclge_dev *hdev = ae_dev->priv;

	set_bit(rst_type, &hdev->default_reset_request);
}

static void hclge_reset_timer(struct timer_list *t)
{
	struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);

	dev_info(&hdev->pdev->dev,
		 "triggering global reset in reset timer\n");
	set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
	hclge_reset_event(hdev->pdev, NULL);
}

static void hclge_reset_subtask(struct hclge_dev *hdev)
{
	/* check if there is any ongoing reset in the hardware. This status can
	 * be checked from reset_pending. If there is then, we need to wait for
	 * hardware to complete reset.
	 *    a. If we are able to figure out in reasonable time that hardware
	 *       has fully reset then, we can proceed with driver, client
	 *       reset.
	 *    b. else, we can come back later to check this status so re-sched
	 *       now.
	 */
	hdev->last_reset_time = jiffies;
	hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
	if (hdev->reset_type != HNAE3_NONE_RESET)
		hclge_reset(hdev);

	/* check if we got any *new* reset requests to be honored */
	hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
	if (hdev->reset_type != HNAE3_NONE_RESET)
		hclge_do_reset(hdev);

	hdev->reset_type = HNAE3_NONE_RESET;
}

static void hclge_reset_service_task(struct work_struct *work)
{
	struct hclge_dev *hdev =
		container_of(work, struct hclge_dev, rst_service_task);

	if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);

	hclge_reset_subtask(hdev);

	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
}

static void hclge_mailbox_service_task(struct work_struct *work)
{
	struct hclge_dev *hdev =
		container_of(work, struct hclge_dev, mbx_service_task);

	if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);

	hclge_mbx_handler(hdev);

	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}

static void hclge_update_vport_alive(struct hclge_dev *hdev)
{
	int i;

	/* start from vport 1 for PF is always alive */
	for (i = 1; i < hdev->num_alloc_vport; i++) {
		struct hclge_vport *vport = &hdev->vport[i];

		if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
			clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);

		/* If vf is not alive, set to default value */
		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
			vport->mps = HCLGE_MAC_DEFAULT_FRAME;
	}
}

static void hclge_service_task(struct work_struct *work)
{
	struct hclge_dev *hdev =
		container_of(work, struct hclge_dev, service_task);

	if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
		hclge_update_stats_for_all(hdev);
		hdev->hw_stats.stats_timer = 0;
	}

	hclge_update_speed_duplex(hdev);
	hclge_update_link_status(hdev);
	hclge_update_vport_alive(hdev);
	hclge_service_complete(hdev);
}

struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
{
	/* VF handle has no client */
	if (!handle->client)
		return container_of(handle, struct hclge_vport, nic);
	else if (handle->client->type == HNAE3_CLIENT_ROCE)
		return container_of(handle, struct hclge_vport, roce);
	else
		return container_of(handle, struct hclge_vport, nic);
}

static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
			    struct hnae3_vector_info *vector_info)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_vector_info *vector = vector_info;
	struct hclge_dev *hdev = vport->back;
	int alloc = 0;
	int i, j;

	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		/* vector 0 is reserved for the misc interrupt */
		for (i = 1; i < hdev->num_msi; i++) {
			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.io_base +
					HCLGE_VECTOR_REG_BASE +
					(i - 1) * HCLGE_VECTOR_REG_OFFSET +
					vport->vport_id *
					HCLGE_VECTOR_VF_OFFSET;
				hdev->vector_status[i] = vport->vport_id;
				hdev->vector_irq[i] = vector->vector;

				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}

static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}

static int hclge_put_vector(struct hnae3_handle *handle, int vector)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&hdev->pdev->dev,
			"Get vector index fail. vector_id =%d\n", vector_id);
		return vector_id;
	}

	hclge_free_vector(hdev, vector_id);

	return 0;
}

static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
{
	return HCLGE_RSS_KEY_SIZE;
}

static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
{
	return HCLGE_RSS_IND_TBL_SIZE;
}

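/* The RSS hash key does not fit into a single command descriptor, so the
 * helper below programs it in three chunks selected by a key offset field.
 * Assuming the usual sizes from the command header (HCLGE_RSS_KEY_SIZE of
 * 40 bytes, HCLGE_RSS_HASH_KEY_NUM of 16 bytes per descriptor), the key is
 * written as 16 + 16 + 8 bytes.
 */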
static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
				  const u8 hfunc, const u8 *key)
{
	struct hclge_rss_config_cmd *req;
	struct hclge_desc desc;
	int key_offset;
	int key_size;
	int ret;

	req = (struct hclge_rss_config_cmd *)desc.data;

	for (key_offset = 0; key_offset < 3; key_offset++) {
		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
					   false);

		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);

		if (key_offset == 2)
			key_size =
				HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
		else
			key_size = HCLGE_RSS_HASH_KEY_NUM;

		memcpy(req->hash_key,
		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure RSS config fail, status = %d\n",
				ret);
			return ret;
		}
	}
	return 0;
}

static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
{
	struct hclge_rss_indirection_table_cmd *req;
	struct hclge_desc desc;
	int i, j;
	int ret;

	req = (struct hclge_rss_indirection_table_cmd *)desc.data;

	for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
		hclge_cmd_setup_basic_desc
			(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);

		req->start_table_index =
			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);

		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
			req->rss_result[j] =
				indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure rss indir table fail, status = %d\n",
				ret);
			return ret;
		}
	}
	return 0;
}

static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
				 u16 *tc_size, u16 *tc_offset)
{
	struct hclge_rss_tc_mode_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
	req = (struct hclge_rss_tc_mode_cmd *)desc.data;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u16 mode = 0;

		hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
		hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
				HCLGE_RSS_TC_SIZE_S, tc_size[i]);
		hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
				HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);

		req->rss_tc_mode[i] = cpu_to_le16(mode);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Configure rss tc mode fail, status = %d\n", ret);

	return ret;
}

static void hclge_get_rss_type(struct hclge_vport *vport)
{
	if (vport->rss_tuple_sets.ipv4_tcp_en ||
	    vport->rss_tuple_sets.ipv4_udp_en ||
	    vport->rss_tuple_sets.ipv4_sctp_en ||
	    vport->rss_tuple_sets.ipv6_tcp_en ||
	    vport->rss_tuple_sets.ipv6_udp_en ||
	    vport->rss_tuple_sets.ipv6_sctp_en)
		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
	else if (vport->rss_tuple_sets.ipv4_fragment_en ||
		 vport->rss_tuple_sets.ipv6_fragment_en)
		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
	else
		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
}

static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
{
	struct hclge_rss_input_tuple_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);

	req = (struct hclge_rss_input_tuple_cmd *)desc.data;

	/* Get the tuple cfg from pf */
	req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
	hclge_get_rss_type(&hdev->vport[0]);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Configure rss input fail, status = %d\n", ret);
	return ret;
}

static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
			 u8 *key, u8 *hfunc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	int i;

	/* Get hash algorithm */
	if (hfunc) {
		switch (vport->rss_algo) {
		case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
			*hfunc = ETH_RSS_HASH_TOP;
			break;
		case HCLGE_RSS_HASH_ALGO_SIMPLE:
			*hfunc = ETH_RSS_HASH_XOR;
			break;
		default:
			*hfunc = ETH_RSS_HASH_UNKNOWN;
			break;
		}
	}

	/* Get the RSS Key required by the user */
	if (key)
		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);

	/* Get indirect table */
	if (indir)
		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
			indir[i] = vport->rss_indirection_tbl[i];

	return 0;
}

static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
			 const u8 *key, const u8 hfunc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u8 hash_algo;
	int ret, i;

	/* Set the RSS Hash Key if specified by the user */
	if (key) {
		switch (hfunc) {
		case ETH_RSS_HASH_TOP:
			hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
			break;
		case ETH_RSS_HASH_XOR:
			hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
			break;
		case ETH_RSS_HASH_NO_CHANGE:
			hash_algo = vport->rss_algo;
			break;
		default:
			return -EINVAL;
		}

		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
		if (ret)
			return ret;

		/* Update the shadow RSS key with the user specified key */
		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
		vport->rss_algo = hash_algo;
	}

	/* Update the shadow RSS table with user specified qids */
	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
		vport->rss_indirection_tbl[i] = indir[i];

	/* Update the hardware */
	return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
}

static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
{
	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;

	if (nfc->data & RXH_L4_B_2_3)
		hash_sets |= HCLGE_D_PORT_BIT;
	else
		hash_sets &= ~HCLGE_D_PORT_BIT;

	if (nfc->data & RXH_IP_SRC)
		hash_sets |= HCLGE_S_IP_BIT;
	else
		hash_sets &= ~HCLGE_S_IP_BIT;

	if (nfc->data & RXH_IP_DST)
		hash_sets |= HCLGE_D_IP_BIT;
	else
		hash_sets &= ~HCLGE_D_IP_BIT;

	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
		hash_sets |= HCLGE_V_TAG_BIT;

	return hash_sets;
}

static int hclge_set_rss_tuple(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *nfc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_rss_input_tuple_cmd *req;
	struct hclge_desc desc;
	u8 tuple_sets;
	int ret;

	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);

	req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;

	tuple_sets = hclge_get_rss_hash_bits(nfc);
	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		req->ipv4_tcp_en = tuple_sets;
		break;
	case TCP_V6_FLOW:
		req->ipv6_tcp_en = tuple_sets;
		break;
	case UDP_V4_FLOW:
		req->ipv4_udp_en = tuple_sets;
		break;
	case UDP_V6_FLOW:
		req->ipv6_udp_en = tuple_sets;
		break;
	case SCTP_V4_FLOW:
		req->ipv4_sctp_en = tuple_sets;
		break;
	case SCTP_V6_FLOW:
		if ((nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;

		req->ipv6_sctp_en = tuple_sets;
		break;
	case IPV4_FLOW:
		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
		break;
	case IPV6_FLOW:
		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
		break;
	default:
		return -EINVAL;
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set rss tuple fail, status = %d\n", ret);
		return ret;
	}

	vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
	vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
	vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
	vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
	vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
	vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
	vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
	vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
	hclge_get_rss_type(vport);
	return 0;
}

static int hclge_get_rss_tuple(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *nfc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	u8 tuple_sets;

	nfc->data = 0;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
		break;
	case UDP_V4_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
		break;
	case TCP_V6_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
		break;
	case UDP_V6_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
		break;
	case SCTP_V4_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
		break;
	case SCTP_V6_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (!tuple_sets)
		return 0;

	if (tuple_sets & HCLGE_D_PORT_BIT)
		nfc->data |= RXH_L4_B_2_3;
	if (tuple_sets & HCLGE_S_PORT_BIT)
		nfc->data |= RXH_L4_B_0_1;
	if (tuple_sets & HCLGE_D_IP_BIT)
		nfc->data |= RXH_IP_DST;
	if (tuple_sets & HCLGE_S_IP_BIT)
		nfc->data |= RXH_IP_SRC;

	return 0;
}

static int hclge_get_tc_size(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->rss_size_max;
}

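/* Worked example for the TC sizing done below: with rss_size = 16,
 * roundup_pow_of_two(16) = 16 and ilog2(16) = 4, so tc_size is programmed
 * as 4 for every enabled TC and tc_offset steps in units of 16 queues
 * (0, 16, 32, ...).
 */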
int hclge_rss_init_hw(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u8 *rss_indir = vport[0].rss_indirection_tbl;
	u16 rss_size = vport[0].alloc_rss_size;
	u8 *key = vport[0].rss_hash_key;
	u8 hfunc = vport[0].rss_algo;
	u16 tc_offset[HCLGE_MAX_TC_NUM];
	u16 tc_valid[HCLGE_MAX_TC_NUM];
	u16 tc_size[HCLGE_MAX_TC_NUM];
	u16 roundup_size;
	int i, ret;

	ret = hclge_set_rss_indir_table(hdev, rss_indir);
	if (ret)
		return ret;

	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
	if (ret)
		return ret;

	ret = hclge_set_rss_input_tuple(hdev);
	if (ret)
		return ret;

	/* Each TC has the same queue size, and the tc_size set to hardware is
	 * the log2 of the roundup power of two of rss_size; the actual queue
	 * size is limited by the indirection table.
	 */
	if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
		dev_err(&hdev->pdev->dev,
			"Configure rss tc size failed, invalid TC_SIZE = %d\n",
			rss_size);
		return -EINVAL;
	}

	roundup_size = roundup_pow_of_two(rss_size);
	roundup_size = ilog2(roundup_size);

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = rss_size * i;
	}

	return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
}

void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int i, j;

	for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
			vport[j].rss_indirection_tbl[i] =
				i % vport[j].alloc_rss_size;
	}
}

static void hclge_rss_init_cfg(struct hclge_dev *hdev)
{
	int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
	struct hclge_vport *vport = hdev->vport;

	if (hdev->pdev->revision >= 0x21)
		rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport[i].rss_tuple_sets.ipv4_tcp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv4_udp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv4_sctp_en =
			HCLGE_RSS_INPUT_TUPLE_SCTP;
		vport[i].rss_tuple_sets.ipv4_fragment_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv6_tcp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv6_udp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv6_sctp_en =
			HCLGE_RSS_INPUT_TUPLE_SCTP;
		vport[i].rss_tuple_sets.ipv6_fragment_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;

		vport[i].rss_algo = rss_algo;

		memcpy(vport[i].rss_hash_key, hclge_hash_key,
		       HCLGE_RSS_KEY_SIZE);
	}

	hclge_rss_indir_init_cfg(hdev);
}

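/* Ring-to-vector mappings are written in batches: each command descriptor
 * carries up to HCLGE_VECTOR_ELEMENTS_PER_CMD ring entries, so long ring
 * chains are flushed to firmware one full descriptor at a time and any
 * remainder is sent in a final command.
 */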
int hclge_bind_ring_with_vector(struct hclge_vport *vport,
				int vector_id, bool en,
				struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_dev *hdev = vport->back;
	struct hnae3_ring_chain_node *node;
	struct hclge_desc desc;
	struct hclge_ctrl_vector_chain_cmd *req
		= (struct hclge_ctrl_vector_chain_cmd *)desc.data;
	enum hclge_cmd_status status;
	enum hclge_opcode_type op;
	u16 tqp_type_and_id;
	int i;

	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
	hclge_cmd_setup_basic_desc(&desc, op, false);
	req->int_vector_id = vector_id;

	i = 0;
	for (node = ring_chain; node; node = node->next) {
		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
		hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
				HCLGE_INT_TYPE_S,
				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
				HCLGE_TQP_ID_S, node->tqp_index);
		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
				HCLGE_INT_GL_IDX_S,
				hnae3_get_field(node->int_gl_idx,
						HNAE3_RING_GL_IDX_M,
						HNAE3_RING_GL_IDX_S));
		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
			req->vfid = vport->vport_id;
			status = hclge_cmd_send(&hdev->hw, &desc, 1);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return -EIO;
			}
			i = 0;
			hclge_cmd_setup_basic_desc(&desc,
						   op,
						   false);
			req->int_vector_id = vector_id;
		}
	}

	if (i > 0) {
		req->int_cause_num = i;
		req->vfid = vport->vport_id;
		status = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Map TQP fail, status is %d.\n", status);
			return -EIO;
		}
	}

	return 0;
}

static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
				    struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&hdev->pdev->dev,
			"Get vector index fail. vector_id =%d\n", vector_id);
		return vector_id;
	}

	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
}

static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
				       struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id, ret;

	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		return 0;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
			vector_id, ret);

	return ret;
}

int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
			       struct hclge_promisc_param *param)
{
	struct hclge_promisc_cfg_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);

	req = (struct hclge_promisc_cfg_cmd *)desc.data;
	req->vf_id = param->vf_id;

	/* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
	 * pdev revision(0x20); newer revisions support them. Setting these two
	 * fields does not cause an error when the driver sends the command to
	 * firmware on revision(0x20).
	 */
	req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
		HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", ret);

	return ret;
}

void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
			      bool en_mc, bool en_bc, int vport_id)
{
	if (!param)
		return;

	memset(param, 0, sizeof(struct hclge_promisc_param));
	if (en_uc)
		param->enable = HCLGE_PROMISC_EN_UC;
	if (en_mc)
		param->enable |= HCLGE_PROMISC_EN_MC;
	if (en_bc)
		param->enable |= HCLGE_PROMISC_EN_BC;
	param->vf_id = vport_id;
}

static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
				  bool en_mc_pmc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_promisc_param param;
	bool en_bc_pmc = true;

	/* For revision 0x20, if broadcast promisc is enabled, vlan filter is
	 * always bypassed. So broadcast promisc should be disabled until
	 * the user enables promisc mode.
	 */
	if (handle->pdev->revision == 0x20)
		en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;

	hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
				 vport->vport_id);
	return hclge_cmd_set_promisc_mode(hdev, &param);
}

static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
{
	struct hclge_get_fd_mode_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);

	req = (struct hclge_get_fd_mode_cmd *)desc.data;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
		return ret;
	}

	*fd_mode = req->mode;

	return ret;
}

static int hclge_get_fd_allocation(struct hclge_dev *hdev,
				   u32 *stage1_entry_num,
				   u32 *stage2_entry_num,
				   u16 *stage1_counter_num,
				   u16 *stage2_counter_num)
{
	struct hclge_get_fd_allocation_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);

	req = (struct hclge_get_fd_allocation_cmd *)desc.data;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
			ret);
		return ret;
	}

	*stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
	*stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
	*stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
	*stage2_counter_num = le16_to_cpu(req->stage2_counter_num);

	return ret;
}

static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
{
	struct hclge_set_fd_key_config_cmd *req;
	struct hclge_fd_key_cfg *stage;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);

	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
	stage = &hdev->fd_cfg.key_cfg[stage_num];
	req->stage = stage_num;
	req->key_select = stage->key_sel;
	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);

	return ret;
}

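/* The flow director key width trades off against table depth, as the mode
 * names below suggest: the 400-bit key halves the number of stage-1
 * entries (2K) but leaves room for the MAC-address tuples, while the
 * 200-bit key allows 4K entries with the reduced tuple set.
 */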
static int hclge_init_fd_config(struct hclge_dev *hdev)
{
#define LOW_2_WORDS		0x03
	struct hclge_fd_key_cfg *key_cfg;
	int ret;

	if (!hnae3_dev_fd_supported(hdev))
		return 0;

	ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
	if (ret)
		return ret;

	switch (hdev->fd_cfg.fd_mode) {
	case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
		break;
	case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"Unsupported flow director mode %d\n",
			hdev->fd_cfg.fd_mode);
		return -EOPNOTSUPP;
	}

	hdev->fd_cfg.proto_support =
		TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
		UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
	key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
	key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
	key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
	key_cfg->outer_sipv6_word_en = 0;
	key_cfg->outer_dipv6_word_en = 0;

	key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
				BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
				BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
				BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);

	/* If using the max 400-bit key, we can support tuples for ether type */
	if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
		hdev->fd_cfg.proto_support |= ETHER_FLOW;
		key_cfg->tuple_active |=
				BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
	}

	/* roce_type is used to filter roce frames
	 * dst_vport is used to specify the rule
	 */
	key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);

	ret = hclge_get_fd_allocation(hdev,
				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
	if (ret)
		return ret;

	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
}

static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
				int loc, u8 *key, bool is_add)
{
	struct hclge_fd_tcam_config_1_cmd *req1;
	struct hclge_fd_tcam_config_2_cmd *req2;
	struct hclge_fd_tcam_config_3_cmd *req3;
	struct hclge_desc desc[3];
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);

	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;

	req1->stage = stage;
	req1->xy_sel = sel_x ? 1 : 0;
	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
	req1->index = cpu_to_le32(loc);
	req1->entry_vld = sel_x ? is_add : 0;

	if (key) {
		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
		       sizeof(req2->tcam_data));
		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
	}

	ret = hclge_cmd_send(&hdev->hw, desc, 3);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"config tcam key fail, ret=%d\n",
			ret);

	return ret;
}

static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
			      struct hclge_fd_ad_data *action)
{
	struct hclge_fd_ad_config_cmd *req;
	struct hclge_desc desc;
	u64 ad_data = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);

	req = (struct hclge_fd_ad_config_cmd *)desc.data;
	req->index = cpu_to_le32(loc);
	req->stage = stage;

	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
		      action->write_rule_id_to_bd);
	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
			action->rule_id);
	ad_data <<= 32;
	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
		      action->forward_to_direct_queue);
	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
			action->queue_id);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
			action->counter_id);

	req->ad_data = cpu_to_le64(ad_data);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);

	return ret;
}

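/* In the usual TCAM x/y scheme, each key bit is stored as a pair derived
 * from the tuple value and its mask; the calc_x()/calc_y() helpers used
 * below (defined earlier in this file) compute that pair, so the two
 * planes together express per-bit match and don't-care state. That is why
 * every tuple is emitted into both key_x and key_y.
 */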
static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
				   struct hclge_fd_rule *rule)
{
	u16 tmp_x_s, tmp_y_s;
	u32 tmp_x_l, tmp_y_l;
	int i;

	if (rule->unused_tuple & tuple_bit)
		return true;

	switch (tuple_bit) {
	case 0:
		return false;
	case BIT(INNER_DST_MAC):
		for (i = 0; i < 6; i++) {
			calc_x(key_x[5 - i], rule->tuples.dst_mac[i],
			       rule->tuples_mask.dst_mac[i]);
			calc_y(key_y[5 - i], rule->tuples.dst_mac[i],
			       rule->tuples_mask.dst_mac[i]);
		}

		return true;
	case BIT(INNER_SRC_MAC):
		for (i = 0; i < 6; i++) {
			calc_x(key_x[5 - i], rule->tuples.src_mac[i],
			       rule->tuples_mask.src_mac[i]);
			calc_y(key_y[5 - i], rule->tuples.src_mac[i],
			       rule->tuples_mask.src_mac[i]);
		}

		return true;
	case BIT(INNER_VLAN_TAG_FST):
		calc_x(tmp_x_s, rule->tuples.vlan_tag1,
		       rule->tuples_mask.vlan_tag1);
		calc_y(tmp_y_s, rule->tuples.vlan_tag1,
		       rule->tuples_mask.vlan_tag1);
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	case BIT(INNER_ETH_TYPE):
		calc_x(tmp_x_s, rule->tuples.ether_proto,
		       rule->tuples_mask.ether_proto);
		calc_y(tmp_y_s, rule->tuples.ether_proto,
		       rule->tuples_mask.ether_proto);
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	case BIT(INNER_IP_TOS):
		calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
		calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);

		return true;
	case BIT(INNER_IP_PROTO):
		calc_x(*key_x, rule->tuples.ip_proto,
		       rule->tuples_mask.ip_proto);
		calc_y(*key_y, rule->tuples.ip_proto,
		       rule->tuples_mask.ip_proto);

		return true;
	case BIT(INNER_SRC_IP):
		calc_x(tmp_x_l, rule->tuples.src_ip[3],
		       rule->tuples_mask.src_ip[3]);
		calc_y(tmp_y_l, rule->tuples.src_ip[3],
		       rule->tuples_mask.src_ip[3]);
		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);

		return true;
	case BIT(INNER_DST_IP):
		calc_x(tmp_x_l, rule->tuples.dst_ip[3],
		       rule->tuples_mask.dst_ip[3]);
		calc_y(tmp_y_l, rule->tuples.dst_ip[3],
		       rule->tuples_mask.dst_ip[3]);
		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);

		return true;
	case BIT(INNER_SRC_PORT):
		calc_x(tmp_x_s, rule->tuples.src_port,
		       rule->tuples_mask.src_port);
		calc_y(tmp_y_s, rule->tuples.src_port,
		       rule->tuples_mask.src_port);
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	case BIT(INNER_DST_PORT):
		calc_x(tmp_x_s, rule->tuples.dst_port,
		       rule->tuples_mask.dst_port);
		calc_y(tmp_y_s, rule->tuples.dst_port,
		       rule->tuples_mask.dst_port);
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	default:
		return false;
	}
}

static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
				 u8 vf_id, u8 network_port_id)
{
	u32 port_number = 0;

	if (port_type == HOST_PORT) {
		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
				pf_id);
		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
				vf_id);
		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
	} else {
		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
				HCLGE_NETWORK_PORT_ID_S, network_port_id);
		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
	}

	return port_number;
}

static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
				       __le32 *key_x, __le32 *key_y,
				       struct hclge_fd_rule *rule)
{
	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
	u8 cur_pos = 0, tuple_size, shift_bits;
	int i;

	for (i = 0; i < MAX_META_DATA; i++) {
		tuple_size = meta_data_key_info[i].key_length;
		tuple_bit = key_cfg->meta_data_active & BIT(i);

		switch (tuple_bit) {
		case BIT(ROCE_TYPE):
			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
			cur_pos += tuple_size;
			break;
		case BIT(DST_VPORT):
			port_number = hclge_get_port_number(HOST_PORT, 0,
							    rule->vf_id, 0);
			hnae3_set_field(meta_data,
					GENMASK(cur_pos + tuple_size, cur_pos),
					cur_pos, port_number);
			cur_pos += tuple_size;
			break;
		default:
			break;
		}
	}

	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
	shift_bits = sizeof(meta_data) * 8 - cur_pos;

	*key_x = cpu_to_le32(tmp_x << shift_bits);
	*key_y = cpu_to_le32(tmp_y << shift_bits);
}

/* A complete key is combined from the meta data key and the tuple key.
 * The meta data key is stored at the MSB region, the tuple key at the LSB
 * region, and unused bits are filled with 0.
 */
static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
			    struct hclge_fd_rule *rule)
{
	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
	u8 *cur_key_x, *cur_key_y;
	int i, ret, tuple_size;
	u8 meta_data_region;

	memset(key_x, 0, sizeof(key_x));
	memset(key_y, 0, sizeof(key_y));
	cur_key_x = key_x;
	cur_key_y = key_y;

	for (i = 0; i < MAX_TUPLE; i++) {
		bool tuple_valid;
		u32 check_tuple;

		tuple_size = tuple_key_info[i].key_length / 8;
		check_tuple = key_cfg->tuple_active & BIT(i);

		tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
						     cur_key_y, rule);
		if (tuple_valid) {
			cur_key_x += tuple_size;
			cur_key_y += tuple_size;
		}
	}

	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
			MAX_META_DATA_LENGTH / 8;

	hclge_fd_convert_meta_data(key_cfg,
				   (__le32 *)(key_x + meta_data_region),
				   (__le32 *)(key_y + meta_data_region),
				   rule);

	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
				   true);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"fd key_y config fail, loc=%d, ret=%d\n",
			rule->queue_id, ret);
		return ret;
	}

	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
				   true);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"fd key_x config fail, loc=%d, ret=%d\n",
			rule->queue_id, ret);
	return ret;
}

static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
			       struct hclge_fd_rule *rule)
{
	struct hclge_fd_ad_data ad_data;

	ad_data.ad_id = rule->location;

	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
		ad_data.drop_packet = true;
		ad_data.forward_to_direct_queue = false;
		ad_data.queue_id = 0;
	} else {
		ad_data.drop_packet = false;
		ad_data.forward_to_direct_queue = true;
		ad_data.queue_id = rule->queue_id;
	}

	ad_data.use_counter = false;
	ad_data.counter_id = 0;

	ad_data.use_next_stage = false;
	ad_data.next_input_key = 0;

	ad_data.write_rule_id_to_bd = true;
	ad_data.rule_id = rule->location;

	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
}

4430 static int hclge_fd_check_spec(struct hclge_dev *hdev,
4431 struct ethtool_rx_flow_spec *fs, u32 *unused)
4433 struct ethtool_tcpip4_spec *tcp_ip4_spec;
4434 struct ethtool_usrip4_spec *usr_ip4_spec;
4435 struct ethtool_tcpip6_spec *tcp_ip6_spec;
4436 struct ethtool_usrip6_spec *usr_ip6_spec;
4437 struct ethhdr *ether_spec;
4439 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4442 if (!(fs->flow_type & hdev->fd_cfg.proto_support))
4445 if ((fs->flow_type & FLOW_EXT) &&
4446 (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
4447 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
4451 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4455 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
4456 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
4458 if (!tcp_ip4_spec->ip4src)
4459 *unused |= BIT(INNER_SRC_IP);
4461 if (!tcp_ip4_spec->ip4dst)
4462 *unused |= BIT(INNER_DST_IP);
4464 if (!tcp_ip4_spec->psrc)
4465 *unused |= BIT(INNER_SRC_PORT);
4467 if (!tcp_ip4_spec->pdst)
4468 *unused |= BIT(INNER_DST_PORT);
4470 if (!tcp_ip4_spec->tos)
4471 *unused |= BIT(INNER_IP_TOS);
4473 break;
4474 case IP_USER_FLOW:
4475 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
4476 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4477 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4479 if (!usr_ip4_spec->ip4src)
4480 *unused |= BIT(INNER_SRC_IP);
4482 if (!usr_ip4_spec->ip4dst)
4483 *unused |= BIT(INNER_DST_IP);
4485 if (!usr_ip4_spec->tos)
4486 *unused |= BIT(INNER_IP_TOS);
4488 if (!usr_ip4_spec->proto)
4489 *unused |= BIT(INNER_IP_PROTO);
4491 if (usr_ip4_spec->l4_4_bytes)
4492 return -EOPNOTSUPP;
4494 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
4495 return -EOPNOTSUPP;
4497 break;
4498 case SCTP_V6_FLOW:
4499 case TCP_V6_FLOW:
4500 case UDP_V6_FLOW:
4501 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
4502 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4503 BIT(INNER_IP_TOS);
4505 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
4506 !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
4507 *unused |= BIT(INNER_SRC_IP);
4509 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
4510 !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
4511 *unused |= BIT(INNER_DST_IP);
4513 if (!tcp_ip6_spec->psrc)
4514 *unused |= BIT(INNER_SRC_PORT);
4516 if (!tcp_ip6_spec->pdst)
4517 *unused |= BIT(INNER_DST_PORT);
4519 if (tcp_ip6_spec->tclass)
4520 return -EOPNOTSUPP;
4522 break;
4523 case IPV6_USER_FLOW:
4524 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
4525 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4526 BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
4527 BIT(INNER_DST_PORT);
4529 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
4530 !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
4531 *unused |= BIT(INNER_SRC_IP);
4533 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
4534 !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
4535 *unused |= BIT(INNER_DST_IP);
4537 if (!usr_ip6_spec->l4_proto)
4538 *unused |= BIT(INNER_IP_PROTO);
4540 if (usr_ip6_spec->tclass)
4541 return -EOPNOTSUPP;
4543 if (usr_ip6_spec->l4_4_bytes)
4544 return -EOPNOTSUPP;
4546 break;
4547 case ETHER_FLOW:
4548 ether_spec = &fs->h_u.ether_spec;
4549 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4550 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
4551 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
4553 if (is_zero_ether_addr(ether_spec->h_source))
4554 *unused |= BIT(INNER_SRC_MAC);
4556 if (is_zero_ether_addr(ether_spec->h_dest))
4557 *unused |= BIT(INNER_DST_MAC);
4559 if (!ether_spec->h_proto)
4560 *unused |= BIT(INNER_ETH_TYPE);
4562 break;
4563 default:
4564 return -EOPNOTSUPP;
4565 }
4567 if ((fs->flow_type & FLOW_EXT)) {
4568 if (fs->h_ext.vlan_etype)
4569 return -EOPNOTSUPP;
4570 if (!fs->h_ext.vlan_tci)
4571 *unused |= BIT(INNER_VLAN_TAG_FST);
4573 if (fs->m_ext.vlan_tci) {
4574 if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
4575 return -EINVAL;
4576 }
4577 } else {
4578 *unused |= BIT(INNER_VLAN_TAG_FST);
4579 }
4581 if (fs->flow_type & FLOW_MAC_EXT) {
4582 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
4583 return -EOPNOTSUPP;
4585 if (is_zero_ether_addr(fs->h_ext.h_dest))
4586 *unused |= BIT(INNER_DST_MAC);
4587 else
4588 *unused &= ~(BIT(INNER_DST_MAC));
4589 }
4591 return 0;
4592 }
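/* The rules in fd_rule_list are kept sorted by ascending location, so the
 * lookups below can stop at the first node whose location is >= the one
 * requested.
 */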
4594 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
4595 {
4596 struct hclge_fd_rule *rule = NULL;
4597 struct hlist_node *node2;
4599 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4600 if (rule->location >= location)
4601 break;
4602 }
4604 return rule && rule->location == location;
4605 }
4607 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
4608 struct hclge_fd_rule *new_rule,
4609 u16 location,
4610 bool is_add)
4611 {
4612 struct hclge_fd_rule *rule = NULL, *parent = NULL;
4613 struct hlist_node *node2;
4615 if (is_add && !new_rule)
4616 return -EINVAL;
4618 hlist_for_each_entry_safe(rule, node2,
4619 &hdev->fd_rule_list, rule_node) {
4620 if (rule->location >= location)
4621 break;
4622 parent = rule;
4623 }
4625 if (rule && rule->location == location) {
4626 hlist_del(&rule->rule_node);
4627 kfree(rule);
4628 hdev->hclge_fd_rule_num--;
4630 if (!is_add)
4631 return 0;
4633 } else if (!is_add) {
4634 dev_err(&hdev->pdev->dev,
4635 "delete fail, rule %d is nonexistent\n",
4636 location);
4637 return -EINVAL;
4638 }
4640 INIT_HLIST_NODE(&new_rule->rule_node);
4642 if (parent)
4643 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
4644 else
4645 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
4647 hdev->hclge_fd_rule_num++;
4649 return 0;
4650 }
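/* Ethtool flow specs carry their fields in big-endian; hclge_fd_get_tuple()
 * below converts them to host order for the rule tuples. IPv4 addresses
 * occupy only index 3 of the four-word src_ip/dst_ip arrays, which are
 * sized for IPv6.
 */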
4652 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
4653 struct ethtool_rx_flow_spec *fs,
4654 struct hclge_fd_rule *rule)
4655 {
4656 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
4658 switch (flow_type) {
4659 case SCTP_V4_FLOW:
4660 case TCP_V4_FLOW:
4661 case UDP_V4_FLOW:
4662 rule->tuples.src_ip[3] =
4663 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
4664 rule->tuples_mask.src_ip[3] =
4665 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
4667 rule->tuples.dst_ip[3] =
4668 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
4669 rule->tuples_mask.dst_ip[3] =
4670 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
4672 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
4673 rule->tuples_mask.src_port =
4674 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
4676 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
4677 rule->tuples_mask.dst_port =
4678 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
4680 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
4681 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
4683 rule->tuples.ether_proto = ETH_P_IP;
4684 rule->tuples_mask.ether_proto = 0xFFFF;
4686 break;
4687 case IP_USER_FLOW:
4688 rule->tuples.src_ip[3] =
4689 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
4690 rule->tuples_mask.src_ip[3] =
4691 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
4693 rule->tuples.dst_ip[3] =
4694 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
4695 rule->tuples_mask.dst_ip[3] =
4696 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
4698 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
4699 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
4701 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
4702 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
4704 rule->tuples.ether_proto = ETH_P_IP;
4705 rule->tuples_mask.ether_proto = 0xFFFF;
4707 break;
4708 case SCTP_V6_FLOW:
4709 case TCP_V6_FLOW:
4710 case UDP_V6_FLOW:
4711 be32_to_cpu_array(rule->tuples.src_ip,
4712 fs->h_u.tcp_ip6_spec.ip6src, 4);
4713 be32_to_cpu_array(rule->tuples_mask.src_ip,
4714 fs->m_u.tcp_ip6_spec.ip6src, 4);
4716 be32_to_cpu_array(rule->tuples.dst_ip,
4717 fs->h_u.tcp_ip6_spec.ip6dst, 4);
4718 be32_to_cpu_array(rule->tuples_mask.dst_ip,
4719 fs->m_u.tcp_ip6_spec.ip6dst, 4);
4721 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
4722 rule->tuples_mask.src_port =
4723 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
4725 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
4726 rule->tuples_mask.dst_port =
4727 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
4729 rule->tuples.ether_proto = ETH_P_IPV6;
4730 rule->tuples_mask.ether_proto = 0xFFFF;
4732 break;
4733 case IPV6_USER_FLOW:
4734 be32_to_cpu_array(rule->tuples.src_ip,
4735 fs->h_u.usr_ip6_spec.ip6src, 4);
4736 be32_to_cpu_array(rule->tuples_mask.src_ip,
4737 fs->m_u.usr_ip6_spec.ip6src, 4);
4739 be32_to_cpu_array(rule->tuples.dst_ip,
4740 fs->h_u.usr_ip6_spec.ip6dst, 4);
4741 be32_to_cpu_array(rule->tuples_mask.dst_ip,
4742 fs->m_u.usr_ip6_spec.ip6dst, 4);
4744 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
4745 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
4747 rule->tuples.ether_proto = ETH_P_IPV6;
4748 rule->tuples_mask.ether_proto = 0xFFFF;
4750 break;
4751 case ETHER_FLOW:
4752 ether_addr_copy(rule->tuples.src_mac,
4753 fs->h_u.ether_spec.h_source);
4754 ether_addr_copy(rule->tuples_mask.src_mac,
4755 fs->m_u.ether_spec.h_source);
4757 ether_addr_copy(rule->tuples.dst_mac,
4758 fs->h_u.ether_spec.h_dest);
4759 ether_addr_copy(rule->tuples_mask.dst_mac,
4760 fs->m_u.ether_spec.h_dest);
4762 rule->tuples.ether_proto =
4763 be16_to_cpu(fs->h_u.ether_spec.h_proto);
4764 rule->tuples_mask.ether_proto =
4765 be16_to_cpu(fs->m_u.ether_spec.h_proto);
4767 break;
4768 default:
4769 return -EOPNOTSUPP;
4770 }
4772 switch (flow_type) {
4773 case SCTP_V4_FLOW:
4774 case SCTP_V6_FLOW:
4775 rule->tuples.ip_proto = IPPROTO_SCTP;
4776 rule->tuples_mask.ip_proto = 0xFF;
4777 break;
4778 case TCP_V4_FLOW:
4779 case TCP_V6_FLOW:
4780 rule->tuples.ip_proto = IPPROTO_TCP;
4781 rule->tuples_mask.ip_proto = 0xFF;
4782 break;
4783 case UDP_V4_FLOW:
4784 case UDP_V6_FLOW:
4785 rule->tuples.ip_proto = IPPROTO_UDP;
4786 rule->tuples_mask.ip_proto = 0xFF;
4787 break;
4788 default:
4789 break;
4790 }
4792 if ((fs->flow_type & FLOW_EXT)) {
4793 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
4794 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
4795 }
4797 if (fs->flow_type & FLOW_MAC_EXT) {
4798 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
4799 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
4800 }
4802 return 0;
4803 }
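/* Adding an ethtool flow-director entry: validate the spec, resolve the
 * ring cookie to a vport/queue pair (or a drop action), then program the
 * action and key into hardware before linking the rule into the sorted
 * rule list.
 */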
4805 static int hclge_add_fd_entry(struct hnae3_handle *handle,
4806 struct ethtool_rxnfc *cmd)
4807 {
4808 struct hclge_vport *vport = hclge_get_vport(handle);
4809 struct hclge_dev *hdev = vport->back;
4810 u16 dst_vport_id = 0, q_index = 0;
4811 struct ethtool_rx_flow_spec *fs;
4812 struct hclge_fd_rule *rule;
4813 u32 unused = 0;
4814 u8 action;
4815 int ret;
4817 if (!hnae3_dev_fd_supported(hdev))
4818 return -EOPNOTSUPP;
4820 if (!hdev->fd_en) {
4821 dev_warn(&hdev->pdev->dev,
4822 "Please enable flow director first\n");
4823 return -EOPNOTSUPP;
4824 }
4826 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4828 ret = hclge_fd_check_spec(hdev, fs, &unused);
4829 if (ret) {
4830 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
4831 return ret;
4832 }
4834 if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
4835 action = HCLGE_FD_ACTION_DROP_PACKET;
4836 } else {
4837 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
4838 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
4839 u16 tqps;
4841 if (vf > hdev->num_req_vfs) {
4842 dev_err(&hdev->pdev->dev,
4843 "Error: vf id (%d) > max vf num (%d)\n",
4844 vf, hdev->num_req_vfs);
4845 return -EINVAL;
4846 }
4848 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
4849 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
4851 if (ring >= tqps) {
4852 dev_err(&hdev->pdev->dev,
4853 "Error: queue id (%d) > max tqp num (%d)\n",
4854 ring, tqps - 1);
4855 return -EINVAL;
4856 }
4858 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
4859 q_index = ring;
4860 }
4862 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
4863 if (!rule)
4864 return -ENOMEM;
4866 ret = hclge_fd_get_tuple(hdev, fs, rule);
4867 if (ret)
4868 goto free_rule;
4870 rule->flow_type = fs->flow_type;
4872 rule->location = fs->location;
4873 rule->unused_tuple = unused;
4874 rule->vf_id = dst_vport_id;
4875 rule->queue_id = q_index;
4876 rule->action = action;
4878 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
4879 if (ret)
4880 goto free_rule;
4882 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
4883 if (ret)
4884 goto free_rule;
4886 ret = hclge_fd_update_rule_list(hdev, rule, fs->location, true);
4887 if (ret)
4888 goto free_rule;
4890 return ret;
4892 free_rule:
4893 kfree(rule);
4894 return ret;
4895 }
4897 static int hclge_del_fd_entry(struct hnae3_handle *handle,
4898 struct ethtool_rxnfc *cmd)
4899 {
4900 struct hclge_vport *vport = hclge_get_vport(handle);
4901 struct hclge_dev *hdev = vport->back;
4902 struct ethtool_rx_flow_spec *fs;
4903 int ret;
4905 if (!hnae3_dev_fd_supported(hdev))
4906 return -EOPNOTSUPP;
4908 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4910 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4911 return -EINVAL;
4913 if (!hclge_fd_rule_exist(hdev, fs->location)) {
4914 dev_err(&hdev->pdev->dev,
4915 "Delete fail, rule %d is nonexistent\n",
4916 fs->location);
4917 return -ENOENT;
4918 }
4920 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4921 fs->location, NULL, false);
4922 if (ret)
4923 return ret;
4925 return hclge_fd_update_rule_list(hdev, NULL, fs->location,
4926 false);
4927 }
4929 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
4930 bool clear_list)
4931 {
4932 struct hclge_vport *vport = hclge_get_vport(handle);
4933 struct hclge_dev *hdev = vport->back;
4934 struct hclge_fd_rule *rule;
4935 struct hlist_node *node;
4937 if (!hnae3_dev_fd_supported(hdev))
4938 return;
4940 if (clear_list) {
4941 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
4942 rule_node) {
4943 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4944 rule->location, NULL, false);
4945 hlist_del(&rule->rule_node);
4946 kfree(rule);
4947 hdev->hclge_fd_rule_num--;
4948 }
4949 } else {
4950 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
4951 rule_node)
4952 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4953 rule->location, NULL, false);
4954 }
4955 }
4957 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
4958 {
4959 struct hclge_vport *vport = hclge_get_vport(handle);
4960 struct hclge_dev *hdev = vport->back;
4961 struct hclge_fd_rule *rule;
4962 struct hlist_node *node;
4963 int ret;
4965 /* Return ok here, because reset error handling will check this
4966 * return value. If error is returned here, the reset process will
4967 * fail.
4968 */
4969 if (!hnae3_dev_fd_supported(hdev))
4970 return 0;
4972 /* if fd is disabled, should not restore it when reset */
4973 if (!hdev->fd_en)
4974 return 0;
4976 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
4977 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
4978 if (!ret)
4979 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
4981 if (ret) {
4982 dev_warn(&hdev->pdev->dev,
4983 "Restore rule %d failed, remove it\n",
4984 rule->location);
4985 hlist_del(&rule->rule_node);
4986 kfree(rule);
4987 hdev->hclge_fd_rule_num--;
4988 }
4989 }
4991 return 0;
4992 }
4993 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
4994 struct ethtool_rxnfc *cmd)
4995 {
4996 struct hclge_vport *vport = hclge_get_vport(handle);
4997 struct hclge_dev *hdev = vport->back;
4999 if (!hnae3_dev_fd_supported(hdev))
5000 return -EOPNOTSUPP;
5002 cmd->rule_cnt = hdev->hclge_fd_rule_num;
5003 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5005 return 0;
5006 }
5008 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5009 struct ethtool_rxnfc *cmd)
5010 {
5011 struct hclge_vport *vport = hclge_get_vport(handle);
5012 struct hclge_fd_rule *rule = NULL;
5013 struct hclge_dev *hdev = vport->back;
5014 struct ethtool_rx_flow_spec *fs;
5015 struct hlist_node *node2;
5017 if (!hnae3_dev_fd_supported(hdev))
5018 return -EOPNOTSUPP;
5020 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5022 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5023 if (rule->location >= fs->location)
5024 break;
5025 }
5027 if (!rule || fs->location != rule->location)
5028 return -ENOENT;
5030 fs->flow_type = rule->flow_type;
5031 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5032 case SCTP_V4_FLOW:
5033 case TCP_V4_FLOW:
5034 case UDP_V4_FLOW:
5035 fs->h_u.tcp_ip4_spec.ip4src =
5036 cpu_to_be32(rule->tuples.src_ip[3]);
5037 fs->m_u.tcp_ip4_spec.ip4src =
5038 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5039 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5041 fs->h_u.tcp_ip4_spec.ip4dst =
5042 cpu_to_be32(rule->tuples.dst_ip[3]);
5043 fs->m_u.tcp_ip4_spec.ip4dst =
5044 rule->unused_tuple & BIT(INNER_DST_IP) ?
5045 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5047 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5048 fs->m_u.tcp_ip4_spec.psrc =
5049 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5050 0 : cpu_to_be16(rule->tuples_mask.src_port);
5052 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5053 fs->m_u.tcp_ip4_spec.pdst =
5054 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5055 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5057 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5058 fs->m_u.tcp_ip4_spec.tos =
5059 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5060 0 : rule->tuples_mask.ip_tos;
5062 break;
5063 case IP_USER_FLOW:
5064 fs->h_u.usr_ip4_spec.ip4src =
5065 cpu_to_be32(rule->tuples.src_ip[3]);
5066 fs->m_u.usr_ip4_spec.ip4src =
5067 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5068 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5070 fs->h_u.usr_ip4_spec.ip4dst =
5071 cpu_to_be32(rule->tuples.dst_ip[3]);
5072 fs->m_u.usr_ip4_spec.ip4dst =
5073 rule->unused_tuple & BIT(INNER_DST_IP) ?
5074 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5076 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5077 fs->m_u.usr_ip4_spec.tos =
5078 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5079 0 : rule->tuples_mask.ip_tos;
5081 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5082 fs->m_u.usr_ip4_spec.proto =
5083 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5084 0 : rule->tuples_mask.ip_proto;
5086 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5088 break;
5089 case SCTP_V6_FLOW:
5090 case TCP_V6_FLOW:
5091 case UDP_V6_FLOW:
5092 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5093 rule->tuples.src_ip, 4);
5094 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5095 memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4);
5096 else
5097 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5098 rule->tuples_mask.src_ip, 4);
5100 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5101 rule->tuples.dst_ip, 4);
5102 if (rule->unused_tuple & BIT(INNER_DST_IP))
5103 memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5104 else
5105 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5106 rule->tuples_mask.dst_ip, 4);
5108 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5109 fs->m_u.tcp_ip6_spec.psrc =
5110 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5111 0 : cpu_to_be16(rule->tuples_mask.src_port);
5113 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5114 fs->m_u.tcp_ip6_spec.pdst =
5115 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5116 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5118 break;
5119 case IPV6_USER_FLOW:
5120 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5121 rule->tuples.src_ip, 4);
5122 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5123 memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4);
5124 else
5125 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
5126 rule->tuples_mask.src_ip, 4);
5128 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
5129 rule->tuples.dst_ip, 4);
5130 if (rule->unused_tuple & BIT(INNER_DST_IP))
5131 memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5132 else
5133 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
5134 rule->tuples_mask.dst_ip, 4);
5136 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
5137 fs->m_u.usr_ip6_spec.l4_proto =
5138 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5139 0 : rule->tuples_mask.ip_proto;
5141 break;
5142 case ETHER_FLOW:
5143 ether_addr_copy(fs->h_u.ether_spec.h_source,
5144 rule->tuples.src_mac);
5145 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
5146 eth_zero_addr(fs->m_u.ether_spec.h_source);
5147 else
5148 ether_addr_copy(fs->m_u.ether_spec.h_source,
5149 rule->tuples_mask.src_mac);
5151 ether_addr_copy(fs->h_u.ether_spec.h_dest,
5152 rule->tuples.dst_mac);
5153 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5154 eth_zero_addr(fs->m_u.ether_spec.h_dest);
5155 else
5156 ether_addr_copy(fs->m_u.ether_spec.h_dest,
5157 rule->tuples_mask.dst_mac);
5159 fs->h_u.ether_spec.h_proto =
5160 cpu_to_be16(rule->tuples.ether_proto);
5161 fs->m_u.ether_spec.h_proto =
5162 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
5163 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
5165 break;
5166 default:
5167 return -EOPNOTSUPP;
5168 }
5170 if (fs->flow_type & FLOW_EXT) {
5171 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
5172 fs->m_ext.vlan_tci =
5173 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
5174 cpu_to_be16(VLAN_VID_MASK) :
5175 cpu_to_be16(rule->tuples_mask.vlan_tag1);
5176 }
5178 if (fs->flow_type & FLOW_MAC_EXT) {
5179 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
5180 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5181 eth_zero_addr(fs->m_ext.h_dest);
5182 else
5183 ether_addr_copy(fs->m_ext.h_dest,
5184 rule->tuples_mask.dst_mac);
5185 }
5187 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5188 fs->ring_cookie = RX_CLS_FLOW_DISC;
5189 } else {
5190 u64 vf_id;
5192 fs->ring_cookie = rule->queue_id;
5193 vf_id = rule->vf_id;
5194 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
5195 fs->ring_cookie |= vf_id;
5196 }
5198 return 0;
5199 }
5201 static int hclge_get_all_rules(struct hnae3_handle *handle,
5202 struct ethtool_rxnfc *cmd, u32 *rule_locs)
5203 {
5204 struct hclge_vport *vport = hclge_get_vport(handle);
5205 struct hclge_dev *hdev = vport->back;
5206 struct hclge_fd_rule *rule;
5207 struct hlist_node *node2;
5208 int cnt = 0;
5210 if (!hnae3_dev_fd_supported(hdev))
5211 return -EOPNOTSUPP;
5213 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5215 hlist_for_each_entry_safe(rule, node2,
5216 &hdev->fd_rule_list, rule_node) {
5217 if (cnt == cmd->rule_cnt)
5218 return -EMSGSIZE;
5220 rule_locs[cnt] = rule->location;
5221 cnt++;
5222 }
5224 cmd->rule_cnt = cnt;
5226 return 0;
5227 }
5229 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
5230 {
5231 struct hclge_vport *vport = hclge_get_vport(handle);
5232 struct hclge_dev *hdev = vport->back;
5234 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
5235 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
5236 }
5238 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
5239 {
5240 struct hclge_vport *vport = hclge_get_vport(handle);
5241 struct hclge_dev *hdev = vport->back;
5243 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
5244 }
5246 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
5247 {
5248 struct hclge_vport *vport = hclge_get_vport(handle);
5249 struct hclge_dev *hdev = vport->back;
5251 return hdev->rst_stats.hw_reset_done_cnt;
5252 }
5254 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
5255 {
5256 struct hclge_vport *vport = hclge_get_vport(handle);
5257 struct hclge_dev *hdev = vport->back;
5259 hdev->fd_en = enable;
5260 if (!enable)
5261 hclge_del_all_fd_entries(handle, false);
5262 else
5263 hclge_restore_fd_entries(handle);
5264 }
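/* Enable/disable the MAC datapath in one command: TX/RX enable, padding,
 * FCS insertion/stripping and oversize truncation all follow the "enable"
 * argument, while the 1588 and loopback bits are always left cleared here.
 */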
5266 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
5267 {
5268 struct hclge_desc desc;
5269 struct hclge_config_mac_mode_cmd *req =
5270 (struct hclge_config_mac_mode_cmd *)desc.data;
5271 u32 loop_en = 0;
5272 int ret;
5274 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
5275 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
5276 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
5277 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
5278 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
5279 hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
5280 hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
5281 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
5282 hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
5283 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
5284 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
5285 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
5286 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
5287 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
5288 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
5289 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5291 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5292 if (ret)
5293 dev_err(&hdev->pdev->dev,
5294 "mac enable fail, ret =%d.\n", ret);
5295 }
5297 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
5298 {
5299 struct hclge_config_mac_mode_cmd *req;
5300 struct hclge_desc desc;
5301 u32 loop_en;
5302 int ret;
5304 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
5305 /* 1 Read out the MAC mode config at first */
5306 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
5307 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5308 if (ret) {
5309 dev_err(&hdev->pdev->dev,
5310 "mac loopback get fail, ret =%d.\n", ret);
5314 /* 2 Then setup the loopback flag */
5315 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
5316 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
5317 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
5318 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
5320 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5322 /* 3 Config mac work mode with loopback flag
5323 * and its original configure parameters
5324 */
5325 hclge_cmd_reuse_desc(&desc, false);
5326 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5327 if (ret)
5328 dev_err(&hdev->pdev->dev,
5329 "mac loopback set fail, ret =%d.\n", ret);
5330 return ret;
5331 }
5333 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
5334 enum hnae3_loop loop_mode)
5335 {
5336 #define HCLGE_SERDES_RETRY_MS 10
5337 #define HCLGE_SERDES_RETRY_NUM 100
5339 #define HCLGE_MAC_LINK_STATUS_MS 20
5340 #define HCLGE_MAC_LINK_STATUS_NUM 10
5341 #define HCLGE_MAC_LINK_STATUS_DOWN 0
5342 #define HCLGE_MAC_LINK_STATUS_UP 1
5344 struct hclge_serdes_lb_cmd *req;
5345 struct hclge_desc desc;
5346 int mac_link_ret = 0;
5347 int ret, i = 0;
5348 u8 loop_mode_b;
5350 req = (struct hclge_serdes_lb_cmd *)desc.data;
5351 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
5353 switch (loop_mode) {
5354 case HNAE3_LOOP_SERIAL_SERDES:
5355 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
5356 break;
5357 case HNAE3_LOOP_PARALLEL_SERDES:
5358 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
5359 break;
5360 default:
5361 dev_err(&hdev->pdev->dev,
5362 "unsupported serdes loopback mode %d\n", loop_mode);
5363 return -ENOTSUPP;
5364 }
5366 if (en) {
5367 req->enable = loop_mode_b;
5368 req->mask = loop_mode_b;
5369 mac_link_ret = HCLGE_MAC_LINK_STATUS_UP;
5370 } else {
5371 req->mask = loop_mode_b;
5372 mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN;
5373 }
5375 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5376 if (ret) {
5377 dev_err(&hdev->pdev->dev,
5378 "serdes loopback set fail, ret = %d\n", ret);
5379 return ret;
5380 }
5382 do {
5383 msleep(HCLGE_SERDES_RETRY_MS);
5384 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
5385 true);
5386 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5387 if (ret) {
5388 dev_err(&hdev->pdev->dev,
5389 "serdes loopback get, ret = %d\n", ret);
5390 return ret;
5391 }
5392 } while (++i < HCLGE_SERDES_RETRY_NUM &&
5393 !(req->result & HCLGE_CMD_SERDES_DONE_B));
5395 if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
5396 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
5397 return -EBUSY;
5398 } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
5399 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
5400 return -EIO;
5401 }
5403 hclge_cfg_mac_mode(hdev, en);
5405 i = 0;
5406 do {
5407 /* serdes internal loopback, independent of the network cable. */
5408 msleep(HCLGE_MAC_LINK_STATUS_MS);
5409 ret = hclge_get_mac_link_status(hdev);
5410 if (ret == mac_link_ret)
5411 return 0;
5412 } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
5414 dev_err(&hdev->pdev->dev, "config mac mode timeout\n");
5416 return -EBUSY;
5417 }
5419 static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
5420 int stream_id, bool enable)
5421 {
5422 struct hclge_desc desc;
5423 struct hclge_cfg_com_tqp_queue_cmd *req =
5424 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
5425 int ret;
5427 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
5428 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
5429 req->stream_id = cpu_to_le16(stream_id);
5430 req->enable |= enable << HCLGE_TQP_ENABLE_B;
5432 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5433 if (ret)
5434 dev_err(&hdev->pdev->dev,
5435 "Tqp enable fail, status =%d.\n", ret);
5436 return ret;
5437 }
5439 static int hclge_set_loopback(struct hnae3_handle *handle,
5440 enum hnae3_loop loop_mode, bool en)
5441 {
5442 struct hclge_vport *vport = hclge_get_vport(handle);
5443 struct hnae3_knic_private_info *kinfo;
5444 struct hclge_dev *hdev = vport->back;
5445 int i, ret;
5447 switch (loop_mode) {
5448 case HNAE3_LOOP_APP:
5449 ret = hclge_set_app_loopback(hdev, en);
5450 break;
5451 case HNAE3_LOOP_SERIAL_SERDES:
5452 case HNAE3_LOOP_PARALLEL_SERDES:
5453 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
5454 break;
5455 default:
5456 ret = -ENOTSUPP;
5457 dev_err(&hdev->pdev->dev,
5458 "loop_mode %d is not supported\n", loop_mode);
5459 break;
5460 }
5462 if (ret)
5463 return ret;
5465 kinfo = &vport->nic.kinfo;
5466 for (i = 0; i < kinfo->num_tqps; i++) {
5467 ret = hclge_tqp_enable(hdev, i, 0, en);
5468 if (ret)
5469 return ret;
5470 }
5472 return 0;
5473 }
5475 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
5476 {
5477 struct hclge_vport *vport = hclge_get_vport(handle);
5478 struct hnae3_knic_private_info *kinfo;
5479 struct hnae3_queue *queue;
5480 struct hclge_tqp *tqp;
5481 int i;
5483 kinfo = &vport->nic.kinfo;
5484 for (i = 0; i < kinfo->num_tqps; i++) {
5485 queue = handle->kinfo.tqp[i];
5486 tqp = container_of(queue, struct hclge_tqp, q);
5487 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
5488 }
5489 }
5491 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
5492 {
5493 struct hclge_vport *vport = hclge_get_vport(handle);
5494 struct hclge_dev *hdev = vport->back;
5496 if (enable) {
5497 mod_timer(&hdev->service_timer, jiffies + HZ);
5498 } else {
5499 del_timer_sync(&hdev->service_timer);
5500 cancel_work_sync(&hdev->service_task);
5501 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
5502 }
5503 }
5505 static int hclge_ae_start(struct hnae3_handle *handle)
5506 {
5507 struct hclge_vport *vport = hclge_get_vport(handle);
5508 struct hclge_dev *hdev = vport->back;
5510 /* mac enable */
5511 hclge_cfg_mac_mode(hdev, true);
5512 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
5513 hdev->hw.mac.link = 0;
5515 /* reset tqp stats */
5516 hclge_reset_tqp_stats(handle);
5518 hclge_mac_start_phy(hdev);
5520 return 0;
5521 }
5523 static void hclge_ae_stop(struct hnae3_handle *handle)
5524 {
5525 struct hclge_vport *vport = hclge_get_vport(handle);
5526 struct hclge_dev *hdev = vport->back;
5527 int i;
5529 set_bit(HCLGE_STATE_DOWN, &hdev->state);
5531 /* If it is not PF reset, the firmware will disable the MAC,
5532 * so we only need to stop the PHY here.
5533 */
5534 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
5535 hdev->reset_type != HNAE3_FUNC_RESET) {
5536 hclge_mac_stop_phy(hdev);
5537 return;
5538 }
5540 for (i = 0; i < handle->kinfo.num_tqps; i++)
5541 hclge_reset_tqp(handle, i);
5543 /* Mac disable */
5544 hclge_cfg_mac_mode(hdev, false);
5546 hclge_mac_stop_phy(hdev);
5548 /* reset tqp stats */
5549 hclge_reset_tqp_stats(handle);
5550 hclge_update_link_status(hdev);
5551 }
5553 int hclge_vport_start(struct hclge_vport *vport)
5554 {
5555 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
5556 vport->last_active_jiffies = jiffies;
5557 return 0;
5558 }
5560 void hclge_vport_stop(struct hclge_vport *vport)
5561 {
5562 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
5563 }
5565 static int hclge_client_start(struct hnae3_handle *handle)
5566 {
5567 struct hclge_vport *vport = hclge_get_vport(handle);
5569 return hclge_vport_start(vport);
5570 }
5572 static void hclge_client_stop(struct hnae3_handle *handle)
5573 {
5574 struct hclge_vport *vport = hclge_get_vport(handle);
5576 hclge_vport_stop(vport);
5577 }
5579 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
5580 u16 cmdq_resp, u8 resp_code,
5581 enum hclge_mac_vlan_tbl_opcode op)
5582 {
5583 struct hclge_dev *hdev = vport->back;
5584 int return_status = -EIO;
5586 if (cmdq_resp) {
5587 dev_err(&hdev->pdev->dev,
5588 "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
5589 cmdq_resp);
5590 return -EIO;
5591 }
5593 if (op == HCLGE_MAC_VLAN_ADD) {
5594 if ((!resp_code) || (resp_code == 1)) {
5595 return_status = 0;
5596 } else if (resp_code == 2) {
5597 return_status = -ENOSPC;
5598 dev_err(&hdev->pdev->dev,
5599 "add mac addr failed for uc_overflow.\n");
5600 } else if (resp_code == 3) {
5601 return_status = -ENOSPC;
5602 dev_err(&hdev->pdev->dev,
5603 "add mac addr failed for mc_overflow.\n");
5604 } else {
5605 dev_err(&hdev->pdev->dev,
5606 "add mac addr failed for undefined, code=%d.\n",
5607 resp_code);
5608 }
5609 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
5610 if (!resp_code) {
5611 return_status = 0;
5612 } else if (resp_code == 1) {
5613 return_status = -ENOENT;
5614 dev_dbg(&hdev->pdev->dev,
5615 "remove mac addr failed for miss.\n");
5616 } else {
5617 dev_err(&hdev->pdev->dev,
5618 "remove mac addr failed for undefined, code=%d.\n",
5619 resp_code);
5620 }
5621 } else if (op == HCLGE_MAC_VLAN_LKUP) {
5622 if (!resp_code) {
5623 return_status = 0;
5624 } else if (resp_code == 1) {
5625 return_status = -ENOENT;
5626 dev_dbg(&hdev->pdev->dev,
5627 "lookup mac addr failed for miss.\n");
5628 } else {
5629 dev_err(&hdev->pdev->dev,
5630 "lookup mac addr failed for undefined, code=%d.\n",
5631 resp_code);
5632 }
5633 } else {
5634 return_status = -EINVAL;
5635 dev_err(&hdev->pdev->dev,
5636 "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
5637 op);
5638 }
5640 return return_status;
5641 }
5643 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
5644 {
5645 int word_num;
5646 int bit_num;
5648 if (vfid > 255 || vfid < 0)
5649 return -EIO;
5651 if (vfid >= 0 && vfid <= 191) {
5652 word_num = vfid / 32;
5653 bit_num = vfid % 32;
5654 if (clr)
5655 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
5656 else
5657 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
5658 } else {
5659 word_num = (vfid - 192) / 32;
5660 bit_num = vfid % 32;
5661 if (clr)
5662 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
5663 else
5664 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
5665 }
5667 return 0;
5668 }
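/* Function ids 0-191 are tracked in the data words of desc[1] and ids
 * 192-255 in desc[2]; each 32-bit word covers 32 functions, hence the
 * /32 and %32 arithmetic above.
 */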
5670 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
5671 {
5672 #define HCLGE_DESC_NUMBER 3
5673 #define HCLGE_FUNC_NUMBER_PER_DESC 6
5674 int i, j;
5676 for (i = 1; i < HCLGE_DESC_NUMBER; i++)
5677 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
5678 if (desc[i].data[j])
5679 return false;
5681 return true;
5682 }
5684 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
5685 const u8 *addr, bool is_mc)
5686 {
5687 const unsigned char *mac_addr = addr;
5688 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
5689 (mac_addr[0]) | (mac_addr[1] << 8);
5690 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
5692 hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5693 if (is_mc) {
5694 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
5695 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5696 }
5698 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
5699 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
5700 }
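/* The table entry stores the MAC address as two little-endian fields:
 * bytes 0-3 are packed into mac_addr_hi32 (byte 3 ending up in the most
 * significant byte) and bytes 4-5 into mac_addr_lo16.
 */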
5702 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
5703 struct hclge_mac_vlan_tbl_entry_cmd *req)
5704 {
5705 struct hclge_dev *hdev = vport->back;
5706 struct hclge_desc desc;
5707 u8 resp_code;
5708 u16 retval;
5709 int ret;
5711 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
5713 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5715 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5716 if (ret) {
5717 dev_err(&hdev->pdev->dev,
5718 "del mac addr failed for cmd_send, ret =%d.\n",
5719 ret);
5720 return ret;
5721 }
5722 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5723 retval = le16_to_cpu(desc.retval);
5725 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
5726 HCLGE_MAC_VLAN_REMOVE);
5727 }
5729 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
5730 struct hclge_mac_vlan_tbl_entry_cmd *req,
5731 struct hclge_desc *desc,
5732 bool is_mc)
5733 {
5734 struct hclge_dev *hdev = vport->back;
5735 u8 resp_code;
5736 u16 retval;
5737 int ret;
5739 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
5740 if (is_mc) {
5741 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5742 memcpy(desc[0].data,
5743 req,
5744 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5745 hclge_cmd_setup_basic_desc(&desc[1],
5746 HCLGE_OPC_MAC_VLAN_ADD,
5747 true);
5748 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5749 hclge_cmd_setup_basic_desc(&desc[2],
5750 HCLGE_OPC_MAC_VLAN_ADD,
5751 true);
5752 ret = hclge_cmd_send(&hdev->hw, desc, 3);
5753 } else {
5754 memcpy(desc[0].data,
5755 req,
5756 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5757 ret = hclge_cmd_send(&hdev->hw, desc, 1);
5758 }
5759 if (ret) {
5760 dev_err(&hdev->pdev->dev,
5761 "lookup mac addr failed for cmd_send, ret =%d.\n",
5762 ret);
5763 return ret;
5764 }
5765 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
5766 retval = le16_to_cpu(desc[0].retval);
5768 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
5769 HCLGE_MAC_VLAN_LKUP);
5770 }
5772 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
5773 struct hclge_mac_vlan_tbl_entry_cmd *req,
5774 struct hclge_desc *mc_desc)
5775 {
5776 struct hclge_dev *hdev = vport->back;
5777 int cfg_status;
5778 u8 resp_code;
5779 u16 retval;
5780 int ret;
5782 if (!mc_desc) {
5783 struct hclge_desc desc;
5785 hclge_cmd_setup_basic_desc(&desc,
5786 HCLGE_OPC_MAC_VLAN_ADD,
5787 false);
5788 memcpy(desc.data, req,
5789 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5790 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5791 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5792 retval = le16_to_cpu(desc.retval);
5794 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
5795 resp_code,
5796 HCLGE_MAC_VLAN_ADD);
5797 } else {
5798 hclge_cmd_reuse_desc(&mc_desc[0], false);
5799 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5800 hclge_cmd_reuse_desc(&mc_desc[1], false);
5801 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5802 hclge_cmd_reuse_desc(&mc_desc[2], false);
5803 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
5804 memcpy(mc_desc[0].data, req,
5805 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5806 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
5807 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
5808 retval = le16_to_cpu(mc_desc[0].retval);
5810 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
5811 resp_code,
5812 HCLGE_MAC_VLAN_ADD);
5813 }
5815 if (ret) {
5816 dev_err(&hdev->pdev->dev,
5817 "add mac addr failed for cmd_send, ret =%d.\n",
5818 ret);
5819 return ret;
5820 }
5822 return cfg_status;
5823 }
5825 static int hclge_init_umv_space(struct hclge_dev *hdev)
5826 {
5827 u16 allocated_size = 0;
5828 int ret;
5830 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
5831 true);
5832 if (ret)
5833 return ret;
5835 if (allocated_size < hdev->wanted_umv_size)
5836 dev_warn(&hdev->pdev->dev,
5837 "Alloc umv space failed, want %d, get %d\n",
5838 hdev->wanted_umv_size, allocated_size);
5840 mutex_init(&hdev->umv_mutex);
5841 hdev->max_umv_size = allocated_size;
5842 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
5843 hdev->share_umv_size = hdev->priv_umv_size +
5844 hdev->max_umv_size % (hdev->num_req_vfs + 2);
5846 return 0;
5847 }
5849 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
5850 {
5851 int ret;
5853 if (hdev->max_umv_size > 0) {
5854 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
5855 false);
5856 if (ret)
5857 return ret;
5858 hdev->max_umv_size = 0;
5859 }
5860 mutex_destroy(&hdev->umv_mutex);
5862 return 0;
5863 }
5865 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
5866 u16 *allocated_size, bool is_alloc)
5867 {
5868 struct hclge_umv_spc_alc_cmd *req;
5869 struct hclge_desc desc;
5870 int ret;
5872 req = (struct hclge_umv_spc_alc_cmd *)desc.data;
5873 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
5874 hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc);
5875 req->space_size = cpu_to_le32(space_size);
5877 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5878 if (ret) {
5879 dev_err(&hdev->pdev->dev,
5880 "%s umv space failed for cmd_send, ret =%d\n",
5881 is_alloc ? "allocate" : "free", ret);
5882 return ret;
5883 }
5885 if (is_alloc && allocated_size)
5886 *allocated_size = le32_to_cpu(desc.data[1]);
5888 return 0;
5889 }
5891 static void hclge_reset_umv_space(struct hclge_dev *hdev)
5892 {
5893 struct hclge_vport *vport;
5894 int i;
5896 for (i = 0; i < hdev->num_alloc_vport; i++) {
5897 vport = &hdev->vport[i];
5898 vport->used_umv_num = 0;
5899 }
5901 mutex_lock(&hdev->umv_mutex);
5902 hdev->share_umv_size = hdev->priv_umv_size +
5903 hdev->max_umv_size % (hdev->num_req_vfs + 2);
5904 mutex_unlock(&hdev->umv_mutex);
5905 }
5907 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
5908 {
5909 struct hclge_dev *hdev = vport->back;
5910 bool is_full;
5912 mutex_lock(&hdev->umv_mutex);
5913 is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
5914 hdev->share_umv_size == 0);
5915 mutex_unlock(&hdev->umv_mutex);
5917 return is_full;
5918 }
5920 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
5921 {
5922 struct hclge_dev *hdev = vport->back;
5924 mutex_lock(&hdev->umv_mutex);
5925 if (is_free) {
5926 if (vport->used_umv_num > hdev->priv_umv_size)
5927 hdev->share_umv_size++;
5929 if (vport->used_umv_num > 0)
5930 vport->used_umv_num--;
5931 } else {
5932 if (vport->used_umv_num >= hdev->priv_umv_size &&
5933 hdev->share_umv_size > 0)
5934 hdev->share_umv_size--;
5935 vport->used_umv_num++;
5936 }
5937 mutex_unlock(&hdev->umv_mutex);
5938 }
5940 static int hclge_add_uc_addr(struct hnae3_handle *handle,
5941 const unsigned char *addr)
5942 {
5943 struct hclge_vport *vport = hclge_get_vport(handle);
5945 return hclge_add_uc_addr_common(vport, addr);
5946 }
5948 int hclge_add_uc_addr_common(struct hclge_vport *vport,
5949 const unsigned char *addr)
5950 {
5951 struct hclge_dev *hdev = vport->back;
5952 struct hclge_mac_vlan_tbl_entry_cmd req;
5953 struct hclge_desc desc;
5954 u16 egress_port = 0;
5955 int ret;
5957 /* mac addr check */
5958 if (is_zero_ether_addr(addr) ||
5959 is_broadcast_ether_addr(addr) ||
5960 is_multicast_ether_addr(addr)) {
5961 dev_err(&hdev->pdev->dev,
5962 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
5963 addr,
5964 is_zero_ether_addr(addr),
5965 is_broadcast_ether_addr(addr),
5966 is_multicast_ether_addr(addr));
5967 return -EINVAL;
5968 }
5970 memset(&req, 0, sizeof(req));
5972 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
5973 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
5975 req.egress_port = cpu_to_le16(egress_port);
5977 hclge_prepare_mac_addr(&req, addr, false);
5979 /* Lookup the mac address in the mac_vlan table, and add
5980 * it if the entry is nonexistent. Repeated unicast entry
5981 * is not allowed in the mac vlan table.
5982 */
5983 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
5984 if (ret == -ENOENT) {
5985 if (!hclge_is_umv_space_full(vport)) {
5986 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
5987 if (!ret)
5988 hclge_update_umv_space(vport, false);
5989 return ret;
5990 }
5992 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
5993 hdev->priv_umv_size);
5995 return -ENOSPC;
5996 }
5998 /* check if we just hit the duplicate */
5999 if (!ret) {
6000 dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
6001 vport->vport_id, addr);
6002 return 0;
6003 }
6005 dev_err(&hdev->pdev->dev,
6006 "PF failed to add unicast entry(%pM) in the MAC table\n",
6007 addr);
6009 return ret;
6010 }
6012 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
6013 const unsigned char *addr)
6014 {
6015 struct hclge_vport *vport = hclge_get_vport(handle);
6017 return hclge_rm_uc_addr_common(vport, addr);
6018 }
6020 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
6021 const unsigned char *addr)
6022 {
6023 struct hclge_dev *hdev = vport->back;
6024 struct hclge_mac_vlan_tbl_entry_cmd req;
6025 int ret;
6027 /* mac addr check */
6028 if (is_zero_ether_addr(addr) ||
6029 is_broadcast_ether_addr(addr) ||
6030 is_multicast_ether_addr(addr)) {
6031 dev_dbg(&hdev->pdev->dev,
6032 "Remove mac err! invalid mac:%pM.\n",
6033 addr);
6034 return -EINVAL;
6035 }
6037 memset(&req, 0, sizeof(req));
6038 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6039 hclge_prepare_mac_addr(&req, addr, false);
6040 ret = hclge_remove_mac_vlan_tbl(vport, &req);
6041 if (!ret)
6042 hclge_update_umv_space(vport, true);
6044 return ret;
6045 }
6047 static int hclge_add_mc_addr(struct hnae3_handle *handle,
6048 const unsigned char *addr)
6049 {
6050 struct hclge_vport *vport = hclge_get_vport(handle);
6052 return hclge_add_mc_addr_common(vport, addr);
6053 }
6055 int hclge_add_mc_addr_common(struct hclge_vport *vport,
6056 const unsigned char *addr)
6057 {
6058 struct hclge_dev *hdev = vport->back;
6059 struct hclge_mac_vlan_tbl_entry_cmd req;
6060 struct hclge_desc desc[3];
6061 int status;
6063 /* mac addr check */
6064 if (!is_multicast_ether_addr(addr)) {
6065 dev_err(&hdev->pdev->dev,
6066 "Add mc mac err! invalid mac:%pM.\n",
6067 addr);
6068 return -EINVAL;
6069 }
6070 memset(&req, 0, sizeof(req));
6071 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6072 hclge_prepare_mac_addr(&req, addr, true);
6073 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6074 if (!status) {
6075 /* This mac addr exists, update VFID for it */
6076 hclge_update_desc_vfid(desc, vport->vport_id, false);
6077 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6078 } else {
6079 /* This mac addr does not exist, add new entry for it */
6080 memset(desc[0].data, 0, sizeof(desc[0].data));
6081 memset(desc[1].data, 0, sizeof(desc[0].data));
6082 memset(desc[2].data, 0, sizeof(desc[0].data));
6083 hclge_update_desc_vfid(desc, vport->vport_id, false);
6084 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6085 }
6087 if (status == -ENOSPC)
6088 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
6090 return status;
6091 }
6093 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
6094 const unsigned char *addr)
6095 {
6096 struct hclge_vport *vport = hclge_get_vport(handle);
6098 return hclge_rm_mc_addr_common(vport, addr);
6099 }
6101 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
6102 const unsigned char *addr)
6103 {
6104 struct hclge_dev *hdev = vport->back;
6105 struct hclge_mac_vlan_tbl_entry_cmd req;
6106 enum hclge_cmd_status status;
6107 struct hclge_desc desc[3];
6109 /* mac addr check */
6110 if (!is_multicast_ether_addr(addr)) {
6111 dev_dbg(&hdev->pdev->dev,
6112 "Remove mc mac err! invalid mac:%pM.\n",
6113 addr);
6114 return -EINVAL;
6115 }
6117 memset(&req, 0, sizeof(req));
6118 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6119 hclge_prepare_mac_addr(&req, addr, true);
6120 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6121 if (!status) {
6122 /* This mac addr exists, remove this handle's VFID for it */
6123 hclge_update_desc_vfid(desc, vport->vport_id, true);
6125 if (hclge_is_all_function_id_zero(desc))
6126 /* All the vfid is zero, so need to delete this entry */
6127 status = hclge_remove_mac_vlan_tbl(vport, &req);
6128 else
6129 /* Not all the vfid is zero, update the vfid */
6130 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6131 } else {
6133 /* Maybe this mac address is in mta table, but it cannot be
6134 * deleted here because an entry of mta represents an address
6135 * range rather than a specific address. the delete action to
6136 * all entries will take effect in update_mta_status called by
6137 * hns3_nic_set_rx_mode.
6138 */
6139 status = 0;
6140 }
6142 return status;
6143 }
6145 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6146 enum HCLGE_MAC_ADDR_TYPE mac_type)
6147 {
6148 struct hclge_vport_mac_addr_cfg *mac_cfg;
6149 struct list_head *list;
6151 if (!vport->vport_id)
6152 return;
6154 mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
6155 if (!mac_cfg)
6156 return;
6158 mac_cfg->hd_tbl_status = true;
6159 memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
6161 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6162 &vport->uc_mac_list : &vport->mc_mac_list;
6164 list_add_tail(&mac_cfg->node, list);
6165 }
6167 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6168 bool is_write_tbl,
6169 enum HCLGE_MAC_ADDR_TYPE mac_type)
6170 {
6171 struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6172 struct list_head *list;
6173 bool uc_flag, mc_flag;
6175 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6176 &vport->uc_mac_list : &vport->mc_mac_list;
6178 uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
6179 mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
6181 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6182 if (strncmp(mac_cfg->mac_addr, mac_addr, ETH_ALEN) == 0) {
6183 if (uc_flag && mac_cfg->hd_tbl_status)
6184 hclge_rm_uc_addr_common(vport, mac_addr);
6186 if (mc_flag && mac_cfg->hd_tbl_status)
6187 hclge_rm_mc_addr_common(vport, mac_addr);
6189 list_del(&mac_cfg->node);
6190 kfree(mac_cfg);
6191 break;
6192 }
6193 }
6194 }
6196 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
6197 enum HCLGE_MAC_ADDR_TYPE mac_type)
6198 {
6199 struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6200 struct list_head *list;
6202 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6203 &vport->uc_mac_list : &vport->mc_mac_list;
6205 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6206 if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
6207 hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
6209 if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
6210 hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
6212 mac_cfg->hd_tbl_status = false;
6213 if (is_del_list) {
6214 list_del(&mac_cfg->node);
6215 kfree(mac_cfg);
6216 }
6217 }
6218 }
6220 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
6221 {
6222 struct hclge_vport_mac_addr_cfg *mac, *tmp;
6223 struct hclge_vport *vport;
6224 int i;
6226 mutex_lock(&hdev->vport_cfg_mutex);
6227 for (i = 0; i < hdev->num_alloc_vport; i++) {
6228 vport = &hdev->vport[i];
6229 list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
6230 list_del(&mac->node);
6231 kfree(mac);
6232 }
6234 list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
6235 list_del(&mac->node);
6236 kfree(mac);
6237 }
6238 }
6239 mutex_unlock(&hdev->vport_cfg_mutex);
6240 }
6242 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
6243 u16 cmdq_resp, u8 resp_code)
6244 {
6245 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0
6246 #define HCLGE_ETHERTYPE_ALREADY_ADD 1
6247 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
6248 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3
6250 int return_status;
6252 if (cmdq_resp) {
6253 dev_err(&hdev->pdev->dev,
6254 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
6255 cmdq_resp);
6256 return -EIO;
6257 }
6259 switch (resp_code) {
6260 case HCLGE_ETHERTYPE_SUCCESS_ADD:
6261 case HCLGE_ETHERTYPE_ALREADY_ADD:
6262 return_status = 0;
6263 break;
6264 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
6265 dev_err(&hdev->pdev->dev,
6266 "add mac ethertype failed for manager table overflow.\n");
6267 return_status = -EIO;
6268 break;
6269 case HCLGE_ETHERTYPE_KEY_CONFLICT:
6270 dev_err(&hdev->pdev->dev,
6271 "add mac ethertype failed for key conflict.\n");
6272 return_status = -EIO;
6273 break;
6274 default:
6275 dev_err(&hdev->pdev->dev,
6276 "add mac ethertype failed for undefined, code=%d.\n",
6277 resp_code);
6278 return_status = -EIO;
6279 }
6281 return return_status;
6282 }
6284 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
6285 const struct hclge_mac_mgr_tbl_entry_cmd *req)
6286 {
6287 struct hclge_desc desc;
6288 u8 resp_code;
6289 u16 retval;
6290 int ret;
6292 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
6293 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
6295 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6296 if (ret) {
6297 dev_err(&hdev->pdev->dev,
6298 "add mac ethertype failed for cmd_send, ret =%d.\n",
6299 ret);
6300 return ret;
6301 }
6303 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6304 retval = le16_to_cpu(desc.retval);
6306 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
6307 }
6309 static int init_mgr_tbl(struct hclge_dev *hdev)
6310 {
6311 int ret;
6312 int i;
6314 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
6315 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
6316 if (ret) {
6317 dev_err(&hdev->pdev->dev,
6318 "add mac ethertype failed, ret =%d.\n",
6319 ret);
6320 return ret;
6321 }
6322 }
6324 return 0;
6325 }
6327 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
6328 {
6329 struct hclge_vport *vport = hclge_get_vport(handle);
6330 struct hclge_dev *hdev = vport->back;
6332 ether_addr_copy(p, hdev->hw.mac.mac_addr);
6333 }
6335 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
6336 bool is_first)
6337 {
6338 const unsigned char *new_addr = (const unsigned char *)p;
6339 struct hclge_vport *vport = hclge_get_vport(handle);
6340 struct hclge_dev *hdev = vport->back;
6341 int ret;
6343 /* mac addr check */
6344 if (is_zero_ether_addr(new_addr) ||
6345 is_broadcast_ether_addr(new_addr) ||
6346 is_multicast_ether_addr(new_addr)) {
6347 dev_err(&hdev->pdev->dev,
6348 "Change uc mac err! invalid mac:%pM.\n",
6349 new_addr);
6350 return -EINVAL;
6351 }
6353 if ((!is_first || is_kdump_kernel()) &&
6354 hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
6355 dev_warn(&hdev->pdev->dev,
6356 "remove old uc mac address fail.\n");
6358 ret = hclge_add_uc_addr(handle, new_addr);
6359 if (ret) {
6360 dev_err(&hdev->pdev->dev,
6361 "add uc mac address fail, ret =%d.\n",
6362 ret);
6364 if (!is_first &&
6365 hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
6366 dev_err(&hdev->pdev->dev,
6367 "restore uc mac address fail.\n");
6369 return -EIO;
6370 }
6372 ret = hclge_pause_addr_cfg(hdev, new_addr);
6373 if (ret) {
6374 dev_err(&hdev->pdev->dev,
6375 "configure mac pause address fail, ret =%d.\n",
6376 ret);
6377 return -EIO;
6378 }
6380 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
6382 return 0;
6383 }
6385 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
6386 int cmd)
6387 {
6388 struct hclge_vport *vport = hclge_get_vport(handle);
6389 struct hclge_dev *hdev = vport->back;
6391 if (!hdev->hw.mac.phydev)
6392 return -EOPNOTSUPP;
6394 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
6395 }
6397 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
6398 u8 fe_type, bool filter_en, u8 vf_id)
6399 {
6400 struct hclge_vlan_filter_ctrl_cmd *req;
6401 struct hclge_desc desc;
6402 int ret;
6404 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
6406 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
6407 req->vlan_type = vlan_type;
6408 req->vlan_fe = filter_en ? fe_type : 0;
6409 req->vf_id = vf_id;
6411 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6412 if (ret)
6413 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
6414 ret);
6416 return ret;
6417 }
6419 #define HCLGE_FILTER_TYPE_VF 0
6420 #define HCLGE_FILTER_TYPE_PORT 1
6421 #define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0)
6422 #define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0)
6423 #define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1)
6424 #define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
6425 #define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3)
6426 #define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \
6427 | HCLGE_FILTER_FE_ROCE_EGRESS_B)
6428 #define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \
6429 | HCLGE_FILTER_FE_ROCE_INGRESS_B)
6431 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
6432 {
6433 struct hclge_vport *vport = hclge_get_vport(handle);
6434 struct hclge_dev *hdev = vport->back;
6436 if (hdev->pdev->revision >= 0x21) {
6437 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6438 HCLGE_FILTER_FE_EGRESS, enable, 0);
6439 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
6440 HCLGE_FILTER_FE_INGRESS, enable, 0);
6441 } else {
6442 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6443 HCLGE_FILTER_FE_EGRESS_V1_B, enable,
6444 0);
6445 }
6446 if (enable)
6447 handle->netdev_flags |= HNAE3_VLAN_FLTR;
6448 else
6449 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
6450 }
6452 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
6453 bool is_kill, u16 vlan, u8 qos,
6454 __be16 proto)
6455 {
6456 #define HCLGE_MAX_VF_BYTES 16
6457 struct hclge_vlan_filter_vf_cfg_cmd *req0;
6458 struct hclge_vlan_filter_vf_cfg_cmd *req1;
6459 struct hclge_desc desc[2];
6460 u8 vf_byte_val;
6461 u8 vf_byte_off;
6462 int ret;
6464 hclge_cmd_setup_basic_desc(&desc[0],
6465 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
6466 hclge_cmd_setup_basic_desc(&desc[1],
6467 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
6469 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6471 vf_byte_off = vfid / 8;
6472 vf_byte_val = 1 << (vfid % 8);
6474 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
6475 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
6477 req0->vlan_id = cpu_to_le16(vlan);
6478 req0->vlan_cfg = is_kill;
6480 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
6481 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
6482 else
6483 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
6485 ret = hclge_cmd_send(&hdev->hw, desc, 2);
6486 if (ret) {
6487 dev_err(&hdev->pdev->dev,
6488 "Send vf vlan command fail, ret =%d.\n",
6489 ret);
6490 return ret;
6491 }
6493 if (!is_kill) {
6494 #define HCLGE_VF_VLAN_NO_ENTRY 2
6495 if (!req0->resp_code || req0->resp_code == 1)
6496 return 0;
6498 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
6499 dev_warn(&hdev->pdev->dev,
6500 "vf vlan table is full, vf vlan filter is disabled\n");
6501 return 0;
6502 }
6504 dev_err(&hdev->pdev->dev,
6505 "Add vf vlan filter fail, ret =%d.\n",
6506 req0->resp_code);
6507 } else {
6508 #define HCLGE_VF_VLAN_DEL_NO_FOUND 1
6509 if (!req0->resp_code)
6510 return 0;
6512 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
6513 dev_warn(&hdev->pdev->dev,
6514 "vlan %d filter is not in vf vlan table\n",
6515 vlan);
6516 return 0;
6517 }
6519 dev_err(&hdev->pdev->dev,
6520 "Kill vf vlan filter fail, ret =%d.\n",
6521 req0->resp_code);
6522 }
6524 return -EIO;
6525 }
6527 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
6528 u16 vlan_id, bool is_kill)
6529 {
6530 struct hclge_vlan_filter_pf_cfg_cmd *req;
6531 struct hclge_desc desc;
6532 u8 vlan_offset_byte_val;
6533 u8 vlan_offset_byte;
6534 u8 vlan_offset_160;
6535 int ret;
6537 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
6539 vlan_offset_160 = vlan_id / 160;
6540 vlan_offset_byte = (vlan_id % 160) / 8;
6541 vlan_offset_byte_val = 1 << (vlan_id % 8);
6543 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
6544 req->vlan_offset = vlan_offset_160;
6545 req->vlan_cfg = is_kill;
6546 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
6548 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6549 if (ret)
6550 dev_err(&hdev->pdev->dev,
6551 "port vlan command, send fail, ret =%d.\n", ret);
6553 return ret;
6554 }
6555 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
6556 u16 vport_id, u16 vlan_id, u8 qos,
6557 bool is_kill)
6558 {
6559 u16 vport_idx, vport_num = 0;
6560 int ret;
6562 if (is_kill && !vlan_id)
6563 return 0;
6565 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
6566 qos, proto);
6567 if (ret) {
6568 dev_err(&hdev->pdev->dev,
6569 "Set %d vport vlan filter config fail, ret =%d.\n",
6570 vport_id, ret);
6571 return ret;
6572 }
6574 /* vlan 0 may be added twice when 8021q module is enabled */
6575 if (!is_kill && !vlan_id &&
6576 test_bit(vport_id, hdev->vlan_table[vlan_id]))
6577 return 0;
6579 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
6580 dev_err(&hdev->pdev->dev,
6581 "Add port vlan failed, vport %d is already in vlan %d\n",
6582 vport_id, vlan_id);
6583 return -EINVAL;
6584 }
6586 if (is_kill &&
6587 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
6588 dev_err(&hdev->pdev->dev,
6589 "Delete port vlan failed, vport %d is not in vlan %d\n",
6590 vport_id, vlan_id);
6591 return -EINVAL;
6592 }
6594 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
6595 vport_num++;
6597 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
6598 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
6599 is_kill);
6601 return ret;
6602 }
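/* The TX vtag config below controls, per vport, which tags the hardware
 * accepts from the stack and whether a default tag1/tag2 is inserted on
 * transmit; the RX counterpart further down controls tag stripping.
 */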
6604 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
6605 {
6606 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
6607 struct hclge_vport_vtag_tx_cfg_cmd *req;
6608 struct hclge_dev *hdev = vport->back;
6609 struct hclge_desc desc;
6610 int status;
6612 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
6614 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
6615 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
6616 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
6617 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
6618 vcfg->accept_tag1 ? 1 : 0);
6619 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
6620 vcfg->accept_untag1 ? 1 : 0);
6621 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
6622 vcfg->accept_tag2 ? 1 : 0);
6623 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
6624 vcfg->accept_untag2 ? 1 : 0);
6625 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
6626 vcfg->insert_tag1_en ? 1 : 0);
6627 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
6628 vcfg->insert_tag2_en ? 1 : 0);
6629 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
6631 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
6632 req->vf_bitmap[req->vf_offset] =
6633 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
6635 status = hclge_cmd_send(&hdev->hw, &desc, 1);
6636 if (status)
6637 dev_err(&hdev->pdev->dev,
6638 "Send port txvlan cfg command fail, ret =%d\n",
6639 status);
6641 return status;
6642 }
6644 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
6645 {
6646 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
6647 struct hclge_vport_vtag_rx_cfg_cmd *req;
6648 struct hclge_dev *hdev = vport->back;
6649 struct hclge_desc desc;
6650 int status;
6652 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
6654 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
6655 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
6656 vcfg->strip_tag1_en ? 1 : 0);
6657 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
6658 vcfg->strip_tag2_en ? 1 : 0);
6659 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
6660 vcfg->vlan1_vlan_prionly ? 1 : 0);
6661 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
6662 vcfg->vlan2_vlan_prionly ? 1 : 0);
6664 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
6665 req->vf_bitmap[req->vf_offset] =
6666 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
6668 status = hclge_cmd_send(&hdev->hw, &desc, 1);
6669 if (status)
6670 dev_err(&hdev->pdev->dev,
6671 "Send port rxvlan cfg command fail, ret =%d\n",
6672 status);
6674 return status;
6675 }
6677 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
6678 u16 port_base_vlan_state,
6679 u16 vlan_tag)
6680 {
6681 int ret;
6683 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
6684 vport->txvlan_cfg.accept_tag1 = true;
6685 vport->txvlan_cfg.insert_tag1_en = false;
6686 vport->txvlan_cfg.default_tag1 = 0;
6687 } else {
6688 vport->txvlan_cfg.accept_tag1 = false;
6689 vport->txvlan_cfg.insert_tag1_en = true;
6690 vport->txvlan_cfg.default_tag1 = vlan_tag;
6691 }
6693 vport->txvlan_cfg.accept_untag1 = true;
6695 /* accept_tag2 and accept_untag2 are not supported on
6696 * pdev revision(0x20); newer revisions support them,
6697 * but these two fields cannot be configured by the user.
6698 */
6699 vport->txvlan_cfg.accept_tag2 = true;
6700 vport->txvlan_cfg.accept_untag2 = true;
6701 vport->txvlan_cfg.insert_tag2_en = false;
6702 vport->txvlan_cfg.default_tag2 = 0;
6704 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
6705 vport->rxvlan_cfg.strip_tag1_en = false;
6706 vport->rxvlan_cfg.strip_tag2_en =
6707 vport->rxvlan_cfg.rx_vlan_offload_en;
6708 } else {
6709 vport->rxvlan_cfg.strip_tag1_en =
6710 vport->rxvlan_cfg.rx_vlan_offload_en;
6711 vport->rxvlan_cfg.strip_tag2_en = true;
6712 }
6713 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
6714 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
6716 ret = hclge_set_vlan_tx_offload_cfg(vport);
6717 if (ret)
6718 return ret;
6720 return hclge_set_vlan_rx_offload_cfg(vport);
6721 }
6723 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
6724 {
6725 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
6726 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
6727 struct hclge_desc desc;
6728 int status;
6730 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
6731 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
6732 rx_req->ot_fst_vlan_type =
6733 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
6734 rx_req->ot_sec_vlan_type =
6735 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
6736 rx_req->in_fst_vlan_type =
6737 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
6738 rx_req->in_sec_vlan_type =
6739 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
6741 status = hclge_cmd_send(&hdev->hw, &desc, 1);
6743 dev_err(&hdev->pdev->dev,
6744 "Send rxvlan protocol type command fail, ret =%d\n",
6749 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
6751 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
6752 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
6753 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
6755 status = hclge_cmd_send(&hdev->hw, &desc, 1);
6757 dev_err(&hdev->pdev->dev,
6758 "Send txvlan protocol type command fail, ret =%d\n",
static int hclge_init_vlan_config(struct hclge_dev *hdev)
{
#define HCLGE_DEF_VLAN_TYPE	0x8100

	struct hnae3_handle *handle = &hdev->vport[0].nic;
	struct hclge_vport *vport;
	int ret;
	int i;

	if (hdev->pdev->revision >= 0x21) {
		/* for revision 0x21, vf vlan filter is per function */
		for (i = 0; i < hdev->num_alloc_vport; i++) {
			vport = &hdev->vport[i];
			ret = hclge_set_vlan_filter_ctrl(hdev,
							 HCLGE_FILTER_TYPE_VF,
							 HCLGE_FILTER_FE_EGRESS,
							 true,
							 vport->vport_id);
			if (ret)
				return ret;
		}

		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
						 HCLGE_FILTER_FE_INGRESS, true,
						 0);
		if (ret)
			return ret;
	} else {
		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
						 HCLGE_FILTER_FE_EGRESS_V1_B,
						 true, 0);
		if (ret)
			return ret;
	}

	handle->netdev_flags |= HNAE3_VLAN_FLTR;

	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;

	ret = hclge_set_vlan_protocol_type(hdev);
	if (ret)
		return ret;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		u16 vlan_tag;

		vport = &hdev->vport[i];
		vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;

		ret = hclge_vlan_offload_cfg(vport,
					     vport->port_base_vlan_cfg.state,
					     vlan_tag);
		if (ret)
			return ret;
	}

	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
}
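/* The per-vport VLAN list below shadows the user's VLAN configuration.
 * hd_tbl_status marks whether an entry has actually been written to the
 * hardware VLAN filter table, so entries can be replayed (e.g. after
 * port based VLAN is turned off again) or cleaned up in one pass.
 */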
static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
				       bool writen_to_tbl)
{
	struct hclge_vport_vlan_cfg *vlan;

	/* vlan 0 is reserved */
	if (!vlan_id)
		return;

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return;

	vlan->hd_tbl_status = writen_to_tbl;
	vlan->vlan_id = vlan_id;

	list_add_tail(&vlan->node, &vport->vlan_list);
}
static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;
	int ret;

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		if (!vlan->hd_tbl_status) {
			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
						       vport->vport_id,
						       vlan->vlan_id, 0, false);
			if (ret) {
				dev_err(&hdev->pdev->dev,
					"restore vport vlan list failed, ret=%d\n",
					ret);
				return ret;
			}
		}
		vlan->hd_tbl_status = true;
	}

	return 0;
}
static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
				      bool is_write_tbl)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		if (vlan->vlan_id == vlan_id) {
			if (is_write_tbl && vlan->hd_tbl_status)
				hclge_set_vlan_filter_hw(hdev,
							 htons(ETH_P_8021Q),
							 vport->vport_id,
							 vlan_id, 0,
							 true);

			list_del(&vlan->node);
			kfree(vlan);
			break;
		}
	}
}
void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		if (vlan->hd_tbl_status)
			hclge_set_vlan_filter_hw(hdev,
						 htons(ETH_P_8021Q),
						 vport->vport_id,
						 vlan->vlan_id, 0,
						 true);

		vlan->hd_tbl_status = false;
		if (is_del_list) {
			list_del(&vlan->node);
			kfree(vlan);
		}
	}
}
void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_vport *vport;
	int i;

	mutex_lock(&hdev->vport_cfg_mutex);
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
			list_del(&vlan->node);
			kfree(vlan);
		}
	}
	mutex_unlock(&hdev->vport_cfg_mutex);
}
int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		vport->rxvlan_cfg.strip_tag1_en = false;
		vport->rxvlan_cfg.strip_tag2_en = enable;
	} else {
		vport->rxvlan_cfg.strip_tag1_en = enable;
		vport->rxvlan_cfg.strip_tag2_en = true;
	}
	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
	vport->rxvlan_cfg.rx_vlan_offload_en = enable;

	return hclge_set_vlan_rx_offload_cfg(vport);
}
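/* Switch the hardware filter between the user VLAN entries and the single
 * port based VLAN entry: on enable, drop the user entries from hardware
 * (the vport list keeps them) and install the new port VLAN; on disable,
 * remove the old port VLAN and write the list entries back.
 */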
static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
					    u16 port_base_vlan_state,
					    struct hclge_vlan_info *new_info,
					    struct hclge_vlan_info *old_info)
{
	struct hclge_dev *hdev = vport->back;
	int ret;

	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
		hclge_rm_vport_all_vlan_table(vport, false);
		return hclge_set_vlan_filter_hw(hdev,
						htons(new_info->vlan_proto),
						vport->vport_id,
						new_info->vlan_tag,
						new_info->qos, false);
	}

	ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
				       vport->vport_id, old_info->vlan_tag,
				       old_info->qos, true);
	if (ret)
		return ret;

	return hclge_add_vport_all_vlan_table(vport);
}
int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
				    struct hclge_vlan_info *vlan_info)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_vlan_info *old_vlan_info;
	struct hclge_dev *hdev = vport->back;
	int ret;

	old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;

	ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
	if (ret)
		return ret;

	if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
		/* add new VLAN tag */
		ret = hclge_set_vlan_filter_hw(hdev,
					       htons(vlan_info->vlan_proto),
					       vport->vport_id,
					       vlan_info->vlan_tag,
					       vlan_info->qos, false);
		if (ret)
			return ret;

		/* remove old VLAN tag */
		ret = hclge_set_vlan_filter_hw(hdev,
					       htons(old_vlan_info->vlan_proto),
					       vport->vport_id,
					       old_vlan_info->vlan_tag,
					       old_vlan_info->qos, true);
		if (ret)
			return ret;

		goto update;
	}

	ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
					       old_vlan_info);
	if (ret)
		return ret;

	/* update state only when disable/enable port based VLAN */
	vport->port_base_vlan_cfg.state = state;
	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
	else
		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;

update:
	vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
	vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
	vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;

	return 0;
}
static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
					  enum hnae3_port_base_vlan_state state,
					  u16 vlan)
{
	if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		if (!vlan)
			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
		else
			return HNAE3_PORT_BASE_VLAN_ENABLE;
	} else {
		if (!vlan)
			return HNAE3_PORT_BASE_VLAN_DISABLE;
		else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
		else
			return HNAE3_PORT_BASE_VLAN_MODIFY;
	}
}
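/* .set_vf_vlan_filter handler for "ip link set <dev> vf <id> vlan ...".
 * Revision 0x20 hardware does not support this, and qos is limited to a
 * 3 bit field. For the PF itself (vfid 0) the port based VLAN is updated
 * directly around a client DOWN/UP cycle; for a VF it is either applied
 * immediately (VF not alive) or pushed to the VF through the mailbox.
 */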
static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
				    u16 vlan, u8 qos, __be16 proto)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_vlan_info vlan_info;
	u16 state;
	int ret;

	if (hdev->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	/* qos is a 3 bits value, so can not be bigger than 7 */
	if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
		return -EINVAL;
	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	vport = &hdev->vport[vfid];
	state = hclge_get_port_base_vlan_state(vport,
					       vport->port_base_vlan_cfg.state,
					       vlan);
	if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
		return 0;

	vlan_info.vlan_tag = vlan;
	vlan_info.qos = qos;
	vlan_info.vlan_proto = ntohs(proto);

	/* update port based VLAN for PF */
	if (!vfid) {
		hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
		ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
		hclge_notify_client(hdev, HNAE3_UP_CLIENT);

		return ret;
	}

	if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
		return hclge_update_port_base_vlan_cfg(vport, state,
						       &vlan_info);
	} else {
		ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
							(u8)vfid, state,
							vlan, qos,
							ntohs(proto));
		return ret;
	}
}
int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
			  u16 vlan_id, bool is_kill)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	bool writen_to_tbl = false;
	int ret = 0;

	/* When port based VLAN is enabled, we use the port based VLAN as
	 * the VLAN filter entry. In this case, we don't update the VLAN
	 * filter table when the user adds a new VLAN or removes an
	 * existing one; we just update the vport VLAN list. The VLAN IDs
	 * in the VLAN list won't be written to the VLAN filter table
	 * until port based VLAN is disabled.
	 */
	if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
					       vlan_id, 0, is_kill);
		writen_to_tbl = true;
	}

	if (ret)
		return ret;

	if (is_kill)
		hclge_rm_vport_vlan_table(vport, vlan_id, false);
	else
		hclge_add_vport_vlan_table(vport, vlan_id,
					   writen_to_tbl);

	return 0;
}
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
{
	struct hclge_config_max_frm_size_cmd *req;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);

	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
	req->max_frm_size = cpu_to_le16(new_mps);
	req->min_frm_size = HCLGE_MAC_MIN_FRAME;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_set_vport_mtu(vport, new_mtu);
}
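/* Worked example for the framing overhead added below, using the
 * standard values ETH_HLEN = 14, ETH_FCS_LEN = 4 and VLAN_HLEN = 4:
 * for new_mtu = 1500, max_frm_size = 1500 + 14 + 4 + 2 * 4 = 1526,
 * i.e. the configured MPS leaves room for a double (QinQ) tagged frame.
 */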
int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
{
	struct hclge_dev *hdev = vport->back;
	int i, max_frm_size, ret = 0;

	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
	    max_frm_size > HCLGE_MAC_MAX_FRAME)
		return -EINVAL;

	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
	mutex_lock(&hdev->vport_lock);
	/* VF's mps must fit within hdev->mps */
	if (vport->vport_id && max_frm_size > hdev->mps) {
		mutex_unlock(&hdev->vport_lock);
		return -EINVAL;
	} else if (vport->vport_id) {
		vport->mps = max_frm_size;
		mutex_unlock(&hdev->vport_lock);
		return 0;
	}

	/* PF's mps must be greater than the VFs' mps */
	for (i = 1; i < hdev->num_alloc_vport; i++)
		if (max_frm_size < hdev->vport[i].mps) {
			mutex_unlock(&hdev->vport_lock);
			return -EINVAL;
		}

	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);

	ret = hclge_set_mac_mtu(hdev, max_frm_size);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Change mtu fail, ret =%d\n", ret);
		goto out;
	}

	hdev->mps = max_frm_size;
	vport->mps = max_frm_size;

	ret = hclge_buffer_alloc(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Allocate buffer fail, ret =%d\n", ret);

out:
	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
	mutex_unlock(&hdev->vport_lock);
	return ret;
}
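/* TQP (task queue pair) reset handshake: request the queue reset with
 * HCLGE_OPC_RESET_TQP_QUEUE, poll the same opcode in read mode until the
 * hardware sets ready_to_reset (bounded by HCLGE_TQP_RESET_TRY_TIMES),
 * then deassert the reset with a final command.
 */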
static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
				    bool enable)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
	hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send tqp reset cmd error, status =%d\n", ret);
		return ret;
	}

	return 0;
}
static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get reset status error, status =%d\n", ret);
		return ret;
	}

	return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
}
u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
{
	struct hnae3_queue *queue;
	struct hclge_tqp *tqp;

	queue = handle->kinfo.tqp[queue_id];
	tqp = container_of(queue, struct hclge_tqp, q);

	return tqp->index;
}
int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int reset_try_times = 0;
	int reset_status;
	u16 queue_gid;
	int ret = 0;

	queue_gid = hclge_covert_handle_qid_global(handle, queue_id);

	ret = hclge_tqp_enable(hdev, queue_id, 0, false);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send reset tqp cmd fail, ret = %d\n", ret);
		return ret;
	}

	reset_try_times = 0;
	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
		/* Wait for tqp hw reset */
		msleep(20);
		reset_status = hclge_get_reset_status(hdev, queue_gid);
		if (reset_status)
			break;
	}

	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
		dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
		return ret;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Deassert the soft reset fail, ret = %d\n", ret);

	return ret;
}
void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
{
	struct hclge_dev *hdev = vport->back;
	int reset_try_times = 0;
	int reset_status;
	u16 queue_gid;
	int ret;

	queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
	if (ret) {
		dev_warn(&hdev->pdev->dev,
			 "Send reset tqp cmd fail, ret = %d\n", ret);
		return;
	}

	reset_try_times = 0;
	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
		/* Wait for tqp hw reset */
		msleep(20);
		reset_status = hclge_get_reset_status(hdev, queue_gid);
		if (reset_status)
			break;
	}

	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
		dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
		return;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
	if (ret)
		dev_warn(&hdev->pdev->dev,
			 "Deassert the soft reset fail, ret = %d\n", ret);
}
static u32 hclge_get_fw_version(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->fw_version;
}
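/* Flow control helpers: hclge_cfg_pauseparam() programs the MAC pause
 * mode (skipped while PFC is active), and hclge_cfg_flowctrl() resolves
 * the autoneg result from the local and link-partner advertisements when
 * a PHY is attached.
 */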
static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;

	if (!phydev)
		return;

	phy_set_asym_pause(phydev, rx_en, tx_en);
}
static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
	int ret;

	if (rx_en && tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_FULL;
	else if (rx_en && !tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
	else if (!rx_en && tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
	else
		hdev->fc_mode_last_time = HCLGE_FC_NONE;

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
		return 0;

	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
	if (ret) {
		dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
			ret);
		return ret;
	}

	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;

	return 0;
}
int hclge_cfg_flowctrl(struct hclge_dev *hdev)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;
	u16 remote_advertising = 0;
	u16 local_advertising = 0;
	u32 rx_pause, tx_pause;
	u8 flowctl;

	if (!phydev->link || !phydev->autoneg)
		return 0;

	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);

	if (phydev->pause)
		remote_advertising = LPA_PAUSE_CAP;

	if (phydev->asym_pause)
		remote_advertising |= LPA_PAUSE_ASYM;

	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
					   remote_advertising);
	tx_pause = flowctl & FLOW_CTRL_TX;
	rx_pause = flowctl & FLOW_CTRL_RX;

	if (phydev->duplex == HCLGE_MAC_HALF) {
		tx_pause = 0;
		rx_pause = 0;
	}

	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
}
static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
				 u32 *rx_en, u32 *tx_en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*auto_neg = hclge_get_autoneg(handle);

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
		*rx_en = 0;
		*tx_en = 0;
		return;
	}

	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
		*rx_en = 1;
		*tx_en = 0;
	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
		*tx_en = 1;
		*rx_en = 0;
	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
		*rx_en = 1;
		*tx_en = 1;
	} else {
		*rx_en = 0;
		*tx_en = 0;
	}
}
static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
				u32 rx_en, u32 tx_en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	u32 fc_autoneg;

	fc_autoneg = hclge_get_autoneg(handle);
	if (auto_neg != fc_autoneg) {
		dev_info(&hdev->pdev->dev,
			 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
		return -EOPNOTSUPP;
	}

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
		dev_info(&hdev->pdev->dev,
			 "Priority flow control enabled. Cannot set link flow control.\n");
		return -EOPNOTSUPP;
	}

	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);

	if (!fc_autoneg)
		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);

	/* Only support flow control negotiation for netdev with
	 * phy attached for now.
	 */
	if (!phydev)
		return -EOPNOTSUPP;

	return phy_start_aneg(phydev);
}
static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
					  u8 *auto_neg, u32 *speed, u8 *duplex)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = hdev->hw.mac.autoneg;
}
static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (media_type)
		*media_type = hdev->hw.mac.media_type;
}
static void hclge_get_mdix_mode(struct hnae3_handle *handle,
				u8 *tp_mdix_ctrl, u8 *tp_mdix)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	int mdix_ctrl, mdix, retval, is_resolved;

	if (!phydev) {
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		*tp_mdix = ETH_TP_MDI_INVALID;
		return;
	}

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);

	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
				    HCLGE_PHY_MDIX_CTRL_S);

	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);

	switch (mdix_ctrl) {
	case 0x0:
		*tp_mdix_ctrl = ETH_TP_MDI;
		break;
	case 0x1:
		*tp_mdix_ctrl = ETH_TP_MDI_X;
		break;
	case 0x3:
		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
		break;
	default:
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		break;
	}

	if (!is_resolved)
		*tp_mdix = ETH_TP_MDI_INVALID;
	else if (mdix)
		*tp_mdix = ETH_TP_MDI_X;
	else
		*tp_mdix = ETH_TP_MDI;
}
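/* Client instance lifecycle. NIC (KNIC/UNIC) and RoCE clients are
 * initialized per vport; the RoCE instance is only started once both a
 * NIC client and RoCE support are present, after
 * hclge_init_roce_base_info() has filled in its base information.
 */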
static int hclge_init_client_instance(struct hnae3_client *client,
				      struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i, ret;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];

		switch (client->type) {
		case HNAE3_CLIENT_KNIC:
			hdev->nic_client = client;
			vport->nic.client = client;
			ret = client->ops->init_instance(&vport->nic);
			if (ret)
				goto clear_nic;

			hnae3_set_client_init_flag(client, ae_dev, 1);

			if (hdev->roce_client &&
			    hnae3_dev_roce_supported(hdev)) {
				struct hnae3_client *rc = hdev->roce_client;

				ret = hclge_init_roce_base_info(vport);
				if (ret)
					goto clear_roce;

				ret = rc->ops->init_instance(&vport->roce);
				if (ret)
					goto clear_roce;

				hnae3_set_client_init_flag(hdev->roce_client,
							   ae_dev, 1);
			}

			break;
		case HNAE3_CLIENT_UNIC:
			hdev->nic_client = client;
			vport->nic.client = client;

			ret = client->ops->init_instance(&vport->nic);
			if (ret)
				goto clear_nic;

			hnae3_set_client_init_flag(client, ae_dev, 1);

			break;
		case HNAE3_CLIENT_ROCE:
			if (hnae3_dev_roce_supported(hdev)) {
				hdev->roce_client = client;
				vport->roce.client = client;
			}

			if (hdev->roce_client && hdev->nic_client) {
				ret = hclge_init_roce_base_info(vport);
				if (ret)
					goto clear_roce;

				ret = client->ops->init_instance(&vport->roce);
				if (ret)
					goto clear_roce;

				hnae3_set_client_init_flag(client, ae_dev, 1);
			}

			break;
		default:
			return -EINVAL;
		}
	}

	return 0;

clear_nic:
	hdev->nic_client = NULL;
	vport->nic.client = NULL;
	return ret;
clear_roce:
	hdev->roce_client = NULL;
	vport->roce.client = NULL;
	return ret;
}
static void hclge_uninit_client_instance(struct hnae3_client *client,
					 struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];
		if (hdev->roce_client) {
			hdev->roce_client->ops->uninit_instance(&vport->roce,
								0);
			hdev->roce_client = NULL;
			vport->roce.client = NULL;
		}
		if (client->type == HNAE3_CLIENT_ROCE)
			return;
		if (hdev->nic_client && client->ops->uninit_instance) {
			client->ops->uninit_instance(&vport->nic, 0);
			hdev->nic_client = NULL;
			vport->nic.client = NULL;
		}
	}
}
static int hclge_pci_init(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pdev->dev,
				"can't set consistent PCI DMA");
			goto err_disable_device;
		}
		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
	}

	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->io_base = pcim_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "Can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);

	return 0;
err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);

	return ret;
}
static void hclge_pci_uninit(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pcim_iounmap(pdev, hdev->hw.io_base);
	pci_free_irq_vectors(pdev);
	pci_clear_master(pdev);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}
static void hclge_state_init(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
	set_bit(HCLGE_STATE_DOWN, &hdev->state);
	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}
static void hclge_state_uninit(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	if (hdev->service_timer.function)
		del_timer_sync(&hdev->service_timer);
	if (hdev->reset_timer.function)
		del_timer_sync(&hdev->reset_timer);
	if (hdev->service_task.func)
		cancel_work_sync(&hdev->service_task);
	if (hdev->rst_service_task.func)
		cancel_work_sync(&hdev->rst_service_task);
	if (hdev->mbx_service_task.func)
		cancel_work_sync(&hdev->mbx_service_task);
}
static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
{
#define HCLGE_FLR_WAIT_MS	100
#define HCLGE_FLR_WAIT_CNT	50
	struct hclge_dev *hdev = ae_dev->priv;
	int cnt = 0;

	clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
	clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
	set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
	hclge_reset_event(hdev->pdev, NULL);

	while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
	       cnt++ < HCLGE_FLR_WAIT_CNT)
		msleep(HCLGE_FLR_WAIT_MS);

	if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
		dev_err(&hdev->pdev->dev,
			"flr wait down timeout: %d\n", cnt);
}
static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;

	set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
}
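/* PF probe path. The ordering below matters: PCI and the firmware
 * command queue come up first, then capabilities/configuration, MSI and
 * the misc IRQ, TQP/vport allocation and mapping, optional MDIO, and
 * finally the MAC, VLAN, TM, RSS, manager-table, flow-director and
 * RAS-interrupt setup before the service tasks are armed. Each error
 * label unwinds exactly the steps completed before it.
 */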
static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclge_dev *hdev;
	int ret;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev) {
		ret = -ENOMEM;
		goto out;
	}

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	hdev->reset_type = HNAE3_NONE_RESET;
	hdev->reset_level = HNAE3_FUNC_RESET;
	ae_dev->priv = hdev;
	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;

	mutex_init(&hdev->vport_lock);
	mutex_init(&hdev->vport_cfg_mutex);

	ret = hclge_pci_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "PCI init failed\n");
		goto out;
	}

	/* Firmware command queue initialize */
	ret = hclge_cmd_queue_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
		goto err_pci_uninit;
	}

	/* Firmware command initialize */
	ret = hclge_cmd_init(hdev);
	if (ret)
		goto err_cmd_uninit;

	ret = hclge_get_cap(hdev);
	if (ret) {
		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
			ret);
		goto err_cmd_uninit;
	}

	ret = hclge_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_misc_irq_init(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"Misc IRQ(vector0) init error, ret = %d.\n",
			ret);
		goto err_msi_uninit;
	}

	ret = hclge_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	ret = hclge_alloc_vport(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
		ret = hclge_mac_mdio_config(hdev);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"mdio config fail ret=%d\n", ret);
			goto err_msi_irq_uninit;
		}
	}

	ret = hclge_init_umv_space(hdev);
	if (ret) {
		dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_config_gro(hdev, true);
	if (ret)
		goto err_mdiobus_unreg;

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_tm_schd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	hclge_rss_init_cfg(hdev);
	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = init_mgr_tbl(hdev);
	if (ret) {
		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_init_fd_config(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"fd table init fail, ret=%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_hw_error_set_state(hdev, true);
	if (ret) {
		dev_err(&pdev->dev,
			"fail(%d) to enable hw error interrupts\n", ret);
		goto err_mdiobus_unreg;
	}

	hclge_dcb_ops_set(hdev);

	timer_setup(&hdev->service_timer, hclge_service_timer, 0);
	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
	INIT_WORK(&hdev->service_task, hclge_service_task);
	INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
	INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);

	hclge_clear_all_event_cause(hdev);

	/* Enable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, true);

	hclge_state_init(hdev);
	hdev->last_reset_time = jiffies;

	pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
	return 0;

err_mdiobus_unreg:
	if (hdev->hw.mac.phydev)
		mdiobus_unregister(hdev->hw.mac.mdio_bus);
err_msi_irq_uninit:
	hclge_misc_irq_uninit(hdev);
err_msi_uninit:
	pci_free_irq_vectors(pdev);
err_cmd_uninit:
	hclge_cmd_uninit(hdev);
err_pci_uninit:
	pcim_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
out:
	return ret;
}
static void hclge_stats_clear(struct hclge_dev *hdev)
{
	memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
}
static void hclge_reset_vport_state(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		hclge_vport_stop(vport);
		vport++;
	}
}
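/* Re-initialization after a core/global/IMP reset: unlike first probe,
 * the software structures (vports, TQPs, MSI vectors) survive, so only
 * the hardware state is re-programmed and the vport states are reset.
 */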
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct pci_dev *pdev = ae_dev->pdev;
	int ret;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	hclge_stats_clear(hdev);
	memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));

	ret = hclge_cmd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed\n");
		return ret;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		return ret;
	}

	hclge_reset_umv_space(hdev);

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_config_gro(hdev, true);
	if (ret)
		return ret;

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_tm_init_hw(hdev, true);
	if (ret) {
		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_init_fd_config(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"fd table init fail, ret=%d\n", ret);
		return ret;
	}

	/* Re-enable the hw error interrupts because
	 * the interrupts get disabled on core/global reset.
	 */
	ret = hclge_hw_error_set_state(hdev, true);
	if (ret) {
		dev_err(&pdev->dev,
			"fail(%d) to re-enable HNS hw error interrupts\n",
			ret);
		return ret;
	}

	hclge_reset_vport_state(hdev);

	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
		 HCLGE_DRIVER_NAME);

	return 0;
}
static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_state_uninit(hdev);

	if (mac->phydev)
		mdiobus_unregister(mac->mdio_bus);

	hclge_uninit_umv_space(hdev);

	/* Disable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);

	hclge_hw_error_set_state(hdev, false);
	hclge_cmd_uninit(hdev);
	hclge_misc_irq_uninit(hdev);
	hclge_pci_uninit(hdev);
	mutex_destroy(&hdev->vport_lock);
	hclge_uninit_vport_mac_table(hdev);
	hclge_uninit_vport_vlan_table(hdev);
	mutex_destroy(&hdev->vport_cfg_mutex);
	ae_dev->priv = NULL;
}
static u32 hclge_get_max_channels(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return min_t(u32, hdev->rss_size_max,
		     vport->alloc_tqps / kinfo->num_tc);
}
static void hclge_get_channels(struct hnae3_handle *handle,
			       struct ethtool_channels *ch)
{
	ch->max_combined = hclge_get_max_channels(handle);
	ch->other_count = 1;
	ch->max_other = 1;
	ch->combined_count = handle->kinfo.rss_size;
}
static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
					u16 *alloc_tqps, u16 *max_rss_size)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*alloc_tqps = vport->alloc_tqps;
	*max_rss_size = hdev->rss_size_max;
}
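/* Changing channel counts re-runs the TM vport mapping for the new
 * rss_size, rewrites the RSS TC mode, and, unless the user configured
 * the RSS indirection table (rxfh_configured), re-spreads the table
 * evenly across the new queue range.
 */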
static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
			      bool rxfh_configured)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int cur_rss_size = kinfo->rss_size;
	int cur_tqps = kinfo->num_tqps;
	u16 tc_offset[HCLGE_MAX_TC_NUM];
	u16 tc_valid[HCLGE_MAX_TC_NUM];
	u16 tc_size[HCLGE_MAX_TC_NUM];
	u16 roundup_size;
	u32 *rss_indir;
	int ret, i;

	kinfo->req_rss_size = new_tqps_num;

	ret = hclge_tm_vport_map_update(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
		return ret;
	}

	roundup_size = roundup_pow_of_two(kinfo->rss_size);
	roundup_size = ilog2(roundup_size);
	/* Set the RSS TC mode according to the new RSS size */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = kinfo->rss_size * i;
	}
	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
	if (ret)
		return ret;

	/* RSS indirection table has been configured by user */
	if (rxfh_configured)
		goto out;

	/* Reinitializes the rss indirect table according to the new RSS size */
	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
	if (!rss_indir)
		return -ENOMEM;

	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
		rss_indir[i] = i % kinfo->rss_size;

	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
			ret);

	kfree(rss_indir);

out:
	if (!ret)
		dev_info(&hdev->pdev->dev,
			 "Channels changed, rss_size from %d to %d, tqps from %d to %d",
			 cur_rss_size, kinfo->rss_size,
			 cur_tqps, kinfo->rss_size * kinfo->num_tc);

	return ret;
}
static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
			      u32 *regs_num_64_bit)
{
	struct hclge_desc desc;
	u32 total_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query register number cmd failed, ret = %d.\n", ret);
		return ret;
	}

	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
	*regs_num_64_bit = le32_to_cpu(desc.data[1]);

	total_num = *regs_num_32_bit + *regs_num_64_bit;
	if (!total_num)
		return -EINVAL;

	return 0;
}
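/* Register dump helpers. Registers come back packed in command
 * descriptors; the first descriptor appears to reserve a little space
 * for the query header, which is presumably why regs_num is padded by
 * 2 u32s (or 1 u64) when computing cmd_num, and why descriptor 0 yields
 * correspondingly fewer data words below.
 */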
static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_32_BIT_REG_RTN_DATANUM 8

	struct hclge_desc *desc;
	u32 *reg_val = data;
	__le32 *desc_data;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 32 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le32 *)(&desc[i].data[0]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
		} else {
			desc_data = (__le32 *)(&desc[i]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le32_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}
static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_64_BIT_REG_RTN_DATANUM 4

	struct hclge_desc *desc;
	u64 *reg_val = data;
	__le64 *desc_data;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 64 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le64_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}
#define MAX_SEPARATE_NUM	4
#define SEPARATOR_VALUE		0xFFFFFFFF
#define REG_NUM_PER_LINE	4
#define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
static int hclge_get_regs_len(struct hnae3_handle *handle)
{
	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int ret;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return -EOPNOTSUPP;
	}

	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;

	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
		tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
		regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
}
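/* The ethtool register blob is laid out as: cmdq, common, per-ring and
 * per-vector direct register reads (each group padded to a
 * REG_LEN_PER_LINE boundary with SEPARATOR_VALUE), followed by the
 * firmware-queried 32 bit and 64 bit register sets. This must stay in
 * sync with hclge_get_regs_len() above.
 */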
static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
			   void *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int i, j, reg_um, separator_num;
	u32 *reg = data;
	int ret;

	*version = hdev->fw_version;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return;
	}

	/* fetching per-PF registers values from PF PCIe register space */
	reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (i = 0; i < reg_um; i++)
		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (i = 0; i < reg_um; i++)
		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (j = 0; j < kinfo->num_tqps; j++) {
		for (i = 0; i < reg_um; i++)
			*reg++ = hclge_read_dev(&hdev->hw,
						ring_reg_addr_list[i] +
						0x200 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}

	reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (j = 0; j < hdev->num_msi_used - 1; j++) {
		for (i = 0; i < reg_um; i++)
			*reg++ = hclge_read_dev(&hdev->hw,
						tqp_intr_reg_addr_list[i] +
						4 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}

	/* fetching PF common registers values from firmware */
	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 32 bit register failed, ret = %d.\n", ret);
		return;
	}

	reg += regs_num_32_bit;
	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Get 64 bit register failed, ret = %d.\n", ret);
}
static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
{
	struct hclge_set_led_state_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);

	req = (struct hclge_set_led_state_cmd *)desc.data;
	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
			HCLGE_LED_LOCATE_STATE_S, locate_led_status);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Send set led state cmd error, ret =%d\n", ret);

	return ret;
}
enum hclge_led_status {
	HCLGE_LED_OFF,
	HCLGE_LED_ON,
	HCLGE_LED_NO_CHANGE = 0xFF,
};
static int hclge_set_led_id(struct hnae3_handle *handle,
			    enum ethtool_phys_id_state status)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	switch (status) {
	case ETHTOOL_ID_ACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_ON);
	case ETHTOOL_ID_INACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
	default:
		return -EINVAL;
	}
}
static void hclge_get_link_mode(struct hnae3_handle *handle,
				unsigned long *supported,
				unsigned long *advertising)
{
	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	unsigned int idx = 0;

	for (; idx < size; idx++) {
		supported[idx] = hdev->hw.mac.supported[idx];
		advertising[idx] = hdev->hw.mac.advertising[idx];
	}
}
static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_config_gro(hdev, enable);
}
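/* Glue table between the hnae3 framework and this PF driver: every op
 * the framework may invoke is routed to its hclge implementation above.
 */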
static const struct hnae3_ae_ops hclge_ops = {
	.init_ae_dev = hclge_init_ae_dev,
	.uninit_ae_dev = hclge_uninit_ae_dev,
	.flr_prepare = hclge_flr_prepare,
	.flr_done = hclge_flr_done,
	.init_client_instance = hclge_init_client_instance,
	.uninit_client_instance = hclge_uninit_client_instance,
	.map_ring_to_vector = hclge_map_ring_to_vector,
	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
	.get_vector = hclge_get_vector,
	.put_vector = hclge_put_vector,
	.set_promisc_mode = hclge_set_promisc_mode,
	.set_loopback = hclge_set_loopback,
	.start = hclge_ae_start,
	.stop = hclge_ae_stop,
	.client_start = hclge_client_start,
	.client_stop = hclge_client_stop,
	.get_status = hclge_get_status,
	.get_ksettings_an_result = hclge_get_ksettings_an_result,
	.update_speed_duplex_h = hclge_update_speed_duplex_h,
	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
	.get_media_type = hclge_get_media_type,
	.get_rss_key_size = hclge_get_rss_key_size,
	.get_rss_indir_size = hclge_get_rss_indir_size,
	.get_rss = hclge_get_rss,
	.set_rss = hclge_set_rss,
	.set_rss_tuple = hclge_set_rss_tuple,
	.get_rss_tuple = hclge_get_rss_tuple,
	.get_tc_size = hclge_get_tc_size,
	.get_mac_addr = hclge_get_mac_addr,
	.set_mac_addr = hclge_set_mac_addr,
	.do_ioctl = hclge_do_ioctl,
	.add_uc_addr = hclge_add_uc_addr,
	.rm_uc_addr = hclge_rm_uc_addr,
	.add_mc_addr = hclge_add_mc_addr,
	.rm_mc_addr = hclge_rm_mc_addr,
	.set_autoneg = hclge_set_autoneg,
	.get_autoneg = hclge_get_autoneg,
	.get_pauseparam = hclge_get_pauseparam,
	.set_pauseparam = hclge_set_pauseparam,
	.set_mtu = hclge_set_mtu,
	.reset_queue = hclge_reset_tqp,
	.get_stats = hclge_get_stats,
	.get_mac_pause_stats = hclge_get_mac_pause_stat,
	.update_stats = hclge_update_stats,
	.get_strings = hclge_get_strings,
	.get_sset_count = hclge_get_sset_count,
	.get_fw_version = hclge_get_fw_version,
	.get_mdix_mode = hclge_get_mdix_mode,
	.enable_vlan_filter = hclge_enable_vlan_filter,
	.set_vlan_filter = hclge_set_vlan_filter,
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
	.reset_event = hclge_reset_event,
	.set_default_reset_request = hclge_set_def_reset_request,
	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
	.set_channels = hclge_set_channels,
	.get_channels = hclge_get_channels,
	.get_regs_len = hclge_get_regs_len,
	.get_regs = hclge_get_regs,
	.set_led_id = hclge_set_led_id,
	.get_link_mode = hclge_get_link_mode,
	.add_fd_entry = hclge_add_fd_entry,
	.del_fd_entry = hclge_del_fd_entry,
	.del_all_fd_entries = hclge_del_all_fd_entries,
	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
	.get_fd_rule_info = hclge_get_fd_rule_info,
	.get_fd_all_rules = hclge_get_all_rules,
	.restore_fd_rules = hclge_restore_fd_entries,
	.enable_fd = hclge_enable_fd,
	.dbg_run_cmd = hclge_dbg_run_cmd,
	.handle_hw_ras_error = hclge_handle_hw_ras_error,
	.get_hw_reset_stat = hclge_get_hw_reset_stat,
	.ae_dev_resetting = hclge_ae_dev_resetting,
	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
	.set_gro_en = hclge_gro_en,
	.get_global_queue_id = hclge_covert_handle_qid_global,
	.set_timer_task = hclge_set_timer_task,
	.mac_connect_phy = hclge_mac_connect_phy,
	.mac_disconnect_phy = hclge_mac_disconnect_phy,
};
static struct hnae3_ae_algo ae_algo = {
	.ops = &hclge_ops,
	.pdev_id_table = ae_algo_pci_tbl,
};
static int hclge_init(void)
{
	pr_info("%s is initializing\n", HCLGE_NAME);

	hnae3_register_ae_algo(&ae_algo);

	return 0;
}
static void hclge_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algo);
}
module_init(hclge_init);
module_exit(hclge_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);