net: hns3: always assume no drop TC for performance reason
[linux-block.git] drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
d71d8381
JS
1// SPDX-License-Identifier: GPL-2.0+
2// Copyright (c) 2016-2017 Hisilicon Limited.
46a3df9f
S
3
4#include <linux/acpi.h>
5#include <linux/device.h>
6#include <linux/etherdevice.h>
7#include <linux/init.h>
8#include <linux/interrupt.h>
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/netdevice.h>
12#include <linux/pci.h>
13#include <linux/platform_device.h>
2866ccb2 14#include <linux/if_vlan.h>
962e31bd 15#include <linux/crash_dump.h>
f2f432f2 16#include <net/rtnetlink.h>
46a3df9f 17#include "hclge_cmd.h"
cacde272 18#include "hclge_dcb.h"
46a3df9f 19#include "hclge_main.h"
dde1a86e 20#include "hclge_mbx.h"
46a3df9f
S
21#include "hclge_mdio.h"
22#include "hclge_tm.h"
5a9f0eac 23#include "hclge_err.h"
46a3df9f
S
24#include "hnae3.h"
25
26#define HCLGE_NAME "hclge"
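/* HCLGE_MAC_STATS_FIELD_OFF() records a counter's byte offset inside
 * struct hclge_mac_stats; HCLGE_STATS_READ() then fetches the u64
 * counter at such an offset from a stats structure.
 */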
27#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
46a3df9f 29
b9a400ac
YL
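/* alignment granularity (in bytes) used below for buffer sizes and
 * waterlines handed to the hardware
 */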
30#define HCLGE_BUF_SIZE_UNIT 256
31
e6d7d79d 32static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
46a3df9f 33static int hclge_init_vlan_config(struct hclge_dev *hdev);
4ed340ab 34static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
4f765d3e 35static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
39932473
JS
36static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
37 u16 *allocated_size, bool is_alloc);
46a3df9f
S
38
39static struct hnae3_ae_algo ae_algo;
40
41static const struct pci_device_id ae_algo_pci_tbl[] = {
42 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
43 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
44 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
45 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
46 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
47 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
48 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
e92a0843 49 /* required last entry */
46a3df9f
S
50 {0, }
51};
52
2f550a46
YL
53MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
54
ea4750ca
JS
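/* register address lists, grouped by block (command queue, misc/common,
 * per-ring, per-vector interrupt); presumably consumed by the driver's
 * register-dump support
 */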
55static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
56 HCLGE_CMDQ_TX_ADDR_H_REG,
57 HCLGE_CMDQ_TX_DEPTH_REG,
58 HCLGE_CMDQ_TX_TAIL_REG,
59 HCLGE_CMDQ_TX_HEAD_REG,
60 HCLGE_CMDQ_RX_ADDR_L_REG,
61 HCLGE_CMDQ_RX_ADDR_H_REG,
62 HCLGE_CMDQ_RX_DEPTH_REG,
63 HCLGE_CMDQ_RX_TAIL_REG,
64 HCLGE_CMDQ_RX_HEAD_REG,
65 HCLGE_VECTOR0_CMDQ_SRC_REG,
66 HCLGE_CMDQ_INTR_STS_REG,
67 HCLGE_CMDQ_INTR_EN_REG,
68 HCLGE_CMDQ_INTR_GEN_REG};
69
70static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
71 HCLGE_VECTOR0_OTER_EN_REG,
72 HCLGE_MISC_RESET_STS_REG,
73 HCLGE_MISC_VECTOR_INT_STS,
74 HCLGE_GLOBAL_RESET_REG,
75 HCLGE_FUN_RST_ING,
76 HCLGE_GRO_EN_REG};
77
78static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
79 HCLGE_RING_RX_ADDR_H_REG,
80 HCLGE_RING_RX_BD_NUM_REG,
81 HCLGE_RING_RX_BD_LENGTH_REG,
82 HCLGE_RING_RX_MERGE_EN_REG,
83 HCLGE_RING_RX_TAIL_REG,
84 HCLGE_RING_RX_HEAD_REG,
85 HCLGE_RING_RX_FBD_NUM_REG,
86 HCLGE_RING_RX_OFFSET_REG,
87 HCLGE_RING_RX_FBD_OFFSET_REG,
88 HCLGE_RING_RX_STASH_REG,
89 HCLGE_RING_RX_BD_ERR_REG,
90 HCLGE_RING_TX_ADDR_L_REG,
91 HCLGE_RING_TX_ADDR_H_REG,
92 HCLGE_RING_TX_BD_NUM_REG,
93 HCLGE_RING_TX_PRIORITY_REG,
94 HCLGE_RING_TX_TC_REG,
95 HCLGE_RING_TX_MERGE_EN_REG,
96 HCLGE_RING_TX_TAIL_REG,
97 HCLGE_RING_TX_HEAD_REG,
98 HCLGE_RING_TX_FBD_NUM_REG,
99 HCLGE_RING_TX_OFFSET_REG,
100 HCLGE_RING_TX_EBD_NUM_REG,
101 HCLGE_RING_TX_EBD_OFFSET_REG,
102 HCLGE_RING_TX_BD_ERR_REG,
103 HCLGE_RING_EN_REG};
104
105static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
106 HCLGE_TQP_INTR_GL0_REG,
107 HCLGE_TQP_INTR_GL1_REG,
108 HCLGE_TQP_INTR_GL2_REG,
109 HCLGE_TQP_INTR_RL_REG};
110
46a3df9f 111static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
eb66d503 112 "App Loopback test",
4dc13b96
FL
113 "Serdes serial Loopback test",
114 "Serdes parallel Loopback test",
46a3df9f
S
115 "Phy Loopback test"
116};
117
46a3df9f
S
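/* ethtool MAC statistics table: each entry pairs the string reported to
 * userspace with the byte offset of the matching counter in
 * struct hclge_mac_stats (read via HCLGE_STATS_READ() in
 * hclge_comm_get_stats())
 */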
118static const struct hclge_comm_stats_str g_mac_stats_string[] = {
119 {"mac_tx_mac_pause_num",
120 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
121 {"mac_rx_mac_pause_num",
122 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
d174ea75 123 {"mac_tx_control_pkt_num",
124 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
125 {"mac_rx_control_pkt_num",
126 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
127 {"mac_tx_pfc_pkt_num",
128 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
46a3df9f
S
129 {"mac_tx_pfc_pri0_pkt_num",
130 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
131 {"mac_tx_pfc_pri1_pkt_num",
132 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
133 {"mac_tx_pfc_pri2_pkt_num",
134 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
135 {"mac_tx_pfc_pri3_pkt_num",
136 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
137 {"mac_tx_pfc_pri4_pkt_num",
138 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
139 {"mac_tx_pfc_pri5_pkt_num",
140 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
141 {"mac_tx_pfc_pri6_pkt_num",
142 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
143 {"mac_tx_pfc_pri7_pkt_num",
144 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
d174ea75 145 {"mac_rx_pfc_pkt_num",
146 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
46a3df9f
S
147 {"mac_rx_pfc_pri0_pkt_num",
148 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
149 {"mac_rx_pfc_pri1_pkt_num",
150 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
151 {"mac_rx_pfc_pri2_pkt_num",
152 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
153 {"mac_rx_pfc_pri3_pkt_num",
154 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
155 {"mac_rx_pfc_pri4_pkt_num",
156 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
157 {"mac_rx_pfc_pri5_pkt_num",
158 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
159 {"mac_rx_pfc_pri6_pkt_num",
160 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
161 {"mac_rx_pfc_pri7_pkt_num",
162 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
163 {"mac_tx_total_pkt_num",
164 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
165 {"mac_tx_total_oct_num",
166 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
167 {"mac_tx_good_pkt_num",
168 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
169 {"mac_tx_bad_pkt_num",
170 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
171 {"mac_tx_good_oct_num",
172 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
173 {"mac_tx_bad_oct_num",
174 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
175 {"mac_tx_uni_pkt_num",
176 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
177 {"mac_tx_multi_pkt_num",
178 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
179 {"mac_tx_broad_pkt_num",
180 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
181 {"mac_tx_undersize_pkt_num",
182 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
200a88c6
JS
183 {"mac_tx_oversize_pkt_num",
184 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
46a3df9f
S
185 {"mac_tx_64_oct_pkt_num",
186 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
187 {"mac_tx_65_127_oct_pkt_num",
188 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
189 {"mac_tx_128_255_oct_pkt_num",
190 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
191 {"mac_tx_256_511_oct_pkt_num",
192 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
193 {"mac_tx_512_1023_oct_pkt_num",
194 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
195 {"mac_tx_1024_1518_oct_pkt_num",
196 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
91f384f6
JS
197 {"mac_tx_1519_2047_oct_pkt_num",
198 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
199 {"mac_tx_2048_4095_oct_pkt_num",
200 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
201 {"mac_tx_4096_8191_oct_pkt_num",
202 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
91f384f6
JS
203 {"mac_tx_8192_9216_oct_pkt_num",
204 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
205 {"mac_tx_9217_12287_oct_pkt_num",
206 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
207 {"mac_tx_12288_16383_oct_pkt_num",
208 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
209 {"mac_tx_1519_max_good_pkt_num",
210 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
211 {"mac_tx_1519_max_bad_pkt_num",
212 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
46a3df9f
S
213 {"mac_rx_total_pkt_num",
214 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
215 {"mac_rx_total_oct_num",
216 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
217 {"mac_rx_good_pkt_num",
218 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
219 {"mac_rx_bad_pkt_num",
220 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
221 {"mac_rx_good_oct_num",
222 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
223 {"mac_rx_bad_oct_num",
224 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
225 {"mac_rx_uni_pkt_num",
226 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
227 {"mac_rx_multi_pkt_num",
228 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
229 {"mac_rx_broad_pkt_num",
230 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
231 {"mac_rx_undersize_pkt_num",
232 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
200a88c6
JS
233 {"mac_rx_oversize_pkt_num",
234 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
46a3df9f
S
235 {"mac_rx_64_oct_pkt_num",
236 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
237 {"mac_rx_65_127_oct_pkt_num",
238 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
239 {"mac_rx_128_255_oct_pkt_num",
240 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
241 {"mac_rx_256_511_oct_pkt_num",
242 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
243 {"mac_rx_512_1023_oct_pkt_num",
244 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
245 {"mac_rx_1024_1518_oct_pkt_num",
246 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
91f384f6
JS
247 {"mac_rx_1519_2047_oct_pkt_num",
248 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
249 {"mac_rx_2048_4095_oct_pkt_num",
250 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
251 {"mac_rx_4096_8191_oct_pkt_num",
252 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
91f384f6
JS
253 {"mac_rx_8192_9216_oct_pkt_num",
254 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
255 {"mac_rx_9217_12287_oct_pkt_num",
256 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
257 {"mac_rx_12288_16383_oct_pkt_num",
258 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
259 {"mac_rx_1519_max_good_pkt_num",
260 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
261 {"mac_rx_1519_max_bad_pkt_num",
262 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
46a3df9f 263
a6c51c26
JS
264 {"mac_tx_fragment_pkt_num",
265 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
266 {"mac_tx_undermin_pkt_num",
267 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
268 {"mac_tx_jabber_pkt_num",
269 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
270 {"mac_tx_err_all_pkt_num",
271 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
272 {"mac_tx_from_app_good_pkt_num",
273 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
274 {"mac_tx_from_app_bad_pkt_num",
275 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
276 {"mac_rx_fragment_pkt_num",
277 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
278 {"mac_rx_undermin_pkt_num",
279 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
280 {"mac_rx_jabber_pkt_num",
281 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
282 {"mac_rx_fcs_err_pkt_num",
283 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
284 {"mac_rx_send_app_good_pkt_num",
285 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
286 {"mac_rx_send_app_bad_pkt_num",
287 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
46a3df9f
S
288};
289
f5aac71c
FL
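/* MAC manager table: a single entry matching the LLDP multicast address
 * 01:80:c2:00:00:0e (mac_addr_hi32/lo16 below) together with the LLDP
 * ethertype
 */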
290static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
291 {
292 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
293 .ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
294 .mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
295 .mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
296 .i_port_bitmap = 0x1,
297 },
298};
299
472d7ece
JS
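/* default RSS hash key; the byte sequence matches the well-known
 * default Toeplitz key from the Microsoft RSS reference
 */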
300static const u8 hclge_hash_key[] = {
301 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
302 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
303 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
304 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
305 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
306};
307
d174ea75 308static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
46a3df9f 309{
91f384f6 310#define HCLGE_MAC_CMD_NUM 21
46a3df9f
S
311
312 u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
313 struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
a90bb9a5 314 __le64 *desc_data;
46a3df9f
S
315 int i, k, n;
316 int ret;
317
318 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
319 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
320 if (ret) {
321 dev_err(&hdev->pdev->dev,
322 "Get MAC pkt stats fail, status = %d.\n", ret);
323
324 return ret;
325 }
326
327 for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
d174ea75 328 /* for special opcode 0032, only the first desc has the head */
46a3df9f 329 if (unlikely(i == 0)) {
a90bb9a5 330 desc_data = (__le64 *)(&desc[i].data[0]);
d174ea75 331 n = HCLGE_RD_FIRST_STATS_NUM;
46a3df9f 332 } else {
a90bb9a5 333 desc_data = (__le64 *)(&desc[i]);
d174ea75 334 n = HCLGE_RD_OTHER_STATS_NUM;
46a3df9f 335 }
d174ea75 336
46a3df9f 337 for (k = 0; k < n; k++) {
d174ea75 338 *data += le64_to_cpu(*desc_data);
339 data++;
46a3df9f
S
340 desc_data++;
341 }
342 }
343
344 return 0;
345}
346
d174ea75 347static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
348{
349 u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
350 struct hclge_desc *desc;
351 __le64 *desc_data;
352 u16 i, k, n;
353 int ret;
354
355 desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_KERNEL);
39ee6e82
DC
356 if (!desc)
357 return -ENOMEM;
d174ea75 358 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
359 ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
360 if (ret) {
361 kfree(desc);
362 return ret;
363 }
364
365 for (i = 0; i < desc_num; i++) {
366 /* for special opcode 0034, only the first desc has the head */
367 if (i == 0) {
368 desc_data = (__le64 *)(&desc[i].data[0]);
369 n = HCLGE_RD_FIRST_STATS_NUM;
370 } else {
371 desc_data = (__le64 *)(&desc[i]);
372 n = HCLGE_RD_OTHER_STATS_NUM;
373 }
374
375 for (k = 0; k < n; k++) {
376 *data += le64_to_cpu(*desc_data);
377 data++;
378 desc_data++;
379 }
380 }
381
382 kfree(desc);
383
384 return 0;
385}
386
387static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
388{
389 struct hclge_desc desc;
390 __le32 *desc_data;
391 u32 reg_num;
392 int ret;
393
394 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
395 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
396 if (ret)
397 return ret;
398
399 desc_data = (__le32 *)(&desc.data[0]);
400 reg_num = le32_to_cpu(*desc_data);
401
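	/* one descriptor covers the first three registers (it also carries
	 * the command header), each further descriptor covers four:
	 * desc_num = 1 + DIV_ROUND_UP(reg_num - 3, 4)
	 */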
402 *desc_num = 1 + ((reg_num - 3) >> 2) +
403 (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
404
405 return 0;
406}
407
408static int hclge_mac_update_stats(struct hclge_dev *hdev)
409{
410 u32 desc_num;
411 int ret;
412
413 ret = hclge_mac_query_reg_num(hdev, &desc_num);
414
 415	/* firmware that reports the register count supports the new method */
416 if (!ret)
417 ret = hclge_mac_update_stats_complete(hdev, desc_num);
418 else if (ret == -EOPNOTSUPP)
419 ret = hclge_mac_update_stats_defective(hdev);
420 else
421 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
422
423 return ret;
424}
425
46a3df9f
S
426static int hclge_tqps_update_stats(struct hnae3_handle *handle)
427{
428 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
429 struct hclge_vport *vport = hclge_get_vport(handle);
430 struct hclge_dev *hdev = vport->back;
431 struct hnae3_queue *queue;
432 struct hclge_desc desc[1];
433 struct hclge_tqp *tqp;
434 int ret, i;
435
436 for (i = 0; i < kinfo->num_tqps; i++) {
437 queue = handle->kinfo.tqp[i];
438 tqp = container_of(queue, struct hclge_tqp, q);
439 /* command : HCLGE_OPC_QUERY_IGU_STAT */
440 hclge_cmd_setup_basic_desc(&desc[0],
441 HCLGE_OPC_QUERY_RX_STATUS,
442 true);
443
a90bb9a5 444 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
46a3df9f
S
445 ret = hclge_cmd_send(&hdev->hw, desc, 1);
446 if (ret) {
447 dev_err(&hdev->pdev->dev,
448 "Query tqp stat fail, status = %d,queue = %d\n",
449 ret, i);
450 return ret;
451 }
452 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
cf72fa63 453 le32_to_cpu(desc[0].data[1]);
46a3df9f
S
454 }
455
456 for (i = 0; i < kinfo->num_tqps; i++) {
457 queue = handle->kinfo.tqp[i];
458 tqp = container_of(queue, struct hclge_tqp, q);
459 /* command : HCLGE_OPC_QUERY_IGU_STAT */
460 hclge_cmd_setup_basic_desc(&desc[0],
461 HCLGE_OPC_QUERY_TX_STATUS,
462 true);
463
a90bb9a5 464 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
46a3df9f
S
465 ret = hclge_cmd_send(&hdev->hw, desc, 1);
466 if (ret) {
467 dev_err(&hdev->pdev->dev,
468 "Query tqp stat fail, status = %d,queue = %d\n",
469 ret, i);
470 return ret;
471 }
472 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
cf72fa63 473 le32_to_cpu(desc[0].data[1]);
46a3df9f
S
474 }
475
476 return 0;
477}
478
479static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
480{
481 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
482 struct hclge_tqp *tqp;
483 u64 *buff = data;
484 int i;
485
486 for (i = 0; i < kinfo->num_tqps; i++) {
487 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
a90bb9a5 488 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
46a3df9f
S
489 }
490
491 for (i = 0; i < kinfo->num_tqps; i++) {
492 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
a90bb9a5 493 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
46a3df9f
S
494 }
495
496 return buff;
497}
498
499static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
500{
501 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
502
503 return kinfo->num_tqps * (2);
504}
505
506static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
507{
508 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
509 u8 *buff = data;
510 int i = 0;
511
512 for (i = 0; i < kinfo->num_tqps; i++) {
513 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
514 struct hclge_tqp, q);
0c218123 515 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
46a3df9f
S
516 tqp->index);
517 buff = buff + ETH_GSTRING_LEN;
518 }
519
520 for (i = 0; i < kinfo->num_tqps; i++) {
521 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
522 struct hclge_tqp, q);
0c218123 523 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
46a3df9f
S
524 tqp->index);
525 buff = buff + ETH_GSTRING_LEN;
526 }
527
528 return buff;
529}
530
531static u64 *hclge_comm_get_stats(void *comm_stats,
532 const struct hclge_comm_stats_str strs[],
533 int size, u64 *data)
534{
535 u64 *buf = data;
536 u32 i;
537
538 for (i = 0; i < size; i++)
539 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
540
541 return buf + size;
542}
543
544static u8 *hclge_comm_get_strings(u32 stringset,
545 const struct hclge_comm_stats_str strs[],
546 int size, u8 *data)
547{
548 char *buff = (char *)data;
549 u32 i;
550
551 if (stringset != ETH_SS_STATS)
552 return buff;
553
554 for (i = 0; i < size; i++) {
555 snprintf(buff, ETH_GSTRING_LEN,
556 strs[i].desc);
557 buff = buff + ETH_GSTRING_LEN;
558 }
559
560 return (u8 *)buff;
561}
562
46a3df9f
S
563static void hclge_update_stats_for_all(struct hclge_dev *hdev)
564{
565 struct hnae3_handle *handle;
566 int status;
567
568 handle = &hdev->vport[0].nic;
569 if (handle->client) {
570 status = hclge_tqps_update_stats(handle);
571 if (status) {
572 dev_err(&hdev->pdev->dev,
573 "Update TQPS stats fail, status = %d.\n",
574 status);
575 }
576 }
577
578 status = hclge_mac_update_stats(hdev);
579 if (status)
580 dev_err(&hdev->pdev->dev,
581 "Update MAC stats fail, status = %d.\n", status);
46a3df9f
S
582}
583
584static void hclge_update_stats(struct hnae3_handle *handle,
585 struct net_device_stats *net_stats)
586{
587 struct hclge_vport *vport = hclge_get_vport(handle);
588 struct hclge_dev *hdev = vport->back;
46a3df9f
S
589 int status;
590
c5f65480
JS
591 if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
592 return;
593
46a3df9f
S
594 status = hclge_mac_update_stats(hdev);
595 if (status)
596 dev_err(&hdev->pdev->dev,
597 "Update MAC stats fail, status = %d.\n",
598 status);
599
46a3df9f
S
600 status = hclge_tqps_update_stats(handle);
601 if (status)
602 dev_err(&hdev->pdev->dev,
603 "Update TQPS stats fail, status = %d.\n",
604 status);
605
c5f65480 606 clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
46a3df9f
S
607}
608
609static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
610{
4dc13b96
FL
611#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
612 HNAE3_SUPPORT_PHY_LOOPBACK |\
613 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
614 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
46a3df9f
S
615
616 struct hclge_vport *vport = hclge_get_vport(handle);
617 struct hclge_dev *hdev = vport->back;
618 int count = 0;
619
 620	/* Loopback test support rules:
 621	 * mac: only GE mode supports it
 622	 * serdes: supported by all MAC modes, including GE/XGE/LGE/CGE
 623	 * phy: only supported when a PHY device is present on the board
 624	 */
625 if (stringset == ETH_SS_TEST) {
626 /* clear loopback bit flags at first */
627 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
3ff6cde8 628 if (hdev->pdev->revision >= 0x21 ||
4dc13b96 629 hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
46a3df9f
S
630 hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
631 hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
632 count += 1;
eb66d503 633 handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
46a3df9f 634 }
5fd50ac3 635
4dc13b96
FL
636 count += 2;
637 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
638 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
46a3df9f
S
639 } else if (stringset == ETH_SS_STATS) {
640 count = ARRAY_SIZE(g_mac_stats_string) +
46a3df9f
S
641 hclge_tqps_get_sset_count(handle, stringset);
642 }
643
644 return count;
645}
646
647static void hclge_get_strings(struct hnae3_handle *handle,
648 u32 stringset,
649 u8 *data)
650{
651 u8 *p = (char *)data;
652 int size;
653
654 if (stringset == ETH_SS_STATS) {
655 size = ARRAY_SIZE(g_mac_stats_string);
656 p = hclge_comm_get_strings(stringset,
657 g_mac_stats_string,
658 size,
659 p);
46a3df9f
S
660 p = hclge_tqps_get_strings(handle, p);
661 } else if (stringset == ETH_SS_TEST) {
eb66d503 662 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
46a3df9f 663 memcpy(p,
eb66d503 664 hns3_nic_test_strs[HNAE3_LOOP_APP],
46a3df9f
S
665 ETH_GSTRING_LEN);
666 p += ETH_GSTRING_LEN;
667 }
4dc13b96 668 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
46a3df9f 669 memcpy(p,
4dc13b96
FL
670 hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
671 ETH_GSTRING_LEN);
672 p += ETH_GSTRING_LEN;
673 }
674 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
675 memcpy(p,
676 hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
46a3df9f
S
677 ETH_GSTRING_LEN);
678 p += ETH_GSTRING_LEN;
679 }
680 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
681 memcpy(p,
a7b687b3 682 hns3_nic_test_strs[HNAE3_LOOP_PHY],
46a3df9f
S
683 ETH_GSTRING_LEN);
684 p += ETH_GSTRING_LEN;
685 }
686 }
687}
688
689static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
690{
691 struct hclge_vport *vport = hclge_get_vport(handle);
692 struct hclge_dev *hdev = vport->back;
693 u64 *p;
694
695 p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
696 g_mac_stats_string,
697 ARRAY_SIZE(g_mac_stats_string),
698 data);
46a3df9f
S
699 p = hclge_tqps_get_stats(handle, p);
700}
701
702static int hclge_parse_func_status(struct hclge_dev *hdev,
d44f9b63 703 struct hclge_func_status_cmd *status)
46a3df9f
S
704{
705 if (!(status->pf_state & HCLGE_PF_STATE_DONE))
706 return -EINVAL;
707
708 /* Set the pf to main pf */
709 if (status->pf_state & HCLGE_PF_STATE_MAIN)
710 hdev->flag |= HCLGE_FLAG_MAIN;
711 else
712 hdev->flag &= ~HCLGE_FLAG_MAIN;
713
46a3df9f
S
714 return 0;
715}
716
717static int hclge_query_function_status(struct hclge_dev *hdev)
718{
d44f9b63 719 struct hclge_func_status_cmd *req;
46a3df9f
S
720 struct hclge_desc desc;
721 int timeout = 0;
722 int ret;
723
724 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
d44f9b63 725 req = (struct hclge_func_status_cmd *)desc.data;
46a3df9f
S
726
727 do {
728 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
729 if (ret) {
730 dev_err(&hdev->pdev->dev,
731 "query function status failed %d.\n",
732 ret);
733
734 return ret;
735 }
736
737 /* Check pf reset is done */
738 if (req->pf_state)
739 break;
740 usleep_range(1000, 2000);
741 } while (timeout++ < 5);
742
743 ret = hclge_parse_func_status(hdev, req);
744
745 return ret;
746}
747
748static int hclge_query_pf_resource(struct hclge_dev *hdev)
749{
d44f9b63 750 struct hclge_pf_res_cmd *req;
46a3df9f
S
751 struct hclge_desc desc;
752 int ret;
753
754 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
755 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
756 if (ret) {
757 dev_err(&hdev->pdev->dev,
758 "query pf resource failed %d.\n", ret);
759 return ret;
760 }
761
d44f9b63 762 req = (struct hclge_pf_res_cmd *)desc.data;
46a3df9f
S
763 hdev->num_tqps = __le16_to_cpu(req->tqp_num);
764 hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
765
368686be
YL
766 if (req->tx_buf_size)
767 hdev->tx_buf_size =
768 __le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
769 else
770 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
771
b9a400ac
YL
772 hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
773
368686be
YL
774 if (req->dv_buf_size)
775 hdev->dv_buf_size =
776 __le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
777 else
778 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
779
b9a400ac
YL
780 hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
781
e92a0843 782 if (hnae3_dev_roce_supported(hdev)) {
375dd5e4
JS
783 hdev->roce_base_msix_offset =
784 hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
785 HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
887c3820 786 hdev->num_roce_msi =
e4e87715
PL
787 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
788 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
46a3df9f
S
789
790 /* PF should have NIC vectors and Roce vectors,
791 * NIC vectors are queued before Roce vectors.
792 */
375dd5e4
JS
793 hdev->num_msi = hdev->num_roce_msi +
794 hdev->roce_base_msix_offset;
46a3df9f
S
795 } else {
796 hdev->num_msi =
e4e87715
PL
797 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
798 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
46a3df9f
S
799 }
800
801 return 0;
802}
803
804static int hclge_parse_speed(int speed_cmd, int *speed)
805{
806 switch (speed_cmd) {
807 case 6:
808 *speed = HCLGE_MAC_SPEED_10M;
809 break;
810 case 7:
811 *speed = HCLGE_MAC_SPEED_100M;
812 break;
813 case 0:
814 *speed = HCLGE_MAC_SPEED_1G;
815 break;
816 case 1:
817 *speed = HCLGE_MAC_SPEED_10G;
818 break;
819 case 2:
820 *speed = HCLGE_MAC_SPEED_25G;
821 break;
822 case 3:
823 *speed = HCLGE_MAC_SPEED_40G;
824 break;
825 case 4:
826 *speed = HCLGE_MAC_SPEED_50G;
827 break;
828 case 5:
829 *speed = HCLGE_MAC_SPEED_100G;
830 break;
831 default:
832 return -EINVAL;
833 }
834
835 return 0;
836}
837
0979aa0b
FL
838static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
839 u8 speed_ability)
840{
841 unsigned long *supported = hdev->hw.mac.supported;
842
843 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
db68ca0e
JS
844 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
845 supported);
0979aa0b
FL
846
847 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
db68ca0e
JS
848 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
849 supported);
0979aa0b
FL
850
851 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
db68ca0e
JS
852 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
853 supported);
0979aa0b
FL
854
855 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
db68ca0e
JS
856 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
857 supported);
0979aa0b
FL
858
859 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
db68ca0e
JS
860 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
861 supported);
0979aa0b 862
db68ca0e
JS
863 linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
864 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
0979aa0b
FL
865}
866
f18635d5
JS
867static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
868 u8 speed_ability)
869{
870 unsigned long *supported = hdev->hw.mac.supported;
871
872 /* default to support all speed for GE port */
873 if (!speed_ability)
874 speed_ability = HCLGE_SUPPORT_GE;
875
876 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
877 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
878 supported);
879
880 if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
881 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
882 supported);
883 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
884 supported);
885 }
886
887 if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
888 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
889 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
890 }
891
892 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
893 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
894 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
895}
896
0979aa0b
FL
897static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
898{
899 u8 media_type = hdev->hw.mac.media_type;
900
f18635d5
JS
901 if (media_type == HNAE3_MEDIA_TYPE_FIBER)
902 hclge_parse_fiber_link_mode(hdev, speed_ability);
903 else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
904 hclge_parse_copper_link_mode(hdev, speed_ability);
0979aa0b
FL
905}
906
46a3df9f
S
907static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
908{
d44f9b63 909 struct hclge_cfg_param_cmd *req;
46a3df9f
S
910 u64 mac_addr_tmp_high;
911 u64 mac_addr_tmp;
912 int i;
913
d44f9b63 914 req = (struct hclge_cfg_param_cmd *)desc[0].data;
46a3df9f
S
915
916 /* get the configuration */
e4e87715
PL
917 cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
918 HCLGE_CFG_VMDQ_M,
919 HCLGE_CFG_VMDQ_S);
920 cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
921 HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
922 cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
923 HCLGE_CFG_TQP_DESC_N_M,
924 HCLGE_CFG_TQP_DESC_N_S);
925
926 cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
927 HCLGE_CFG_PHY_ADDR_M,
928 HCLGE_CFG_PHY_ADDR_S);
929 cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
930 HCLGE_CFG_MEDIA_TP_M,
931 HCLGE_CFG_MEDIA_TP_S);
932 cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
933 HCLGE_CFG_RX_BUF_LEN_M,
934 HCLGE_CFG_RX_BUF_LEN_S);
46a3df9f
S
935 /* get mac_address */
936 mac_addr_tmp = __le32_to_cpu(req->param[2]);
e4e87715
PL
937 mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
938 HCLGE_CFG_MAC_ADDR_H_M,
939 HCLGE_CFG_MAC_ADDR_H_S);
46a3df9f
S
940
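	/* param[2] holds the low 32 bits of the MAC address and param[3]
	 * its upper bits; the two shifts together move the high part up
	 * by 32 bits
	 */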
941 mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
942
e4e87715
PL
943 cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
944 HCLGE_CFG_DEFAULT_SPEED_M,
945 HCLGE_CFG_DEFAULT_SPEED_S);
946 cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
947 HCLGE_CFG_RSS_SIZE_M,
948 HCLGE_CFG_RSS_SIZE_S);
0e7a40cd 949
46a3df9f
S
950 for (i = 0; i < ETH_ALEN; i++)
951 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
952
d44f9b63 953 req = (struct hclge_cfg_param_cmd *)desc[1].data;
46a3df9f 954 cfg->numa_node_map = __le32_to_cpu(req->param[0]);
0979aa0b 955
e4e87715
PL
956 cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
957 HCLGE_CFG_SPEED_ABILITY_M,
958 HCLGE_CFG_SPEED_ABILITY_S);
39932473
JS
959 cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
960 HCLGE_CFG_UMV_TBL_SPACE_M,
961 HCLGE_CFG_UMV_TBL_SPACE_S);
962 if (!cfg->umv_space)
963 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
46a3df9f
S
964}
965
 966/* hclge_get_cfg: query the static parameters from flash
 967 * @hdev: pointer to struct hclge_dev
 968 * @hcfg: the config structure to be filled in
969 */
970static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
971{
972 struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
d44f9b63 973 struct hclge_cfg_param_cmd *req;
46a3df9f
S
974 int i, ret;
975
976 for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
a90bb9a5
YL
977 u32 offset = 0;
978
d44f9b63 979 req = (struct hclge_cfg_param_cmd *)desc[i].data;
46a3df9f
S
980 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
981 true);
e4e87715
PL
982 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
983 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
46a3df9f 984		/* Length must be in units of 4 bytes when sent to hardware */
e4e87715
PL
985 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
986 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
a90bb9a5 987 req->offset = cpu_to_le32(offset);
46a3df9f
S
988 }
989
990 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
991 if (ret) {
3f639907 992 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
46a3df9f
S
993 return ret;
994 }
995
996 hclge_parse_cfg(hcfg, desc);
3f639907 997
46a3df9f
S
998 return 0;
999}
1000
1001static int hclge_get_cap(struct hclge_dev *hdev)
1002{
1003 int ret;
1004
1005 ret = hclge_query_function_status(hdev);
1006 if (ret) {
1007 dev_err(&hdev->pdev->dev,
1008 "query function status error %d.\n", ret);
1009 return ret;
1010 }
1011
1012 /* get pf resource */
1013 ret = hclge_query_pf_resource(hdev);
3f639907
JS
1014 if (ret)
1015 dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
46a3df9f 1016
3f639907 1017 return ret;
46a3df9f
S
1018}
1019
962e31bd
YL
1020static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1021{
1022#define HCLGE_MIN_TX_DESC 64
1023#define HCLGE_MIN_RX_DESC 64
1024
1025 if (!is_kdump_kernel())
1026 return;
1027
1028 dev_info(&hdev->pdev->dev,
1029 "Running kdump kernel. Using minimal resources\n");
1030
1031 /* minimal queue pairs equals to the number of vports */
1032 hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1033 hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1034 hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1035}
1036
46a3df9f
S
1037static int hclge_configure(struct hclge_dev *hdev)
1038{
1039 struct hclge_cfg cfg;
1040 int ret, i;
1041
1042 ret = hclge_get_cfg(hdev, &cfg);
1043 if (ret) {
1044 dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
1045 return ret;
1046 }
1047
1048 hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1049 hdev->base_tqp_pid = 0;
0e7a40cd 1050 hdev->rss_size_max = cfg.rss_size_max;
46a3df9f 1051 hdev->rx_buf_len = cfg.rx_buf_len;
fbbb1536 1052 ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
46a3df9f 1053 hdev->hw.mac.media_type = cfg.media_type;
2a4776e1 1054 hdev->hw.mac.phy_addr = cfg.phy_addr;
c0425944
PL
1055 hdev->num_tx_desc = cfg.tqp_desc_num;
1056 hdev->num_rx_desc = cfg.tqp_desc_num;
46a3df9f 1057 hdev->tm_info.num_pg = 1;
cacde272 1058 hdev->tc_max = cfg.tc_num;
46a3df9f 1059 hdev->tm_info.hw_pfc_map = 0;
39932473 1060 hdev->wanted_umv_size = cfg.umv_space;
46a3df9f 1061
9abeb7d8
JS
1062 if (hnae3_dev_fd_supported(hdev))
1063 hdev->fd_en = true;
1064
46a3df9f
S
1065 ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1066 if (ret) {
1067 dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
1068 return ret;
1069 }
1070
0979aa0b
FL
1071 hclge_parse_link_mode(hdev, cfg.speed_ability);
1072
cacde272
YL
1073 if ((hdev->tc_max > HNAE3_MAX_TC) ||
1074 (hdev->tc_max < 1)) {
46a3df9f 1075 dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
cacde272
YL
1076 hdev->tc_max);
1077 hdev->tc_max = 1;
46a3df9f
S
1078 }
1079
cacde272
YL
1080 /* Dev does not support DCB */
1081 if (!hnae3_dev_dcb_supported(hdev)) {
1082 hdev->tc_max = 1;
1083 hdev->pfc_max = 0;
1084 } else {
1085 hdev->pfc_max = hdev->tc_max;
1086 }
1087
a2987975 1088 hdev->tm_info.num_tc = 1;
cacde272 1089
46a3df9f 1090	/* Non-contiguous TCs are currently not supported */
cacde272 1091 for (i = 0; i < hdev->tm_info.num_tc; i++)
e4e87715 1092 hnae3_set_bit(hdev->hw_tc_map, i, 1);
46a3df9f 1093
71b83869 1094 hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
46a3df9f 1095
962e31bd
YL
1096 hclge_init_kdump_kernel_config(hdev);
1097
46a3df9f
S
1098 return ret;
1099}
1100
1101static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
1102 int tso_mss_max)
1103{
d44f9b63 1104 struct hclge_cfg_tso_status_cmd *req;
46a3df9f 1105 struct hclge_desc desc;
a90bb9a5 1106 u16 tso_mss;
46a3df9f
S
1107
1108 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1109
d44f9b63 1110 req = (struct hclge_cfg_tso_status_cmd *)desc.data;
a90bb9a5
YL
1111
1112 tso_mss = 0;
e4e87715
PL
1113 hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1114 HCLGE_TSO_MSS_MIN_S, tso_mss_min);
a90bb9a5
YL
1115 req->tso_mss_min = cpu_to_le16(tso_mss);
1116
1117 tso_mss = 0;
e4e87715
PL
1118 hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1119 HCLGE_TSO_MSS_MIN_S, tso_mss_max);
a90bb9a5 1120 req->tso_mss_max = cpu_to_le16(tso_mss);
46a3df9f
S
1121
1122 return hclge_cmd_send(&hdev->hw, &desc, 1);
1123}
1124
b26a6fea
PL
1125static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1126{
1127 struct hclge_cfg_gro_status_cmd *req;
1128 struct hclge_desc desc;
1129 int ret;
1130
1131 if (!hnae3_dev_gro_supported(hdev))
1132 return 0;
1133
1134 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1135 req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1136
1137 req->gro_en = cpu_to_le16(en ? 1 : 0);
1138
1139 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1140 if (ret)
1141 dev_err(&hdev->pdev->dev,
1142 "GRO hardware config cmd failed, ret = %d\n", ret);
1143
1144 return ret;
1145}
1146
46a3df9f
S
1147static int hclge_alloc_tqps(struct hclge_dev *hdev)
1148{
1149 struct hclge_tqp *tqp;
1150 int i;
1151
1152 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1153 sizeof(struct hclge_tqp), GFP_KERNEL);
1154 if (!hdev->htqp)
1155 return -ENOMEM;
1156
1157 tqp = hdev->htqp;
1158
1159 for (i = 0; i < hdev->num_tqps; i++) {
1160 tqp->dev = &hdev->pdev->dev;
1161 tqp->index = i;
1162
1163 tqp->q.ae_algo = &ae_algo;
1164 tqp->q.buf_size = hdev->rx_buf_len;
c0425944
PL
1165 tqp->q.tx_desc_num = hdev->num_tx_desc;
1166 tqp->q.rx_desc_num = hdev->num_rx_desc;
46a3df9f
S
1167 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1168 i * HCLGE_TQP_REG_SIZE;
1169
1170 tqp++;
1171 }
1172
1173 return 0;
1174}
1175
1176static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1177 u16 tqp_pid, u16 tqp_vid, bool is_pf)
1178{
d44f9b63 1179 struct hclge_tqp_map_cmd *req;
46a3df9f
S
1180 struct hclge_desc desc;
1181 int ret;
1182
1183 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1184
d44f9b63 1185 req = (struct hclge_tqp_map_cmd *)desc.data;
46a3df9f 1186 req->tqp_id = cpu_to_le16(tqp_pid);
a90bb9a5 1187 req->tqp_vf = func_id;
46a3df9f
S
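	/* the TYPE bit is 0 for a PF queue and 1 for a VF queue; the EN bit
	 * enables the mapping of this physical queue to (func_id, tqp_vid)
	 */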
1188 req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
1189 1 << HCLGE_TQP_MAP_EN_B;
1190 req->tqp_vid = cpu_to_le16(tqp_vid);
1191
1192 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907
JS
1193 if (ret)
1194 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
46a3df9f 1195
3f639907 1196 return ret;
46a3df9f
S
1197}
1198
672ad0ed 1199static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
46a3df9f 1200{
128b900d 1201 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
46a3df9f 1202 struct hclge_dev *hdev = vport->back;
7df7dad6 1203 int i, alloced;
46a3df9f
S
1204
1205 for (i = 0, alloced = 0; i < hdev->num_tqps &&
672ad0ed 1206 alloced < num_tqps; i++) {
46a3df9f
S
1207 if (!hdev->htqp[i].alloced) {
1208 hdev->htqp[i].q.handle = &vport->nic;
1209 hdev->htqp[i].q.tqp_index = alloced;
c0425944
PL
1210 hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1211 hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
128b900d 1212 kinfo->tqp[alloced] = &hdev->htqp[i].q;
46a3df9f 1213 hdev->htqp[i].alloced = true;
46a3df9f
S
1214 alloced++;
1215 }
1216 }
672ad0ed
HT
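	/* rss_size is the number of queues used per TC, capped by the
	 * maximum RSS size reported by the firmware
	 */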
1217 vport->alloc_tqps = alloced;
1218 kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1219 vport->alloc_tqps / hdev->tm_info.num_tc);
46a3df9f
S
1220
1221 return 0;
1222}
1223
c0425944
PL
1224static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1225 u16 num_tx_desc, u16 num_rx_desc)
1226
46a3df9f
S
1227{
1228 struct hnae3_handle *nic = &vport->nic;
1229 struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1230 struct hclge_dev *hdev = vport->back;
af958827 1231 int ret;
46a3df9f 1232
c0425944
PL
1233 kinfo->num_tx_desc = num_tx_desc;
1234 kinfo->num_rx_desc = num_rx_desc;
1235
46a3df9f 1236 kinfo->rx_buf_len = hdev->rx_buf_len;
46a3df9f 1237
672ad0ed 1238 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
46a3df9f
S
1239 sizeof(struct hnae3_queue *), GFP_KERNEL);
1240 if (!kinfo->tqp)
1241 return -ENOMEM;
1242
672ad0ed 1243 ret = hclge_assign_tqp(vport, num_tqps);
3f639907 1244 if (ret)
46a3df9f 1245 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
46a3df9f 1246
3f639907 1247 return ret;
46a3df9f
S
1248}
1249
7df7dad6
L
1250static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1251 struct hclge_vport *vport)
1252{
1253 struct hnae3_handle *nic = &vport->nic;
1254 struct hnae3_knic_private_info *kinfo;
1255 u16 i;
1256
1257 kinfo = &nic->kinfo;
205a24ca 1258 for (i = 0; i < vport->alloc_tqps; i++) {
7df7dad6
L
1259 struct hclge_tqp *q =
1260 container_of(kinfo->tqp[i], struct hclge_tqp, q);
1261 bool is_pf;
1262 int ret;
1263
1264 is_pf = !(vport->vport_id);
1265 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1266 i, is_pf);
1267 if (ret)
1268 return ret;
1269 }
1270
1271 return 0;
1272}
1273
1274static int hclge_map_tqp(struct hclge_dev *hdev)
1275{
1276 struct hclge_vport *vport = hdev->vport;
1277 u16 i, num_vport;
1278
1279 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1280 for (i = 0; i < num_vport; i++) {
1281 int ret;
1282
1283 ret = hclge_map_tqp_to_vport(hdev, vport);
1284 if (ret)
1285 return ret;
1286
1287 vport++;
1288 }
1289
1290 return 0;
1291}
1292
46a3df9f
S
1293static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
1294{
1295 /* this would be initialized later */
1296}
1297
1298static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1299{
1300 struct hnae3_handle *nic = &vport->nic;
1301 struct hclge_dev *hdev = vport->back;
1302 int ret;
1303
1304 nic->pdev = hdev->pdev;
1305 nic->ae_algo = &ae_algo;
1306 nic->numa_node_mask = hdev->numa_node_mask;
1307
1308 if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
c0425944
PL
1309 ret = hclge_knic_setup(vport, num_tqps,
1310 hdev->num_tx_desc, hdev->num_rx_desc);
1311
46a3df9f
S
1312 if (ret) {
1313 dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
1314 ret);
1315 return ret;
1316 }
1317 } else {
1318 hclge_unic_setup(vport, num_tqps);
1319 }
1320
1321 return 0;
1322}
1323
1324static int hclge_alloc_vport(struct hclge_dev *hdev)
1325{
1326 struct pci_dev *pdev = hdev->pdev;
1327 struct hclge_vport *vport;
1328 u32 tqp_main_vport;
1329 u32 tqp_per_vport;
1330 int num_vport, i;
1331 int ret;
1332
1333 /* We need to alloc a vport for main NIC of PF */
1334 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1335
38e62046
HT
1336 if (hdev->num_tqps < num_vport) {
1337 dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
1338 hdev->num_tqps, num_vport);
1339 return -EINVAL;
1340 }
46a3df9f
S
1341
1342 /* Alloc the same number of TQPs for every vport */
1343 tqp_per_vport = hdev->num_tqps / num_vport;
1344 tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1345
1346 vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1347 GFP_KERNEL);
1348 if (!vport)
1349 return -ENOMEM;
1350
1351 hdev->vport = vport;
1352 hdev->num_alloc_vport = num_vport;
1353
2312e050
FL
1354 if (IS_ENABLED(CONFIG_PCI_IOV))
1355 hdev->num_alloc_vfs = hdev->num_req_vfs;
46a3df9f
S
1356
1357 for (i = 0; i < num_vport; i++) {
1358 vport->back = hdev;
1359 vport->vport_id = i;
818f1675 1360 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
741fca16
JS
1361 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1362 vport->rxvlan_cfg.rx_vlan_offload_en = true;
c6075b19 1363 INIT_LIST_HEAD(&vport->vlan_list);
6dd86902 1364 INIT_LIST_HEAD(&vport->uc_mac_list);
1365 INIT_LIST_HEAD(&vport->mc_mac_list);
46a3df9f
S
1366
1367 if (i == 0)
1368 ret = hclge_vport_setup(vport, tqp_main_vport);
1369 else
1370 ret = hclge_vport_setup(vport, tqp_per_vport);
1371 if (ret) {
1372 dev_err(&pdev->dev,
1373 "vport setup failed for vport %d, %d\n",
1374 i, ret);
1375 return ret;
1376 }
1377
1378 vport++;
1379 }
1380
1381 return 0;
1382}
1383
acf61ecd
YL
1384static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1385 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f
S
1386{
 1387/* TX buffer size is in units of 128 bytes */
1388#define HCLGE_BUF_SIZE_UNIT_SHIFT 7
1389#define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15)
d44f9b63 1390 struct hclge_tx_buff_alloc_cmd *req;
46a3df9f
S
1391 struct hclge_desc desc;
1392 int ret;
1393 u8 i;
1394
d44f9b63 1395 req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
46a3df9f
S
1396
1397 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
f9f07091 1398 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
acf61ecd 1399 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
9ffe79a9 1400
46a3df9f
S
1401 req->tx_pkt_buff[i] =
1402 cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1403 HCLGE_BUF_SIZE_UPDATE_EN_MSK);
9ffe79a9 1404 }
46a3df9f
S
1405
1406 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 1407 if (ret)
46a3df9f
S
1408 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1409 ret);
46a3df9f 1410
3f639907 1411 return ret;
46a3df9f
S
1412}
1413
acf61ecd
YL
1414static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1415 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f 1416{
acf61ecd 1417 int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
46a3df9f 1418
3f639907
JS
1419 if (ret)
1420 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
46a3df9f 1421
3f639907 1422 return ret;
46a3df9f
S
1423}
1424
1425static int hclge_get_tc_num(struct hclge_dev *hdev)
1426{
1427 int i, cnt = 0;
1428
1429 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1430 if (hdev->hw_tc_map & BIT(i))
1431 cnt++;
1432 return cnt;
1433}
1434
46a3df9f 1435/* Get the number of PFC-enabled TCs that have a private buffer */
acf61ecd
YL
1436static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1437 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f
S
1438{
1439 struct hclge_priv_buf *priv;
1440 int i, cnt = 0;
1441
1442 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
acf61ecd 1443 priv = &buf_alloc->priv_buf[i];
46a3df9f
S
1444 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1445 priv->enable)
1446 cnt++;
1447 }
1448
1449 return cnt;
1450}
1451
 1452/* Get the number of PFC-disabled TCs that have a private buffer */
acf61ecd
YL
1453static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1454 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f
S
1455{
1456 struct hclge_priv_buf *priv;
1457 int i, cnt = 0;
1458
1459 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
acf61ecd 1460 priv = &buf_alloc->priv_buf[i];
46a3df9f
S
1461 if (hdev->hw_tc_map & BIT(i) &&
1462 !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1463 priv->enable)
1464 cnt++;
1465 }
1466
1467 return cnt;
1468}
1469
acf61ecd 1470static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f
S
1471{
1472 struct hclge_priv_buf *priv;
1473 u32 rx_priv = 0;
1474 int i;
1475
1476 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
acf61ecd 1477 priv = &buf_alloc->priv_buf[i];
46a3df9f
S
1478 if (priv->enable)
1479 rx_priv += priv->buf_size;
1480 }
1481 return rx_priv;
1482}
1483
acf61ecd 1484static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
9ffe79a9
YL
1485{
1486 u32 i, total_tx_size = 0;
1487
1488 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
acf61ecd 1489 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
9ffe79a9
YL
1490
1491 return total_tx_size;
1492}
1493
acf61ecd
YL
1494static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1495 struct hclge_pkt_buf_alloc *buf_alloc,
1496 u32 rx_all)
46a3df9f
S
1497{
1498 u32 shared_buf_min, shared_buf_tc, shared_std;
db5936db 1499 int tc_num = hclge_get_tc_num(hdev);
b9a400ac 1500 u32 shared_buf, aligned_mps;
46a3df9f
S
1501 u32 rx_priv;
1502 int i;
1503
b9a400ac 1504 aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
46a3df9f 1505
d221df4e 1506 if (hnae3_dev_dcb_supported(hdev))
b9a400ac 1507 shared_buf_min = 2 * aligned_mps + hdev->dv_buf_size;
d221df4e 1508 else
b9a400ac 1509 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
368686be 1510 + hdev->dv_buf_size;
d221df4e 1511
db5936db 1512 shared_buf_tc = tc_num * aligned_mps + aligned_mps;
af854724
YL
1513 shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1514 HCLGE_BUF_SIZE_UNIT);
46a3df9f 1515
acf61ecd 1516 rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
af854724 1517 if (rx_all < rx_priv + shared_std)
46a3df9f
S
1518 return false;
1519
b9a400ac 1520 shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
acf61ecd 1521 buf_alloc->s_buf.buf_size = shared_buf;
368686be
YL
1522 if (hnae3_dev_dcb_supported(hdev)) {
1523 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1524 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
b9a400ac 1525 - roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
368686be 1526 } else {
b9a400ac 1527 buf_alloc->s_buf.self.high = aligned_mps +
368686be 1528 HCLGE_NON_DCB_ADDITIONAL_BUF;
b9a400ac
YL
1529 buf_alloc->s_buf.self.low =
1530 roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
368686be 1531 }
46a3df9f
S
1532
1533 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
db5936db
YL
1534 buf_alloc->s_buf.tc_thrd[i].low = aligned_mps;
1535 buf_alloc->s_buf.tc_thrd[i].high = 2 * aligned_mps;
46a3df9f
S
1536 }
1537
1538 return true;
1539}
1540
acf61ecd
YL
1541static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1542 struct hclge_pkt_buf_alloc *buf_alloc)
9ffe79a9
YL
1543{
1544 u32 i, total_size;
1545
1546 total_size = hdev->pkt_buf_size;
1547
1548 /* alloc tx buffer for all enabled tc */
1549 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
acf61ecd 1550 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
9ffe79a9 1551
b6b4f987
HT
1552 if (hdev->hw_tc_map & BIT(i)) {
1553 if (total_size < hdev->tx_buf_size)
1554 return -ENOMEM;
9ffe79a9 1555
368686be 1556 priv->tx_buf_size = hdev->tx_buf_size;
b6b4f987 1557 } else {
9ffe79a9 1558 priv->tx_buf_size = 0;
b6b4f987 1559 }
9ffe79a9
YL
1560
1561 total_size -= priv->tx_buf_size;
1562 }
1563
1564 return 0;
1565}
1566
8ca754b1
YL
1567static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
1568 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f 1569{
8ca754b1
YL
1570 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1571 u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
46a3df9f
S
1572 int i;
1573
46a3df9f 1574 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
8ca754b1 1575 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
46a3df9f 1576
bb1fe9ea
YL
1577 priv->enable = 0;
1578 priv->wl.low = 0;
1579 priv->wl.high = 0;
1580 priv->buf_size = 0;
1581
1582 if (!(hdev->hw_tc_map & BIT(i)))
1583 continue;
1584
1585 priv->enable = 1;
46a3df9f
S
1586
1587 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
8ca754b1
YL
1588 priv->wl.low = max ? aligned_mps : 256;
1589 priv->wl.high = roundup(priv->wl.low + aligned_mps,
1590 HCLGE_BUF_SIZE_UNIT);
46a3df9f
S
1591 } else {
1592 priv->wl.low = 0;
8ca754b1 1593 priv->wl.high = max ? (aligned_mps * 2) : aligned_mps;
46a3df9f 1594 }
8ca754b1
YL
1595
1596 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
46a3df9f
S
1597 }
1598
8ca754b1
YL
1599 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1600}
46a3df9f 1601
8ca754b1
YL
1602static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1603 struct hclge_pkt_buf_alloc *buf_alloc)
1604{
1605 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1606 int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1607 int i;
46a3df9f
S
1608
1609 /* let the last to be cleared first */
1610 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
8ca754b1 1611 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
46a3df9f
S
1612
1613 if (hdev->hw_tc_map & BIT(i) &&
1614 !(hdev->tm_info.hw_pfc_map & BIT(i))) {
1615 /* Clear the no pfc TC private buffer */
1616 priv->wl.low = 0;
1617 priv->wl.high = 0;
1618 priv->buf_size = 0;
1619 priv->enable = 0;
1620 no_pfc_priv_num--;
1621 }
1622
acf61ecd 1623 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
46a3df9f
S
1624 no_pfc_priv_num == 0)
1625 break;
1626 }
1627
8ca754b1
YL
1628 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1629}
46a3df9f 1630
8ca754b1
YL
1631static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1632 struct hclge_pkt_buf_alloc *buf_alloc)
1633{
1634 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1635 int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1636 int i;
46a3df9f
S
1637
1638 /* let the last to be cleared first */
1639 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
8ca754b1 1640 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
46a3df9f
S
1641
1642 if (hdev->hw_tc_map & BIT(i) &&
1643 hdev->tm_info.hw_pfc_map & BIT(i)) {
1644 /* Reduce the number of pfc TC with private buffer */
1645 priv->wl.low = 0;
1646 priv->enable = 0;
1647 priv->wl.high = 0;
1648 priv->buf_size = 0;
1649 pfc_priv_num--;
1650 }
1651
acf61ecd 1652 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
46a3df9f
S
1653 pfc_priv_num == 0)
1654 break;
1655 }
8ca754b1
YL
1656
1657 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1658}
1659
1660/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
1661 * @hdev: pointer to struct hclge_dev
1662 * @buf_alloc: pointer to buffer calculation data
 1663 * @return: 0: calculation successful, negative: fail
1664 */
1665static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
1666 struct hclge_pkt_buf_alloc *buf_alloc)
1667{
1668 /* When DCB is not supported, rx private buffer is not allocated. */
1669 if (!hnae3_dev_dcb_supported(hdev)) {
1670 u32 rx_all = hdev->pkt_buf_size;
1671
1672 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
1673 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1674 return -ENOMEM;
1675
1676 return 0;
1677 }
1678
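	/* try successively less generous layouts: full waterlines first,
	 * then reduced waterlines, then drop private buffers of TCs without
	 * PFC, and finally drop them for PFC-enabled TCs as well
	 */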
1679 if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
1680 return 0;
1681
1682 /* try to decrease the buffer size */
1683 if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
1684 return 0;
1685
1686 if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
1687 return 0;
1688
1689 if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
46a3df9f
S
1690 return 0;
1691
1692 return -ENOMEM;
1693}
1694
acf61ecd
YL
1695static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
1696 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f 1697{
d44f9b63 1698 struct hclge_rx_priv_buff_cmd *req;
46a3df9f
S
1699 struct hclge_desc desc;
1700 int ret;
1701 int i;
1702
1703 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
d44f9b63 1704 req = (struct hclge_rx_priv_buff_cmd *)desc.data;
46a3df9f
S
1705
1706 /* Alloc private buffer TCs */
1707 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
acf61ecd 1708 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
46a3df9f
S
1709
1710 req->buf_num[i] =
1711 cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
1712 req->buf_num[i] |=
5bca3b94 1713 cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
46a3df9f
S
1714 }
1715
b8c8bf47 1716 req->shared_buf =
acf61ecd 1717 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
b8c8bf47
YL
1718 (1 << HCLGE_TC0_PRI_BUF_EN_B));
1719
46a3df9f 1720 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 1721 if (ret)
46a3df9f
S
1722 dev_err(&hdev->pdev->dev,
1723 "rx private buffer alloc cmd failed %d\n", ret);
46a3df9f 1724
3f639907 1725 return ret;
46a3df9f
S
1726}
1727
acf61ecd
YL
1728static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
1729 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f
S
1730{
1731 struct hclge_rx_priv_wl_buf *req;
1732 struct hclge_priv_buf *priv;
1733 struct hclge_desc desc[2];
1734 int i, j;
1735 int ret;
1736
1737 for (i = 0; i < 2; i++) {
1738 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
1739 false);
1740 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
1741
1742 /* The first descriptor set the NEXT bit to 1 */
1743 if (i == 0)
1744 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1745 else
1746 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1747
1748 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
acf61ecd
YL
1749 u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
1750
1751 priv = &buf_alloc->priv_buf[idx];
46a3df9f
S
1752 req->tc_wl[j].high =
1753 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
1754 req->tc_wl[j].high |=
3738287c 1755 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
46a3df9f
S
1756 req->tc_wl[j].low =
1757 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
1758 req->tc_wl[j].low |=
3738287c 1759 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
46a3df9f
S
1760 }
1761 }
1762
1763 /* Send 2 descriptors at one time */
1764 ret = hclge_cmd_send(&hdev->hw, desc, 2);
3f639907 1765 if (ret)
46a3df9f
S
1766 dev_err(&hdev->pdev->dev,
1767 "rx private waterline config cmd failed %d\n",
1768 ret);
3f639907 1769 return ret;
46a3df9f
S
1770}
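/* Note: the waterline configuration above (and the common threshold
 * configuration below) uses a chain of two descriptors linked via
 * HCLGE_CMD_FLAG_NEXT, each carrying HCLGE_TC_NUM_ONE_DESC TCs, so both
 * halves are submitted in a single hclge_cmd_send() call.
 */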
1771
acf61ecd
YL
1772static int hclge_common_thrd_config(struct hclge_dev *hdev,
1773 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f 1774{
acf61ecd 1775 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
46a3df9f
S
1776 struct hclge_rx_com_thrd *req;
1777 struct hclge_desc desc[2];
1778 struct hclge_tc_thrd *tc;
1779 int i, j;
1780 int ret;
1781
1782 for (i = 0; i < 2; i++) {
1783 hclge_cmd_setup_basic_desc(&desc[i],
1784 HCLGE_OPC_RX_COM_THRD_ALLOC, false);
1785 req = (struct hclge_rx_com_thrd *)&desc[i].data;
1786
1787 /* The first descriptor sets the NEXT bit to 1 */
1788 if (i == 0)
1789 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1790 else
1791 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1792
1793 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1794 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
1795
1796 req->com_thrd[j].high =
1797 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
1798 req->com_thrd[j].high |=
3738287c 1799 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
46a3df9f
S
1800 req->com_thrd[j].low =
1801 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
1802 req->com_thrd[j].low |=
3738287c 1803 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
46a3df9f
S
1804 }
1805 }
1806
1807 /* Send 2 descriptors at one time */
1808 ret = hclge_cmd_send(&hdev->hw, desc, 2);
3f639907 1809 if (ret)
46a3df9f
S
1810 dev_err(&hdev->pdev->dev,
1811 "common threshold config cmd failed %d\n", ret);
3f639907 1812 return ret;
46a3df9f
S
1813}
1814
acf61ecd
YL
1815static int hclge_common_wl_config(struct hclge_dev *hdev,
1816 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f 1817{
acf61ecd 1818 struct hclge_shared_buf *buf = &buf_alloc->s_buf;
46a3df9f
S
1819 struct hclge_rx_com_wl *req;
1820 struct hclge_desc desc;
1821 int ret;
1822
1823 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
1824
1825 req = (struct hclge_rx_com_wl *)desc.data;
1826 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
3738287c 1827 req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
46a3df9f
S
1828
1829 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
3738287c 1830 req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
46a3df9f
S
1831
1832 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 1833 if (ret)
46a3df9f
S
1834 dev_err(&hdev->pdev->dev,
1835 "common waterline config cmd failed %d\n", ret);
46a3df9f 1836
3f639907 1837 return ret;
46a3df9f
S
1838}
1839
1840int hclge_buffer_alloc(struct hclge_dev *hdev)
1841{
acf61ecd 1842 struct hclge_pkt_buf_alloc *pkt_buf;
46a3df9f
S
1843 int ret;
1844
acf61ecd
YL
1845 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
1846 if (!pkt_buf)
46a3df9f
S
1847 return -ENOMEM;
1848
acf61ecd 1849 ret = hclge_tx_buffer_calc(hdev, pkt_buf);
9ffe79a9
YL
1850 if (ret) {
1851 dev_err(&hdev->pdev->dev,
1852 "could not calc tx buffer size for all TCs %d\n", ret);
acf61ecd 1853 goto out;
9ffe79a9
YL
1854 }
1855
acf61ecd 1856 ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
46a3df9f
S
1857 if (ret) {
1858 dev_err(&hdev->pdev->dev,
1859 "could not alloc tx buffers %d\n", ret);
acf61ecd 1860 goto out;
46a3df9f
S
1861 }
1862
acf61ecd 1863 ret = hclge_rx_buffer_calc(hdev, pkt_buf);
46a3df9f
S
1864 if (ret) {
1865 dev_err(&hdev->pdev->dev,
1866 "could not calc rx priv buffer size for all TCs %d\n",
1867 ret);
acf61ecd 1868 goto out;
46a3df9f
S
1869 }
1870
acf61ecd 1871 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
46a3df9f
S
1872 if (ret) {
1873 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
1874 ret);
acf61ecd 1875 goto out;
46a3df9f
S
1876 }
1877
2daf4a65 1878 if (hnae3_dev_dcb_supported(hdev)) {
acf61ecd 1879 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2daf4a65
YL
1880 if (ret) {
1881 dev_err(&hdev->pdev->dev,
1882 "could not configure rx private waterline %d\n",
1883 ret);
acf61ecd 1884 goto out;
2daf4a65 1885 }
46a3df9f 1886
acf61ecd 1887 ret = hclge_common_thrd_config(hdev, pkt_buf);
2daf4a65
YL
1888 if (ret) {
1889 dev_err(&hdev->pdev->dev,
1890 "could not configure common threshold %d\n",
1891 ret);
acf61ecd 1892 goto out;
2daf4a65 1893 }
46a3df9f
S
1894 }
1895
acf61ecd
YL
1896 ret = hclge_common_wl_config(hdev, pkt_buf);
1897 if (ret)
46a3df9f
S
1898 dev_err(&hdev->pdev->dev,
1899 "could not configure common waterline %d\n", ret);
46a3df9f 1900
acf61ecd
YL
1901out:
1902 kfree(pkt_buf);
1903 return ret;
46a3df9f
S
1904}
1905
1906static int hclge_init_roce_base_info(struct hclge_vport *vport)
1907{
1908 struct hnae3_handle *roce = &vport->roce;
1909 struct hnae3_handle *nic = &vport->nic;
1910
887c3820 1911 roce->rinfo.num_vectors = vport->back->num_roce_msi;
46a3df9f
S
1912
1913 if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
1914 vport->back->num_msi_left == 0)
1915 return -EINVAL;
1916
1917 roce->rinfo.base_vector = vport->back->roce_base_vector;
1918
1919 roce->rinfo.netdev = nic->kinfo.netdev;
1920 roce->rinfo.roce_io_base = vport->back->hw.io_base;
1921
1922 roce->pdev = nic->pdev;
1923 roce->ae_algo = nic->ae_algo;
1924 roce->numa_node_mask = nic->numa_node_mask;
1925
1926 return 0;
1927}
1928
887c3820 1929static int hclge_init_msi(struct hclge_dev *hdev)
46a3df9f
S
1930{
1931 struct pci_dev *pdev = hdev->pdev;
887c3820
SM
1932 int vectors;
1933 int i;
46a3df9f 1934
887c3820
SM
1935 vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
1936 PCI_IRQ_MSI | PCI_IRQ_MSIX);
1937 if (vectors < 0) {
1938 dev_err(&pdev->dev,
1939 "failed(%d) to allocate MSI/MSI-X vectors\n",
1940 vectors);
1941 return vectors;
46a3df9f 1942 }
887c3820
SM
1943 if (vectors < hdev->num_msi)
1944 dev_warn(&hdev->pdev->dev,
1945 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
1946 hdev->num_msi, vectors);
46a3df9f 1947
887c3820
SM
1948 hdev->num_msi = vectors;
1949 hdev->num_msi_left = vectors;
1950 hdev->base_msi_vector = pdev->irq;
46a3df9f 1951 hdev->roce_base_vector = hdev->base_msi_vector +
375dd5e4 1952 hdev->roce_base_msix_offset;
46a3df9f 1953
46a3df9f
S
1954 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
1955 sizeof(u16), GFP_KERNEL);
887c3820
SM
1956 if (!hdev->vector_status) {
1957 pci_free_irq_vectors(pdev);
46a3df9f 1958 return -ENOMEM;
887c3820 1959 }
46a3df9f
S
1960
1961 for (i = 0; i < hdev->num_msi; i++)
1962 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
1963
887c3820
SM
1964 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
1965 sizeof(int), GFP_KERNEL);
1966 if (!hdev->vector_irq) {
1967 pci_free_irq_vectors(pdev);
1968 return -ENOMEM;
46a3df9f 1969 }
46a3df9f
S
1970
1971 return 0;
1972}
1973
2d03eacc 1974static u8 hclge_check_speed_dup(u8 duplex, int speed)
46a3df9f 1975{
46a3df9f 1976
2d03eacc
YL
1977 if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
1978 duplex = HCLGE_MAC_FULL;
46a3df9f 1979
2d03eacc 1980 return duplex;
46a3df9f
S
1981}
1982
2d03eacc
YL
1983static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
1984 u8 duplex)
46a3df9f 1985{
d44f9b63 1986 struct hclge_config_mac_speed_dup_cmd *req;
46a3df9f
S
1987 struct hclge_desc desc;
1988 int ret;
1989
d44f9b63 1990 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
46a3df9f
S
1991
1992 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
1993
e4e87715 1994 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
46a3df9f
S
1995
1996 switch (speed) {
1997 case HCLGE_MAC_SPEED_10M:
e4e87715
PL
1998 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1999 HCLGE_CFG_SPEED_S, 6);
46a3df9f
S
2000 break;
2001 case HCLGE_MAC_SPEED_100M:
e4e87715
PL
2002 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2003 HCLGE_CFG_SPEED_S, 7);
46a3df9f
S
2004 break;
2005 case HCLGE_MAC_SPEED_1G:
e4e87715
PL
2006 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2007 HCLGE_CFG_SPEED_S, 0);
46a3df9f
S
2008 break;
2009 case HCLGE_MAC_SPEED_10G:
e4e87715
PL
2010 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2011 HCLGE_CFG_SPEED_S, 1);
46a3df9f
S
2012 break;
2013 case HCLGE_MAC_SPEED_25G:
e4e87715
PL
2014 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2015 HCLGE_CFG_SPEED_S, 2);
46a3df9f
S
2016 break;
2017 case HCLGE_MAC_SPEED_40G:
e4e87715
PL
2018 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2019 HCLGE_CFG_SPEED_S, 3);
46a3df9f
S
2020 break;
2021 case HCLGE_MAC_SPEED_50G:
e4e87715
PL
2022 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2023 HCLGE_CFG_SPEED_S, 4);
46a3df9f
S
2024 break;
2025 case HCLGE_MAC_SPEED_100G:
e4e87715
PL
2026 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2027 HCLGE_CFG_SPEED_S, 5);
46a3df9f
S
2028 break;
2029 default:
d7629e74 2030 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
46a3df9f
S
2031 return -EINVAL;
2032 }
2033
e4e87715
PL
2034 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2035 1);
46a3df9f
S
2036
2037 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2038 if (ret) {
2039 dev_err(&hdev->pdev->dev,
2040 "mac speed/duplex config cmd failed %d.\n", ret);
2041 return ret;
2042 }
2043
2d03eacc
YL
2044 return 0;
2045}
2046
2047int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2048{
2049 int ret;
2050
2051 duplex = hclge_check_speed_dup(duplex, speed);
2052 if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2053 return 0;
2054
2055 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2056 if (ret)
2057 return ret;
2058
2059 hdev->hw.mac.speed = speed;
2060 hdev->hw.mac.duplex = duplex;
46a3df9f
S
2061
2062 return 0;
2063}
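/* Note: the speed/duplex helpers above encode the speed as a firmware-defined
 * index in the command (1G=0, 10G=1, 25G=2, 40G=3, 50G=4, 100G=5, 10M=6,
 * 100M=7), and hclge_check_speed_dup() forces full duplex for every speed
 * other than 10M/100M before the cached speed/duplex is compared and updated.
 */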
2064
2065static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2066 u8 duplex)
2067{
2068 struct hclge_vport *vport = hclge_get_vport(handle);
2069 struct hclge_dev *hdev = vport->back;
2070
2071 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2072}
2073
46a3df9f
S
2074static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2075{
d44f9b63 2076 struct hclge_config_auto_neg_cmd *req;
46a3df9f 2077 struct hclge_desc desc;
a90bb9a5 2078 u32 flag = 0;
46a3df9f
S
2079 int ret;
2080
2081 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2082
d44f9b63 2083 req = (struct hclge_config_auto_neg_cmd *)desc.data;
e4e87715 2084 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
a90bb9a5 2085 req->cfg_an_cmd_flag = cpu_to_le32(flag);
46a3df9f
S
2086
2087 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 2088 if (ret)
46a3df9f
S
2089 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2090 ret);
46a3df9f 2091
3f639907 2092 return ret;
46a3df9f
S
2093}
2094
2095static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2096{
2097 struct hclge_vport *vport = hclge_get_vport(handle);
2098 struct hclge_dev *hdev = vport->back;
2099
2100 return hclge_set_autoneg_en(hdev, enable);
2101}
2102
2103static int hclge_get_autoneg(struct hnae3_handle *handle)
2104{
2105 struct hclge_vport *vport = hclge_get_vport(handle);
2106 struct hclge_dev *hdev = vport->back;
27b5bf49
FL
2107 struct phy_device *phydev = hdev->hw.mac.phydev;
2108
2109 if (phydev)
2110 return phydev->autoneg;
46a3df9f
S
2111
2112 return hdev->hw.mac.autoneg;
2113}
2114
2115static int hclge_mac_init(struct hclge_dev *hdev)
2116{
2117 struct hclge_mac *mac = &hdev->hw.mac;
2118 int ret;
2119
5d497936 2120 hdev->support_sfp_query = true;
2d03eacc
YL
2121 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2122 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2123 hdev->hw.mac.duplex);
46a3df9f
S
2124 if (ret) {
2125 dev_err(&hdev->pdev->dev,
2126 "Config mac speed dup fail ret=%d\n", ret);
2127 return ret;
2128 }
2129
2130 mac->link = 0;
2131
e6d7d79d
YL
2132 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2133 if (ret) {
2134 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2135 return ret;
2136 }
f9fd82a9 2137
e6d7d79d 2138 ret = hclge_buffer_alloc(hdev);
3f639907 2139 if (ret)
f9fd82a9 2140 dev_err(&hdev->pdev->dev,
e6d7d79d 2141 "allocate buffer fail, ret=%d\n", ret);
f9fd82a9 2142
3f639907 2143 return ret;
46a3df9f
S
2144}
2145
c1a81619
SM
2146static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2147{
18e24888
HT
2148 if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
2149 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
c1a81619
SM
2150 schedule_work(&hdev->mbx_service_task);
2151}
2152
cb1b9f77
SM
2153static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2154{
2155 if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2156 schedule_work(&hdev->rst_service_task);
2157}
2158
46a3df9f
S
2159static void hclge_task_schedule(struct hclge_dev *hdev)
2160{
2161 if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2162 !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2163 !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2164 (void)schedule_work(&hdev->service_task);
2165}
2166
2167static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2168{
d44f9b63 2169 struct hclge_link_status_cmd *req;
46a3df9f
S
2170 struct hclge_desc desc;
2171 int link_status;
2172 int ret;
2173
2174 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2175 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2176 if (ret) {
2177 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2178 ret);
2179 return ret;
2180 }
2181
d44f9b63 2182 req = (struct hclge_link_status_cmd *)desc.data;
c79301d8 2183 link_status = req->status & HCLGE_LINK_STATUS_UP_M;
46a3df9f
S
2184
2185 return !!link_status;
2186}
2187
2188static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2189{
2190 int mac_state;
2191 int link_stat;
2192
582d37bb
PL
2193 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2194 return 0;
2195
46a3df9f
S
2196 mac_state = hclge_get_mac_link_status(hdev);
2197
2198 if (hdev->hw.mac.phydev) {
fd813314 2199 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
46a3df9f
S
2200 link_stat = mac_state &
2201 hdev->hw.mac.phydev->link;
2202 else
2203 link_stat = 0;
2204
2205 } else {
2206 link_stat = mac_state;
2207 }
2208
2209 return !!link_stat;
2210}
2211
2212static void hclge_update_link_status(struct hclge_dev *hdev)
2213{
45e92b7e 2214 struct hnae3_client *rclient = hdev->roce_client;
46a3df9f 2215 struct hnae3_client *client = hdev->nic_client;
45e92b7e 2216 struct hnae3_handle *rhandle;
46a3df9f
S
2217 struct hnae3_handle *handle;
2218 int state;
2219 int i;
2220
2221 if (!client)
2222 return;
2223 state = hclge_get_mac_phy_link(hdev);
2224 if (state != hdev->hw.mac.link) {
2225 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2226 handle = &hdev->vport[i].nic;
2227 client->ops->link_status_change(handle, state);
45e92b7e
PL
2228 rhandle = &hdev->vport[i].roce;
2229 if (rclient && rclient->ops->link_status_change)
2230 rclient->ops->link_status_change(rhandle,
2231 state);
46a3df9f
S
2232 }
2233 hdev->hw.mac.link = state;
2234 }
2235}
2236
5d497936
PL
2237static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2238{
2239 struct hclge_sfp_speed_cmd *resp = NULL;
2240 struct hclge_desc desc;
2241 int ret;
2242
2243 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SFP_GET_SPEED, true);
2244 resp = (struct hclge_sfp_speed_cmd *)desc.data;
2245 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2246 if (ret == -EOPNOTSUPP) {
2247 dev_warn(&hdev->pdev->dev,
2248 "IMP do not support get SFP speed %d\n", ret);
2249 return ret;
2250 } else if (ret) {
2251 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2252 return ret;
2253 }
2254
2255 *speed = resp->sfp_speed;
2256
2257 return 0;
2258}
2259
46a3df9f
S
2260static int hclge_update_speed_duplex(struct hclge_dev *hdev)
2261{
2262 struct hclge_mac mac = hdev->hw.mac;
46a3df9f
S
2263 int speed;
2264 int ret;
2265
5d497936 2266 /* get the speed from SFP cmd when phy
46a3df9f
S
2267 * doesn't exist.
2268 */
5d497936 2269 if (mac.phydev)
46a3df9f
S
2270 return 0;
2271
5d497936
PL
2272 /* if IMP does not support get SFP/qSFP speed, return directly */
2273 if (!hdev->support_sfp_query)
2274 return 0;
46a3df9f 2275
5d497936
PL
2276 ret = hclge_get_sfp_speed(hdev, &speed);
2277 if (ret == -EOPNOTSUPP) {
2278 hdev->support_sfp_query = false;
2279 return ret;
2280 } else if (ret) {
2d03eacc 2281 return ret;
46a3df9f
S
2282 }
2283
5d497936
PL
2284 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2285 return 0; /* do nothing if no SFP */
2286
2287 /* must config full duplex for SFP */
2288 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
46a3df9f
S
2289}
2290
2291static int hclge_update_speed_duplex_h(struct hnae3_handle *handle)
2292{
2293 struct hclge_vport *vport = hclge_get_vport(handle);
2294 struct hclge_dev *hdev = vport->back;
2295
2296 return hclge_update_speed_duplex(hdev);
2297}
2298
2299static int hclge_get_status(struct hnae3_handle *handle)
2300{
2301 struct hclge_vport *vport = hclge_get_vport(handle);
2302 struct hclge_dev *hdev = vport->back;
2303
2304 hclge_update_link_status(hdev);
2305
2306 return hdev->hw.mac.link;
2307}
2308
d039ef68 2309static void hclge_service_timer(struct timer_list *t)
46a3df9f 2310{
d039ef68 2311 struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
46a3df9f 2312
d039ef68 2313 mod_timer(&hdev->service_timer, jiffies + HZ);
c5f65480 2314 hdev->hw_stats.stats_timer++;
46a3df9f
S
2315 hclge_task_schedule(hdev);
2316}
2317
2318static void hclge_service_complete(struct hclge_dev *hdev)
2319{
2320 WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
2321
2322 /* Flush memory before next watchdog */
2323 smp_mb__before_atomic();
2324 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
2325}
2326
ca1d7669
SM
2327static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2328{
f6162d44 2329 u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
ca1d7669
SM
2330
2331 /* fetch the events from their corresponding regs */
9ca8d1a7 2332 rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
c1a81619 2333 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
f6162d44
SM
2334 msix_src_reg = hclge_read_dev(&hdev->hw,
2335 HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
c1a81619
SM
2336
2337 /* Assumption: If by any chance reset and mailbox events are reported
2338 * together then we will only process reset event in this go and will
2339 * defer the processing of the mailbox events. Since we would not have
2340 * cleared the RX CMDQ event this time, we would receive another
2341 * interrupt from H/W just for the mailbox.
2342 */
ca1d7669
SM
2343
2344 /* check for vector0 reset event sources */
6dd22bbc
HT
2345 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2346 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2347 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2348 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2349 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2350 return HCLGE_VECTOR0_EVENT_RST;
2351 }
2352
ca1d7669 2353 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
65e41e7e 2354 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
8d40854f 2355 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
ca1d7669
SM
2356 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2357 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2358 return HCLGE_VECTOR0_EVENT_RST;
2359 }
2360
2361 if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
65e41e7e 2362 dev_info(&hdev->pdev->dev, "core reset interrupt\n");
8d40854f 2363 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
ca1d7669
SM
2364 set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
2365 *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2366 return HCLGE_VECTOR0_EVENT_RST;
2367 }
2368
f6162d44
SM
2369 /* check for vector0 msix event source */
2370 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK)
2371 return HCLGE_VECTOR0_EVENT_ERR;
2372
c1a81619
SM
2373 /* check for vector0 mailbox(=CMDQ RX) event source */
2374 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2375 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2376 *clearval = cmdq_src_reg;
2377 return HCLGE_VECTOR0_EVENT_MBX;
2378 }
ca1d7669
SM
2379
2380 return HCLGE_VECTOR0_EVENT_OTHER;
2381}
2382
2383static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2384 u32 regclr)
2385{
c1a81619
SM
2386 switch (event_type) {
2387 case HCLGE_VECTOR0_EVENT_RST:
ca1d7669 2388 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
c1a81619
SM
2389 break;
2390 case HCLGE_VECTOR0_EVENT_MBX:
2391 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2392 break;
fa7a4bd5
JS
2393 default:
2394 break;
c1a81619 2395 }
ca1d7669
SM
2396}
2397
8e52a602
XW
2398static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2399{
2400 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2401 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2402 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2403 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2404 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
2405}
2406
466b0c00
L
2407static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2408{
2409 writel(enable ? 1 : 0, vector->addr);
2410}
2411
2412static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2413{
2414 struct hclge_dev *hdev = data;
ca1d7669
SM
2415 u32 event_cause;
2416 u32 clearval;
466b0c00
L
2417
2418 hclge_enable_vector(&hdev->misc_vector, false);
ca1d7669
SM
2419 event_cause = hclge_check_event_cause(hdev, &clearval);
2420
c1a81619 2421 /* vector 0 interrupt is shared with reset and mailbox source events. */
ca1d7669 2422 switch (event_cause) {
f6162d44
SM
2423 case HCLGE_VECTOR0_EVENT_ERR:
2424 /* we do not know what type of reset is required now. This could
2425 * only be decided after we fetch the type of errors which
2426 * caused this event. Therefore, we will do below for now:
2427 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
2428 * have deferred type of reset to be used.
2429 * 2. Schedule the reset service task.
2430 * 3. When service task receives HNAE3_UNKNOWN_RESET type it
2431 * will fetch the correct type of reset. This would be done
2432 * by first decoding the types of errors.
2433 */
2434 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
2435 /* fall through */
ca1d7669 2436 case HCLGE_VECTOR0_EVENT_RST:
cb1b9f77 2437 hclge_reset_task_schedule(hdev);
ca1d7669 2438 break;
c1a81619
SM
2439 case HCLGE_VECTOR0_EVENT_MBX:
2440 /* If we are here then,
2441 * 1. Either we are not handling any mbx task and we are not
2442 * scheduled as well
2443 * OR
2444 * 2. We could be handling a mbx task but nothing more is
2445 * scheduled.
2446 * In both cases, we should schedule mbx task as there are more
2447 * mbx messages reported by this interrupt.
2448 */
2449 hclge_mbx_task_schedule(hdev);
f0ad97ac 2450 break;
ca1d7669 2451 default:
f0ad97ac
YL
2452 dev_warn(&hdev->pdev->dev,
2453 "received unknown or unhandled event of vector0\n");
ca1d7669
SM
2454 break;
2455 }
2456
cd8c5c26 2457 /* clear the source of interrupt if it is not caused by reset */
0d441140 2458 if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
cd8c5c26
YL
2459 hclge_clear_event_cause(hdev, event_cause, clearval);
2460 hclge_enable_vector(&hdev->misc_vector, true);
2461 }
466b0c00
L
2462
2463 return IRQ_HANDLED;
2464}
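/* Note: the misc vector is masked on entry above and re-enabled here only for
 * mailbox events; for reset/error events it stays masked until the reset
 * handling path clears the cause and re-enables it (for example in
 * hclge_clear_reset_cause()).
 */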
2465
2466static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2467{
36cbbdf6
PL
2468 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
2469 dev_warn(&hdev->pdev->dev,
2470 "vector(vector_id %d) has been freed.\n", vector_id);
2471 return;
2472 }
2473
466b0c00
L
2474 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
2475 hdev->num_msi_left += 1;
2476 hdev->num_msi_used -= 1;
2477}
2478
2479static void hclge_get_misc_vector(struct hclge_dev *hdev)
2480{
2481 struct hclge_misc_vector *vector = &hdev->misc_vector;
2482
2483 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
2484
2485 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
2486 hdev->vector_status[0] = 0;
2487
2488 hdev->num_msi_left -= 1;
2489 hdev->num_msi_used += 1;
2490}
2491
2492static int hclge_misc_irq_init(struct hclge_dev *hdev)
2493{
2494 int ret;
2495
2496 hclge_get_misc_vector(hdev);
2497
ca1d7669
SM
2498 /* this would be explicitly freed in the end */
2499 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
2500 0, "hclge_misc", hdev);
466b0c00
L
2501 if (ret) {
2502 hclge_free_vector(hdev, 0);
2503 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
2504 hdev->misc_vector.vector_irq);
2505 }
2506
2507 return ret;
2508}
2509
ca1d7669
SM
2510static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
2511{
2512 free_irq(hdev->misc_vector.vector_irq, hdev);
2513 hclge_free_vector(hdev, 0);
2514}
2515
af013903
HT
2516int hclge_notify_client(struct hclge_dev *hdev,
2517 enum hnae3_reset_notify_type type)
4ed340ab
L
2518{
2519 struct hnae3_client *client = hdev->nic_client;
2520 u16 i;
2521
2522 if (!client->ops->reset_notify)
2523 return -EOPNOTSUPP;
2524
2525 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2526 struct hnae3_handle *handle = &hdev->vport[i].nic;
2527 int ret;
2528
2529 ret = client->ops->reset_notify(handle, type);
65e41e7e
HT
2530 if (ret) {
2531 dev_err(&hdev->pdev->dev,
2532 "notify nic client failed %d(%d)\n", type, ret);
4ed340ab 2533 return ret;
65e41e7e 2534 }
4ed340ab
L
2535 }
2536
2537 return 0;
2538}
2539
f403a84f
HT
2540static int hclge_notify_roce_client(struct hclge_dev *hdev,
2541 enum hnae3_reset_notify_type type)
2542{
2543 struct hnae3_client *client = hdev->roce_client;
2544 int ret = 0;
2545 u16 i;
2546
2547 if (!client)
2548 return 0;
2549
2550 if (!client->ops->reset_notify)
2551 return -EOPNOTSUPP;
2552
2553 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2554 struct hnae3_handle *handle = &hdev->vport[i].roce;
2555
2556 ret = client->ops->reset_notify(handle, type);
2557 if (ret) {
2558 dev_err(&hdev->pdev->dev,
2559 "notify roce client failed %d(%d)",
2560 type, ret);
2561 return ret;
2562 }
2563 }
2564
2565 return ret;
2566}
2567
4ed340ab
L
2568static int hclge_reset_wait(struct hclge_dev *hdev)
2569{
2570#define HCLGE_RESET_WATI_MS 100
6dd22bbc 2571#define HCLGE_RESET_WAIT_CNT 200
4ed340ab
L
2572 u32 val, reg, reg_bit;
2573 u32 cnt = 0;
2574
2575 switch (hdev->reset_type) {
6dd22bbc
HT
2576 case HNAE3_IMP_RESET:
2577 reg = HCLGE_GLOBAL_RESET_REG;
2578 reg_bit = HCLGE_IMP_RESET_BIT;
2579 break;
4ed340ab
L
2580 case HNAE3_GLOBAL_RESET:
2581 reg = HCLGE_GLOBAL_RESET_REG;
2582 reg_bit = HCLGE_GLOBAL_RESET_BIT;
2583 break;
2584 case HNAE3_CORE_RESET:
2585 reg = HCLGE_GLOBAL_RESET_REG;
2586 reg_bit = HCLGE_CORE_RESET_BIT;
2587 break;
2588 case HNAE3_FUNC_RESET:
2589 reg = HCLGE_FUN_RST_ING;
2590 reg_bit = HCLGE_FUN_RST_ING_B;
2591 break;
6b9a97ee
HT
2592 case HNAE3_FLR_RESET:
2593 break;
4ed340ab
L
2594 default:
2595 dev_err(&hdev->pdev->dev,
2596 "Wait for unsupported reset type: %d\n",
2597 hdev->reset_type);
2598 return -EINVAL;
2599 }
2600
6b9a97ee
HT
2601 if (hdev->reset_type == HNAE3_FLR_RESET) {
2602 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
2603 cnt++ < HCLGE_RESET_WAIT_CNT)
2604 msleep(HCLGE_RESET_WATI_MS);
2605
2606 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
2607 dev_err(&hdev->pdev->dev,
2608 "flr wait timeout: %d\n", cnt);
2609 return -EBUSY;
2610 }
2611
2612 return 0;
2613 }
2614
4ed340ab 2615 val = hclge_read_dev(&hdev->hw, reg);
e4e87715 2616 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
4ed340ab
L
2617 msleep(HCLGE_RESET_WATI_MS);
2618 val = hclge_read_dev(&hdev->hw, reg);
2619 cnt++;
2620 }
2621
4ed340ab
L
2622 if (cnt >= HCLGE_RESET_WAIT_CNT) {
2623 dev_warn(&hdev->pdev->dev,
2624 "Wait for reset timeout: %d\n", hdev->reset_type);
2625 return -EBUSY;
2626 }
2627
2628 return 0;
2629}
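/* Note: with HCLGE_RESET_WAIT_CNT = 200 and HCLGE_RESET_WATI_MS = 100, the
 * polling above waits up to roughly 200 * 100 ms = 20 seconds for the reset
 * or FLR done indication before giving up with -EBUSY.
 */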
2630
aa5c4f17
HT
2631static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
2632{
2633 struct hclge_vf_rst_cmd *req;
2634 struct hclge_desc desc;
2635
2636 req = (struct hclge_vf_rst_cmd *)desc.data;
2637 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
2638 req->dest_vfid = func_id;
2639
2640 if (reset)
2641 req->vf_rst = 0x1;
2642
2643 return hclge_cmd_send(&hdev->hw, &desc, 1);
2644}
2645
e511f17b 2646static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
aa5c4f17
HT
2647{
2648 int i;
2649
2650 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
2651 struct hclge_vport *vport = &hdev->vport[i];
2652 int ret;
2653
2654 /* Send cmd to set/clear VF's FUNC_RST_ING */
2655 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
2656 if (ret) {
2657 dev_err(&hdev->pdev->dev,
790cd1a8 2658 "set vf(%d) rst failed %d!\n",
aa5c4f17
HT
2659 vport->vport_id, ret);
2660 return ret;
2661 }
2662
cc645dfa 2663 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
aa5c4f17
HT
2664 continue;
2665
2666 /* Inform VF to process the reset.
2667 * hclge_inform_reset_assert_to_vf may fail if VF
2668 * driver is not loaded.
2669 */
2670 ret = hclge_inform_reset_assert_to_vf(vport);
2671 if (ret)
2672 dev_warn(&hdev->pdev->dev,
790cd1a8 2673 "inform reset to vf(%d) failed %d!\n",
aa5c4f17
HT
2674 vport->vport_id, ret);
2675 }
2676
2677 return 0;
2678}
2679
2bfbd35d 2680int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
4ed340ab
L
2681{
2682 struct hclge_desc desc;
2683 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
2684 int ret;
2685
2686 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
e4e87715 2687 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
4ed340ab
L
2688 req->fun_reset_vfid = func_id;
2689
2690 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2691 if (ret)
2692 dev_err(&hdev->pdev->dev,
2693 "send function reset cmd fail, status =%d\n", ret);
2694
2695 return ret;
2696}
2697
f2f432f2 2698static void hclge_do_reset(struct hclge_dev *hdev)
4ed340ab 2699{
4f765d3e 2700 struct hnae3_handle *handle = &hdev->vport[0].nic;
4ed340ab
L
2701 struct pci_dev *pdev = hdev->pdev;
2702 u32 val;
2703
4f765d3e
HT
2704 if (hclge_get_hw_reset_stat(handle)) {
2705 dev_info(&pdev->dev, "Hardware reset not finish\n");
2706 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
2707 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
2708 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
2709 return;
2710 }
2711
f2f432f2 2712 switch (hdev->reset_type) {
4ed340ab
L
2713 case HNAE3_GLOBAL_RESET:
2714 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
e4e87715 2715 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
4ed340ab
L
2716 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2717 dev_info(&pdev->dev, "Global Reset requested\n");
2718 break;
2719 case HNAE3_CORE_RESET:
2720 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
e4e87715 2721 hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
4ed340ab
L
2722 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2723 dev_info(&pdev->dev, "Core Reset requested\n");
2724 break;
2725 case HNAE3_FUNC_RESET:
2726 dev_info(&pdev->dev, "PF Reset requested\n");
cb1b9f77
SM
2727 /* schedule again to check later */
2728 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
2729 hclge_reset_task_schedule(hdev);
4ed340ab 2730 break;
6b9a97ee
HT
2731 case HNAE3_FLR_RESET:
2732 dev_info(&pdev->dev, "FLR requested\n");
2733 /* schedule again to check later */
2734 set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
2735 hclge_reset_task_schedule(hdev);
2736 break;
4ed340ab
L
2737 default:
2738 dev_warn(&pdev->dev,
f2f432f2 2739 "Unsupported reset type: %d\n", hdev->reset_type);
4ed340ab
L
2740 break;
2741 }
2742}
2743
f2f432f2
SM
2744static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
2745 unsigned long *addr)
2746{
2747 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
2748
f6162d44
SM
2749 /* first, resolve any unknown reset type to the known type(s) */
2750 if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
2751 /* we will intentionally ignore any errors from this function
2752 * as we will end up in *some* reset request in any case
2753 */
2754 hclge_handle_hw_msix_error(hdev, addr);
2755 clear_bit(HNAE3_UNKNOWN_RESET, addr);
2756 /* We deferred the clearing of the error event which caused the
2757 * interrupt, since it was not possible to do that in
2758 * interrupt context (and this is the reason we introduced the
2759 * new UNKNOWN reset type). Now that the errors have been
2760 * handled and cleared in hardware, we can safely enable
2761 * interrupts. This is an exception to the norm.
2762 */
2763 hclge_enable_vector(&hdev->misc_vector, true);
2764 }
2765
f2f432f2 2766 /* return the highest priority reset level amongst all */
7cea834d
HT
2767 if (test_bit(HNAE3_IMP_RESET, addr)) {
2768 rst_level = HNAE3_IMP_RESET;
2769 clear_bit(HNAE3_IMP_RESET, addr);
2770 clear_bit(HNAE3_GLOBAL_RESET, addr);
2771 clear_bit(HNAE3_CORE_RESET, addr);
2772 clear_bit(HNAE3_FUNC_RESET, addr);
2773 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
f2f432f2 2774 rst_level = HNAE3_GLOBAL_RESET;
7cea834d
HT
2775 clear_bit(HNAE3_GLOBAL_RESET, addr);
2776 clear_bit(HNAE3_CORE_RESET, addr);
2777 clear_bit(HNAE3_FUNC_RESET, addr);
2778 } else if (test_bit(HNAE3_CORE_RESET, addr)) {
f2f432f2 2779 rst_level = HNAE3_CORE_RESET;
7cea834d
HT
2780 clear_bit(HNAE3_CORE_RESET, addr);
2781 clear_bit(HNAE3_FUNC_RESET, addr);
2782 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
f2f432f2 2783 rst_level = HNAE3_FUNC_RESET;
7cea834d 2784 clear_bit(HNAE3_FUNC_RESET, addr);
6b9a97ee
HT
2785 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
2786 rst_level = HNAE3_FLR_RESET;
2787 clear_bit(HNAE3_FLR_RESET, addr);
7cea834d 2788 }
f2f432f2 2789
0fdf4d30
HT
2790 if (hdev->reset_type != HNAE3_NONE_RESET &&
2791 rst_level < hdev->reset_type)
2792 return HNAE3_NONE_RESET;
2793
f2f432f2
SM
2794 return rst_level;
2795}
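/* Note: pending reset levels are resolved above in priority order IMP >
 * global > core > func > FLR, and picking a higher level clears the
 * lower-level bits, so only the strongest outstanding reset is performed.
 */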
2796
cd8c5c26
YL
2797static void hclge_clear_reset_cause(struct hclge_dev *hdev)
2798{
2799 u32 clearval = 0;
2800
2801 switch (hdev->reset_type) {
2802 case HNAE3_IMP_RESET:
2803 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2804 break;
2805 case HNAE3_GLOBAL_RESET:
2806 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2807 break;
2808 case HNAE3_CORE_RESET:
2809 clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2810 break;
2811 default:
cd8c5c26
YL
2812 break;
2813 }
2814
2815 if (!clearval)
2816 return;
2817
2818 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
2819 hclge_enable_vector(&hdev->misc_vector, true);
2820}
2821
aa5c4f17
HT
2822static int hclge_reset_prepare_down(struct hclge_dev *hdev)
2823{
2824 int ret = 0;
2825
2826 switch (hdev->reset_type) {
2827 case HNAE3_FUNC_RESET:
6b9a97ee
HT
2828 /* fall through */
2829 case HNAE3_FLR_RESET:
aa5c4f17
HT
2830 ret = hclge_set_all_vf_rst(hdev, true);
2831 break;
2832 default:
2833 break;
2834 }
2835
2836 return ret;
2837}
2838
35d93a30
HT
2839static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
2840{
6dd22bbc 2841 u32 reg_val;
35d93a30
HT
2842 int ret = 0;
2843
2844 switch (hdev->reset_type) {
2845 case HNAE3_FUNC_RESET:
aa5c4f17
HT
2846 /* There is no mechanism for PF to know if VF has stopped IO;
2847 * for now, just wait 100 ms for VF to stop IO
2848 */
2849 msleep(100);
35d93a30
HT
2850 ret = hclge_func_reset_cmd(hdev, 0);
2851 if (ret) {
2852 dev_err(&hdev->pdev->dev,
141b95d5 2853 "asserting function reset fail %d!\n", ret);
35d93a30
HT
2854 return ret;
2855 }
2856
2857 /* After performing PF reset, it is not necessary to do the
2858 * mailbox handling or send any command to firmware, because
2859 * any mailbox handling or command to firmware is only valid
2860 * after hclge_cmd_init is called.
2861 */
2862 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2863 break;
6b9a97ee
HT
2864 case HNAE3_FLR_RESET:
2865 /* There is no mechanism for PF to know if VF has stopped IO;
2866 * for now, just wait 100 ms for VF to stop IO
2867 */
2868 msleep(100);
2869 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2870 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
2871 break;
6dd22bbc
HT
2872 case HNAE3_IMP_RESET:
2873 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
2874 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
2875 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
2876 break;
35d93a30
HT
2877 default:
2878 break;
2879 }
2880
2881 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
2882
2883 return ret;
2884}
2885
65e41e7e
HT
2886static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
2887{
2888#define MAX_RESET_FAIL_CNT 5
2889#define RESET_UPGRADE_DELAY_SEC 10
2890
2891 if (hdev->reset_pending) {
2892 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
2893 hdev->reset_pending);
2894 return true;
2895 } else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
2896 (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
2897 BIT(HCLGE_IMP_RESET_BIT))) {
2898 dev_info(&hdev->pdev->dev,
2899 "reset failed because IMP Reset is pending\n");
2900 hclge_clear_reset_cause(hdev);
2901 return false;
2902 } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
2903 hdev->reset_fail_cnt++;
2904 if (is_timeout) {
2905 set_bit(hdev->reset_type, &hdev->reset_pending);
2906 dev_info(&hdev->pdev->dev,
2907 "re-schedule to wait for hw reset done\n");
2908 return true;
2909 }
2910
2911 dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
2912 hclge_clear_reset_cause(hdev);
2913 mod_timer(&hdev->reset_timer,
2914 jiffies + RESET_UPGRADE_DELAY_SEC * HZ);
2915
2916 return false;
2917 }
2918
2919 hclge_clear_reset_cause(hdev);
2920 dev_err(&hdev->pdev->dev, "Reset fail!\n");
2921 return false;
2922}
2923
aa5c4f17
HT
2924static int hclge_reset_prepare_up(struct hclge_dev *hdev)
2925{
2926 int ret = 0;
2927
2928 switch (hdev->reset_type) {
2929 case HNAE3_FUNC_RESET:
6b9a97ee
HT
2930 /* fall through */
2931 case HNAE3_FLR_RESET:
aa5c4f17
HT
2932 ret = hclge_set_all_vf_rst(hdev, false);
2933 break;
2934 default:
2935 break;
2936 }
2937
2938 return ret;
2939}
2940
f2f432f2
SM
2941static void hclge_reset(struct hclge_dev *hdev)
2942{
6871af29 2943 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
65e41e7e
HT
2944 bool is_timeout = false;
2945 int ret;
9de0b86f 2946
6871af29
JS
2947 /* Initialize ae_dev reset status as well, in case enet layer wants to
2948 * know if device is undergoing reset
2949 */
2950 ae_dev->reset_type = hdev->reset_type;
4d60291b 2951 hdev->reset_count++;
f2f432f2 2952 /* perform reset of the stack & ae device for a client */
65e41e7e
HT
2953 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
2954 if (ret)
2955 goto err_reset;
2956
aa5c4f17
HT
2957 ret = hclge_reset_prepare_down(hdev);
2958 if (ret)
2959 goto err_reset;
2960
6d4fab39 2961 rtnl_lock();
65e41e7e
HT
2962 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2963 if (ret)
2964 goto err_reset_lock;
f2f432f2 2965
65e41e7e 2966 rtnl_unlock();
35d93a30 2967
65e41e7e
HT
2968 ret = hclge_reset_prepare_wait(hdev);
2969 if (ret)
2970 goto err_reset;
cd8c5c26 2971
65e41e7e
HT
2972 if (hclge_reset_wait(hdev)) {
2973 is_timeout = true;
2974 goto err_reset;
f2f432f2
SM
2975 }
2976
65e41e7e
HT
2977 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
2978 if (ret)
2979 goto err_reset;
2980
2981 rtnl_lock();
2982 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
2983 if (ret)
2984 goto err_reset_lock;
2985
2986 ret = hclge_reset_ae_dev(hdev->ae_dev);
2987 if (ret)
2988 goto err_reset_lock;
2989
2990 ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
2991 if (ret)
2992 goto err_reset_lock;
2993
1f609492
YL
2994 ret = hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
2995 if (ret)
2996 goto err_reset_lock;
2997
65e41e7e
HT
2998 hclge_clear_reset_cause(hdev);
2999
aa5c4f17
HT
3000 ret = hclge_reset_prepare_up(hdev);
3001 if (ret)
3002 goto err_reset_lock;
3003
65e41e7e
HT
3004 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3005 if (ret)
3006 goto err_reset_lock;
3007
6d4fab39 3008 rtnl_unlock();
f403a84f 3009
65e41e7e
HT
3010 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3011 if (ret)
3012 goto err_reset;
3013
3014 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3015 if (ret)
3016 goto err_reset;
3017
b644a8d4
HT
3018 hdev->last_reset_time = jiffies;
3019 hdev->reset_fail_cnt = 0;
3020 ae_dev->reset_type = HNAE3_NONE_RESET;
056cbab3 3021 del_timer(&hdev->reset_timer);
b644a8d4 3022
65e41e7e
HT
3023 return;
3024
3025err_reset_lock:
3026 rtnl_unlock();
3027err_reset:
3028 if (hclge_reset_err_handle(hdev, is_timeout))
3029 hclge_reset_task_schedule(hdev);
f2f432f2
SM
3030}
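/* Note: the reset flow above notifies clients in a fixed order: roce/nic DOWN,
 * prepare and wait for the hardware reset, roce/nic UNINIT, re-init of the ae
 * device, then nic INIT/RESTORE/UP followed by roce INIT/UP; any failure drops
 * into hclge_reset_err_handle(), which may re-schedule the reset or escalate
 * it via the reset timer.
 */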
3031
6ae4e733
SJ
3032static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3033{
3034 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3035 struct hclge_dev *hdev = ae_dev->priv;
3036
3037 /* We might end up getting called broadly because of the 2 cases below:
3038 * 1. Recoverable error was conveyed through APEI and only way to bring
3039 * normalcy is to reset.
3040 * 2. A new reset request from the stack due to timeout
3041 *
3042 * For the first case, the error event might not have an ae handle available.
3043 * Check if this is a new reset request and we are not here just because
6d4c3981
SM
3044 * last reset attempt did not succeed and watchdog hit us again. We will
3045 * know this if last reset request did not occur very recently (watchdog
3046 * timer = 5*HZ, let us check after a sufficiently large time, say 4*5*HZ).
3047 * In case of a new request we reset the "reset level" to PF reset.
9de0b86f
HT
3048 * And if it is a repeat reset request of the most recent one then we
3049 * want to make sure we throttle the reset request. Therefore, we will
3050 * not allow it again before 3*HZ times.
6d4c3981 3051 */
6ae4e733
SJ
3052 if (!handle)
3053 handle = &hdev->vport[0].nic;
3054
0742ed7c 3055 if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ)))
9de0b86f 3056 return;
720bd583 3057 else if (hdev->default_reset_request)
0742ed7c 3058 hdev->reset_level =
720bd583
HT
3059 hclge_get_reset_level(hdev,
3060 &hdev->default_reset_request);
0742ed7c
HT
3061 else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
3062 hdev->reset_level = HNAE3_FUNC_RESET;
4ed340ab 3063
6d4c3981 3064 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d",
0742ed7c 3065 hdev->reset_level);
6d4c3981
SM
3066
3067 /* request reset & schedule reset task */
0742ed7c 3068 set_bit(hdev->reset_level, &hdev->reset_request);
6d4c3981
SM
3069 hclge_reset_task_schedule(hdev);
3070
0742ed7c
HT
3071 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3072 hdev->reset_level++;
4ed340ab
L
3073}
3074
720bd583
HT
3075static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3076 enum hnae3_reset_type rst_type)
3077{
3078 struct hclge_dev *hdev = ae_dev->priv;
3079
3080 set_bit(rst_type, &hdev->default_reset_request);
3081}
3082
65e41e7e
HT
3083static void hclge_reset_timer(struct timer_list *t)
3084{
3085 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3086
3087 dev_info(&hdev->pdev->dev,
3088 "triggering global reset in reset timer\n");
3089 set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
3090 hclge_reset_event(hdev->pdev, NULL);
3091}
3092
4ed340ab
L
3093static void hclge_reset_subtask(struct hclge_dev *hdev)
3094{
f2f432f2
SM
3095 /* check if there is any ongoing reset in the hardware. This status can
3096 * be checked from reset_pending. If there is, then we need to wait for
3097 * hardware to complete reset.
3098 * a. If we are able to figure out in reasonable time that hardware
3099 * has fully reset, then we can proceed with driver, client
3100 * reset.
3101 * b. else, we can come back later to check this status so re-sched
3102 * now.
3103 */
0742ed7c 3104 hdev->last_reset_time = jiffies;
f2f432f2
SM
3105 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
3106 if (hdev->reset_type != HNAE3_NONE_RESET)
3107 hclge_reset(hdev);
4ed340ab 3108
f2f432f2
SM
3109 /* check if we got any *new* reset requests to be honored */
3110 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
3111 if (hdev->reset_type != HNAE3_NONE_RESET)
3112 hclge_do_reset(hdev);
4ed340ab 3113
4ed340ab
L
3114 hdev->reset_type = HNAE3_NONE_RESET;
3115}
3116
cb1b9f77 3117static void hclge_reset_service_task(struct work_struct *work)
466b0c00 3118{
cb1b9f77
SM
3119 struct hclge_dev *hdev =
3120 container_of(work, struct hclge_dev, rst_service_task);
3121
3122 if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3123 return;
3124
3125 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
3126
4ed340ab 3127 hclge_reset_subtask(hdev);
cb1b9f77
SM
3128
3129 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
466b0c00
L
3130}
3131
c1a81619
SM
3132static void hclge_mailbox_service_task(struct work_struct *work)
3133{
3134 struct hclge_dev *hdev =
3135 container_of(work, struct hclge_dev, mbx_service_task);
3136
3137 if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3138 return;
3139
3140 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
3141
3142 hclge_mbx_handler(hdev);
3143
3144 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3145}
3146
a6d818e3
YL
3147static void hclge_update_vport_alive(struct hclge_dev *hdev)
3148{
3149 int i;
3150
3151 /* start from vport 1 since the PF (vport 0) is always alive */
3152 for (i = 1; i < hdev->num_alloc_vport; i++) {
3153 struct hclge_vport *vport = &hdev->vport[i];
3154
3155 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3156 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
818f1675
YL
3157
3158 /* If vf is not alive, set to default value */
3159 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3160 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
a6d818e3
YL
3161 }
3162}
3163
46a3df9f
S
3164static void hclge_service_task(struct work_struct *work)
3165{
3166 struct hclge_dev *hdev =
3167 container_of(work, struct hclge_dev, service_task);
3168
c5f65480
JS
3169 if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
3170 hclge_update_stats_for_all(hdev);
3171 hdev->hw_stats.stats_timer = 0;
3172 }
3173
46a3df9f
S
3174 hclge_update_speed_duplex(hdev);
3175 hclge_update_link_status(hdev);
a6d818e3 3176 hclge_update_vport_alive(hdev);
46a3df9f
S
3177 hclge_service_complete(hdev);
3178}
3179
46a3df9f
S
3180struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
3181{
3182 /* VF handle has no client */
3183 if (!handle->client)
3184 return container_of(handle, struct hclge_vport, nic);
3185 else if (handle->client->type == HNAE3_CLIENT_ROCE)
3186 return container_of(handle, struct hclge_vport, roce);
3187 else
3188 return container_of(handle, struct hclge_vport, nic);
3189}
3190
3191static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
3192 struct hnae3_vector_info *vector_info)
3193{
3194 struct hclge_vport *vport = hclge_get_vport(handle);
3195 struct hnae3_vector_info *vector = vector_info;
3196 struct hclge_dev *hdev = vport->back;
3197 int alloc = 0;
3198 int i, j;
3199
3200 vector_num = min(hdev->num_msi_left, vector_num);
3201
3202 for (j = 0; j < vector_num; j++) {
3203 for (i = 1; i < hdev->num_msi; i++) {
3204 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
3205 vector->vector = pci_irq_vector(hdev->pdev, i);
3206 vector->io_addr = hdev->hw.io_base +
3207 HCLGE_VECTOR_REG_BASE +
3208 (i - 1) * HCLGE_VECTOR_REG_OFFSET +
3209 vport->vport_id *
3210 HCLGE_VECTOR_VF_OFFSET;
3211 hdev->vector_status[i] = vport->vport_id;
887c3820 3212 hdev->vector_irq[i] = vector->vector;
46a3df9f
S
3213
3214 vector++;
3215 alloc++;
3216
3217 break;
3218 }
3219 }
3220 }
3221 hdev->num_msi_left -= alloc;
3222 hdev->num_msi_used += alloc;
3223
3224 return alloc;
3225}
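/* Note: the search above starts at index 1 because vector 0 is reserved for
 * the misc (reset/mailbox) interrupt claimed in hclge_get_misc_vector().
 */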
3226
3227static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
3228{
3229 int i;
3230
887c3820
SM
3231 for (i = 0; i < hdev->num_msi; i++)
3232 if (vector == hdev->vector_irq[i])
3233 return i;
3234
46a3df9f
S
3235 return -EINVAL;
3236}
3237
0d3e6631
YL
3238static int hclge_put_vector(struct hnae3_handle *handle, int vector)
3239{
3240 struct hclge_vport *vport = hclge_get_vport(handle);
3241 struct hclge_dev *hdev = vport->back;
3242 int vector_id;
3243
3244 vector_id = hclge_get_vector_index(hdev, vector);
3245 if (vector_id < 0) {
3246 dev_err(&hdev->pdev->dev,
3247 "Get vector index fail. vector_id =%d\n", vector_id);
3248 return vector_id;
3249 }
3250
3251 hclge_free_vector(hdev, vector_id);
3252
3253 return 0;
3254}
3255
46a3df9f
S
3256static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
3257{
3258 return HCLGE_RSS_KEY_SIZE;
3259}
3260
3261static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
3262{
3263 return HCLGE_RSS_IND_TBL_SIZE;
3264}
3265
46a3df9f
S
3266static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
3267 const u8 hfunc, const u8 *key)
3268{
d44f9b63 3269 struct hclge_rss_config_cmd *req;
46a3df9f
S
3270 struct hclge_desc desc;
3271 int key_offset;
3272 int key_size;
3273 int ret;
3274
d44f9b63 3275 req = (struct hclge_rss_config_cmd *)desc.data;
46a3df9f
S
3276
3277 for (key_offset = 0; key_offset < 3; key_offset++) {
3278 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
3279 false);
3280
3281 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
3282 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
3283
3284 if (key_offset == 2)
3285 key_size =
3286 HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
3287 else
3288 key_size = HCLGE_RSS_HASH_KEY_NUM;
3289
3290 memcpy(req->hash_key,
3291 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
3292
3293 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3294 if (ret) {
3295 dev_err(&hdev->pdev->dev,
3296 "Configure RSS config fail, status = %d\n",
3297 ret);
3298 return ret;
3299 }
3300 }
3301 return 0;
3302}
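/* Note: the RSS key is split across three HCLGE_OPC_RSS_GENERIC_CONFIG
 * descriptors of HCLGE_RSS_HASH_KEY_NUM bytes each; the last descriptor
 * carries only the remainder, HCLGE_RSS_KEY_SIZE - 2 * HCLGE_RSS_HASH_KEY_NUM.
 */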
3303
89523cfa 3304static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
46a3df9f 3305{
d44f9b63 3306 struct hclge_rss_indirection_table_cmd *req;
46a3df9f
S
3307 struct hclge_desc desc;
3308 int i, j;
3309 int ret;
3310
d44f9b63 3311 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
46a3df9f
S
3312
3313 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
3314 hclge_cmd_setup_basic_desc
3315 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
3316
a90bb9a5
YL
3317 req->start_table_index =
3318 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
3319 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
46a3df9f
S
3320
3321 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
3322 req->rss_result[j] =
3323 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
3324
3325 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3326 if (ret) {
3327 dev_err(&hdev->pdev->dev,
3328 "Configure rss indir table fail,status = %d\n",
3329 ret);
3330 return ret;
3331 }
3332 }
3333 return 0;
3334}
3335
3336static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
3337 u16 *tc_size, u16 *tc_offset)
3338{
d44f9b63 3339 struct hclge_rss_tc_mode_cmd *req;
46a3df9f
S
3340 struct hclge_desc desc;
3341 int ret;
3342 int i;
3343
3344 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
d44f9b63 3345 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
46a3df9f
S
3346
3347 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
a90bb9a5
YL
3348 u16 mode = 0;
3349
e4e87715
PL
3350 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
3351 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
3352 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
3353 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
3354 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
a90bb9a5
YL
3355
3356 req->rss_tc_mode[i] = cpu_to_le16(mode);
46a3df9f
S
3357 }
3358
3359 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 3360 if (ret)
46a3df9f
S
3361 dev_err(&hdev->pdev->dev,
3362 "Configure rss tc mode fail, status = %d\n", ret);
46a3df9f 3363
3f639907 3364 return ret;
46a3df9f
S
3365}
3366
232fc64b
PL
3367static void hclge_get_rss_type(struct hclge_vport *vport)
3368{
3369 if (vport->rss_tuple_sets.ipv4_tcp_en ||
3370 vport->rss_tuple_sets.ipv4_udp_en ||
3371 vport->rss_tuple_sets.ipv4_sctp_en ||
3372 vport->rss_tuple_sets.ipv6_tcp_en ||
3373 vport->rss_tuple_sets.ipv6_udp_en ||
3374 vport->rss_tuple_sets.ipv6_sctp_en)
3375 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
3376 else if (vport->rss_tuple_sets.ipv4_fragment_en ||
3377 vport->rss_tuple_sets.ipv6_fragment_en)
3378 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
3379 else
3380 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
3381}
3382
46a3df9f
S
3383static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
3384{
d44f9b63 3385 struct hclge_rss_input_tuple_cmd *req;
46a3df9f
S
3386 struct hclge_desc desc;
3387 int ret;
3388
3389 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3390
d44f9b63 3391 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
6f2af429
YL
3392
3393 /* Get the tuple cfg from pf */
3394 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
3395 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
3396 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
3397 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
3398 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
3399 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
3400 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
3401 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
232fc64b 3402 hclge_get_rss_type(&hdev->vport[0]);
46a3df9f 3403 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 3404 if (ret)
46a3df9f
S
3405 dev_err(&hdev->pdev->dev,
3406 "Configure rss input fail, status = %d\n", ret);
3f639907 3407 return ret;
46a3df9f
S
3408}
3409
3410static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
3411 u8 *key, u8 *hfunc)
3412{
3413 struct hclge_vport *vport = hclge_get_vport(handle);
46a3df9f
S
3414 int i;
3415
3416 /* Get hash algorithm */
775501a1
JS
3417 if (hfunc) {
3418 switch (vport->rss_algo) {
3419 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
3420 *hfunc = ETH_RSS_HASH_TOP;
3421 break;
3422 case HCLGE_RSS_HASH_ALGO_SIMPLE:
3423 *hfunc = ETH_RSS_HASH_XOR;
3424 break;
3425 default:
3426 *hfunc = ETH_RSS_HASH_UNKNOWN;
3427 break;
3428 }
3429 }
46a3df9f
S
3430
3431 /* Get the RSS Key required by the user */
3432 if (key)
3433 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
3434
3435 /* Get indirect table */
3436 if (indir)
3437 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3438 indir[i] = vport->rss_indirection_tbl[i];
3439
3440 return 0;
3441}
3442
3443static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
3444 const u8 *key, const u8 hfunc)
3445{
3446 struct hclge_vport *vport = hclge_get_vport(handle);
3447 struct hclge_dev *hdev = vport->back;
3448 u8 hash_algo;
3449 int ret, i;
3450
3451 /* Set the RSS Hash Key if specified by the user */
3452 if (key) {
775501a1
JS
3453 switch (hfunc) {
3454 case ETH_RSS_HASH_TOP:
46a3df9f 3455 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
775501a1
JS
3456 break;
3457 case ETH_RSS_HASH_XOR:
3458 hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3459 break;
3460 case ETH_RSS_HASH_NO_CHANGE:
3461 hash_algo = vport->rss_algo;
3462 break;
3463 default:
46a3df9f 3464 return -EINVAL;
775501a1
JS
3465 }
3466
46a3df9f
S
3467 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
3468 if (ret)
3469 return ret;
89523cfa
YL
3470
3471 /* Update the shadow RSS key with the user specified key */
3472 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
3473 vport->rss_algo = hash_algo;
46a3df9f
S
3474 }
3475
3476 /* Update the shadow RSS table with user specified qids */
3477 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3478 vport->rss_indirection_tbl[i] = indir[i];
3479
3480 /* Update the hardware */
89523cfa 3481 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
46a3df9f
S
3482}
3483
f7db940a
L
3484static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
3485{
3486 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
3487
3488 if (nfc->data & RXH_L4_B_2_3)
3489 hash_sets |= HCLGE_D_PORT_BIT;
3490 else
3491 hash_sets &= ~HCLGE_D_PORT_BIT;
3492
3493 if (nfc->data & RXH_IP_SRC)
3494 hash_sets |= HCLGE_S_IP_BIT;
3495 else
3496 hash_sets &= ~HCLGE_S_IP_BIT;
3497
3498 if (nfc->data & RXH_IP_DST)
3499 hash_sets |= HCLGE_D_IP_BIT;
3500 else
3501 hash_sets &= ~HCLGE_D_IP_BIT;
3502
3503 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
3504 hash_sets |= HCLGE_V_TAG_BIT;
3505
3506 return hash_sets;
3507}
3508
3509static int hclge_set_rss_tuple(struct hnae3_handle *handle,
3510 struct ethtool_rxnfc *nfc)
3511{
3512 struct hclge_vport *vport = hclge_get_vport(handle);
3513 struct hclge_dev *hdev = vport->back;
3514 struct hclge_rss_input_tuple_cmd *req;
3515 struct hclge_desc desc;
3516 u8 tuple_sets;
3517 int ret;
3518
3519 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
3520 RXH_L4_B_0_1 | RXH_L4_B_2_3))
3521 return -EINVAL;
3522
3523 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
6f2af429 3524 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
f7db940a 3525
6f2af429
YL
3526 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
3527 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
3528 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
3529 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
3530 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
3531 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
3532 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
3533 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
f7db940a
L
3534
3535 tuple_sets = hclge_get_rss_hash_bits(nfc);
3536 switch (nfc->flow_type) {
3537 case TCP_V4_FLOW:
3538 req->ipv4_tcp_en = tuple_sets;
3539 break;
3540 case TCP_V6_FLOW:
3541 req->ipv6_tcp_en = tuple_sets;
3542 break;
3543 case UDP_V4_FLOW:
3544 req->ipv4_udp_en = tuple_sets;
3545 break;
3546 case UDP_V6_FLOW:
3547 req->ipv6_udp_en = tuple_sets;
3548 break;
3549 case SCTP_V4_FLOW:
3550 req->ipv4_sctp_en = tuple_sets;
3551 break;
3552 case SCTP_V6_FLOW:
3553 if ((nfc->data & RXH_L4_B_0_1) ||
3554 (nfc->data & RXH_L4_B_2_3))
3555 return -EINVAL;
3556
3557 req->ipv6_sctp_en = tuple_sets;
3558 break;
3559 case IPV4_FLOW:
3560 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3561 break;
3562 case IPV6_FLOW:
3563 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3564 break;
3565 default:
3566 return -EINVAL;
3567 }
3568
3569 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6f2af429 3570 if (ret) {
f7db940a
L
3571 dev_err(&hdev->pdev->dev,
3572 "Set rss tuple fail, status = %d\n", ret);
6f2af429
YL
3573 return ret;
3574 }
f7db940a 3575
6f2af429
YL
3576 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
3577 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
3578 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
3579 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
3580 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
3581 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
3582 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
3583 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
232fc64b 3584 hclge_get_rss_type(vport);
6f2af429 3585 return 0;
f7db940a
L
3586}
3587
07d29954
L
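/* Report the RSS hash tuple of one flow type as RXH_* flags in nfc->data,
 * read from the shadow tuple settings kept in the vport.
 */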
3588static int hclge_get_rss_tuple(struct hnae3_handle *handle,
3589 struct ethtool_rxnfc *nfc)
3590{
3591 struct hclge_vport *vport = hclge_get_vport(handle);
07d29954 3592 u8 tuple_sets;
07d29954
L
3593
3594 nfc->data = 0;
3595
07d29954
L
3596 switch (nfc->flow_type) {
3597 case TCP_V4_FLOW:
6f2af429 3598 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
07d29954
L
3599 break;
3600 case UDP_V4_FLOW:
6f2af429 3601 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
07d29954
L
3602 break;
3603 case TCP_V6_FLOW:
6f2af429 3604 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
07d29954
L
3605 break;
3606 case UDP_V6_FLOW:
6f2af429 3607 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
07d29954
L
3608 break;
3609 case SCTP_V4_FLOW:
6f2af429 3610 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
07d29954
L
3611 break;
3612 case SCTP_V6_FLOW:
6f2af429 3613 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
07d29954
L
3614 break;
3615 case IPV4_FLOW:
3616 case IPV6_FLOW:
3617 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
3618 break;
3619 default:
3620 return -EINVAL;
3621 }
3622
3623 if (!tuple_sets)
3624 return 0;
3625
3626 if (tuple_sets & HCLGE_D_PORT_BIT)
3627 nfc->data |= RXH_L4_B_2_3;
3628 if (tuple_sets & HCLGE_S_PORT_BIT)
3629 nfc->data |= RXH_L4_B_0_1;
3630 if (tuple_sets & HCLGE_D_IP_BIT)
3631 nfc->data |= RXH_IP_DST;
3632 if (tuple_sets & HCLGE_S_IP_BIT)
3633 nfc->data |= RXH_IP_SRC;
3634
3635 return 0;
3636}
3637
46a3df9f
S
3638static int hclge_get_tc_size(struct hnae3_handle *handle)
3639{
3640 struct hclge_vport *vport = hclge_get_vport(handle);
3641 struct hclge_dev *hdev = vport->back;
3642
3643 return hdev->rss_size_max;
3644}
3645
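/* Write the vport 0 RSS configuration (indirection table, hash key and
 * algorithm, input tuple) to hardware and set up the per-TC RSS mode,
 * i.e. the queue offset and size of each enabled TC.
 */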
77f255c1 3646int hclge_rss_init_hw(struct hclge_dev *hdev)
46a3df9f 3647{
46a3df9f 3648 struct hclge_vport *vport = hdev->vport;
268f5dfa
YL
3649 u8 *rss_indir = vport[0].rss_indirection_tbl;
3650 u16 rss_size = vport[0].alloc_rss_size;
3651 u8 *key = vport[0].rss_hash_key;
3652 u8 hfunc = vport[0].rss_algo;
46a3df9f 3653 u16 tc_offset[HCLGE_MAX_TC_NUM];
46a3df9f
S
3654 u16 tc_valid[HCLGE_MAX_TC_NUM];
3655 u16 tc_size[HCLGE_MAX_TC_NUM];
268f5dfa
YL
3656 u16 roundup_size;
3657 int i, ret;
68ece54e 3658
46a3df9f
S
3659 ret = hclge_set_rss_indir_table(hdev, rss_indir);
3660 if (ret)
268f5dfa 3661 return ret;
46a3df9f 3662
46a3df9f
S
3663 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
3664 if (ret)
268f5dfa 3665 return ret;
46a3df9f
S
3666
3667 ret = hclge_set_rss_input_tuple(hdev);
3668 if (ret)
268f5dfa 3669 return ret;
46a3df9f 3670
68ece54e
YL
3671 /* Each TC has the same queue size, and the tc_size set to hardware is
3672 * the log2 of the roundup power of two of rss_size; the actual queue
3673 * size is limited by the indirection table.
3674 */
3675 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
3676 dev_err(&hdev->pdev->dev,
3677 "Configure rss tc size failed, invalid TC_SIZE = %d\n",
3678 rss_size);
268f5dfa 3679 return -EINVAL;
68ece54e
YL
3680 }
3681
3682 roundup_size = roundup_pow_of_two(rss_size);
3683 roundup_size = ilog2(roundup_size);
3684
46a3df9f 3685 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
68ece54e 3686 tc_valid[i] = 0;
46a3df9f 3687
68ece54e
YL
3688 if (!(hdev->hw_tc_map & BIT(i)))
3689 continue;
3690
3691 tc_valid[i] = 1;
3692 tc_size[i] = roundup_size;
3693 tc_offset[i] = rss_size * i;
46a3df9f 3694 }
68ece54e 3695
268f5dfa
YL
3696 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
3697}
46a3df9f 3698
268f5dfa
YL
3699void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
3700{
3701 struct hclge_vport *vport = hdev->vport;
3702 int i, j;
46a3df9f 3703
268f5dfa
YL
3704 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
3705 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3706 vport[j].rss_indirection_tbl[i] =
3707 i % vport[j].alloc_rss_size;
3708 }
3709}
3710
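/* Initialize the default RSS configuration for every vport: default input
 * tuples, the default hash key, a hash algorithm chosen by hardware
 * revision, and an evenly spread indirection table.
 */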
3711static void hclge_rss_init_cfg(struct hclge_dev *hdev)
3712{
472d7ece 3713 int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
268f5dfa 3714 struct hclge_vport *vport = hdev->vport;
472d7ece
JS
3715
3716 if (hdev->pdev->revision >= 0x21)
3717 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
268f5dfa 3718
268f5dfa
YL
3719 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3720 vport[i].rss_tuple_sets.ipv4_tcp_en =
3721 HCLGE_RSS_INPUT_TUPLE_OTHER;
3722 vport[i].rss_tuple_sets.ipv4_udp_en =
3723 HCLGE_RSS_INPUT_TUPLE_OTHER;
3724 vport[i].rss_tuple_sets.ipv4_sctp_en =
3725 HCLGE_RSS_INPUT_TUPLE_SCTP;
3726 vport[i].rss_tuple_sets.ipv4_fragment_en =
3727 HCLGE_RSS_INPUT_TUPLE_OTHER;
3728 vport[i].rss_tuple_sets.ipv6_tcp_en =
3729 HCLGE_RSS_INPUT_TUPLE_OTHER;
3730 vport[i].rss_tuple_sets.ipv6_udp_en =
3731 HCLGE_RSS_INPUT_TUPLE_OTHER;
3732 vport[i].rss_tuple_sets.ipv6_sctp_en =
3733 HCLGE_RSS_INPUT_TUPLE_SCTP;
3734 vport[i].rss_tuple_sets.ipv6_fragment_en =
3735 HCLGE_RSS_INPUT_TUPLE_OTHER;
3736
472d7ece 3737 vport[i].rss_algo = rss_algo;
ea739c90 3738
472d7ece
JS
3739 memcpy(vport[i].rss_hash_key, hclge_hash_key,
3740 HCLGE_RSS_KEY_SIZE);
268f5dfa
YL
3741 }
3742
3743 hclge_rss_indir_init_cfg(hdev);
46a3df9f
S
3744}
3745
84e095d6
SM
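/* Bind (en = true) or unbind a chain of TX/RX rings to/from an interrupt
 * vector. The ring type, queue id and GL index of each node are packed
 * into the command descriptor; the command is sent every
 * HCLGE_VECTOR_ELEMENTS_PER_CMD rings and once more for any remainder.
 */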
3746int hclge_bind_ring_with_vector(struct hclge_vport *vport,
3747 int vector_id, bool en,
3748 struct hnae3_ring_chain_node *ring_chain)
46a3df9f
S
3749{
3750 struct hclge_dev *hdev = vport->back;
46a3df9f
S
3751 struct hnae3_ring_chain_node *node;
3752 struct hclge_desc desc;
84e095d6
SM
3753 struct hclge_ctrl_vector_chain_cmd *req
3754 = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
3755 enum hclge_cmd_status status;
3756 enum hclge_opcode_type op;
3757 u16 tqp_type_and_id;
46a3df9f
S
3758 int i;
3759
84e095d6
SM
3760 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
3761 hclge_cmd_setup_basic_desc(&desc, op, false);
46a3df9f
S
3762 req->int_vector_id = vector_id;
3763
3764 i = 0;
3765 for (node = ring_chain; node; node = node->next) {
84e095d6 3766 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
e4e87715
PL
3767 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
3768 HCLGE_INT_TYPE_S,
3769 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
3770 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
3771 HCLGE_TQP_ID_S, node->tqp_index);
3772 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
3773 HCLGE_INT_GL_IDX_S,
3774 hnae3_get_field(node->int_gl_idx,
3775 HNAE3_RING_GL_IDX_M,
3776 HNAE3_RING_GL_IDX_S));
84e095d6 3777 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
46a3df9f
S
3778 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
3779 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
84e095d6 3780 req->vfid = vport->vport_id;
46a3df9f 3781
84e095d6
SM
3782 status = hclge_cmd_send(&hdev->hw, &desc, 1);
3783 if (status) {
46a3df9f
S
3784 dev_err(&hdev->pdev->dev,
3785 "Map TQP fail, status is %d.\n",
84e095d6
SM
3786 status);
3787 return -EIO;
46a3df9f
S
3788 }
3789 i = 0;
3790
3791 hclge_cmd_setup_basic_desc(&desc,
84e095d6 3792 op,
46a3df9f
S
3793 false);
3794 req->int_vector_id = vector_id;
3795 }
3796 }
3797
3798 if (i > 0) {
3799 req->int_cause_num = i;
84e095d6
SM
3800 req->vfid = vport->vport_id;
3801 status = hclge_cmd_send(&hdev->hw, &desc, 1);
3802 if (status) {
46a3df9f 3803 dev_err(&hdev->pdev->dev,
84e095d6
SM
3804 "Map TQP fail, status is %d.\n", status);
3805 return -EIO;
46a3df9f
S
3806 }
3807 }
3808
3809 return 0;
3810}
3811
84e095d6
SM
3812static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
3813 int vector,
3814 struct hnae3_ring_chain_node *ring_chain)
46a3df9f
S
3815{
3816 struct hclge_vport *vport = hclge_get_vport(handle);
3817 struct hclge_dev *hdev = vport->back;
3818 int vector_id;
3819
3820 vector_id = hclge_get_vector_index(hdev, vector);
3821 if (vector_id < 0) {
3822 dev_err(&hdev->pdev->dev,
84e095d6 3823 "Get vector index fail. vector_id =%d\n", vector_id);
46a3df9f
S
3824 return vector_id;
3825 }
3826
84e095d6 3827 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
46a3df9f
S
3828}
3829
84e095d6
SM
3830static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
3831 int vector,
3832 struct hnae3_ring_chain_node *ring_chain)
46a3df9f
S
3833{
3834 struct hclge_vport *vport = hclge_get_vport(handle);
3835 struct hclge_dev *hdev = vport->back;
84e095d6 3836 int vector_id, ret;
46a3df9f 3837
b50ae26c
PL
3838 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3839 return 0;
3840
46a3df9f
S
3841 vector_id = hclge_get_vector_index(hdev, vector);
3842 if (vector_id < 0) {
3843 dev_err(&handle->pdev->dev,
3844 "Get vector index fail. ret =%d\n", vector_id);
3845 return vector_id;
3846 }
3847
84e095d6 3848 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
0d3e6631 3849 if (ret)
84e095d6
SM
3850 dev_err(&handle->pdev->dev,
3851 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
3852 vector_id,
3853 ret);
46a3df9f 3854
0d3e6631 3855 return ret;
46a3df9f
S
3856}
3857
3858int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
3859 struct hclge_promisc_param *param)
3860{
d44f9b63 3861 struct hclge_promisc_cfg_cmd *req;
46a3df9f
S
3862 struct hclge_desc desc;
3863 int ret;
3864
3865 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
3866
d44f9b63 3867 req = (struct hclge_promisc_cfg_cmd *)desc.data;
46a3df9f 3868 req->vf_id = param->vf_id;
96c0e861
PL
3869
3870 /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
3871 * pdev revision(0x20), newer revisions support them. Setting
3872 * these two fields will not return an error when the driver
3873 * sends the command to firmware in revision(0x20).
3874 */
3875 req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
3876 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
46a3df9f
S
3877
3878 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 3879 if (ret)
46a3df9f
S
3880 dev_err(&hdev->pdev->dev,
3881 "Set promisc mode fail, status is %d.\n", ret);
3f639907
JS
3882
3883 return ret;
46a3df9f
S
3884}
3885
3886void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
3887 bool en_mc, bool en_bc, int vport_id)
3888{
3889 if (!param)
3890 return;
3891
3892 memset(param, 0, sizeof(struct hclge_promisc_param));
3893 if (en_uc)
3894 param->enable = HCLGE_PROMISC_EN_UC;
3895 if (en_mc)
3896 param->enable |= HCLGE_PROMISC_EN_MC;
3897 if (en_bc)
3898 param->enable |= HCLGE_PROMISC_EN_BC;
3899 param->vf_id = vport_id;
3900}
3901
7fa6be4f
HT
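/* Configure unicast/multicast promiscuous mode for this vport. Broadcast
 * promiscuous is kept enabled except on revision 0x20, see below.
 */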
3902static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
3903 bool en_mc_pmc)
46a3df9f
S
3904{
3905 struct hclge_vport *vport = hclge_get_vport(handle);
3906 struct hclge_dev *hdev = vport->back;
3907 struct hclge_promisc_param param;
28673b33 3908 bool en_bc_pmc = true;
46a3df9f 3909
28673b33
JS
3910 /* For revision 0x20, if broadcast promisc is enabled, the vlan filter
3911 * is always bypassed. So broadcast promisc should be disabled until
3912 * the user enables promisc mode
3913 */
3914 if (handle->pdev->revision == 0x20)
3915 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
3916
3917 hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
3b75c3df 3918 vport->vport_id);
7fa6be4f 3919 return hclge_cmd_set_promisc_mode(hdev, &param);
46a3df9f
S
3920}
3921
d695964d
JS
3922static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
3923{
3924 struct hclge_get_fd_mode_cmd *req;
3925 struct hclge_desc desc;
3926 int ret;
3927
3928 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
3929
3930 req = (struct hclge_get_fd_mode_cmd *)desc.data;
3931
3932 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3933 if (ret) {
3934 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
3935 return ret;
3936 }
3937
3938 *fd_mode = req->mode;
3939
3940 return ret;
3941}
3942
3943static int hclge_get_fd_allocation(struct hclge_dev *hdev,
3944 u32 *stage1_entry_num,
3945 u32 *stage2_entry_num,
3946 u16 *stage1_counter_num,
3947 u16 *stage2_counter_num)
3948{
3949 struct hclge_get_fd_allocation_cmd *req;
3950 struct hclge_desc desc;
3951 int ret;
3952
3953 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
3954
3955 req = (struct hclge_get_fd_allocation_cmd *)desc.data;
3956
3957 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3958 if (ret) {
3959 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
3960 ret);
3961 return ret;
3962 }
3963
3964 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
3965 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
3966 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
3967 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
3968
3969 return ret;
3970}
3971
3972static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
3973{
3974 struct hclge_set_fd_key_config_cmd *req;
3975 struct hclge_fd_key_cfg *stage;
3976 struct hclge_desc desc;
3977 int ret;
3978
3979 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
3980
3981 req = (struct hclge_set_fd_key_config_cmd *)desc.data;
3982 stage = &hdev->fd_cfg.key_cfg[stage_num];
3983 req->stage = stage_num;
3984 req->key_select = stage->key_sel;
3985 req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
3986 req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
3987 req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
3988 req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
3989 req->tuple_mask = cpu_to_le32(~stage->tuple_active);
3990 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
3991
3992 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3993 if (ret)
3994 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
3995
3996 return ret;
3997}
3998
3999static int hclge_init_fd_config(struct hclge_dev *hdev)
4000{
4001#define LOW_2_WORDS 0x03
4002 struct hclge_fd_key_cfg *key_cfg;
4003 int ret;
4004
4005 if (!hnae3_dev_fd_supported(hdev))
4006 return 0;
4007
4008 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4009 if (ret)
4010 return ret;
4011
4012 switch (hdev->fd_cfg.fd_mode) {
4013 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4014 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4015 break;
4016 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4017 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4018 break;
4019 default:
4020 dev_err(&hdev->pdev->dev,
4021 "Unsupported flow director mode %d\n",
4022 hdev->fd_cfg.fd_mode);
4023 return -EOPNOTSUPP;
4024 }
4025
d695964d
JS
4026 hdev->fd_cfg.proto_support =
4027 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4028 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4029 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4030 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4031 key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4032 key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4033 key_cfg->outer_sipv6_word_en = 0;
4034 key_cfg->outer_dipv6_word_en = 0;
4035
4036 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4037 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4038 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4039 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4040
4041 /* If the max 400 bit key is used, we can support tuples for ether type */
4042 if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4043 hdev->fd_cfg.proto_support |= ETHER_FLOW;
4044 key_cfg->tuple_active |=
4045 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4046 }
4047
4048 /* roce_type is used to filter roce frames
4049 * dst_vport is used to specify the rule
4050 */
4051 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4052
4053 ret = hclge_get_fd_allocation(hdev,
4054 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4055 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4056 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4057 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4058 if (ret)
4059 return ret;
4060
4061 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4062}
4063
11732868
JS
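/* Write one flow director TCAM entry. The key is larger than a single
 * descriptor, so it is split across three chained HCLGE_OPC_FD_TCAM_OP
 * descriptors; sel_x selects whether the x or y part of the entry is
 * written, and is_add controls the entry valid bit.
 */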
4064static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4065 int loc, u8 *key, bool is_add)
4066{
4067 struct hclge_fd_tcam_config_1_cmd *req1;
4068 struct hclge_fd_tcam_config_2_cmd *req2;
4069 struct hclge_fd_tcam_config_3_cmd *req3;
4070 struct hclge_desc desc[3];
4071 int ret;
4072
4073 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4074 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4075 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4076 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4077 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4078
4079 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4080 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4081 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4082
4083 req1->stage = stage;
4084 req1->xy_sel = sel_x ? 1 : 0;
4085 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4086 req1->index = cpu_to_le32(loc);
4087 req1->entry_vld = sel_x ? is_add : 0;
4088
4089 if (key) {
4090 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4091 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4092 sizeof(req2->tcam_data));
4093 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4094 sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4095 }
4096
4097 ret = hclge_cmd_send(&hdev->hw, desc, 3);
4098 if (ret)
4099 dev_err(&hdev->pdev->dev,
4100 "config tcam key fail, ret=%d\n",
4101 ret);
4102
4103 return ret;
4104}
4105
4106static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4107 struct hclge_fd_ad_data *action)
4108{
4109 struct hclge_fd_ad_config_cmd *req;
4110 struct hclge_desc desc;
4111 u64 ad_data = 0;
4112 int ret;
4113
4114 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4115
4116 req = (struct hclge_fd_ad_config_cmd *)desc.data;
4117 req->index = cpu_to_le32(loc);
4118 req->stage = stage;
4119
4120 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4121 action->write_rule_id_to_bd);
4122 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4123 action->rule_id);
4124 ad_data <<= 32;
4125 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4126 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4127 action->forward_to_direct_queue);
4128 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4129 action->queue_id);
4130 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4131 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4132 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4133 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4134 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4135 action->counter_id);
4136
4137 req->ad_data = cpu_to_le64(ad_data);
4138 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4139 if (ret)
4140 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4141
4142 return ret;
4143}
4144
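/* Convert one tuple of the rule (value/mask pair) into the x/y key format
 * expected by the flow director TCAM. Returns true if the tuple occupies
 * space in the key (even when it is masked out via unused_tuple), false
 * otherwise.
 */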
4145static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4146 struct hclge_fd_rule *rule)
4147{
4148 u16 tmp_x_s, tmp_y_s;
4149 u32 tmp_x_l, tmp_y_l;
4150 int i;
4151
4152 if (rule->unused_tuple & tuple_bit)
4153 return true;
4154
4155 switch (tuple_bit) {
4156 case 0:
4157 return false;
4158 case BIT(INNER_DST_MAC):
4159 for (i = 0; i < 6; i++) {
4160 calc_x(key_x[5 - i], rule->tuples.dst_mac[i],
4161 rule->tuples_mask.dst_mac[i]);
4162 calc_y(key_y[5 - i], rule->tuples.dst_mac[i],
4163 rule->tuples_mask.dst_mac[i]);
4164 }
4165
4166 return true;
4167 case BIT(INNER_SRC_MAC):
4168 for (i = 0; i < 6; i++) {
4169 calc_x(key_x[5 - i], rule->tuples.src_mac[i],
4170 rule->tuples.src_mac[i]);
4171 calc_y(key_y[5 - i], rule->tuples.src_mac[i],
4172 rule->tuples.src_mac[i]);
4173 }
4174
4175 return true;
4176 case BIT(INNER_VLAN_TAG_FST):
4177 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
4178 rule->tuples_mask.vlan_tag1);
4179 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
4180 rule->tuples_mask.vlan_tag1);
4181 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4182 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4183
4184 return true;
4185 case BIT(INNER_ETH_TYPE):
4186 calc_x(tmp_x_s, rule->tuples.ether_proto,
4187 rule->tuples_mask.ether_proto);
4188 calc_y(tmp_y_s, rule->tuples.ether_proto,
4189 rule->tuples_mask.ether_proto);
4190 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4191 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4192
4193 return true;
4194 case BIT(INNER_IP_TOS):
4195 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4196 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4197
4198 return true;
4199 case BIT(INNER_IP_PROTO):
4200 calc_x(*key_x, rule->tuples.ip_proto,
4201 rule->tuples_mask.ip_proto);
4202 calc_y(*key_y, rule->tuples.ip_proto,
4203 rule->tuples_mask.ip_proto);
4204
4205 return true;
4206 case BIT(INNER_SRC_IP):
4207 calc_x(tmp_x_l, rule->tuples.src_ip[3],
4208 rule->tuples_mask.src_ip[3]);
4209 calc_y(tmp_y_l, rule->tuples.src_ip[3],
4210 rule->tuples_mask.src_ip[3]);
4211 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4212 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4213
4214 return true;
4215 case BIT(INNER_DST_IP):
4216 calc_x(tmp_x_l, rule->tuples.dst_ip[3],
4217 rule->tuples_mask.dst_ip[3]);
4218 calc_y(tmp_y_l, rule->tuples.dst_ip[3],
4219 rule->tuples_mask.dst_ip[3]);
4220 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4221 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4222
4223 return true;
4224 case BIT(INNER_SRC_PORT):
4225 calc_x(tmp_x_s, rule->tuples.src_port,
4226 rule->tuples_mask.src_port);
4227 calc_y(tmp_y_s, rule->tuples.src_port,
4228 rule->tuples_mask.src_port);
4229 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4230 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4231
4232 return true;
4233 case BIT(INNER_DST_PORT):
4234 calc_x(tmp_x_s, rule->tuples.dst_port,
4235 rule->tuples_mask.dst_port);
4236 calc_y(tmp_y_s, rule->tuples.dst_port,
4237 rule->tuples_mask.dst_port);
4238 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4239 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4240
4241 return true;
4242 default:
4243 return false;
4244 }
4245}
4246
4247static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
4248 u8 vf_id, u8 network_port_id)
4249{
4250 u32 port_number = 0;
4251
4252 if (port_type == HOST_PORT) {
4253 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
4254 pf_id);
4255 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
4256 vf_id);
4257 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
4258 } else {
4259 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
4260 HCLGE_NETWORK_PORT_ID_S, network_port_id);
4261 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
4262 }
4263
4264 return port_number;
4265}
4266
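/* Build the meta data part of the flow director key: packet type and
 * destination vport are packed into a bit field, converted to x/y format
 * and shifted so that the meta data sits at the MSB end of the word.
 */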
4267static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
4268 __le32 *key_x, __le32 *key_y,
4269 struct hclge_fd_rule *rule)
4270{
4271 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
4272 u8 cur_pos = 0, tuple_size, shift_bits;
4273 int i;
4274
4275 for (i = 0; i < MAX_META_DATA; i++) {
4276 tuple_size = meta_data_key_info[i].key_length;
4277 tuple_bit = key_cfg->meta_data_active & BIT(i);
4278
4279 switch (tuple_bit) {
4280 case BIT(ROCE_TYPE):
4281 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
4282 cur_pos += tuple_size;
4283 break;
4284 case BIT(DST_VPORT):
4285 port_number = hclge_get_port_number(HOST_PORT, 0,
4286 rule->vf_id, 0);
4287 hnae3_set_field(meta_data,
4288 GENMASK(cur_pos + tuple_size, cur_pos),
4289 cur_pos, port_number);
4290 cur_pos += tuple_size;
4291 break;
4292 default:
4293 break;
4294 }
4295 }
4296
4297 calc_x(tmp_x, meta_data, 0xFFFFFFFF);
4298 calc_y(tmp_y, meta_data, 0xFFFFFFFF);
4299 shift_bits = sizeof(meta_data) * 8 - cur_pos;
4300
4301 *key_x = cpu_to_le32(tmp_x << shift_bits);
4302 *key_y = cpu_to_le32(tmp_y << shift_bits);
4303}
4304
4305/* A complete key is made up of the meta data key and the tuple key.
4306 * The meta data key is stored at the MSB region, the tuple key is stored
4307 * at the LSB region, and unused bits are filled with 0.
4308 */
4309static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
4310 struct hclge_fd_rule *rule)
4311{
4312 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
4313 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
4314 u8 *cur_key_x, *cur_key_y;
4315 int i, ret, tuple_size;
4316 u8 meta_data_region;
4317
4318 memset(key_x, 0, sizeof(key_x));
4319 memset(key_y, 0, sizeof(key_y));
4320 cur_key_x = key_x;
4321 cur_key_y = key_y;
4322
4323 for (i = 0; i < MAX_TUPLE; i++) {
4324 bool tuple_valid;
4325 u32 check_tuple;
4326
4327 tuple_size = tuple_key_info[i].key_length / 8;
4328 check_tuple = key_cfg->tuple_active & BIT(i);
4329
4330 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
4331 cur_key_y, rule);
4332 if (tuple_valid) {
4333 cur_key_x += tuple_size;
4334 cur_key_y += tuple_size;
4335 }
4336 }
4337
4338 meta_data_region = hdev->fd_cfg.max_key_length / 8 -
4339 MAX_META_DATA_LENGTH / 8;
4340
4341 hclge_fd_convert_meta_data(key_cfg,
4342 (__le32 *)(key_x + meta_data_region),
4343 (__le32 *)(key_y + meta_data_region),
4344 rule);
4345
4346 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
4347 true);
4348 if (ret) {
4349 dev_err(&hdev->pdev->dev,
4350 "fd key_y config fail, loc=%d, ret=%d\n",
4351 rule->location, ret);
4352 return ret;
4353 }
4354
4355 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
4356 true);
4357 if (ret)
4358 dev_err(&hdev->pdev->dev,
4359 "fd key_x config fail, loc=%d, ret=%d\n",
4360 rule->location, ret);
4361 return ret;
4362}
4363
4364static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
4365 struct hclge_fd_rule *rule)
4366{
4367 struct hclge_fd_ad_data ad_data;
4368
4369 ad_data.ad_id = rule->location;
4370
4371 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
4372 ad_data.drop_packet = true;
4373 ad_data.forward_to_direct_queue = false;
4374 ad_data.queue_id = 0;
4375 } else {
4376 ad_data.drop_packet = false;
4377 ad_data.forward_to_direct_queue = true;
4378 ad_data.queue_id = rule->queue_id;
4379 }
4380
4381 ad_data.use_counter = false;
4382 ad_data.counter_id = 0;
4383
4384 ad_data.use_next_stage = false;
4385 ad_data.next_input_key = 0;
4386
4387 ad_data.write_rule_id_to_bd = true;
4388 ad_data.rule_id = rule->location;
4389
4390 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
4391}
4392
dd74f815
JS
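/* Validate an ethtool flow spec against the flow director capabilities
 * (location range, supported flow types, user-def bytes) and record the
 * tuples that are not used by the rule in *unused.
 */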
4393static int hclge_fd_check_spec(struct hclge_dev *hdev,
4394 struct ethtool_rx_flow_spec *fs, u32 *unused)
4395{
4396 struct ethtool_tcpip4_spec *tcp_ip4_spec;
4397 struct ethtool_usrip4_spec *usr_ip4_spec;
4398 struct ethtool_tcpip6_spec *tcp_ip6_spec;
4399 struct ethtool_usrip6_spec *usr_ip6_spec;
4400 struct ethhdr *ether_spec;
4401
4402 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4403 return -EINVAL;
4404
4405 if (!(fs->flow_type & hdev->fd_cfg.proto_support))
4406 return -EOPNOTSUPP;
4407
4408 if ((fs->flow_type & FLOW_EXT) &&
4409 (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
4410 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
4411 return -EOPNOTSUPP;
4412 }
4413
4414 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4415 case SCTP_V4_FLOW:
4416 case TCP_V4_FLOW:
4417 case UDP_V4_FLOW:
4418 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
4419 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
4420
4421 if (!tcp_ip4_spec->ip4src)
4422 *unused |= BIT(INNER_SRC_IP);
4423
4424 if (!tcp_ip4_spec->ip4dst)
4425 *unused |= BIT(INNER_DST_IP);
4426
4427 if (!tcp_ip4_spec->psrc)
4428 *unused |= BIT(INNER_SRC_PORT);
4429
4430 if (!tcp_ip4_spec->pdst)
4431 *unused |= BIT(INNER_DST_PORT);
4432
4433 if (!tcp_ip4_spec->tos)
4434 *unused |= BIT(INNER_IP_TOS);
4435
4436 break;
4437 case IP_USER_FLOW:
4438 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
4439 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4440 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4441
4442 if (!usr_ip4_spec->ip4src)
4443 *unused |= BIT(INNER_SRC_IP);
4444
4445 if (!usr_ip4_spec->ip4dst)
4446 *unused |= BIT(INNER_DST_IP);
4447
4448 if (!usr_ip4_spec->tos)
4449 *unused |= BIT(INNER_IP_TOS);
4450
4451 if (!usr_ip4_spec->proto)
4452 *unused |= BIT(INNER_IP_PROTO);
4453
4454 if (usr_ip4_spec->l4_4_bytes)
4455 return -EOPNOTSUPP;
4456
4457 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
4458 return -EOPNOTSUPP;
4459
4460 break;
4461 case SCTP_V6_FLOW:
4462 case TCP_V6_FLOW:
4463 case UDP_V6_FLOW:
4464 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
4465 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4466 BIT(INNER_IP_TOS);
4467
4468 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
4469 !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
4470 *unused |= BIT(INNER_SRC_IP);
4471
4472 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
4473 !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
4474 *unused |= BIT(INNER_DST_IP);
4475
4476 if (!tcp_ip6_spec->psrc)
4477 *unused |= BIT(INNER_SRC_PORT);
4478
4479 if (!tcp_ip6_spec->pdst)
4480 *unused |= BIT(INNER_DST_PORT);
4481
4482 if (tcp_ip6_spec->tclass)
4483 return -EOPNOTSUPP;
4484
4485 break;
4486 case IPV6_USER_FLOW:
4487 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
4488 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4489 BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
4490 BIT(INNER_DST_PORT);
4491
4492 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
4493 !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
4494 *unused |= BIT(INNER_SRC_IP);
4495
4496 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
4497 !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
4498 *unused |= BIT(INNER_DST_IP);
4499
4500 if (!usr_ip6_spec->l4_proto)
4501 *unused |= BIT(INNER_IP_PROTO);
4502
4503 if (usr_ip6_spec->tclass)
4504 return -EOPNOTSUPP;
4505
4506 if (usr_ip6_spec->l4_4_bytes)
4507 return -EOPNOTSUPP;
4508
4509 break;
4510 case ETHER_FLOW:
4511 ether_spec = &fs->h_u.ether_spec;
4512 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4513 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
4514 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
4515
4516 if (is_zero_ether_addr(ether_spec->h_source))
4517 *unused |= BIT(INNER_SRC_MAC);
4518
4519 if (is_zero_ether_addr(ether_spec->h_dest))
4520 *unused |= BIT(INNER_DST_MAC);
4521
4522 if (!ether_spec->h_proto)
4523 *unused |= BIT(INNER_ETH_TYPE);
4524
4525 break;
4526 default:
4527 return -EOPNOTSUPP;
4528 }
4529
4530 if ((fs->flow_type & FLOW_EXT)) {
4531 if (fs->h_ext.vlan_etype)
4532 return -EOPNOTSUPP;
4533 if (!fs->h_ext.vlan_tci)
4534 *unused |= BIT(INNER_VLAN_TAG_FST);
4535
4536 if (fs->m_ext.vlan_tci) {
4537 if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
4538 return -EINVAL;
4539 }
4540 } else {
4541 *unused |= BIT(INNER_VLAN_TAG_FST);
4542 }
4543
4544 if (fs->flow_type & FLOW_MAC_EXT) {
4545 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
4546 return -EOPNOTSUPP;
4547
4548 if (is_zero_ether_addr(fs->h_ext.h_dest))
4549 *unused |= BIT(INNER_DST_MAC);
4550 else
4551 *unused &= ~(BIT(INNER_DST_MAC));
4552 }
4553
4554 return 0;
4555}
4556
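/* The flow director rule list is kept sorted by rule location; the
 * helpers below search and insert based on that ordering.
 */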
4557static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
4558{
4559 struct hclge_fd_rule *rule = NULL;
4560 struct hlist_node *node2;
4561
4562 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4563 if (rule->location >= location)
4564 break;
4565 }
4566
4567 return rule && rule->location == location;
4568}
4569
4570static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
4571 struct hclge_fd_rule *new_rule,
4572 u16 location,
4573 bool is_add)
4574{
4575 struct hclge_fd_rule *rule = NULL, *parent = NULL;
4576 struct hlist_node *node2;
4577
4578 if (is_add && !new_rule)
4579 return -EINVAL;
4580
4581 hlist_for_each_entry_safe(rule, node2,
4582 &hdev->fd_rule_list, rule_node) {
4583 if (rule->location >= location)
4584 break;
4585 parent = rule;
4586 }
4587
4588 if (rule && rule->location == location) {
4589 hlist_del(&rule->rule_node);
4590 kfree(rule);
4591 hdev->hclge_fd_rule_num--;
4592
4593 if (!is_add)
4594 return 0;
4595
4596 } else if (!is_add) {
4597 dev_err(&hdev->pdev->dev,
4598 "delete fail, rule %d does not exist\n",
4599 location);
4600 return -EINVAL;
4601 }
4602
4603 INIT_HLIST_NODE(&new_rule->rule_node);
4604
4605 if (parent)
4606 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
4607 else
4608 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
4609
4610 hdev->hclge_fd_rule_num++;
4611
4612 return 0;
4613}
4614
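/* Translate the ethtool flow spec into the driver's rule tuples, filling
 * both values and masks and converting multi-byte fields from big endian
 * to CPU order.
 */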
4615static int hclge_fd_get_tuple(struct hclge_dev *hdev,
4616 struct ethtool_rx_flow_spec *fs,
4617 struct hclge_fd_rule *rule)
4618{
4619 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
4620
4621 switch (flow_type) {
4622 case SCTP_V4_FLOW:
4623 case TCP_V4_FLOW:
4624 case UDP_V4_FLOW:
4625 rule->tuples.src_ip[3] =
4626 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
4627 rule->tuples_mask.src_ip[3] =
4628 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
4629
4630 rule->tuples.dst_ip[3] =
4631 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
4632 rule->tuples_mask.dst_ip[3] =
4633 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
4634
4635 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
4636 rule->tuples_mask.src_port =
4637 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
4638
4639 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
4640 rule->tuples_mask.dst_port =
4641 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
4642
4643 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
4644 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
4645
4646 rule->tuples.ether_proto = ETH_P_IP;
4647 rule->tuples_mask.ether_proto = 0xFFFF;
4648
4649 break;
4650 case IP_USER_FLOW:
4651 rule->tuples.src_ip[3] =
4652 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
4653 rule->tuples_mask.src_ip[3] =
4654 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
4655
4656 rule->tuples.dst_ip[3] =
4657 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
4658 rule->tuples_mask.dst_ip[3] =
4659 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
4660
4661 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
4662 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
4663
4664 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
4665 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
4666
4667 rule->tuples.ether_proto = ETH_P_IP;
4668 rule->tuples_mask.ether_proto = 0xFFFF;
4669
4670 break;
4671 case SCTP_V6_FLOW:
4672 case TCP_V6_FLOW:
4673 case UDP_V6_FLOW:
4674 be32_to_cpu_array(rule->tuples.src_ip,
4675 fs->h_u.tcp_ip6_spec.ip6src, 4);
4676 be32_to_cpu_array(rule->tuples_mask.src_ip,
4677 fs->m_u.tcp_ip6_spec.ip6src, 4);
4678
4679 be32_to_cpu_array(rule->tuples.dst_ip,
4680 fs->h_u.tcp_ip6_spec.ip6dst, 4);
4681 be32_to_cpu_array(rule->tuples_mask.dst_ip,
4682 fs->m_u.tcp_ip6_spec.ip6dst, 4);
4683
4684 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
4685 rule->tuples_mask.src_port =
4686 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
4687
4688 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
4689 rule->tuples_mask.dst_port =
4690 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
4691
4692 rule->tuples.ether_proto = ETH_P_IPV6;
4693 rule->tuples_mask.ether_proto = 0xFFFF;
4694
4695 break;
4696 case IPV6_USER_FLOW:
4697 be32_to_cpu_array(rule->tuples.src_ip,
4698 fs->h_u.usr_ip6_spec.ip6src, 4);
4699 be32_to_cpu_array(rule->tuples_mask.src_ip,
4700 fs->m_u.usr_ip6_spec.ip6src, 4);
4701
4702 be32_to_cpu_array(rule->tuples.dst_ip,
4703 fs->h_u.usr_ip6_spec.ip6dst, 4);
4704 be32_to_cpu_array(rule->tuples_mask.dst_ip,
4705 fs->m_u.usr_ip6_spec.ip6dst, 4);
4706
4707 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
4708 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
4709
4710 rule->tuples.ether_proto = ETH_P_IPV6;
4711 rule->tuples_mask.ether_proto = 0xFFFF;
4712
4713 break;
4714 case ETHER_FLOW:
4715 ether_addr_copy(rule->tuples.src_mac,
4716 fs->h_u.ether_spec.h_source);
4717 ether_addr_copy(rule->tuples_mask.src_mac,
4718 fs->m_u.ether_spec.h_source);
4719
4720 ether_addr_copy(rule->tuples.dst_mac,
4721 fs->h_u.ether_spec.h_dest);
4722 ether_addr_copy(rule->tuples_mask.dst_mac,
4723 fs->m_u.ether_spec.h_dest);
4724
4725 rule->tuples.ether_proto =
4726 be16_to_cpu(fs->h_u.ether_spec.h_proto);
4727 rule->tuples_mask.ether_proto =
4728 be16_to_cpu(fs->m_u.ether_spec.h_proto);
4729
4730 break;
4731 default:
4732 return -EOPNOTSUPP;
4733 }
4734
4735 switch (flow_type) {
4736 case SCTP_V4_FLOW:
4737 case SCTP_V6_FLOW:
4738 rule->tuples.ip_proto = IPPROTO_SCTP;
4739 rule->tuples_mask.ip_proto = 0xFF;
4740 break;
4741 case TCP_V4_FLOW:
4742 case TCP_V6_FLOW:
4743 rule->tuples.ip_proto = IPPROTO_TCP;
4744 rule->tuples_mask.ip_proto = 0xFF;
4745 break;
4746 case UDP_V4_FLOW:
4747 case UDP_V6_FLOW:
4748 rule->tuples.ip_proto = IPPROTO_UDP;
4749 rule->tuples_mask.ip_proto = 0xFF;
4750 break;
4751 default:
4752 break;
4753 }
4754
4755 if ((fs->flow_type & FLOW_EXT)) {
4756 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
4757 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
4758 }
4759
4760 if (fs->flow_type & FLOW_MAC_EXT) {
4761 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
4762 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
4763 }
4764
4765 return 0;
4766}
4767
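/* Add one flow director rule requested from user space, e.g. via
 * (device name illustrative):
 *	ethtool -U eth0 flow-type tcp4 dst-port 80 action 2 loc 5
 * The spec is validated, the destination vport/queue is resolved, then
 * the rule is programmed (action first, then key) and tracked in the
 * rule list.
 */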
4768static int hclge_add_fd_entry(struct hnae3_handle *handle,
4769 struct ethtool_rxnfc *cmd)
4770{
4771 struct hclge_vport *vport = hclge_get_vport(handle);
4772 struct hclge_dev *hdev = vport->back;
4773 u16 dst_vport_id = 0, q_index = 0;
4774 struct ethtool_rx_flow_spec *fs;
4775 struct hclge_fd_rule *rule;
4776 u32 unused = 0;
4777 u8 action;
4778 int ret;
4779
4780 if (!hnae3_dev_fd_supported(hdev))
4781 return -EOPNOTSUPP;
4782
9abeb7d8 4783 if (!hdev->fd_en) {
dd74f815
JS
4784 dev_warn(&hdev->pdev->dev,
4785 "Please enable flow director first\n");
4786 return -EOPNOTSUPP;
4787 }
4788
4789 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4790
4791 ret = hclge_fd_check_spec(hdev, fs, &unused);
4792 if (ret) {
4793 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
4794 return ret;
4795 }
4796
4797 if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
4798 action = HCLGE_FD_ACTION_DROP_PACKET;
4799 } else {
4800 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
4801 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
4802 u16 tqps;
4803
0285dbae
JS
4804 if (vf > hdev->num_req_vfs) {
4805 dev_err(&hdev->pdev->dev,
4806 "Error: vf id (%d) > max vf num (%d)\n",
4807 vf, hdev->num_req_vfs);
4808 return -EINVAL;
4809 }
4810
dd74f815
JS
4811 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
4812 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
4813
4814 if (ring >= tqps) {
4815 dev_err(&hdev->pdev->dev,
4816 "Error: queue id (%d) > max tqp num (%d)\n",
4817 ring, tqps - 1);
4818 return -EINVAL;
4819 }
4820
dd74f815
JS
4821 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
4822 q_index = ring;
4823 }
4824
4825 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
4826 if (!rule)
4827 return -ENOMEM;
4828
4829 ret = hclge_fd_get_tuple(hdev, fs, rule);
4830 if (ret)
4831 goto free_rule;
4832
4833 rule->flow_type = fs->flow_type;
4834
4835 rule->location = fs->location;
4836 rule->unused_tuple = unused;
4837 rule->vf_id = dst_vport_id;
4838 rule->queue_id = q_index;
4839 rule->action = action;
4840
4841 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
4842 if (ret)
4843 goto free_rule;
4844
4845 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
4846 if (ret)
4847 goto free_rule;
4848
4849 ret = hclge_fd_update_rule_list(hdev, rule, fs->location, true);
4850 if (ret)
4851 goto free_rule;
4852
4853 return ret;
4854
4855free_rule:
4856 kfree(rule);
4857 return ret;
4858}
4859
4860static int hclge_del_fd_entry(struct hnae3_handle *handle,
4861 struct ethtool_rxnfc *cmd)
4862{
4863 struct hclge_vport *vport = hclge_get_vport(handle);
4864 struct hclge_dev *hdev = vport->back;
4865 struct ethtool_rx_flow_spec *fs;
4866 int ret;
4867
4868 if (!hnae3_dev_fd_supported(hdev))
4869 return -EOPNOTSUPP;
4870
4871 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4872
4873 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4874 return -EINVAL;
4875
4876 if (!hclge_fd_rule_exist(hdev, fs->location)) {
4877 dev_err(&hdev->pdev->dev,
4878 "Delete fail, rule %d does not exist\n",
4879 fs->location);
4880 return -ENOENT;
4881 }
4882
4883 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4884 fs->location, NULL, false);
4885 if (ret)
4886 return ret;
4887
4888 return hclge_fd_update_rule_list(hdev, NULL, fs->location,
4889 false);
4890}
4891
6871af29
JS
4892static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
4893 bool clear_list)
4894{
4895 struct hclge_vport *vport = hclge_get_vport(handle);
4896 struct hclge_dev *hdev = vport->back;
4897 struct hclge_fd_rule *rule;
4898 struct hlist_node *node;
4899
4900 if (!hnae3_dev_fd_supported(hdev))
4901 return;
4902
4903 if (clear_list) {
4904 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
4905 rule_node) {
4906 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4907 rule->location, NULL, false);
4908 hlist_del(&rule->rule_node);
4909 kfree(rule);
4910 hdev->hclge_fd_rule_num--;
4911 }
4912 } else {
4913 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
4914 rule_node)
4915 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4916 rule->location, NULL, false);
4917 }
4918}
4919
4920static int hclge_restore_fd_entries(struct hnae3_handle *handle)
4921{
4922 struct hclge_vport *vport = hclge_get_vport(handle);
4923 struct hclge_dev *hdev = vport->back;
4924 struct hclge_fd_rule *rule;
4925 struct hlist_node *node;
4926 int ret;
4927
65e41e7e
HT
4928 /* Return ok here, because reset error handling will check this
4929 * return value. If an error is returned here, the reset process will
4930 * fail.
4931 */
6871af29 4932 if (!hnae3_dev_fd_supported(hdev))
65e41e7e 4933 return 0;
6871af29 4934
8edc2285 4935 /* if fd is disabled, the rules should not be restored during reset */
9abeb7d8 4936 if (!hdev->fd_en)
8edc2285
JS
4937 return 0;
4938
6871af29
JS
4939 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
4940 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
4941 if (!ret)
4942 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
4943
4944 if (ret) {
4945 dev_warn(&hdev->pdev->dev,
4946 "Restore rule %d failed, remove it\n",
4947 rule->location);
4948 hlist_del(&rule->rule_node);
4949 kfree(rule);
4950 hdev->hclge_fd_rule_num--;
4951 }
4952 }
4953 return 0;
4954}
4955
05c2314f
JS
4956static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
4957 struct ethtool_rxnfc *cmd)
4958{
4959 struct hclge_vport *vport = hclge_get_vport(handle);
4960 struct hclge_dev *hdev = vport->back;
4961
4962 if (!hnae3_dev_fd_supported(hdev))
4963 return -EOPNOTSUPP;
4964
4965 cmd->rule_cnt = hdev->hclge_fd_rule_num;
4966 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
4967
4968 return 0;
4969}
4970
4971static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
4972 struct ethtool_rxnfc *cmd)
4973{
4974 struct hclge_vport *vport = hclge_get_vport(handle);
4975 struct hclge_fd_rule *rule = NULL;
4976 struct hclge_dev *hdev = vport->back;
4977 struct ethtool_rx_flow_spec *fs;
4978 struct hlist_node *node2;
4979
4980 if (!hnae3_dev_fd_supported(hdev))
4981 return -EOPNOTSUPP;
4982
4983 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4984
4985 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4986 if (rule->location >= fs->location)
4987 break;
4988 }
4989
4990 if (!rule || fs->location != rule->location)
4991 return -ENOENT;
4992
4993 fs->flow_type = rule->flow_type;
4994 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4995 case SCTP_V4_FLOW:
4996 case TCP_V4_FLOW:
4997 case UDP_V4_FLOW:
4998 fs->h_u.tcp_ip4_spec.ip4src =
4999 cpu_to_be32(rule->tuples.src_ip[3]);
5000 fs->m_u.tcp_ip4_spec.ip4src =
5001 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5002 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5003
5004 fs->h_u.tcp_ip4_spec.ip4dst =
5005 cpu_to_be32(rule->tuples.dst_ip[3]);
5006 fs->m_u.tcp_ip4_spec.ip4dst =
5007 rule->unused_tuple & BIT(INNER_DST_IP) ?
5008 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5009
5010 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5011 fs->m_u.tcp_ip4_spec.psrc =
5012 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5013 0 : cpu_to_be16(rule->tuples_mask.src_port);
5014
5015 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5016 fs->m_u.tcp_ip4_spec.pdst =
5017 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5018 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5019
5020 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5021 fs->m_u.tcp_ip4_spec.tos =
5022 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5023 0 : rule->tuples_mask.ip_tos;
5024
5025 break;
5026 case IP_USER_FLOW:
5027 fs->h_u.usr_ip4_spec.ip4src =
5028 cpu_to_be32(rule->tuples.src_ip[3]);
5029 fs->m_u.tcp_ip4_spec.ip4src =
5030 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5031 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5032
5033 fs->h_u.usr_ip4_spec.ip4dst =
5034 cpu_to_be32(rule->tuples.dst_ip[3]);
5035 fs->m_u.usr_ip4_spec.ip4dst =
5036 rule->unused_tuple & BIT(INNER_DST_IP) ?
5037 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5038
5039 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5040 fs->m_u.usr_ip4_spec.tos =
5041 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5042 0 : rule->tuples_mask.ip_tos;
5043
5044 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5045 fs->m_u.usr_ip4_spec.proto =
5046 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5047 0 : rule->tuples_mask.ip_proto;
5048
5049 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5050
5051 break;
5052 case SCTP_V6_FLOW:
5053 case TCP_V6_FLOW:
5054 case UDP_V6_FLOW:
5055 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5056 rule->tuples.src_ip, 4);
5057 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5058 memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4);
5059 else
5060 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5061 rule->tuples_mask.src_ip, 4);
5062
5063 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5064 rule->tuples.dst_ip, 4);
5065 if (rule->unused_tuple & BIT(INNER_DST_IP))
5066 memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5067 else
5068 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5069 rule->tuples_mask.dst_ip, 4);
5070
5071 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5072 fs->m_u.tcp_ip6_spec.psrc =
5073 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5074 0 : cpu_to_be16(rule->tuples_mask.src_port);
5075
5076 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5077 fs->m_u.tcp_ip6_spec.pdst =
5078 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5079 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5080
5081 break;
5082 case IPV6_USER_FLOW:
5083 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5084 rule->tuples.src_ip, 4);
5085 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5086 memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4);
5087 else
5088 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
5089 rule->tuples_mask.src_ip, 4);
5090
5091 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
5092 rule->tuples.dst_ip, 4);
5093 if (rule->unused_tuple & BIT(INNER_DST_IP))
5094 memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5095 else
5096 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
5097 rule->tuples_mask.dst_ip, 4);
5098
5099 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
5100 fs->m_u.usr_ip6_spec.l4_proto =
5101 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5102 0 : rule->tuples_mask.ip_proto;
5103
5104 break;
5105 case ETHER_FLOW:
5106 ether_addr_copy(fs->h_u.ether_spec.h_source,
5107 rule->tuples.src_mac);
5108 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
5109 eth_zero_addr(fs->m_u.ether_spec.h_source);
5110 else
5111 ether_addr_copy(fs->m_u.ether_spec.h_source,
5112 rule->tuples_mask.src_mac);
5113
5114 ether_addr_copy(fs->h_u.ether_spec.h_dest,
5115 rule->tuples.dst_mac);
5116 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5117 eth_zero_addr(fs->m_u.ether_spec.h_dest);
5118 else
5119 ether_addr_copy(fs->m_u.ether_spec.h_dest,
5120 rule->tuples_mask.dst_mac);
5121
5122 fs->h_u.ether_spec.h_proto =
5123 cpu_to_be16(rule->tuples.ether_proto);
5124 fs->m_u.ether_spec.h_proto =
5125 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
5126 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
5127
5128 break;
5129 default:
5130 return -EOPNOTSUPP;
5131 }
5132
5133 if (fs->flow_type & FLOW_EXT) {
5134 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
5135 fs->m_ext.vlan_tci =
5136 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
5137 cpu_to_be16(VLAN_VID_MASK) :
5138 cpu_to_be16(rule->tuples_mask.vlan_tag1);
5139 }
5140
5141 if (fs->flow_type & FLOW_MAC_EXT) {
5142 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
5143 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5144 eth_zero_addr(fs->m_u.ether_spec.h_dest);
5145 else
5146 ether_addr_copy(fs->m_u.ether_spec.h_dest,
5147 rule->tuples_mask.dst_mac);
5148 }
5149
5150 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5151 fs->ring_cookie = RX_CLS_FLOW_DISC;
5152 } else {
5153 u64 vf_id;
5154
5155 fs->ring_cookie = rule->queue_id;
5156 vf_id = rule->vf_id;
5157 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
5158 fs->ring_cookie |= vf_id;
5159 }
5160
5161 return 0;
5162}
5163
5164static int hclge_get_all_rules(struct hnae3_handle *handle,
5165 struct ethtool_rxnfc *cmd, u32 *rule_locs)
5166{
5167 struct hclge_vport *vport = hclge_get_vport(handle);
5168 struct hclge_dev *hdev = vport->back;
5169 struct hclge_fd_rule *rule;
5170 struct hlist_node *node2;
5171 int cnt = 0;
5172
5173 if (!hnae3_dev_fd_supported(hdev))
5174 return -EOPNOTSUPP;
5175
5176 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5177
5178 hlist_for_each_entry_safe(rule, node2,
5179 &hdev->fd_rule_list, rule_node) {
5180 if (cnt == cmd->rule_cnt)
5181 return -EMSGSIZE;
5182
5183 rule_locs[cnt] = rule->location;
5184 cnt++;
5185 }
5186
5187 cmd->rule_cnt = cnt;
5188
5189 return 0;
5190}
5191
4d60291b
HT
5192static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
5193{
5194 struct hclge_vport *vport = hclge_get_vport(handle);
5195 struct hclge_dev *hdev = vport->back;
5196
5197 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
5198 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
5199}
5200
5201static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
5202{
5203 struct hclge_vport *vport = hclge_get_vport(handle);
5204 struct hclge_dev *hdev = vport->back;
5205
5206 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
5207}
5208
5209static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
5210{
5211 struct hclge_vport *vport = hclge_get_vport(handle);
5212 struct hclge_dev *hdev = vport->back;
5213
5214 return hdev->reset_count;
5215}
5216
c17852a8
JS
5217static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
5218{
5219 struct hclge_vport *vport = hclge_get_vport(handle);
5220 struct hclge_dev *hdev = vport->back;
5221
9abeb7d8 5222 hdev->fd_en = enable;
c17852a8
JS
5223 if (!enable)
5224 hclge_del_all_fd_entries(handle, false);
5225 else
5226 hclge_restore_fd_entries(handle);
5227}
5228
46a3df9f
S
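/* Enable or disable the MAC: TX/RX enable, padding, FCS insertion and
 * stripping, and oversize truncation are all switched together via the
 * HCLGE_OPC_CONFIG_MAC_MODE command.
 */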
5229static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
5230{
5231 struct hclge_desc desc;
d44f9b63
YL
5232 struct hclge_config_mac_mode_cmd *req =
5233 (struct hclge_config_mac_mode_cmd *)desc.data;
a90bb9a5 5234 u32 loop_en = 0;
46a3df9f
S
5235 int ret;
5236
5237 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
e4e87715
PL
5238 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
5239 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
5240 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
5241 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
5242 hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
5243 hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
5244 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
5245 hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
5246 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
5247 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
5248 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
5249 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
5250 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
5251 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
a90bb9a5 5252 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
46a3df9f
S
5253
5254 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5255 if (ret)
5256 dev_err(&hdev->pdev->dev,
5257 "mac enable fail, ret =%d.\n", ret);
5258}
5259
eb66d503 5260static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
c39c4d98 5261{
c39c4d98 5262 struct hclge_config_mac_mode_cmd *req;
c39c4d98
YL
5263 struct hclge_desc desc;
5264 u32 loop_en;
5265 int ret;
5266
e4d68dae
YL
5267 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
5268 /* 1 Read out the MAC mode config at first */
5269 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
5270 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5271 if (ret) {
5272 dev_err(&hdev->pdev->dev,
5273 "mac loopback get fail, ret =%d.\n", ret);
5274 return ret;
5275 }
c39c4d98 5276
e4d68dae
YL
5277 /* 2 Then setup the loopback flag */
5278 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
e4e87715 5279 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
0f29fc23
YL
5280 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
5281 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
e4d68dae
YL
5282
5283 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
c39c4d98 5284
e4d68dae
YL
5285 /* 3 Config mac work mode with loopback flag
5286 * and its original configure parameters
5287 */
5288 hclge_cmd_reuse_desc(&desc, false);
5289 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5290 if (ret)
5291 dev_err(&hdev->pdev->dev,
5292 "mac loopback set fail, ret =%d.\n", ret);
5293 return ret;
5294}
c39c4d98 5295
4dc13b96
FL
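/* Enable or disable serdes loopback (serial or parallel inner loopback).
 * After sending the command, poll until the firmware reports completion,
 * then reconfigure the MAC and wait for the link status to reach the
 * expected state.
 */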
5296static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
5297 enum hnae3_loop loop_mode)
5fd50ac3
PL
5298{
5299#define HCLGE_SERDES_RETRY_MS 10
5300#define HCLGE_SERDES_RETRY_NUM 100
350fda0a 5301
5302#define HCLGE_MAC_LINK_STATUS_MS 20
5303#define HCLGE_MAC_LINK_STATUS_NUM 10
5304#define HCLGE_MAC_LINK_STATUS_DOWN 0
5305#define HCLGE_MAC_LINK_STATUS_UP 1
5306
5fd50ac3
PL
5307 struct hclge_serdes_lb_cmd *req;
5308 struct hclge_desc desc;
350fda0a 5309 int mac_link_ret = 0;
5fd50ac3 5310 int ret, i = 0;
4dc13b96 5311 u8 loop_mode_b;
5fd50ac3 5312
d0d72bac 5313 req = (struct hclge_serdes_lb_cmd *)desc.data;
5fd50ac3
PL
5314 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
5315
4dc13b96
FL
5316 switch (loop_mode) {
5317 case HNAE3_LOOP_SERIAL_SERDES:
5318 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
5319 break;
5320 case HNAE3_LOOP_PARALLEL_SERDES:
5321 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
5322 break;
5323 default:
5324 dev_err(&hdev->pdev->dev,
5325 "unsupported serdes loopback mode %d\n", loop_mode);
5326 return -ENOTSUPP;
5327 }
5328
5fd50ac3 5329 if (en) {
4dc13b96
FL
5330 req->enable = loop_mode_b;
5331 req->mask = loop_mode_b;
350fda0a 5332 mac_link_ret = HCLGE_MAC_LINK_STATUS_UP;
5fd50ac3 5333 } else {
4dc13b96 5334 req->mask = loop_mode_b;
350fda0a 5335 mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN;
5fd50ac3
PL
5336 }
5337
5338 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5339 if (ret) {
5340 dev_err(&hdev->pdev->dev,
5341 "serdes loopback set fail, ret = %d\n", ret);
5342 return ret;
5343 }
5344
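 /* Poll until the firmware reports the serdes loopback configuration
  * as done, retrying up to HCLGE_SERDES_RETRY_NUM times.
  */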
5345 do {
5346 msleep(HCLGE_SERDES_RETRY_MS);
5347 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
5348 true);
5349 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5350 if (ret) {
5351 dev_err(&hdev->pdev->dev,
5352 "serdes loopback get, ret = %d\n", ret);
5353 return ret;
5354 }
5355 } while (++i < HCLGE_SERDES_RETRY_NUM &&
5356 !(req->result & HCLGE_CMD_SERDES_DONE_B));
5357
5358 if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
5359 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
5360 return -EBUSY;
5361 } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
5362 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
5363 return -EIO;
5364 }
5365
0f29fc23 5366 hclge_cfg_mac_mode(hdev, en);
350fda0a 5367
5368 i = 0;
5369 do {
5370 /* serdes internal loopback, independent of the network cable. */
5371 msleep(HCLGE_MAC_LINK_STATUS_MS);
5372 ret = hclge_get_mac_link_status(hdev);
5373 if (ret == mac_link_ret)
5374 return 0;
5375 } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
5376
5377 dev_err(&hdev->pdev->dev, "config mac mode timeout\n");
5378
5379 return -EBUSY;
5fd50ac3
PL
5380}
5381
0f29fc23
YL
5382static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
5383 int stream_id, bool enable)
5384{
5385 struct hclge_desc desc;
5386 struct hclge_cfg_com_tqp_queue_cmd *req =
5387 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
5388 int ret;
5389
5390 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
5391 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
5392 req->stream_id = cpu_to_le16(stream_id);
5393 req->enable |= enable << HCLGE_TQP_ENABLE_B;
5394
5395 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5396 if (ret)
5397 dev_err(&hdev->pdev->dev,
5398 "Tqp enable fail, status =%d.\n", ret);
5399 return ret;
5400}
5401
e4d68dae
YL
5402static int hclge_set_loopback(struct hnae3_handle *handle,
5403 enum hnae3_loop loop_mode, bool en)
5404{
5405 struct hclge_vport *vport = hclge_get_vport(handle);
205a24ca 5406 struct hnae3_knic_private_info *kinfo;
e4d68dae 5407 struct hclge_dev *hdev = vport->back;
0f29fc23 5408 int i, ret;
e4d68dae
YL
5409
5410 switch (loop_mode) {
eb66d503
FL
5411 case HNAE3_LOOP_APP:
5412 ret = hclge_set_app_loopback(hdev, en);
c39c4d98 5413 break;
4dc13b96
FL
5414 case HNAE3_LOOP_SERIAL_SERDES:
5415 case HNAE3_LOOP_PARALLEL_SERDES:
5416 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
5fd50ac3 5417 break;
c39c4d98
YL
5418 default:
5419 ret = -ENOTSUPP;
5420 dev_err(&hdev->pdev->dev,
5421 "loop_mode %d is not supported\n", loop_mode);
5422 break;
5423 }
5424
47ef6dec
JS
5425 if (ret)
5426 return ret;
5427
205a24ca
HT
5428 kinfo = &vport->nic.kinfo;
5429 for (i = 0; i < kinfo->num_tqps; i++) {
0f29fc23
YL
5430 ret = hclge_tqp_enable(hdev, i, 0, en);
5431 if (ret)
5432 return ret;
5433 }
46a3df9f 5434
0f29fc23 5435 return 0;
46a3df9f
S
5436}
5437
5438static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
5439{
5440 struct hclge_vport *vport = hclge_get_vport(handle);
205a24ca 5441 struct hnae3_knic_private_info *kinfo;
46a3df9f
S
5442 struct hnae3_queue *queue;
5443 struct hclge_tqp *tqp;
5444 int i;
5445
205a24ca
HT
5446 kinfo = &vport->nic.kinfo;
5447 for (i = 0; i < kinfo->num_tqps; i++) {
46a3df9f
S
5448 queue = handle->kinfo.tqp[i];
5449 tqp = container_of(queue, struct hclge_tqp, q);
5450 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
5451 }
5452}
5453
8cdb992f
JS
5454static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
5455{
5456 struct hclge_vport *vport = hclge_get_vport(handle);
5457 struct hclge_dev *hdev = vport->back;
5458
5459 if (enable) {
5460 mod_timer(&hdev->service_timer, jiffies + HZ);
5461 } else {
5462 del_timer_sync(&hdev->service_timer);
5463 cancel_work_sync(&hdev->service_task);
5464 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
5465 }
5466}
5467
46a3df9f
S
5468static int hclge_ae_start(struct hnae3_handle *handle)
5469{
5470 struct hclge_vport *vport = hclge_get_vport(handle);
5471 struct hclge_dev *hdev = vport->back;
46a3df9f 5472
46a3df9f
S
5473 /* mac enable */
5474 hclge_cfg_mac_mode(hdev, true);
5475 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
be8d8cdb 5476 hdev->hw.mac.link = 0;
46a3df9f 5477
b50ae26c
PL
5478 /* reset tqp stats */
5479 hclge_reset_tqp_stats(handle);
5480
b01b7cf1 5481 hclge_mac_start_phy(hdev);
46a3df9f 5482
46a3df9f
S
5483 return 0;
5484}
5485
5486static void hclge_ae_stop(struct hnae3_handle *handle)
5487{
5488 struct hclge_vport *vport = hclge_get_vport(handle);
5489 struct hclge_dev *hdev = vport->back;
39cfbc9c 5490 int i;
46a3df9f 5491
2f7e4896
FL
5492 set_bit(HCLGE_STATE_DOWN, &hdev->state);
5493
35d93a30
HT
5494 /* If it is not PF reset, the firmware will disable the MAC,
5495 * so it only needs to stop the phy here.
5496 */
5497 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
5498 hdev->reset_type != HNAE3_FUNC_RESET) {
9617f668 5499 hclge_mac_stop_phy(hdev);
b50ae26c 5500 return;
9617f668 5501 }
b50ae26c 5502
39cfbc9c
HT
5503 for (i = 0; i < handle->kinfo.num_tqps; i++)
5504 hclge_reset_tqp(handle, i);
5505
46a3df9f
S
5506 /* Mac disable */
5507 hclge_cfg_mac_mode(hdev, false);
5508
5509 hclge_mac_stop_phy(hdev);
5510
5511 /* reset tqp stats */
5512 hclge_reset_tqp_stats(handle);
f30dfddc 5513 hclge_update_link_status(hdev);
46a3df9f
S
5514}
5515
a6d818e3
YL
5516int hclge_vport_start(struct hclge_vport *vport)
5517{
5518 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
5519 vport->last_active_jiffies = jiffies;
5520 return 0;
5521}
5522
5523void hclge_vport_stop(struct hclge_vport *vport)
5524{
5525 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
5526}
5527
5528static int hclge_client_start(struct hnae3_handle *handle)
5529{
5530 struct hclge_vport *vport = hclge_get_vport(handle);
5531
5532 return hclge_vport_start(vport);
5533}
5534
5535static void hclge_client_stop(struct hnae3_handle *handle)
5536{
5537 struct hclge_vport *vport = hclge_get_vport(handle);
5538
5539 hclge_vport_stop(vport);
5540}
5541
46a3df9f
S
5542static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
5543 u16 cmdq_resp, u8 resp_code,
5544 enum hclge_mac_vlan_tbl_opcode op)
5545{
5546 struct hclge_dev *hdev = vport->back;
5547 int return_status = -EIO;
5548
5549 if (cmdq_resp) {
5550 dev_err(&hdev->pdev->dev,
5551 "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
5552 cmdq_resp);
5553 return -EIO;
5554 }
5555
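 /* resp_code semantics: for ADD, 0 and 1 mean success while 2 and 3
  * mean the unicast/multicast table overflowed; for REMOVE and LKUP,
  * a resp_code of 1 means the entry was not found.
  */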
5556 if (op == HCLGE_MAC_VLAN_ADD) {
5557 if ((!resp_code) || (resp_code == 1)) {
5558 return_status = 0;
5559 } else if (resp_code == 2) {
eefd00a5 5560 return_status = -ENOSPC;
46a3df9f
S
5561 dev_err(&hdev->pdev->dev,
5562 "add mac addr failed for uc_overflow.\n");
5563 } else if (resp_code == 3) {
eefd00a5 5564 return_status = -ENOSPC;
46a3df9f
S
5565 dev_err(&hdev->pdev->dev,
5566 "add mac addr failed for mc_overflow.\n");
5567 } else {
5568 dev_err(&hdev->pdev->dev,
5569 "add mac addr failed for undefined, code=%d.\n",
5570 resp_code);
5571 }
5572 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
5573 if (!resp_code) {
5574 return_status = 0;
5575 } else if (resp_code == 1) {
eefd00a5 5576 return_status = -ENOENT;
46a3df9f
S
5577 dev_dbg(&hdev->pdev->dev,
5578 "remove mac addr failed for miss.\n");
5579 } else {
5580 dev_err(&hdev->pdev->dev,
5581 "remove mac addr failed for undefined, code=%d.\n",
5582 resp_code);
5583 }
5584 } else if (op == HCLGE_MAC_VLAN_LKUP) {
5585 if (!resp_code) {
5586 return_status = 0;
5587 } else if (resp_code == 1) {
eefd00a5 5588 return_status = -ENOENT;
46a3df9f
S
5589 dev_dbg(&hdev->pdev->dev,
5590 "lookup mac addr failed for miss.\n");
5591 } else {
5592 dev_err(&hdev->pdev->dev,
5593 "lookup mac addr failed for undefined, code=%d.\n",
5594 resp_code);
5595 }
5596 } else {
eefd00a5 5597 return_status = -EINVAL;
46a3df9f
S
5598 dev_err(&hdev->pdev->dev,
5599 "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
5600 op);
5601 }
5602
5603 return return_status;
5604}
5605
5606static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
5607{
5608 int word_num;
5609 int bit_num;
5610
5611 if (vfid > 255 || vfid < 0)
5612 return -EIO;
5613
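 /* The 256-bit function id bitmap spans desc[1] (vfid 0-191) and
  * desc[2] (vfid 192-255), with 32 function ids per 32-bit data word.
  */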
5614 if (vfid >= 0 && vfid <= 191) {
5615 word_num = vfid / 32;
5616 bit_num = vfid % 32;
5617 if (clr)
a90bb9a5 5618 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
46a3df9f 5619 else
a90bb9a5 5620 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
46a3df9f
S
5621 } else {
5622 word_num = (vfid - 192) / 32;
5623 bit_num = vfid % 32;
5624 if (clr)
a90bb9a5 5625 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
46a3df9f 5626 else
a90bb9a5 5627 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
46a3df9f
S
5628 }
5629
5630 return 0;
5631}
5632
5633static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
5634{
5635#define HCLGE_DESC_NUMBER 3
5636#define HCLGE_FUNC_NUMBER_PER_DESC 6
5637 int i, j;
5638
6c39d527 5639 for (i = 1; i < HCLGE_DESC_NUMBER; i++)
46a3df9f
S
5640 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
5641 if (desc[i].data[j])
5642 return false;
5643
5644 return true;
5645}
5646
d44f9b63 5647static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
3a586422 5648 const u8 *addr, bool is_mc)
46a3df9f
S
5649{
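 /* Bytes 0-3 of the MAC are packed into hi32 (byte 0 in the least
  * significant position) and bytes 4-5 into lo16, matching the
  * layout of the MAC/VLAN table entry.
  */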
5650 const unsigned char *mac_addr = addr;
5651 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
5652 (mac_addr[0]) | (mac_addr[1] << 8);
5653 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
5654
3a586422
WL
5655 hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5656 if (is_mc) {
5657 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
5658 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5659 }
5660
46a3df9f
S
5661 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
5662 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
5663}
5664
46a3df9f 5665static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
d44f9b63 5666 struct hclge_mac_vlan_tbl_entry_cmd *req)
46a3df9f
S
5667{
5668 struct hclge_dev *hdev = vport->back;
5669 struct hclge_desc desc;
5670 u8 resp_code;
a90bb9a5 5671 u16 retval;
46a3df9f
S
5672 int ret;
5673
5674 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
5675
d44f9b63 5676 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
46a3df9f
S
5677
5678 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5679 if (ret) {
5680 dev_err(&hdev->pdev->dev,
5681 "del mac addr failed for cmd_send, ret =%d.\n",
5682 ret);
5683 return ret;
5684 }
a90bb9a5
YL
5685 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5686 retval = le16_to_cpu(desc.retval);
46a3df9f 5687
a90bb9a5 5688 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
46a3df9f
S
5689 HCLGE_MAC_VLAN_REMOVE);
5690}
5691
5692static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
d44f9b63 5693 struct hclge_mac_vlan_tbl_entry_cmd *req,
46a3df9f
S
5694 struct hclge_desc *desc,
5695 bool is_mc)
5696{
5697 struct hclge_dev *hdev = vport->back;
5698 u8 resp_code;
a90bb9a5 5699 u16 retval;
46a3df9f
S
5700 int ret;
5701
5702 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
5703 if (is_mc) {
5704 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5705 memcpy(desc[0].data,
5706 req,
d44f9b63 5707 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
46a3df9f
S
5708 hclge_cmd_setup_basic_desc(&desc[1],
5709 HCLGE_OPC_MAC_VLAN_ADD,
5710 true);
5711 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5712 hclge_cmd_setup_basic_desc(&desc[2],
5713 HCLGE_OPC_MAC_VLAN_ADD,
5714 true);
5715 ret = hclge_cmd_send(&hdev->hw, desc, 3);
5716 } else {
5717 memcpy(desc[0].data,
5718 req,
d44f9b63 5719 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
46a3df9f
S
5720 ret = hclge_cmd_send(&hdev->hw, desc, 1);
5721 }
5722 if (ret) {
5723 dev_err(&hdev->pdev->dev,
5724 "lookup mac addr failed for cmd_send, ret =%d.\n",
5725 ret);
5726 return ret;
5727 }
a90bb9a5
YL
5728 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
5729 retval = le16_to_cpu(desc[0].retval);
46a3df9f 5730
a90bb9a5 5731 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
46a3df9f
S
5732 HCLGE_MAC_VLAN_LKUP);
5733}
5734
5735static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
d44f9b63 5736 struct hclge_mac_vlan_tbl_entry_cmd *req,
46a3df9f
S
5737 struct hclge_desc *mc_desc)
5738{
5739 struct hclge_dev *hdev = vport->back;
5740 int cfg_status;
5741 u8 resp_code;
a90bb9a5 5742 u16 retval;
46a3df9f
S
5743 int ret;
5744
5745 if (!mc_desc) {
5746 struct hclge_desc desc;
5747
5748 hclge_cmd_setup_basic_desc(&desc,
5749 HCLGE_OPC_MAC_VLAN_ADD,
5750 false);
d44f9b63
YL
5751 memcpy(desc.data, req,
5752 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
46a3df9f 5753 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
a90bb9a5
YL
5754 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5755 retval = le16_to_cpu(desc.retval);
5756
5757 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
46a3df9f
S
5758 resp_code,
5759 HCLGE_MAC_VLAN_ADD);
5760 } else {
c3b6f755 5761 hclge_cmd_reuse_desc(&mc_desc[0], false);
46a3df9f 5762 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
c3b6f755 5763 hclge_cmd_reuse_desc(&mc_desc[1], false);
46a3df9f 5764 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
c3b6f755 5765 hclge_cmd_reuse_desc(&mc_desc[2], false);
46a3df9f
S
5766 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
5767 memcpy(mc_desc[0].data, req,
d44f9b63 5768 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
46a3df9f 5769 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
a90bb9a5
YL
5770 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
5771 retval = le16_to_cpu(mc_desc[0].retval);
5772
5773 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
46a3df9f
S
5774 resp_code,
5775 HCLGE_MAC_VLAN_ADD);
5776 }
5777
5778 if (ret) {
5779 dev_err(&hdev->pdev->dev,
5780 "add mac addr failed for cmd_send, ret =%d.\n",
5781 ret);
5782 return ret;
5783 }
5784
5785 return cfg_status;
5786}
5787
39932473
JS
5788static int hclge_init_umv_space(struct hclge_dev *hdev)
5789{
5790 u16 allocated_size = 0;
5791 int ret;
5792
5793 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
5794 true);
5795 if (ret)
5796 return ret;
5797
5798 if (allocated_size < hdev->wanted_umv_size)
5799 dev_warn(&hdev->pdev->dev,
5800 "Alloc umv space failed, want %d, get %d\n",
5801 hdev->wanted_umv_size, allocated_size);
5802
5803 mutex_init(&hdev->umv_mutex);
5804 hdev->max_umv_size = allocated_size;
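 /* Split the granted entries into per-function private quotas; the
  * remainder is kept as shared space.
  */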
5805 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
5806 hdev->share_umv_size = hdev->priv_umv_size +
5807 hdev->max_umv_size % (hdev->num_req_vfs + 2);
5808
5809 return 0;
5810}
5811
5812static int hclge_uninit_umv_space(struct hclge_dev *hdev)
5813{
5814 int ret;
5815
5816 if (hdev->max_umv_size > 0) {
5817 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
5818 false);
5819 if (ret)
5820 return ret;
5821 hdev->max_umv_size = 0;
5822 }
5823 mutex_destroy(&hdev->umv_mutex);
5824
5825 return 0;
5826}
5827
5828static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
5829 u16 *allocated_size, bool is_alloc)
5830{
5831 struct hclge_umv_spc_alc_cmd *req;
5832 struct hclge_desc desc;
5833 int ret;
5834
5835 req = (struct hclge_umv_spc_alc_cmd *)desc.data;
5836 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
5837 hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc);
5838 req->space_size = cpu_to_le32(space_size);
5839
5840 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5841 if (ret) {
5842 dev_err(&hdev->pdev->dev,
5843 "%s umv space failed for cmd_send, ret =%d\n",
5844 is_alloc ? "allocate" : "free", ret);
5845 return ret;
5846 }
5847
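 /* On allocation the firmware returns the entry count actually granted
  * in the second data word of the response.
  */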
5848 if (is_alloc && allocated_size)
5849 *allocated_size = le32_to_cpu(desc.data[1]);
5850
5851 return 0;
5852}
5853
5854static void hclge_reset_umv_space(struct hclge_dev *hdev)
5855{
5856 struct hclge_vport *vport;
5857 int i;
5858
5859 for (i = 0; i < hdev->num_alloc_vport; i++) {
5860 vport = &hdev->vport[i];
5861 vport->used_umv_num = 0;
5862 }
5863
5864 mutex_lock(&hdev->umv_mutex);
5865 hdev->share_umv_size = hdev->priv_umv_size +
5866 hdev->max_umv_size % (hdev->num_req_vfs + 2);
5867 mutex_unlock(&hdev->umv_mutex);
5868}
5869
5870static bool hclge_is_umv_space_full(struct hclge_vport *vport)
5871{
5872 struct hclge_dev *hdev = vport->back;
5873 bool is_full;
5874
5875 mutex_lock(&hdev->umv_mutex);
5876 is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
5877 hdev->share_umv_size == 0);
5878 mutex_unlock(&hdev->umv_mutex);
5879
5880 return is_full;
5881}
5882
5883static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
5884{
5885 struct hclge_dev *hdev = vport->back;
5886
5887 mutex_lock(&hdev->umv_mutex);
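 /* Entries beyond the private quota are accounted against the shared
  * pool, so shared space is only returned or consumed once
  * used_umv_num crosses priv_umv_size.
  */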
5888 if (is_free) {
5889 if (vport->used_umv_num > hdev->priv_umv_size)
5890 hdev->share_umv_size++;
54a395b6 5891
5892 if (vport->used_umv_num > 0)
5893 vport->used_umv_num--;
39932473 5894 } else {
54a395b6 5895 if (vport->used_umv_num >= hdev->priv_umv_size &&
5896 hdev->share_umv_size > 0)
39932473
JS
5897 hdev->share_umv_size--;
5898 vport->used_umv_num++;
5899 }
5900 mutex_unlock(&hdev->umv_mutex);
5901}
5902
46a3df9f
S
5903static int hclge_add_uc_addr(struct hnae3_handle *handle,
5904 const unsigned char *addr)
5905{
5906 struct hclge_vport *vport = hclge_get_vport(handle);
5907
5908 return hclge_add_uc_addr_common(vport, addr);
5909}
5910
5911int hclge_add_uc_addr_common(struct hclge_vport *vport,
5912 const unsigned char *addr)
5913{
5914 struct hclge_dev *hdev = vport->back;
d44f9b63 5915 struct hclge_mac_vlan_tbl_entry_cmd req;
d07b6bb4 5916 struct hclge_desc desc;
a90bb9a5 5917 u16 egress_port = 0;
aa7a795e 5918 int ret;
46a3df9f
S
5919
5920 /* mac addr check */
5921 if (is_zero_ether_addr(addr) ||
5922 is_broadcast_ether_addr(addr) ||
5923 is_multicast_ether_addr(addr)) {
5924 dev_err(&hdev->pdev->dev,
5925 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
5926 addr,
5927 is_zero_ether_addr(addr),
5928 is_broadcast_ether_addr(addr),
5929 is_multicast_ether_addr(addr));
5930 return -EINVAL;
5931 }
5932
5933 memset(&req, 0, sizeof(req));
a90bb9a5 5934
e4e87715
PL
5935 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
5936 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
a90bb9a5
YL
5937
5938 req.egress_port = cpu_to_le16(egress_port);
46a3df9f 5939
3a586422 5940 hclge_prepare_mac_addr(&req, addr, false);
46a3df9f 5941
d07b6bb4
JS
5942 /* Look up the mac address in the mac_vlan table, and add
5943 * it if the entry does not exist. Duplicate unicast entries
5944 * are not allowed in the mac vlan table.
5945 */
5946 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
39932473
JS
5947 if (ret == -ENOENT) {
5948 if (!hclge_is_umv_space_full(vport)) {
5949 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
5950 if (!ret)
5951 hclge_update_umv_space(vport, false);
5952 return ret;
5953 }
5954
5955 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
5956 hdev->priv_umv_size);
5957
5958 return -ENOSPC;
5959 }
d07b6bb4
JS
5960
5961 /* check if we just hit the duplicate */
72110b56
PL
5962 if (!ret) {
5963 dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
5964 vport->vport_id, addr);
5965 return 0;
5966 }
d07b6bb4
JS
5967
5968 dev_err(&hdev->pdev->dev,
5969 "PF failed to add unicast entry(%pM) in the MAC table\n",
5970 addr);
46a3df9f 5971
aa7a795e 5972 return ret;
46a3df9f
S
5973}
5974
5975static int hclge_rm_uc_addr(struct hnae3_handle *handle,
5976 const unsigned char *addr)
5977{
5978 struct hclge_vport *vport = hclge_get_vport(handle);
5979
5980 return hclge_rm_uc_addr_common(vport, addr);
5981}
5982
5983int hclge_rm_uc_addr_common(struct hclge_vport *vport,
5984 const unsigned char *addr)
5985{
5986 struct hclge_dev *hdev = vport->back;
d44f9b63 5987 struct hclge_mac_vlan_tbl_entry_cmd req;
aa7a795e 5988 int ret;
46a3df9f
S
5989
5990 /* mac addr check */
5991 if (is_zero_ether_addr(addr) ||
5992 is_broadcast_ether_addr(addr) ||
5993 is_multicast_ether_addr(addr)) {
5994 dev_dbg(&hdev->pdev->dev,
5995 "Remove mac err! invalid mac:%pM.\n",
5996 addr);
5997 return -EINVAL;
5998 }
5999
6000 memset(&req, 0, sizeof(req));
e4e87715 6001 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
3a586422 6002 hclge_prepare_mac_addr(&req, addr, false);
aa7a795e 6003 ret = hclge_remove_mac_vlan_tbl(vport, &req);
39932473
JS
6004 if (!ret)
6005 hclge_update_umv_space(vport, true);
46a3df9f 6006
aa7a795e 6007 return ret;
46a3df9f
S
6008}
6009
6010static int hclge_add_mc_addr(struct hnae3_handle *handle,
6011 const unsigned char *addr)
6012{
6013 struct hclge_vport *vport = hclge_get_vport(handle);
6014
a10829c4 6015 return hclge_add_mc_addr_common(vport, addr);
46a3df9f
S
6016}
6017
6018int hclge_add_mc_addr_common(struct hclge_vport *vport,
6019 const unsigned char *addr)
6020{
6021 struct hclge_dev *hdev = vport->back;
d44f9b63 6022 struct hclge_mac_vlan_tbl_entry_cmd req;
46a3df9f 6023 struct hclge_desc desc[3];
46a3df9f
S
6024 int status;
6025
6026 /* mac addr check */
6027 if (!is_multicast_ether_addr(addr)) {
6028 dev_err(&hdev->pdev->dev,
6029 "Add mc mac err! invalid mac:%pM.\n",
6030 addr);
6031 return -EINVAL;
6032 }
6033 memset(&req, 0, sizeof(req));
e4e87715 6034 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
3a586422 6035 hclge_prepare_mac_addr(&req, addr, true);
46a3df9f
S
6036 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6037 if (!status) {
6038 /* This mac addr exists, update VFID for it */
6039 hclge_update_desc_vfid(desc, vport->vport_id, false);
6040 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6041 } else {
6042 /* This mac addr does not exist, add a new entry for it */
6043 memset(desc[0].data, 0, sizeof(desc[0].data));
6044 memset(desc[1].data, 0, sizeof(desc[0].data));
6045 memset(desc[2].data, 0, sizeof(desc[0].data));
6046 hclge_update_desc_vfid(desc, vport->vport_id, false);
6047 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6048 }
6049
1f6db589
JS
6050 if (status == -ENOSPC)
6051 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
46a3df9f
S
6052
6053 return status;
6054}
6055
6056static int hclge_rm_mc_addr(struct hnae3_handle *handle,
6057 const unsigned char *addr)
6058{
6059 struct hclge_vport *vport = hclge_get_vport(handle);
6060
6061 return hclge_rm_mc_addr_common(vport, addr);
6062}
6063
6064int hclge_rm_mc_addr_common(struct hclge_vport *vport,
6065 const unsigned char *addr)
6066{
6067 struct hclge_dev *hdev = vport->back;
d44f9b63 6068 struct hclge_mac_vlan_tbl_entry_cmd req;
46a3df9f
S
6069 enum hclge_cmd_status status;
6070 struct hclge_desc desc[3];
46a3df9f
S
6071
6072 /* mac addr check */
6073 if (!is_multicast_ether_addr(addr)) {
6074 dev_dbg(&hdev->pdev->dev,
6075 "Remove mc mac err! invalid mac:%pM.\n",
6076 addr);
6077 return -EINVAL;
6078 }
6079
6080 memset(&req, 0, sizeof(req));
e4e87715 6081 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
3a586422 6082 hclge_prepare_mac_addr(&req, addr, true);
46a3df9f
S
6083 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6084 if (!status) {
6085 /* This mac addr exists, remove this handle's VFID for it */
6086 hclge_update_desc_vfid(desc, vport->vport_id, true);
6087
6088 if (hclge_is_all_function_id_zero(desc))
6089 /* All the vfids are zero, so this entry needs to be deleted */
6090 status = hclge_remove_mac_vlan_tbl(vport, &req);
6091 else
6092 /* Not all the vfids are zero, update the vfid */
6093 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6094
6095 } else {
40cca1c5
XW
6096 /* This mac address may be in the mta table, but it cannot be
6097 * deleted here because an mta entry represents an address
6098 * range rather than a specific address. The delete action on
6099 * all entries will take effect in update_mta_status called by
6100 * hns3_nic_set_rx_mode.
6101 */
6102 status = 0;
46a3df9f
S
6103 }
6104
46a3df9f
S
6105 return status;
6106}
6107
6dd86902 6108void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6109 enum HCLGE_MAC_ADDR_TYPE mac_type)
6110{
6111 struct hclge_vport_mac_addr_cfg *mac_cfg;
6112 struct list_head *list;
6113
6114 if (!vport->vport_id)
6115 return;
6116
6117 mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
6118 if (!mac_cfg)
6119 return;
6120
6121 mac_cfg->hd_tbl_status = true;
6122 memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
6123
6124 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6125 &vport->uc_mac_list : &vport->mc_mac_list;
6126
6127 list_add_tail(&mac_cfg->node, list);
6128}
6129
6130void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6131 bool is_write_tbl,
6132 enum HCLGE_MAC_ADDR_TYPE mac_type)
6133{
6134 struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6135 struct list_head *list;
6136 bool uc_flag, mc_flag;
6137
6138 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6139 &vport->uc_mac_list : &vport->mc_mac_list;
6140
6141 uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
6142 mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
6143
6144 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
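 /* MAC addresses are raw bytes and may contain zero octets, so compare
  * all ETH_ALEN bytes rather than treating the address as a string.
  */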
6145 if (memcmp(mac_cfg->mac_addr, mac_addr, ETH_ALEN) == 0) {
6146 if (uc_flag && mac_cfg->hd_tbl_status)
6147 hclge_rm_uc_addr_common(vport, mac_addr);
6148
6149 if (mc_flag && mac_cfg->hd_tbl_status)
6150 hclge_rm_mc_addr_common(vport, mac_addr);
6151
6152 list_del(&mac_cfg->node);
6153 kfree(mac_cfg);
6154 break;
6155 }
6156 }
6157}
6158
6159void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
6160 enum HCLGE_MAC_ADDR_TYPE mac_type)
6161{
6162 struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6163 struct list_head *list;
6164
6165 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6166 &vport->uc_mac_list : &vport->mc_mac_list;
6167
6168 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6169 if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
6170 hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
6171
6172 if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
6173 hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
6174
6175 mac_cfg->hd_tbl_status = false;
6176 if (is_del_list) {
6177 list_del(&mac_cfg->node);
6178 kfree(mac_cfg);
6179 }
6180 }
6181}
6182
6183void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
6184{
6185 struct hclge_vport_mac_addr_cfg *mac, *tmp;
6186 struct hclge_vport *vport;
6187 int i;
6188
6189 mutex_lock(&hdev->vport_cfg_mutex);
6190 for (i = 0; i < hdev->num_alloc_vport; i++) {
6191 vport = &hdev->vport[i];
6192 list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
6193 list_del(&mac->node);
6194 kfree(mac);
6195 }
6196
6197 list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
6198 list_del(&mac->node);
6199 kfree(mac);
6200 }
6201 }
6202 mutex_unlock(&hdev->vport_cfg_mutex);
6203}
6204
f5aac71c
FL
6205static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
6206 u16 cmdq_resp, u8 resp_code)
6207{
6208#define HCLGE_ETHERTYPE_SUCCESS_ADD 0
6209#define HCLGE_ETHERTYPE_ALREADY_ADD 1
6210#define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
6211#define HCLGE_ETHERTYPE_KEY_CONFLICT 3
6212
6213 int return_status;
6214
6215 if (cmdq_resp) {
6216 dev_err(&hdev->pdev->dev,
6217 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
6218 cmdq_resp);
6219 return -EIO;
6220 }
6221
6222 switch (resp_code) {
6223 case HCLGE_ETHERTYPE_SUCCESS_ADD:
6224 case HCLGE_ETHERTYPE_ALREADY_ADD:
6225 return_status = 0;
6226 break;
6227 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
6228 dev_err(&hdev->pdev->dev,
6229 "add mac ethertype failed for manager table overflow.\n");
6230 return_status = -EIO;
6231 break;
6232 case HCLGE_ETHERTYPE_KEY_CONFLICT:
6233 dev_err(&hdev->pdev->dev,
6234 "add mac ethertype failed for key conflict.\n");
6235 return_status = -EIO;
6236 break;
6237 default:
6238 dev_err(&hdev->pdev->dev,
6239 "add mac ethertype failed for undefined, code=%d.\n",
6240 resp_code);
6241 return_status = -EIO;
6242 }
6243
6244 return return_status;
6245}
6246
6247static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
6248 const struct hclge_mac_mgr_tbl_entry_cmd *req)
6249{
6250 struct hclge_desc desc;
6251 u8 resp_code;
6252 u16 retval;
6253 int ret;
6254
6255 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
6256 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
6257
6258 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6259 if (ret) {
6260 dev_err(&hdev->pdev->dev,
6261 "add mac ethertype failed for cmd_send, ret =%d.\n",
6262 ret);
6263 return ret;
6264 }
6265
6266 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6267 retval = le16_to_cpu(desc.retval);
6268
6269 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
6270}
6271
6272static int init_mgr_tbl(struct hclge_dev *hdev)
6273{
6274 int ret;
6275 int i;
6276
6277 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
6278 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
6279 if (ret) {
6280 dev_err(&hdev->pdev->dev,
6281 "add mac ethertype failed, ret =%d.\n",
6282 ret);
6283 return ret;
6284 }
6285 }
6286
6287 return 0;
6288}
6289
46a3df9f
S
6290static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
6291{
6292 struct hclge_vport *vport = hclge_get_vport(handle);
6293 struct hclge_dev *hdev = vport->back;
6294
6295 ether_addr_copy(p, hdev->hw.mac.mac_addr);
6296}
6297
59098055
FL
6298static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
6299 bool is_first)
46a3df9f
S
6300{
6301 const unsigned char *new_addr = (const unsigned char *)p;
6302 struct hclge_vport *vport = hclge_get_vport(handle);
6303 struct hclge_dev *hdev = vport->back;
18838d0c 6304 int ret;
46a3df9f
S
6305
6306 /* mac addr check */
6307 if (is_zero_ether_addr(new_addr) ||
6308 is_broadcast_ether_addr(new_addr) ||
6309 is_multicast_ether_addr(new_addr)) {
6310 dev_err(&hdev->pdev->dev,
6311 "Change uc mac err! invalid mac:%p.\n",
6312 new_addr);
6313 return -EINVAL;
6314 }
6315
962e31bd
YL
6316 if ((!is_first || is_kdump_kernel()) &&
6317 hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
18838d0c 6318 dev_warn(&hdev->pdev->dev,
59098055 6319 "remove old uc mac address fail.\n");
46a3df9f 6320
18838d0c
FL
6321 ret = hclge_add_uc_addr(handle, new_addr);
6322 if (ret) {
6323 dev_err(&hdev->pdev->dev,
6324 "add uc mac address fail, ret =%d.\n",
6325 ret);
6326
59098055
FL
6327 if (!is_first &&
6328 hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
18838d0c 6329 dev_err(&hdev->pdev->dev,
59098055 6330 "restore uc mac address fail.\n");
18838d0c
FL
6331
6332 return -EIO;
46a3df9f
S
6333 }
6334
e98d7183 6335 ret = hclge_pause_addr_cfg(hdev, new_addr);
18838d0c
FL
6336 if (ret) {
6337 dev_err(&hdev->pdev->dev,
6338 "configure mac pause address fail, ret =%d.\n",
6339 ret);
6340 return -EIO;
6341 }
6342
6343 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
6344
6345 return 0;
46a3df9f
S
6346}
6347
26483246
XW
6348static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
6349 int cmd)
6350{
6351 struct hclge_vport *vport = hclge_get_vport(handle);
6352 struct hclge_dev *hdev = vport->back;
6353
6354 if (!hdev->hw.mac.phydev)
6355 return -EOPNOTSUPP;
6356
6357 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
6358}
6359
46a3df9f 6360static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
30ebc576 6361 u8 fe_type, bool filter_en, u8 vf_id)
46a3df9f 6362{
d44f9b63 6363 struct hclge_vlan_filter_ctrl_cmd *req;
46a3df9f
S
6364 struct hclge_desc desc;
6365 int ret;
6366
6367 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
6368
d44f9b63 6369 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
46a3df9f 6370 req->vlan_type = vlan_type;
64d114f0 6371 req->vlan_fe = filter_en ? fe_type : 0;
30ebc576 6372 req->vf_id = vf_id;
46a3df9f
S
6373
6374 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 6375 if (ret)
46a3df9f
S
6376 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
6377 ret);
46a3df9f 6378
3f639907 6379 return ret;
46a3df9f
S
6380}
6381
391b5e93
JS
6382#define HCLGE_FILTER_TYPE_VF 0
6383#define HCLGE_FILTER_TYPE_PORT 1
64d114f0
ZL
6384#define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0)
6385#define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0)
6386#define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1)
6387#define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
6388#define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3)
6389#define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \
6390 | HCLGE_FILTER_FE_ROCE_EGRESS_B)
6391#define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \
6392 | HCLGE_FILTER_FE_ROCE_INGRESS_B)
391b5e93
JS
6393
6394static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
6395{
6396 struct hclge_vport *vport = hclge_get_vport(handle);
6397 struct hclge_dev *hdev = vport->back;
6398
64d114f0
ZL
6399 if (hdev->pdev->revision >= 0x21) {
6400 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
30ebc576 6401 HCLGE_FILTER_FE_EGRESS, enable, 0);
64d114f0 6402 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
30ebc576 6403 HCLGE_FILTER_FE_INGRESS, enable, 0);
64d114f0
ZL
6404 } else {
6405 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
30ebc576
JS
6406 HCLGE_FILTER_FE_EGRESS_V1_B, enable,
6407 0);
64d114f0 6408 }
c60edc17
JS
6409 if (enable)
6410 handle->netdev_flags |= HNAE3_VLAN_FLTR;
6411 else
6412 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
391b5e93
JS
6413}
6414
dc8131d8
YL
6415static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
6416 bool is_kill, u16 vlan, u8 qos,
6417 __be16 proto)
46a3df9f
S
6418{
6419#define HCLGE_MAX_VF_BYTES 16
d44f9b63
YL
6420 struct hclge_vlan_filter_vf_cfg_cmd *req0;
6421 struct hclge_vlan_filter_vf_cfg_cmd *req1;
46a3df9f
S
6422 struct hclge_desc desc[2];
6423 u8 vf_byte_val;
6424 u8 vf_byte_off;
6425 int ret;
6426
6427 hclge_cmd_setup_basic_desc(&desc[0],
6428 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
6429 hclge_cmd_setup_basic_desc(&desc[1],
6430 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
6431
6432 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6433
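 /* Each VF is one bit in the vf_bitmap, which is split across the two
  * descriptors at HCLGE_MAX_VF_BYTES (128 VFs) per descriptor.
  */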
6434 vf_byte_off = vfid / 8;
6435 vf_byte_val = 1 << (vfid % 8);
6436
d44f9b63
YL
6437 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
6438 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
46a3df9f 6439
a90bb9a5 6440 req0->vlan_id = cpu_to_le16(vlan);
46a3df9f
S
6441 req0->vlan_cfg = is_kill;
6442
6443 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
6444 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
6445 else
6446 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
6447
6448 ret = hclge_cmd_send(&hdev->hw, desc, 2);
6449 if (ret) {
6450 dev_err(&hdev->pdev->dev,
6451 "Send vf vlan command fail, ret =%d.\n",
6452 ret);
6453 return ret;
6454 }
6455
6456 if (!is_kill) {
6c251711 6457#define HCLGE_VF_VLAN_NO_ENTRY 2
46a3df9f
S
6458 if (!req0->resp_code || req0->resp_code == 1)
6459 return 0;
6460
6c251711
YL
6461 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
6462 dev_warn(&hdev->pdev->dev,
6463 "vf vlan table is full, vf vlan filter is disabled\n");
6464 return 0;
6465 }
6466
46a3df9f
S
6467 dev_err(&hdev->pdev->dev,
6468 "Add vf vlan filter fail, ret =%d.\n",
6469 req0->resp_code);
6470 } else {
41dafea2 6471#define HCLGE_VF_VLAN_DEL_NO_FOUND 1
46a3df9f
S
6472 if (!req0->resp_code)
6473 return 0;
6474
41dafea2
YL
6475 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
6476 dev_warn(&hdev->pdev->dev,
6477 "vlan %d filter is not in vf vlan table\n",
6478 vlan);
6479 return 0;
6480 }
6481
46a3df9f
S
6482 dev_err(&hdev->pdev->dev,
6483 "Kill vf vlan filter fail, ret =%d.\n",
6484 req0->resp_code);
6485 }
6486
6487 return -EIO;
6488}
6489
dc8131d8
YL
6490static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
6491 u16 vlan_id, bool is_kill)
46a3df9f 6492{
d44f9b63 6493 struct hclge_vlan_filter_pf_cfg_cmd *req;
46a3df9f
S
6494 struct hclge_desc desc;
6495 u8 vlan_offset_byte_val;
6496 u8 vlan_offset_byte;
6497 u8 vlan_offset_160;
6498 int ret;
6499
6500 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
6501
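 /* The port VLAN filter is addressed in blocks of 160 VLAN ids:
  * vlan_offset selects the block and the bitmap byte/bit mark the
  * VLAN within it.
  */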
6502 vlan_offset_160 = vlan_id / 160;
6503 vlan_offset_byte = (vlan_id % 160) / 8;
6504 vlan_offset_byte_val = 1 << (vlan_id % 8);
6505
d44f9b63 6506 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
46a3df9f
S
6507 req->vlan_offset = vlan_offset_160;
6508 req->vlan_cfg = is_kill;
6509 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
6510
6511 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
dc8131d8
YL
6512 if (ret)
6513 dev_err(&hdev->pdev->dev,
6514 "port vlan command, send fail, ret =%d.\n", ret);
6515 return ret;
6516}
6517
6518static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
6519 u16 vport_id, u16 vlan_id, u8 qos,
6520 bool is_kill)
6521{
6522 u16 vport_idx, vport_num = 0;
6523 int ret;
6524
daaa8521
YL
6525 if (is_kill && !vlan_id)
6526 return 0;
6527
dc8131d8
YL
6528 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
6529 0, proto);
46a3df9f
S
6530 if (ret) {
6531 dev_err(&hdev->pdev->dev,
dc8131d8
YL
6532 "Set %d vport vlan filter config fail, ret =%d.\n",
6533 vport_id, ret);
46a3df9f
S
6534 return ret;
6535 }
6536
dc8131d8
YL
6537 /* vlan 0 may be added twice when 8021q module is enabled */
6538 if (!is_kill && !vlan_id &&
6539 test_bit(vport_id, hdev->vlan_table[vlan_id]))
6540 return 0;
6541
6542 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
46a3df9f 6543 dev_err(&hdev->pdev->dev,
dc8131d8
YL
6544 "Add port vlan failed, vport %d is already in vlan %d\n",
6545 vport_id, vlan_id);
6546 return -EINVAL;
46a3df9f
S
6547 }
6548
dc8131d8
YL
6549 if (is_kill &&
6550 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
6551 dev_err(&hdev->pdev->dev,
6552 "Delete port vlan failed, vport %d is not in vlan %d\n",
6553 vport_id, vlan_id);
6554 return -EINVAL;
6555 }
6556
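 /* Only touch the port VLAN filter when the first vport joins this
  * VLAN or the last vport leaves it.
  */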
54e97d11 6557 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
dc8131d8
YL
6558 vport_num++;
6559
6560 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
6561 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
6562 is_kill);
6563
6564 return ret;
6565}
6566
5f6ea83f
PL
6567static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
6568{
6569 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
6570 struct hclge_vport_vtag_tx_cfg_cmd *req;
6571 struct hclge_dev *hdev = vport->back;
6572 struct hclge_desc desc;
6573 int status;
6574
6575 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
6576
6577 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
6578 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
6579 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
e4e87715
PL
6580 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
6581 vcfg->accept_tag1 ? 1 : 0);
6582 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
6583 vcfg->accept_untag1 ? 1 : 0);
6584 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
6585 vcfg->accept_tag2 ? 1 : 0);
6586 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
6587 vcfg->accept_untag2 ? 1 : 0);
6588 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
6589 vcfg->insert_tag1_en ? 1 : 0);
6590 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
6591 vcfg->insert_tag2_en ? 1 : 0);
6592 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
5f6ea83f
PL
6593
6594 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
6595 req->vf_bitmap[req->vf_offset] =
6596 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
6597
6598 status = hclge_cmd_send(&hdev->hw, &desc, 1);
6599 if (status)
6600 dev_err(&hdev->pdev->dev,
6601 "Send port txvlan cfg command fail, ret =%d\n",
6602 status);
6603
6604 return status;
6605}
6606
6607static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
6608{
6609 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
6610 struct hclge_vport_vtag_rx_cfg_cmd *req;
6611 struct hclge_dev *hdev = vport->back;
6612 struct hclge_desc desc;
6613 int status;
6614
6615 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
6616
6617 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
e4e87715
PL
6618 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
6619 vcfg->strip_tag1_en ? 1 : 0);
6620 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
6621 vcfg->strip_tag2_en ? 1 : 0);
6622 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
6623 vcfg->vlan1_vlan_prionly ? 1 : 0);
6624 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
6625 vcfg->vlan2_vlan_prionly ? 1 : 0);
5f6ea83f
PL
6626
6627 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
6628 req->vf_bitmap[req->vf_offset] =
6629 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
6630
6631 status = hclge_cmd_send(&hdev->hw, &desc, 1);
6632 if (status)
6633 dev_err(&hdev->pdev->dev,
6634 "Send port rxvlan cfg command fail, ret =%d\n",
6635 status);
6636
6637 return status;
6638}
6639
741fca16
JS
6640static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
6641 u16 port_base_vlan_state,
6642 u16 vlan_tag)
6643{
6644 int ret;
6645
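 /* Without a port based VLAN, tag1 from the stack is accepted as-is;
  * with it enabled, the hardware inserts the port VLAN as tag1 and
  * packets that already carry tag1 are not accepted.
  */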
6646 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
6647 vport->txvlan_cfg.accept_tag1 = true;
6648 vport->txvlan_cfg.insert_tag1_en = false;
6649 vport->txvlan_cfg.default_tag1 = 0;
6650 } else {
6651 vport->txvlan_cfg.accept_tag1 = false;
6652 vport->txvlan_cfg.insert_tag1_en = true;
6653 vport->txvlan_cfg.default_tag1 = vlan_tag;
6654 }
6655
6656 vport->txvlan_cfg.accept_untag1 = true;
6657
6658 /* accept_tag2 and accept_untag2 are not supported on
6659 * pdev revision(0x20); newer revisions support them, but
6660 * these two fields cannot be configured by user.
6661 */
6662 vport->txvlan_cfg.accept_tag2 = true;
6663 vport->txvlan_cfg.accept_untag2 = true;
6664 vport->txvlan_cfg.insert_tag2_en = false;
6665 vport->txvlan_cfg.default_tag2 = 0;
6666
6667 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
6668 vport->rxvlan_cfg.strip_tag1_en = false;
6669 vport->rxvlan_cfg.strip_tag2_en =
6670 vport->rxvlan_cfg.rx_vlan_offload_en;
6671 } else {
6672 vport->rxvlan_cfg.strip_tag1_en =
6673 vport->rxvlan_cfg.rx_vlan_offload_en;
6674 vport->rxvlan_cfg.strip_tag2_en = true;
6675 }
6676 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
6677 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
6678
6679 ret = hclge_set_vlan_tx_offload_cfg(vport);
6680 if (ret)
6681 return ret;
6682
6683 return hclge_set_vlan_rx_offload_cfg(vport);
6684}
6685
5f6ea83f
PL
6686static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
6687{
6688 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
6689 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
6690 struct hclge_desc desc;
6691 int status;
6692
6693 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
6694 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
6695 rx_req->ot_fst_vlan_type =
6696 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
6697 rx_req->ot_sec_vlan_type =
6698 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
6699 rx_req->in_fst_vlan_type =
6700 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
6701 rx_req->in_sec_vlan_type =
6702 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
6703
6704 status = hclge_cmd_send(&hdev->hw, &desc, 1);
6705 if (status) {
6706 dev_err(&hdev->pdev->dev,
6707 "Send rxvlan protocol type command fail, ret =%d\n",
6708 status);
6709 return status;
6710 }
6711
6712 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
6713
d0d72bac 6714 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
5f6ea83f
PL
6715 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
6716 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
6717
6718 status = hclge_cmd_send(&hdev->hw, &desc, 1);
6719 if (status)
6720 dev_err(&hdev->pdev->dev,
6721 "Send txvlan protocol type command fail, ret =%d\n",
6722 status);
6723
6724 return status;
6725}
6726
46a3df9f
S
6727static int hclge_init_vlan_config(struct hclge_dev *hdev)
6728{
5f6ea83f
PL
6729#define HCLGE_DEF_VLAN_TYPE 0x8100
6730
c60edc17 6731 struct hnae3_handle *handle = &hdev->vport[0].nic;
5f6ea83f 6732 struct hclge_vport *vport;
46a3df9f 6733 int ret;
5f6ea83f
PL
6734 int i;
6735
64d114f0 6736 if (hdev->pdev->revision >= 0x21) {
30ebc576
JS
6737 /* for revision 0x21, vf vlan filter is per function */
6738 for (i = 0; i < hdev->num_alloc_vport; i++) {
6739 vport = &hdev->vport[i];
6740 ret = hclge_set_vlan_filter_ctrl(hdev,
6741 HCLGE_FILTER_TYPE_VF,
6742 HCLGE_FILTER_FE_EGRESS,
6743 true,
6744 vport->vport_id);
6745 if (ret)
6746 return ret;
6747 }
46a3df9f 6748
64d114f0 6749 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
30ebc576
JS
6750 HCLGE_FILTER_FE_INGRESS, true,
6751 0);
64d114f0
ZL
6752 if (ret)
6753 return ret;
6754 } else {
6755 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6756 HCLGE_FILTER_FE_EGRESS_V1_B,
30ebc576 6757 true, 0);
64d114f0
ZL
6758 if (ret)
6759 return ret;
6760 }
46a3df9f 6761
c60edc17
JS
6762 handle->netdev_flags |= HNAE3_VLAN_FLTR;
6763
5f6ea83f
PL
6764 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
6765 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
6766 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
6767 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
6768 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
6769 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
6770
6771 ret = hclge_set_vlan_protocol_type(hdev);
5e43aef8
L
6772 if (ret)
6773 return ret;
46a3df9f 6774
5f6ea83f 6775 for (i = 0; i < hdev->num_alloc_vport; i++) {
741fca16 6776 u16 vlan_tag;
dcb35cce 6777
741fca16
JS
6778 vport = &hdev->vport[i];
6779 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
5f6ea83f 6780
741fca16
JS
6781 ret = hclge_vlan_offload_cfg(vport,
6782 vport->port_base_vlan_cfg.state,
6783 vlan_tag);
5f6ea83f
PL
6784 if (ret)
6785 return ret;
6786 }
6787
dc8131d8 6788 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
46a3df9f
S
6789}
6790
21e043cd
JS
6791static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
6792 bool writen_to_tbl)
c6075b19 6793{
6794 struct hclge_vport_vlan_cfg *vlan;
6795
6796 /* vlan 0 is reserved */
6797 if (!vlan_id)
6798 return;
6799
6800 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
6801 if (!vlan)
6802 return;
6803
21e043cd 6804 vlan->hd_tbl_status = writen_to_tbl;
c6075b19 6805 vlan->vlan_id = vlan_id;
6806
6807 list_add_tail(&vlan->node, &vport->vlan_list);
6808}
6809
21e043cd
JS
6810static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
6811{
6812 struct hclge_vport_vlan_cfg *vlan, *tmp;
6813 struct hclge_dev *hdev = vport->back;
6814 int ret;
6815
6816 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
6817 if (!vlan->hd_tbl_status) {
6818 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
6819 vport->vport_id,
6820 vlan->vlan_id, 0, false);
6821 if (ret) {
6822 dev_err(&hdev->pdev->dev,
6823 "restore vport vlan list failed, ret=%d\n",
6824 ret);
6825 return ret;
6826 }
6827 }
6828 vlan->hd_tbl_status = true;
6829 }
6830
6831 return 0;
6832}
6833
6834static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
6835 bool is_write_tbl)
c6075b19 6836{
6837 struct hclge_vport_vlan_cfg *vlan, *tmp;
6838 struct hclge_dev *hdev = vport->back;
6839
6840 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
6841 if (vlan->vlan_id == vlan_id) {
6842 if (is_write_tbl && vlan->hd_tbl_status)
6843 hclge_set_vlan_filter_hw(hdev,
6844 htons(ETH_P_8021Q),
6845 vport->vport_id,
6846 vlan_id, 0,
6847 true);
6848
6849 list_del(&vlan->node);
6850 kfree(vlan);
6851 break;
6852 }
6853 }
6854}
6855
6856void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
6857{
6858 struct hclge_vport_vlan_cfg *vlan, *tmp;
6859 struct hclge_dev *hdev = vport->back;
6860
6861 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
6862 if (vlan->hd_tbl_status)
6863 hclge_set_vlan_filter_hw(hdev,
6864 htons(ETH_P_8021Q),
6865 vport->vport_id,
6866 vlan->vlan_id, 0,
6867 true);
6868
6869 vlan->hd_tbl_status = false;
6870 if (is_del_list) {
6871 list_del(&vlan->node);
6872 kfree(vlan);
6873 }
6874 }
6875}
6876
6877void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
6878{
6879 struct hclge_vport_vlan_cfg *vlan, *tmp;
6880 struct hclge_vport *vport;
6881 int i;
6882
6883 mutex_lock(&hdev->vport_cfg_mutex);
6884 for (i = 0; i < hdev->num_alloc_vport; i++) {
6885 vport = &hdev->vport[i];
6886 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
6887 list_del(&vlan->node);
6888 kfree(vlan);
6889 }
6890 }
6891 mutex_unlock(&hdev->vport_cfg_mutex);
6892}
6893
b2641e2a 6894int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
052ece6d
PL
6895{
6896 struct hclge_vport *vport = hclge_get_vport(handle);
6897
44e626f7
JS
6898 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
6899 vport->rxvlan_cfg.strip_tag1_en = false;
6900 vport->rxvlan_cfg.strip_tag2_en = enable;
6901 } else {
6902 vport->rxvlan_cfg.strip_tag1_en = enable;
6903 vport->rxvlan_cfg.strip_tag2_en = true;
6904 }
052ece6d
PL
6905 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
6906 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
44e626f7 6907 vport->rxvlan_cfg.rx_vlan_offload_en = enable;
052ece6d
PL
6908
6909 return hclge_set_vlan_rx_offload_cfg(vport);
6910}
6911
21e043cd
JS
6912static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
6913 u16 port_base_vlan_state,
6914 struct hclge_vlan_info *new_info,
6915 struct hclge_vlan_info *old_info)
6916{
6917 struct hclge_dev *hdev = vport->back;
6918 int ret;
6919
6920 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
6921 hclge_rm_vport_all_vlan_table(vport, false);
6922 return hclge_set_vlan_filter_hw(hdev,
6923 htons(new_info->vlan_proto),
6924 vport->vport_id,
6925 new_info->vlan_tag,
6926 new_info->qos, false);
6927 }
6928
6929 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
6930 vport->vport_id, old_info->vlan_tag,
6931 old_info->qos, true);
6932 if (ret)
6933 return ret;
6934
6935 return hclge_add_vport_all_vlan_table(vport);
6936}
6937
6938int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
6939 struct hclge_vlan_info *vlan_info)
6940{
6941 struct hnae3_handle *nic = &vport->nic;
6942 struct hclge_vlan_info *old_vlan_info;
6943 struct hclge_dev *hdev = vport->back;
6944 int ret;
6945
6946 old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
6947
6948 ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
6949 if (ret)
6950 return ret;
6951
6952 if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
6953 /* add new VLAN tag */
6954 ret = hclge_set_vlan_filter_hw(hdev, vlan_info->vlan_proto,
6955 vport->vport_id,
6956 vlan_info->vlan_tag,
6957 vlan_info->qos, false);
6958 if (ret)
6959 return ret;
6960
6961 /* remove old VLAN tag */
6962 ret = hclge_set_vlan_filter_hw(hdev, old_vlan_info->vlan_proto,
6963 vport->vport_id,
6964 old_vlan_info->vlan_tag,
6965 old_vlan_info->qos, true);
6966 if (ret)
6967 return ret;
6968
6969 goto update;
6970 }
6971
6972 ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
6973 old_vlan_info);
6974 if (ret)
6975 return ret;
6976
6977 /* update state only when disabling/enabling port based VLAN */
6978 vport->port_base_vlan_cfg.state = state;
6979 if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
6980 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
6981 else
6982 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
6983
6984update:
6985 vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
6986 vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
6987 vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
6988
6989 return 0;
6990}
6991
6992static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
6993 enum hnae3_port_base_vlan_state state,
6994 u16 vlan)
6995{
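 /* Map the requested VLAN against the current port based VLAN state:
  * vlan 0 disables it, a different non-zero vlan enables or modifies
  * it, and the current vlan (or 0 while disabled) is a no-change.
  */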
6996 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
6997 if (!vlan)
6998 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
6999 else
7000 return HNAE3_PORT_BASE_VLAN_ENABLE;
7001 } else {
7002 if (!vlan)
7003 return HNAE3_PORT_BASE_VLAN_DISABLE;
7004 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
7005 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7006 else
7007 return HNAE3_PORT_BASE_VLAN_MODIFY;
7008 }
7009}
7010
7011static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
7012 u16 vlan, u8 qos, __be16 proto)
7013{
7014 struct hclge_vport *vport = hclge_get_vport(handle);
7015 struct hclge_dev *hdev = vport->back;
7016 struct hclge_vlan_info vlan_info;
7017 u16 state;
7018 int ret;
7019
7020 if (hdev->pdev->revision == 0x20)
7021 return -EOPNOTSUPP;
7022
7023 /* qos is a 3-bit value, so it cannot be bigger than 7 */
7024 if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
7025 return -EINVAL;
7026 if (proto != htons(ETH_P_8021Q))
7027 return -EPROTONOSUPPORT;
7028
7029 vport = &hdev->vport[vfid];
7030 state = hclge_get_port_base_vlan_state(vport,
7031 vport->port_base_vlan_cfg.state,
7032 vlan);
7033 if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
7034 return 0;
7035
7036 vlan_info.vlan_tag = vlan;
7037 vlan_info.qos = qos;
7038 vlan_info.vlan_proto = ntohs(proto);
7039
7040 /* update port based VLAN for PF */
7041 if (!vfid) {
7042 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7043 ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
7044 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7045
7046 return ret;
7047 }
7048
92f11ea1
JS
7049 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
7050 return hclge_update_port_base_vlan_cfg(vport, state,
7051 &vlan_info);
7052 } else {
7053 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
7054 (u8)vfid, state,
7055 vlan, qos,
7056 ntohs(proto));
7057 return ret;
7058 }
21e043cd
JS
7059}
7060
7061int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
7062 u16 vlan_id, bool is_kill)
7063{
7064 struct hclge_vport *vport = hclge_get_vport(handle);
7065 struct hclge_dev *hdev = vport->back;
7066 bool writen_to_tbl = false;
7067 int ret = 0;
7068
7069 /* When port based VLAN is enabled, we use the port based VLAN as the
7070 * VLAN filter entry. In this case, we don't update the VLAN filter table
7071 * when the user adds a new VLAN or removes an existing VLAN, we just
7072 * update the vport VLAN list. The VLAN ids in the VLAN list won't be
7073 * written to the VLAN filter table until port based VLAN is disabled.
7074 */
7075 if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7076 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
7077 vlan_id, 0, is_kill);
7078 writen_to_tbl = true;
7079 }
7080
7081 if (ret)
7082 return ret;
7083
7084 if (is_kill)
7085 hclge_rm_vport_vlan_table(vport, vlan_id, false);
7086 else
7087 hclge_add_vport_vlan_table(vport, vlan_id,
7088 writen_to_tbl);
7089
7090 return 0;
7091}
7092
e6d7d79d 7093static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
46a3df9f 7094{
d44f9b63 7095 struct hclge_config_max_frm_size_cmd *req;
46a3df9f 7096 struct hclge_desc desc;
46a3df9f 7097
46a3df9f
S
7098 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
7099
d44f9b63 7100 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
e6d7d79d 7101 req->max_frm_size = cpu_to_le16(new_mps);
8fc7346c 7102 req->min_frm_size = HCLGE_MAC_MIN_FRAME;
46a3df9f 7103
e6d7d79d 7104 return hclge_cmd_send(&hdev->hw, &desc, 1);
46a3df9f
S
7105}
7106
dd72140c
FL
7107static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
7108{
7109 struct hclge_vport *vport = hclge_get_vport(handle);
818f1675
YL
7110
7111 return hclge_set_vport_mtu(vport, new_mtu);
7112}
7113
7114int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
7115{
dd72140c 7116 struct hclge_dev *hdev = vport->back;
818f1675 7117 int i, max_frm_size, ret = 0;
dd72140c 7118
e6d7d79d
YL
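 /* The hardware MPS is a full frame size, so the MTU is extended by the
  * Ethernet header, FCS and room for two VLAN tags before range checks.
  */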
7119 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
7120 if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
7121 max_frm_size > HCLGE_MAC_MAX_FRAME)
7122 return -EINVAL;
7123
818f1675
YL
7124 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
7125 mutex_lock(&hdev->vport_lock);
7126 /* VF's mps must fit within hdev->mps */
7127 if (vport->vport_id && max_frm_size > hdev->mps) {
7128 mutex_unlock(&hdev->vport_lock);
7129 return -EINVAL;
7130 } else if (vport->vport_id) {
7131 vport->mps = max_frm_size;
7132 mutex_unlock(&hdev->vport_lock);
7133 return 0;
7134 }
7135
7136	/* PF's mps must be greater than the VF's mps */
7137 for (i = 1; i < hdev->num_alloc_vport; i++)
7138 if (max_frm_size < hdev->vport[i].mps) {
7139 mutex_unlock(&hdev->vport_lock);
7140 return -EINVAL;
7141 }
7142
cdca4c48
YL
7143 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7144
e6d7d79d 7145 ret = hclge_set_mac_mtu(hdev, max_frm_size);
dd72140c
FL
7146 if (ret) {
7147 dev_err(&hdev->pdev->dev,
7148 "Change mtu fail, ret =%d\n", ret);
818f1675 7149 goto out;
dd72140c
FL
7150 }
7151
e6d7d79d 7152 hdev->mps = max_frm_size;
818f1675 7153 vport->mps = max_frm_size;
e6d7d79d 7154
dd72140c
FL
7155 ret = hclge_buffer_alloc(hdev);
7156 if (ret)
7157 dev_err(&hdev->pdev->dev,
7158 "Allocate buffer fail, ret =%d\n", ret);
7159
818f1675 7160out:
cdca4c48 7161 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
818f1675 7162 mutex_unlock(&hdev->vport_lock);
dd72140c
FL
7163 return ret;
7164}
7165
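/* Worked example of the frame-size arithmetic in hclge_set_vport_mtu():
 * with the standard values ETH_HLEN = 14, ETH_FCS_LEN = 4 and
 * VLAN_HLEN = 4, an MTU of 1500 requires a max frame size of
 * 1500 + 14 + 4 + 2 * 4 = 1526 bytes, which is then raised to at least
 * HCLGE_MAC_DEFAULT_FRAME before being written to the MAC.
 */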
46a3df9f
S
7166static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
7167 bool enable)
7168{
d44f9b63 7169 struct hclge_reset_tqp_queue_cmd *req;
46a3df9f
S
7170 struct hclge_desc desc;
7171 int ret;
7172
7173 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
7174
d44f9b63 7175 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
46a3df9f 7176 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
e4e87715 7177 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
46a3df9f
S
7178
7179 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7180 if (ret) {
7181 dev_err(&hdev->pdev->dev,
7182 "Send tqp reset cmd error, status =%d\n", ret);
7183 return ret;
7184 }
7185
7186 return 0;
7187}
7188
7189static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
7190{
d44f9b63 7191 struct hclge_reset_tqp_queue_cmd *req;
46a3df9f
S
7192 struct hclge_desc desc;
7193 int ret;
7194
7195 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
7196
d44f9b63 7197 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
46a3df9f
S
7198 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7199
7200 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7201 if (ret) {
7202 dev_err(&hdev->pdev->dev,
7203 "Get reset status error, status =%d\n", ret);
7204 return ret;
7205 }
7206
e4e87715 7207 return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
46a3df9f
S
7208}
7209
0c29d191 7210u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
814e0274
PL
7211{
7212 struct hnae3_queue *queue;
7213 struct hclge_tqp *tqp;
7214
7215 queue = handle->kinfo.tqp[queue_id];
7216 tqp = container_of(queue, struct hclge_tqp, q);
7217
7218 return tqp->index;
7219}
7220
7fa6be4f 7221int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
46a3df9f
S
7222{
7223 struct hclge_vport *vport = hclge_get_vport(handle);
7224 struct hclge_dev *hdev = vport->back;
7225 int reset_try_times = 0;
7226 int reset_status;
814e0274 7227 u16 queue_gid;
7fa6be4f 7228 int ret = 0;
46a3df9f 7229
814e0274
PL
7230 queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
7231
46a3df9f
S
7232 ret = hclge_tqp_enable(hdev, queue_id, 0, false);
7233 if (ret) {
7fa6be4f
HT
7234 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
7235 return ret;
46a3df9f
S
7236 }
7237
814e0274 7238 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
46a3df9f 7239 if (ret) {
7fa6be4f
HT
7240 dev_err(&hdev->pdev->dev,
7241 "Send reset tqp cmd fail, ret = %d\n", ret);
7242 return ret;
46a3df9f
S
7243 }
7244
7245 reset_try_times = 0;
7246 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
7247 /* Wait for tqp hw reset */
7248 msleep(20);
814e0274 7249 reset_status = hclge_get_reset_status(hdev, queue_gid);
46a3df9f
S
7250 if (reset_status)
7251 break;
7252 }
7253
7254 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
7fa6be4f
HT
7255 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
7256 return ret;
46a3df9f
S
7257 }
7258
814e0274 7259 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7fa6be4f
HT
7260 if (ret)
7261 dev_err(&hdev->pdev->dev,
7262 "Deassert the soft reset fail, ret = %d\n", ret);
7263
7264 return ret;
46a3df9f
S
7265}
7266
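/* hclge_reset_tqp() above uses a simple poll-with-timeout idiom: request
 * the reset, then re-read the ready bit at a fixed interval up to a
 * bounded number of tries. A generic sketch of that idiom (names
 * hypothetical, not driver API):
 */
static int example_poll_until_ready(bool (*ready)(void *ctx), void *ctx,
				    int tries, unsigned int interval_ms)
{
	while (tries-- > 0) {
		msleep(interval_ms);
		if (ready(ctx))
			return 0;
	}
	return -ETIMEDOUT;
}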
1a426f8b
PL
7267void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
7268{
7269 struct hclge_dev *hdev = vport->back;
7270 int reset_try_times = 0;
7271 int reset_status;
7272 u16 queue_gid;
7273 int ret;
7274
7275 queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
7276
7277 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
7278 if (ret) {
7279 dev_warn(&hdev->pdev->dev,
7280 "Send reset tqp cmd fail, ret = %d\n", ret);
7281 return;
7282 }
7283
7284 reset_try_times = 0;
7285 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
7286 /* Wait for tqp hw reset */
7287 msleep(20);
7288 reset_status = hclge_get_reset_status(hdev, queue_gid);
7289 if (reset_status)
7290 break;
7291 }
7292
7293 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
7294 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
7295 return;
7296 }
7297
7298 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7299 if (ret)
7300 dev_warn(&hdev->pdev->dev,
7301 "Deassert the soft reset fail, ret = %d\n", ret);
7302}
7303
46a3df9f
S
7304static u32 hclge_get_fw_version(struct hnae3_handle *handle)
7305{
7306 struct hclge_vport *vport = hclge_get_vport(handle);
7307 struct hclge_dev *hdev = vport->back;
7308
7309 return hdev->fw_version;
7310}
7311
61387774
PL
7312static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
7313{
7314 struct phy_device *phydev = hdev->hw.mac.phydev;
7315
7316 if (!phydev)
7317 return;
7318
70814e81 7319 phy_set_asym_pause(phydev, rx_en, tx_en);
61387774
PL
7320}
7321
7322static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
7323{
61387774
PL
7324 int ret;
7325
7326 if (rx_en && tx_en)
40173a2e 7327 hdev->fc_mode_last_time = HCLGE_FC_FULL;
61387774 7328 else if (rx_en && !tx_en)
40173a2e 7329 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
61387774 7330 else if (!rx_en && tx_en)
40173a2e 7331 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
61387774 7332 else
40173a2e 7333 hdev->fc_mode_last_time = HCLGE_FC_NONE;
61387774 7334
40173a2e 7335 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
61387774 7336 return 0;
61387774
PL
7337
7338 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
7339 if (ret) {
7340 dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
7341 ret);
7342 return ret;
7343 }
7344
40173a2e 7345 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
61387774
PL
7346
7347 return 0;
7348}
7349
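/* Summary of the rx_en/tx_en to fc_mode mapping in hclge_cfg_pauseparam():
 *
 *   rx_en  tx_en   fc_mode_last_time
 *     1      1     HCLGE_FC_FULL
 *     1      0     HCLGE_FC_RX_PAUSE
 *     0      1     HCLGE_FC_TX_PAUSE
 *     0      0     HCLGE_FC_NONE
 *
 * When PFC is active (fc_mode == HCLGE_FC_PFC) the MAC pause registers are
 * left untouched and only the remembered mode changes.
 */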
1770a7a3
PL
7350int hclge_cfg_flowctrl(struct hclge_dev *hdev)
7351{
7352 struct phy_device *phydev = hdev->hw.mac.phydev;
7353 u16 remote_advertising = 0;
7354 u16 local_advertising = 0;
7355 u32 rx_pause, tx_pause;
7356 u8 flowctl;
7357
7358 if (!phydev->link || !phydev->autoneg)
7359 return 0;
7360
3c1bcc86 7361 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
1770a7a3
PL
7362
7363 if (phydev->pause)
7364 remote_advertising = LPA_PAUSE_CAP;
7365
7366 if (phydev->asym_pause)
7367 remote_advertising |= LPA_PAUSE_ASYM;
7368
7369 flowctl = mii_resolve_flowctrl_fdx(local_advertising,
7370 remote_advertising);
7371 tx_pause = flowctl & FLOW_CTRL_TX;
7372 rx_pause = flowctl & FLOW_CTRL_RX;
7373
7374 if (phydev->duplex == HCLGE_MAC_HALF) {
7375 tx_pause = 0;
7376 rx_pause = 0;
7377 }
7378
7379 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
7380}
7381
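/* Worked example for hclge_cfg_flowctrl(): if both link partners advertise
 * symmetric Pause, mii_resolve_flowctrl_fdx() resolves to
 * FLOW_CTRL_TX | FLOW_CTRL_RX and full flow control is configured; on a
 * half-duplex link both directions are forced off regardless of the
 * negotiated result.
 */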
46a3df9f
S
7382static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
7383 u32 *rx_en, u32 *tx_en)
7384{
7385 struct hclge_vport *vport = hclge_get_vport(handle);
7386 struct hclge_dev *hdev = vport->back;
7387
7388 *auto_neg = hclge_get_autoneg(handle);
7389
7390 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
7391 *rx_en = 0;
7392 *tx_en = 0;
7393 return;
7394 }
7395
7396 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
7397 *rx_en = 1;
7398 *tx_en = 0;
7399 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
7400 *tx_en = 1;
7401 *rx_en = 0;
7402 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
7403 *rx_en = 1;
7404 *tx_en = 1;
7405 } else {
7406 *rx_en = 0;
7407 *tx_en = 0;
7408 }
7409}
7410
61387774
PL
7411static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
7412 u32 rx_en, u32 tx_en)
7413{
7414 struct hclge_vport *vport = hclge_get_vport(handle);
7415 struct hclge_dev *hdev = vport->back;
7416 struct phy_device *phydev = hdev->hw.mac.phydev;
7417 u32 fc_autoneg;
7418
61387774
PL
7419 fc_autoneg = hclge_get_autoneg(handle);
7420 if (auto_neg != fc_autoneg) {
7421 dev_info(&hdev->pdev->dev,
7422 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
7423 return -EOPNOTSUPP;
7424 }
7425
7426 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
7427 dev_info(&hdev->pdev->dev,
7428 "Priority flow control enabled. Cannot set link flow control.\n");
7429 return -EOPNOTSUPP;
7430 }
7431
7432 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
7433
7434 if (!fc_autoneg)
7435 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
7436
0c963e8c
FL
7437 /* Only support flow control negotiation for netdev with
7438 * phy attached for now.
7439 */
7440 if (!phydev)
7441 return -EOPNOTSUPP;
7442
61387774
PL
7443 return phy_start_aneg(phydev);
7444}
7445
46a3df9f
S
7446static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
7447 u8 *auto_neg, u32 *speed, u8 *duplex)
7448{
7449 struct hclge_vport *vport = hclge_get_vport(handle);
7450 struct hclge_dev *hdev = vport->back;
7451
7452 if (speed)
7453 *speed = hdev->hw.mac.speed;
7454 if (duplex)
7455 *duplex = hdev->hw.mac.duplex;
7456 if (auto_neg)
7457 *auto_neg = hdev->hw.mac.autoneg;
7458}
7459
7460static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type)
7461{
7462 struct hclge_vport *vport = hclge_get_vport(handle);
7463 struct hclge_dev *hdev = vport->back;
7464
7465 if (media_type)
7466 *media_type = hdev->hw.mac.media_type;
7467}
7468
7469static void hclge_get_mdix_mode(struct hnae3_handle *handle,
7470 u8 *tp_mdix_ctrl, u8 *tp_mdix)
7471{
7472 struct hclge_vport *vport = hclge_get_vport(handle);
7473 struct hclge_dev *hdev = vport->back;
7474 struct phy_device *phydev = hdev->hw.mac.phydev;
7475 int mdix_ctrl, mdix, retval, is_resolved;
7476
7477 if (!phydev) {
7478 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
7479 *tp_mdix = ETH_TP_MDI_INVALID;
7480 return;
7481 }
7482
7483 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
7484
7485 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
e4e87715
PL
7486 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
7487 HCLGE_PHY_MDIX_CTRL_S);
46a3df9f
S
7488
7489 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
e4e87715
PL
7490 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
7491 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
46a3df9f
S
7492
7493 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
7494
7495 switch (mdix_ctrl) {
7496 case 0x0:
7497 *tp_mdix_ctrl = ETH_TP_MDI;
7498 break;
7499 case 0x1:
7500 *tp_mdix_ctrl = ETH_TP_MDI_X;
7501 break;
7502 case 0x3:
7503 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
7504 break;
7505 default:
7506 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
7507 break;
7508 }
7509
7510 if (!is_resolved)
7511 *tp_mdix = ETH_TP_MDI_INVALID;
7512 else if (mdix)
7513 *tp_mdix = ETH_TP_MDI_X;
7514 else
7515 *tp_mdix = ETH_TP_MDI;
7516}
7517
7518static int hclge_init_client_instance(struct hnae3_client *client,
7519 struct hnae3_ae_dev *ae_dev)
7520{
7521 struct hclge_dev *hdev = ae_dev->priv;
7522 struct hclge_vport *vport;
7523 int i, ret;
7524
7525 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
7526 vport = &hdev->vport[i];
7527
7528 switch (client->type) {
7529 case HNAE3_CLIENT_KNIC:
7530
7531 hdev->nic_client = client;
7532 vport->nic.client = client;
7533 ret = client->ops->init_instance(&vport->nic);
7534 if (ret)
49dd8054 7535 goto clear_nic;
46a3df9f 7536
d9f28fc2
JS
7537 hnae3_set_client_init_flag(client, ae_dev, 1);
7538
46a3df9f 7539 if (hdev->roce_client &&
e92a0843 7540 hnae3_dev_roce_supported(hdev)) {
46a3df9f
S
7541 struct hnae3_client *rc = hdev->roce_client;
7542
7543 ret = hclge_init_roce_base_info(vport);
7544 if (ret)
49dd8054 7545 goto clear_roce;
46a3df9f
S
7546
7547 ret = rc->ops->init_instance(&vport->roce);
7548 if (ret)
49dd8054 7549 goto clear_roce;
d9f28fc2
JS
7550
7551 hnae3_set_client_init_flag(hdev->roce_client,
7552 ae_dev, 1);
46a3df9f
S
7553 }
7554
7555 break;
7556 case HNAE3_CLIENT_UNIC:
7557 hdev->nic_client = client;
7558 vport->nic.client = client;
7559
7560 ret = client->ops->init_instance(&vport->nic);
7561 if (ret)
49dd8054 7562 goto clear_nic;
46a3df9f 7563
d9f28fc2
JS
7564 hnae3_set_client_init_flag(client, ae_dev, 1);
7565
46a3df9f
S
7566 break;
7567 case HNAE3_CLIENT_ROCE:
e92a0843 7568 if (hnae3_dev_roce_supported(hdev)) {
46a3df9f
S
7569 hdev->roce_client = client;
7570 vport->roce.client = client;
7571 }
7572
3a46f34d 7573 if (hdev->roce_client && hdev->nic_client) {
46a3df9f
S
7574 ret = hclge_init_roce_base_info(vport);
7575 if (ret)
49dd8054 7576 goto clear_roce;
46a3df9f
S
7577
7578 ret = client->ops->init_instance(&vport->roce);
7579 if (ret)
49dd8054 7580 goto clear_roce;
d9f28fc2
JS
7581
7582 hnae3_set_client_init_flag(client, ae_dev, 1);
46a3df9f 7583 }
fa7a4bd5
JS
7584
7585 break;
7586 default:
7587 return -EINVAL;
46a3df9f
S
7588 }
7589 }
7590
7591 return 0;
49dd8054
JS
7592
7593clear_nic:
7594 hdev->nic_client = NULL;
7595 vport->nic.client = NULL;
7596 return ret;
7597clear_roce:
7598 hdev->roce_client = NULL;
7599 vport->roce.client = NULL;
7600 return ret;
46a3df9f
S
7601}
7602
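/* Ordering note for hclge_init_client_instance(): the RoCE instance is
 * only initialized once both the NIC client and the RoCE client are
 * registered and the device reports RoCE support, so whichever of the two
 * clients registers last triggers the RoCE init.
 */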
7603static void hclge_uninit_client_instance(struct hnae3_client *client,
7604 struct hnae3_ae_dev *ae_dev)
7605{
7606 struct hclge_dev *hdev = ae_dev->priv;
7607 struct hclge_vport *vport;
7608 int i;
7609
7610 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
7611 vport = &hdev->vport[i];
a17dcf3f 7612 if (hdev->roce_client) {
46a3df9f
S
7613 hdev->roce_client->ops->uninit_instance(&vport->roce,
7614 0);
a17dcf3f
L
7615 hdev->roce_client = NULL;
7616 vport->roce.client = NULL;
7617 }
46a3df9f
S
7618 if (client->type == HNAE3_CLIENT_ROCE)
7619 return;
49dd8054 7620 if (hdev->nic_client && client->ops->uninit_instance) {
46a3df9f 7621 client->ops->uninit_instance(&vport->nic, 0);
a17dcf3f
L
7622 hdev->nic_client = NULL;
7623 vport->nic.client = NULL;
7624 }
46a3df9f
S
7625 }
7626}
7627
7628static int hclge_pci_init(struct hclge_dev *hdev)
7629{
7630 struct pci_dev *pdev = hdev->pdev;
7631 struct hclge_hw *hw;
7632 int ret;
7633
7634 ret = pci_enable_device(pdev);
7635 if (ret) {
7636 dev_err(&pdev->dev, "failed to enable PCI device\n");
3e249d3b 7637 return ret;
46a3df9f
S
7638 }
7639
7640 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
7641 if (ret) {
7642 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
7643 if (ret) {
7644 dev_err(&pdev->dev,
7645 "can't set consistent PCI DMA");
7646 goto err_disable_device;
7647 }
7648 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
7649 }
7650
7651 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
7652 if (ret) {
7653 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
7654 goto err_disable_device;
7655 }
7656
7657 pci_set_master(pdev);
7658 hw = &hdev->hw;
46a3df9f
S
7659 hw->io_base = pcim_iomap(pdev, 2, 0);
7660 if (!hw->io_base) {
7661 dev_err(&pdev->dev, "Can't map configuration register space\n");
7662 ret = -ENOMEM;
7663 goto err_clr_master;
7664 }
7665
709eb41a
L
7666 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
7667
46a3df9f
S
7668 return 0;
7669err_clr_master:
7670 pci_clear_master(pdev);
7671 pci_release_regions(pdev);
7672err_disable_device:
7673 pci_disable_device(pdev);
46a3df9f
S
7674
7675 return ret;
7676}
7677
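/* hclge_pci_init() uses the common "try 64-bit DMA, fall back to 32-bit"
 * idiom. A stand-alone sketch of the same pattern (function name
 * illustrative only):
 */
static int example_set_dma_mask(struct pci_dev *pdev)
{
	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
		return 0;	/* full 64-bit DMA is available */

	/* fall back to 32-bit addressing */
	return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
}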
7678static void hclge_pci_uninit(struct hclge_dev *hdev)
7679{
7680 struct pci_dev *pdev = hdev->pdev;
7681
6a814413 7682 pcim_iounmap(pdev, hdev->hw.io_base);
887c3820 7683 pci_free_irq_vectors(pdev);
46a3df9f
S
7684 pci_clear_master(pdev);
7685 pci_release_mem_regions(pdev);
7686 pci_disable_device(pdev);
7687}
7688
48569cda
PL
7689static void hclge_state_init(struct hclge_dev *hdev)
7690{
7691 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
7692 set_bit(HCLGE_STATE_DOWN, &hdev->state);
7693 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
7694 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
7695 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
7696 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
7697}
7698
7699static void hclge_state_uninit(struct hclge_dev *hdev)
7700{
7701 set_bit(HCLGE_STATE_DOWN, &hdev->state);
7702
7703 if (hdev->service_timer.function)
7704 del_timer_sync(&hdev->service_timer);
65e41e7e
HT
7705 if (hdev->reset_timer.function)
7706 del_timer_sync(&hdev->reset_timer);
48569cda
PL
7707 if (hdev->service_task.func)
7708 cancel_work_sync(&hdev->service_task);
7709 if (hdev->rst_service_task.func)
7710 cancel_work_sync(&hdev->rst_service_task);
7711 if (hdev->mbx_service_task.func)
7712 cancel_work_sync(&hdev->mbx_service_task);
7713}
7714
6b9a97ee
HT
7715static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
7716{
7717#define HCLGE_FLR_WAIT_MS 100
7718#define HCLGE_FLR_WAIT_CNT 50
7719 struct hclge_dev *hdev = ae_dev->priv;
7720 int cnt = 0;
7721
7722 clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
7723 clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
7724 set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
7725 hclge_reset_event(hdev->pdev, NULL);
7726
7727 while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
7728 cnt++ < HCLGE_FLR_WAIT_CNT)
7729 msleep(HCLGE_FLR_WAIT_MS);
7730
7731 if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
7732 dev_err(&hdev->pdev->dev,
7733 "flr wait down timeout: %d\n", cnt);
7734}
7735
7736static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
7737{
7738 struct hclge_dev *hdev = ae_dev->priv;
7739
7740 set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
7741}
7742
46a3df9f
S
7743static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
7744{
7745 struct pci_dev *pdev = ae_dev->pdev;
46a3df9f
S
7746 struct hclge_dev *hdev;
7747 int ret;
7748
7749 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
7750 if (!hdev) {
7751 ret = -ENOMEM;
ffd5656e 7752 goto out;
46a3df9f
S
7753 }
7754
46a3df9f
S
7755 hdev->pdev = pdev;
7756 hdev->ae_dev = ae_dev;
4ed340ab 7757 hdev->reset_type = HNAE3_NONE_RESET;
0742ed7c 7758 hdev->reset_level = HNAE3_FUNC_RESET;
46a3df9f 7759 ae_dev->priv = hdev;
e6d7d79d 7760 hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
46a3df9f 7761
818f1675 7762 mutex_init(&hdev->vport_lock);
6dd86902 7763 mutex_init(&hdev->vport_cfg_mutex);
818f1675 7764
46a3df9f
S
7765 ret = hclge_pci_init(hdev);
7766 if (ret) {
7767 dev_err(&pdev->dev, "PCI init failed\n");
ffd5656e 7768 goto out;
46a3df9f
S
7769 }
7770
3efb960f
L
7771 /* Firmware command queue initialize */
7772 ret = hclge_cmd_queue_init(hdev);
7773 if (ret) {
7774 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
ffd5656e 7775 goto err_pci_uninit;
3efb960f
L
7776 }
7777
7778 /* Firmware command initialize */
46a3df9f
S
7779 ret = hclge_cmd_init(hdev);
7780 if (ret)
ffd5656e 7781 goto err_cmd_uninit;
46a3df9f
S
7782
7783 ret = hclge_get_cap(hdev);
7784 if (ret) {
e00e2197
CIK
7785 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
7786 ret);
ffd5656e 7787 goto err_cmd_uninit;
46a3df9f
S
7788 }
7789
7790 ret = hclge_configure(hdev);
7791 if (ret) {
7792 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
ffd5656e 7793 goto err_cmd_uninit;
46a3df9f
S
7794 }
7795
887c3820 7796 ret = hclge_init_msi(hdev);
46a3df9f 7797 if (ret) {
887c3820 7798 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
ffd5656e 7799 goto err_cmd_uninit;
46a3df9f
S
7800 }
7801
466b0c00
L
7802 ret = hclge_misc_irq_init(hdev);
7803 if (ret) {
7804 dev_err(&pdev->dev,
7805 "Misc IRQ(vector0) init error, ret = %d.\n",
7806 ret);
ffd5656e 7807 goto err_msi_uninit;
466b0c00
L
7808 }
7809
46a3df9f
S
7810 ret = hclge_alloc_tqps(hdev);
7811 if (ret) {
7812 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
ffd5656e 7813 goto err_msi_irq_uninit;
46a3df9f
S
7814 }
7815
7816 ret = hclge_alloc_vport(hdev);
7817 if (ret) {
7818 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
ffd5656e 7819 goto err_msi_irq_uninit;
46a3df9f
S
7820 }
7821
7df7dad6
L
7822 ret = hclge_map_tqp(hdev);
7823 if (ret) {
7824 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
2312e050 7825 goto err_msi_irq_uninit;
7df7dad6
L
7826 }
7827
c5ef83cb
HT
7828 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
7829 ret = hclge_mac_mdio_config(hdev);
7830 if (ret) {
7831 dev_err(&hdev->pdev->dev,
7832 "mdio config fail ret=%d\n", ret);
2312e050 7833 goto err_msi_irq_uninit;
c5ef83cb 7834 }
cf9cca2d 7835 }
7836
39932473
JS
7837 ret = hclge_init_umv_space(hdev);
7838 if (ret) {
7839 dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
9fc55413 7840 goto err_mdiobus_unreg;
39932473
JS
7841 }
7842
46a3df9f
S
7843 ret = hclge_mac_init(hdev);
7844 if (ret) {
7845 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
ffd5656e 7846 goto err_mdiobus_unreg;
46a3df9f 7847 }
46a3df9f
S
7848
7849 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
7850 if (ret) {
7851 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
ffd5656e 7852 goto err_mdiobus_unreg;
46a3df9f
S
7853 }
7854
b26a6fea
PL
7855 ret = hclge_config_gro(hdev, true);
7856 if (ret)
7857 goto err_mdiobus_unreg;
7858
46a3df9f
S
7859 ret = hclge_init_vlan_config(hdev);
7860 if (ret) {
7861 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
ffd5656e 7862 goto err_mdiobus_unreg;
46a3df9f
S
7863 }
7864
7865 ret = hclge_tm_schd_init(hdev);
7866 if (ret) {
7867 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
ffd5656e 7868 goto err_mdiobus_unreg;
68ece54e
YL
7869 }
7870
268f5dfa 7871 hclge_rss_init_cfg(hdev);
68ece54e
YL
7872 ret = hclge_rss_init_hw(hdev);
7873 if (ret) {
7874 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
ffd5656e 7875 goto err_mdiobus_unreg;
46a3df9f
S
7876 }
7877
f5aac71c
FL
7878 ret = init_mgr_tbl(hdev);
7879 if (ret) {
7880 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
ffd5656e 7881 goto err_mdiobus_unreg;
f5aac71c
FL
7882 }
7883
d695964d
JS
7884 ret = hclge_init_fd_config(hdev);
7885 if (ret) {
7886 dev_err(&pdev->dev,
7887 "fd table init fail, ret=%d\n", ret);
7888 goto err_mdiobus_unreg;
7889 }
7890
99714195
SJ
7891 ret = hclge_hw_error_set_state(hdev, true);
7892 if (ret) {
7893 dev_err(&pdev->dev,
f3fa4a94 7894 "fail(%d) to enable hw error interrupts\n", ret);
99714195
SJ
7895 goto err_mdiobus_unreg;
7896 }
7897
cacde272
YL
7898 hclge_dcb_ops_set(hdev);
7899
d039ef68 7900 timer_setup(&hdev->service_timer, hclge_service_timer, 0);
65e41e7e 7901 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
46a3df9f 7902 INIT_WORK(&hdev->service_task, hclge_service_task);
cb1b9f77 7903 INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
c1a81619 7904 INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
46a3df9f 7905
8e52a602
XW
7906 hclge_clear_all_event_cause(hdev);
7907
466b0c00
L
7908 /* Enable MISC vector(vector0) */
7909 hclge_enable_vector(&hdev->misc_vector, true);
7910
48569cda 7911 hclge_state_init(hdev);
0742ed7c 7912 hdev->last_reset_time = jiffies;
46a3df9f
S
7913
7914 pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
7915 return 0;
7916
ffd5656e
HT
7917err_mdiobus_unreg:
7918 if (hdev->hw.mac.phydev)
7919 mdiobus_unregister(hdev->hw.mac.mdio_bus);
ffd5656e
HT
7920err_msi_irq_uninit:
7921 hclge_misc_irq_uninit(hdev);
7922err_msi_uninit:
7923 pci_free_irq_vectors(pdev);
7924err_cmd_uninit:
232d0d55 7925 hclge_cmd_uninit(hdev);
ffd5656e 7926err_pci_uninit:
6a814413 7927 pcim_iounmap(pdev, hdev->hw.io_base);
ffd5656e 7928 pci_clear_master(pdev);
46a3df9f 7929 pci_release_regions(pdev);
ffd5656e 7930 pci_disable_device(pdev);
ffd5656e 7931out:
46a3df9f
S
7932 return ret;
7933}
7934
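/* The error path of hclge_init_ae_dev() above follows the usual goto-based
 * unwinding idiom: each label releases exactly the resources acquired
 * before the failing step, in reverse order of acquisition (mdiobus, misc
 * IRQ, MSI vectors, command queue, then the PCI resources).
 */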
c6dc5213 7935static void hclge_stats_clear(struct hclge_dev *hdev)
7936{
7937 memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
7938}
7939
a6d818e3
YL
7940static void hclge_reset_vport_state(struct hclge_dev *hdev)
7941{
7942 struct hclge_vport *vport = hdev->vport;
7943 int i;
7944
7945 for (i = 0; i < hdev->num_alloc_vport; i++) {
0f14c5b1 7946 hclge_vport_stop(vport);
a6d818e3
YL
7947 vport++;
7948 }
7949}
7950
4ed340ab
L
7951static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
7952{
7953 struct hclge_dev *hdev = ae_dev->priv;
7954 struct pci_dev *pdev = ae_dev->pdev;
7955 int ret;
7956
7957 set_bit(HCLGE_STATE_DOWN, &hdev->state);
7958
c6dc5213 7959 hclge_stats_clear(hdev);
dc8131d8 7960 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
c6dc5213 7961
4ed340ab
L
7962 ret = hclge_cmd_init(hdev);
7963 if (ret) {
7964 dev_err(&pdev->dev, "Cmd queue init failed\n");
7965 return ret;
7966 }
7967
4ed340ab
L
7968 ret = hclge_map_tqp(hdev);
7969 if (ret) {
7970 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
7971 return ret;
7972 }
7973
39932473
JS
7974 hclge_reset_umv_space(hdev);
7975
4ed340ab
L
7976 ret = hclge_mac_init(hdev);
7977 if (ret) {
7978 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
7979 return ret;
7980 }
7981
4ed340ab
L
7982 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
7983 if (ret) {
7984 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
7985 return ret;
7986 }
7987
b26a6fea
PL
7988 ret = hclge_config_gro(hdev, true);
7989 if (ret)
7990 return ret;
7991
4ed340ab
L
7992 ret = hclge_init_vlan_config(hdev);
7993 if (ret) {
7994 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
7995 return ret;
7996 }
7997
44e59e37 7998 ret = hclge_tm_init_hw(hdev, true);
4ed340ab 7999 if (ret) {
f31c1ba6 8000 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
4ed340ab
L
8001 return ret;
8002 }
8003
8004 ret = hclge_rss_init_hw(hdev);
8005 if (ret) {
8006 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
8007 return ret;
8008 }
8009
d695964d
JS
8010 ret = hclge_init_fd_config(hdev);
8011 if (ret) {
8012 dev_err(&pdev->dev,
8013 "fd table init fail, ret=%d\n", ret);
8014 return ret;
8015 }
8016
f3fa4a94
SJ
8017 /* Re-enable the hw error interrupts because
8018 * the interrupts get disabled on core/global reset.
01865a50 8019 */
f3fa4a94
SJ
8020 ret = hclge_hw_error_set_state(hdev, true);
8021 if (ret) {
8022 dev_err(&pdev->dev,
8023 "fail(%d) to re-enable HNS hw error interrupts\n", ret);
8024 return ret;
8025 }
01865a50 8026
a6d818e3
YL
8027 hclge_reset_vport_state(hdev);
8028
4ed340ab
L
8029 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
8030 HCLGE_DRIVER_NAME);
8031
8032 return 0;
8033}
8034
46a3df9f
S
8035static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
8036{
8037 struct hclge_dev *hdev = ae_dev->priv;
8038 struct hclge_mac *mac = &hdev->hw.mac;
8039
48569cda 8040 hclge_state_uninit(hdev);
46a3df9f
S
8041
8042 if (mac->phydev)
8043 mdiobus_unregister(mac->mdio_bus);
8044
39932473
JS
8045 hclge_uninit_umv_space(hdev);
8046
466b0c00
L
8047 /* Disable MISC vector(vector0) */
8048 hclge_enable_vector(&hdev->misc_vector, false);
8e52a602
XW
8049 synchronize_irq(hdev->misc_vector.vector_irq);
8050
99714195 8051 hclge_hw_error_set_state(hdev, false);
232d0d55 8052 hclge_cmd_uninit(hdev);
ca1d7669 8053 hclge_misc_irq_uninit(hdev);
46a3df9f 8054 hclge_pci_uninit(hdev);
818f1675 8055 mutex_destroy(&hdev->vport_lock);
6dd86902 8056 hclge_uninit_vport_mac_table(hdev);
c6075b19 8057 hclge_uninit_vport_vlan_table(hdev);
6dd86902 8058 mutex_destroy(&hdev->vport_cfg_mutex);
46a3df9f
S
8059 ae_dev->priv = NULL;
8060}
8061
482d2e9c
PL
8062static u32 hclge_get_max_channels(struct hnae3_handle *handle)
8063{
8064 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
8065 struct hclge_vport *vport = hclge_get_vport(handle);
8066 struct hclge_dev *hdev = vport->back;
8067
c3b9c50d
HT
8068 return min_t(u32, hdev->rss_size_max,
8069 vport->alloc_tqps / kinfo->num_tc);
482d2e9c
PL
8070}
8071
8072static void hclge_get_channels(struct hnae3_handle *handle,
8073 struct ethtool_channels *ch)
8074{
482d2e9c
PL
8075 ch->max_combined = hclge_get_max_channels(handle);
8076 ch->other_count = 1;
8077 ch->max_other = 1;
c3b9c50d 8078 ch->combined_count = handle->kinfo.rss_size;
482d2e9c
PL
8079}
8080
09f2af64 8081static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
0d43bf45 8082 u16 *alloc_tqps, u16 *max_rss_size)
09f2af64
PL
8083{
8084 struct hclge_vport *vport = hclge_get_vport(handle);
8085 struct hclge_dev *hdev = vport->back;
09f2af64 8086
0d43bf45 8087 *alloc_tqps = vport->alloc_tqps;
09f2af64
PL
8088 *max_rss_size = hdev->rss_size_max;
8089}
8090
90c68a41
YL
8091static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
8092 bool rxfh_configured)
09f2af64
PL
8093{
8094 struct hclge_vport *vport = hclge_get_vport(handle);
8095 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
8096 struct hclge_dev *hdev = vport->back;
8097 int cur_rss_size = kinfo->rss_size;
8098 int cur_tqps = kinfo->num_tqps;
8099 u16 tc_offset[HCLGE_MAX_TC_NUM];
8100 u16 tc_valid[HCLGE_MAX_TC_NUM];
8101 u16 tc_size[HCLGE_MAX_TC_NUM];
8102 u16 roundup_size;
8103 u32 *rss_indir;
8104 int ret, i;
8105
672ad0ed 8106 kinfo->req_rss_size = new_tqps_num;
09f2af64 8107
672ad0ed 8108 ret = hclge_tm_vport_map_update(hdev);
09f2af64 8109 if (ret) {
672ad0ed 8110 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
09f2af64
PL
8111 return ret;
8112 }
8113
8114 roundup_size = roundup_pow_of_two(kinfo->rss_size);
8115 roundup_size = ilog2(roundup_size);
8116 /* Set the RSS TC mode according to the new RSS size */
8117 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
8118 tc_valid[i] = 0;
8119
8120 if (!(hdev->hw_tc_map & BIT(i)))
8121 continue;
8122
8123 tc_valid[i] = 1;
8124 tc_size[i] = roundup_size;
8125 tc_offset[i] = kinfo->rss_size * i;
8126 }
8127 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
8128 if (ret)
8129 return ret;
8130
90c68a41
YL
8131	/* RSS indirection table has been configured by the user */
8132 if (rxfh_configured)
8133 goto out;
8134
09f2af64
PL
8135 /* Reinitializes the rss indirect table according to the new RSS size */
8136 rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
8137 if (!rss_indir)
8138 return -ENOMEM;
8139
8140 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
8141 rss_indir[i] = i % kinfo->rss_size;
8142
8143 ret = hclge_set_rss(handle, rss_indir, NULL, 0);
8144 if (ret)
8145 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
8146 ret);
8147
8148 kfree(rss_indir);
8149
90c68a41 8150out:
09f2af64
PL
8151 if (!ret)
8152 dev_info(&hdev->pdev->dev,
8153 "Channels changed, rss_size from %d to %d, tqps from %d to %d",
8154 cur_rss_size, kinfo->rss_size,
8155 cur_tqps, kinfo->rss_size * kinfo->num_tc);
8156
8157 return ret;
8158}
8159
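/* Worked example of the TC sizing in hclge_set_channels(): with a new
 * rss_size of 12, roundup_pow_of_two(12) = 16 and ilog2(16) = 4, so each
 * enabled TC is programmed with tc_size = 4 (i.e. 2^4 = 16 queues
 * reserved) while its offset advances in steps of the actual rss_size
 * (12 * i).
 */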
77b34110
FL
8160static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
8161 u32 *regs_num_64_bit)
8162{
8163 struct hclge_desc desc;
8164 u32 total_num;
8165 int ret;
8166
8167 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
8168 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8169 if (ret) {
8170 dev_err(&hdev->pdev->dev,
8171 "Query register number cmd failed, ret = %d.\n", ret);
8172 return ret;
8173 }
8174
8175 *regs_num_32_bit = le32_to_cpu(desc.data[0]);
8176 *regs_num_64_bit = le32_to_cpu(desc.data[1]);
8177
8178 total_num = *regs_num_32_bit + *regs_num_64_bit;
8179 if (!total_num)
8180 return -EINVAL;
8181
8182 return 0;
8183}
8184
8185static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
8186 void *data)
8187{
8188#define HCLGE_32_BIT_REG_RTN_DATANUM 8
8189
8190 struct hclge_desc *desc;
8191 u32 *reg_val = data;
8192 __le32 *desc_data;
8193 int cmd_num;
8194 int i, k, n;
8195 int ret;
8196
8197 if (regs_num == 0)
8198 return 0;
8199
8200 cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
8201 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
8202 if (!desc)
8203 return -ENOMEM;
8204
8205 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
8206 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
8207 if (ret) {
8208 dev_err(&hdev->pdev->dev,
8209 "Query 32 bit register cmd failed, ret = %d.\n", ret);
8210 kfree(desc);
8211 return ret;
8212 }
8213
8214 for (i = 0; i < cmd_num; i++) {
8215 if (i == 0) {
8216 desc_data = (__le32 *)(&desc[i].data[0]);
8217 n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
8218 } else {
8219 desc_data = (__le32 *)(&desc[i]);
8220 n = HCLGE_32_BIT_REG_RTN_DATANUM;
8221 }
8222 for (k = 0; k < n; k++) {
8223 *reg_val++ = le32_to_cpu(*desc_data++);
8224
8225 regs_num--;
8226 if (!regs_num)
8227 break;
8228 }
8229 }
8230
8231 kfree(desc);
8232 return 0;
8233}
8234
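/* Worked example for hclge_get_32_bit_regs(): with
 * HCLGE_32_BIT_REG_RTN_DATANUM = 8 and the first descriptor carrying two
 * fewer data words than the rest, reading 100 registers needs
 * DIV_ROUND_UP(100 + 2, 8) = 13 descriptors in a single command.
 */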
8235static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
8236 void *data)
8237{
8238#define HCLGE_64_BIT_REG_RTN_DATANUM 4
8239
8240 struct hclge_desc *desc;
8241 u64 *reg_val = data;
8242 __le64 *desc_data;
8243 int cmd_num;
8244 int i, k, n;
8245 int ret;
8246
8247 if (regs_num == 0)
8248 return 0;
8249
8250 cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
8251 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
8252 if (!desc)
8253 return -ENOMEM;
8254
8255 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
8256 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
8257 if (ret) {
8258 dev_err(&hdev->pdev->dev,
8259 "Query 64 bit register cmd failed, ret = %d.\n", ret);
8260 kfree(desc);
8261 return ret;
8262 }
8263
8264 for (i = 0; i < cmd_num; i++) {
8265 if (i == 0) {
8266 desc_data = (__le64 *)(&desc[i].data[0]);
8267 n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
8268 } else {
8269 desc_data = (__le64 *)(&desc[i]);
8270 n = HCLGE_64_BIT_REG_RTN_DATANUM;
8271 }
8272 for (k = 0; k < n; k++) {
8273 *reg_val++ = le64_to_cpu(*desc_data++);
8274
8275 regs_num--;
8276 if (!regs_num)
8277 break;
8278 }
8279 }
8280
8281 kfree(desc);
8282 return 0;
8283}
8284
ea4750ca
JS
8285#define MAX_SEPARATE_NUM 4
8286#define SEPARATOR_VALUE 0xFFFFFFFF
8287#define REG_NUM_PER_LINE 4
8288#define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
8289
77b34110
FL
8290static int hclge_get_regs_len(struct hnae3_handle *handle)
8291{
ea4750ca
JS
8292 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
8293 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
77b34110
FL
8294 struct hclge_vport *vport = hclge_get_vport(handle);
8295 struct hclge_dev *hdev = vport->back;
8296 u32 regs_num_32_bit, regs_num_64_bit;
8297 int ret;
8298
8299 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
8300 if (ret) {
8301 dev_err(&hdev->pdev->dev,
8302 "Get register number failed, ret = %d.\n", ret);
8303 return -EOPNOTSUPP;
8304 }
8305
ea4750ca
JS
8306 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
8307 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
8308 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
8309 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
8310
8311 return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
8312 tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
8313 regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
77b34110
FL
8314}
8315
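/* Note on hclge_get_regs_len(): each register group is padded with
 * SEPARATOR_VALUE words up to a whole number of REG_NUM_PER_LINE-word
 * lines (a full extra separator line when the group is already aligned),
 * which is why the length calculation adds one line per group and matches
 * what hclge_get_regs() below emits.
 */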
8316static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
8317 void *data)
8318{
ea4750ca 8319 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
77b34110
FL
8320 struct hclge_vport *vport = hclge_get_vport(handle);
8321 struct hclge_dev *hdev = vport->back;
8322 u32 regs_num_32_bit, regs_num_64_bit;
ea4750ca
JS
8323 int i, j, reg_um, separator_num;
8324 u32 *reg = data;
77b34110
FL
8325 int ret;
8326
8327 *version = hdev->fw_version;
8328
8329 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
8330 if (ret) {
8331 dev_err(&hdev->pdev->dev,
8332 "Get register number failed, ret = %d.\n", ret);
8333 return;
8334 }
8335
ea4750ca
JS
8336	/* fetch per-PF register values from the PF PCIe register space */
8337 reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
8338 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
8339 for (i = 0; i < reg_um; i++)
8340 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
8341 for (i = 0; i < separator_num; i++)
8342 *reg++ = SEPARATOR_VALUE;
8343
8344 reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
8345 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
8346 for (i = 0; i < reg_um; i++)
8347 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
8348 for (i = 0; i < separator_num; i++)
8349 *reg++ = SEPARATOR_VALUE;
8350
8351 reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
8352 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
8353 for (j = 0; j < kinfo->num_tqps; j++) {
8354 for (i = 0; i < reg_um; i++)
8355 *reg++ = hclge_read_dev(&hdev->hw,
8356 ring_reg_addr_list[i] +
8357 0x200 * j);
8358 for (i = 0; i < separator_num; i++)
8359 *reg++ = SEPARATOR_VALUE;
8360 }
8361
8362 reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
8363 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
8364 for (j = 0; j < hdev->num_msi_used - 1; j++) {
8365 for (i = 0; i < reg_um; i++)
8366 *reg++ = hclge_read_dev(&hdev->hw,
8367 tqp_intr_reg_addr_list[i] +
8368 4 * j);
8369 for (i = 0; i < separator_num; i++)
8370 *reg++ = SEPARATOR_VALUE;
8371 }
8372
8373	/* fetch PF common register values from the firmware */
8374 ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
77b34110
FL
8375 if (ret) {
8376 dev_err(&hdev->pdev->dev,
8377 "Get 32 bit register failed, ret = %d.\n", ret);
8378 return;
8379 }
8380
ea4750ca
JS
8381 reg += regs_num_32_bit;
8382 ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
77b34110
FL
8383 if (ret)
8384 dev_err(&hdev->pdev->dev,
8385 "Get 64 bit register failed, ret = %d.\n", ret);
8386}
8387
f6f75abc 8388static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
07f8e940
JS
8389{
8390 struct hclge_set_led_state_cmd *req;
8391 struct hclge_desc desc;
8392 int ret;
8393
8394 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
8395
8396 req = (struct hclge_set_led_state_cmd *)desc.data;
e4e87715
PL
8397 hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
8398 HCLGE_LED_LOCATE_STATE_S, locate_led_status);
07f8e940
JS
8399
8400 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8401 if (ret)
8402 dev_err(&hdev->pdev->dev,
8403 "Send set led state cmd error, ret =%d\n", ret);
8404
8405 return ret;
8406}
8407
8408enum hclge_led_status {
8409 HCLGE_LED_OFF,
8410 HCLGE_LED_ON,
8411 HCLGE_LED_NO_CHANGE = 0xFF,
8412};
8413
8414static int hclge_set_led_id(struct hnae3_handle *handle,
8415 enum ethtool_phys_id_state status)
8416{
07f8e940
JS
8417 struct hclge_vport *vport = hclge_get_vport(handle);
8418 struct hclge_dev *hdev = vport->back;
07f8e940
JS
8419
8420 switch (status) {
8421 case ETHTOOL_ID_ACTIVE:
f6f75abc 8422 return hclge_set_led_status(hdev, HCLGE_LED_ON);
07f8e940 8423 case ETHTOOL_ID_INACTIVE:
f6f75abc 8424 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
07f8e940 8425 default:
f6f75abc 8426 return -EINVAL;
07f8e940 8427 }
07f8e940
JS
8428}
8429
0979aa0b
FL
8430static void hclge_get_link_mode(struct hnae3_handle *handle,
8431 unsigned long *supported,
8432 unsigned long *advertising)
8433{
8434 unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
8435 struct hclge_vport *vport = hclge_get_vport(handle);
8436 struct hclge_dev *hdev = vport->back;
8437 unsigned int idx = 0;
8438
8439 for (; idx < size; idx++) {
8440 supported[idx] = hdev->hw.mac.supported[idx];
8441 advertising[idx] = hdev->hw.mac.advertising[idx];
8442 }
8443}
8444
1731be4c 8445static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
5c9f6b39
PL
8446{
8447 struct hclge_vport *vport = hclge_get_vport(handle);
8448 struct hclge_dev *hdev = vport->back;
8449
8450 return hclge_config_gro(hdev, enable);
8451}
8452
46a3df9f
S
8453static const struct hnae3_ae_ops hclge_ops = {
8454 .init_ae_dev = hclge_init_ae_dev,
8455 .uninit_ae_dev = hclge_uninit_ae_dev,
6b9a97ee
HT
8456 .flr_prepare = hclge_flr_prepare,
8457 .flr_done = hclge_flr_done,
46a3df9f
S
8458 .init_client_instance = hclge_init_client_instance,
8459 .uninit_client_instance = hclge_uninit_client_instance,
84e095d6
SM
8460 .map_ring_to_vector = hclge_map_ring_to_vector,
8461 .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
46a3df9f 8462 .get_vector = hclge_get_vector,
0d3e6631 8463 .put_vector = hclge_put_vector,
46a3df9f 8464 .set_promisc_mode = hclge_set_promisc_mode,
c39c4d98 8465 .set_loopback = hclge_set_loopback,
46a3df9f
S
8466 .start = hclge_ae_start,
8467 .stop = hclge_ae_stop,
a6d818e3
YL
8468 .client_start = hclge_client_start,
8469 .client_stop = hclge_client_stop,
46a3df9f
S
8470 .get_status = hclge_get_status,
8471 .get_ksettings_an_result = hclge_get_ksettings_an_result,
8472 .update_speed_duplex_h = hclge_update_speed_duplex_h,
8473 .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
8474 .get_media_type = hclge_get_media_type,
8475 .get_rss_key_size = hclge_get_rss_key_size,
8476 .get_rss_indir_size = hclge_get_rss_indir_size,
8477 .get_rss = hclge_get_rss,
8478 .set_rss = hclge_set_rss,
f7db940a 8479 .set_rss_tuple = hclge_set_rss_tuple,
07d29954 8480 .get_rss_tuple = hclge_get_rss_tuple,
46a3df9f
S
8481 .get_tc_size = hclge_get_tc_size,
8482 .get_mac_addr = hclge_get_mac_addr,
8483 .set_mac_addr = hclge_set_mac_addr,
26483246 8484 .do_ioctl = hclge_do_ioctl,
46a3df9f
S
8485 .add_uc_addr = hclge_add_uc_addr,
8486 .rm_uc_addr = hclge_rm_uc_addr,
8487 .add_mc_addr = hclge_add_mc_addr,
8488 .rm_mc_addr = hclge_rm_mc_addr,
8489 .set_autoneg = hclge_set_autoneg,
8490 .get_autoneg = hclge_get_autoneg,
8491 .get_pauseparam = hclge_get_pauseparam,
61387774 8492 .set_pauseparam = hclge_set_pauseparam,
46a3df9f
S
8493 .set_mtu = hclge_set_mtu,
8494 .reset_queue = hclge_reset_tqp,
8495 .get_stats = hclge_get_stats,
8496 .update_stats = hclge_update_stats,
8497 .get_strings = hclge_get_strings,
8498 .get_sset_count = hclge_get_sset_count,
8499 .get_fw_version = hclge_get_fw_version,
8500 .get_mdix_mode = hclge_get_mdix_mode,
391b5e93 8501 .enable_vlan_filter = hclge_enable_vlan_filter,
dc8131d8 8502 .set_vlan_filter = hclge_set_vlan_filter,
46a3df9f 8503 .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
052ece6d 8504 .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
4ed340ab 8505 .reset_event = hclge_reset_event,
720bd583 8506 .set_default_reset_request = hclge_set_def_reset_request,
09f2af64
PL
8507 .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
8508 .set_channels = hclge_set_channels,
482d2e9c 8509 .get_channels = hclge_get_channels,
77b34110
FL
8510 .get_regs_len = hclge_get_regs_len,
8511 .get_regs = hclge_get_regs,
07f8e940 8512 .set_led_id = hclge_set_led_id,
0979aa0b 8513 .get_link_mode = hclge_get_link_mode,
dd74f815
JS
8514 .add_fd_entry = hclge_add_fd_entry,
8515 .del_fd_entry = hclge_del_fd_entry,
6871af29 8516 .del_all_fd_entries = hclge_del_all_fd_entries,
05c2314f
JS
8517 .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
8518 .get_fd_rule_info = hclge_get_fd_rule_info,
8519 .get_fd_all_rules = hclge_get_all_rules,
6871af29 8520 .restore_fd_rules = hclge_restore_fd_entries,
c17852a8 8521 .enable_fd = hclge_enable_fd,
3c666b58 8522 .dbg_run_cmd = hclge_dbg_run_cmd,
381c356e 8523 .handle_hw_ras_error = hclge_handle_hw_ras_error,
4d60291b
HT
8524 .get_hw_reset_stat = hclge_get_hw_reset_stat,
8525 .ae_dev_resetting = hclge_ae_dev_resetting,
8526 .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
5c9f6b39 8527 .set_gro_en = hclge_gro_en,
0c29d191 8528 .get_global_queue_id = hclge_covert_handle_qid_global,
8cdb992f 8529 .set_timer_task = hclge_set_timer_task,
c8a8045b
HT
8530 .mac_connect_phy = hclge_mac_connect_phy,
8531 .mac_disconnect_phy = hclge_mac_disconnect_phy,
46a3df9f
S
8532};
8533
8534static struct hnae3_ae_algo ae_algo = {
8535 .ops = &hclge_ops,
46a3df9f
S
8536 .pdev_id_table = ae_algo_pci_tbl,
8537};
8538
8539static int hclge_init(void)
8540{
8541 pr_info("%s is initializing\n", HCLGE_NAME);
8542
854cf33a
FL
8543 hnae3_register_ae_algo(&ae_algo);
8544
8545 return 0;
46a3df9f
S
8546}
8547
8548static void hclge_exit(void)
8549{
8550 hnae3_unregister_ae_algo(&ae_algo);
8551}
8552module_init(hclge_init);
8553module_exit(hclge_exit);
8554
8555MODULE_LICENSE("GPL");
8556MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
8557MODULE_DESCRIPTION("HCLGE Driver");
8558MODULE_VERSION(HCLGE_MOD_VERSION);