net: hns3: dump more information when tx timeout happens
linux-2.6-block.git: drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1// SPDX-License-Identifier: GPL-2.0+
2// Copyright (c) 2016-2017 Hisilicon Limited.
3
4#include <linux/acpi.h>
5#include <linux/device.h>
6#include <linux/etherdevice.h>
7#include <linux/init.h>
8#include <linux/interrupt.h>
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/netdevice.h>
12#include <linux/pci.h>
13#include <linux/platform_device.h>
14#include <linux/if_vlan.h>
15#include <linux/crash_dump.h>
16#include <net/rtnetlink.h>
17#include "hclge_cmd.h"
18#include "hclge_dcb.h"
19#include "hclge_main.h"
20#include "hclge_mbx.h"
21#include "hclge_mdio.h"
22#include "hclge_tm.h"
23#include "hclge_err.h"
24#include "hnae3.h"
25
26#define HCLGE_NAME "hclge"
27#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
29
30#define HCLGE_BUF_SIZE_UNIT 256
31
32static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
33static int hclge_init_vlan_config(struct hclge_dev *hdev);
34static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
35static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
36static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
37 u16 *allocated_size, bool is_alloc);
38
39static struct hnae3_ae_algo ae_algo;
40
41static const struct pci_device_id ae_algo_pci_tbl[] = {
42 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
43 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
44 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
45 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
46 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
47 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
48 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
49 /* required last entry */
50 {0, }
51};
52
53MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
54
55static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
56 HCLGE_CMDQ_TX_ADDR_H_REG,
57 HCLGE_CMDQ_TX_DEPTH_REG,
58 HCLGE_CMDQ_TX_TAIL_REG,
59 HCLGE_CMDQ_TX_HEAD_REG,
60 HCLGE_CMDQ_RX_ADDR_L_REG,
61 HCLGE_CMDQ_RX_ADDR_H_REG,
62 HCLGE_CMDQ_RX_DEPTH_REG,
63 HCLGE_CMDQ_RX_TAIL_REG,
64 HCLGE_CMDQ_RX_HEAD_REG,
65 HCLGE_VECTOR0_CMDQ_SRC_REG,
66 HCLGE_CMDQ_INTR_STS_REG,
67 HCLGE_CMDQ_INTR_EN_REG,
68 HCLGE_CMDQ_INTR_GEN_REG};
69
70static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
71 HCLGE_VECTOR0_OTER_EN_REG,
72 HCLGE_MISC_RESET_STS_REG,
73 HCLGE_MISC_VECTOR_INT_STS,
74 HCLGE_GLOBAL_RESET_REG,
75 HCLGE_FUN_RST_ING,
76 HCLGE_GRO_EN_REG};
77
78static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
79 HCLGE_RING_RX_ADDR_H_REG,
80 HCLGE_RING_RX_BD_NUM_REG,
81 HCLGE_RING_RX_BD_LENGTH_REG,
82 HCLGE_RING_RX_MERGE_EN_REG,
83 HCLGE_RING_RX_TAIL_REG,
84 HCLGE_RING_RX_HEAD_REG,
85 HCLGE_RING_RX_FBD_NUM_REG,
86 HCLGE_RING_RX_OFFSET_REG,
87 HCLGE_RING_RX_FBD_OFFSET_REG,
88 HCLGE_RING_RX_STASH_REG,
89 HCLGE_RING_RX_BD_ERR_REG,
90 HCLGE_RING_TX_ADDR_L_REG,
91 HCLGE_RING_TX_ADDR_H_REG,
92 HCLGE_RING_TX_BD_NUM_REG,
93 HCLGE_RING_TX_PRIORITY_REG,
94 HCLGE_RING_TX_TC_REG,
95 HCLGE_RING_TX_MERGE_EN_REG,
96 HCLGE_RING_TX_TAIL_REG,
97 HCLGE_RING_TX_HEAD_REG,
98 HCLGE_RING_TX_FBD_NUM_REG,
99 HCLGE_RING_TX_OFFSET_REG,
100 HCLGE_RING_TX_EBD_NUM_REG,
101 HCLGE_RING_TX_EBD_OFFSET_REG,
102 HCLGE_RING_TX_BD_ERR_REG,
103 HCLGE_RING_EN_REG};
104
105static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
106 HCLGE_TQP_INTR_GL0_REG,
107 HCLGE_TQP_INTR_GL1_REG,
108 HCLGE_TQP_INTR_GL2_REG,
109 HCLGE_TQP_INTR_RL_REG};
110
111static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
112 "App Loopback test",
113 "Serdes serial Loopback test",
114 "Serdes parallel Loopback test",
115 "Phy Loopback test"
116};
117
118static const struct hclge_comm_stats_str g_mac_stats_string[] = {
119 {"mac_tx_mac_pause_num",
120 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
121 {"mac_rx_mac_pause_num",
122 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
123 {"mac_tx_control_pkt_num",
124 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
125 {"mac_rx_control_pkt_num",
126 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
127 {"mac_tx_pfc_pkt_num",
128 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
129 {"mac_tx_pfc_pri0_pkt_num",
130 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
131 {"mac_tx_pfc_pri1_pkt_num",
132 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
133 {"mac_tx_pfc_pri2_pkt_num",
134 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
135 {"mac_tx_pfc_pri3_pkt_num",
136 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
137 {"mac_tx_pfc_pri4_pkt_num",
138 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
139 {"mac_tx_pfc_pri5_pkt_num",
140 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
141 {"mac_tx_pfc_pri6_pkt_num",
142 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
143 {"mac_tx_pfc_pri7_pkt_num",
144 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
145 {"mac_rx_pfc_pkt_num",
146 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
147 {"mac_rx_pfc_pri0_pkt_num",
148 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
149 {"mac_rx_pfc_pri1_pkt_num",
150 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
151 {"mac_rx_pfc_pri2_pkt_num",
152 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
153 {"mac_rx_pfc_pri3_pkt_num",
154 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
155 {"mac_rx_pfc_pri4_pkt_num",
156 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
157 {"mac_rx_pfc_pri5_pkt_num",
158 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
159 {"mac_rx_pfc_pri6_pkt_num",
160 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
161 {"mac_rx_pfc_pri7_pkt_num",
162 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
163 {"mac_tx_total_pkt_num",
164 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
165 {"mac_tx_total_oct_num",
166 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
167 {"mac_tx_good_pkt_num",
168 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
169 {"mac_tx_bad_pkt_num",
170 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
171 {"mac_tx_good_oct_num",
172 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
173 {"mac_tx_bad_oct_num",
174 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
175 {"mac_tx_uni_pkt_num",
176 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
177 {"mac_tx_multi_pkt_num",
178 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
179 {"mac_tx_broad_pkt_num",
180 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
181 {"mac_tx_undersize_pkt_num",
182 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
183 {"mac_tx_oversize_pkt_num",
184 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
185 {"mac_tx_64_oct_pkt_num",
186 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
187 {"mac_tx_65_127_oct_pkt_num",
188 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
189 {"mac_tx_128_255_oct_pkt_num",
190 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
191 {"mac_tx_256_511_oct_pkt_num",
192 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
193 {"mac_tx_512_1023_oct_pkt_num",
194 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
195 {"mac_tx_1024_1518_oct_pkt_num",
196 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
197 {"mac_tx_1519_2047_oct_pkt_num",
198 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
199 {"mac_tx_2048_4095_oct_pkt_num",
200 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
201 {"mac_tx_4096_8191_oct_pkt_num",
202 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
203 {"mac_tx_8192_9216_oct_pkt_num",
204 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
205 {"mac_tx_9217_12287_oct_pkt_num",
206 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
207 {"mac_tx_12288_16383_oct_pkt_num",
208 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
209 {"mac_tx_1519_max_good_pkt_num",
210 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
211 {"mac_tx_1519_max_bad_pkt_num",
212 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
213 {"mac_rx_total_pkt_num",
214 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
215 {"mac_rx_total_oct_num",
216 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
217 {"mac_rx_good_pkt_num",
218 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
219 {"mac_rx_bad_pkt_num",
220 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
221 {"mac_rx_good_oct_num",
222 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
223 {"mac_rx_bad_oct_num",
224 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
225 {"mac_rx_uni_pkt_num",
226 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
227 {"mac_rx_multi_pkt_num",
228 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
229 {"mac_rx_broad_pkt_num",
230 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
231 {"mac_rx_undersize_pkt_num",
232 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
233 {"mac_rx_oversize_pkt_num",
234 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
235 {"mac_rx_64_oct_pkt_num",
236 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
237 {"mac_rx_65_127_oct_pkt_num",
238 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
239 {"mac_rx_128_255_oct_pkt_num",
240 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
241 {"mac_rx_256_511_oct_pkt_num",
242 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
243 {"mac_rx_512_1023_oct_pkt_num",
244 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
245 {"mac_rx_1024_1518_oct_pkt_num",
246 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
247 {"mac_rx_1519_2047_oct_pkt_num",
248 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
249 {"mac_rx_2048_4095_oct_pkt_num",
250 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
251 {"mac_rx_4096_8191_oct_pkt_num",
252 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
253 {"mac_rx_8192_9216_oct_pkt_num",
254 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
255 {"mac_rx_9217_12287_oct_pkt_num",
256 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
257 {"mac_rx_12288_16383_oct_pkt_num",
258 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
259 {"mac_rx_1519_max_good_pkt_num",
260 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
261 {"mac_rx_1519_max_bad_pkt_num",
262 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
263
264 {"mac_tx_fragment_pkt_num",
265 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
266 {"mac_tx_undermin_pkt_num",
267 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
268 {"mac_tx_jabber_pkt_num",
269 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
270 {"mac_tx_err_all_pkt_num",
271 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
272 {"mac_tx_from_app_good_pkt_num",
273 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
274 {"mac_tx_from_app_bad_pkt_num",
275 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
276 {"mac_rx_fragment_pkt_num",
277 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
278 {"mac_rx_undermin_pkt_num",
279 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
280 {"mac_rx_jabber_pkt_num",
281 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
282 {"mac_rx_fcs_err_pkt_num",
283 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
284 {"mac_rx_send_app_good_pkt_num",
285 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
286 {"mac_rx_send_app_bad_pkt_num",
287 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
288};
289
290static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
291 {
292 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
293 .ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
294 .mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
295 .mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
296 .i_port_bitmap = 0x1,
297 },
298};
299
300static const u8 hclge_hash_key[] = {
301 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
302 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
303 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
304 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
305 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
306};
307
308static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
309{
310#define HCLGE_MAC_CMD_NUM 21
311
312 u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
313 struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
314 __le64 *desc_data;
315 int i, k, n;
316 int ret;
317
318 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
319 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
320 if (ret) {
321 dev_err(&hdev->pdev->dev,
322 "Get MAC pkt stats fail, status = %d.\n", ret);
323
324 return ret;
325 }
326
327 for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
328 /* for special opcode 0032, only the first desc has the head */
329 if (unlikely(i == 0)) {
330 desc_data = (__le64 *)(&desc[i].data[0]);
331 n = HCLGE_RD_FIRST_STATS_NUM;
332 } else {
333 desc_data = (__le64 *)(&desc[i]);
334 n = HCLGE_RD_OTHER_STATS_NUM;
335 }
336
337 for (k = 0; k < n; k++) {
338 *data += le64_to_cpu(*desc_data);
339 data++;
340 desc_data++;
341 }
342 }
343
344 return 0;
345}
346
347static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
348{
349 u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
350 struct hclge_desc *desc;
351 __le64 *desc_data;
352 u16 i, k, n;
353 int ret;
354
355 desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_KERNEL);
356 if (!desc)
357 return -ENOMEM;
358 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
359 ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
360 if (ret) {
361 kfree(desc);
362 return ret;
363 }
364
365 for (i = 0; i < desc_num; i++) {
366 /* for special opcode 0034, only the first desc has the head */
367 if (i == 0) {
368 desc_data = (__le64 *)(&desc[i].data[0]);
369 n = HCLGE_RD_FIRST_STATS_NUM;
370 } else {
371 desc_data = (__le64 *)(&desc[i]);
372 n = HCLGE_RD_OTHER_STATS_NUM;
373 }
374
375 for (k = 0; k < n; k++) {
376 *data += le64_to_cpu(*desc_data);
377 data++;
378 desc_data++;
379 }
380 }
381
382 kfree(desc);
383
384 return 0;
385}
386
387static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
388{
389 struct hclge_desc desc;
390 __le32 *desc_data;
391 u32 reg_num;
392 int ret;
393
394 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
395 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
396 if (ret)
397 return ret;
398
399 desc_data = (__le32 *)(&desc.data[0]);
400 reg_num = le32_to_cpu(*desc_data);
401
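	/* One descriptor is always needed; the first carries three register
	 * values and each additional descriptor carries four, so the
	 * expression below is a round-up division of (reg_num - 3) by 4
	 * (summary derived from the expression itself).
	 */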
402 *desc_num = 1 + ((reg_num - 3) >> 2) +
403 (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
404
405 return 0;
406}
407
408static int hclge_mac_update_stats(struct hclge_dev *hdev)
409{
410 u32 desc_num;
411 int ret;
412
413 ret = hclge_mac_query_reg_num(hdev, &desc_num);
414
415 /* The firmware supports the new statistics acquisition method */
416 if (!ret)
417 ret = hclge_mac_update_stats_complete(hdev, desc_num);
418 else if (ret == -EOPNOTSUPP)
419 ret = hclge_mac_update_stats_defective(hdev);
420 else
421 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
422
423 return ret;
424}
425
426static int hclge_tqps_update_stats(struct hnae3_handle *handle)
427{
428 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
429 struct hclge_vport *vport = hclge_get_vport(handle);
430 struct hclge_dev *hdev = vport->back;
431 struct hnae3_queue *queue;
432 struct hclge_desc desc[1];
433 struct hclge_tqp *tqp;
434 int ret, i;
435
436 for (i = 0; i < kinfo->num_tqps; i++) {
437 queue = handle->kinfo.tqp[i];
438 tqp = container_of(queue, struct hclge_tqp, q);
439 /* command : HCLGE_OPC_QUERY_IGU_STAT */
440 hclge_cmd_setup_basic_desc(&desc[0],
441 HCLGE_OPC_QUERY_RX_STATUS,
442 true);
443
444 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
445 ret = hclge_cmd_send(&hdev->hw, desc, 1);
446 if (ret) {
447 dev_err(&hdev->pdev->dev,
448 "Query tqp stat fail, status = %d,queue = %d\n",
449 ret, i);
450 return ret;
451 }
452 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
453 le32_to_cpu(desc[0].data[1]);
454 }
455
456 for (i = 0; i < kinfo->num_tqps; i++) {
457 queue = handle->kinfo.tqp[i];
458 tqp = container_of(queue, struct hclge_tqp, q);
459 /* command : HCLGE_OPC_QUERY_IGU_STAT */
460 hclge_cmd_setup_basic_desc(&desc[0],
461 HCLGE_OPC_QUERY_TX_STATUS,
462 true);
463
464 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
465 ret = hclge_cmd_send(&hdev->hw, desc, 1);
466 if (ret) {
467 dev_err(&hdev->pdev->dev,
468 "Query tqp stat fail, status = %d,queue = %d\n",
469 ret, i);
470 return ret;
471 }
472 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
473 le32_to_cpu(desc[0].data[1]);
474 }
475
476 return 0;
477}
478
479static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
480{
481 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
482 struct hclge_tqp *tqp;
483 u64 *buff = data;
484 int i;
485
486 for (i = 0; i < kinfo->num_tqps; i++) {
487 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
488 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
489 }
490
491 for (i = 0; i < kinfo->num_tqps; i++) {
492 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
493 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
494 }
495
496 return buff;
497}
498
499static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
500{
501 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
502
503 return kinfo->num_tqps * (2);
504}
505
506static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
507{
508 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
509 u8 *buff = data;
510 int i = 0;
511
512 for (i = 0; i < kinfo->num_tqps; i++) {
513 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
514 struct hclge_tqp, q);
515 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
516 tqp->index);
517 buff = buff + ETH_GSTRING_LEN;
518 }
519
520 for (i = 0; i < kinfo->num_tqps; i++) {
521 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
522 struct hclge_tqp, q);
523 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
524 tqp->index);
525 buff = buff + ETH_GSTRING_LEN;
526 }
527
528 return buff;
529}
530
531static u64 *hclge_comm_get_stats(void *comm_stats,
532 const struct hclge_comm_stats_str strs[],
533 int size, u64 *data)
534{
535 u64 *buf = data;
536 u32 i;
537
538 for (i = 0; i < size; i++)
539 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
540
541 return buf + size;
542}
543
544static u8 *hclge_comm_get_strings(u32 stringset,
545 const struct hclge_comm_stats_str strs[],
546 int size, u8 *data)
547{
548 char *buff = (char *)data;
549 u32 i;
550
551 if (stringset != ETH_SS_STATS)
552 return buff;
553
554 for (i = 0; i < size; i++) {
555 snprintf(buff, ETH_GSTRING_LEN,
556 strs[i].desc);
557 buff = buff + ETH_GSTRING_LEN;
558 }
559
560 return (u8 *)buff;
561}
562
563static void hclge_update_stats_for_all(struct hclge_dev *hdev)
564{
565 struct hnae3_handle *handle;
566 int status;
567
568 handle = &hdev->vport[0].nic;
569 if (handle->client) {
570 status = hclge_tqps_update_stats(handle);
571 if (status) {
572 dev_err(&hdev->pdev->dev,
573 "Update TQPS stats fail, status = %d.\n",
574 status);
575 }
576 }
577
578 status = hclge_mac_update_stats(hdev);
579 if (status)
580 dev_err(&hdev->pdev->dev,
581 "Update MAC stats fail, status = %d.\n", status);
582}
583
584static void hclge_update_stats(struct hnae3_handle *handle,
585 struct net_device_stats *net_stats)
586{
587 struct hclge_vport *vport = hclge_get_vport(handle);
588 struct hclge_dev *hdev = vport->back;
589 int status;
590
591 if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
592 return;
593
594 status = hclge_mac_update_stats(hdev);
595 if (status)
596 dev_err(&hdev->pdev->dev,
597 "Update MAC stats fail, status = %d.\n",
598 status);
599
600 status = hclge_tqps_update_stats(handle);
601 if (status)
602 dev_err(&hdev->pdev->dev,
603 "Update TQPS stats fail, status = %d.\n",
604 status);
605
606 clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
607}
608
609static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
610{
611#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
612 HNAE3_SUPPORT_PHY_LOOPBACK |\
613 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
614 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
615
616 struct hclge_vport *vport = hclge_get_vport(handle);
617 struct hclge_dev *hdev = vport->back;
618 int count = 0;
619
620 /* Loopback test support rules:
621 * mac: only GE mode support
622 * serdes: all mac mode will support include GE/XGE/LGE/CGE
623 * phy: only support when phy device exist on board
624 */
625 if (stringset == ETH_SS_TEST) {
626 /* clear loopback bit flags at first */
627 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
628 if (hdev->pdev->revision >= 0x21 ||
629 hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
630 hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
631 hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
632 count += 1;
633 handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
634 }
635
636 count += 2;
637 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
638 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
639 } else if (stringset == ETH_SS_STATS) {
640 count = ARRAY_SIZE(g_mac_stats_string) +
641 hclge_tqps_get_sset_count(handle, stringset);
642 }
643
644 return count;
645}
646
647static void hclge_get_strings(struct hnae3_handle *handle,
648 u32 stringset,
649 u8 *data)
650{
651 u8 *p = (char *)data;
652 int size;
653
654 if (stringset == ETH_SS_STATS) {
655 size = ARRAY_SIZE(g_mac_stats_string);
656 p = hclge_comm_get_strings(stringset,
657 g_mac_stats_string,
658 size,
659 p);
660 p = hclge_tqps_get_strings(handle, p);
661 } else if (stringset == ETH_SS_TEST) {
662 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
663 memcpy(p,
664 hns3_nic_test_strs[HNAE3_LOOP_APP],
665 ETH_GSTRING_LEN);
666 p += ETH_GSTRING_LEN;
667 }
668 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
669 memcpy(p,
670 hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
671 ETH_GSTRING_LEN);
672 p += ETH_GSTRING_LEN;
673 }
674 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
675 memcpy(p,
676 hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
677 ETH_GSTRING_LEN);
678 p += ETH_GSTRING_LEN;
679 }
680 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
681 memcpy(p,
682 hns3_nic_test_strs[HNAE3_LOOP_PHY],
683 ETH_GSTRING_LEN);
684 p += ETH_GSTRING_LEN;
685 }
686 }
687}
688
689static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
690{
691 struct hclge_vport *vport = hclge_get_vport(handle);
692 struct hclge_dev *hdev = vport->back;
693 u64 *p;
694
695 p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
696 g_mac_stats_string,
697 ARRAY_SIZE(g_mac_stats_string),
698 data);
699 p = hclge_tqps_get_stats(handle, p);
700}
701
702static void hclge_get_mac_pause_stat(struct hnae3_handle *handle, u64 *tx_cnt,
703 u64 *rx_cnt)
704{
705 struct hclge_vport *vport = hclge_get_vport(handle);
706 struct hclge_dev *hdev = vport->back;
707
708 *tx_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
709 *rx_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
710}
711
712static int hclge_parse_func_status(struct hclge_dev *hdev,
713 struct hclge_func_status_cmd *status)
714{
715 if (!(status->pf_state & HCLGE_PF_STATE_DONE))
716 return -EINVAL;
717
718 /* Set the pf to main pf */
719 if (status->pf_state & HCLGE_PF_STATE_MAIN)
720 hdev->flag |= HCLGE_FLAG_MAIN;
721 else
722 hdev->flag &= ~HCLGE_FLAG_MAIN;
723
724 return 0;
725}
726
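/* Query the function status from firmware; the command is retried a few
 * times in the loop below because the PF state may not be reported as done
 * immediately (see the "Check pf reset is done" check).
 */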
727static int hclge_query_function_status(struct hclge_dev *hdev)
728{
729 struct hclge_func_status_cmd *req;
730 struct hclge_desc desc;
731 int timeout = 0;
732 int ret;
733
734 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
735 req = (struct hclge_func_status_cmd *)desc.data;
736
737 do {
738 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
739 if (ret) {
740 dev_err(&hdev->pdev->dev,
741 "query function status failed %d.\n",
742 ret);
743
744 return ret;
745 }
746
747 /* Check pf reset is done */
748 if (req->pf_state)
749 break;
750 usleep_range(1000, 2000);
751 } while (timeout++ < 5);
752
753 ret = hclge_parse_func_status(hdev, req);
754
755 return ret;
756}
757
758static int hclge_query_pf_resource(struct hclge_dev *hdev)
759{
760 struct hclge_pf_res_cmd *req;
761 struct hclge_desc desc;
762 int ret;
763
764 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
765 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
766 if (ret) {
767 dev_err(&hdev->pdev->dev,
768 "query pf resource failed %d.\n", ret);
769 return ret;
770 }
771
772 req = (struct hclge_pf_res_cmd *)desc.data;
773 hdev->num_tqps = __le16_to_cpu(req->tqp_num);
774 hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
775
776 if (req->tx_buf_size)
777 hdev->tx_buf_size =
778 __le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
779 else
780 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
781
782 hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
783
784 if (req->dv_buf_size)
785 hdev->dv_buf_size =
786 __le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
787 else
788 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
789
790 hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
791
792 if (hnae3_dev_roce_supported(hdev)) {
793 hdev->roce_base_msix_offset =
794 hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
795 HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
796 hdev->num_roce_msi =
797 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
798 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
799
800 /* PF should have NIC vectors and Roce vectors,
801 * NIC vectors are queued before Roce vectors.
802 */
803 hdev->num_msi = hdev->num_roce_msi +
804 hdev->roce_base_msix_offset;
805 } else {
806 hdev->num_msi =
807 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
808 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
809 }
810
811 return 0;
812}
813
814static int hclge_parse_speed(int speed_cmd, int *speed)
815{
816 switch (speed_cmd) {
817 case 6:
818 *speed = HCLGE_MAC_SPEED_10M;
819 break;
820 case 7:
821 *speed = HCLGE_MAC_SPEED_100M;
822 break;
823 case 0:
824 *speed = HCLGE_MAC_SPEED_1G;
825 break;
826 case 1:
827 *speed = HCLGE_MAC_SPEED_10G;
828 break;
829 case 2:
830 *speed = HCLGE_MAC_SPEED_25G;
831 break;
832 case 3:
833 *speed = HCLGE_MAC_SPEED_40G;
834 break;
835 case 4:
836 *speed = HCLGE_MAC_SPEED_50G;
837 break;
838 case 5:
839 *speed = HCLGE_MAC_SPEED_100G;
840 break;
841 default:
842 return -EINVAL;
843 }
844
845 return 0;
846}
847
848static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
849 u8 speed_ability)
850{
851 unsigned long *supported = hdev->hw.mac.supported;
852
853 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
854 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
855 supported);
856
857 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
858 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
859 supported);
860
861 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
862 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
863 supported);
864
865 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
866 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
867 supported);
868
869 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
870 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
871 supported);
872
873 linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
874 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
875}
876
877static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
878 u8 speed_ability)
879{
880 unsigned long *supported = hdev->hw.mac.supported;
881
882 /* default to support all speed for GE port */
883 if (!speed_ability)
884 speed_ability = HCLGE_SUPPORT_GE;
885
886 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
887 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
888 supported);
889
890 if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
891 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
892 supported);
893 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
894 supported);
895 }
896
897 if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
898 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
899 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
900 }
901
902 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
903 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
904 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
905}
906
907static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
908{
909 u8 media_type = hdev->hw.mac.media_type;
910
911 if (media_type == HNAE3_MEDIA_TYPE_FIBER)
912 hclge_parse_fiber_link_mode(hdev, speed_ability);
913 else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
914 hclge_parse_copper_link_mode(hdev, speed_ability);
915}
916
917static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
918{
919 struct hclge_cfg_param_cmd *req;
920 u64 mac_addr_tmp_high;
921 u64 mac_addr_tmp;
922 int i;
923
924 req = (struct hclge_cfg_param_cmd *)desc[0].data;
925
926 /* get the configuration */
927 cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
928 HCLGE_CFG_VMDQ_M,
929 HCLGE_CFG_VMDQ_S);
930 cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
931 HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
932 cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
933 HCLGE_CFG_TQP_DESC_N_M,
934 HCLGE_CFG_TQP_DESC_N_S);
935
936 cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
937 HCLGE_CFG_PHY_ADDR_M,
938 HCLGE_CFG_PHY_ADDR_S);
939 cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
940 HCLGE_CFG_MEDIA_TP_M,
941 HCLGE_CFG_MEDIA_TP_S);
942 cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
943 HCLGE_CFG_RX_BUF_LEN_M,
944 HCLGE_CFG_RX_BUF_LEN_S);
945 /* get mac_address */
946 mac_addr_tmp = __le32_to_cpu(req->param[2]);
947 mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
948 HCLGE_CFG_MAC_ADDR_H_M,
949 HCLGE_CFG_MAC_ADDR_H_S);
950
951 mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
952
953 cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
954 HCLGE_CFG_DEFAULT_SPEED_M,
955 HCLGE_CFG_DEFAULT_SPEED_S);
956 cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
957 HCLGE_CFG_RSS_SIZE_M,
958 HCLGE_CFG_RSS_SIZE_S);
959
960 for (i = 0; i < ETH_ALEN; i++)
961 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
962
963 req = (struct hclge_cfg_param_cmd *)desc[1].data;
964 cfg->numa_node_map = __le32_to_cpu(req->param[0]);
965
966 cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
967 HCLGE_CFG_SPEED_ABILITY_M,
968 HCLGE_CFG_SPEED_ABILITY_S);
969 cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
970 HCLGE_CFG_UMV_TBL_SPACE_M,
971 HCLGE_CFG_UMV_TBL_SPACE_S);
972 if (!cfg->umv_space)
973 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
974}
975
976/* hclge_get_cfg: query the static parameter from flash
977 * @hdev: pointer to struct hclge_dev
978 * @hcfg: the config structure to be gotten
979 */
980static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
981{
982 struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
983 struct hclge_cfg_param_cmd *req;
984 int i, ret;
985
986 for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
987 u32 offset = 0;
988
989 req = (struct hclge_cfg_param_cmd *)desc[i].data;
990 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
991 true);
992 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
993 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
994 /* Len should be in units of 4 bytes when sent to hardware */
995 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
996 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
997 req->offset = cpu_to_le32(offset);
998 }
999
1000 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1001 if (ret) {
1002 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1003 return ret;
1004 }
1005
1006 hclge_parse_cfg(hcfg, desc);
1007
1008 return 0;
1009}
1010
1011static int hclge_get_cap(struct hclge_dev *hdev)
1012{
1013 int ret;
1014
1015 ret = hclge_query_function_status(hdev);
1016 if (ret) {
1017 dev_err(&hdev->pdev->dev,
1018 "query function status error %d.\n", ret);
1019 return ret;
1020 }
1021
1022 /* get pf resource */
1023 ret = hclge_query_pf_resource(hdev);
1024 if (ret)
1025 dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
1026
1027 return ret;
1028}
1029
1030static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1031{
1032#define HCLGE_MIN_TX_DESC 64
1033#define HCLGE_MIN_RX_DESC 64
1034
1035 if (!is_kdump_kernel())
1036 return;
1037
1038 dev_info(&hdev->pdev->dev,
1039 "Running kdump kernel. Using minimal resources\n");
1040
1041 /* minimal queue pairs equals to the number of vports */
1042 hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1043 hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1044 hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1045}
1046
1047static int hclge_configure(struct hclge_dev *hdev)
1048{
1049 struct hclge_cfg cfg;
1050 int ret, i;
1051
1052 ret = hclge_get_cfg(hdev, &cfg);
1053 if (ret) {
1054 dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
1055 return ret;
1056 }
1057
1058 hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1059 hdev->base_tqp_pid = 0;
1060 hdev->rss_size_max = cfg.rss_size_max;
1061 hdev->rx_buf_len = cfg.rx_buf_len;
1062 ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1063 hdev->hw.mac.media_type = cfg.media_type;
1064 hdev->hw.mac.phy_addr = cfg.phy_addr;
1065 hdev->num_tx_desc = cfg.tqp_desc_num;
1066 hdev->num_rx_desc = cfg.tqp_desc_num;
1067 hdev->tm_info.num_pg = 1;
1068 hdev->tc_max = cfg.tc_num;
1069 hdev->tm_info.hw_pfc_map = 0;
1070 hdev->wanted_umv_size = cfg.umv_space;
1071
1072 if (hnae3_dev_fd_supported(hdev))
1073 hdev->fd_en = true;
1074
1075 ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1076 if (ret) {
1077 dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
1078 return ret;
1079 }
1080
1081 hclge_parse_link_mode(hdev, cfg.speed_ability);
1082
1083 if ((hdev->tc_max > HNAE3_MAX_TC) ||
1084 (hdev->tc_max < 1)) {
1085 dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
1086 hdev->tc_max);
1087 hdev->tc_max = 1;
1088 }
1089
1090 /* Dev does not support DCB */
1091 if (!hnae3_dev_dcb_supported(hdev)) {
1092 hdev->tc_max = 1;
1093 hdev->pfc_max = 0;
1094 } else {
1095 hdev->pfc_max = hdev->tc_max;
1096 }
1097
1098 hdev->tm_info.num_tc = 1;
1099
1100 /* Currently non-contiguous TCs are not supported */
1101 for (i = 0; i < hdev->tm_info.num_tc; i++)
1102 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1103
1104 hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1105
1106 hclge_init_kdump_kernel_config(hdev);
1107
1108 return ret;
1109}
1110
1111static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
1112 int tso_mss_max)
1113{
1114 struct hclge_cfg_tso_status_cmd *req;
1115 struct hclge_desc desc;
1116 u16 tso_mss;
1117
1118 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1119
1120 req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1121
1122 tso_mss = 0;
1123 hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1124 HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1125 req->tso_mss_min = cpu_to_le16(tso_mss);
1126
1127 tso_mss = 0;
1128 hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1129 HCLGE_TSO_MSS_MIN_S, tso_mss_max);
1130 req->tso_mss_max = cpu_to_le16(tso_mss);
1131
1132 return hclge_cmd_send(&hdev->hw, &desc, 1);
1133}
1134
1135static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1136{
1137 struct hclge_cfg_gro_status_cmd *req;
1138 struct hclge_desc desc;
1139 int ret;
1140
1141 if (!hnae3_dev_gro_supported(hdev))
1142 return 0;
1143
1144 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1145 req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1146
1147 req->gro_en = cpu_to_le16(en ? 1 : 0);
1148
1149 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1150 if (ret)
1151 dev_err(&hdev->pdev->dev,
1152 "GRO hardware config cmd failed, ret = %d\n", ret);
1153
1154 return ret;
1155}
1156
1157static int hclge_alloc_tqps(struct hclge_dev *hdev)
1158{
1159 struct hclge_tqp *tqp;
1160 int i;
1161
1162 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1163 sizeof(struct hclge_tqp), GFP_KERNEL);
1164 if (!hdev->htqp)
1165 return -ENOMEM;
1166
1167 tqp = hdev->htqp;
1168
1169 for (i = 0; i < hdev->num_tqps; i++) {
1170 tqp->dev = &hdev->pdev->dev;
1171 tqp->index = i;
1172
1173 tqp->q.ae_algo = &ae_algo;
1174 tqp->q.buf_size = hdev->rx_buf_len;
1175 tqp->q.tx_desc_num = hdev->num_tx_desc;
1176 tqp->q.rx_desc_num = hdev->num_rx_desc;
1177 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1178 i * HCLGE_TQP_REG_SIZE;
1179
1180 tqp++;
1181 }
1182
1183 return 0;
1184}
1185
1186static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1187 u16 tqp_pid, u16 tqp_vid, bool is_pf)
1188{
1189 struct hclge_tqp_map_cmd *req;
1190 struct hclge_desc desc;
1191 int ret;
1192
1193 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1194
1195 req = (struct hclge_tqp_map_cmd *)desc.data;
1196 req->tqp_id = cpu_to_le16(tqp_pid);
1197 req->tqp_vf = func_id;
1198 req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
1199 1 << HCLGE_TQP_MAP_EN_B;
1200 req->tqp_vid = cpu_to_le16(tqp_vid);
1201
1202 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1203 if (ret)
1204 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1205
1206 return ret;
1207}
1208
1209static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1210{
1211 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1212 struct hclge_dev *hdev = vport->back;
1213 int i, alloced;
1214
1215 for (i = 0, alloced = 0; i < hdev->num_tqps &&
1216 alloced < num_tqps; i++) {
1217 if (!hdev->htqp[i].alloced) {
1218 hdev->htqp[i].q.handle = &vport->nic;
1219 hdev->htqp[i].q.tqp_index = alloced;
1220 hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1221 hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1222 kinfo->tqp[alloced] = &hdev->htqp[i].q;
1223 hdev->htqp[i].alloced = true;
1224 alloced++;
1225 }
1226 }
1227 vport->alloc_tqps = alloced;
1228 kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1229 vport->alloc_tqps / hdev->tm_info.num_tc);
1230
1231 return 0;
1232}
1233
1234static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1235 u16 num_tx_desc, u16 num_rx_desc)
1236
1237{
1238 struct hnae3_handle *nic = &vport->nic;
1239 struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1240 struct hclge_dev *hdev = vport->back;
1241 int ret;
1242
1243 kinfo->num_tx_desc = num_tx_desc;
1244 kinfo->num_rx_desc = num_rx_desc;
1245
1246 kinfo->rx_buf_len = hdev->rx_buf_len;
1247
1248 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1249 sizeof(struct hnae3_queue *), GFP_KERNEL);
1250 if (!kinfo->tqp)
1251 return -ENOMEM;
1252
1253 ret = hclge_assign_tqp(vport, num_tqps);
1254 if (ret)
1255 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1256
1257 return ret;
1258}
1259
1260static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1261 struct hclge_vport *vport)
1262{
1263 struct hnae3_handle *nic = &vport->nic;
1264 struct hnae3_knic_private_info *kinfo;
1265 u16 i;
1266
1267 kinfo = &nic->kinfo;
1268 for (i = 0; i < vport->alloc_tqps; i++) {
1269 struct hclge_tqp *q =
1270 container_of(kinfo->tqp[i], struct hclge_tqp, q);
1271 bool is_pf;
1272 int ret;
1273
1274 is_pf = !(vport->vport_id);
1275 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1276 i, is_pf);
1277 if (ret)
1278 return ret;
1279 }
1280
1281 return 0;
1282}
1283
1284static int hclge_map_tqp(struct hclge_dev *hdev)
1285{
1286 struct hclge_vport *vport = hdev->vport;
1287 u16 i, num_vport;
1288
1289 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1290 for (i = 0; i < num_vport; i++) {
1291 int ret;
1292
1293 ret = hclge_map_tqp_to_vport(hdev, vport);
1294 if (ret)
1295 return ret;
1296
1297 vport++;
1298 }
1299
1300 return 0;
1301}
1302
1303static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
1304{
1305 /* this would be initialized later */
1306}
1307
1308static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1309{
1310 struct hnae3_handle *nic = &vport->nic;
1311 struct hclge_dev *hdev = vport->back;
1312 int ret;
1313
1314 nic->pdev = hdev->pdev;
1315 nic->ae_algo = &ae_algo;
1316 nic->numa_node_mask = hdev->numa_node_mask;
1317
1318 if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
1319 ret = hclge_knic_setup(vport, num_tqps,
1320 hdev->num_tx_desc, hdev->num_rx_desc);
1321
1322 if (ret) {
1323 dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
1324 ret);
1325 return ret;
1326 }
1327 } else {
1328 hclge_unic_setup(vport, num_tqps);
1329 }
1330
1331 return 0;
1332}
1333
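/* One vport is allocated for the PF main NIC plus one per VMDq vport and
 * one per requested VF; TQPs are split evenly across the vports and any
 * remainder goes to the main (PF) vport (summary of the function below).
 */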
1334static int hclge_alloc_vport(struct hclge_dev *hdev)
1335{
1336 struct pci_dev *pdev = hdev->pdev;
1337 struct hclge_vport *vport;
1338 u32 tqp_main_vport;
1339 u32 tqp_per_vport;
1340 int num_vport, i;
1341 int ret;
1342
1343 /* We need to alloc a vport for main NIC of PF */
1344 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1345
1346 if (hdev->num_tqps < num_vport) {
1347 dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
1348 hdev->num_tqps, num_vport);
1349 return -EINVAL;
1350 }
1351
1352 /* Alloc the same number of TQPs for every vport */
1353 tqp_per_vport = hdev->num_tqps / num_vport;
1354 tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1355
1356 vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1357 GFP_KERNEL);
1358 if (!vport)
1359 return -ENOMEM;
1360
1361 hdev->vport = vport;
1362 hdev->num_alloc_vport = num_vport;
1363
1364 if (IS_ENABLED(CONFIG_PCI_IOV))
1365 hdev->num_alloc_vfs = hdev->num_req_vfs;
1366
1367 for (i = 0; i < num_vport; i++) {
1368 vport->back = hdev;
1369 vport->vport_id = i;
1370 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1371 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1372 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1373 INIT_LIST_HEAD(&vport->vlan_list);
1374 INIT_LIST_HEAD(&vport->uc_mac_list);
1375 INIT_LIST_HEAD(&vport->mc_mac_list);
1376
1377 if (i == 0)
1378 ret = hclge_vport_setup(vport, tqp_main_vport);
1379 else
1380 ret = hclge_vport_setup(vport, tqp_per_vport);
1381 if (ret) {
1382 dev_err(&pdev->dev,
1383 "vport setup failed for vport %d, %d\n",
1384 i, ret);
1385 return ret;
1386 }
1387
1388 vport++;
1389 }
1390
1391 return 0;
1392}
1393
1394static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1395 struct hclge_pkt_buf_alloc *buf_alloc)
1396{
1397/* TX buffer size is in units of 128 bytes */
1398#define HCLGE_BUF_SIZE_UNIT_SHIFT 7
1399#define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15)
1400 struct hclge_tx_buff_alloc_cmd *req;
1401 struct hclge_desc desc;
1402 int ret;
1403 u8 i;
1404
1405 req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1406
1407 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1408 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1409 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1410
1411 req->tx_pkt_buff[i] =
1412 cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1413 HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1414 }
1415
1416 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1417 if (ret)
1418 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1419 ret);
1420
1421 return ret;
1422}
1423
1424static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1425 struct hclge_pkt_buf_alloc *buf_alloc)
1426{
1427 int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1428
1429 if (ret)
1430 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1431
1432 return ret;
1433}
1434
1435static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1436{
1437 int i, cnt = 0;
1438
1439 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1440 if (hdev->hw_tc_map & BIT(i))
1441 cnt++;
1442 return cnt;
1443}
1444
1445/* Get the number of pfc enabled TCs, which have private buffer */
1446static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1447 struct hclge_pkt_buf_alloc *buf_alloc)
1448{
1449 struct hclge_priv_buf *priv;
1450 int i, cnt = 0;
1451
1452 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1453 priv = &buf_alloc->priv_buf[i];
1454 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1455 priv->enable)
1456 cnt++;
1457 }
1458
1459 return cnt;
1460}
1461
1462/* Get the number of pfc disabled TCs, which have private buffer */
1463static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1464 struct hclge_pkt_buf_alloc *buf_alloc)
1465{
1466 struct hclge_priv_buf *priv;
1467 int i, cnt = 0;
1468
1469 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1470 priv = &buf_alloc->priv_buf[i];
1471 if (hdev->hw_tc_map & BIT(i) &&
1472 !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1473 priv->enable)
1474 cnt++;
1475 }
1476
1477 return cnt;
1478}
1479
1480static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1481{
1482 struct hclge_priv_buf *priv;
1483 u32 rx_priv = 0;
1484 int i;
1485
1486 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1487 priv = &buf_alloc->priv_buf[i];
1488 if (priv->enable)
1489 rx_priv += priv->buf_size;
1490 }
1491 return rx_priv;
1492}
1493
1494static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1495{
1496 u32 i, total_tx_size = 0;
1497
1498 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1499 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1500
1501 return total_tx_size;
1502}
1503
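/* Check whether the rx packet buffer left after the per-TC private buffers
 * is large enough to act as the shared buffer; if it is, fill in the shared
 * buffer size and the per-TC high/low thresholds (summary of the checks
 * performed in the function below).
 */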
1504static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1505 struct hclge_pkt_buf_alloc *buf_alloc,
1506 u32 rx_all)
1507{
1508 u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1509 u32 tc_num = hclge_get_tc_num(hdev);
1510 u32 shared_buf, aligned_mps;
1511 u32 rx_priv;
1512 int i;
1513
1514 aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1515
1516 if (hnae3_dev_dcb_supported(hdev))
1517 shared_buf_min = 2 * aligned_mps + hdev->dv_buf_size;
1518 else
1519 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1520 + hdev->dv_buf_size;
1521
1522 shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1523 shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1524 HCLGE_BUF_SIZE_UNIT);
1525
1526 rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1527 if (rx_all < rx_priv + shared_std)
1528 return false;
1529
1530 shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1531 buf_alloc->s_buf.buf_size = shared_buf;
1532 if (hnae3_dev_dcb_supported(hdev)) {
1533 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1534 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1535 - roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
1536 } else {
1537 buf_alloc->s_buf.self.high = aligned_mps +
1538 HCLGE_NON_DCB_ADDITIONAL_BUF;
1539 buf_alloc->s_buf.self.low = aligned_mps;
1540 }
1541
1542 if (hnae3_dev_dcb_supported(hdev)) {
1543 if (tc_num)
1544 hi_thrd = (shared_buf - hdev->dv_buf_size) / tc_num;
1545 else
1546 hi_thrd = shared_buf - hdev->dv_buf_size;
1547
1548 hi_thrd = max_t(u32, hi_thrd, 2 * aligned_mps);
1549 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1550 lo_thrd = hi_thrd - aligned_mps / 2;
1551 } else {
1552 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1553 lo_thrd = aligned_mps;
1554 }
1555
1556 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1557 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1558 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
1559 }
1560
1561 return true;
1562}
1563
1564static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1565 struct hclge_pkt_buf_alloc *buf_alloc)
1566{
1567 u32 i, total_size;
1568
1569 total_size = hdev->pkt_buf_size;
1570
1571 /* alloc tx buffer for all enabled tc */
1572 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1573 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1574
1575 if (hdev->hw_tc_map & BIT(i)) {
1576 if (total_size < hdev->tx_buf_size)
1577 return -ENOMEM;
1578
1579 priv->tx_buf_size = hdev->tx_buf_size;
1580 } else {
1581 priv->tx_buf_size = 0;
1582 }
1583
1584 total_size -= priv->tx_buf_size;
1585 }
1586
1587 return 0;
1588}
1589
1590static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
1591 struct hclge_pkt_buf_alloc *buf_alloc)
1592{
1593 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1594 u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1595 int i;
1596
1597 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1598 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1599
1600 priv->enable = 0;
1601 priv->wl.low = 0;
1602 priv->wl.high = 0;
1603 priv->buf_size = 0;
1604
1605 if (!(hdev->hw_tc_map & BIT(i)))
1606 continue;
1607
1608 priv->enable = 1;
1609
1610 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1611 priv->wl.low = max ? aligned_mps : 256;
1612 priv->wl.high = roundup(priv->wl.low + aligned_mps,
1613 HCLGE_BUF_SIZE_UNIT);
1614 } else {
1615 priv->wl.low = 0;
1616 priv->wl.high = max ? (aligned_mps * 2) : aligned_mps;
1617 }
1618
1619 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
1620 }
1621
1622 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1623}
1624
1625static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1626 struct hclge_pkt_buf_alloc *buf_alloc)
1627{
1628 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1629 int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1630 int i;
1631
1632 /* let the last to be cleared first */
1633 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1634 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1635
1636 if (hdev->hw_tc_map & BIT(i) &&
1637 !(hdev->tm_info.hw_pfc_map & BIT(i))) {
1638 /* Clear the no pfc TC private buffer */
1639 priv->wl.low = 0;
1640 priv->wl.high = 0;
1641 priv->buf_size = 0;
1642 priv->enable = 0;
1643 no_pfc_priv_num--;
1644 }
1645
1646 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1647 no_pfc_priv_num == 0)
1648 break;
1649 }
1650
1651 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1652}
1653
1654static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1655 struct hclge_pkt_buf_alloc *buf_alloc)
1656{
1657 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1658 int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1659 int i;
1660
1661 /* let the last to be cleared first */
1662 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1663 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1664
1665 if (hdev->hw_tc_map & BIT(i) &&
1666 hdev->tm_info.hw_pfc_map & BIT(i)) {
1667 /* Reduce the number of pfc TC with private buffer */
1668 priv->wl.low = 0;
1669 priv->enable = 0;
1670 priv->wl.high = 0;
1671 priv->buf_size = 0;
1672 pfc_priv_num--;
1673 }
1674
1675 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1676 pfc_priv_num == 0)
1677 break;
1678 }
1679
1680 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1681}
1682
1683/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
1684 * @hdev: pointer to struct hclge_dev
1685 * @buf_alloc: pointer to buffer calculation data
1686 * @return: 0: calculation successful, negative: fail
1687 */
1688static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
1689 struct hclge_pkt_buf_alloc *buf_alloc)
1690{
1691 /* When DCB is not supported, rx private buffer is not allocated. */
1692 if (!hnae3_dev_dcb_supported(hdev)) {
1693 u32 rx_all = hdev->pkt_buf_size;
1694
1695 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
1696 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1697 return -ENOMEM;
1698
1699 return 0;
1700 }
1701
1702 if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
1703 return 0;
1704
1705 /* try to decrease the buffer size */
1706 if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
1707 return 0;
1708
1709 if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
1710 return 0;
1711
1712 if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
1713 return 0;
1714
1715 return -ENOMEM;
1716}
1717
1718static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
1719 struct hclge_pkt_buf_alloc *buf_alloc)
1720{
1721 struct hclge_rx_priv_buff_cmd *req;
1722 struct hclge_desc desc;
1723 int ret;
1724 int i;
1725
1726 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
1727 req = (struct hclge_rx_priv_buff_cmd *)desc.data;
1728
1729 /* Alloc private buffer TCs */
1730 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1731 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1732
1733 req->buf_num[i] =
1734 cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
1735 req->buf_num[i] |=
1736 cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
1737 }
1738
1739 req->shared_buf =
1740 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
1741 (1 << HCLGE_TC0_PRI_BUF_EN_B));
1742
46a3df9f 1743 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 1744 if (ret)
46a3df9f
S
1745 dev_err(&hdev->pdev->dev,
1746 "rx private buffer alloc cmd failed %d\n", ret);
46a3df9f 1747
3f639907 1748 return ret;
46a3df9f
S
1749}
1750
acf61ecd
YL
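/* Program the per-TC rx private buffer high/low waterlines; two chained
 * descriptors are used to cover all TCs.
 */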
1751static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
1752 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f
S
1753{
1754 struct hclge_rx_priv_wl_buf *req;
1755 struct hclge_priv_buf *priv;
1756 struct hclge_desc desc[2];
1757 int i, j;
1758 int ret;
1759
1760 for (i = 0; i < 2; i++) {
1761 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
1762 false);
1763 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
1764
1765 /* The first descriptor set the NEXT bit to 1 */
1766 if (i == 0)
1767 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1768 else
1769 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1770
1771 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
acf61ecd
YL
1772 u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
1773
1774 priv = &buf_alloc->priv_buf[idx];
46a3df9f
S
1775 req->tc_wl[j].high =
1776 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
1777 req->tc_wl[j].high |=
3738287c 1778 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
46a3df9f
S
1779 req->tc_wl[j].low =
1780 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
1781 req->tc_wl[j].low |=
3738287c 1782 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
46a3df9f
S
1783 }
1784 }
1785
 1786	/* Send 2 descriptors at one time */
1787 ret = hclge_cmd_send(&hdev->hw, desc, 2);
3f639907 1788 if (ret)
46a3df9f
S
1789 dev_err(&hdev->pdev->dev,
1790 "rx private waterline config cmd failed %d\n",
1791 ret);
3f639907 1792 return ret;
46a3df9f
S
1793}
1794
acf61ecd
YL
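/* Program the per-TC high/low thresholds of the shared (common) rx
 * buffer; two chained descriptors are used to cover all TCs.
 */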
1795static int hclge_common_thrd_config(struct hclge_dev *hdev,
1796 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f 1797{
acf61ecd 1798 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
46a3df9f
S
1799 struct hclge_rx_com_thrd *req;
1800 struct hclge_desc desc[2];
1801 struct hclge_tc_thrd *tc;
1802 int i, j;
1803 int ret;
1804
1805 for (i = 0; i < 2; i++) {
1806 hclge_cmd_setup_basic_desc(&desc[i],
1807 HCLGE_OPC_RX_COM_THRD_ALLOC, false);
1808 req = (struct hclge_rx_com_thrd *)&desc[i].data;
1809
1810 /* The first descriptor set the NEXT bit to 1 */
1811 if (i == 0)
1812 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1813 else
1814 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1815
1816 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1817 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
1818
1819 req->com_thrd[j].high =
1820 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
1821 req->com_thrd[j].high |=
3738287c 1822 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
46a3df9f
S
1823 req->com_thrd[j].low =
1824 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
1825 req->com_thrd[j].low |=
3738287c 1826 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
46a3df9f
S
1827 }
1828 }
1829
1830 /* Send 2 descriptors at one time */
1831 ret = hclge_cmd_send(&hdev->hw, desc, 2);
3f639907 1832 if (ret)
46a3df9f
S
1833 dev_err(&hdev->pdev->dev,
1834 "common threshold config cmd failed %d\n", ret);
3f639907 1835 return ret;
46a3df9f
S
1836}
1837
acf61ecd
YL
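/* Program the high/low waterline of the shared (common) rx buffer. */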
1838static int hclge_common_wl_config(struct hclge_dev *hdev,
1839 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f 1840{
acf61ecd 1841 struct hclge_shared_buf *buf = &buf_alloc->s_buf;
46a3df9f
S
1842 struct hclge_rx_com_wl *req;
1843 struct hclge_desc desc;
1844 int ret;
1845
1846 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
1847
1848 req = (struct hclge_rx_com_wl *)desc.data;
1849 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
3738287c 1850 req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
46a3df9f
S
1851
1852 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
3738287c 1853 req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
46a3df9f
S
1854
1855 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 1856 if (ret)
46a3df9f
S
1857 dev_err(&hdev->pdev->dev,
1858 "common waterline config cmd failed %d\n", ret);
46a3df9f 1859
3f639907 1860 return ret;
46a3df9f
S
1861}
1862
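/* Calculate and configure the tx/rx packet buffers. The per-TC waterline
 * and shared threshold setup is skipped when DCB is not supported; the
 * common waterline is always configured.
 */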
1863int hclge_buffer_alloc(struct hclge_dev *hdev)
1864{
acf61ecd 1865 struct hclge_pkt_buf_alloc *pkt_buf;
46a3df9f
S
1866 int ret;
1867
acf61ecd
YL
1868 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
1869 if (!pkt_buf)
46a3df9f
S
1870 return -ENOMEM;
1871
acf61ecd 1872 ret = hclge_tx_buffer_calc(hdev, pkt_buf);
9ffe79a9
YL
1873 if (ret) {
1874 dev_err(&hdev->pdev->dev,
1875 "could not calc tx buffer size for all TCs %d\n", ret);
acf61ecd 1876 goto out;
9ffe79a9
YL
1877 }
1878
acf61ecd 1879 ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
46a3df9f
S
1880 if (ret) {
1881 dev_err(&hdev->pdev->dev,
1882 "could not alloc tx buffers %d\n", ret);
acf61ecd 1883 goto out;
46a3df9f
S
1884 }
1885
acf61ecd 1886 ret = hclge_rx_buffer_calc(hdev, pkt_buf);
46a3df9f
S
1887 if (ret) {
1888 dev_err(&hdev->pdev->dev,
1889 "could not calc rx priv buffer size for all TCs %d\n",
1890 ret);
acf61ecd 1891 goto out;
46a3df9f
S
1892 }
1893
acf61ecd 1894 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
46a3df9f
S
1895 if (ret) {
1896 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
1897 ret);
acf61ecd 1898 goto out;
46a3df9f
S
1899 }
1900
2daf4a65 1901 if (hnae3_dev_dcb_supported(hdev)) {
acf61ecd 1902 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2daf4a65
YL
1903 if (ret) {
1904 dev_err(&hdev->pdev->dev,
1905 "could not configure rx private waterline %d\n",
1906 ret);
acf61ecd 1907 goto out;
2daf4a65 1908 }
46a3df9f 1909
acf61ecd 1910 ret = hclge_common_thrd_config(hdev, pkt_buf);
2daf4a65
YL
1911 if (ret) {
1912 dev_err(&hdev->pdev->dev,
1913 "could not configure common threshold %d\n",
1914 ret);
acf61ecd 1915 goto out;
2daf4a65 1916 }
46a3df9f
S
1917 }
1918
acf61ecd
YL
1919 ret = hclge_common_wl_config(hdev, pkt_buf);
1920 if (ret)
46a3df9f
S
1921 dev_err(&hdev->pdev->dev,
1922 "could not configure common waterline %d\n", ret);
46a3df9f 1923
acf61ecd
YL
1924out:
1925 kfree(pkt_buf);
1926 return ret;
46a3df9f
S
1927}
1928
1929static int hclge_init_roce_base_info(struct hclge_vport *vport)
1930{
1931 struct hnae3_handle *roce = &vport->roce;
1932 struct hnae3_handle *nic = &vport->nic;
1933
887c3820 1934 roce->rinfo.num_vectors = vport->back->num_roce_msi;
46a3df9f
S
1935
1936 if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
1937 vport->back->num_msi_left == 0)
1938 return -EINVAL;
1939
1940 roce->rinfo.base_vector = vport->back->roce_base_vector;
1941
1942 roce->rinfo.netdev = nic->kinfo.netdev;
1943 roce->rinfo.roce_io_base = vport->back->hw.io_base;
1944
1945 roce->pdev = nic->pdev;
1946 roce->ae_algo = nic->ae_algo;
1947 roce->numa_node_mask = nic->numa_node_mask;
1948
1949 return 0;
1950}
1951
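/* Allocate MSI/MSI-X interrupt vectors and the per-vector bookkeeping
 * arrays (owning vport and irq number). Getting fewer vectors than
 * requested only triggers a warning, not an error.
 */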
887c3820 1952static int hclge_init_msi(struct hclge_dev *hdev)
46a3df9f
S
1953{
1954 struct pci_dev *pdev = hdev->pdev;
887c3820
SM
1955 int vectors;
1956 int i;
46a3df9f 1957
887c3820
SM
1958 vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
1959 PCI_IRQ_MSI | PCI_IRQ_MSIX);
1960 if (vectors < 0) {
1961 dev_err(&pdev->dev,
1962 "failed(%d) to allocate MSI/MSI-X vectors\n",
1963 vectors);
1964 return vectors;
46a3df9f 1965 }
887c3820
SM
1966 if (vectors < hdev->num_msi)
1967 dev_warn(&hdev->pdev->dev,
1968 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
1969 hdev->num_msi, vectors);
46a3df9f 1970
887c3820
SM
1971 hdev->num_msi = vectors;
1972 hdev->num_msi_left = vectors;
1973 hdev->base_msi_vector = pdev->irq;
46a3df9f 1974 hdev->roce_base_vector = hdev->base_msi_vector +
375dd5e4 1975 hdev->roce_base_msix_offset;
46a3df9f 1976
46a3df9f
S
1977 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
1978 sizeof(u16), GFP_KERNEL);
887c3820
SM
1979 if (!hdev->vector_status) {
1980 pci_free_irq_vectors(pdev);
46a3df9f 1981 return -ENOMEM;
887c3820 1982 }
46a3df9f
S
1983
1984 for (i = 0; i < hdev->num_msi; i++)
1985 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
1986
887c3820
SM
1987 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
1988 sizeof(int), GFP_KERNEL);
1989 if (!hdev->vector_irq) {
1990 pci_free_irq_vectors(pdev);
1991 return -ENOMEM;
46a3df9f 1992 }
46a3df9f
S
1993
1994 return 0;
1995}
1996
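/* Only 10M and 100M links may run half duplex; force full duplex for
 * any other speed.
 */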
2d03eacc 1997static u8 hclge_check_speed_dup(u8 duplex, int speed)
46a3df9f 1998{
46a3df9f 1999
2d03eacc
YL
2000 if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2001 duplex = HCLGE_MAC_FULL;
46a3df9f 2002
2d03eacc 2003 return duplex;
46a3df9f
S
2004}
2005
2d03eacc
YL
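/* Write the MAC speed/duplex setting to hardware. The speed is encoded
 * into the HCLGE_CFG_SPEED field (e.g. 0 for 1G, 6 for 10M) as per the
 * switch statement below.
 */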
2006static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2007 u8 duplex)
46a3df9f 2008{
d44f9b63 2009 struct hclge_config_mac_speed_dup_cmd *req;
46a3df9f
S
2010 struct hclge_desc desc;
2011 int ret;
2012
d44f9b63 2013 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
46a3df9f
S
2014
2015 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2016
e4e87715 2017 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
46a3df9f
S
2018
2019 switch (speed) {
2020 case HCLGE_MAC_SPEED_10M:
e4e87715
PL
2021 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2022 HCLGE_CFG_SPEED_S, 6);
46a3df9f
S
2023 break;
2024 case HCLGE_MAC_SPEED_100M:
e4e87715
PL
2025 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2026 HCLGE_CFG_SPEED_S, 7);
46a3df9f
S
2027 break;
2028 case HCLGE_MAC_SPEED_1G:
e4e87715
PL
2029 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2030 HCLGE_CFG_SPEED_S, 0);
46a3df9f
S
2031 break;
2032 case HCLGE_MAC_SPEED_10G:
e4e87715
PL
2033 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2034 HCLGE_CFG_SPEED_S, 1);
46a3df9f
S
2035 break;
2036 case HCLGE_MAC_SPEED_25G:
e4e87715
PL
2037 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2038 HCLGE_CFG_SPEED_S, 2);
46a3df9f
S
2039 break;
2040 case HCLGE_MAC_SPEED_40G:
e4e87715
PL
2041 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2042 HCLGE_CFG_SPEED_S, 3);
46a3df9f
S
2043 break;
2044 case HCLGE_MAC_SPEED_50G:
e4e87715
PL
2045 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2046 HCLGE_CFG_SPEED_S, 4);
46a3df9f
S
2047 break;
2048 case HCLGE_MAC_SPEED_100G:
e4e87715
PL
2049 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2050 HCLGE_CFG_SPEED_S, 5);
46a3df9f
S
2051 break;
2052 default:
d7629e74 2053 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
46a3df9f
S
2054 return -EINVAL;
2055 }
2056
e4e87715
PL
2057 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2058 1);
46a3df9f
S
2059
2060 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2061 if (ret) {
2062 dev_err(&hdev->pdev->dev,
2063 "mac speed/duplex config cmd failed %d.\n", ret);
2064 return ret;
2065 }
2066
2d03eacc
YL
2067 return 0;
2068}
2069
2070int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2071{
2072 int ret;
2073
2074 duplex = hclge_check_speed_dup(duplex, speed);
2075 if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2076 return 0;
2077
2078 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2079 if (ret)
2080 return ret;
2081
2082 hdev->hw.mac.speed = speed;
2083 hdev->hw.mac.duplex = duplex;
46a3df9f
S
2084
2085 return 0;
2086}
2087
2088static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2089 u8 duplex)
2090{
2091 struct hclge_vport *vport = hclge_get_vport(handle);
2092 struct hclge_dev *hdev = vport->back;
2093
2094 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2095}
2096
46a3df9f
S
2097static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2098{
d44f9b63 2099 struct hclge_config_auto_neg_cmd *req;
46a3df9f 2100 struct hclge_desc desc;
a90bb9a5 2101 u32 flag = 0;
46a3df9f
S
2102 int ret;
2103
2104 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2105
d44f9b63 2106 req = (struct hclge_config_auto_neg_cmd *)desc.data;
e4e87715 2107 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
a90bb9a5 2108 req->cfg_an_cmd_flag = cpu_to_le32(flag);
46a3df9f
S
2109
2110 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 2111 if (ret)
46a3df9f
S
2112 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2113 ret);
46a3df9f 2114
3f639907 2115 return ret;
46a3df9f
S
2116}
2117
2118static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2119{
2120 struct hclge_vport *vport = hclge_get_vport(handle);
2121 struct hclge_dev *hdev = vport->back;
2122
2123 return hclge_set_autoneg_en(hdev, enable);
2124}
2125
2126static int hclge_get_autoneg(struct hnae3_handle *handle)
2127{
2128 struct hclge_vport *vport = hclge_get_vport(handle);
2129 struct hclge_dev *hdev = vport->back;
27b5bf49
FL
2130 struct phy_device *phydev = hdev->hw.mac.phydev;
2131
2132 if (phydev)
2133 return phydev->autoneg;
46a3df9f
S
2134
2135 return hdev->hw.mac.autoneg;
2136}
2137
2138static int hclge_mac_init(struct hclge_dev *hdev)
2139{
2140 struct hclge_mac *mac = &hdev->hw.mac;
2141 int ret;
2142
5d497936 2143 hdev->support_sfp_query = true;
2d03eacc
YL
2144 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2145 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2146 hdev->hw.mac.duplex);
46a3df9f
S
2147 if (ret) {
2148 dev_err(&hdev->pdev->dev,
2149 "Config mac speed dup fail ret=%d\n", ret);
2150 return ret;
2151 }
2152
2153 mac->link = 0;
2154
e6d7d79d
YL
2155 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2156 if (ret) {
2157 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2158 return ret;
2159 }
f9fd82a9 2160
e6d7d79d 2161 ret = hclge_buffer_alloc(hdev);
3f639907 2162 if (ret)
f9fd82a9 2163 dev_err(&hdev->pdev->dev,
e6d7d79d 2164 "allocate buffer fail, ret=%d\n", ret);
f9fd82a9 2165
3f639907 2166 return ret;
46a3df9f
S
2167}
2168
c1a81619
SM
2169static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2170{
18e24888
HT
2171 if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
2172 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
c1a81619
SM
2173 schedule_work(&hdev->mbx_service_task);
2174}
2175
cb1b9f77
SM
2176static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2177{
2178 if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2179 schedule_work(&hdev->rst_service_task);
2180}
2181
46a3df9f
S
2182static void hclge_task_schedule(struct hclge_dev *hdev)
2183{
2184 if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2185 !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2186 !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2187 (void)schedule_work(&hdev->service_task);
2188}
2189
2190static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2191{
d44f9b63 2192 struct hclge_link_status_cmd *req;
46a3df9f
S
2193 struct hclge_desc desc;
2194 int link_status;
2195 int ret;
2196
2197 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2198 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2199 if (ret) {
2200 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2201 ret);
2202 return ret;
2203 }
2204
d44f9b63 2205 req = (struct hclge_link_status_cmd *)desc.data;
c79301d8 2206 link_status = req->status & HCLGE_LINK_STATUS_UP_M;
46a3df9f
S
2207
2208 return !!link_status;
2209}
2210
2211static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2212{
2213 int mac_state;
2214 int link_stat;
2215
582d37bb
PL
2216 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2217 return 0;
2218
46a3df9f
S
2219 mac_state = hclge_get_mac_link_status(hdev);
2220
2221 if (hdev->hw.mac.phydev) {
fd813314 2222 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
46a3df9f
S
2223 link_stat = mac_state &
2224 hdev->hw.mac.phydev->link;
2225 else
2226 link_stat = 0;
2227
2228 } else {
2229 link_stat = mac_state;
2230 }
2231
2232 return !!link_stat;
2233}
2234
2235static void hclge_update_link_status(struct hclge_dev *hdev)
2236{
45e92b7e 2237 struct hnae3_client *rclient = hdev->roce_client;
46a3df9f 2238 struct hnae3_client *client = hdev->nic_client;
45e92b7e 2239 struct hnae3_handle *rhandle;
46a3df9f
S
2240 struct hnae3_handle *handle;
2241 int state;
2242 int i;
2243
2244 if (!client)
2245 return;
2246 state = hclge_get_mac_phy_link(hdev);
2247 if (state != hdev->hw.mac.link) {
2248 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2249 handle = &hdev->vport[i].nic;
2250 client->ops->link_status_change(handle, state);
45e92b7e
PL
2251 rhandle = &hdev->vport[i].roce;
2252 if (rclient && rclient->ops->link_status_change)
2253 rclient->ops->link_status_change(rhandle,
2254 state);
46a3df9f
S
2255 }
2256 hdev->hw.mac.link = state;
2257 }
2258}
2259
5d497936
PL
2260static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2261{
2262 struct hclge_sfp_speed_cmd *resp = NULL;
2263 struct hclge_desc desc;
2264 int ret;
2265
2266 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SFP_GET_SPEED, true);
2267 resp = (struct hclge_sfp_speed_cmd *)desc.data;
2268 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2269 if (ret == -EOPNOTSUPP) {
2270 dev_warn(&hdev->pdev->dev,
 2271			 "IMP does not support getting SFP speed %d\n", ret);
2272 return ret;
2273 } else if (ret) {
2274 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2275 return ret;
2276 }
2277
2278 *speed = resp->sfp_speed;
2279
2280 return 0;
2281}
2282
46a3df9f
S
2283static int hclge_update_speed_duplex(struct hclge_dev *hdev)
2284{
2285 struct hclge_mac mac = hdev->hw.mac;
46a3df9f
S
2286 int speed;
2287 int ret;
2288
5d497936 2289 /* get the speed from SFP cmd when phy
46a3df9f
S
 2290	 * doesn't exist.
2291 */
5d497936 2292 if (mac.phydev)
46a3df9f
S
2293 return 0;
2294
5d497936
PL
2295 /* if IMP does not support get SFP/qSFP speed, return directly */
2296 if (!hdev->support_sfp_query)
2297 return 0;
46a3df9f 2298
5d497936
PL
2299 ret = hclge_get_sfp_speed(hdev, &speed);
2300 if (ret == -EOPNOTSUPP) {
2301 hdev->support_sfp_query = false;
2302 return ret;
2303 } else if (ret) {
2d03eacc 2304 return ret;
46a3df9f
S
2305 }
2306
5d497936
PL
2307 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2308 return 0; /* do nothing if no SFP */
2309
2310 /* must config full duplex for SFP */
2311 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
46a3df9f
S
2312}
2313
2314static int hclge_update_speed_duplex_h(struct hnae3_handle *handle)
2315{
2316 struct hclge_vport *vport = hclge_get_vport(handle);
2317 struct hclge_dev *hdev = vport->back;
2318
2319 return hclge_update_speed_duplex(hdev);
2320}
2321
2322static int hclge_get_status(struct hnae3_handle *handle)
2323{
2324 struct hclge_vport *vport = hclge_get_vport(handle);
2325 struct hclge_dev *hdev = vport->back;
2326
2327 hclge_update_link_status(hdev);
2328
2329 return hdev->hw.mac.link;
2330}
2331
d039ef68 2332static void hclge_service_timer(struct timer_list *t)
46a3df9f 2333{
d039ef68 2334 struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
46a3df9f 2335
d039ef68 2336 mod_timer(&hdev->service_timer, jiffies + HZ);
c5f65480 2337 hdev->hw_stats.stats_timer++;
46a3df9f
S
2338 hclge_task_schedule(hdev);
2339}
2340
2341static void hclge_service_complete(struct hclge_dev *hdev)
2342{
2343 WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
2344
2345 /* Flush memory before next watchdog */
2346 smp_mb__before_atomic();
2347 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
2348}
2349
ca1d7669
SM
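/* Read the vector0 interrupt sources and return which event (reset,
 * msix error, mailbox or other) should be handled; reset events take
 * priority over mailbox events.
 */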
2350static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2351{
f6162d44 2352 u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
ca1d7669
SM
2353
2354 /* fetch the events from their corresponding regs */
9ca8d1a7 2355 rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
c1a81619 2356 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
f6162d44
SM
2357 msix_src_reg = hclge_read_dev(&hdev->hw,
2358 HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
c1a81619
SM
2359
 2360	/* Assumption: if by any chance reset and mailbox events are reported
 2361	 * together then we will only process the reset event in this round and
 2362	 * defer the processing of the mailbox events. Since we will not have
 2363	 * cleared the RX CMDQ event this time, we will receive another
 2364	 * interrupt from H/W just for the mailbox.
2365 */
ca1d7669
SM
2366
2367 /* check for vector0 reset event sources */
6dd22bbc
HT
2368 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2369 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2370 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2371 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2372 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
f02eb82d 2373 hdev->rst_stats.imp_rst_cnt++;
6dd22bbc
HT
2374 return HCLGE_VECTOR0_EVENT_RST;
2375 }
2376
ca1d7669 2377 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
65e41e7e 2378 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
8d40854f 2379 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
ca1d7669
SM
2380 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2381 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
f02eb82d 2382 hdev->rst_stats.global_rst_cnt++;
ca1d7669
SM
2383 return HCLGE_VECTOR0_EVENT_RST;
2384 }
2385
2386 if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
65e41e7e 2387 dev_info(&hdev->pdev->dev, "core reset interrupt\n");
8d40854f 2388 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
ca1d7669
SM
2389 set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
2390 *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
f02eb82d 2391 hdev->rst_stats.core_rst_cnt++;
ca1d7669
SM
2392 return HCLGE_VECTOR0_EVENT_RST;
2393 }
2394
f6162d44 2395 /* check for vector0 msix event source */
147175c9
HT
2396 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
2397 dev_dbg(&hdev->pdev->dev, "received event 0x%x\n",
2398 msix_src_reg);
f6162d44 2399 return HCLGE_VECTOR0_EVENT_ERR;
147175c9 2400 }
f6162d44 2401
c1a81619
SM
2402 /* check for vector0 mailbox(=CMDQ RX) event source */
2403 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2404 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2405 *clearval = cmdq_src_reg;
2406 return HCLGE_VECTOR0_EVENT_MBX;
2407 }
ca1d7669 2408
147175c9
HT
2409 /* print other vector0 event source */
2410 dev_dbg(&hdev->pdev->dev, "cmdq_src_reg:0x%x, msix_src_reg:0x%x\n",
2411 cmdq_src_reg, msix_src_reg);
ca1d7669
SM
2412 return HCLGE_VECTOR0_EVENT_OTHER;
2413}
2414
2415static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2416 u32 regclr)
2417{
c1a81619
SM
2418 switch (event_type) {
2419 case HCLGE_VECTOR0_EVENT_RST:
ca1d7669 2420 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
c1a81619
SM
2421 break;
2422 case HCLGE_VECTOR0_EVENT_MBX:
2423 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2424 break;
fa7a4bd5
JS
2425 default:
2426 break;
c1a81619 2427 }
ca1d7669
SM
2428}
2429
8e52a602
XW
2430static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2431{
2432 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2433 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2434 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2435 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2436 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
2437}
2438
466b0c00
L
2439static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2440{
2441 writel(enable ? 1 : 0, vector->addr);
2442}
2443
2444static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2445{
2446 struct hclge_dev *hdev = data;
ca1d7669
SM
2447 u32 event_cause;
2448 u32 clearval;
466b0c00
L
2449
2450 hclge_enable_vector(&hdev->misc_vector, false);
ca1d7669
SM
2451 event_cause = hclge_check_event_cause(hdev, &clearval);
2452
c1a81619 2453 /* vector 0 interrupt is shared with reset and mailbox source events.*/
ca1d7669 2454 switch (event_cause) {
f6162d44
SM
2455 case HCLGE_VECTOR0_EVENT_ERR:
2456 /* we do not know what type of reset is required now. This could
2457 * only be decided after we fetch the type of errors which
2458 * caused this event. Therefore, we will do below for now:
 2459		 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
 2460		 * have deferred the choice of reset type to be used.
 2461		 * 2. Schedule the reset service task.
2462 * 3. When service task receives HNAE3_UNKNOWN_RESET type it
2463 * will fetch the correct type of reset. This would be done
2464 * by first decoding the types of errors.
2465 */
2466 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
2467 /* fall through */
ca1d7669 2468 case HCLGE_VECTOR0_EVENT_RST:
cb1b9f77 2469 hclge_reset_task_schedule(hdev);
ca1d7669 2470 break;
c1a81619
SM
2471 case HCLGE_VECTOR0_EVENT_MBX:
2472 /* If we are here then,
2473 * 1. Either we are not handling any mbx task and we are not
2474 * scheduled as well
2475 * OR
2476 * 2. We could be handling a mbx task but nothing more is
2477 * scheduled.
2478 * In both cases, we should schedule mbx task as there are more
2479 * mbx messages reported by this interrupt.
2480 */
2481 hclge_mbx_task_schedule(hdev);
f0ad97ac 2482 break;
ca1d7669 2483 default:
f0ad97ac
YL
2484 dev_warn(&hdev->pdev->dev,
2485 "received unknown or unhandled event of vector0\n");
ca1d7669
SM
2486 break;
2487 }
2488
cd8c5c26 2489	/* clear the source of interrupt if it is not caused by reset */
0d441140 2490 if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
cd8c5c26
YL
2491 hclge_clear_event_cause(hdev, event_cause, clearval);
2492 hclge_enable_vector(&hdev->misc_vector, true);
2493 }
466b0c00
L
2494
2495 return IRQ_HANDLED;
2496}
2497
2498static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2499{
36cbbdf6
PL
2500 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
2501 dev_warn(&hdev->pdev->dev,
2502 "vector(vector_id %d) has been freed.\n", vector_id);
2503 return;
2504 }
2505
466b0c00
L
2506 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
2507 hdev->num_msi_left += 1;
2508 hdev->num_msi_used -= 1;
2509}
2510
2511static void hclge_get_misc_vector(struct hclge_dev *hdev)
2512{
2513 struct hclge_misc_vector *vector = &hdev->misc_vector;
2514
2515 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
2516
2517 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
2518 hdev->vector_status[0] = 0;
2519
2520 hdev->num_msi_left -= 1;
2521 hdev->num_msi_used += 1;
2522}
2523
2524static int hclge_misc_irq_init(struct hclge_dev *hdev)
2525{
2526 int ret;
2527
2528 hclge_get_misc_vector(hdev);
2529
ca1d7669
SM
2530 /* this would be explicitly freed in the end */
2531 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
2532 0, "hclge_misc", hdev);
466b0c00
L
2533 if (ret) {
2534 hclge_free_vector(hdev, 0);
2535 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
2536 hdev->misc_vector.vector_irq);
2537 }
2538
2539 return ret;
2540}
2541
ca1d7669
SM
2542static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
2543{
2544 free_irq(hdev->misc_vector.vector_irq, hdev);
2545 hclge_free_vector(hdev, 0);
2546}
2547
af013903
HT
2548int hclge_notify_client(struct hclge_dev *hdev,
2549 enum hnae3_reset_notify_type type)
4ed340ab
L
2550{
2551 struct hnae3_client *client = hdev->nic_client;
2552 u16 i;
2553
2554 if (!client->ops->reset_notify)
2555 return -EOPNOTSUPP;
2556
2557 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2558 struct hnae3_handle *handle = &hdev->vport[i].nic;
2559 int ret;
2560
2561 ret = client->ops->reset_notify(handle, type);
65e41e7e
HT
2562 if (ret) {
2563 dev_err(&hdev->pdev->dev,
2564 "notify nic client failed %d(%d)\n", type, ret);
4ed340ab 2565 return ret;
65e41e7e 2566 }
4ed340ab
L
2567 }
2568
2569 return 0;
2570}
2571
f403a84f
HT
2572static int hclge_notify_roce_client(struct hclge_dev *hdev,
2573 enum hnae3_reset_notify_type type)
2574{
2575 struct hnae3_client *client = hdev->roce_client;
2576 int ret = 0;
2577 u16 i;
2578
2579 if (!client)
2580 return 0;
2581
2582 if (!client->ops->reset_notify)
2583 return -EOPNOTSUPP;
2584
2585 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2586 struct hnae3_handle *handle = &hdev->vport[i].roce;
2587
2588 ret = client->ops->reset_notify(handle, type);
2589 if (ret) {
2590 dev_err(&hdev->pdev->dev,
2591 "notify roce client failed %d(%d)",
2592 type, ret);
2593 return ret;
2594 }
2595 }
2596
2597 return ret;
2598}
2599
4ed340ab
L
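/* Poll the reset status register (or the FLR done flag) until hardware
 * reports that the reset has completed, or time out after
 * HCLGE_RESET_WAIT_CNT polls.
 */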
2600static int hclge_reset_wait(struct hclge_dev *hdev)
2601{
2602#define HCLGE_RESET_WATI_MS 100
6dd22bbc 2603#define HCLGE_RESET_WAIT_CNT 200
4ed340ab
L
2604 u32 val, reg, reg_bit;
2605 u32 cnt = 0;
2606
2607 switch (hdev->reset_type) {
6dd22bbc
HT
2608 case HNAE3_IMP_RESET:
2609 reg = HCLGE_GLOBAL_RESET_REG;
2610 reg_bit = HCLGE_IMP_RESET_BIT;
2611 break;
4ed340ab
L
2612 case HNAE3_GLOBAL_RESET:
2613 reg = HCLGE_GLOBAL_RESET_REG;
2614 reg_bit = HCLGE_GLOBAL_RESET_BIT;
2615 break;
2616 case HNAE3_CORE_RESET:
2617 reg = HCLGE_GLOBAL_RESET_REG;
2618 reg_bit = HCLGE_CORE_RESET_BIT;
2619 break;
2620 case HNAE3_FUNC_RESET:
2621 reg = HCLGE_FUN_RST_ING;
2622 reg_bit = HCLGE_FUN_RST_ING_B;
2623 break;
6b9a97ee
HT
2624 case HNAE3_FLR_RESET:
2625 break;
4ed340ab
L
2626 default:
2627 dev_err(&hdev->pdev->dev,
2628 "Wait for unsupported reset type: %d\n",
2629 hdev->reset_type);
2630 return -EINVAL;
2631 }
2632
6b9a97ee
HT
2633 if (hdev->reset_type == HNAE3_FLR_RESET) {
2634 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
2635 cnt++ < HCLGE_RESET_WAIT_CNT)
2636 msleep(HCLGE_RESET_WATI_MS);
2637
2638 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
2639 dev_err(&hdev->pdev->dev,
2640 "flr wait timeout: %d\n", cnt);
2641 return -EBUSY;
2642 }
2643
2644 return 0;
2645 }
2646
4ed340ab 2647 val = hclge_read_dev(&hdev->hw, reg);
e4e87715 2648 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
4ed340ab
L
2649 msleep(HCLGE_RESET_WATI_MS);
2650 val = hclge_read_dev(&hdev->hw, reg);
2651 cnt++;
2652 }
2653
4ed340ab
L
2654 if (cnt >= HCLGE_RESET_WAIT_CNT) {
2655 dev_warn(&hdev->pdev->dev,
2656 "Wait for reset timeout: %d\n", hdev->reset_type);
2657 return -EBUSY;
2658 }
2659
2660 return 0;
2661}
2662
aa5c4f17
HT
2663static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
2664{
2665 struct hclge_vf_rst_cmd *req;
2666 struct hclge_desc desc;
2667
2668 req = (struct hclge_vf_rst_cmd *)desc.data;
2669 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
2670 req->dest_vfid = func_id;
2671
2672 if (reset)
2673 req->vf_rst = 0x1;
2674
2675 return hclge_cmd_send(&hdev->hw, &desc, 1);
2676}
2677
e511f17b 2678static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
aa5c4f17
HT
2679{
2680 int i;
2681
2682 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
2683 struct hclge_vport *vport = &hdev->vport[i];
2684 int ret;
2685
2686 /* Send cmd to set/clear VF's FUNC_RST_ING */
2687 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
2688 if (ret) {
2689 dev_err(&hdev->pdev->dev,
790cd1a8 2690 "set vf(%d) rst failed %d!\n",
aa5c4f17
HT
2691 vport->vport_id, ret);
2692 return ret;
2693 }
2694
cc645dfa 2695 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
aa5c4f17
HT
2696 continue;
2697
2698 /* Inform VF to process the reset.
2699 * hclge_inform_reset_assert_to_vf may fail if VF
2700 * driver is not loaded.
2701 */
2702 ret = hclge_inform_reset_assert_to_vf(vport);
2703 if (ret)
2704 dev_warn(&hdev->pdev->dev,
790cd1a8 2705 "inform reset to vf(%d) failed %d!\n",
aa5c4f17
HT
2706 vport->vport_id, ret);
2707 }
2708
2709 return 0;
2710}
2711
2bfbd35d 2712int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
4ed340ab
L
2713{
2714 struct hclge_desc desc;
2715 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
2716 int ret;
2717
2718 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
e4e87715 2719 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
4ed340ab
L
2720 req->fun_reset_vfid = func_id;
2721
2722 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2723 if (ret)
2724 dev_err(&hdev->pdev->dev,
 2725			"send function reset cmd fail, status = %d\n", ret);
2726
2727 return ret;
2728}
2729
f2f432f2 2730static void hclge_do_reset(struct hclge_dev *hdev)
4ed340ab 2731{
4f765d3e 2732 struct hnae3_handle *handle = &hdev->vport[0].nic;
4ed340ab
L
2733 struct pci_dev *pdev = hdev->pdev;
2734 u32 val;
2735
4f765d3e
HT
2736 if (hclge_get_hw_reset_stat(handle)) {
2737 dev_info(&pdev->dev, "Hardware reset not finish\n");
2738 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
2739 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
2740 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
2741 return;
2742 }
2743
f2f432f2 2744 switch (hdev->reset_type) {
4ed340ab
L
2745 case HNAE3_GLOBAL_RESET:
2746 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
e4e87715 2747 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
4ed340ab
L
2748 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2749 dev_info(&pdev->dev, "Global Reset requested\n");
2750 break;
2751 case HNAE3_CORE_RESET:
2752 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
e4e87715 2753 hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
4ed340ab
L
2754 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2755 dev_info(&pdev->dev, "Core Reset requested\n");
2756 break;
2757 case HNAE3_FUNC_RESET:
2758 dev_info(&pdev->dev, "PF Reset requested\n");
cb1b9f77
SM
2759 /* schedule again to check later */
2760 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
2761 hclge_reset_task_schedule(hdev);
4ed340ab 2762 break;
6b9a97ee
HT
2763 case HNAE3_FLR_RESET:
2764 dev_info(&pdev->dev, "FLR requested\n");
2765 /* schedule again to check later */
2766 set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
2767 hclge_reset_task_schedule(hdev);
2768 break;
4ed340ab
L
2769 default:
2770 dev_warn(&pdev->dev,
f2f432f2 2771 "Unsupported reset type: %d\n", hdev->reset_type);
4ed340ab
L
2772 break;
2773 }
2774}
2775
f2f432f2
SM
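/* Resolve any UNKNOWN reset request, then return the highest priority
 * reset level pending in @addr and clear the corresponding bit(s). If the
 * pending level is lower than a reset already in progress, return
 * HNAE3_NONE_RESET.
 */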
2776static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
2777 unsigned long *addr)
2778{
2779 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
2780
f6162d44
SM
2781 /* first, resolve any unknown reset type to the known type(s) */
2782 if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
2783 /* we will intentionally ignore any errors from this function
2784 * as we will end up in *some* reset request in any case
2785 */
2786 hclge_handle_hw_msix_error(hdev, addr);
2787 clear_bit(HNAE3_UNKNOWN_RESET, addr);
 2788		/* We deferred the clearing of the error event which caused the
 2789		 * interrupt since it was not possible to do that in
 2790		 * interrupt context (and this is the reason we introduced the
 2791		 * new UNKNOWN reset type). Now that the errors have been
 2792		 * handled and cleared in hardware, we can safely enable
 2793		 * interrupts. This is an exception to the norm.
2794 */
2795 hclge_enable_vector(&hdev->misc_vector, true);
2796 }
2797
f2f432f2 2798 /* return the highest priority reset level amongst all */
7cea834d
HT
2799 if (test_bit(HNAE3_IMP_RESET, addr)) {
2800 rst_level = HNAE3_IMP_RESET;
2801 clear_bit(HNAE3_IMP_RESET, addr);
2802 clear_bit(HNAE3_GLOBAL_RESET, addr);
2803 clear_bit(HNAE3_CORE_RESET, addr);
2804 clear_bit(HNAE3_FUNC_RESET, addr);
2805 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
f2f432f2 2806 rst_level = HNAE3_GLOBAL_RESET;
7cea834d
HT
2807 clear_bit(HNAE3_GLOBAL_RESET, addr);
2808 clear_bit(HNAE3_CORE_RESET, addr);
2809 clear_bit(HNAE3_FUNC_RESET, addr);
2810 } else if (test_bit(HNAE3_CORE_RESET, addr)) {
f2f432f2 2811 rst_level = HNAE3_CORE_RESET;
7cea834d
HT
2812 clear_bit(HNAE3_CORE_RESET, addr);
2813 clear_bit(HNAE3_FUNC_RESET, addr);
2814 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
f2f432f2 2815 rst_level = HNAE3_FUNC_RESET;
7cea834d 2816 clear_bit(HNAE3_FUNC_RESET, addr);
6b9a97ee
HT
2817 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
2818 rst_level = HNAE3_FLR_RESET;
2819 clear_bit(HNAE3_FLR_RESET, addr);
7cea834d 2820 }
f2f432f2 2821
0fdf4d30
HT
2822 if (hdev->reset_type != HNAE3_NONE_RESET &&
2823 rst_level < hdev->reset_type)
2824 return HNAE3_NONE_RESET;
2825
f2f432f2
SM
2826 return rst_level;
2827}
2828
cd8c5c26
YL
2829static void hclge_clear_reset_cause(struct hclge_dev *hdev)
2830{
2831 u32 clearval = 0;
2832
2833 switch (hdev->reset_type) {
2834 case HNAE3_IMP_RESET:
2835 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2836 break;
2837 case HNAE3_GLOBAL_RESET:
2838 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2839 break;
2840 case HNAE3_CORE_RESET:
2841 clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2842 break;
2843 default:
cd8c5c26
YL
2844 break;
2845 }
2846
2847 if (!clearval)
2848 return;
2849
2850 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
2851 hclge_enable_vector(&hdev->misc_vector, true);
2852}
2853
aa5c4f17
HT
2854static int hclge_reset_prepare_down(struct hclge_dev *hdev)
2855{
2856 int ret = 0;
2857
2858 switch (hdev->reset_type) {
2859 case HNAE3_FUNC_RESET:
6b9a97ee
HT
2860 /* fall through */
2861 case HNAE3_FLR_RESET:
aa5c4f17
HT
2862 ret = hclge_set_all_vf_rst(hdev, true);
2863 break;
2864 default:
2865 break;
2866 }
2867
2868 return ret;
2869}
2870
35d93a30
HT
2871static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
2872{
6dd22bbc 2873 u32 reg_val;
35d93a30
HT
2874 int ret = 0;
2875
2876 switch (hdev->reset_type) {
2877 case HNAE3_FUNC_RESET:
aa5c4f17
HT
2878 /* There is no mechanism for PF to know if VF has stopped IO
2879 * for now, just wait 100 ms for VF to stop IO
2880 */
2881 msleep(100);
35d93a30
HT
2882 ret = hclge_func_reset_cmd(hdev, 0);
2883 if (ret) {
2884 dev_err(&hdev->pdev->dev,
141b95d5 2885 "asserting function reset fail %d!\n", ret);
35d93a30
HT
2886 return ret;
2887 }
2888
 2889		/* After performing pf reset, it is not necessary to do the
2890 * mailbox handling or send any command to firmware, because
2891 * any mailbox handling or command to firmware is only valid
2892 * after hclge_cmd_init is called.
2893 */
2894 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
f02eb82d 2895 hdev->rst_stats.pf_rst_cnt++;
35d93a30 2896 break;
6b9a97ee
HT
2897 case HNAE3_FLR_RESET:
2898 /* There is no mechanism for PF to know if VF has stopped IO
2899 * for now, just wait 100 ms for VF to stop IO
2900 */
2901 msleep(100);
2902 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2903 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
f02eb82d 2904 hdev->rst_stats.flr_rst_cnt++;
6b9a97ee 2905 break;
6dd22bbc
HT
2906 case HNAE3_IMP_RESET:
2907 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
2908 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
2909 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
2910 break;
35d93a30
HT
2911 default:
2912 break;
2913 }
2914
2915 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
2916
2917 return ret;
2918}
2919
65e41e7e
HT
2920static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
2921{
2922#define MAX_RESET_FAIL_CNT 5
2923#define RESET_UPGRADE_DELAY_SEC 10
2924
2925 if (hdev->reset_pending) {
2926 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
2927 hdev->reset_pending);
2928 return true;
2929 } else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
2930 (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
2931 BIT(HCLGE_IMP_RESET_BIT))) {
2932 dev_info(&hdev->pdev->dev,
2933 "reset failed because IMP Reset is pending\n");
2934 hclge_clear_reset_cause(hdev);
2935 return false;
2936 } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
2937 hdev->reset_fail_cnt++;
2938 if (is_timeout) {
2939 set_bit(hdev->reset_type, &hdev->reset_pending);
2940 dev_info(&hdev->pdev->dev,
2941 "re-schedule to wait for hw reset done\n");
2942 return true;
2943 }
2944
2945 dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
2946 hclge_clear_reset_cause(hdev);
2947 mod_timer(&hdev->reset_timer,
2948 jiffies + RESET_UPGRADE_DELAY_SEC * HZ);
2949
2950 return false;
2951 }
2952
2953 hclge_clear_reset_cause(hdev);
2954 dev_err(&hdev->pdev->dev, "Reset fail!\n");
2955 return false;
2956}
2957
aa5c4f17
HT
2958static int hclge_reset_prepare_up(struct hclge_dev *hdev)
2959{
2960 int ret = 0;
2961
2962 switch (hdev->reset_type) {
2963 case HNAE3_FUNC_RESET:
6b9a97ee
HT
2964 /* fall through */
2965 case HNAE3_FLR_RESET:
aa5c4f17
HT
2966 ret = hclge_set_all_vf_rst(hdev, false);
2967 break;
2968 default:
2969 break;
2970 }
2971
2972 return ret;
2973}
2974
f2f432f2
SM
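/* Perform the full reset sequence: notify clients down, prepare and wait
 * for the hardware reset, re-initialize the ae device, then bring the
 * clients back up. On failure the error handler decides whether to
 * reschedule the reset task.
 */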
2975static void hclge_reset(struct hclge_dev *hdev)
2976{
6871af29 2977 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
65e41e7e
HT
2978 bool is_timeout = false;
2979 int ret;
9de0b86f 2980
6871af29
JS
2981 /* Initialize ae_dev reset status as well, in case enet layer wants to
2982 * know if device is undergoing reset
2983 */
2984 ae_dev->reset_type = hdev->reset_type;
f02eb82d 2985 hdev->rst_stats.reset_cnt++;
f2f432f2 2986 /* perform reset of the stack & ae device for a client */
65e41e7e
HT
2987 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
2988 if (ret)
2989 goto err_reset;
2990
aa5c4f17
HT
2991 ret = hclge_reset_prepare_down(hdev);
2992 if (ret)
2993 goto err_reset;
2994
6d4fab39 2995 rtnl_lock();
65e41e7e
HT
2996 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2997 if (ret)
2998 goto err_reset_lock;
f2f432f2 2999
65e41e7e 3000 rtnl_unlock();
35d93a30 3001
65e41e7e
HT
3002 ret = hclge_reset_prepare_wait(hdev);
3003 if (ret)
3004 goto err_reset;
cd8c5c26 3005
65e41e7e
HT
3006 if (hclge_reset_wait(hdev)) {
3007 is_timeout = true;
3008 goto err_reset;
f2f432f2
SM
3009 }
3010
f02eb82d
HT
3011 hdev->rst_stats.hw_reset_done_cnt++;
3012
65e41e7e
HT
3013 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3014 if (ret)
3015 goto err_reset;
3016
3017 rtnl_lock();
3018 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3019 if (ret)
3020 goto err_reset_lock;
3021
3022 ret = hclge_reset_ae_dev(hdev->ae_dev);
3023 if (ret)
3024 goto err_reset_lock;
3025
3026 ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3027 if (ret)
3028 goto err_reset_lock;
3029
1f609492
YL
3030 ret = hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3031 if (ret)
3032 goto err_reset_lock;
3033
65e41e7e
HT
3034 hclge_clear_reset_cause(hdev);
3035
aa5c4f17
HT
3036 ret = hclge_reset_prepare_up(hdev);
3037 if (ret)
3038 goto err_reset_lock;
3039
65e41e7e
HT
3040 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3041 if (ret)
3042 goto err_reset_lock;
3043
6d4fab39 3044 rtnl_unlock();
f403a84f 3045
65e41e7e
HT
3046 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3047 if (ret)
3048 goto err_reset;
3049
3050 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3051 if (ret)
3052 goto err_reset;
3053
b644a8d4
HT
3054 hdev->last_reset_time = jiffies;
3055 hdev->reset_fail_cnt = 0;
f02eb82d 3056 hdev->rst_stats.reset_done_cnt++;
b644a8d4 3057 ae_dev->reset_type = HNAE3_NONE_RESET;
056cbab3 3058 del_timer(&hdev->reset_timer);
b644a8d4 3059
65e41e7e
HT
3060 return;
3061
3062err_reset_lock:
3063 rtnl_unlock();
3064err_reset:
3065 if (hclge_reset_err_handle(hdev, is_timeout))
3066 hclge_reset_task_schedule(hdev);
f2f432f2
SM
3067}
3068
6ae4e733
SJ
3069static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3070{
3071 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3072 struct hclge_dev *hdev = ae_dev->priv;
3073
 3074	/* We might end up getting called broadly because of the 2 cases below:
3075 * 1. Recoverable error was conveyed through APEI and only way to bring
3076 * normalcy is to reset.
3077 * 2. A new reset request from the stack due to timeout
3078 *
 3079	 * For the first case, the error event might not have an ae handle available.
 3080	 * Check if this is a new reset request and we are not here just because
6d4c3981
SM
3081 * last reset attempt did not succeed and watchdog hit us again. We will
3082 * know this if last reset request did not occur very recently (watchdog
 3083	 * timer = 5*HZ, so let us check after a sufficiently large time, say 4*5*HZ).
 3084	 * In case of a new request we reset the "reset level" to PF reset.
9de0b86f
HT
3085 * And if it is a repeat reset request of the most recent one then we
3086 * want to make sure we throttle the reset request. Therefore, we will
3087 * not allow it again before 3*HZ times.
6d4c3981 3088 */
6ae4e733
SJ
3089 if (!handle)
3090 handle = &hdev->vport[0].nic;
3091
0742ed7c 3092 if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ)))
9de0b86f 3093 return;
720bd583 3094 else if (hdev->default_reset_request)
0742ed7c 3095 hdev->reset_level =
720bd583
HT
3096 hclge_get_reset_level(hdev,
3097 &hdev->default_reset_request);
0742ed7c
HT
3098 else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
3099 hdev->reset_level = HNAE3_FUNC_RESET;
4ed340ab 3100
6d4c3981 3101	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d",
0742ed7c 3102 hdev->reset_level);
6d4c3981
SM
3103
3104 /* request reset & schedule reset task */
0742ed7c 3105 set_bit(hdev->reset_level, &hdev->reset_request);
6d4c3981
SM
3106 hclge_reset_task_schedule(hdev);
3107
0742ed7c
HT
3108 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3109 hdev->reset_level++;
4ed340ab
L
3110}
3111
720bd583
HT
3112static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3113 enum hnae3_reset_type rst_type)
3114{
3115 struct hclge_dev *hdev = ae_dev->priv;
3116
3117 set_bit(rst_type, &hdev->default_reset_request);
3118}
3119
65e41e7e
HT
3120static void hclge_reset_timer(struct timer_list *t)
3121{
3122 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3123
3124 dev_info(&hdev->pdev->dev,
3125 "triggering global reset in reset timer\n");
3126 set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
3127 hclge_reset_event(hdev->pdev, NULL);
3128}
3129
4ed340ab
L
3130static void hclge_reset_subtask(struct hclge_dev *hdev)
3131{
f2f432f2
SM
3132 /* check if there is any ongoing reset in the hardware. This status can
3133 * be checked from reset_pending. If there is then, we need to wait for
3134 * hardware to complete reset.
3135 * a. If we are able to figure out in reasonable time that hardware
 3136	 * has fully completed the reset, then we can proceed with driver, client
3137 * reset.
3138 * b. else, we can come back later to check this status so re-sched
3139 * now.
3140 */
0742ed7c 3141 hdev->last_reset_time = jiffies;
f2f432f2
SM
3142 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
3143 if (hdev->reset_type != HNAE3_NONE_RESET)
3144 hclge_reset(hdev);
4ed340ab 3145
f2f432f2
SM
3146 /* check if we got any *new* reset requests to be honored */
3147 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
3148 if (hdev->reset_type != HNAE3_NONE_RESET)
3149 hclge_do_reset(hdev);
4ed340ab 3150
4ed340ab
L
3151 hdev->reset_type = HNAE3_NONE_RESET;
3152}
3153
cb1b9f77 3154static void hclge_reset_service_task(struct work_struct *work)
466b0c00 3155{
cb1b9f77
SM
3156 struct hclge_dev *hdev =
3157 container_of(work, struct hclge_dev, rst_service_task);
3158
3159 if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3160 return;
3161
3162 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
3163
4ed340ab 3164 hclge_reset_subtask(hdev);
cb1b9f77
SM
3165
3166 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
466b0c00
L
3167}
3168
c1a81619
SM
3169static void hclge_mailbox_service_task(struct work_struct *work)
3170{
3171 struct hclge_dev *hdev =
3172 container_of(work, struct hclge_dev, mbx_service_task);
3173
3174 if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3175 return;
3176
3177 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
3178
3179 hclge_mbx_handler(hdev);
3180
3181 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3182}
3183
a6d818e3
YL
3184static void hclge_update_vport_alive(struct hclge_dev *hdev)
3185{
3186 int i;
3187
3188 /* start from vport 1 for PF is always alive */
3189 for (i = 1; i < hdev->num_alloc_vport; i++) {
3190 struct hclge_vport *vport = &hdev->vport[i];
3191
3192 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3193 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
818f1675
YL
3194
3195 /* If vf is not alive, set to default value */
3196 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3197 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
a6d818e3
YL
3198 }
3199}
3200
46a3df9f
S
3201static void hclge_service_task(struct work_struct *work)
3202{
3203 struct hclge_dev *hdev =
3204 container_of(work, struct hclge_dev, service_task);
3205
c5f65480
JS
3206 if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
3207 hclge_update_stats_for_all(hdev);
3208 hdev->hw_stats.stats_timer = 0;
3209 }
3210
46a3df9f
S
3211 hclge_update_speed_duplex(hdev);
3212 hclge_update_link_status(hdev);
a6d818e3 3213 hclge_update_vport_alive(hdev);
46a3df9f
S
3214 hclge_service_complete(hdev);
3215}
3216
46a3df9f
S
3217struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
3218{
3219 /* VF handle has no client */
3220 if (!handle->client)
3221 return container_of(handle, struct hclge_vport, nic);
3222 else if (handle->client->type == HNAE3_CLIENT_ROCE)
3223 return container_of(handle, struct hclge_vport, roce);
3224 else
3225 return container_of(handle, struct hclge_vport, nic);
3226}
3227
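/* Hand out up to @vector_num unused vectors (vector 0 is reserved for
 * misc interrupts) to the requesting vport, recording the owning vport
 * and irq number for each; returns the number actually allocated.
 */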
3228static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
3229 struct hnae3_vector_info *vector_info)
3230{
3231 struct hclge_vport *vport = hclge_get_vport(handle);
3232 struct hnae3_vector_info *vector = vector_info;
3233 struct hclge_dev *hdev = vport->back;
3234 int alloc = 0;
3235 int i, j;
3236
3237 vector_num = min(hdev->num_msi_left, vector_num);
3238
3239 for (j = 0; j < vector_num; j++) {
3240 for (i = 1; i < hdev->num_msi; i++) {
3241 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
3242 vector->vector = pci_irq_vector(hdev->pdev, i);
3243 vector->io_addr = hdev->hw.io_base +
3244 HCLGE_VECTOR_REG_BASE +
3245 (i - 1) * HCLGE_VECTOR_REG_OFFSET +
3246 vport->vport_id *
3247 HCLGE_VECTOR_VF_OFFSET;
3248 hdev->vector_status[i] = vport->vport_id;
887c3820 3249 hdev->vector_irq[i] = vector->vector;
46a3df9f
S
3250
3251 vector++;
3252 alloc++;
3253
3254 break;
3255 }
3256 }
3257 }
3258 hdev->num_msi_left -= alloc;
3259 hdev->num_msi_used += alloc;
3260
3261 return alloc;
3262}
3263
3264static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
3265{
3266 int i;
3267
887c3820
SM
3268 for (i = 0; i < hdev->num_msi; i++)
3269 if (vector == hdev->vector_irq[i])
3270 return i;
3271
46a3df9f
S
3272 return -EINVAL;
3273}
3274
0d3e6631
YL
3275static int hclge_put_vector(struct hnae3_handle *handle, int vector)
3276{
3277 struct hclge_vport *vport = hclge_get_vport(handle);
3278 struct hclge_dev *hdev = vport->back;
3279 int vector_id;
3280
3281 vector_id = hclge_get_vector_index(hdev, vector);
3282 if (vector_id < 0) {
3283 dev_err(&hdev->pdev->dev,
3284 "Get vector index fail. vector_id =%d\n", vector_id);
3285 return vector_id;
3286 }
3287
3288 hclge_free_vector(hdev, vector_id);
3289
3290 return 0;
3291}
3292
46a3df9f
S
3293static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
3294{
3295 return HCLGE_RSS_KEY_SIZE;
3296}
3297
3298static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
3299{
3300 return HCLGE_RSS_IND_TBL_SIZE;
3301}
3302
46a3df9f
S
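/* Write the RSS hash algorithm and hash key to hardware. The key is sent
 * in three chunks of HCLGE_RSS_HASH_KEY_NUM bytes, with the last chunk
 * holding the remainder.
 */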
3303static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
3304 const u8 hfunc, const u8 *key)
3305{
d44f9b63 3306 struct hclge_rss_config_cmd *req;
46a3df9f
S
3307 struct hclge_desc desc;
3308 int key_offset;
3309 int key_size;
3310 int ret;
3311
d44f9b63 3312 req = (struct hclge_rss_config_cmd *)desc.data;
46a3df9f
S
3313
3314 for (key_offset = 0; key_offset < 3; key_offset++) {
3315 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
3316 false);
3317
3318 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
3319 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
3320
3321 if (key_offset == 2)
3322 key_size =
3323 HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
3324 else
3325 key_size = HCLGE_RSS_HASH_KEY_NUM;
3326
3327 memcpy(req->hash_key,
3328 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
3329
3330 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3331 if (ret) {
3332 dev_err(&hdev->pdev->dev,
3333 "Configure RSS config fail, status = %d\n",
3334 ret);
3335 return ret;
3336 }
3337 }
3338 return 0;
3339}
3340
89523cfa 3341static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
46a3df9f 3342{
d44f9b63 3343 struct hclge_rss_indirection_table_cmd *req;
46a3df9f
S
3344 struct hclge_desc desc;
3345 int i, j;
3346 int ret;
3347
d44f9b63 3348 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
46a3df9f
S
3349
3350 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
3351 hclge_cmd_setup_basic_desc
3352 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
3353
a90bb9a5
YL
3354 req->start_table_index =
3355 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
3356 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
46a3df9f
S
3357
3358 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
3359 req->rss_result[j] =
3360 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
3361
3362 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3363 if (ret) {
3364 dev_err(&hdev->pdev->dev,
 3365				"Configure rss indir table fail, status = %d\n",
3366 ret);
3367 return ret;
3368 }
3369 }
3370 return 0;
3371}
3372
3373static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
3374 u16 *tc_size, u16 *tc_offset)
3375{
d44f9b63 3376 struct hclge_rss_tc_mode_cmd *req;
46a3df9f
S
3377 struct hclge_desc desc;
3378 int ret;
3379 int i;
3380
3381 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
d44f9b63 3382 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
46a3df9f
S
3383
3384 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
a90bb9a5
YL
3385 u16 mode = 0;
3386
e4e87715
PL
3387 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
3388 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
3389 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
3390 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
3391 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
a90bb9a5
YL
3392
3393 req->rss_tc_mode[i] = cpu_to_le16(mode);
46a3df9f
S
3394 }
3395
3396 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 3397 if (ret)
46a3df9f
S
3398 dev_err(&hdev->pdev->dev,
3399 "Configure rss tc mode fail, status = %d\n", ret);
46a3df9f 3400
3f639907 3401 return ret;
46a3df9f
S
3402}
3403
232fc64b
PL
3404static void hclge_get_rss_type(struct hclge_vport *vport)
3405{
3406 if (vport->rss_tuple_sets.ipv4_tcp_en ||
3407 vport->rss_tuple_sets.ipv4_udp_en ||
3408 vport->rss_tuple_sets.ipv4_sctp_en ||
3409 vport->rss_tuple_sets.ipv6_tcp_en ||
3410 vport->rss_tuple_sets.ipv6_udp_en ||
3411 vport->rss_tuple_sets.ipv6_sctp_en)
3412 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
3413 else if (vport->rss_tuple_sets.ipv4_fragment_en ||
3414 vport->rss_tuple_sets.ipv6_fragment_en)
3415 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
3416 else
3417 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
3418}
3419
46a3df9f
S
3420static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
3421{
d44f9b63 3422 struct hclge_rss_input_tuple_cmd *req;
46a3df9f
S
3423 struct hclge_desc desc;
3424 int ret;
3425
3426 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3427
d44f9b63 3428 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
6f2af429
YL
3429
3430 /* Get the tuple cfg from pf */
3431 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
3432 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
3433 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
3434 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
3435 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
3436 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
3437 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
3438 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
232fc64b 3439 hclge_get_rss_type(&hdev->vport[0]);
46a3df9f 3440 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 3441 if (ret)
46a3df9f
S
3442 dev_err(&hdev->pdev->dev,
3443 "Configure rss input fail, status = %d\n", ret);
3f639907 3444 return ret;
46a3df9f
S
3445}
3446
3447static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
3448 u8 *key, u8 *hfunc)
3449{
3450 struct hclge_vport *vport = hclge_get_vport(handle);
46a3df9f
S
3451 int i;
3452
3453 /* Get hash algorithm */
775501a1
JS
3454 if (hfunc) {
3455 switch (vport->rss_algo) {
3456 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
3457 *hfunc = ETH_RSS_HASH_TOP;
3458 break;
3459 case HCLGE_RSS_HASH_ALGO_SIMPLE:
3460 *hfunc = ETH_RSS_HASH_XOR;
3461 break;
3462 default:
3463 *hfunc = ETH_RSS_HASH_UNKNOWN;
3464 break;
3465 }
3466 }
46a3df9f
S
3467
3468 /* Get the RSS Key required by the user */
3469 if (key)
3470 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
3471
3472 /* Get indirect table */
3473 if (indir)
3474 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3475 indir[i] = vport->rss_indirection_tbl[i];
3476
3477 return 0;
3478}
3479
3480static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
3481 const u8 *key, const u8 hfunc)
3482{
3483 struct hclge_vport *vport = hclge_get_vport(handle);
3484 struct hclge_dev *hdev = vport->back;
3485 u8 hash_algo;
3486 int ret, i;
3487
 3488	/* Set the RSS Hash Key if specified by the user */
3489 if (key) {
775501a1
JS
3490 switch (hfunc) {
3491 case ETH_RSS_HASH_TOP:
46a3df9f 3492 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
775501a1
JS
3493 break;
3494 case ETH_RSS_HASH_XOR:
3495 hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3496 break;
3497 case ETH_RSS_HASH_NO_CHANGE:
3498 hash_algo = vport->rss_algo;
3499 break;
3500 default:
46a3df9f 3501 return -EINVAL;
775501a1
JS
3502 }
3503
46a3df9f
S
3504 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
3505 if (ret)
3506 return ret;
89523cfa
YL
3507
3508	/* Update the shadow RSS key with the user specified key */
3509 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
3510 vport->rss_algo = hash_algo;
46a3df9f
S
3511 }
3512
3513 /* Update the shadow RSS table with user specified qids */
3514 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3515 vport->rss_indirection_tbl[i] = indir[i];
3516
3517 /* Update the hardware */
89523cfa 3518 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
46a3df9f
S
3519}
3520
f7db940a
L
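/* Translate the ethtool RXH_* bits in nfc->data into the driver's
 * HCLGE_*_BIT tuple mask; e.g. RXH_IP_SRC | RXH_L4_B_0_1 maps to
 * HCLGE_S_IP_BIT | HCLGE_S_PORT_BIT (see the bit checks below).
 */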
3521static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
3522{
3523 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
3524
3525 if (nfc->data & RXH_L4_B_2_3)
3526 hash_sets |= HCLGE_D_PORT_BIT;
3527 else
3528 hash_sets &= ~HCLGE_D_PORT_BIT;
3529
3530 if (nfc->data & RXH_IP_SRC)
3531 hash_sets |= HCLGE_S_IP_BIT;
3532 else
3533 hash_sets &= ~HCLGE_S_IP_BIT;
3534
3535 if (nfc->data & RXH_IP_DST)
3536 hash_sets |= HCLGE_D_IP_BIT;
3537 else
3538 hash_sets &= ~HCLGE_D_IP_BIT;
3539
3540 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
3541 hash_sets |= HCLGE_V_TAG_BIT;
3542
3543 return hash_sets;
3544}
3545
3546static int hclge_set_rss_tuple(struct hnae3_handle *handle,
3547 struct ethtool_rxnfc *nfc)
3548{
3549 struct hclge_vport *vport = hclge_get_vport(handle);
3550 struct hclge_dev *hdev = vport->back;
3551 struct hclge_rss_input_tuple_cmd *req;
3552 struct hclge_desc desc;
3553 u8 tuple_sets;
3554 int ret;
3555
3556 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
3557 RXH_L4_B_0_1 | RXH_L4_B_2_3))
3558 return -EINVAL;
3559
3560 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
6f2af429 3561 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
f7db940a 3562
6f2af429
YL
3563 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
3564 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
3565 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
3566 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
3567 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
3568 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
3569 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
3570 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
f7db940a
L
3571
3572 tuple_sets = hclge_get_rss_hash_bits(nfc);
3573 switch (nfc->flow_type) {
3574 case TCP_V4_FLOW:
3575 req->ipv4_tcp_en = tuple_sets;
3576 break;
3577 case TCP_V6_FLOW:
3578 req->ipv6_tcp_en = tuple_sets;
3579 break;
3580 case UDP_V4_FLOW:
3581 req->ipv4_udp_en = tuple_sets;
3582 break;
3583 case UDP_V6_FLOW:
3584 req->ipv6_udp_en = tuple_sets;
3585 break;
3586 case SCTP_V4_FLOW:
3587 req->ipv4_sctp_en = tuple_sets;
3588 break;
3589 case SCTP_V6_FLOW:
3590 if ((nfc->data & RXH_L4_B_0_1) ||
3591 (nfc->data & RXH_L4_B_2_3))
3592 return -EINVAL;
3593
3594 req->ipv6_sctp_en = tuple_sets;
3595 break;
3596 case IPV4_FLOW:
3597 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3598 break;
3599 case IPV6_FLOW:
3600 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3601 break;
3602 default:
3603 return -EINVAL;
3604 }
3605
3606 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6f2af429 3607 if (ret) {
f7db940a
L
3608 dev_err(&hdev->pdev->dev,
3609 "Set rss tuple fail, status = %d\n", ret);
6f2af429
YL
3610 return ret;
3611 }
f7db940a 3612
6f2af429
YL
3613 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
3614 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
3615 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
3616 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
3617 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
3618 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
3619 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
3620 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
232fc64b 3621 hclge_get_rss_type(vport);
6f2af429 3622 return 0;
f7db940a
L
3623}
3624
07d29954
L
3625static int hclge_get_rss_tuple(struct hnae3_handle *handle,
3626 struct ethtool_rxnfc *nfc)
3627{
3628 struct hclge_vport *vport = hclge_get_vport(handle);
07d29954 3629 u8 tuple_sets;
07d29954
L
3630
3631 nfc->data = 0;
3632
07d29954
L
3633 switch (nfc->flow_type) {
3634 case TCP_V4_FLOW:
6f2af429 3635 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
07d29954
L
3636 break;
3637 case UDP_V4_FLOW:
6f2af429 3638 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
07d29954
L
3639 break;
3640 case TCP_V6_FLOW:
6f2af429 3641 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
07d29954
L
3642 break;
3643 case UDP_V6_FLOW:
6f2af429 3644 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
07d29954
L
3645 break;
3646 case SCTP_V4_FLOW:
6f2af429 3647 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
07d29954
L
3648 break;
3649 case SCTP_V6_FLOW:
6f2af429 3650 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
07d29954
L
3651 break;
3652 case IPV4_FLOW:
3653 case IPV6_FLOW:
3654 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
3655 break;
3656 default:
3657 return -EINVAL;
3658 }
3659
3660 if (!tuple_sets)
3661 return 0;
3662
3663 if (tuple_sets & HCLGE_D_PORT_BIT)
3664 nfc->data |= RXH_L4_B_2_3;
3665 if (tuple_sets & HCLGE_S_PORT_BIT)
3666 nfc->data |= RXH_L4_B_0_1;
3667 if (tuple_sets & HCLGE_D_IP_BIT)
3668 nfc->data |= RXH_IP_DST;
3669 if (tuple_sets & HCLGE_S_IP_BIT)
3670 nfc->data |= RXH_IP_SRC;
3671
3672 return 0;
3673}
3674
46a3df9f
S
3675static int hclge_get_tc_size(struct hnae3_handle *handle)
3676{
3677 struct hclge_vport *vport = hclge_get_vport(handle);
3678 struct hclge_dev *hdev = vport->back;
3679
3680 return hdev->rss_size_max;
3681}
3682
77f255c1 3683int hclge_rss_init_hw(struct hclge_dev *hdev)
46a3df9f 3684{
46a3df9f 3685 struct hclge_vport *vport = hdev->vport;
268f5dfa
YL
3686 u8 *rss_indir = vport[0].rss_indirection_tbl;
3687 u16 rss_size = vport[0].alloc_rss_size;
3688 u8 *key = vport[0].rss_hash_key;
3689 u8 hfunc = vport[0].rss_algo;
46a3df9f 3690 u16 tc_offset[HCLGE_MAX_TC_NUM];
46a3df9f
S
3691 u16 tc_valid[HCLGE_MAX_TC_NUM];
3692 u16 tc_size[HCLGE_MAX_TC_NUM];
268f5dfa
YL
3693 u16 roundup_size;
3694 int i, ret;
68ece54e 3695
46a3df9f
S
3696 ret = hclge_set_rss_indir_table(hdev, rss_indir);
3697 if (ret)
268f5dfa 3698 return ret;
46a3df9f 3699
46a3df9f
S
3700 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
3701 if (ret)
268f5dfa 3702 return ret;
46a3df9f
S
3703
3704 ret = hclge_set_rss_input_tuple(hdev);
3705 if (ret)
268f5dfa 3706 return ret;
46a3df9f 3707
68ece54e
YL
3708	/* Each TC has the same queue size, and the tc_size written to hardware
3709	 * is the log2 of rss_size rounded up to a power of two; the actual
3710	 * queue size is limited by the indirection table.
3711	 */
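	/* For example, rss_size = 24 is rounded up to 32, so the tc_size
	 * written to hardware is ilog2(32) = 5.
	 */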
3712 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
3713 dev_err(&hdev->pdev->dev,
3714 "Configure rss tc size failed, invalid TC_SIZE = %d\n",
3715 rss_size);
268f5dfa 3716 return -EINVAL;
68ece54e
YL
3717 }
3718
3719 roundup_size = roundup_pow_of_two(rss_size);
3720 roundup_size = ilog2(roundup_size);
3721
46a3df9f 3722 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
68ece54e 3723 tc_valid[i] = 0;
46a3df9f 3724
68ece54e
YL
3725 if (!(hdev->hw_tc_map & BIT(i)))
3726 continue;
3727
3728 tc_valid[i] = 1;
3729 tc_size[i] = roundup_size;
3730 tc_offset[i] = rss_size * i;
46a3df9f 3731 }
68ece54e 3732
268f5dfa
YL
3733 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
3734}
46a3df9f 3735
268f5dfa
YL
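/* Fill each vport's default indirection table so that entry i points to
 * queue (i % alloc_rss_size), i.e. the allocated RSS queues are visited
 * round-robin across the whole table.
 */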
3736void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
3737{
3738 struct hclge_vport *vport = hdev->vport;
3739 int i, j;
46a3df9f 3740
268f5dfa
YL
3741 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
3742 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3743 vport[j].rss_indirection_tbl[i] =
3744 i % vport[j].alloc_rss_size;
3745 }
3746}
3747
3748static void hclge_rss_init_cfg(struct hclge_dev *hdev)
3749{
472d7ece 3750 int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
268f5dfa 3751 struct hclge_vport *vport = hdev->vport;
472d7ece
JS
3752
3753 if (hdev->pdev->revision >= 0x21)
3754 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
268f5dfa 3755
268f5dfa
YL
3756 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3757 vport[i].rss_tuple_sets.ipv4_tcp_en =
3758 HCLGE_RSS_INPUT_TUPLE_OTHER;
3759 vport[i].rss_tuple_sets.ipv4_udp_en =
3760 HCLGE_RSS_INPUT_TUPLE_OTHER;
3761 vport[i].rss_tuple_sets.ipv4_sctp_en =
3762 HCLGE_RSS_INPUT_TUPLE_SCTP;
3763 vport[i].rss_tuple_sets.ipv4_fragment_en =
3764 HCLGE_RSS_INPUT_TUPLE_OTHER;
3765 vport[i].rss_tuple_sets.ipv6_tcp_en =
3766 HCLGE_RSS_INPUT_TUPLE_OTHER;
3767 vport[i].rss_tuple_sets.ipv6_udp_en =
3768 HCLGE_RSS_INPUT_TUPLE_OTHER;
3769 vport[i].rss_tuple_sets.ipv6_sctp_en =
3770 HCLGE_RSS_INPUT_TUPLE_SCTP;
3771 vport[i].rss_tuple_sets.ipv6_fragment_en =
3772 HCLGE_RSS_INPUT_TUPLE_OTHER;
3773
472d7ece 3774 vport[i].rss_algo = rss_algo;
ea739c90 3775
472d7ece
JS
3776 memcpy(vport[i].rss_hash_key, hclge_hash_key,
3777 HCLGE_RSS_KEY_SIZE);
268f5dfa
YL
3778 }
3779
3780 hclge_rss_indir_init_cfg(hdev);
46a3df9f
S
3781}
3782
84e095d6
SM
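/* Map (en == true) or unmap (en == false) every ring in ring_chain onto the
 * given vector; rings are batched into firmware commands of at most
 * HCLGE_VECTOR_ELEMENTS_PER_CMD entries each.
 */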
3783int hclge_bind_ring_with_vector(struct hclge_vport *vport,
3784 int vector_id, bool en,
3785 struct hnae3_ring_chain_node *ring_chain)
46a3df9f
S
3786{
3787 struct hclge_dev *hdev = vport->back;
46a3df9f
S
3788 struct hnae3_ring_chain_node *node;
3789 struct hclge_desc desc;
84e095d6
SM
3790 struct hclge_ctrl_vector_chain_cmd *req
3791 = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
3792 enum hclge_cmd_status status;
3793 enum hclge_opcode_type op;
3794 u16 tqp_type_and_id;
46a3df9f
S
3795 int i;
3796
84e095d6
SM
3797 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
3798 hclge_cmd_setup_basic_desc(&desc, op, false);
46a3df9f
S
3799 req->int_vector_id = vector_id;
3800
3801 i = 0;
3802 for (node = ring_chain; node; node = node->next) {
84e095d6 3803 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
e4e87715
PL
3804 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
3805 HCLGE_INT_TYPE_S,
3806 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
3807 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
3808 HCLGE_TQP_ID_S, node->tqp_index);
3809 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
3810 HCLGE_INT_GL_IDX_S,
3811 hnae3_get_field(node->int_gl_idx,
3812 HNAE3_RING_GL_IDX_M,
3813 HNAE3_RING_GL_IDX_S));
84e095d6 3814 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
46a3df9f
S
3815 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
3816 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
84e095d6 3817 req->vfid = vport->vport_id;
46a3df9f 3818
84e095d6
SM
3819 status = hclge_cmd_send(&hdev->hw, &desc, 1);
3820 if (status) {
46a3df9f
S
3821 dev_err(&hdev->pdev->dev,
3822 "Map TQP fail, status is %d.\n",
84e095d6
SM
3823 status);
3824 return -EIO;
46a3df9f
S
3825 }
3826 i = 0;
3827
3828 hclge_cmd_setup_basic_desc(&desc,
84e095d6 3829 op,
46a3df9f
S
3830 false);
3831 req->int_vector_id = vector_id;
3832 }
3833 }
3834
3835 if (i > 0) {
3836 req->int_cause_num = i;
84e095d6
SM
3837 req->vfid = vport->vport_id;
3838 status = hclge_cmd_send(&hdev->hw, &desc, 1);
3839 if (status) {
46a3df9f 3840 dev_err(&hdev->pdev->dev,
84e095d6
SM
3841 "Map TQP fail, status is %d.\n", status);
3842 return -EIO;
46a3df9f
S
3843 }
3844 }
3845
3846 return 0;
3847}
3848
84e095d6
SM
3849static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
3850 int vector,
3851 struct hnae3_ring_chain_node *ring_chain)
46a3df9f
S
3852{
3853 struct hclge_vport *vport = hclge_get_vport(handle);
3854 struct hclge_dev *hdev = vport->back;
3855 int vector_id;
3856
3857 vector_id = hclge_get_vector_index(hdev, vector);
3858 if (vector_id < 0) {
3859 dev_err(&hdev->pdev->dev,
84e095d6 3860 "Get vector index fail. vector_id =%d\n", vector_id);
46a3df9f
S
3861 return vector_id;
3862 }
3863
84e095d6 3864 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
46a3df9f
S
3865}
3866
84e095d6
SM
3867static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
3868 int vector,
3869 struct hnae3_ring_chain_node *ring_chain)
46a3df9f
S
3870{
3871 struct hclge_vport *vport = hclge_get_vport(handle);
3872 struct hclge_dev *hdev = vport->back;
84e095d6 3873 int vector_id, ret;
46a3df9f 3874
b50ae26c
PL
3875 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3876 return 0;
3877
46a3df9f
S
3878 vector_id = hclge_get_vector_index(hdev, vector);
3879 if (vector_id < 0) {
3880 dev_err(&handle->pdev->dev,
3881 "Get vector index fail. ret =%d\n", vector_id);
3882 return vector_id;
3883 }
3884
84e095d6 3885 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
0d3e6631 3886 if (ret)
84e095d6
SM
3887 dev_err(&handle->pdev->dev,
3888 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
3889 vector_id,
3890 ret);
46a3df9f 3891
0d3e6631 3892 return ret;
46a3df9f
S
3893}
3894
3895int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
3896 struct hclge_promisc_param *param)
3897{
d44f9b63 3898 struct hclge_promisc_cfg_cmd *req;
46a3df9f
S
3899 struct hclge_desc desc;
3900 int ret;
3901
3902 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
3903
d44f9b63 3904 req = (struct hclge_promisc_cfg_cmd *)desc.data;
46a3df9f 3905 req->vf_id = param->vf_id;
96c0e861
PL
3906
3907	/* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
3908	 * pdev revision(0x20); newer revisions support them. Setting these two
3909	 * fields does not cause an error when the driver sends the command to
3910	 * the firmware on revision(0x20).
3911 */
3912 req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
3913 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
46a3df9f
S
3914
3915 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 3916 if (ret)
46a3df9f
S
3917 dev_err(&hdev->pdev->dev,
3918 "Set promisc mode fail, status is %d.\n", ret);
3f639907
JS
3919
3920 return ret;
46a3df9f
S
3921}
3922
3923void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
3924 bool en_mc, bool en_bc, int vport_id)
3925{
3926 if (!param)
3927 return;
3928
3929 memset(param, 0, sizeof(struct hclge_promisc_param));
3930 if (en_uc)
3931 param->enable = HCLGE_PROMISC_EN_UC;
3932 if (en_mc)
3933 param->enable |= HCLGE_PROMISC_EN_MC;
3934 if (en_bc)
3935 param->enable |= HCLGE_PROMISC_EN_BC;
3936 param->vf_id = vport_id;
3937}
3938
7fa6be4f
HT
3939static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
3940 bool en_mc_pmc)
46a3df9f
S
3941{
3942 struct hclge_vport *vport = hclge_get_vport(handle);
3943 struct hclge_dev *hdev = vport->back;
3944 struct hclge_promisc_param param;
28673b33 3945 bool en_bc_pmc = true;
46a3df9f 3946
28673b33
JS
3947	/* For revision 0x20, if broadcast promisc is enabled, the vlan filter
3948	 * is always bypassed. So broadcast promisc should stay disabled until
3949	 * the user enables promisc mode.
3950 */
3951 if (handle->pdev->revision == 0x20)
3952 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
3953
3954 hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
3b75c3df 3955 vport->vport_id);
7fa6be4f 3956 return hclge_cmd_set_promisc_mode(hdev, &param);
46a3df9f
S
3957}
3958
d695964d
JS
3959static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
3960{
3961 struct hclge_get_fd_mode_cmd *req;
3962 struct hclge_desc desc;
3963 int ret;
3964
3965 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
3966
3967 req = (struct hclge_get_fd_mode_cmd *)desc.data;
3968
3969 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3970 if (ret) {
3971 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
3972 return ret;
3973 }
3974
3975 *fd_mode = req->mode;
3976
3977 return ret;
3978}
3979
3980static int hclge_get_fd_allocation(struct hclge_dev *hdev,
3981 u32 *stage1_entry_num,
3982 u32 *stage2_entry_num,
3983 u16 *stage1_counter_num,
3984 u16 *stage2_counter_num)
3985{
3986 struct hclge_get_fd_allocation_cmd *req;
3987 struct hclge_desc desc;
3988 int ret;
3989
3990 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
3991
3992 req = (struct hclge_get_fd_allocation_cmd *)desc.data;
3993
3994 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3995 if (ret) {
3996 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
3997 ret);
3998 return ret;
3999 }
4000
4001 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4002 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4003 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4004 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4005
4006 return ret;
4007}
4008
4009static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4010{
4011 struct hclge_set_fd_key_config_cmd *req;
4012 struct hclge_fd_key_cfg *stage;
4013 struct hclge_desc desc;
4014 int ret;
4015
4016 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4017
4018 req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4019 stage = &hdev->fd_cfg.key_cfg[stage_num];
4020 req->stage = stage_num;
4021 req->key_select = stage->key_sel;
4022 req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4023 req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4024 req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4025 req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4026 req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4027 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4028
4029 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4030 if (ret)
4031 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4032
4033 return ret;
4034}
4035
4036static int hclge_init_fd_config(struct hclge_dev *hdev)
4037{
4038#define LOW_2_WORDS 0x03
4039 struct hclge_fd_key_cfg *key_cfg;
4040 int ret;
4041
4042 if (!hnae3_dev_fd_supported(hdev))
4043 return 0;
4044
4045 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4046 if (ret)
4047 return ret;
4048
4049 switch (hdev->fd_cfg.fd_mode) {
4050 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4051 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4052 break;
4053 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4054 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4055 break;
4056 default:
4057 dev_err(&hdev->pdev->dev,
4058 "Unsupported flow director mode %d\n",
4059 hdev->fd_cfg.fd_mode);
4060 return -EOPNOTSUPP;
4061 }
4062
d695964d
JS
4063 hdev->fd_cfg.proto_support =
4064 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4065 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4066 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4067	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4068 key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4069 key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4070 key_cfg->outer_sipv6_word_en = 0;
4071 key_cfg->outer_dipv6_word_en = 0;
4072
4073 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4074 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4075 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4076 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4077
4078	/* If the max 400-bit key is in use, we can also support tuples for ether type */
4079 if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4080 hdev->fd_cfg.proto_support |= ETHER_FLOW;
4081 key_cfg->tuple_active |=
4082 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4083 }
4084
4085	/* roce_type is used to filter roce frames;
4086	 * dst_vport is used to specify the rule's destination vport
4087 */
4088 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4089
4090 ret = hclge_get_fd_allocation(hdev,
4091 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4092 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4093 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4094 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4095 if (ret)
4096 return ret;
4097
4098 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4099}
4100
11732868
JS
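/* Program one flow director TCAM entry. The key is split across the
 * tcam_data fields of three chained descriptors (req1/req2/req3 below);
 * sel_x selects whether the x or y half of the key is written.
 */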
4101static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4102 int loc, u8 *key, bool is_add)
4103{
4104 struct hclge_fd_tcam_config_1_cmd *req1;
4105 struct hclge_fd_tcam_config_2_cmd *req2;
4106 struct hclge_fd_tcam_config_3_cmd *req3;
4107 struct hclge_desc desc[3];
4108 int ret;
4109
4110 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4111 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4112 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4113 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4114 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4115
4116 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4117 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4118 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4119
4120 req1->stage = stage;
4121 req1->xy_sel = sel_x ? 1 : 0;
4122 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4123 req1->index = cpu_to_le32(loc);
4124 req1->entry_vld = sel_x ? is_add : 0;
4125
4126 if (key) {
4127 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4128 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4129 sizeof(req2->tcam_data));
4130 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4131 sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4132 }
4133
4134 ret = hclge_cmd_send(&hdev->hw, desc, 3);
4135 if (ret)
4136 dev_err(&hdev->pdev->dev,
4137 "config tcam key fail, ret=%d\n",
4138 ret);
4139
4140 return ret;
4141}
4142
4143static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4144 struct hclge_fd_ad_data *action)
4145{
4146 struct hclge_fd_ad_config_cmd *req;
4147 struct hclge_desc desc;
4148 u64 ad_data = 0;
4149 int ret;
4150
4151 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4152
4153 req = (struct hclge_fd_ad_config_cmd *)desc.data;
4154 req->index = cpu_to_le32(loc);
4155 req->stage = stage;
4156
4157 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4158 action->write_rule_id_to_bd);
4159 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4160 action->rule_id);
4161 ad_data <<= 32;
4162 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4163 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4164 action->forward_to_direct_queue);
4165 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4166 action->queue_id);
4167 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4168 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4169 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4170 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4171 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4172 action->counter_id);
4173
4174 req->ad_data = cpu_to_le64(ad_data);
4175 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4176 if (ret)
4177 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4178
4179 return ret;
4180}
4181
4182static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4183 struct hclge_fd_rule *rule)
4184{
4185 u16 tmp_x_s, tmp_y_s;
4186 u32 tmp_x_l, tmp_y_l;
4187 int i;
4188
4189 if (rule->unused_tuple & tuple_bit)
4190 return true;
4191
4192 switch (tuple_bit) {
4193 case 0:
4194 return false;
4195 case BIT(INNER_DST_MAC):
4196 for (i = 0; i < 6; i++) {
4197 calc_x(key_x[5 - i], rule->tuples.dst_mac[i],
4198 rule->tuples_mask.dst_mac[i]);
4199 calc_y(key_y[5 - i], rule->tuples.dst_mac[i],
4200 rule->tuples_mask.dst_mac[i]);
4201 }
4202
4203 return true;
4204 case BIT(INNER_SRC_MAC):
4205 for (i = 0; i < 6; i++) {
4206			calc_x(key_x[5 - i], rule->tuples.src_mac[i],
4207			       rule->tuples_mask.src_mac[i]);
4208			calc_y(key_y[5 - i], rule->tuples.src_mac[i],
4209			       rule->tuples_mask.src_mac[i]);
4210 }
4211
4212 return true;
4213 case BIT(INNER_VLAN_TAG_FST):
4214 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
4215 rule->tuples_mask.vlan_tag1);
4216 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
4217 rule->tuples_mask.vlan_tag1);
4218 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4219 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4220
4221 return true;
4222 case BIT(INNER_ETH_TYPE):
4223 calc_x(tmp_x_s, rule->tuples.ether_proto,
4224 rule->tuples_mask.ether_proto);
4225 calc_y(tmp_y_s, rule->tuples.ether_proto,
4226 rule->tuples_mask.ether_proto);
4227 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4228 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4229
4230 return true;
4231 case BIT(INNER_IP_TOS):
4232 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4233 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4234
4235 return true;
4236 case BIT(INNER_IP_PROTO):
4237 calc_x(*key_x, rule->tuples.ip_proto,
4238 rule->tuples_mask.ip_proto);
4239 calc_y(*key_y, rule->tuples.ip_proto,
4240 rule->tuples_mask.ip_proto);
4241
4242 return true;
4243 case BIT(INNER_SRC_IP):
4244 calc_x(tmp_x_l, rule->tuples.src_ip[3],
4245 rule->tuples_mask.src_ip[3]);
4246 calc_y(tmp_y_l, rule->tuples.src_ip[3],
4247 rule->tuples_mask.src_ip[3]);
4248 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4249 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4250
4251 return true;
4252 case BIT(INNER_DST_IP):
4253 calc_x(tmp_x_l, rule->tuples.dst_ip[3],
4254 rule->tuples_mask.dst_ip[3]);
4255 calc_y(tmp_y_l, rule->tuples.dst_ip[3],
4256 rule->tuples_mask.dst_ip[3]);
4257 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4258 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4259
4260 return true;
4261 case BIT(INNER_SRC_PORT):
4262 calc_x(tmp_x_s, rule->tuples.src_port,
4263 rule->tuples_mask.src_port);
4264 calc_y(tmp_y_s, rule->tuples.src_port,
4265 rule->tuples_mask.src_port);
4266 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4267 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4268
4269 return true;
4270 case BIT(INNER_DST_PORT):
4271 calc_x(tmp_x_s, rule->tuples.dst_port,
4272 rule->tuples_mask.dst_port);
4273 calc_y(tmp_y_s, rule->tuples.dst_port,
4274 rule->tuples_mask.dst_port);
4275 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4276 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4277
4278 return true;
4279 default:
4280 return false;
4281 }
4282}
4283
4284static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
4285 u8 vf_id, u8 network_port_id)
4286{
4287 u32 port_number = 0;
4288
4289 if (port_type == HOST_PORT) {
4290 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
4291 pf_id);
4292 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
4293 vf_id);
4294 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
4295 } else {
4296 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
4297 HCLGE_NETWORK_PORT_ID_S, network_port_id);
4298 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
4299 }
4300
4301 return port_number;
4302}
4303
4304static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
4305 __le32 *key_x, __le32 *key_y,
4306 struct hclge_fd_rule *rule)
4307{
4308 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
4309 u8 cur_pos = 0, tuple_size, shift_bits;
4310 int i;
4311
4312 for (i = 0; i < MAX_META_DATA; i++) {
4313 tuple_size = meta_data_key_info[i].key_length;
4314 tuple_bit = key_cfg->meta_data_active & BIT(i);
4315
4316 switch (tuple_bit) {
4317 case BIT(ROCE_TYPE):
4318 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
4319 cur_pos += tuple_size;
4320 break;
4321 case BIT(DST_VPORT):
4322 port_number = hclge_get_port_number(HOST_PORT, 0,
4323 rule->vf_id, 0);
4324 hnae3_set_field(meta_data,
4325 GENMASK(cur_pos + tuple_size, cur_pos),
4326 cur_pos, port_number);
4327 cur_pos += tuple_size;
4328 break;
4329 default:
4330 break;
4331 }
4332 }
4333
4334 calc_x(tmp_x, meta_data, 0xFFFFFFFF);
4335 calc_y(tmp_y, meta_data, 0xFFFFFFFF);
4336 shift_bits = sizeof(meta_data) * 8 - cur_pos;
4337
4338 *key_x = cpu_to_le32(tmp_x << shift_bits);
4339 *key_y = cpu_to_le32(tmp_y << shift_bits);
4340}
4341
4342/* A complete key is combined with meta data key and tuple key.
4343 * Meta data key is stored at the MSB region, and tuple key is stored at
4344 * the LSB region; unused bits are filled with 0.
4345 */
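/* Concretely, hclge_config_key() below packs the bytes of the active tuples
 * from the start of key_x/key_y and writes the meta data into the last
 * MAX_META_DATA_LENGTH / 8 bytes; anything in between stays zero.
 */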
4346static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
4347 struct hclge_fd_rule *rule)
4348{
4349 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
4350 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
4351 u8 *cur_key_x, *cur_key_y;
4352 int i, ret, tuple_size;
4353 u8 meta_data_region;
4354
4355 memset(key_x, 0, sizeof(key_x));
4356 memset(key_y, 0, sizeof(key_y));
4357 cur_key_x = key_x;
4358 cur_key_y = key_y;
4359
4360 for (i = 0 ; i < MAX_TUPLE; i++) {
4361 bool tuple_valid;
4362 u32 check_tuple;
4363
4364 tuple_size = tuple_key_info[i].key_length / 8;
4365 check_tuple = key_cfg->tuple_active & BIT(i);
4366
4367 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
4368 cur_key_y, rule);
4369 if (tuple_valid) {
4370 cur_key_x += tuple_size;
4371 cur_key_y += tuple_size;
4372 }
4373 }
4374
4375 meta_data_region = hdev->fd_cfg.max_key_length / 8 -
4376 MAX_META_DATA_LENGTH / 8;
4377
4378 hclge_fd_convert_meta_data(key_cfg,
4379 (__le32 *)(key_x + meta_data_region),
4380 (__le32 *)(key_y + meta_data_region),
4381 rule);
4382
4383 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
4384 true);
4385 if (ret) {
4386 dev_err(&hdev->pdev->dev,
4387 "fd key_y config fail, loc=%d, ret=%d\n",
4388			rule->location, ret);
4389 return ret;
4390 }
4391
4392 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
4393 true);
4394 if (ret)
4395 dev_err(&hdev->pdev->dev,
4396 "fd key_x config fail, loc=%d, ret=%d\n",
4397			rule->location, ret);
4398 return ret;
4399}
4400
4401static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
4402 struct hclge_fd_rule *rule)
4403{
4404 struct hclge_fd_ad_data ad_data;
4405
4406 ad_data.ad_id = rule->location;
4407
4408 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
4409 ad_data.drop_packet = true;
4410 ad_data.forward_to_direct_queue = false;
4411 ad_data.queue_id = 0;
4412 } else {
4413 ad_data.drop_packet = false;
4414 ad_data.forward_to_direct_queue = true;
4415 ad_data.queue_id = rule->queue_id;
4416 }
4417
4418 ad_data.use_counter = false;
4419 ad_data.counter_id = 0;
4420
4421 ad_data.use_next_stage = false;
4422 ad_data.next_input_key = 0;
4423
4424 ad_data.write_rule_id_to_bd = true;
4425 ad_data.rule_id = rule->location;
4426
4427 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
4428}
4429
dd74f815
JS
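/* Validate the ethtool flow spec and build *unused, a bitmap of the tuple
 * fields this rule does not match on (later stored in rule->unused_tuple).
 */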
4430static int hclge_fd_check_spec(struct hclge_dev *hdev,
4431 struct ethtool_rx_flow_spec *fs, u32 *unused)
4432{
4433 struct ethtool_tcpip4_spec *tcp_ip4_spec;
4434 struct ethtool_usrip4_spec *usr_ip4_spec;
4435 struct ethtool_tcpip6_spec *tcp_ip6_spec;
4436 struct ethtool_usrip6_spec *usr_ip6_spec;
4437 struct ethhdr *ether_spec;
4438
4439 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4440 return -EINVAL;
4441
4442 if (!(fs->flow_type & hdev->fd_cfg.proto_support))
4443 return -EOPNOTSUPP;
4444
4445 if ((fs->flow_type & FLOW_EXT) &&
4446 (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
4447 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
4448 return -EOPNOTSUPP;
4449 }
4450
4451 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4452 case SCTP_V4_FLOW:
4453 case TCP_V4_FLOW:
4454 case UDP_V4_FLOW:
4455 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
4456 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
4457
4458 if (!tcp_ip4_spec->ip4src)
4459 *unused |= BIT(INNER_SRC_IP);
4460
4461 if (!tcp_ip4_spec->ip4dst)
4462 *unused |= BIT(INNER_DST_IP);
4463
4464 if (!tcp_ip4_spec->psrc)
4465 *unused |= BIT(INNER_SRC_PORT);
4466
4467 if (!tcp_ip4_spec->pdst)
4468 *unused |= BIT(INNER_DST_PORT);
4469
4470 if (!tcp_ip4_spec->tos)
4471 *unused |= BIT(INNER_IP_TOS);
4472
4473 break;
4474 case IP_USER_FLOW:
4475 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
4476 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4477 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4478
4479 if (!usr_ip4_spec->ip4src)
4480 *unused |= BIT(INNER_SRC_IP);
4481
4482 if (!usr_ip4_spec->ip4dst)
4483 *unused |= BIT(INNER_DST_IP);
4484
4485 if (!usr_ip4_spec->tos)
4486 *unused |= BIT(INNER_IP_TOS);
4487
4488 if (!usr_ip4_spec->proto)
4489 *unused |= BIT(INNER_IP_PROTO);
4490
4491 if (usr_ip4_spec->l4_4_bytes)
4492 return -EOPNOTSUPP;
4493
4494 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
4495 return -EOPNOTSUPP;
4496
4497 break;
4498 case SCTP_V6_FLOW:
4499 case TCP_V6_FLOW:
4500 case UDP_V6_FLOW:
4501 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
4502 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4503 BIT(INNER_IP_TOS);
4504
4505 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
4506 !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
4507 *unused |= BIT(INNER_SRC_IP);
4508
4509 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
4510 !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
4511 *unused |= BIT(INNER_DST_IP);
4512
4513 if (!tcp_ip6_spec->psrc)
4514 *unused |= BIT(INNER_SRC_PORT);
4515
4516 if (!tcp_ip6_spec->pdst)
4517 *unused |= BIT(INNER_DST_PORT);
4518
4519 if (tcp_ip6_spec->tclass)
4520 return -EOPNOTSUPP;
4521
4522 break;
4523 case IPV6_USER_FLOW:
4524 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
4525 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4526 BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
4527 BIT(INNER_DST_PORT);
4528
4529 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
4530 !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
4531 *unused |= BIT(INNER_SRC_IP);
4532
4533 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
4534 !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
4535 *unused |= BIT(INNER_DST_IP);
4536
4537 if (!usr_ip6_spec->l4_proto)
4538 *unused |= BIT(INNER_IP_PROTO);
4539
4540 if (usr_ip6_spec->tclass)
4541 return -EOPNOTSUPP;
4542
4543 if (usr_ip6_spec->l4_4_bytes)
4544 return -EOPNOTSUPP;
4545
4546 break;
4547 case ETHER_FLOW:
4548 ether_spec = &fs->h_u.ether_spec;
4549 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4550 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
4551 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
4552
4553 if (is_zero_ether_addr(ether_spec->h_source))
4554 *unused |= BIT(INNER_SRC_MAC);
4555
4556 if (is_zero_ether_addr(ether_spec->h_dest))
4557 *unused |= BIT(INNER_DST_MAC);
4558
4559 if (!ether_spec->h_proto)
4560 *unused |= BIT(INNER_ETH_TYPE);
4561
4562 break;
4563 default:
4564 return -EOPNOTSUPP;
4565 }
4566
4567 if ((fs->flow_type & FLOW_EXT)) {
4568 if (fs->h_ext.vlan_etype)
4569 return -EOPNOTSUPP;
4570 if (!fs->h_ext.vlan_tci)
4571 *unused |= BIT(INNER_VLAN_TAG_FST);
4572
4573 if (fs->m_ext.vlan_tci) {
4574 if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
4575 return -EINVAL;
4576 }
4577 } else {
4578 *unused |= BIT(INNER_VLAN_TAG_FST);
4579 }
4580
4581 if (fs->flow_type & FLOW_MAC_EXT) {
4582 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
4583 return -EOPNOTSUPP;
4584
4585 if (is_zero_ether_addr(fs->h_ext.h_dest))
4586 *unused |= BIT(INNER_DST_MAC);
4587 else
4588 *unused &= ~(BIT(INNER_DST_MAC));
4589 }
4590
4591 return 0;
4592}
4593
4594static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
4595{
4596 struct hclge_fd_rule *rule = NULL;
4597 struct hlist_node *node2;
4598
4599 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4600 if (rule->location >= location)
4601 break;
4602 }
4603
4604 return rule && rule->location == location;
4605}
4606
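/* Insert, replace or remove the rule at @location while keeping
 * hdev->fd_rule_list sorted by rule->location, and adjust
 * hdev->hclge_fd_rule_num accordingly.
 */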
4607static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
4608 struct hclge_fd_rule *new_rule,
4609 u16 location,
4610 bool is_add)
4611{
4612 struct hclge_fd_rule *rule = NULL, *parent = NULL;
4613 struct hlist_node *node2;
4614
4615 if (is_add && !new_rule)
4616 return -EINVAL;
4617
4618 hlist_for_each_entry_safe(rule, node2,
4619 &hdev->fd_rule_list, rule_node) {
4620 if (rule->location >= location)
4621 break;
4622 parent = rule;
4623 }
4624
4625 if (rule && rule->location == location) {
4626 hlist_del(&rule->rule_node);
4627 kfree(rule);
4628 hdev->hclge_fd_rule_num--;
4629
4630 if (!is_add)
4631 return 0;
4632
4633 } else if (!is_add) {
4634 dev_err(&hdev->pdev->dev,
4635 "delete fail, rule %d is inexistent\n",
4636 location);
4637 return -EINVAL;
4638 }
4639
4640 INIT_HLIST_NODE(&new_rule->rule_node);
4641
4642 if (parent)
4643 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
4644 else
4645 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
4646
4647 hdev->hclge_fd_rule_num++;
4648
4649 return 0;
4650}
4651
4652static int hclge_fd_get_tuple(struct hclge_dev *hdev,
4653 struct ethtool_rx_flow_spec *fs,
4654 struct hclge_fd_rule *rule)
4655{
4656 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
4657
4658 switch (flow_type) {
4659 case SCTP_V4_FLOW:
4660 case TCP_V4_FLOW:
4661 case UDP_V4_FLOW:
4662 rule->tuples.src_ip[3] =
4663 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
4664 rule->tuples_mask.src_ip[3] =
4665 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
4666
4667 rule->tuples.dst_ip[3] =
4668 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
4669 rule->tuples_mask.dst_ip[3] =
4670 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
4671
4672 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
4673 rule->tuples_mask.src_port =
4674 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
4675
4676 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
4677 rule->tuples_mask.dst_port =
4678 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
4679
4680 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
4681 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
4682
4683 rule->tuples.ether_proto = ETH_P_IP;
4684 rule->tuples_mask.ether_proto = 0xFFFF;
4685
4686 break;
4687 case IP_USER_FLOW:
4688 rule->tuples.src_ip[3] =
4689 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
4690 rule->tuples_mask.src_ip[3] =
4691 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
4692
4693 rule->tuples.dst_ip[3] =
4694 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
4695 rule->tuples_mask.dst_ip[3] =
4696 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
4697
4698 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
4699 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
4700
4701 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
4702 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
4703
4704 rule->tuples.ether_proto = ETH_P_IP;
4705 rule->tuples_mask.ether_proto = 0xFFFF;
4706
4707 break;
4708 case SCTP_V6_FLOW:
4709 case TCP_V6_FLOW:
4710 case UDP_V6_FLOW:
4711 be32_to_cpu_array(rule->tuples.src_ip,
4712 fs->h_u.tcp_ip6_spec.ip6src, 4);
4713 be32_to_cpu_array(rule->tuples_mask.src_ip,
4714 fs->m_u.tcp_ip6_spec.ip6src, 4);
4715
4716 be32_to_cpu_array(rule->tuples.dst_ip,
4717 fs->h_u.tcp_ip6_spec.ip6dst, 4);
4718 be32_to_cpu_array(rule->tuples_mask.dst_ip,
4719 fs->m_u.tcp_ip6_spec.ip6dst, 4);
4720
4721 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
4722 rule->tuples_mask.src_port =
4723 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
4724
4725 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
4726 rule->tuples_mask.dst_port =
4727 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
4728
4729 rule->tuples.ether_proto = ETH_P_IPV6;
4730 rule->tuples_mask.ether_proto = 0xFFFF;
4731
4732 break;
4733 case IPV6_USER_FLOW:
4734 be32_to_cpu_array(rule->tuples.src_ip,
4735 fs->h_u.usr_ip6_spec.ip6src, 4);
4736 be32_to_cpu_array(rule->tuples_mask.src_ip,
4737 fs->m_u.usr_ip6_spec.ip6src, 4);
4738
4739 be32_to_cpu_array(rule->tuples.dst_ip,
4740 fs->h_u.usr_ip6_spec.ip6dst, 4);
4741 be32_to_cpu_array(rule->tuples_mask.dst_ip,
4742 fs->m_u.usr_ip6_spec.ip6dst, 4);
4743
4744 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
4745 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
4746
4747 rule->tuples.ether_proto = ETH_P_IPV6;
4748 rule->tuples_mask.ether_proto = 0xFFFF;
4749
4750 break;
4751 case ETHER_FLOW:
4752 ether_addr_copy(rule->tuples.src_mac,
4753 fs->h_u.ether_spec.h_source);
4754 ether_addr_copy(rule->tuples_mask.src_mac,
4755 fs->m_u.ether_spec.h_source);
4756
4757 ether_addr_copy(rule->tuples.dst_mac,
4758 fs->h_u.ether_spec.h_dest);
4759 ether_addr_copy(rule->tuples_mask.dst_mac,
4760 fs->m_u.ether_spec.h_dest);
4761
4762 rule->tuples.ether_proto =
4763 be16_to_cpu(fs->h_u.ether_spec.h_proto);
4764 rule->tuples_mask.ether_proto =
4765 be16_to_cpu(fs->m_u.ether_spec.h_proto);
4766
4767 break;
4768 default:
4769 return -EOPNOTSUPP;
4770 }
4771
4772 switch (flow_type) {
4773 case SCTP_V4_FLOW:
4774 case SCTP_V6_FLOW:
4775 rule->tuples.ip_proto = IPPROTO_SCTP;
4776 rule->tuples_mask.ip_proto = 0xFF;
4777 break;
4778 case TCP_V4_FLOW:
4779 case TCP_V6_FLOW:
4780 rule->tuples.ip_proto = IPPROTO_TCP;
4781 rule->tuples_mask.ip_proto = 0xFF;
4782 break;
4783 case UDP_V4_FLOW:
4784 case UDP_V6_FLOW:
4785 rule->tuples.ip_proto = IPPROTO_UDP;
4786 rule->tuples_mask.ip_proto = 0xFF;
4787 break;
4788 default:
4789 break;
4790 }
4791
4792 if ((fs->flow_type & FLOW_EXT)) {
4793 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
4794 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
4795 }
4796
4797 if (fs->flow_type & FLOW_MAC_EXT) {
4798 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
4799 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
4800 }
4801
4802 return 0;
4803}
4804
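/* Entry point for adding an ethtool ntuple (flow director) rule. As an
 * illustration only (device name and values hypothetical), a command such as
 *   ethtool -N eth0 flow-type tcp4 dst-ip 192.168.1.1 dst-port 80 action 2
 * ends up as the ethtool_rx_flow_spec handled here.
 */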
4805static int hclge_add_fd_entry(struct hnae3_handle *handle,
4806 struct ethtool_rxnfc *cmd)
4807{
4808 struct hclge_vport *vport = hclge_get_vport(handle);
4809 struct hclge_dev *hdev = vport->back;
4810 u16 dst_vport_id = 0, q_index = 0;
4811 struct ethtool_rx_flow_spec *fs;
4812 struct hclge_fd_rule *rule;
4813 u32 unused = 0;
4814 u8 action;
4815 int ret;
4816
4817 if (!hnae3_dev_fd_supported(hdev))
4818 return -EOPNOTSUPP;
4819
9abeb7d8 4820 if (!hdev->fd_en) {
dd74f815
JS
4821 dev_warn(&hdev->pdev->dev,
4822 "Please enable flow director first\n");
4823 return -EOPNOTSUPP;
4824 }
4825
4826 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4827
4828 ret = hclge_fd_check_spec(hdev, fs, &unused);
4829 if (ret) {
4830 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
4831 return ret;
4832 }
4833
4834 if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
4835 action = HCLGE_FD_ACTION_DROP_PACKET;
4836 } else {
4837 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
4838 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
4839 u16 tqps;
4840
0285dbae
JS
4841 if (vf > hdev->num_req_vfs) {
4842 dev_err(&hdev->pdev->dev,
4843 "Error: vf id (%d) > max vf num (%d)\n",
4844 vf, hdev->num_req_vfs);
4845 return -EINVAL;
4846 }
4847
dd74f815
JS
4848 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
4849 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
4850
4851 if (ring >= tqps) {
4852 dev_err(&hdev->pdev->dev,
4853 "Error: queue id (%d) > max tqp num (%d)\n",
4854 ring, tqps - 1);
4855 return -EINVAL;
4856 }
4857
dd74f815
JS
4858 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
4859 q_index = ring;
4860 }
4861
4862 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
4863 if (!rule)
4864 return -ENOMEM;
4865
4866 ret = hclge_fd_get_tuple(hdev, fs, rule);
4867 if (ret)
4868 goto free_rule;
4869
4870 rule->flow_type = fs->flow_type;
4871
4872 rule->location = fs->location;
4873 rule->unused_tuple = unused;
4874 rule->vf_id = dst_vport_id;
4875 rule->queue_id = q_index;
4876 rule->action = action;
4877
4878 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
4879 if (ret)
4880 goto free_rule;
4881
4882 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
4883 if (ret)
4884 goto free_rule;
4885
4886 ret = hclge_fd_update_rule_list(hdev, rule, fs->location, true);
4887 if (ret)
4888 goto free_rule;
4889
4890 return ret;
4891
4892free_rule:
4893 kfree(rule);
4894 return ret;
4895}
4896
4897static int hclge_del_fd_entry(struct hnae3_handle *handle,
4898 struct ethtool_rxnfc *cmd)
4899{
4900 struct hclge_vport *vport = hclge_get_vport(handle);
4901 struct hclge_dev *hdev = vport->back;
4902 struct ethtool_rx_flow_spec *fs;
4903 int ret;
4904
4905 if (!hnae3_dev_fd_supported(hdev))
4906 return -EOPNOTSUPP;
4907
4908 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4909
4910 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4911 return -EINVAL;
4912
4913 if (!hclge_fd_rule_exist(hdev, fs->location)) {
4914 dev_err(&hdev->pdev->dev,
4915 "Delete fail, rule %d is inexistent\n",
4916 fs->location);
4917 return -ENOENT;
4918 }
4919
4920 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4921 fs->location, NULL, false);
4922 if (ret)
4923 return ret;
4924
4925 return hclge_fd_update_rule_list(hdev, NULL, fs->location,
4926 false);
4927}
4928
6871af29
JS
4929static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
4930 bool clear_list)
4931{
4932 struct hclge_vport *vport = hclge_get_vport(handle);
4933 struct hclge_dev *hdev = vport->back;
4934 struct hclge_fd_rule *rule;
4935 struct hlist_node *node;
4936
4937 if (!hnae3_dev_fd_supported(hdev))
4938 return;
4939
4940 if (clear_list) {
4941 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
4942 rule_node) {
4943 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4944 rule->location, NULL, false);
4945 hlist_del(&rule->rule_node);
4946 kfree(rule);
4947 hdev->hclge_fd_rule_num--;
4948 }
4949 } else {
4950 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
4951 rule_node)
4952 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4953 rule->location, NULL, false);
4954 }
4955}
4956
4957static int hclge_restore_fd_entries(struct hnae3_handle *handle)
4958{
4959 struct hclge_vport *vport = hclge_get_vport(handle);
4960 struct hclge_dev *hdev = vport->back;
4961 struct hclge_fd_rule *rule;
4962 struct hlist_node *node;
4963 int ret;
4964
65e41e7e
HT
4965 /* Return ok here, because reset error handling will check this
4966 * return value. If error is returned here, the reset process will
4967 * fail.
4968 */
6871af29 4969 if (!hnae3_dev_fd_supported(hdev))
65e41e7e 4970 return 0;
6871af29 4971
8edc2285 4973	/* if fd is disabled, it should not be restored when resetting */
9abeb7d8 4973 if (!hdev->fd_en)
8edc2285
JS
4974 return 0;
4975
6871af29
JS
4976 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
4977 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
4978 if (!ret)
4979 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
4980
4981 if (ret) {
4982 dev_warn(&hdev->pdev->dev,
4983 "Restore rule %d failed, remove it\n",
4984 rule->location);
4985 hlist_del(&rule->rule_node);
4986 kfree(rule);
4987 hdev->hclge_fd_rule_num--;
4988 }
4989 }
4990 return 0;
4991}
4992
05c2314f
JS
4993static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
4994 struct ethtool_rxnfc *cmd)
4995{
4996 struct hclge_vport *vport = hclge_get_vport(handle);
4997 struct hclge_dev *hdev = vport->back;
4998
4999 if (!hnae3_dev_fd_supported(hdev))
5000 return -EOPNOTSUPP;
5001
5002 cmd->rule_cnt = hdev->hclge_fd_rule_num;
5003 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5004
5005 return 0;
5006}
5007
5008static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5009 struct ethtool_rxnfc *cmd)
5010{
5011 struct hclge_vport *vport = hclge_get_vport(handle);
5012 struct hclge_fd_rule *rule = NULL;
5013 struct hclge_dev *hdev = vport->back;
5014 struct ethtool_rx_flow_spec *fs;
5015 struct hlist_node *node2;
5016
5017 if (!hnae3_dev_fd_supported(hdev))
5018 return -EOPNOTSUPP;
5019
5020 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5021
5022 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5023 if (rule->location >= fs->location)
5024 break;
5025 }
5026
5027 if (!rule || fs->location != rule->location)
5028 return -ENOENT;
5029
5030 fs->flow_type = rule->flow_type;
5031 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5032 case SCTP_V4_FLOW:
5033 case TCP_V4_FLOW:
5034 case UDP_V4_FLOW:
5035 fs->h_u.tcp_ip4_spec.ip4src =
5036 cpu_to_be32(rule->tuples.src_ip[3]);
5037 fs->m_u.tcp_ip4_spec.ip4src =
5038 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5039 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5040
5041 fs->h_u.tcp_ip4_spec.ip4dst =
5042 cpu_to_be32(rule->tuples.dst_ip[3]);
5043 fs->m_u.tcp_ip4_spec.ip4dst =
5044 rule->unused_tuple & BIT(INNER_DST_IP) ?
5045 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5046
5047 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5048 fs->m_u.tcp_ip4_spec.psrc =
5049 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5050 0 : cpu_to_be16(rule->tuples_mask.src_port);
5051
5052 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5053 fs->m_u.tcp_ip4_spec.pdst =
5054 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5055 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5056
5057 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5058 fs->m_u.tcp_ip4_spec.tos =
5059 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5060 0 : rule->tuples_mask.ip_tos;
5061
5062 break;
5063 case IP_USER_FLOW:
5064 fs->h_u.usr_ip4_spec.ip4src =
5065 cpu_to_be32(rule->tuples.src_ip[3]);
5066		fs->m_u.usr_ip4_spec.ip4src =
5067 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5068 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5069
5070 fs->h_u.usr_ip4_spec.ip4dst =
5071 cpu_to_be32(rule->tuples.dst_ip[3]);
5072 fs->m_u.usr_ip4_spec.ip4dst =
5073 rule->unused_tuple & BIT(INNER_DST_IP) ?
5074 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5075
5076 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5077 fs->m_u.usr_ip4_spec.tos =
5078 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5079 0 : rule->tuples_mask.ip_tos;
5080
5081 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5082 fs->m_u.usr_ip4_spec.proto =
5083 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5084 0 : rule->tuples_mask.ip_proto;
5085
5086 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5087
5088 break;
5089 case SCTP_V6_FLOW:
5090 case TCP_V6_FLOW:
5091 case UDP_V6_FLOW:
5092 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5093 rule->tuples.src_ip, 4);
5094 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5095 memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4);
5096 else
5097 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5098 rule->tuples_mask.src_ip, 4);
5099
5100 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5101 rule->tuples.dst_ip, 4);
5102 if (rule->unused_tuple & BIT(INNER_DST_IP))
5103 memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5104 else
5105 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5106 rule->tuples_mask.dst_ip, 4);
5107
5108 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5109 fs->m_u.tcp_ip6_spec.psrc =
5110 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5111 0 : cpu_to_be16(rule->tuples_mask.src_port);
5112
5113 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5114 fs->m_u.tcp_ip6_spec.pdst =
5115 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5116 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5117
5118 break;
5119 case IPV6_USER_FLOW:
5120 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5121 rule->tuples.src_ip, 4);
5122 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5123 memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4);
5124 else
5125 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
5126 rule->tuples_mask.src_ip, 4);
5127
5128 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
5129 rule->tuples.dst_ip, 4);
5130 if (rule->unused_tuple & BIT(INNER_DST_IP))
5131 memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5132 else
5133 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
5134 rule->tuples_mask.dst_ip, 4);
5135
5136 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
5137 fs->m_u.usr_ip6_spec.l4_proto =
5138 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5139 0 : rule->tuples_mask.ip_proto;
5140
5141 break;
5142 case ETHER_FLOW:
5143 ether_addr_copy(fs->h_u.ether_spec.h_source,
5144 rule->tuples.src_mac);
5145 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
5146 eth_zero_addr(fs->m_u.ether_spec.h_source);
5147 else
5148 ether_addr_copy(fs->m_u.ether_spec.h_source,
5149 rule->tuples_mask.src_mac);
5150
5151 ether_addr_copy(fs->h_u.ether_spec.h_dest,
5152 rule->tuples.dst_mac);
5153 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5154 eth_zero_addr(fs->m_u.ether_spec.h_dest);
5155 else
5156 ether_addr_copy(fs->m_u.ether_spec.h_dest,
5157 rule->tuples_mask.dst_mac);
5158
5159 fs->h_u.ether_spec.h_proto =
5160 cpu_to_be16(rule->tuples.ether_proto);
5161 fs->m_u.ether_spec.h_proto =
5162 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
5163 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
5164
5165 break;
5166 default:
5167 return -EOPNOTSUPP;
5168 }
5169
5170 if (fs->flow_type & FLOW_EXT) {
5171 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
5172 fs->m_ext.vlan_tci =
5173 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
5174 cpu_to_be16(VLAN_VID_MASK) :
5175 cpu_to_be16(rule->tuples_mask.vlan_tag1);
5176 }
5177
5178 if (fs->flow_type & FLOW_MAC_EXT) {
5179 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
5180 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5181			eth_zero_addr(fs->m_ext.h_dest);
5182		else
5183			ether_addr_copy(fs->m_ext.h_dest,
5184					rule->tuples_mask.dst_mac);
5185 }
5186
5187 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5188 fs->ring_cookie = RX_CLS_FLOW_DISC;
5189 } else {
5190 u64 vf_id;
5191
5192 fs->ring_cookie = rule->queue_id;
5193 vf_id = rule->vf_id;
5194 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
5195 fs->ring_cookie |= vf_id;
5196 }
5197
5198 return 0;
5199}
5200
5201static int hclge_get_all_rules(struct hnae3_handle *handle,
5202 struct ethtool_rxnfc *cmd, u32 *rule_locs)
5203{
5204 struct hclge_vport *vport = hclge_get_vport(handle);
5205 struct hclge_dev *hdev = vport->back;
5206 struct hclge_fd_rule *rule;
5207 struct hlist_node *node2;
5208 int cnt = 0;
5209
5210 if (!hnae3_dev_fd_supported(hdev))
5211 return -EOPNOTSUPP;
5212
5213 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5214
5215 hlist_for_each_entry_safe(rule, node2,
5216 &hdev->fd_rule_list, rule_node) {
5217 if (cnt == cmd->rule_cnt)
5218 return -EMSGSIZE;
5219
5220 rule_locs[cnt] = rule->location;
5221 cnt++;
5222 }
5223
5224 cmd->rule_cnt = cnt;
5225
5226 return 0;
5227}
5228
4d60291b
HT
5229static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
5230{
5231 struct hclge_vport *vport = hclge_get_vport(handle);
5232 struct hclge_dev *hdev = vport->back;
5233
5234 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
5235 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
5236}
5237
5238static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
5239{
5240 struct hclge_vport *vport = hclge_get_vport(handle);
5241 struct hclge_dev *hdev = vport->back;
5242
5243 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
5244}
5245
5246static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
5247{
5248 struct hclge_vport *vport = hclge_get_vport(handle);
5249 struct hclge_dev *hdev = vport->back;
5250
f02eb82d 5251 return hdev->rst_stats.hw_reset_done_cnt;
4d60291b
HT
5252}
5253
c17852a8
JS
5254static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
5255{
5256 struct hclge_vport *vport = hclge_get_vport(handle);
5257 struct hclge_dev *hdev = vport->back;
5258
9abeb7d8 5259 hdev->fd_en = enable;
c17852a8
JS
5260 if (!enable)
5261 hclge_del_all_fd_entries(handle, false);
5262 else
5263 hclge_restore_fd_entries(handle);
5264}
5265
46a3df9f
S
5266static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
5267{
5268 struct hclge_desc desc;
d44f9b63
YL
5269 struct hclge_config_mac_mode_cmd *req =
5270 (struct hclge_config_mac_mode_cmd *)desc.data;
a90bb9a5 5271 u32 loop_en = 0;
46a3df9f
S
5272 int ret;
5273
5274 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
e4e87715
PL
5275 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
5276 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
5277 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
5278 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
5279 hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
5280 hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
5281 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
5282 hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
5283 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
5284 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
5285 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
5286 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
5287 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
5288 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
a90bb9a5 5289 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
46a3df9f
S
5290
5291 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5292 if (ret)
5293 dev_err(&hdev->pdev->dev,
5294 "mac enable fail, ret =%d.\n", ret);
5295}
5296
eb66d503 5297static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
c39c4d98 5298{
c39c4d98 5299 struct hclge_config_mac_mode_cmd *req;
c39c4d98
YL
5300 struct hclge_desc desc;
5301 u32 loop_en;
5302 int ret;
5303
e4d68dae
YL
5304 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
5305 /* 1 Read out the MAC mode config at first */
5306 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
5307 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5308 if (ret) {
5309 dev_err(&hdev->pdev->dev,
5310 "mac loopback get fail, ret =%d.\n", ret);
5311 return ret;
5312 }
c39c4d98 5313
e4d68dae
YL
5314 /* 2 Then set up the loopback flag */
5315 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
e4e87715 5316 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
0f29fc23
YL
5317 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
5318 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
e4d68dae
YL
5319
5320 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
c39c4d98 5321
e4d68dae
YL
5322 /* 3 Config mac work mode with the loopback flag
5323 * and its original configuration parameters
5324 */
5325 hclge_cmd_reuse_desc(&desc, false);
5326 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5327 if (ret)
5328 dev_err(&hdev->pdev->dev,
5329 "mac loopback set fail, ret =%d.\n", ret);
5330 return ret;
5331}
c39c4d98 5332
4dc13b96
FL
5333static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
5334 enum hnae3_loop loop_mode)
5fd50ac3
PL
5335{
5336#define HCLGE_SERDES_RETRY_MS 10
5337#define HCLGE_SERDES_RETRY_NUM 100
350fda0a 5338
5339#define HCLGE_MAC_LINK_STATUS_MS 20
5340#define HCLGE_MAC_LINK_STATUS_NUM 10
5341#define HCLGE_MAC_LINK_STATUS_DOWN 0
5342#define HCLGE_MAC_LINK_STATUS_UP 1
5343
5fd50ac3
PL
5344 struct hclge_serdes_lb_cmd *req;
5345 struct hclge_desc desc;
350fda0a 5346 int mac_link_ret = 0;
5fd50ac3 5347 int ret, i = 0;
4dc13b96 5348 u8 loop_mode_b;
5fd50ac3 5349
d0d72bac 5350 req = (struct hclge_serdes_lb_cmd *)desc.data;
5fd50ac3
PL
5351 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
5352
4dc13b96
FL
5353 switch (loop_mode) {
5354 case HNAE3_LOOP_SERIAL_SERDES:
5355 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
5356 break;
5357 case HNAE3_LOOP_PARALLEL_SERDES:
5358 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
5359 break;
5360 default:
5361 dev_err(&hdev->pdev->dev,
5362 "unsupported serdes loopback mode %d\n", loop_mode);
5363 return -ENOTSUPP;
5364 }
5365
5fd50ac3 5366 if (en) {
4dc13b96
FL
5367 req->enable = loop_mode_b;
5368 req->mask = loop_mode_b;
350fda0a 5369 mac_link_ret = HCLGE_MAC_LINK_STATUS_UP;
5fd50ac3 5370 } else {
4dc13b96 5371 req->mask = loop_mode_b;
350fda0a 5372 mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN;
5fd50ac3
PL
5373 }
5374
5375 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5376 if (ret) {
5377 dev_err(&hdev->pdev->dev,
5378 "serdes loopback set fail, ret = %d\n", ret);
5379 return ret;
5380 }
5381
5382 do {
5383 msleep(HCLGE_SERDES_RETRY_MS);
5384 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
5385 true);
5386 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5387 if (ret) {
5388 dev_err(&hdev->pdev->dev,
5389 "serdes loopback get, ret = %d\n", ret);
5390 return ret;
5391 }
5392 } while (++i < HCLGE_SERDES_RETRY_NUM &&
5393 !(req->result & HCLGE_CMD_SERDES_DONE_B));
5394
5395 if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
5396 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
5397 return -EBUSY;
5398 } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
5399 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
5400 return -EIO;
5401 }
5402
0f29fc23 5403 hclge_cfg_mac_mode(hdev, en);
350fda0a 5404
5405 i = 0;
5406 do {
5407 /* serdes internal loopback, independent of the network cable. */
5408 msleep(HCLGE_MAC_LINK_STATUS_MS);
5409 ret = hclge_get_mac_link_status(hdev);
5410 if (ret == mac_link_ret)
5411 return 0;
5412 } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
5413
5414 dev_err(&hdev->pdev->dev, "config mac mode timeout\n");
5415
5416 return -EBUSY;
5fd50ac3
PL
5417}
5418
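/* Rough worst-case polling budget for the function above, derived from the
 * constants it uses (an estimate, not a measured figure): the serdes done
 * poll waits up to HCLGE_SERDES_RETRY_NUM * HCLGE_SERDES_RETRY_MS =
 * 100 * 10 ms = 1 s, and the MAC link status poll waits up to
 * HCLGE_MAC_LINK_STATUS_NUM * HCLGE_MAC_LINK_STATUS_MS = 10 * 20 ms = 200 ms.
 */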
0f29fc23
YL
5419static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
5420 int stream_id, bool enable)
5421{
5422 struct hclge_desc desc;
5423 struct hclge_cfg_com_tqp_queue_cmd *req =
5424 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
5425 int ret;
5426
5427 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
5428 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
5429 req->stream_id = cpu_to_le16(stream_id);
5430 req->enable |= enable << HCLGE_TQP_ENABLE_B;
5431
5432 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5433 if (ret)
5434 dev_err(&hdev->pdev->dev,
5435 "Tqp enable fail, status =%d.\n", ret);
5436 return ret;
5437}
5438
e4d68dae
YL
5439static int hclge_set_loopback(struct hnae3_handle *handle,
5440 enum hnae3_loop loop_mode, bool en)
5441{
5442 struct hclge_vport *vport = hclge_get_vport(handle);
205a24ca 5443 struct hnae3_knic_private_info *kinfo;
e4d68dae 5444 struct hclge_dev *hdev = vport->back;
0f29fc23 5445 int i, ret;
e4d68dae
YL
5446
5447 switch (loop_mode) {
eb66d503
FL
5448 case HNAE3_LOOP_APP:
5449 ret = hclge_set_app_loopback(hdev, en);
c39c4d98 5450 break;
4dc13b96
FL
5451 case HNAE3_LOOP_SERIAL_SERDES:
5452 case HNAE3_LOOP_PARALLEL_SERDES:
5453 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
5fd50ac3 5454 break;
c39c4d98
YL
5455 default:
5456 ret = -ENOTSUPP;
5457 dev_err(&hdev->pdev->dev,
5458 "loop_mode %d is not supported\n", loop_mode);
5459 break;
5460 }
5461
47ef6dec
JS
5462 if (ret)
5463 return ret;
5464
205a24ca
HT
5465 kinfo = &vport->nic.kinfo;
5466 for (i = 0; i < kinfo->num_tqps; i++) {
0f29fc23
YL
5467 ret = hclge_tqp_enable(hdev, i, 0, en);
5468 if (ret)
5469 return ret;
5470 }
46a3df9f 5471
0f29fc23 5472 return 0;
46a3df9f
S
5473}
5474
5475static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
5476{
5477 struct hclge_vport *vport = hclge_get_vport(handle);
205a24ca 5478 struct hnae3_knic_private_info *kinfo;
46a3df9f
S
5479 struct hnae3_queue *queue;
5480 struct hclge_tqp *tqp;
5481 int i;
5482
205a24ca
HT
5483 kinfo = &vport->nic.kinfo;
5484 for (i = 0; i < kinfo->num_tqps; i++) {
46a3df9f
S
5485 queue = handle->kinfo.tqp[i];
5486 tqp = container_of(queue, struct hclge_tqp, q);
5487 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
5488 }
5489}
5490
8cdb992f
JS
5491static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
5492{
5493 struct hclge_vport *vport = hclge_get_vport(handle);
5494 struct hclge_dev *hdev = vport->back;
5495
5496 if (enable) {
5497 mod_timer(&hdev->service_timer, jiffies + HZ);
5498 } else {
5499 del_timer_sync(&hdev->service_timer);
5500 cancel_work_sync(&hdev->service_task);
5501 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
5502 }
5503}
5504
46a3df9f
S
5505static int hclge_ae_start(struct hnae3_handle *handle)
5506{
5507 struct hclge_vport *vport = hclge_get_vport(handle);
5508 struct hclge_dev *hdev = vport->back;
46a3df9f 5509
46a3df9f
S
5510 /* mac enable */
5511 hclge_cfg_mac_mode(hdev, true);
5512 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
be8d8cdb 5513 hdev->hw.mac.link = 0;
46a3df9f 5514
b50ae26c
PL
5515 /* reset tqp stats */
5516 hclge_reset_tqp_stats(handle);
5517
b01b7cf1 5518 hclge_mac_start_phy(hdev);
46a3df9f 5519
46a3df9f
S
5520 return 0;
5521}
5522
5523static void hclge_ae_stop(struct hnae3_handle *handle)
5524{
5525 struct hclge_vport *vport = hclge_get_vport(handle);
5526 struct hclge_dev *hdev = vport->back;
39cfbc9c 5527 int i;
46a3df9f 5528
2f7e4896
FL
5529 set_bit(HCLGE_STATE_DOWN, &hdev->state);
5530
35d93a30
HT
5531 /* If it is not a PF reset, the firmware will disable the MAC,
5532 * so we only need to stop the PHY here.
5533 */
5534 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
5535 hdev->reset_type != HNAE3_FUNC_RESET) {
9617f668 5536 hclge_mac_stop_phy(hdev);
b50ae26c 5537 return;
9617f668 5538 }
b50ae26c 5539
39cfbc9c
HT
5540 for (i = 0; i < handle->kinfo.num_tqps; i++)
5541 hclge_reset_tqp(handle, i);
5542
46a3df9f
S
5543 /* Mac disable */
5544 hclge_cfg_mac_mode(hdev, false);
5545
5546 hclge_mac_stop_phy(hdev);
5547
5548 /* reset tqp stats */
5549 hclge_reset_tqp_stats(handle);
f30dfddc 5550 hclge_update_link_status(hdev);
46a3df9f
S
5551}
5552
a6d818e3
YL
5553int hclge_vport_start(struct hclge_vport *vport)
5554{
5555 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
5556 vport->last_active_jiffies = jiffies;
5557 return 0;
5558}
5559
5560void hclge_vport_stop(struct hclge_vport *vport)
5561{
5562 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
5563}
5564
5565static int hclge_client_start(struct hnae3_handle *handle)
5566{
5567 struct hclge_vport *vport = hclge_get_vport(handle);
5568
5569 return hclge_vport_start(vport);
5570}
5571
5572static void hclge_client_stop(struct hnae3_handle *handle)
5573{
5574 struct hclge_vport *vport = hclge_get_vport(handle);
5575
5576 hclge_vport_stop(vport);
5577}
5578
46a3df9f
S
5579static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
5580 u16 cmdq_resp, u8 resp_code,
5581 enum hclge_mac_vlan_tbl_opcode op)
5582{
5583 struct hclge_dev *hdev = vport->back;
5584 int return_status = -EIO;
5585
5586 if (cmdq_resp) {
5587 dev_err(&hdev->pdev->dev,
5588 "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
5589 cmdq_resp);
5590 return -EIO;
5591 }
5592
5593 if (op == HCLGE_MAC_VLAN_ADD) {
5594 if ((!resp_code) || (resp_code == 1)) {
5595 return_status = 0;
5596 } else if (resp_code == 2) {
eefd00a5 5597 return_status = -ENOSPC;
46a3df9f
S
5598 dev_err(&hdev->pdev->dev,
5599 "add mac addr failed for uc_overflow.\n");
5600 } else if (resp_code == 3) {
eefd00a5 5601 return_status = -ENOSPC;
46a3df9f
S
5602 dev_err(&hdev->pdev->dev,
5603 "add mac addr failed for mc_overflow.\n");
5604 } else {
5605 dev_err(&hdev->pdev->dev,
5606 "add mac addr failed for undefined, code=%d.\n",
5607 resp_code);
5608 }
5609 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
5610 if (!resp_code) {
5611 return_status = 0;
5612 } else if (resp_code == 1) {
eefd00a5 5613 return_status = -ENOENT;
46a3df9f
S
5614 dev_dbg(&hdev->pdev->dev,
5615 "remove mac addr failed for miss.\n");
5616 } else {
5617 dev_err(&hdev->pdev->dev,
5618 "remove mac addr failed for undefined, code=%d.\n",
5619 resp_code);
5620 }
5621 } else if (op == HCLGE_MAC_VLAN_LKUP) {
5622 if (!resp_code) {
5623 return_status = 0;
5624 } else if (resp_code == 1) {
eefd00a5 5625 return_status = -ENOENT;
46a3df9f
S
5626 dev_dbg(&hdev->pdev->dev,
5627 "lookup mac addr failed for miss.\n");
5628 } else {
5629 dev_err(&hdev->pdev->dev,
5630 "lookup mac addr failed for undefined, code=%d.\n",
5631 resp_code);
5632 }
5633 } else {
eefd00a5 5634 return_status = -EINVAL;
46a3df9f
S
5635 dev_err(&hdev->pdev->dev,
5636 "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
5637 op);
5638 }
5639
5640 return return_status;
5641}
5642
5643static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
5644{
5645 int word_num;
5646 int bit_num;
5647
5648 if (vfid > 255 || vfid < 0)
5649 return -EIO;
5650
5651 if (vfid >= 0 && vfid <= 191) {
5652 word_num = vfid / 32;
5653 bit_num = vfid % 32;
5654 if (clr)
a90bb9a5 5655 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
46a3df9f 5656 else
a90bb9a5 5657 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
46a3df9f
S
5658 } else {
5659 word_num = (vfid - 192) / 32;
5660 bit_num = vfid % 32;
5661 if (clr)
a90bb9a5 5662 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
46a3df9f 5663 else
a90bb9a5 5664 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
46a3df9f
S
5665 }
5666
5667 return 0;
5668}
5669
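/* Worked examples of the vfid-to-bitmap mapping used above (values are
 * illustrative): vfid 50 falls in desc[1] with word_num = 50 / 32 = 1 and
 * bit_num = 50 % 32 = 18; vfid 200 falls in desc[2] with
 * word_num = (200 - 192) / 32 = 0 and bit_num = 200 % 32 = 8.
 */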
5670static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
5671{
5672#define HCLGE_DESC_NUMBER 3
5673#define HCLGE_FUNC_NUMBER_PER_DESC 6
5674 int i, j;
5675
6c39d527 5676 for (i = 1; i < HCLGE_DESC_NUMBER; i++)
46a3df9f
S
5677 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
5678 if (desc[i].data[j])
5679 return false;
5680
5681 return true;
5682}
5683
d44f9b63 5684static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
3a586422 5685 const u8 *addr, bool is_mc)
46a3df9f
S
5686{
5687 const unsigned char *mac_addr = addr;
5688 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
5689 (mac_addr[0]) | (mac_addr[1] << 8);
5690 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
5691
3a586422
WL
5692 hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5693 if (is_mc) {
5694 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
5695 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5696 }
5697
46a3df9f
S
5698 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
5699 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
5700}
5701
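/* Byte layout produced by the helper above, shown for an illustrative
 * address aa:bb:cc:dd:ee:ff: high_val packs bytes 0-3 as 0xddccbbaa and
 * low_val packs bytes 4-5 as 0xffee, so mac_addr_hi32 becomes
 * cpu_to_le32(0xddccbbaa) and mac_addr_lo16 becomes cpu_to_le16(0xffee).
 */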
46a3df9f 5702static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
d44f9b63 5703 struct hclge_mac_vlan_tbl_entry_cmd *req)
46a3df9f
S
5704{
5705 struct hclge_dev *hdev = vport->back;
5706 struct hclge_desc desc;
5707 u8 resp_code;
a90bb9a5 5708 u16 retval;
46a3df9f
S
5709 int ret;
5710
5711 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
5712
d44f9b63 5713 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
46a3df9f
S
5714
5715 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5716 if (ret) {
5717 dev_err(&hdev->pdev->dev,
5718 "del mac addr failed for cmd_send, ret =%d.\n",
5719 ret);
5720 return ret;
5721 }
a90bb9a5
YL
5722 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5723 retval = le16_to_cpu(desc.retval);
46a3df9f 5724
a90bb9a5 5725 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
46a3df9f
S
5726 HCLGE_MAC_VLAN_REMOVE);
5727}
5728
5729static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
d44f9b63 5730 struct hclge_mac_vlan_tbl_entry_cmd *req,
46a3df9f
S
5731 struct hclge_desc *desc,
5732 bool is_mc)
5733{
5734 struct hclge_dev *hdev = vport->back;
5735 u8 resp_code;
a90bb9a5 5736 u16 retval;
46a3df9f
S
5737 int ret;
5738
5739 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
5740 if (is_mc) {
5741 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5742 memcpy(desc[0].data,
5743 req,
d44f9b63 5744 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
46a3df9f
S
5745 hclge_cmd_setup_basic_desc(&desc[1],
5746 HCLGE_OPC_MAC_VLAN_ADD,
5747 true);
5748 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5749 hclge_cmd_setup_basic_desc(&desc[2],
5750 HCLGE_OPC_MAC_VLAN_ADD,
5751 true);
5752 ret = hclge_cmd_send(&hdev->hw, desc, 3);
5753 } else {
5754 memcpy(desc[0].data,
5755 req,
d44f9b63 5756 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
46a3df9f
S
5757 ret = hclge_cmd_send(&hdev->hw, desc, 1);
5758 }
5759 if (ret) {
5760 dev_err(&hdev->pdev->dev,
5761 "lookup mac addr failed for cmd_send, ret =%d.\n",
5762 ret);
5763 return ret;
5764 }
a90bb9a5
YL
5765 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
5766 retval = le16_to_cpu(desc[0].retval);
46a3df9f 5767
a90bb9a5 5768 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
46a3df9f
S
5769 HCLGE_MAC_VLAN_LKUP);
5770}
5771
5772static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
d44f9b63 5773 struct hclge_mac_vlan_tbl_entry_cmd *req,
46a3df9f
S
5774 struct hclge_desc *mc_desc)
5775{
5776 struct hclge_dev *hdev = vport->back;
5777 int cfg_status;
5778 u8 resp_code;
a90bb9a5 5779 u16 retval;
46a3df9f
S
5780 int ret;
5781
5782 if (!mc_desc) {
5783 struct hclge_desc desc;
5784
5785 hclge_cmd_setup_basic_desc(&desc,
5786 HCLGE_OPC_MAC_VLAN_ADD,
5787 false);
d44f9b63
YL
5788 memcpy(desc.data, req,
5789 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
46a3df9f 5790 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
a90bb9a5
YL
5791 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5792 retval = le16_to_cpu(desc.retval);
5793
5794 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
46a3df9f
S
5795 resp_code,
5796 HCLGE_MAC_VLAN_ADD);
5797 } else {
c3b6f755 5798 hclge_cmd_reuse_desc(&mc_desc[0], false);
46a3df9f 5799 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
c3b6f755 5800 hclge_cmd_reuse_desc(&mc_desc[1], false);
46a3df9f 5801 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
c3b6f755 5802 hclge_cmd_reuse_desc(&mc_desc[2], false);
46a3df9f
S
5803 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
5804 memcpy(mc_desc[0].data, req,
d44f9b63 5805 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
46a3df9f 5806 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
a90bb9a5
YL
5807 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
5808 retval = le16_to_cpu(mc_desc[0].retval);
5809
5810 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
46a3df9f
S
5811 resp_code,
5812 HCLGE_MAC_VLAN_ADD);
5813 }
5814
5815 if (ret) {
5816 dev_err(&hdev->pdev->dev,
5817 "add mac addr failed for cmd_send, ret =%d.\n",
5818 ret);
5819 return ret;
5820 }
5821
5822 return cfg_status;
5823}
5824
39932473
JS
5825static int hclge_init_umv_space(struct hclge_dev *hdev)
5826{
5827 u16 allocated_size = 0;
5828 int ret;
5829
5830 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
5831 true);
5832 if (ret)
5833 return ret;
5834
5835 if (allocated_size < hdev->wanted_umv_size)
5836 dev_warn(&hdev->pdev->dev,
5837 "Alloc umv space failed, want %d, get %d\n",
5838 hdev->wanted_umv_size, allocated_size);
5839
5840 mutex_init(&hdev->umv_mutex);
5841 hdev->max_umv_size = allocated_size;
5842 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
5843 hdev->share_umv_size = hdev->priv_umv_size +
5844 hdev->max_umv_size % (hdev->num_req_vfs + 2);
5845
5846 return 0;
5847}
5848
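/* Illustrative split of the allocated UMV space (numbers are examples, not
 * defaults): with allocated_size = 100 and num_req_vfs = 6, the divisor is
 * num_req_vfs + 2 = 8, so priv_umv_size = 100 / 8 = 12 entries per function
 * and share_umv_size = 12 + (100 % 8) = 16 entries in the shared pool.
 */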
5849static int hclge_uninit_umv_space(struct hclge_dev *hdev)
5850{
5851 int ret;
5852
5853 if (hdev->max_umv_size > 0) {
5854 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
5855 false);
5856 if (ret)
5857 return ret;
5858 hdev->max_umv_size = 0;
5859 }
5860 mutex_destroy(&hdev->umv_mutex);
5861
5862 return 0;
5863}
5864
5865static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
5866 u16 *allocated_size, bool is_alloc)
5867{
5868 struct hclge_umv_spc_alc_cmd *req;
5869 struct hclge_desc desc;
5870 int ret;
5871
5872 req = (struct hclge_umv_spc_alc_cmd *)desc.data;
5873 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
5874 hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc);
5875 req->space_size = cpu_to_le32(space_size);
5876
5877 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5878 if (ret) {
5879 dev_err(&hdev->pdev->dev,
5880 "%s umv space failed for cmd_send, ret =%d\n",
5881 is_alloc ? "allocate" : "free", ret);
5882 return ret;
5883 }
5884
5885 if (is_alloc && allocated_size)
5886 *allocated_size = le32_to_cpu(desc.data[1]);
5887
5888 return 0;
5889}
5890
5891static void hclge_reset_umv_space(struct hclge_dev *hdev)
5892{
5893 struct hclge_vport *vport;
5894 int i;
5895
5896 for (i = 0; i < hdev->num_alloc_vport; i++) {
5897 vport = &hdev->vport[i];
5898 vport->used_umv_num = 0;
5899 }
5900
5901 mutex_lock(&hdev->umv_mutex);
5902 hdev->share_umv_size = hdev->priv_umv_size +
5903 hdev->max_umv_size % (hdev->num_req_vfs + 2);
5904 mutex_unlock(&hdev->umv_mutex);
5905}
5906
5907static bool hclge_is_umv_space_full(struct hclge_vport *vport)
5908{
5909 struct hclge_dev *hdev = vport->back;
5910 bool is_full;
5911
5912 mutex_lock(&hdev->umv_mutex);
5913 is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
5914 hdev->share_umv_size == 0);
5915 mutex_unlock(&hdev->umv_mutex);
5916
5917 return is_full;
5918}
5919
5920static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
5921{
5922 struct hclge_dev *hdev = vport->back;
5923
5924 mutex_lock(&hdev->umv_mutex);
5925 if (is_free) {
5926 if (vport->used_umv_num > hdev->priv_umv_size)
5927 hdev->share_umv_size++;
54a395b6 5928
5929 if (vport->used_umv_num > 0)
5930 vport->used_umv_num--;
39932473 5931 } else {
54a395b6 5932 if (vport->used_umv_num >= hdev->priv_umv_size &&
5933 hdev->share_umv_size > 0)
39932473
JS
5934 hdev->share_umv_size--;
5935 vport->used_umv_num++;
5936 }
5937 mutex_unlock(&hdev->umv_mutex);
5938}
5939
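/* Accounting summary for the helper above: a vport consumes its private
 * quota (priv_umv_size) first; once used_umv_num reaches that quota,
 * further additions are charged to the shared pool (share_umv_size).
 * Freeing reverses this, returning the entry to the shared pool while the
 * vport is still over its private quota.
 */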
46a3df9f
S
5940static int hclge_add_uc_addr(struct hnae3_handle *handle,
5941 const unsigned char *addr)
5942{
5943 struct hclge_vport *vport = hclge_get_vport(handle);
5944
5945 return hclge_add_uc_addr_common(vport, addr);
5946}
5947
5948int hclge_add_uc_addr_common(struct hclge_vport *vport,
5949 const unsigned char *addr)
5950{
5951 struct hclge_dev *hdev = vport->back;
d44f9b63 5952 struct hclge_mac_vlan_tbl_entry_cmd req;
d07b6bb4 5953 struct hclge_desc desc;
a90bb9a5 5954 u16 egress_port = 0;
aa7a795e 5955 int ret;
46a3df9f
S
5956
5957 /* mac addr check */
5958 if (is_zero_ether_addr(addr) ||
5959 is_broadcast_ether_addr(addr) ||
5960 is_multicast_ether_addr(addr)) {
5961 dev_err(&hdev->pdev->dev,
5962 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
5963 addr,
5964 is_zero_ether_addr(addr),
5965 is_broadcast_ether_addr(addr),
5966 is_multicast_ether_addr(addr));
5967 return -EINVAL;
5968 }
5969
5970 memset(&req, 0, sizeof(req));
a90bb9a5 5971
e4e87715
PL
5972 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
5973 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
a90bb9a5
YL
5974
5975 req.egress_port = cpu_to_le16(egress_port);
46a3df9f 5976
3a586422 5977 hclge_prepare_mac_addr(&req, addr, false);
46a3df9f 5978
d07b6bb4
JS
5979 /* Look up the mac address in the mac_vlan table, and add
5980 * it if the entry does not exist. Duplicate unicast entries
5981 * are not allowed in the mac_vlan table.
5982 */
5983 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
39932473
JS
5984 if (ret == -ENOENT) {
5985 if (!hclge_is_umv_space_full(vport)) {
5986 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
5987 if (!ret)
5988 hclge_update_umv_space(vport, false);
5989 return ret;
5990 }
5991
5992 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
5993 hdev->priv_umv_size);
5994
5995 return -ENOSPC;
5996 }
d07b6bb4
JS
5997
5998 /* check if we just hit the duplicate */
72110b56
PL
5999 if (!ret) {
6000 dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
6001 vport->vport_id, addr);
6002 return 0;
6003 }
d07b6bb4
JS
6004
6005 dev_err(&hdev->pdev->dev,
6006 "PF failed to add unicast entry(%pM) in the MAC table\n",
6007 addr);
46a3df9f 6008
aa7a795e 6009 return ret;
46a3df9f
S
6010}
6011
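/* Return semantics of the add path above: a lookup result of -ENOENT means
 * the address is new, so it is added and UMV space is charged; 0 means the
 * address already exists, which is only warned about and treated as success;
 * any other error is propagated to the caller.
 */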
6012static int hclge_rm_uc_addr(struct hnae3_handle *handle,
6013 const unsigned char *addr)
6014{
6015 struct hclge_vport *vport = hclge_get_vport(handle);
6016
6017 return hclge_rm_uc_addr_common(vport, addr);
6018}
6019
6020int hclge_rm_uc_addr_common(struct hclge_vport *vport,
6021 const unsigned char *addr)
6022{
6023 struct hclge_dev *hdev = vport->back;
d44f9b63 6024 struct hclge_mac_vlan_tbl_entry_cmd req;
aa7a795e 6025 int ret;
46a3df9f
S
6026
6027 /* mac addr check */
6028 if (is_zero_ether_addr(addr) ||
6029 is_broadcast_ether_addr(addr) ||
6030 is_multicast_ether_addr(addr)) {
6031 dev_dbg(&hdev->pdev->dev,
6032 "Remove mac err! invalid mac:%pM.\n",
6033 addr);
6034 return -EINVAL;
6035 }
6036
6037 memset(&req, 0, sizeof(req));
e4e87715 6038 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
3a586422 6039 hclge_prepare_mac_addr(&req, addr, false);
aa7a795e 6040 ret = hclge_remove_mac_vlan_tbl(vport, &req);
39932473
JS
6041 if (!ret)
6042 hclge_update_umv_space(vport, true);
46a3df9f 6043
aa7a795e 6044 return ret;
46a3df9f
S
6045}
6046
6047static int hclge_add_mc_addr(struct hnae3_handle *handle,
6048 const unsigned char *addr)
6049{
6050 struct hclge_vport *vport = hclge_get_vport(handle);
6051
a10829c4 6052 return hclge_add_mc_addr_common(vport, addr);
46a3df9f
S
6053}
6054
6055int hclge_add_mc_addr_common(struct hclge_vport *vport,
6056 const unsigned char *addr)
6057{
6058 struct hclge_dev *hdev = vport->back;
d44f9b63 6059 struct hclge_mac_vlan_tbl_entry_cmd req;
46a3df9f 6060 struct hclge_desc desc[3];
46a3df9f
S
6061 int status;
6062
6063 /* mac addr check */
6064 if (!is_multicast_ether_addr(addr)) {
6065 dev_err(&hdev->pdev->dev,
6066 "Add mc mac err! invalid mac:%pM.\n",
6067 addr);
6068 return -EINVAL;
6069 }
6070 memset(&req, 0, sizeof(req));
e4e87715 6071 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
3a586422 6072 hclge_prepare_mac_addr(&req, addr, true);
46a3df9f
S
6073 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6074 if (!status) {
6075 /* This mac addr exist, update VFID for it */
6076 hclge_update_desc_vfid(desc, vport->vport_id, false);
6077 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6078 } else {
6079 /* This mac addr do not exist, add new entry for it */
6080 memset(desc[0].data, 0, sizeof(desc[0].data));
6081 memset(desc[1].data, 0, sizeof(desc[0].data));
6082 memset(desc[2].data, 0, sizeof(desc[0].data));
6083 hclge_update_desc_vfid(desc, vport->vport_id, false);
6084 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6085 }
6086
1f6db589
JS
6087 if (status == -ENOSPC)
6088 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
46a3df9f
S
6089
6090 return status;
6091}
6092
6093static int hclge_rm_mc_addr(struct hnae3_handle *handle,
6094 const unsigned char *addr)
6095{
6096 struct hclge_vport *vport = hclge_get_vport(handle);
6097
6098 return hclge_rm_mc_addr_common(vport, addr);
6099}
6100
6101int hclge_rm_mc_addr_common(struct hclge_vport *vport,
6102 const unsigned char *addr)
6103{
6104 struct hclge_dev *hdev = vport->back;
d44f9b63 6105 struct hclge_mac_vlan_tbl_entry_cmd req;
46a3df9f
S
6106 enum hclge_cmd_status status;
6107 struct hclge_desc desc[3];
46a3df9f
S
6108
6109 /* mac addr check */
6110 if (!is_multicast_ether_addr(addr)) {
6111 dev_dbg(&hdev->pdev->dev,
6112 "Remove mc mac err! invalid mac:%pM.\n",
6113 addr);
6114 return -EINVAL;
6115 }
6116
6117 memset(&req, 0, sizeof(req));
e4e87715 6118 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
3a586422 6119 hclge_prepare_mac_addr(&req, addr, true);
46a3df9f
S
6120 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6121 if (!status) {
6122 /* This mac addr exist, remove this handle's VFID for it */
6123 hclge_update_desc_vfid(desc, vport->vport_id, true);
6124
6125 if (hclge_is_all_function_id_zero(desc))
6126 /* All the vfid is zero, so need to delete this entry */
6127 status = hclge_remove_mac_vlan_tbl(vport, &req);
6128 else
6129 /* Not all the vfid is zero, update the vfid */
6130 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6131
6132 } else {
40cca1c5
XW
6133 /* This mac address may be in the mta table, but it cannot be
6134 * deleted here because an mta entry represents an address
6135 * range rather than a specific address. The delete action for
6136 * all entries will take effect in update_mta_status, called by
6137 * hns3_nic_set_rx_mode.
6138 */
6139 status = 0;
46a3df9f
S
6140 }
6141
46a3df9f
S
6142 return status;
6143}
6144
6dd86902 6145void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6146 enum HCLGE_MAC_ADDR_TYPE mac_type)
6147{
6148 struct hclge_vport_mac_addr_cfg *mac_cfg;
6149 struct list_head *list;
6150
6151 if (!vport->vport_id)
6152 return;
6153
6154 mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
6155 if (!mac_cfg)
6156 return;
6157
6158 mac_cfg->hd_tbl_status = true;
6159 memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
6160
6161 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6162 &vport->uc_mac_list : &vport->mc_mac_list;
6163
6164 list_add_tail(&mac_cfg->node, list);
6165}
6166
6167void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6168 bool is_write_tbl,
6169 enum HCLGE_MAC_ADDR_TYPE mac_type)
6170{
6171 struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6172 struct list_head *list;
6173 bool uc_flag, mc_flag;
6174
6175 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6176 &vport->uc_mac_list : &vport->mc_mac_list;
6177
6178 uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
6179 mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
6180
6181 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6182 if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
6183 if (uc_flag && mac_cfg->hd_tbl_status)
6184 hclge_rm_uc_addr_common(vport, mac_addr);
6185
6186 if (mc_flag && mac_cfg->hd_tbl_status)
6187 hclge_rm_mc_addr_common(vport, mac_addr);
6188
6189 list_del(&mac_cfg->node);
6190 kfree(mac_cfg);
6191 break;
6192 }
6193 }
6194}
6195
6196void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
6197 enum HCLGE_MAC_ADDR_TYPE mac_type)
6198{
6199 struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6200 struct list_head *list;
6201
6202 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6203 &vport->uc_mac_list : &vport->mc_mac_list;
6204
6205 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6206 if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
6207 hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
6208
6209 if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
6210 hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
6211
6212 mac_cfg->hd_tbl_status = false;
6213 if (is_del_list) {
6214 list_del(&mac_cfg->node);
6215 kfree(mac_cfg);
6216 }
6217 }
6218}
6219
6220void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
6221{
6222 struct hclge_vport_mac_addr_cfg *mac, *tmp;
6223 struct hclge_vport *vport;
6224 int i;
6225
6226 mutex_lock(&hdev->vport_cfg_mutex);
6227 for (i = 0; i < hdev->num_alloc_vport; i++) {
6228 vport = &hdev->vport[i];
6229 list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
6230 list_del(&mac->node);
6231 kfree(mac);
6232 }
6233
6234 list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
6235 list_del(&mac->node);
6236 kfree(mac);
6237 }
6238 }
6239 mutex_unlock(&hdev->vport_cfg_mutex);
6240}
6241
f5aac71c
FL
6242static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
6243 u16 cmdq_resp, u8 resp_code)
6244{
6245#define HCLGE_ETHERTYPE_SUCCESS_ADD 0
6246#define HCLGE_ETHERTYPE_ALREADY_ADD 1
6247#define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
6248#define HCLGE_ETHERTYPE_KEY_CONFLICT 3
6249
6250 int return_status;
6251
6252 if (cmdq_resp) {
6253 dev_err(&hdev->pdev->dev,
6254 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
6255 cmdq_resp);
6256 return -EIO;
6257 }
6258
6259 switch (resp_code) {
6260 case HCLGE_ETHERTYPE_SUCCESS_ADD:
6261 case HCLGE_ETHERTYPE_ALREADY_ADD:
6262 return_status = 0;
6263 break;
6264 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
6265 dev_err(&hdev->pdev->dev,
6266 "add mac ethertype failed for manager table overflow.\n");
6267 return_status = -EIO;
6268 break;
6269 case HCLGE_ETHERTYPE_KEY_CONFLICT:
6270 dev_err(&hdev->pdev->dev,
6271 "add mac ethertype failed for key conflict.\n");
6272 return_status = -EIO;
6273 break;
6274 default:
6275 dev_err(&hdev->pdev->dev,
6276 "add mac ethertype failed for undefined, code=%d.\n",
6277 resp_code);
6278 return_status = -EIO;
6279 }
6280
6281 return return_status;
6282}
6283
6284static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
6285 const struct hclge_mac_mgr_tbl_entry_cmd *req)
6286{
6287 struct hclge_desc desc;
6288 u8 resp_code;
6289 u16 retval;
6290 int ret;
6291
6292 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
6293 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
6294
6295 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6296 if (ret) {
6297 dev_err(&hdev->pdev->dev,
6298 "add mac ethertype failed for cmd_send, ret =%d.\n",
6299 ret);
6300 return ret;
6301 }
6302
6303 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6304 retval = le16_to_cpu(desc.retval);
6305
6306 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
6307}
6308
6309static int init_mgr_tbl(struct hclge_dev *hdev)
6310{
6311 int ret;
6312 int i;
6313
6314 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
6315 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
6316 if (ret) {
6317 dev_err(&hdev->pdev->dev,
6318 "add mac ethertype failed, ret =%d.\n",
6319 ret);
6320 return ret;
6321 }
6322 }
6323
6324 return 0;
6325}
6326
46a3df9f
S
6327static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
6328{
6329 struct hclge_vport *vport = hclge_get_vport(handle);
6330 struct hclge_dev *hdev = vport->back;
6331
6332 ether_addr_copy(p, hdev->hw.mac.mac_addr);
6333}
6334
59098055
FL
6335static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
6336 bool is_first)
46a3df9f
S
6337{
6338 const unsigned char *new_addr = (const unsigned char *)p;
6339 struct hclge_vport *vport = hclge_get_vport(handle);
6340 struct hclge_dev *hdev = vport->back;
18838d0c 6341 int ret;
46a3df9f
S
6342
6343 /* mac addr check */
6344 if (is_zero_ether_addr(new_addr) ||
6345 is_broadcast_ether_addr(new_addr) ||
6346 is_multicast_ether_addr(new_addr)) {
6347 dev_err(&hdev->pdev->dev,
6348 "Change uc mac err! invalid mac:%p.\n",
6349 new_addr);
6350 return -EINVAL;
6351 }
6352
962e31bd
YL
6353 if ((!is_first || is_kdump_kernel()) &&
6354 hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
18838d0c 6355 dev_warn(&hdev->pdev->dev,
59098055 6356 "remove old uc mac address fail.\n");
46a3df9f 6357
18838d0c
FL
6358 ret = hclge_add_uc_addr(handle, new_addr);
6359 if (ret) {
6360 dev_err(&hdev->pdev->dev,
6361 "add uc mac address fail, ret =%d.\n",
6362 ret);
6363
59098055
FL
6364 if (!is_first &&
6365 hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
18838d0c 6366 dev_err(&hdev->pdev->dev,
59098055 6367 "restore uc mac address fail.\n");
18838d0c
FL
6368
6369 return -EIO;
46a3df9f
S
6370 }
6371
e98d7183 6372 ret = hclge_pause_addr_cfg(hdev, new_addr);
18838d0c
FL
6373 if (ret) {
6374 dev_err(&hdev->pdev->dev,
6375 "configure mac pause address fail, ret =%d.\n",
6376 ret);
6377 return -EIO;
6378 }
6379
6380 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
6381
6382 return 0;
46a3df9f
S
6383}
6384
26483246
XW
6385static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
6386 int cmd)
6387{
6388 struct hclge_vport *vport = hclge_get_vport(handle);
6389 struct hclge_dev *hdev = vport->back;
6390
6391 if (!hdev->hw.mac.phydev)
6392 return -EOPNOTSUPP;
6393
6394 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
6395}
6396
46a3df9f 6397static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
30ebc576 6398 u8 fe_type, bool filter_en, u8 vf_id)
46a3df9f 6399{
d44f9b63 6400 struct hclge_vlan_filter_ctrl_cmd *req;
46a3df9f
S
6401 struct hclge_desc desc;
6402 int ret;
6403
6404 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
6405
d44f9b63 6406 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
46a3df9f 6407 req->vlan_type = vlan_type;
64d114f0 6408 req->vlan_fe = filter_en ? fe_type : 0;
30ebc576 6409 req->vf_id = vf_id;
46a3df9f
S
6410
6411 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 6412 if (ret)
46a3df9f
S
6413 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
6414 ret);
46a3df9f 6415
3f639907 6416 return ret;
46a3df9f
S
6417}
6418
391b5e93
JS
6419#define HCLGE_FILTER_TYPE_VF 0
6420#define HCLGE_FILTER_TYPE_PORT 1
64d114f0
ZL
6421#define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0)
6422#define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0)
6423#define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1)
6424#define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
6425#define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3)
6426#define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \
6427 | HCLGE_FILTER_FE_ROCE_EGRESS_B)
6428#define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \
6429 | HCLGE_FILTER_FE_ROCE_INGRESS_B)
391b5e93
JS
6430
6431static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
6432{
6433 struct hclge_vport *vport = hclge_get_vport(handle);
6434 struct hclge_dev *hdev = vport->back;
6435
64d114f0
ZL
6436 if (hdev->pdev->revision >= 0x21) {
6437 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
30ebc576 6438 HCLGE_FILTER_FE_EGRESS, enable, 0);
64d114f0 6439 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
30ebc576 6440 HCLGE_FILTER_FE_INGRESS, enable, 0);
64d114f0
ZL
6441 } else {
6442 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
30ebc576
JS
6443 HCLGE_FILTER_FE_EGRESS_V1_B, enable,
6444 0);
64d114f0 6445 }
c60edc17
JS
6446 if (enable)
6447 handle->netdev_flags |= HNAE3_VLAN_FLTR;
6448 else
6449 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
391b5e93
JS
6450}
6451
dc8131d8
YL
6452static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
6453 bool is_kill, u16 vlan, u8 qos,
6454 __be16 proto)
46a3df9f
S
6455{
6456#define HCLGE_MAX_VF_BYTES 16
d44f9b63
YL
6457 struct hclge_vlan_filter_vf_cfg_cmd *req0;
6458 struct hclge_vlan_filter_vf_cfg_cmd *req1;
46a3df9f
S
6459 struct hclge_desc desc[2];
6460 u8 vf_byte_val;
6461 u8 vf_byte_off;
6462 int ret;
6463
6464 hclge_cmd_setup_basic_desc(&desc[0],
6465 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
6466 hclge_cmd_setup_basic_desc(&desc[1],
6467 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
6468
6469 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6470
6471 vf_byte_off = vfid / 8;
6472 vf_byte_val = 1 << (vfid % 8);
6473
d44f9b63
YL
6474 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
6475 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
46a3df9f 6476
a90bb9a5 6477 req0->vlan_id = cpu_to_le16(vlan);
46a3df9f
S
6478 req0->vlan_cfg = is_kill;
6479
6480 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
6481 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
6482 else
6483 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
6484
6485 ret = hclge_cmd_send(&hdev->hw, desc, 2);
6486 if (ret) {
6487 dev_err(&hdev->pdev->dev,
6488 "Send vf vlan command fail, ret =%d.\n",
6489 ret);
6490 return ret;
6491 }
6492
6493 if (!is_kill) {
6c251711 6494#define HCLGE_VF_VLAN_NO_ENTRY 2
46a3df9f
S
6495 if (!req0->resp_code || req0->resp_code == 1)
6496 return 0;
6497
6c251711
YL
6498 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
6499 dev_warn(&hdev->pdev->dev,
6500 "vf vlan table is full, vf vlan filter is disabled\n");
6501 return 0;
6502 }
6503
46a3df9f
S
6504 dev_err(&hdev->pdev->dev,
6505 "Add vf vlan filter fail, ret =%d.\n",
6506 req0->resp_code);
6507 } else {
41dafea2 6508#define HCLGE_VF_VLAN_DEL_NO_FOUND 1
46a3df9f
S
6509 if (!req0->resp_code)
6510 return 0;
6511
41dafea2
YL
6512 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
6513 dev_warn(&hdev->pdev->dev,
6514 "vlan %d filter is not in vf vlan table\n",
6515 vlan);
6516 return 0;
6517 }
6518
46a3df9f
S
6519 dev_err(&hdev->pdev->dev,
6520 "Kill vf vlan filter fail, ret =%d.\n",
6521 req0->resp_code);
6522 }
6523
6524 return -EIO;
6525}
6526
dc8131d8
YL
6527static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
6528 u16 vlan_id, bool is_kill)
46a3df9f 6529{
d44f9b63 6530 struct hclge_vlan_filter_pf_cfg_cmd *req;
46a3df9f
S
6531 struct hclge_desc desc;
6532 u8 vlan_offset_byte_val;
6533 u8 vlan_offset_byte;
6534 u8 vlan_offset_160;
6535 int ret;
6536
6537 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
6538
6539 vlan_offset_160 = vlan_id / 160;
6540 vlan_offset_byte = (vlan_id % 160) / 8;
6541 vlan_offset_byte_val = 1 << (vlan_id % 8);
6542
d44f9b63 6543 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
46a3df9f
S
6544 req->vlan_offset = vlan_offset_160;
6545 req->vlan_cfg = is_kill;
6546 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
6547
6548 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
dc8131d8
YL
6549 if (ret)
6550 dev_err(&hdev->pdev->dev,
6551 "port vlan command, send fail, ret =%d.\n", ret);
6552 return ret;
6553}
6554
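/* Worked example of the vlan_id-to-bitmap mapping used above (value is
 * illustrative): for vlan_id 100, vlan_offset_160 = 100 / 160 = 0,
 * vlan_offset_byte = (100 % 160) / 8 = 12 and
 * vlan_offset_byte_val = 1 << (100 % 8) = 0x10.
 */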
6555static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
6556 u16 vport_id, u16 vlan_id, u8 qos,
6557 bool is_kill)
6558{
6559 u16 vport_idx, vport_num = 0;
6560 int ret;
6561
daaa8521
YL
6562 if (is_kill && !vlan_id)
6563 return 0;
6564
dc8131d8
YL
6565 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
6566 0, proto);
46a3df9f
S
6567 if (ret) {
6568 dev_err(&hdev->pdev->dev,
dc8131d8
YL
6569 "Set %d vport vlan filter config fail, ret =%d.\n",
6570 vport_id, ret);
46a3df9f
S
6571 return ret;
6572 }
6573
dc8131d8
YL
6574 /* vlan 0 may be added twice when 8021q module is enabled */
6575 if (!is_kill && !vlan_id &&
6576 test_bit(vport_id, hdev->vlan_table[vlan_id]))
6577 return 0;
6578
6579 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
46a3df9f 6580 dev_err(&hdev->pdev->dev,
dc8131d8
YL
6581 "Add port vlan failed, vport %d is already in vlan %d\n",
6582 vport_id, vlan_id);
6583 return -EINVAL;
46a3df9f
S
6584 }
6585
dc8131d8
YL
6586 if (is_kill &&
6587 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
6588 dev_err(&hdev->pdev->dev,
6589 "Delete port vlan failed, vport %d is not in vlan %d\n",
6590 vport_id, vlan_id);
6591 return -EINVAL;
6592 }
6593
54e97d11 6594 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
dc8131d8
YL
6595 vport_num++;
6596
6597 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
6598 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
6599 is_kill);
6600
6601 return ret;
6602}
6603
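/* The function above keeps two filter levels in sync: the per-VF table is
 * always updated, while the port level table is only touched when the first
 * vport adds a vlan (vport_num == 1 after the add) or the last vport
 * removes it (vport_num == 0 after the kill).
 */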
5f6ea83f
PL
6604static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
6605{
6606 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
6607 struct hclge_vport_vtag_tx_cfg_cmd *req;
6608 struct hclge_dev *hdev = vport->back;
6609 struct hclge_desc desc;
6610 int status;
6611
6612 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
6613
6614 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
6615 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
6616 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
e4e87715
PL
6617 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
6618 vcfg->accept_tag1 ? 1 : 0);
6619 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
6620 vcfg->accept_untag1 ? 1 : 0);
6621 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
6622 vcfg->accept_tag2 ? 1 : 0);
6623 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
6624 vcfg->accept_untag2 ? 1 : 0);
6625 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
6626 vcfg->insert_tag1_en ? 1 : 0);
6627 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
6628 vcfg->insert_tag2_en ? 1 : 0);
6629 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
5f6ea83f
PL
6630
6631 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
6632 req->vf_bitmap[req->vf_offset] =
6633 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
6634
6635 status = hclge_cmd_send(&hdev->hw, &desc, 1);
6636 if (status)
6637 dev_err(&hdev->pdev->dev,
6638 "Send port txvlan cfg command fail, ret =%d\n",
6639 status);
6640
6641 return status;
6642}
6643
6644static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
6645{
6646 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
6647 struct hclge_vport_vtag_rx_cfg_cmd *req;
6648 struct hclge_dev *hdev = vport->back;
6649 struct hclge_desc desc;
6650 int status;
6651
6652 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
6653
6654 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
e4e87715
PL
6655 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
6656 vcfg->strip_tag1_en ? 1 : 0);
6657 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
6658 vcfg->strip_tag2_en ? 1 : 0);
6659 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
6660 vcfg->vlan1_vlan_prionly ? 1 : 0);
6661 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
6662 vcfg->vlan2_vlan_prionly ? 1 : 0);
5f6ea83f
PL
6663
6664 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
6665 req->vf_bitmap[req->vf_offset] =
6666 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
6667
6668 status = hclge_cmd_send(&hdev->hw, &desc, 1);
6669 if (status)
6670 dev_err(&hdev->pdev->dev,
6671 "Send port rxvlan cfg command fail, ret =%d\n",
6672 status);
6673
6674 return status;
6675}
6676
741fca16
JS
6677static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
6678 u16 port_base_vlan_state,
6679 u16 vlan_tag)
6680{
6681 int ret;
6682
6683 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
6684 vport->txvlan_cfg.accept_tag1 = true;
6685 vport->txvlan_cfg.insert_tag1_en = false;
6686 vport->txvlan_cfg.default_tag1 = 0;
6687 } else {
6688 vport->txvlan_cfg.accept_tag1 = false;
6689 vport->txvlan_cfg.insert_tag1_en = true;
6690 vport->txvlan_cfg.default_tag1 = vlan_tag;
6691 }
6692
6693 vport->txvlan_cfg.accept_untag1 = true;
6694
6695 /* accept_tag2 and accept_untag2 are not supported on
6696 * pdev revision(0x20); newer revisions support them,
6697 * but these two fields cannot be configured by the user.
6698 */
6699 vport->txvlan_cfg.accept_tag2 = true;
6700 vport->txvlan_cfg.accept_untag2 = true;
6701 vport->txvlan_cfg.insert_tag2_en = false;
6702 vport->txvlan_cfg.default_tag2 = 0;
6703
6704 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
6705 vport->rxvlan_cfg.strip_tag1_en = false;
6706 vport->rxvlan_cfg.strip_tag2_en =
6707 vport->rxvlan_cfg.rx_vlan_offload_en;
6708 } else {
6709 vport->rxvlan_cfg.strip_tag1_en =
6710 vport->rxvlan_cfg.rx_vlan_offload_en;
6711 vport->rxvlan_cfg.strip_tag2_en = true;
6712 }
6713 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
6714 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
6715
6716 ret = hclge_set_vlan_tx_offload_cfg(vport);
6717 if (ret)
6718 return ret;
6719
6720 return hclge_set_vlan_rx_offload_cfg(vport);
6721}
6722
5f6ea83f
PL
6723static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
6724{
6725 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
6726 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
6727 struct hclge_desc desc;
6728 int status;
6729
6730 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
6731 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
6732 rx_req->ot_fst_vlan_type =
6733 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
6734 rx_req->ot_sec_vlan_type =
6735 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
6736 rx_req->in_fst_vlan_type =
6737 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
6738 rx_req->in_sec_vlan_type =
6739 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
6740
6741 status = hclge_cmd_send(&hdev->hw, &desc, 1);
6742 if (status) {
6743 dev_err(&hdev->pdev->dev,
6744 "Send rxvlan protocol type command fail, ret =%d\n",
6745 status);
6746 return status;
6747 }
6748
6749 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
6750
d0d72bac 6751 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
5f6ea83f
PL
6752 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
6753 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
6754
6755 status = hclge_cmd_send(&hdev->hw, &desc, 1);
6756 if (status)
6757 dev_err(&hdev->pdev->dev,
6758 "Send txvlan protocol type command fail, ret =%d\n",
6759 status);
6760
6761 return status;
6762}
6763
46a3df9f
S
6764static int hclge_init_vlan_config(struct hclge_dev *hdev)
6765{
5f6ea83f
PL
6766#define HCLGE_DEF_VLAN_TYPE 0x8100
6767
c60edc17 6768 struct hnae3_handle *handle = &hdev->vport[0].nic;
5f6ea83f 6769 struct hclge_vport *vport;
46a3df9f 6770 int ret;
5f6ea83f
PL
6771 int i;
6772
64d114f0 6773 if (hdev->pdev->revision >= 0x21) {
30ebc576
JS
6774 /* for revision 0x21, vf vlan filter is per function */
6775 for (i = 0; i < hdev->num_alloc_vport; i++) {
6776 vport = &hdev->vport[i];
6777 ret = hclge_set_vlan_filter_ctrl(hdev,
6778 HCLGE_FILTER_TYPE_VF,
6779 HCLGE_FILTER_FE_EGRESS,
6780 true,
6781 vport->vport_id);
6782 if (ret)
6783 return ret;
6784 }
46a3df9f 6785
64d114f0 6786 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
30ebc576
JS
6787 HCLGE_FILTER_FE_INGRESS, true,
6788 0);
64d114f0
ZL
6789 if (ret)
6790 return ret;
6791 } else {
6792 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6793 HCLGE_FILTER_FE_EGRESS_V1_B,
30ebc576 6794 true, 0);
64d114f0
ZL
6795 if (ret)
6796 return ret;
6797 }
46a3df9f 6798
c60edc17
JS
6799 handle->netdev_flags |= HNAE3_VLAN_FLTR;
6800
5f6ea83f
PL
6801 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
6802 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
6803 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
6804 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
6805 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
6806 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
6807
6808 ret = hclge_set_vlan_protocol_type(hdev);
5e43aef8
L
6809 if (ret)
6810 return ret;
46a3df9f 6811
5f6ea83f 6812 for (i = 0; i < hdev->num_alloc_vport; i++) {
741fca16 6813 u16 vlan_tag;
dcb35cce 6814
741fca16
JS
6815 vport = &hdev->vport[i];
6816 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
5f6ea83f 6817
741fca16
JS
6818 ret = hclge_vlan_offload_cfg(vport,
6819 vport->port_base_vlan_cfg.state,
6820 vlan_tag);
5f6ea83f
PL
6821 if (ret)
6822 return ret;
6823 }
6824
dc8131d8 6825 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
46a3df9f
S
6826}
6827
21e043cd
JS
6828static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
6829 bool writen_to_tbl)
c6075b19 6830{
6831 struct hclge_vport_vlan_cfg *vlan;
6832
6833 /* vlan 0 is reserved */
6834 if (!vlan_id)
6835 return;
6836
6837 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
6838 if (!vlan)
6839 return;
6840
21e043cd 6841 vlan->hd_tbl_status = writen_to_tbl;
c6075b19 6842 vlan->vlan_id = vlan_id;
6843
6844 list_add_tail(&vlan->node, &vport->vlan_list);
6845}
6846
21e043cd
JS
6847static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
6848{
6849 struct hclge_vport_vlan_cfg *vlan, *tmp;
6850 struct hclge_dev *hdev = vport->back;
6851 int ret;
6852
6853 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
6854 if (!vlan->hd_tbl_status) {
6855 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
6856 vport->vport_id,
6857 vlan->vlan_id, 0, false);
6858 if (ret) {
6859 dev_err(&hdev->pdev->dev,
6860 "restore vport vlan list failed, ret=%d\n",
6861 ret);
6862 return ret;
6863 }
6864 }
6865 vlan->hd_tbl_status = true;
6866 }
6867
6868 return 0;
6869}
6870
6871static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
6872 bool is_write_tbl)
c6075b19 6873{
6874 struct hclge_vport_vlan_cfg *vlan, *tmp;
6875 struct hclge_dev *hdev = vport->back;
6876
6877 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
6878 if (vlan->vlan_id == vlan_id) {
6879 if (is_write_tbl && vlan->hd_tbl_status)
6880 hclge_set_vlan_filter_hw(hdev,
6881 htons(ETH_P_8021Q),
6882 vport->vport_id,
6883 vlan_id, 0,
6884 true);
6885
6886 list_del(&vlan->node);
6887 kfree(vlan);
6888 break;
6889 }
6890 }
6891}
6892
6893void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
6894{
6895 struct hclge_vport_vlan_cfg *vlan, *tmp;
6896 struct hclge_dev *hdev = vport->back;
6897
6898 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
6899 if (vlan->hd_tbl_status)
6900 hclge_set_vlan_filter_hw(hdev,
6901 htons(ETH_P_8021Q),
6902 vport->vport_id,
6903 vlan->vlan_id, 0,
6904 true);
6905
6906 vlan->hd_tbl_status = false;
6907 if (is_del_list) {
6908 list_del(&vlan->node);
6909 kfree(vlan);
6910 }
6911 }
6912}
6913
6914void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
6915{
6916 struct hclge_vport_vlan_cfg *vlan, *tmp;
6917 struct hclge_vport *vport;
6918 int i;
6919
6920 mutex_lock(&hdev->vport_cfg_mutex);
6921 for (i = 0; i < hdev->num_alloc_vport; i++) {
6922 vport = &hdev->vport[i];
6923 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
6924 list_del(&vlan->node);
6925 kfree(vlan);
6926 }
6927 }
6928 mutex_unlock(&hdev->vport_cfg_mutex);
6929}
6930
b2641e2a 6931int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
052ece6d
PL
6932{
6933 struct hclge_vport *vport = hclge_get_vport(handle);
6934
44e626f7
JS
6935 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
6936 vport->rxvlan_cfg.strip_tag1_en = false;
6937 vport->rxvlan_cfg.strip_tag2_en = enable;
6938 } else {
6939 vport->rxvlan_cfg.strip_tag1_en = enable;
6940 vport->rxvlan_cfg.strip_tag2_en = true;
6941 }
052ece6d
PL
6942 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
6943 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
44e626f7 6944 vport->rxvlan_cfg.rx_vlan_offload_en = enable;
052ece6d
PL
6945
6946 return hclge_set_vlan_rx_offload_cfg(vport);
6947}
6948
21e043cd
JS
6949static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
6950 u16 port_base_vlan_state,
6951 struct hclge_vlan_info *new_info,
6952 struct hclge_vlan_info *old_info)
6953{
6954 struct hclge_dev *hdev = vport->back;
6955 int ret;
6956
6957 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
6958 hclge_rm_vport_all_vlan_table(vport, false);
6959 return hclge_set_vlan_filter_hw(hdev,
6960 htons(new_info->vlan_proto),
6961 vport->vport_id,
6962 new_info->vlan_tag,
6963 new_info->qos, false);
6964 }
6965
6966 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
6967 vport->vport_id, old_info->vlan_tag,
6968 old_info->qos, true);
6969 if (ret)
6970 return ret;
6971
6972 return hclge_add_vport_all_vlan_table(vport);
6973}
6974
6975int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
6976 struct hclge_vlan_info *vlan_info)
6977{
6978 struct hnae3_handle *nic = &vport->nic;
6979 struct hclge_vlan_info *old_vlan_info;
6980 struct hclge_dev *hdev = vport->back;
6981 int ret;
6982
6983 old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
6984
6985 ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
6986 if (ret)
6987 return ret;
6988
6989 if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
6990 /* add new VLAN tag */
8a9a654b
JS
6991 ret = hclge_set_vlan_filter_hw(hdev,
6992 htons(vlan_info->vlan_proto),
21e043cd
JS
6993 vport->vport_id,
6994 vlan_info->vlan_tag,
6995 vlan_info->qos, false);
6996 if (ret)
6997 return ret;
6998
6999 /* remove old VLAN tag */
8a9a654b
JS
7000 ret = hclge_set_vlan_filter_hw(hdev,
7001 htons(old_vlan_info->vlan_proto),
21e043cd
JS
7002 vport->vport_id,
7003 old_vlan_info->vlan_tag,
7004 old_vlan_info->qos, true);
7005 if (ret)
7006 return ret;
7007
7008 goto update;
7009 }
7010
7011 ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
7012 old_vlan_info);
7013 if (ret)
7014 return ret;
7015
7016 /* update state only when disable/enable port based VLAN */
7017 vport->port_base_vlan_cfg.state = state;
7018 if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
7019 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
7020 else
7021 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
7022
7023update:
7024 vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
7025 vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
7026 vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
7027
7028 return 0;
7029}
7030
7031static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
7032 enum hnae3_port_base_vlan_state state,
7033 u16 vlan)
7034{
7035 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7036 if (!vlan)
7037 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7038 else
7039 return HNAE3_PORT_BASE_VLAN_ENABLE;
7040 } else {
7041 if (!vlan)
7042 return HNAE3_PORT_BASE_VLAN_DISABLE;
7043 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
7044 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7045 else
7046 return HNAE3_PORT_BASE_VLAN_MODIFY;
7047 }
7048}
7049
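/* State transitions implemented above: when port based VLAN is disabled,
 * vlan == 0 keeps the state unchanged and a non-zero vlan enables it; when
 * already enabled, vlan == 0 disables it, the currently configured vlan is
 * a no-change, and any other vlan is a modify.
 */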
7050static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
7051 u16 vlan, u8 qos, __be16 proto)
7052{
7053 struct hclge_vport *vport = hclge_get_vport(handle);
7054 struct hclge_dev *hdev = vport->back;
7055 struct hclge_vlan_info vlan_info;
7056 u16 state;
7057 int ret;
7058
7059 if (hdev->pdev->revision == 0x20)
7060 return -EOPNOTSUPP;
7061
7062 /* qos is a 3-bit value, so it cannot be bigger than 7 */
7063 if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
7064 return -EINVAL;
7065 if (proto != htons(ETH_P_8021Q))
7066 return -EPROTONOSUPPORT;
7067
7068 vport = &hdev->vport[vfid];
7069 state = hclge_get_port_base_vlan_state(vport,
7070 vport->port_base_vlan_cfg.state,
7071 vlan);
7072 if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
7073 return 0;
7074
7075 vlan_info.vlan_tag = vlan;
7076 vlan_info.qos = qos;
7077 vlan_info.vlan_proto = ntohs(proto);
7078
7079 /* update port based VLAN for PF */
7080 if (!vfid) {
7081 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7082 ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
7083 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7084
7085 return ret;
7086 }
7087
92f11ea1
JS
7088 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
7089 return hclge_update_port_base_vlan_cfg(vport, state,
7090 &vlan_info);
7091 } else {
7092 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
7093 (u8)vfid, state,
7094 vlan, qos,
7095 ntohs(proto));
7096 return ret;
7097 }
21e043cd
JS
7098}
7099
7100int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
7101 u16 vlan_id, bool is_kill)
7102{
7103 struct hclge_vport *vport = hclge_get_vport(handle);
7104 struct hclge_dev *hdev = vport->back;
7105 bool writen_to_tbl = false;
7106 int ret = 0;
7107
7108 /* When port based VLAN is enabled, we use it as the VLAN filter
7109 * entry. In this case, we don't update the VLAN filter table when
7110 * the user adds or removes a VLAN; we just update the vport VLAN
7111 * list. The VLAN ids in that list won't be written to the VLAN
7112 * filter table until port based VLAN is disabled.
7113 */
7114 if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7115 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
7116 vlan_id, 0, is_kill);
7117		written_to_tbl = true;
7118 }
7119
7120 if (ret)
7121 return ret;
7122
7123 if (is_kill)
7124 hclge_rm_vport_vlan_table(vport, vlan_id, false);
7125 else
7126 hclge_add_vport_vlan_table(vport, vlan_id,
7127					   written_to_tbl);
7128
7129 return 0;
7130}
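/* Illustrative summary (not driver code) of the bookkeeping above:
 *
 *   port based VLAN state   HW VLAN filter table      vport VLAN list
 *   DISABLE                 entry added/removed now   entry tracked and
 *                                                     marked as written
 *   ENABLE                  left untouched            entry tracked only;
 *                                                     it is flushed to HW
 *                                                     once port based VLAN
 *                                                     is disabled again
 */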
7131
e6d7d79d 7132static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
46a3df9f 7133{
d44f9b63 7134 struct hclge_config_max_frm_size_cmd *req;
46a3df9f 7135 struct hclge_desc desc;
46a3df9f 7136
46a3df9f
S
7137 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
7138
d44f9b63 7139 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
e6d7d79d 7140 req->max_frm_size = cpu_to_le16(new_mps);
8fc7346c 7141 req->min_frm_size = HCLGE_MAC_MIN_FRAME;
46a3df9f 7142
e6d7d79d 7143 return hclge_cmd_send(&hdev->hw, &desc, 1);
46a3df9f
S
7144}
7145
dd72140c
FL
7146static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
7147{
7148 struct hclge_vport *vport = hclge_get_vport(handle);
818f1675
YL
7149
7150 return hclge_set_vport_mtu(vport, new_mtu);
7151}
7152
7153int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
7154{
dd72140c 7155 struct hclge_dev *hdev = vport->back;
818f1675 7156 int i, max_frm_size, ret = 0;
dd72140c 7157
e6d7d79d
YL
7158 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
7159 if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
7160 max_frm_size > HCLGE_MAC_MAX_FRAME)
7161 return -EINVAL;
7162
818f1675
YL
7163 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
7164 mutex_lock(&hdev->vport_lock);
7165 /* VF's mps must fit within hdev->mps */
7166 if (vport->vport_id && max_frm_size > hdev->mps) {
7167 mutex_unlock(&hdev->vport_lock);
7168 return -EINVAL;
7169 } else if (vport->vport_id) {
7170 vport->mps = max_frm_size;
7171 mutex_unlock(&hdev->vport_lock);
7172 return 0;
7173 }
7174
7175	/* PF's mps must be greater than the VF's mps */
7176 for (i = 1; i < hdev->num_alloc_vport; i++)
7177 if (max_frm_size < hdev->vport[i].mps) {
7178 mutex_unlock(&hdev->vport_lock);
7179 return -EINVAL;
7180 }
7181
cdca4c48
YL
7182 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7183
e6d7d79d 7184 ret = hclge_set_mac_mtu(hdev, max_frm_size);
dd72140c
FL
7185 if (ret) {
7186 dev_err(&hdev->pdev->dev,
7187 "Change mtu fail, ret =%d\n", ret);
818f1675 7188 goto out;
dd72140c
FL
7189 }
7190
e6d7d79d 7191 hdev->mps = max_frm_size;
818f1675 7192 vport->mps = max_frm_size;
e6d7d79d 7193
dd72140c
FL
7194 ret = hclge_buffer_alloc(hdev);
7195 if (ret)
7196 dev_err(&hdev->pdev->dev,
7197 "Allocate buffer fail, ret =%d\n", ret);
7198
818f1675 7199out:
cdca4c48 7200 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
818f1675 7201 mutex_unlock(&hdev->vport_lock);
dd72140c
FL
7202 return ret;
7203}
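/* Worked example (illustrative): for the default MTU of 1500 the frame
 * budget computed at the top of hclge_set_vport_mtu() is
 *
 *     1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) + 2 * VLAN_HLEN (2 * 4) = 1526,
 *
 * which, subject to the HCLGE_MAC_DEFAULT_FRAME floor applied just below,
 * becomes hdev->mps and vport->mps on success.  A VF requesting a frame
 * size larger than the PF's current mps gets -EINVAL, and the PF itself
 * may not shrink below the largest mps already granted to one of its VFs.
 */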
7204
46a3df9f
S
7205static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
7206 bool enable)
7207{
d44f9b63 7208 struct hclge_reset_tqp_queue_cmd *req;
46a3df9f
S
7209 struct hclge_desc desc;
7210 int ret;
7211
7212 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
7213
d44f9b63 7214 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
46a3df9f 7215 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
e4e87715 7216 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
46a3df9f
S
7217
7218 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7219 if (ret) {
7220 dev_err(&hdev->pdev->dev,
7221 "Send tqp reset cmd error, status =%d\n", ret);
7222 return ret;
7223 }
7224
7225 return 0;
7226}
7227
7228static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
7229{
d44f9b63 7230 struct hclge_reset_tqp_queue_cmd *req;
46a3df9f
S
7231 struct hclge_desc desc;
7232 int ret;
7233
7234 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
7235
d44f9b63 7236 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
46a3df9f
S
7237 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7238
7239 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7240 if (ret) {
7241 dev_err(&hdev->pdev->dev,
7242 "Get reset status error, status =%d\n", ret);
7243 return ret;
7244 }
7245
e4e87715 7246 return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
46a3df9f
S
7247}
7248
0c29d191 7249u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
814e0274
PL
7250{
7251 struct hnae3_queue *queue;
7252 struct hclge_tqp *tqp;
7253
7254 queue = handle->kinfo.tqp[queue_id];
7255 tqp = container_of(queue, struct hclge_tqp, q);
7256
7257 return tqp->index;
7258}
7259
7fa6be4f 7260int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
46a3df9f
S
7261{
7262 struct hclge_vport *vport = hclge_get_vport(handle);
7263 struct hclge_dev *hdev = vport->back;
7264 int reset_try_times = 0;
7265 int reset_status;
814e0274 7266 u16 queue_gid;
7fa6be4f 7267 int ret = 0;
46a3df9f 7268
814e0274
PL
7269 queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
7270
46a3df9f
S
7271 ret = hclge_tqp_enable(hdev, queue_id, 0, false);
7272 if (ret) {
7fa6be4f
HT
7273 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
7274 return ret;
46a3df9f
S
7275 }
7276
814e0274 7277 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
46a3df9f 7278 if (ret) {
7fa6be4f
HT
7279 dev_err(&hdev->pdev->dev,
7280 "Send reset tqp cmd fail, ret = %d\n", ret);
7281 return ret;
46a3df9f
S
7282 }
7283
7284 reset_try_times = 0;
7285 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
7286 /* Wait for tqp hw reset */
7287 msleep(20);
814e0274 7288 reset_status = hclge_get_reset_status(hdev, queue_gid);
46a3df9f
S
7289 if (reset_status)
7290 break;
7291 }
7292
7293 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
7fa6be4f
HT
7294 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
7295 return ret;
46a3df9f
S
7296 }
7297
814e0274 7298 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7fa6be4f
HT
7299 if (ret)
7300 dev_err(&hdev->pdev->dev,
7301 "Deassert the soft reset fail, ret = %d\n", ret);
7302
7303 return ret;
46a3df9f
S
7304}
7305
1a426f8b
PL
7306void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
7307{
7308 struct hclge_dev *hdev = vport->back;
7309 int reset_try_times = 0;
7310 int reset_status;
7311 u16 queue_gid;
7312 int ret;
7313
7314 queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
7315
7316 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
7317 if (ret) {
7318 dev_warn(&hdev->pdev->dev,
7319 "Send reset tqp cmd fail, ret = %d\n", ret);
7320 return;
7321 }
7322
7323 reset_try_times = 0;
7324 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
7325 /* Wait for tqp hw reset */
7326 msleep(20);
7327 reset_status = hclge_get_reset_status(hdev, queue_gid);
7328 if (reset_status)
7329 break;
7330 }
7331
7332 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
7333 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
7334 return;
7335 }
7336
7337 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7338 if (ret)
7339 dev_warn(&hdev->pdev->dev,
7340 "Deassert the soft reset fail, ret = %d\n", ret);
7341}
7342
46a3df9f
S
7343static u32 hclge_get_fw_version(struct hnae3_handle *handle)
7344{
7345 struct hclge_vport *vport = hclge_get_vport(handle);
7346 struct hclge_dev *hdev = vport->back;
7347
7348 return hdev->fw_version;
7349}
7350
61387774
PL
7351static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
7352{
7353 struct phy_device *phydev = hdev->hw.mac.phydev;
7354
7355 if (!phydev)
7356 return;
7357
70814e81 7358 phy_set_asym_pause(phydev, rx_en, tx_en);
61387774
PL
7359}
7360
7361static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
7362{
61387774
PL
7363 int ret;
7364
7365 if (rx_en && tx_en)
40173a2e 7366 hdev->fc_mode_last_time = HCLGE_FC_FULL;
61387774 7367 else if (rx_en && !tx_en)
40173a2e 7368 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
61387774 7369 else if (!rx_en && tx_en)
40173a2e 7370 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
61387774 7371 else
40173a2e 7372 hdev->fc_mode_last_time = HCLGE_FC_NONE;
61387774 7373
40173a2e 7374 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
61387774 7375 return 0;
61387774
PL
7376
7377 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
7378 if (ret) {
7379 dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
7380 ret);
7381 return ret;
7382 }
7383
40173a2e 7384 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
61387774
PL
7385
7386 return 0;
7387}
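/* Truth table implemented above (illustrative):
 *
 *   rx_en   tx_en   fc_mode_last_time
 *     1       1     HCLGE_FC_FULL
 *     1       0     HCLGE_FC_RX_PAUSE
 *     0       1     HCLGE_FC_TX_PAUSE
 *     0       0     HCLGE_FC_NONE
 *
 * or, as a minimal sketch (not driver code):
 *
 *     mode = rx_en ? (tx_en ? HCLGE_FC_FULL : HCLGE_FC_RX_PAUSE)
 *                  : (tx_en ? HCLGE_FC_TX_PAUSE : HCLGE_FC_NONE);
 *
 * When PFC is active (fc_mode == HCLGE_FC_PFC) the MAC pause registers are
 * left untouched and only fc_mode_last_time is recorded.
 */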
7388
1770a7a3
PL
7389int hclge_cfg_flowctrl(struct hclge_dev *hdev)
7390{
7391 struct phy_device *phydev = hdev->hw.mac.phydev;
7392 u16 remote_advertising = 0;
7393 u16 local_advertising = 0;
7394 u32 rx_pause, tx_pause;
7395 u8 flowctl;
7396
7397 if (!phydev->link || !phydev->autoneg)
7398 return 0;
7399
3c1bcc86 7400 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
1770a7a3
PL
7401
7402 if (phydev->pause)
7403 remote_advertising = LPA_PAUSE_CAP;
7404
7405 if (phydev->asym_pause)
7406 remote_advertising |= LPA_PAUSE_ASYM;
7407
7408 flowctl = mii_resolve_flowctrl_fdx(local_advertising,
7409 remote_advertising);
7410 tx_pause = flowctl & FLOW_CTRL_TX;
7411 rx_pause = flowctl & FLOW_CTRL_RX;
7412
7413 if (phydev->duplex == HCLGE_MAC_HALF) {
7414 tx_pause = 0;
7415 rx_pause = 0;
7416 }
7417
7418 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
7419}
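/* Worked example (illustrative) of the resolution above, relying on the
 * standard 802.3 behaviour of mii_resolve_flowctrl_fdx(): when the local
 * PHY advertises Pause and the link partner does too (phydev->pause set),
 * the helper returns FLOW_CTRL_TX | FLOW_CTRL_RX and hclge_cfg_pauseparam()
 * is called with rx_en = tx_en = 1.  On a half-duplex link both directions
 * are forced off regardless of what was negotiated.
 */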
7420
46a3df9f
S
7421static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
7422 u32 *rx_en, u32 *tx_en)
7423{
7424 struct hclge_vport *vport = hclge_get_vport(handle);
7425 struct hclge_dev *hdev = vport->back;
7426
7427 *auto_neg = hclge_get_autoneg(handle);
7428
7429 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
7430 *rx_en = 0;
7431 *tx_en = 0;
7432 return;
7433 }
7434
7435 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
7436 *rx_en = 1;
7437 *tx_en = 0;
7438 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
7439 *tx_en = 1;
7440 *rx_en = 0;
7441 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
7442 *rx_en = 1;
7443 *tx_en = 1;
7444 } else {
7445 *rx_en = 0;
7446 *tx_en = 0;
7447 }
7448}
7449
61387774
PL
7450static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
7451 u32 rx_en, u32 tx_en)
7452{
7453 struct hclge_vport *vport = hclge_get_vport(handle);
7454 struct hclge_dev *hdev = vport->back;
7455 struct phy_device *phydev = hdev->hw.mac.phydev;
7456 u32 fc_autoneg;
7457
61387774
PL
7458 fc_autoneg = hclge_get_autoneg(handle);
7459 if (auto_neg != fc_autoneg) {
7460 dev_info(&hdev->pdev->dev,
7461 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
7462 return -EOPNOTSUPP;
7463 }
7464
7465 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
7466 dev_info(&hdev->pdev->dev,
7467 "Priority flow control enabled. Cannot set link flow control.\n");
7468 return -EOPNOTSUPP;
7469 }
7470
7471 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
7472
7473 if (!fc_autoneg)
7474 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
7475
0c963e8c
FL
7476 /* Only support flow control negotiation for netdev with
7477 * phy attached for now.
7478 */
7479 if (!phydev)
7480 return -EOPNOTSUPP;
7481
61387774
PL
7482 return phy_start_aneg(phydev);
7483}
7484
46a3df9f
S
7485static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
7486 u8 *auto_neg, u32 *speed, u8 *duplex)
7487{
7488 struct hclge_vport *vport = hclge_get_vport(handle);
7489 struct hclge_dev *hdev = vport->back;
7490
7491 if (speed)
7492 *speed = hdev->hw.mac.speed;
7493 if (duplex)
7494 *duplex = hdev->hw.mac.duplex;
7495 if (auto_neg)
7496 *auto_neg = hdev->hw.mac.autoneg;
7497}
7498
7499static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type)
7500{
7501 struct hclge_vport *vport = hclge_get_vport(handle);
7502 struct hclge_dev *hdev = vport->back;
7503
7504 if (media_type)
7505 *media_type = hdev->hw.mac.media_type;
7506}
7507
7508static void hclge_get_mdix_mode(struct hnae3_handle *handle,
7509 u8 *tp_mdix_ctrl, u8 *tp_mdix)
7510{
7511 struct hclge_vport *vport = hclge_get_vport(handle);
7512 struct hclge_dev *hdev = vport->back;
7513 struct phy_device *phydev = hdev->hw.mac.phydev;
7514 int mdix_ctrl, mdix, retval, is_resolved;
7515
7516 if (!phydev) {
7517 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
7518 *tp_mdix = ETH_TP_MDI_INVALID;
7519 return;
7520 }
7521
7522 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
7523
7524 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
e4e87715
PL
7525 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
7526 HCLGE_PHY_MDIX_CTRL_S);
46a3df9f
S
7527
7528 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
e4e87715
PL
7529 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
7530 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
46a3df9f
S
7531
7532 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
7533
7534 switch (mdix_ctrl) {
7535 case 0x0:
7536 *tp_mdix_ctrl = ETH_TP_MDI;
7537 break;
7538 case 0x1:
7539 *tp_mdix_ctrl = ETH_TP_MDI_X;
7540 break;
7541 case 0x3:
7542 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
7543 break;
7544 default:
7545 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
7546 break;
7547 }
7548
7549 if (!is_resolved)
7550 *tp_mdix = ETH_TP_MDI_INVALID;
7551 else if (mdix)
7552 *tp_mdix = ETH_TP_MDI_X;
7553 else
7554 *tp_mdix = ETH_TP_MDI;
7555}
7556
7557static int hclge_init_client_instance(struct hnae3_client *client,
7558 struct hnae3_ae_dev *ae_dev)
7559{
7560 struct hclge_dev *hdev = ae_dev->priv;
7561 struct hclge_vport *vport;
7562 int i, ret;
7563
7564 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
7565 vport = &hdev->vport[i];
7566
7567 switch (client->type) {
7568 case HNAE3_CLIENT_KNIC:
7569
7570 hdev->nic_client = client;
7571 vport->nic.client = client;
7572 ret = client->ops->init_instance(&vport->nic);
7573 if (ret)
49dd8054 7574 goto clear_nic;
46a3df9f 7575
d9f28fc2
JS
7576 hnae3_set_client_init_flag(client, ae_dev, 1);
7577
46a3df9f 7578 if (hdev->roce_client &&
e92a0843 7579 hnae3_dev_roce_supported(hdev)) {
46a3df9f
S
7580 struct hnae3_client *rc = hdev->roce_client;
7581
7582 ret = hclge_init_roce_base_info(vport);
7583 if (ret)
49dd8054 7584 goto clear_roce;
46a3df9f
S
7585
7586 ret = rc->ops->init_instance(&vport->roce);
7587 if (ret)
49dd8054 7588 goto clear_roce;
d9f28fc2
JS
7589
7590 hnae3_set_client_init_flag(hdev->roce_client,
7591 ae_dev, 1);
46a3df9f
S
7592 }
7593
7594 break;
7595 case HNAE3_CLIENT_UNIC:
7596 hdev->nic_client = client;
7597 vport->nic.client = client;
7598
7599 ret = client->ops->init_instance(&vport->nic);
7600 if (ret)
49dd8054 7601 goto clear_nic;
46a3df9f 7602
d9f28fc2
JS
7603 hnae3_set_client_init_flag(client, ae_dev, 1);
7604
46a3df9f
S
7605 break;
7606 case HNAE3_CLIENT_ROCE:
e92a0843 7607 if (hnae3_dev_roce_supported(hdev)) {
46a3df9f
S
7608 hdev->roce_client = client;
7609 vport->roce.client = client;
7610 }
7611
3a46f34d 7612 if (hdev->roce_client && hdev->nic_client) {
46a3df9f
S
7613 ret = hclge_init_roce_base_info(vport);
7614 if (ret)
49dd8054 7615 goto clear_roce;
46a3df9f
S
7616
7617 ret = client->ops->init_instance(&vport->roce);
7618 if (ret)
49dd8054 7619 goto clear_roce;
d9f28fc2
JS
7620
7621 hnae3_set_client_init_flag(client, ae_dev, 1);
46a3df9f 7622 }
fa7a4bd5
JS
7623
7624 break;
7625 default:
7626 return -EINVAL;
46a3df9f
S
7627 }
7628 }
7629
7630 return 0;
49dd8054
JS
7631
7632clear_nic:
7633 hdev->nic_client = NULL;
7634 vport->nic.client = NULL;
7635 return ret;
7636clear_roce:
7637 hdev->roce_client = NULL;
7638 vport->roce.client = NULL;
7639 return ret;
46a3df9f
S
7640}
7641
7642static void hclge_uninit_client_instance(struct hnae3_client *client,
7643 struct hnae3_ae_dev *ae_dev)
7644{
7645 struct hclge_dev *hdev = ae_dev->priv;
7646 struct hclge_vport *vport;
7647 int i;
7648
7649 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
7650 vport = &hdev->vport[i];
a17dcf3f 7651 if (hdev->roce_client) {
46a3df9f
S
7652 hdev->roce_client->ops->uninit_instance(&vport->roce,
7653 0);
a17dcf3f
L
7654 hdev->roce_client = NULL;
7655 vport->roce.client = NULL;
7656 }
46a3df9f
S
7657 if (client->type == HNAE3_CLIENT_ROCE)
7658 return;
49dd8054 7659 if (hdev->nic_client && client->ops->uninit_instance) {
46a3df9f 7660 client->ops->uninit_instance(&vport->nic, 0);
a17dcf3f
L
7661 hdev->nic_client = NULL;
7662 vport->nic.client = NULL;
7663 }
46a3df9f
S
7664 }
7665}
7666
7667static int hclge_pci_init(struct hclge_dev *hdev)
7668{
7669 struct pci_dev *pdev = hdev->pdev;
7670 struct hclge_hw *hw;
7671 int ret;
7672
7673 ret = pci_enable_device(pdev);
7674 if (ret) {
7675 dev_err(&pdev->dev, "failed to enable PCI device\n");
3e249d3b 7676 return ret;
46a3df9f
S
7677 }
7678
7679 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
7680 if (ret) {
7681 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
7682 if (ret) {
7683 dev_err(&pdev->dev,
7684 "can't set consistent PCI DMA");
7685 goto err_disable_device;
7686 }
7687 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
7688 }
7689
7690 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
7691 if (ret) {
7692 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
7693 goto err_disable_device;
7694 }
7695
7696 pci_set_master(pdev);
7697 hw = &hdev->hw;
46a3df9f
S
7698 hw->io_base = pcim_iomap(pdev, 2, 0);
7699 if (!hw->io_base) {
7700 dev_err(&pdev->dev, "Can't map configuration register space\n");
7701 ret = -ENOMEM;
7702 goto err_clr_master;
7703 }
7704
709eb41a
L
7705 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
7706
46a3df9f
S
7707 return 0;
7708err_clr_master:
7709 pci_clear_master(pdev);
7710 pci_release_regions(pdev);
7711err_disable_device:
7712 pci_disable_device(pdev);
46a3df9f
S
7713
7714 return ret;
7715}
7716
7717static void hclge_pci_uninit(struct hclge_dev *hdev)
7718{
7719 struct pci_dev *pdev = hdev->pdev;
7720
6a814413 7721 pcim_iounmap(pdev, hdev->hw.io_base);
887c3820 7722 pci_free_irq_vectors(pdev);
46a3df9f
S
7723 pci_clear_master(pdev);
7724 pci_release_mem_regions(pdev);
7725 pci_disable_device(pdev);
7726}
7727
48569cda
PL
7728static void hclge_state_init(struct hclge_dev *hdev)
7729{
7730 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
7731 set_bit(HCLGE_STATE_DOWN, &hdev->state);
7732 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
7733 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
7734 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
7735 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
7736}
7737
7738static void hclge_state_uninit(struct hclge_dev *hdev)
7739{
7740 set_bit(HCLGE_STATE_DOWN, &hdev->state);
7741
7742 if (hdev->service_timer.function)
7743 del_timer_sync(&hdev->service_timer);
65e41e7e
HT
7744 if (hdev->reset_timer.function)
7745 del_timer_sync(&hdev->reset_timer);
48569cda
PL
7746 if (hdev->service_task.func)
7747 cancel_work_sync(&hdev->service_task);
7748 if (hdev->rst_service_task.func)
7749 cancel_work_sync(&hdev->rst_service_task);
7750 if (hdev->mbx_service_task.func)
7751 cancel_work_sync(&hdev->mbx_service_task);
7752}
7753
6b9a97ee
HT
7754static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
7755{
7756#define HCLGE_FLR_WAIT_MS 100
7757#define HCLGE_FLR_WAIT_CNT 50
7758 struct hclge_dev *hdev = ae_dev->priv;
7759 int cnt = 0;
7760
7761 clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
7762 clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
7763 set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
7764 hclge_reset_event(hdev->pdev, NULL);
7765
7766 while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
7767 cnt++ < HCLGE_FLR_WAIT_CNT)
7768 msleep(HCLGE_FLR_WAIT_MS);
7769
7770 if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
7771 dev_err(&hdev->pdev->dev,
7772 "flr wait down timeout: %d\n", cnt);
7773}
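/* Illustrative timing note: the loop above polls every HCLGE_FLR_WAIT_MS
 * (100 ms) for at most HCLGE_FLR_WAIT_CNT (50) iterations, so the PF waits
 * up to roughly 5 seconds for the FLR down sequence before reporting a
 * timeout.
 */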
7774
7775static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
7776{
7777 struct hclge_dev *hdev = ae_dev->priv;
7778
7779 set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
7780}
7781
46a3df9f
S
7782static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
7783{
7784 struct pci_dev *pdev = ae_dev->pdev;
46a3df9f
S
7785 struct hclge_dev *hdev;
7786 int ret;
7787
7788 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
7789 if (!hdev) {
7790 ret = -ENOMEM;
ffd5656e 7791 goto out;
46a3df9f
S
7792 }
7793
46a3df9f
S
7794 hdev->pdev = pdev;
7795 hdev->ae_dev = ae_dev;
4ed340ab 7796 hdev->reset_type = HNAE3_NONE_RESET;
0742ed7c 7797 hdev->reset_level = HNAE3_FUNC_RESET;
46a3df9f 7798 ae_dev->priv = hdev;
e6d7d79d 7799 hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
46a3df9f 7800
818f1675 7801 mutex_init(&hdev->vport_lock);
6dd86902 7802 mutex_init(&hdev->vport_cfg_mutex);
818f1675 7803
46a3df9f
S
7804 ret = hclge_pci_init(hdev);
7805 if (ret) {
7806 dev_err(&pdev->dev, "PCI init failed\n");
ffd5656e 7807 goto out;
46a3df9f
S
7808 }
7809
3efb960f
L
7810 /* Firmware command queue initialize */
7811 ret = hclge_cmd_queue_init(hdev);
7812 if (ret) {
7813 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
ffd5656e 7814 goto err_pci_uninit;
3efb960f
L
7815 }
7816
7817 /* Firmware command initialize */
46a3df9f
S
7818 ret = hclge_cmd_init(hdev);
7819 if (ret)
ffd5656e 7820 goto err_cmd_uninit;
46a3df9f
S
7821
7822 ret = hclge_get_cap(hdev);
7823 if (ret) {
e00e2197
CIK
7824 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
7825 ret);
ffd5656e 7826 goto err_cmd_uninit;
46a3df9f
S
7827 }
7828
7829 ret = hclge_configure(hdev);
7830 if (ret) {
7831 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
ffd5656e 7832 goto err_cmd_uninit;
46a3df9f
S
7833 }
7834
887c3820 7835 ret = hclge_init_msi(hdev);
46a3df9f 7836 if (ret) {
887c3820 7837 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
ffd5656e 7838 goto err_cmd_uninit;
46a3df9f
S
7839 }
7840
466b0c00
L
7841 ret = hclge_misc_irq_init(hdev);
7842 if (ret) {
7843 dev_err(&pdev->dev,
7844 "Misc IRQ(vector0) init error, ret = %d.\n",
7845 ret);
ffd5656e 7846 goto err_msi_uninit;
466b0c00
L
7847 }
7848
46a3df9f
S
7849 ret = hclge_alloc_tqps(hdev);
7850 if (ret) {
7851 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
ffd5656e 7852 goto err_msi_irq_uninit;
46a3df9f
S
7853 }
7854
7855 ret = hclge_alloc_vport(hdev);
7856 if (ret) {
7857 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
ffd5656e 7858 goto err_msi_irq_uninit;
46a3df9f
S
7859 }
7860
7df7dad6
L
7861 ret = hclge_map_tqp(hdev);
7862 if (ret) {
7863 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
2312e050 7864 goto err_msi_irq_uninit;
7df7dad6
L
7865 }
7866
c5ef83cb
HT
7867 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
7868 ret = hclge_mac_mdio_config(hdev);
7869 if (ret) {
7870 dev_err(&hdev->pdev->dev,
7871 "mdio config fail ret=%d\n", ret);
2312e050 7872 goto err_msi_irq_uninit;
c5ef83cb 7873 }
cf9cca2d 7874 }
7875
39932473
JS
7876 ret = hclge_init_umv_space(hdev);
7877 if (ret) {
7878 dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
9fc55413 7879 goto err_mdiobus_unreg;
39932473
JS
7880 }
7881
46a3df9f
S
7882 ret = hclge_mac_init(hdev);
7883 if (ret) {
7884 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
ffd5656e 7885 goto err_mdiobus_unreg;
46a3df9f 7886 }
46a3df9f
S
7887
7888 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
7889 if (ret) {
7890 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
ffd5656e 7891 goto err_mdiobus_unreg;
46a3df9f
S
7892 }
7893
b26a6fea
PL
7894 ret = hclge_config_gro(hdev, true);
7895 if (ret)
7896 goto err_mdiobus_unreg;
7897
46a3df9f
S
7898 ret = hclge_init_vlan_config(hdev);
7899 if (ret) {
7900 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
ffd5656e 7901 goto err_mdiobus_unreg;
46a3df9f
S
7902 }
7903
7904 ret = hclge_tm_schd_init(hdev);
7905 if (ret) {
7906 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
ffd5656e 7907 goto err_mdiobus_unreg;
68ece54e
YL
7908 }
7909
268f5dfa 7910 hclge_rss_init_cfg(hdev);
68ece54e
YL
7911 ret = hclge_rss_init_hw(hdev);
7912 if (ret) {
7913 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
ffd5656e 7914 goto err_mdiobus_unreg;
46a3df9f
S
7915 }
7916
f5aac71c
FL
7917 ret = init_mgr_tbl(hdev);
7918 if (ret) {
7919 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
ffd5656e 7920 goto err_mdiobus_unreg;
f5aac71c
FL
7921 }
7922
d695964d
JS
7923 ret = hclge_init_fd_config(hdev);
7924 if (ret) {
7925 dev_err(&pdev->dev,
7926 "fd table init fail, ret=%d\n", ret);
7927 goto err_mdiobus_unreg;
7928 }
7929
99714195
SJ
7930 ret = hclge_hw_error_set_state(hdev, true);
7931 if (ret) {
7932 dev_err(&pdev->dev,
f3fa4a94 7933 "fail(%d) to enable hw error interrupts\n", ret);
99714195
SJ
7934 goto err_mdiobus_unreg;
7935 }
7936
cacde272
YL
7937 hclge_dcb_ops_set(hdev);
7938
d039ef68 7939 timer_setup(&hdev->service_timer, hclge_service_timer, 0);
65e41e7e 7940 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
46a3df9f 7941 INIT_WORK(&hdev->service_task, hclge_service_task);
cb1b9f77 7942 INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
c1a81619 7943 INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
46a3df9f 7944
8e52a602
XW
7945 hclge_clear_all_event_cause(hdev);
7946
466b0c00
L
7947 /* Enable MISC vector(vector0) */
7948 hclge_enable_vector(&hdev->misc_vector, true);
7949
48569cda 7950 hclge_state_init(hdev);
0742ed7c 7951 hdev->last_reset_time = jiffies;
46a3df9f
S
7952
7953 pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
7954 return 0;
7955
ffd5656e
HT
7956err_mdiobus_unreg:
7957 if (hdev->hw.mac.phydev)
7958 mdiobus_unregister(hdev->hw.mac.mdio_bus);
ffd5656e
HT
7959err_msi_irq_uninit:
7960 hclge_misc_irq_uninit(hdev);
7961err_msi_uninit:
7962 pci_free_irq_vectors(pdev);
7963err_cmd_uninit:
232d0d55 7964 hclge_cmd_uninit(hdev);
ffd5656e 7965err_pci_uninit:
6a814413 7966 pcim_iounmap(pdev, hdev->hw.io_base);
ffd5656e 7967 pci_clear_master(pdev);
46a3df9f 7968 pci_release_regions(pdev);
ffd5656e 7969 pci_disable_device(pdev);
ffd5656e 7970out:
46a3df9f
S
7971 return ret;
7972}
7973
c6dc5213 7974static void hclge_stats_clear(struct hclge_dev *hdev)
7975{
7976 memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
7977}
7978
a6d818e3
YL
7979static void hclge_reset_vport_state(struct hclge_dev *hdev)
7980{
7981 struct hclge_vport *vport = hdev->vport;
7982 int i;
7983
7984 for (i = 0; i < hdev->num_alloc_vport; i++) {
0f14c5b1 7985 hclge_vport_stop(vport);
a6d818e3
YL
7986 vport++;
7987 }
7988}
7989
4ed340ab
L
7990static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
7991{
7992 struct hclge_dev *hdev = ae_dev->priv;
7993 struct pci_dev *pdev = ae_dev->pdev;
7994 int ret;
7995
7996 set_bit(HCLGE_STATE_DOWN, &hdev->state);
7997
c6dc5213 7998 hclge_stats_clear(hdev);
dc8131d8 7999 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
c6dc5213 8000
4ed340ab
L
8001 ret = hclge_cmd_init(hdev);
8002 if (ret) {
8003 dev_err(&pdev->dev, "Cmd queue init failed\n");
8004 return ret;
8005 }
8006
4ed340ab
L
8007 ret = hclge_map_tqp(hdev);
8008 if (ret) {
8009 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
8010 return ret;
8011 }
8012
39932473
JS
8013 hclge_reset_umv_space(hdev);
8014
4ed340ab
L
8015 ret = hclge_mac_init(hdev);
8016 if (ret) {
8017 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
8018 return ret;
8019 }
8020
4ed340ab
L
8021 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8022 if (ret) {
8023 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
8024 return ret;
8025 }
8026
b26a6fea
PL
8027 ret = hclge_config_gro(hdev, true);
8028 if (ret)
8029 return ret;
8030
4ed340ab
L
8031 ret = hclge_init_vlan_config(hdev);
8032 if (ret) {
8033 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
8034 return ret;
8035 }
8036
44e59e37 8037 ret = hclge_tm_init_hw(hdev, true);
4ed340ab 8038 if (ret) {
f31c1ba6 8039 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
4ed340ab
L
8040 return ret;
8041 }
8042
8043 ret = hclge_rss_init_hw(hdev);
8044 if (ret) {
8045 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
8046 return ret;
8047 }
8048
d695964d
JS
8049 ret = hclge_init_fd_config(hdev);
8050 if (ret) {
8051 dev_err(&pdev->dev,
8052 "fd table init fail, ret=%d\n", ret);
8053 return ret;
8054 }
8055
f3fa4a94
SJ
8056 /* Re-enable the hw error interrupts because
8057 * the interrupts get disabled on core/global reset.
01865a50 8058 */
f3fa4a94
SJ
8059 ret = hclge_hw_error_set_state(hdev, true);
8060 if (ret) {
8061 dev_err(&pdev->dev,
8062 "fail(%d) to re-enable HNS hw error interrupts\n", ret);
8063 return ret;
8064 }
01865a50 8065
a6d818e3
YL
8066 hclge_reset_vport_state(hdev);
8067
4ed340ab
L
8068 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
8069 HCLGE_DRIVER_NAME);
8070
8071 return 0;
8072}
8073
46a3df9f
S
8074static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
8075{
8076 struct hclge_dev *hdev = ae_dev->priv;
8077 struct hclge_mac *mac = &hdev->hw.mac;
8078
48569cda 8079 hclge_state_uninit(hdev);
46a3df9f
S
8080
8081 if (mac->phydev)
8082 mdiobus_unregister(mac->mdio_bus);
8083
39932473
JS
8084 hclge_uninit_umv_space(hdev);
8085
466b0c00
L
8086 /* Disable MISC vector(vector0) */
8087 hclge_enable_vector(&hdev->misc_vector, false);
8e52a602
XW
8088 synchronize_irq(hdev->misc_vector.vector_irq);
8089
99714195 8090 hclge_hw_error_set_state(hdev, false);
232d0d55 8091 hclge_cmd_uninit(hdev);
ca1d7669 8092 hclge_misc_irq_uninit(hdev);
46a3df9f 8093 hclge_pci_uninit(hdev);
818f1675 8094 mutex_destroy(&hdev->vport_lock);
6dd86902 8095 hclge_uninit_vport_mac_table(hdev);
c6075b19 8096 hclge_uninit_vport_vlan_table(hdev);
6dd86902 8097 mutex_destroy(&hdev->vport_cfg_mutex);
46a3df9f
S
8098 ae_dev->priv = NULL;
8099}
8100
482d2e9c
PL
8101static u32 hclge_get_max_channels(struct hnae3_handle *handle)
8102{
8103 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
8104 struct hclge_vport *vport = hclge_get_vport(handle);
8105 struct hclge_dev *hdev = vport->back;
8106
c3b9c50d
HT
8107 return min_t(u32, hdev->rss_size_max,
8108 vport->alloc_tqps / kinfo->num_tc);
482d2e9c
PL
8109}
8110
8111static void hclge_get_channels(struct hnae3_handle *handle,
8112 struct ethtool_channels *ch)
8113{
482d2e9c
PL
8114 ch->max_combined = hclge_get_max_channels(handle);
8115 ch->other_count = 1;
8116 ch->max_other = 1;
c3b9c50d 8117 ch->combined_count = handle->kinfo.rss_size;
482d2e9c
PL
8118}
8119
09f2af64 8120static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
0d43bf45 8121 u16 *alloc_tqps, u16 *max_rss_size)
09f2af64
PL
8122{
8123 struct hclge_vport *vport = hclge_get_vport(handle);
8124 struct hclge_dev *hdev = vport->back;
09f2af64 8125
0d43bf45 8126 *alloc_tqps = vport->alloc_tqps;
09f2af64
PL
8127 *max_rss_size = hdev->rss_size_max;
8128}
8129
90c68a41
YL
8130static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
8131 bool rxfh_configured)
09f2af64
PL
8132{
8133 struct hclge_vport *vport = hclge_get_vport(handle);
8134 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
8135 struct hclge_dev *hdev = vport->back;
8136 int cur_rss_size = kinfo->rss_size;
8137 int cur_tqps = kinfo->num_tqps;
8138 u16 tc_offset[HCLGE_MAX_TC_NUM];
8139 u16 tc_valid[HCLGE_MAX_TC_NUM];
8140 u16 tc_size[HCLGE_MAX_TC_NUM];
8141 u16 roundup_size;
8142 u32 *rss_indir;
8143 int ret, i;
8144
672ad0ed 8145 kinfo->req_rss_size = new_tqps_num;
09f2af64 8146
672ad0ed 8147 ret = hclge_tm_vport_map_update(hdev);
09f2af64 8148 if (ret) {
672ad0ed 8149 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
09f2af64
PL
8150 return ret;
8151 }
8152
8153 roundup_size = roundup_pow_of_two(kinfo->rss_size);
8154 roundup_size = ilog2(roundup_size);
8155 /* Set the RSS TC mode according to the new RSS size */
8156 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
8157 tc_valid[i] = 0;
8158
8159 if (!(hdev->hw_tc_map & BIT(i)))
8160 continue;
8161
8162 tc_valid[i] = 1;
8163 tc_size[i] = roundup_size;
8164 tc_offset[i] = kinfo->rss_size * i;
8165 }
8166 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
8167 if (ret)
8168 return ret;
8169
90c68a41
YL
8170	/* RSS indirection table has been configured by user */
8171 if (rxfh_configured)
8172 goto out;
8173
09f2af64
PL
8174	/* Reinitialize the RSS indirection table according to the new RSS size */
8175 rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
8176 if (!rss_indir)
8177 return -ENOMEM;
8178
8179 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
8180 rss_indir[i] = i % kinfo->rss_size;
8181
8182 ret = hclge_set_rss(handle, rss_indir, NULL, 0);
8183 if (ret)
8184 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
8185 ret);
8186
8187 kfree(rss_indir);
8188
90c68a41 8189out:
09f2af64
PL
8190 if (!ret)
8191 dev_info(&hdev->pdev->dev,
8192 "Channels changed, rss_size from %d to %d, tqps from %d to %d",
8193 cur_rss_size, kinfo->rss_size,
8194 cur_tqps, kinfo->rss_size * kinfo->num_tc);
8195
8196 return ret;
8197}
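/* Worked example (illustrative): suppose the TM vport update settles on
 * kinfo->rss_size = 6.  Then
 *
 *     roundup_pow_of_two(6) = 8,  ilog2(8) = 3,
 *
 * so every enabled TC is programmed with tc_size = 3 (i.e. 2^3 = 8 hash
 * buckets) at tc_offset = 6 * tc.  Unless the user has pinned the RSS
 * table with ethtool -X (rxfh_configured), the indirection table is then
 * refilled round-robin as rss_indir[i] = i % 6.
 */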
8198
77b34110
FL
8199static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
8200 u32 *regs_num_64_bit)
8201{
8202 struct hclge_desc desc;
8203 u32 total_num;
8204 int ret;
8205
8206 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
8207 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8208 if (ret) {
8209 dev_err(&hdev->pdev->dev,
8210 "Query register number cmd failed, ret = %d.\n", ret);
8211 return ret;
8212 }
8213
8214 *regs_num_32_bit = le32_to_cpu(desc.data[0]);
8215 *regs_num_64_bit = le32_to_cpu(desc.data[1]);
8216
8217 total_num = *regs_num_32_bit + *regs_num_64_bit;
8218 if (!total_num)
8219 return -EINVAL;
8220
8221 return 0;
8222}
8223
8224static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
8225 void *data)
8226{
8227#define HCLGE_32_BIT_REG_RTN_DATANUM 8
8228
8229 struct hclge_desc *desc;
8230 u32 *reg_val = data;
8231 __le32 *desc_data;
8232 int cmd_num;
8233 int i, k, n;
8234 int ret;
8235
8236 if (regs_num == 0)
8237 return 0;
8238
8239 cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
8240 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
8241 if (!desc)
8242 return -ENOMEM;
8243
8244 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
8245 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
8246 if (ret) {
8247 dev_err(&hdev->pdev->dev,
8248 "Query 32 bit register cmd failed, ret = %d.\n", ret);
8249 kfree(desc);
8250 return ret;
8251 }
8252
8253 for (i = 0; i < cmd_num; i++) {
8254 if (i == 0) {
8255 desc_data = (__le32 *)(&desc[i].data[0]);
8256 n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
8257 } else {
8258 desc_data = (__le32 *)(&desc[i]);
8259 n = HCLGE_32_BIT_REG_RTN_DATANUM;
8260 }
8261 for (k = 0; k < n; k++) {
8262 *reg_val++ = le32_to_cpu(*desc_data++);
8263
8264 regs_num--;
8265 if (!regs_num)
8266 break;
8267 }
8268 }
8269
8270 kfree(desc);
8271 return 0;
8272}
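/* Worked example (illustrative): to read 20 32-bit registers,
 * cmd_num = DIV_ROUND_UP(20 + 2, 8) = 3 descriptors are chained.  The
 * first descriptor yields HCLGE_32_BIT_REG_RTN_DATANUM - 2 = 6 values,
 * since its leading words hold the real command header, while the
 * follow-up descriptors are used as raw 32-byte buffers of 8 values
 * each, so 6 + 8 + 8 = 22 >= 20 and the copy loop stops once regs_num
 * reaches zero.
 */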
8273
8274static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
8275 void *data)
8276{
8277#define HCLGE_64_BIT_REG_RTN_DATANUM 4
8278
8279 struct hclge_desc *desc;
8280 u64 *reg_val = data;
8281 __le64 *desc_data;
8282 int cmd_num;
8283 int i, k, n;
8284 int ret;
8285
8286 if (regs_num == 0)
8287 return 0;
8288
8289 cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
8290 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
8291 if (!desc)
8292 return -ENOMEM;
8293
8294 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
8295 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
8296 if (ret) {
8297 dev_err(&hdev->pdev->dev,
8298 "Query 64 bit register cmd failed, ret = %d.\n", ret);
8299 kfree(desc);
8300 return ret;
8301 }
8302
8303 for (i = 0; i < cmd_num; i++) {
8304 if (i == 0) {
8305 desc_data = (__le64 *)(&desc[i].data[0]);
8306 n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
8307 } else {
8308 desc_data = (__le64 *)(&desc[i]);
8309 n = HCLGE_64_BIT_REG_RTN_DATANUM;
8310 }
8311 for (k = 0; k < n; k++) {
8312 *reg_val++ = le64_to_cpu(*desc_data++);
8313
8314 regs_num--;
8315 if (!regs_num)
8316 break;
8317 }
8318 }
8319
8320 kfree(desc);
8321 return 0;
8322}
8323
ea4750ca
JS
8324#define MAX_SEPARATE_NUM 4
8325#define SEPARATOR_VALUE 0xFFFFFFFF
8326#define REG_NUM_PER_LINE 4
8327#define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
8328
77b34110
FL
8329static int hclge_get_regs_len(struct hnae3_handle *handle)
8330{
ea4750ca
JS
8331 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
8332 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
77b34110
FL
8333 struct hclge_vport *vport = hclge_get_vport(handle);
8334 struct hclge_dev *hdev = vport->back;
8335 u32 regs_num_32_bit, regs_num_64_bit;
8336 int ret;
8337
8338 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
8339 if (ret) {
8340 dev_err(&hdev->pdev->dev,
8341 "Get register number failed, ret = %d.\n", ret);
8342 return -EOPNOTSUPP;
8343 }
8344
ea4750ca
JS
8345 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
8346 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
8347 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
8348 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
8349
8350 return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
8351 tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
8352 regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
77b34110
FL
8353}
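/* Worked example (illustrative) of the sizing above: a register block of,
 * say, 14 addresses occupies 14 * 4 = 56 bytes, so it is accounted as
 * 56 / REG_LEN_PER_LINE + 1 = 3 + 1 = 4 lines of 16 bytes.  The extra
 * line leaves room for the SEPARATOR_VALUE padding that hclge_get_regs()
 * appends (separator_num = 4 - 14 % 4 = 2 words in this case), and the
 * per-ring and per-vector blocks are multiplied by the number of TQPs and
 * used MSI-X vectors respectively.
 */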
8354
8355static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
8356 void *data)
8357{
ea4750ca 8358 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
77b34110
FL
8359 struct hclge_vport *vport = hclge_get_vport(handle);
8360 struct hclge_dev *hdev = vport->back;
8361 u32 regs_num_32_bit, regs_num_64_bit;
ea4750ca
JS
8362 int i, j, reg_um, separator_num;
8363 u32 *reg = data;
77b34110
FL
8364 int ret;
8365
8366 *version = hdev->fw_version;
8367
8368 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
8369 if (ret) {
8370 dev_err(&hdev->pdev->dev,
8371 "Get register number failed, ret = %d.\n", ret);
8372 return;
8373 }
8374
ea4750ca
JS
8375	/* fetch per-PF register values from the PF PCIe register space */
8376 reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
8377 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
8378 for (i = 0; i < reg_um; i++)
8379 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
8380 for (i = 0; i < separator_num; i++)
8381 *reg++ = SEPARATOR_VALUE;
8382
8383 reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
8384 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
8385 for (i = 0; i < reg_um; i++)
8386 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
8387 for (i = 0; i < separator_num; i++)
8388 *reg++ = SEPARATOR_VALUE;
8389
8390 reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
8391 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
8392 for (j = 0; j < kinfo->num_tqps; j++) {
8393 for (i = 0; i < reg_um; i++)
8394 *reg++ = hclge_read_dev(&hdev->hw,
8395 ring_reg_addr_list[i] +
8396 0x200 * j);
8397 for (i = 0; i < separator_num; i++)
8398 *reg++ = SEPARATOR_VALUE;
8399 }
8400
8401 reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
8402 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
8403 for (j = 0; j < hdev->num_msi_used - 1; j++) {
8404 for (i = 0; i < reg_um; i++)
8405 *reg++ = hclge_read_dev(&hdev->hw,
8406 tqp_intr_reg_addr_list[i] +
8407 4 * j);
8408 for (i = 0; i < separator_num; i++)
8409 *reg++ = SEPARATOR_VALUE;
8410 }
8411
8412	/* fetch PF common register values from firmware */
8413 ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
77b34110
FL
8414 if (ret) {
8415 dev_err(&hdev->pdev->dev,
8416 "Get 32 bit register failed, ret = %d.\n", ret);
8417 return;
8418 }
8419
ea4750ca
JS
8420 reg += regs_num_32_bit;
8421 ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
77b34110
FL
8422 if (ret)
8423 dev_err(&hdev->pdev->dev,
8424 "Get 64 bit register failed, ret = %d.\n", ret);
8425}
8426
f6f75abc 8427static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
07f8e940
JS
8428{
8429 struct hclge_set_led_state_cmd *req;
8430 struct hclge_desc desc;
8431 int ret;
8432
8433 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
8434
8435 req = (struct hclge_set_led_state_cmd *)desc.data;
e4e87715
PL
8436 hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
8437 HCLGE_LED_LOCATE_STATE_S, locate_led_status);
07f8e940
JS
8438
8439 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8440 if (ret)
8441 dev_err(&hdev->pdev->dev,
8442 "Send set led state cmd error, ret =%d\n", ret);
8443
8444 return ret;
8445}
8446
8447enum hclge_led_status {
8448 HCLGE_LED_OFF,
8449 HCLGE_LED_ON,
8450 HCLGE_LED_NO_CHANGE = 0xFF,
8451};
8452
8453static int hclge_set_led_id(struct hnae3_handle *handle,
8454 enum ethtool_phys_id_state status)
8455{
07f8e940
JS
8456 struct hclge_vport *vport = hclge_get_vport(handle);
8457 struct hclge_dev *hdev = vport->back;
07f8e940
JS
8458
8459 switch (status) {
8460 case ETHTOOL_ID_ACTIVE:
f6f75abc 8461 return hclge_set_led_status(hdev, HCLGE_LED_ON);
07f8e940 8462 case ETHTOOL_ID_INACTIVE:
f6f75abc 8463 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
07f8e940 8464 default:
f6f75abc 8465 return -EINVAL;
07f8e940 8466 }
07f8e940
JS
8467}
8468
0979aa0b
FL
8469static void hclge_get_link_mode(struct hnae3_handle *handle,
8470 unsigned long *supported,
8471 unsigned long *advertising)
8472{
8473 unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
8474 struct hclge_vport *vport = hclge_get_vport(handle);
8475 struct hclge_dev *hdev = vport->back;
8476 unsigned int idx = 0;
8477
8478 for (; idx < size; idx++) {
8479 supported[idx] = hdev->hw.mac.supported[idx];
8480 advertising[idx] = hdev->hw.mac.advertising[idx];
8481 }
8482}
8483
1731be4c 8484static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
5c9f6b39
PL
8485{
8486 struct hclge_vport *vport = hclge_get_vport(handle);
8487 struct hclge_dev *hdev = vport->back;
8488
8489 return hclge_config_gro(hdev, enable);
8490}
8491
46a3df9f
S
8492static const struct hnae3_ae_ops hclge_ops = {
8493 .init_ae_dev = hclge_init_ae_dev,
8494 .uninit_ae_dev = hclge_uninit_ae_dev,
6b9a97ee
HT
8495 .flr_prepare = hclge_flr_prepare,
8496 .flr_done = hclge_flr_done,
46a3df9f
S
8497 .init_client_instance = hclge_init_client_instance,
8498 .uninit_client_instance = hclge_uninit_client_instance,
84e095d6
SM
8499 .map_ring_to_vector = hclge_map_ring_to_vector,
8500 .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
46a3df9f 8501 .get_vector = hclge_get_vector,
0d3e6631 8502 .put_vector = hclge_put_vector,
46a3df9f 8503 .set_promisc_mode = hclge_set_promisc_mode,
c39c4d98 8504 .set_loopback = hclge_set_loopback,
46a3df9f
S
8505 .start = hclge_ae_start,
8506 .stop = hclge_ae_stop,
a6d818e3
YL
8507 .client_start = hclge_client_start,
8508 .client_stop = hclge_client_stop,
46a3df9f
S
8509 .get_status = hclge_get_status,
8510 .get_ksettings_an_result = hclge_get_ksettings_an_result,
8511 .update_speed_duplex_h = hclge_update_speed_duplex_h,
8512 .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
8513 .get_media_type = hclge_get_media_type,
8514 .get_rss_key_size = hclge_get_rss_key_size,
8515 .get_rss_indir_size = hclge_get_rss_indir_size,
8516 .get_rss = hclge_get_rss,
8517 .set_rss = hclge_set_rss,
f7db940a 8518 .set_rss_tuple = hclge_set_rss_tuple,
07d29954 8519 .get_rss_tuple = hclge_get_rss_tuple,
46a3df9f
S
8520 .get_tc_size = hclge_get_tc_size,
8521 .get_mac_addr = hclge_get_mac_addr,
8522 .set_mac_addr = hclge_set_mac_addr,
26483246 8523 .do_ioctl = hclge_do_ioctl,
46a3df9f
S
8524 .add_uc_addr = hclge_add_uc_addr,
8525 .rm_uc_addr = hclge_rm_uc_addr,
8526 .add_mc_addr = hclge_add_mc_addr,
8527 .rm_mc_addr = hclge_rm_mc_addr,
8528 .set_autoneg = hclge_set_autoneg,
8529 .get_autoneg = hclge_get_autoneg,
8530 .get_pauseparam = hclge_get_pauseparam,
61387774 8531 .set_pauseparam = hclge_set_pauseparam,
46a3df9f
S
8532 .set_mtu = hclge_set_mtu,
8533 .reset_queue = hclge_reset_tqp,
8534 .get_stats = hclge_get_stats,
e511c97d 8535 .get_mac_pause_stats = hclge_get_mac_pause_stat,
46a3df9f
S
8536 .update_stats = hclge_update_stats,
8537 .get_strings = hclge_get_strings,
8538 .get_sset_count = hclge_get_sset_count,
8539 .get_fw_version = hclge_get_fw_version,
8540 .get_mdix_mode = hclge_get_mdix_mode,
391b5e93 8541 .enable_vlan_filter = hclge_enable_vlan_filter,
dc8131d8 8542 .set_vlan_filter = hclge_set_vlan_filter,
46a3df9f 8543 .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
052ece6d 8544 .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
4ed340ab 8545 .reset_event = hclge_reset_event,
720bd583 8546 .set_default_reset_request = hclge_set_def_reset_request,
09f2af64
PL
8547 .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
8548 .set_channels = hclge_set_channels,
482d2e9c 8549 .get_channels = hclge_get_channels,
77b34110
FL
8550 .get_regs_len = hclge_get_regs_len,
8551 .get_regs = hclge_get_regs,
07f8e940 8552 .set_led_id = hclge_set_led_id,
0979aa0b 8553 .get_link_mode = hclge_get_link_mode,
dd74f815
JS
8554 .add_fd_entry = hclge_add_fd_entry,
8555 .del_fd_entry = hclge_del_fd_entry,
6871af29 8556 .del_all_fd_entries = hclge_del_all_fd_entries,
05c2314f
JS
8557 .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
8558 .get_fd_rule_info = hclge_get_fd_rule_info,
8559 .get_fd_all_rules = hclge_get_all_rules,
6871af29 8560 .restore_fd_rules = hclge_restore_fd_entries,
c17852a8 8561 .enable_fd = hclge_enable_fd,
3c666b58 8562 .dbg_run_cmd = hclge_dbg_run_cmd,
381c356e 8563 .handle_hw_ras_error = hclge_handle_hw_ras_error,
4d60291b
HT
8564 .get_hw_reset_stat = hclge_get_hw_reset_stat,
8565 .ae_dev_resetting = hclge_ae_dev_resetting,
8566 .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
5c9f6b39 8567 .set_gro_en = hclge_gro_en,
0c29d191 8568 .get_global_queue_id = hclge_covert_handle_qid_global,
8cdb992f 8569 .set_timer_task = hclge_set_timer_task,
c8a8045b
HT
8570 .mac_connect_phy = hclge_mac_connect_phy,
8571 .mac_disconnect_phy = hclge_mac_disconnect_phy,
46a3df9f
S
8572};
8573
8574static struct hnae3_ae_algo ae_algo = {
8575 .ops = &hclge_ops,
46a3df9f
S
8576 .pdev_id_table = ae_algo_pci_tbl,
8577};
8578
8579static int hclge_init(void)
8580{
8581 pr_info("%s is initializing\n", HCLGE_NAME);
8582
854cf33a
FL
8583 hnae3_register_ae_algo(&ae_algo);
8584
8585 return 0;
46a3df9f
S
8586}
8587
8588static void hclge_exit(void)
8589{
8590 hnae3_unregister_ae_algo(&ae_algo);
8591}
8592module_init(hclge_init);
8593module_exit(hclge_exit);
8594
8595MODULE_LICENSE("GPL");
8596MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
8597MODULE_DESCRIPTION("HCLGE Driver");
8598MODULE_VERSION(HCLGE_MOD_VERSION);