net: hns3: add a resetting check in hclgevf_init_nic_client_instance()
linux-block.git: drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1// SPDX-License-Identifier: GPL-2.0+
2// Copyright (c) 2016-2017 Hisilicon Limited.
3
4#include <linux/acpi.h>
5#include <linux/device.h>
6#include <linux/etherdevice.h>
7#include <linux/init.h>
8#include <linux/interrupt.h>
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/netdevice.h>
12#include <linux/pci.h>
13#include <linux/platform_device.h>
2866ccb2 14#include <linux/if_vlan.h>
962e31bd 15#include <linux/crash_dump.h>
f2f432f2 16#include <net/rtnetlink.h>
46a3df9f 17#include "hclge_cmd.h"
cacde272 18#include "hclge_dcb.h"
46a3df9f 19#include "hclge_main.h"
dde1a86e 20#include "hclge_mbx.h"
21#include "hclge_mdio.h"
22#include "hclge_tm.h"
5a9f0eac 23#include "hclge_err.h"
24#include "hnae3.h"
25
26#define HCLGE_NAME "hclge"
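/* helpers below: read a u64 statistic at a byte offset within a stats struct,
 * and get a field's byte offset inside struct hclge_mac_stats
 */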
27#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
46a3df9f 29
ebaf1908 30#define HCLGE_BUF_SIZE_UNIT 256U
31#define HCLGE_BUF_MUL_BY 2
32#define HCLGE_BUF_DIV_BY 2
33#define NEED_RESERVE_TC_NUM 2
34#define BUF_MAX_PERCENT 100
35#define BUF_RESERVE_PERCENT 90
b9a400ac 36
63cbf7a9 37#define HCLGE_RESET_MAX_FAIL_CNT 5
38#define HCLGE_RESET_SYNC_TIME 100
39#define HCLGE_PF_RESET_SYNC_TIME 20
40#define HCLGE_PF_RESET_SYNC_CNT 1500
63cbf7a9 41
42/* Get DFX BD number offset */
43#define HCLGE_DFX_BIOS_BD_OFFSET 1
44#define HCLGE_DFX_SSU_0_BD_OFFSET 2
45#define HCLGE_DFX_SSU_1_BD_OFFSET 3
46#define HCLGE_DFX_IGU_BD_OFFSET 4
47#define HCLGE_DFX_RPU_0_BD_OFFSET 5
48#define HCLGE_DFX_RPU_1_BD_OFFSET 6
49#define HCLGE_DFX_NCSI_BD_OFFSET 7
50#define HCLGE_DFX_RTC_BD_OFFSET 8
51#define HCLGE_DFX_PPP_BD_OFFSET 9
52#define HCLGE_DFX_RCB_BD_OFFSET 10
53#define HCLGE_DFX_TQP_BD_OFFSET 11
54#define HCLGE_DFX_SSU_2_BD_OFFSET 12
55
56#define HCLGE_LINK_STATUS_MS 10
57
58#define HCLGE_VF_VPORT_START_NUM 1
59
e6d7d79d 60static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
46a3df9f 61static int hclge_init_vlan_config(struct hclge_dev *hdev);
fe4144d4 62static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
4ed340ab 63static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
4f765d3e 64static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
65static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
66static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
67static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
68 unsigned long *addr);
1cbc662d 69static int hclge_set_default_loopback(struct hclge_dev *hdev);
46a3df9f 70
ee4bcd3b 71static void hclge_sync_mac_table(struct hclge_dev *hdev);
039ba863 72static void hclge_restore_hw_table(struct hclge_dev *hdev);
c631c696 73static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
ee4bcd3b 74
75static struct hnae3_ae_algo ae_algo;
76
77static struct workqueue_struct *hclge_wq;
78
79static const struct pci_device_id ae_algo_pci_tbl[] = {
80 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
81 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
82 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
83 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
84 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
85 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
86 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
e92a0843 87 /* required last entry */
88 {0, }
89};
90
91MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
92
93static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
94 HCLGE_CMDQ_TX_ADDR_H_REG,
95 HCLGE_CMDQ_TX_DEPTH_REG,
96 HCLGE_CMDQ_TX_TAIL_REG,
97 HCLGE_CMDQ_TX_HEAD_REG,
98 HCLGE_CMDQ_RX_ADDR_L_REG,
99 HCLGE_CMDQ_RX_ADDR_H_REG,
100 HCLGE_CMDQ_RX_DEPTH_REG,
101 HCLGE_CMDQ_RX_TAIL_REG,
102 HCLGE_CMDQ_RX_HEAD_REG,
103 HCLGE_VECTOR0_CMDQ_SRC_REG,
104 HCLGE_CMDQ_INTR_STS_REG,
105 HCLGE_CMDQ_INTR_EN_REG,
106 HCLGE_CMDQ_INTR_GEN_REG};
107
108static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
109 HCLGE_VECTOR0_OTER_EN_REG,
110 HCLGE_MISC_RESET_STS_REG,
111 HCLGE_MISC_VECTOR_INT_STS,
112 HCLGE_GLOBAL_RESET_REG,
113 HCLGE_FUN_RST_ING,
114 HCLGE_GRO_EN_REG};
115
116static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
117 HCLGE_RING_RX_ADDR_H_REG,
118 HCLGE_RING_RX_BD_NUM_REG,
119 HCLGE_RING_RX_BD_LENGTH_REG,
120 HCLGE_RING_RX_MERGE_EN_REG,
121 HCLGE_RING_RX_TAIL_REG,
122 HCLGE_RING_RX_HEAD_REG,
123 HCLGE_RING_RX_FBD_NUM_REG,
124 HCLGE_RING_RX_OFFSET_REG,
125 HCLGE_RING_RX_FBD_OFFSET_REG,
126 HCLGE_RING_RX_STASH_REG,
127 HCLGE_RING_RX_BD_ERR_REG,
128 HCLGE_RING_TX_ADDR_L_REG,
129 HCLGE_RING_TX_ADDR_H_REG,
130 HCLGE_RING_TX_BD_NUM_REG,
131 HCLGE_RING_TX_PRIORITY_REG,
132 HCLGE_RING_TX_TC_REG,
133 HCLGE_RING_TX_MERGE_EN_REG,
134 HCLGE_RING_TX_TAIL_REG,
135 HCLGE_RING_TX_HEAD_REG,
136 HCLGE_RING_TX_FBD_NUM_REG,
137 HCLGE_RING_TX_OFFSET_REG,
138 HCLGE_RING_TX_EBD_NUM_REG,
139 HCLGE_RING_TX_EBD_OFFSET_REG,
140 HCLGE_RING_TX_BD_ERR_REG,
141 HCLGE_RING_EN_REG};
142
143static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
144 HCLGE_TQP_INTR_GL0_REG,
145 HCLGE_TQP_INTR_GL1_REG,
146 HCLGE_TQP_INTR_GL2_REG,
147 HCLGE_TQP_INTR_RL_REG};
148
46a3df9f 149static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
eb66d503 150 "App Loopback test",
151 "Serdes serial Loopback test",
152 "Serdes parallel Loopback test",
153 "Phy Loopback test"
154};
155
156static const struct hclge_comm_stats_str g_mac_stats_string[] = {
157 {"mac_tx_mac_pause_num",
158 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
159 {"mac_rx_mac_pause_num",
160 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
d174ea75 161 {"mac_tx_control_pkt_num",
162 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
163 {"mac_rx_control_pkt_num",
164 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
165 {"mac_tx_pfc_pkt_num",
166 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
167 {"mac_tx_pfc_pri0_pkt_num",
168 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
169 {"mac_tx_pfc_pri1_pkt_num",
170 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
171 {"mac_tx_pfc_pri2_pkt_num",
172 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
173 {"mac_tx_pfc_pri3_pkt_num",
174 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
175 {"mac_tx_pfc_pri4_pkt_num",
176 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
177 {"mac_tx_pfc_pri5_pkt_num",
178 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
179 {"mac_tx_pfc_pri6_pkt_num",
180 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
181 {"mac_tx_pfc_pri7_pkt_num",
182 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
d174ea75 183 {"mac_rx_pfc_pkt_num",
184 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
185 {"mac_rx_pfc_pri0_pkt_num",
186 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
187 {"mac_rx_pfc_pri1_pkt_num",
188 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
189 {"mac_rx_pfc_pri2_pkt_num",
190 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
191 {"mac_rx_pfc_pri3_pkt_num",
192 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
193 {"mac_rx_pfc_pri4_pkt_num",
194 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
195 {"mac_rx_pfc_pri5_pkt_num",
196 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
197 {"mac_rx_pfc_pri6_pkt_num",
198 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
199 {"mac_rx_pfc_pri7_pkt_num",
200 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
201 {"mac_tx_total_pkt_num",
202 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
203 {"mac_tx_total_oct_num",
204 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
205 {"mac_tx_good_pkt_num",
206 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
207 {"mac_tx_bad_pkt_num",
208 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
209 {"mac_tx_good_oct_num",
210 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
211 {"mac_tx_bad_oct_num",
212 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
213 {"mac_tx_uni_pkt_num",
214 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
215 {"mac_tx_multi_pkt_num",
216 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
217 {"mac_tx_broad_pkt_num",
218 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
219 {"mac_tx_undersize_pkt_num",
220 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
221 {"mac_tx_oversize_pkt_num",
222 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
223 {"mac_tx_64_oct_pkt_num",
224 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
225 {"mac_tx_65_127_oct_pkt_num",
226 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
227 {"mac_tx_128_255_oct_pkt_num",
228 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
229 {"mac_tx_256_511_oct_pkt_num",
230 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
231 {"mac_tx_512_1023_oct_pkt_num",
232 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
233 {"mac_tx_1024_1518_oct_pkt_num",
234 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
235 {"mac_tx_1519_2047_oct_pkt_num",
236 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
237 {"mac_tx_2048_4095_oct_pkt_num",
238 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
239 {"mac_tx_4096_8191_oct_pkt_num",
240 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
241 {"mac_tx_8192_9216_oct_pkt_num",
242 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
243 {"mac_tx_9217_12287_oct_pkt_num",
244 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
245 {"mac_tx_12288_16383_oct_pkt_num",
246 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
247 {"mac_tx_1519_max_good_pkt_num",
248 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
249 {"mac_tx_1519_max_bad_pkt_num",
250 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
251 {"mac_rx_total_pkt_num",
252 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
253 {"mac_rx_total_oct_num",
254 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
255 {"mac_rx_good_pkt_num",
256 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
257 {"mac_rx_bad_pkt_num",
258 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
259 {"mac_rx_good_oct_num",
260 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
261 {"mac_rx_bad_oct_num",
262 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
263 {"mac_rx_uni_pkt_num",
264 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
265 {"mac_rx_multi_pkt_num",
266 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
267 {"mac_rx_broad_pkt_num",
268 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
269 {"mac_rx_undersize_pkt_num",
270 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
271 {"mac_rx_oversize_pkt_num",
272 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
273 {"mac_rx_64_oct_pkt_num",
274 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
275 {"mac_rx_65_127_oct_pkt_num",
276 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
277 {"mac_rx_128_255_oct_pkt_num",
278 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
279 {"mac_rx_256_511_oct_pkt_num",
280 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
281 {"mac_rx_512_1023_oct_pkt_num",
282 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
283 {"mac_rx_1024_1518_oct_pkt_num",
284 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
285 {"mac_rx_1519_2047_oct_pkt_num",
286 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
287 {"mac_rx_2048_4095_oct_pkt_num",
288 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
289 {"mac_rx_4096_8191_oct_pkt_num",
290 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
291 {"mac_rx_8192_9216_oct_pkt_num",
292 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
293 {"mac_rx_9217_12287_oct_pkt_num",
294 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
295 {"mac_rx_12288_16383_oct_pkt_num",
296 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
297 {"mac_rx_1519_max_good_pkt_num",
298 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
299 {"mac_rx_1519_max_bad_pkt_num",
300 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
46a3df9f 301
302 {"mac_tx_fragment_pkt_num",
303 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
304 {"mac_tx_undermin_pkt_num",
305 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
306 {"mac_tx_jabber_pkt_num",
307 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
308 {"mac_tx_err_all_pkt_num",
309 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
310 {"mac_tx_from_app_good_pkt_num",
311 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
312 {"mac_tx_from_app_bad_pkt_num",
313 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
314 {"mac_rx_fragment_pkt_num",
315 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
316 {"mac_rx_undermin_pkt_num",
317 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
318 {"mac_rx_jabber_pkt_num",
319 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
320 {"mac_rx_fcs_err_pkt_num",
321 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
322 {"mac_rx_send_app_good_pkt_num",
323 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
324 {"mac_rx_send_app_bad_pkt_num",
325 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
326};
327
328static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
329 {
330 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
7efffc64 331 .ethter_type = cpu_to_le16(ETH_P_LLDP),
0e02a53d 332 .mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
333 .i_port_bitmap = 0x1,
334 },
335};
336
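/* default hash key used when initialising RSS */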
337static const u8 hclge_hash_key[] = {
338 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
339 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
340 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
341 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
342 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
343};
344
345static const u32 hclge_dfx_bd_offset_list[] = {
346 HCLGE_DFX_BIOS_BD_OFFSET,
347 HCLGE_DFX_SSU_0_BD_OFFSET,
348 HCLGE_DFX_SSU_1_BD_OFFSET,
349 HCLGE_DFX_IGU_BD_OFFSET,
350 HCLGE_DFX_RPU_0_BD_OFFSET,
351 HCLGE_DFX_RPU_1_BD_OFFSET,
352 HCLGE_DFX_NCSI_BD_OFFSET,
353 HCLGE_DFX_RTC_BD_OFFSET,
354 HCLGE_DFX_PPP_BD_OFFSET,
355 HCLGE_DFX_RCB_BD_OFFSET,
356 HCLGE_DFX_TQP_BD_OFFSET,
357 HCLGE_DFX_SSU_2_BD_OFFSET
358};
359
360static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
361 HCLGE_OPC_DFX_BIOS_COMMON_REG,
362 HCLGE_OPC_DFX_SSU_REG_0,
363 HCLGE_OPC_DFX_SSU_REG_1,
364 HCLGE_OPC_DFX_IGU_EGU_REG,
365 HCLGE_OPC_DFX_RPU_REG_0,
366 HCLGE_OPC_DFX_RPU_REG_1,
367 HCLGE_OPC_DFX_NCSI_REG,
368 HCLGE_OPC_DFX_RTC_REG,
369 HCLGE_OPC_DFX_PPP_REG,
370 HCLGE_OPC_DFX_RCB_REG,
371 HCLGE_OPC_DFX_TQP_REG,
372 HCLGE_OPC_DFX_SSU_REG_2
373};
374
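/* each entry below is { key field, field width in bits } */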
375static const struct key_info meta_data_key_info[] = {
376 { PACKET_TYPE_ID, 6},
377 { IP_FRAGEMENT, 1},
378 { ROCE_TYPE, 1},
379 { NEXT_KEY, 5},
380 { VLAN_NUMBER, 2},
381 { SRC_VPORT, 12},
382 { DST_VPORT, 12},
383 { TUNNEL_PACKET, 1},
384};
385
386static const struct key_info tuple_key_info[] = {
387 { OUTER_DST_MAC, 48},
388 { OUTER_SRC_MAC, 48},
389 { OUTER_VLAN_TAG_FST, 16},
390 { OUTER_VLAN_TAG_SEC, 16},
391 { OUTER_ETH_TYPE, 16},
392 { OUTER_L2_RSV, 16},
393 { OUTER_IP_TOS, 8},
394 { OUTER_IP_PROTO, 8},
395 { OUTER_SRC_IP, 32},
396 { OUTER_DST_IP, 32},
397 { OUTER_L3_RSV, 16},
398 { OUTER_SRC_PORT, 16},
399 { OUTER_DST_PORT, 16},
400 { OUTER_L4_RSV, 32},
401 { OUTER_TUN_VNI, 24},
402 { OUTER_TUN_FLOW_ID, 8},
403 { INNER_DST_MAC, 48},
404 { INNER_SRC_MAC, 48},
405 { INNER_VLAN_TAG_FST, 16},
406 { INNER_VLAN_TAG_SEC, 16},
407 { INNER_ETH_TYPE, 16},
408 { INNER_L2_RSV, 16},
409 { INNER_IP_TOS, 8},
410 { INNER_IP_PROTO, 8},
411 { INNER_SRC_IP, 32},
412 { INNER_DST_IP, 32},
413 { INNER_L3_RSV, 16},
414 { INNER_SRC_PORT, 16},
415 { INNER_DST_PORT, 16},
416 { INNER_L4_RSV, 32},
417};
418
d174ea75 419static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
46a3df9f 420{
91f384f6 421#define HCLGE_MAC_CMD_NUM 21
46a3df9f 422
1c6dfe6f 423 u64 *data = (u64 *)(&hdev->mac_stats);
46a3df9f 424 struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
a90bb9a5 425 __le64 *desc_data;
426 int i, k, n;
427 int ret;
428
429 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
430 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
431 if (ret) {
432 dev_err(&hdev->pdev->dev,
433 "Get MAC pkt stats fail, status = %d.\n", ret);
434
435 return ret;
436 }
437
438 for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
d174ea75 439 /* for special opcode 0032, only the first desc has the head */
46a3df9f 440 if (unlikely(i == 0)) {
a90bb9a5 441 desc_data = (__le64 *)(&desc[i].data[0]);
d174ea75 442 n = HCLGE_RD_FIRST_STATS_NUM;
46a3df9f 443 } else {
a90bb9a5 444 desc_data = (__le64 *)(&desc[i]);
d174ea75 445 n = HCLGE_RD_OTHER_STATS_NUM;
46a3df9f 446 }
d174ea75 447
46a3df9f 448 for (k = 0; k < n; k++) {
d174ea75 449 *data += le64_to_cpu(*desc_data);
450 data++;
451 desc_data++;
452 }
453 }
454
455 return 0;
456}
457
d174ea75 458static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
459{
1c6dfe6f 460 u64 *data = (u64 *)(&hdev->mac_stats);
d174ea75 461 struct hclge_desc *desc;
462 __le64 *desc_data;
463 u16 i, k, n;
464 int ret;
465
466 /* This may be called inside atomic sections,
 467 * so GFP_ATOMIC is more suitable here
468 */
469 desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
470 if (!desc)
471 return -ENOMEM;
9e6717af 472
d174ea75 473 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
474 ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
475 if (ret) {
476 kfree(desc);
477 return ret;
478 }
479
480 for (i = 0; i < desc_num; i++) {
481 /* for special opcode 0034, only the first desc has the head */
482 if (i == 0) {
483 desc_data = (__le64 *)(&desc[i].data[0]);
484 n = HCLGE_RD_FIRST_STATS_NUM;
485 } else {
486 desc_data = (__le64 *)(&desc[i]);
487 n = HCLGE_RD_OTHER_STATS_NUM;
488 }
489
490 for (k = 0; k < n; k++) {
491 *data += le64_to_cpu(*desc_data);
492 data++;
493 desc_data++;
494 }
495 }
496
497 kfree(desc);
498
499 return 0;
500}
501
502static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
503{
504 struct hclge_desc desc;
505 __le32 *desc_data;
506 u32 reg_num;
507 int ret;
508
509 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
510 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
511 if (ret)
512 return ret;
513
514 desc_data = (__le32 *)(&desc.data[0]);
515 reg_num = le32_to_cpu(*desc_data);
516
517 *desc_num = 1 + ((reg_num - 3) >> 2) +
518 (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
519
520 return 0;
521}
522
523static int hclge_mac_update_stats(struct hclge_dev *hdev)
524{
525 u32 desc_num;
526 int ret;
527
528 ret = hclge_mac_query_reg_num(hdev, &desc_num);
529
530 /* The firmware supports the new statistics acquisition method */
531 if (!ret)
532 ret = hclge_mac_update_stats_complete(hdev, desc_num);
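	/* -EOPNOTSUPP means the firmware has no register-number query,
	 * so fall back to the fixed-length method
	 */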
533 else if (ret == -EOPNOTSUPP)
534 ret = hclge_mac_update_stats_defective(hdev);
535 else
536 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
537
538 return ret;
539}
540
541static int hclge_tqps_update_stats(struct hnae3_handle *handle)
542{
543 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
544 struct hclge_vport *vport = hclge_get_vport(handle);
545 struct hclge_dev *hdev = vport->back;
546 struct hnae3_queue *queue;
547 struct hclge_desc desc[1];
548 struct hclge_tqp *tqp;
549 int ret, i;
550
551 for (i = 0; i < kinfo->num_tqps; i++) {
552 queue = handle->kinfo.tqp[i];
553 tqp = container_of(queue, struct hclge_tqp, q);
 554 /* command : HCLGE_OPC_QUERY_RX_STATS */
4279b4d5 555 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
556 true);
557
a90bb9a5 558 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
559 ret = hclge_cmd_send(&hdev->hw, desc, 1);
560 if (ret) {
561 dev_err(&hdev->pdev->dev,
562 "Query tqp stat fail, status = %d,queue = %d\n",
9b2f3477 563 ret, i);
564 return ret;
565 }
566 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
cf72fa63 567 le32_to_cpu(desc[0].data[1]);
568 }
569
570 for (i = 0; i < kinfo->num_tqps; i++) {
571 queue = handle->kinfo.tqp[i];
572 tqp = container_of(queue, struct hclge_tqp, q);
 573 /* command : HCLGE_OPC_QUERY_TX_STATS */
574 hclge_cmd_setup_basic_desc(&desc[0],
4279b4d5 575 HCLGE_OPC_QUERY_TX_STATS,
576 true);
577
a90bb9a5 578 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
579 ret = hclge_cmd_send(&hdev->hw, desc, 1);
580 if (ret) {
581 dev_err(&hdev->pdev->dev,
582 "Query tqp stat fail, status = %d,queue = %d\n",
583 ret, i);
584 return ret;
585 }
586 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
cf72fa63 587 le32_to_cpu(desc[0].data[1]);
588 }
589
590 return 0;
591}
592
593static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
594{
595 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
596 struct hclge_tqp *tqp;
597 u64 *buff = data;
598 int i;
599
600 for (i = 0; i < kinfo->num_tqps; i++) {
601 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
a90bb9a5 602 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
603 }
604
605 for (i = 0; i < kinfo->num_tqps; i++) {
606 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
a90bb9a5 607 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
608 }
609
610 return buff;
611}
612
613static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
614{
615 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
616
9b2f3477 617 /* each tqp has both a TX and an RX queue */
618 return kinfo->num_tqps * (2);
619}
620
621static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
622{
623 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
624 u8 *buff = data;
625 int i = 0;
626
627 for (i = 0; i < kinfo->num_tqps; i++) {
628 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
629 struct hclge_tqp, q);
0c218123 630 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
631 tqp->index);
632 buff = buff + ETH_GSTRING_LEN;
633 }
634
635 for (i = 0; i < kinfo->num_tqps; i++) {
636 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
637 struct hclge_tqp, q);
0c218123 638 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
639 tqp->index);
640 buff = buff + ETH_GSTRING_LEN;
641 }
642
643 return buff;
644}
645
ebaf1908 646static u64 *hclge_comm_get_stats(const void *comm_stats,
647 const struct hclge_comm_stats_str strs[],
648 int size, u64 *data)
649{
650 u64 *buf = data;
651 u32 i;
652
653 for (i = 0; i < size; i++)
654 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
655
656 return buf + size;
657}
658
659static u8 *hclge_comm_get_strings(u32 stringset,
660 const struct hclge_comm_stats_str strs[],
661 int size, u8 *data)
662{
663 char *buff = (char *)data;
664 u32 i;
665
666 if (stringset != ETH_SS_STATS)
667 return buff;
668
669 for (i = 0; i < size; i++) {
18d219b7 670 snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
671 buff = buff + ETH_GSTRING_LEN;
672 }
673
674 return (u8 *)buff;
675}
676
677static void hclge_update_stats_for_all(struct hclge_dev *hdev)
678{
679 struct hnae3_handle *handle;
680 int status;
681
682 handle = &hdev->vport[0].nic;
683 if (handle->client) {
684 status = hclge_tqps_update_stats(handle);
685 if (status) {
686 dev_err(&hdev->pdev->dev,
687 "Update TQPS stats fail, status = %d.\n",
688 status);
689 }
690 }
691
692 status = hclge_mac_update_stats(hdev);
693 if (status)
694 dev_err(&hdev->pdev->dev,
695 "Update MAC stats fail, status = %d.\n", status);
696}
697
698static void hclge_update_stats(struct hnae3_handle *handle,
699 struct net_device_stats *net_stats)
700{
701 struct hclge_vport *vport = hclge_get_vport(handle);
702 struct hclge_dev *hdev = vport->back;
703 int status;
704
705 if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
706 return;
707
708 status = hclge_mac_update_stats(hdev);
709 if (status)
710 dev_err(&hdev->pdev->dev,
711 "Update MAC stats fail, status = %d.\n",
712 status);
713
714 status = hclge_tqps_update_stats(handle);
715 if (status)
716 dev_err(&hdev->pdev->dev,
717 "Update TQPS stats fail, status = %d.\n",
718 status);
719
c5f65480 720 clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
721}
722
723static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
724{
725#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
726 HNAE3_SUPPORT_PHY_LOOPBACK |\
727 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
728 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
729
730 struct hclge_vport *vport = hclge_get_vport(handle);
731 struct hclge_dev *hdev = vport->back;
732 int count = 0;
733
 734 /* Loopback test support rules:
 735 * mac: only GE mode is supported
 736 * serdes: all mac modes are supported, including GE/XGE/LGE/CGE
 737 * phy: only supported when a phy device exists on the board
 738 */
739 if (stringset == ETH_SS_TEST) {
740 /* clear loopback bit flags at first */
741 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
3ff6cde8 742 if (hdev->pdev->revision >= 0x21 ||
4dc13b96 743 hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
744 hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
745 hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
746 count += 1;
eb66d503 747 handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
46a3df9f 748 }
5fd50ac3 749
750 count += 2;
751 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
752 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
753
754 if (hdev->hw.mac.phydev) {
755 count += 1;
756 handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
757 }
758
759 } else if (stringset == ETH_SS_STATS) {
760 count = ARRAY_SIZE(g_mac_stats_string) +
761 hclge_tqps_get_sset_count(handle, stringset);
762 }
763
764 return count;
765}
766
9b2f3477 767static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
768 u8 *data)
769{
770 u8 *p = (char *)data;
771 int size;
772
773 if (stringset == ETH_SS_STATS) {
774 size = ARRAY_SIZE(g_mac_stats_string);
775 p = hclge_comm_get_strings(stringset, g_mac_stats_string,
776 size, p);
777 p = hclge_tqps_get_strings(handle, p);
778 } else if (stringset == ETH_SS_TEST) {
eb66d503 779 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
9b2f3477 780 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
781 ETH_GSTRING_LEN);
782 p += ETH_GSTRING_LEN;
783 }
4dc13b96 784 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
9b2f3477 785 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
786 ETH_GSTRING_LEN);
787 p += ETH_GSTRING_LEN;
788 }
789 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
790 memcpy(p,
791 hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
792 ETH_GSTRING_LEN);
793 p += ETH_GSTRING_LEN;
794 }
795 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
9b2f3477 796 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
797 ETH_GSTRING_LEN);
798 p += ETH_GSTRING_LEN;
799 }
800 }
801}
802
803static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
804{
805 struct hclge_vport *vport = hclge_get_vport(handle);
806 struct hclge_dev *hdev = vport->back;
807 u64 *p;
808
1c6dfe6f 809 p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
9b2f3477 810 ARRAY_SIZE(g_mac_stats_string), data);
811 p = hclge_tqps_get_stats(handle, p);
812}
813
814static void hclge_get_mac_stat(struct hnae3_handle *handle,
815 struct hns3_mac_stats *mac_stats)
816{
817 struct hclge_vport *vport = hclge_get_vport(handle);
818 struct hclge_dev *hdev = vport->back;
819
820 hclge_update_stats(handle, NULL);
821
822 mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
823 mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
824}
825
46a3df9f 826static int hclge_parse_func_status(struct hclge_dev *hdev,
d44f9b63 827 struct hclge_func_status_cmd *status)
46a3df9f 828{
829#define HCLGE_MAC_ID_MASK 0xF
830
831 if (!(status->pf_state & HCLGE_PF_STATE_DONE))
832 return -EINVAL;
833
834 /* Set the pf to main pf */
835 if (status->pf_state & HCLGE_PF_STATE_MAIN)
836 hdev->flag |= HCLGE_FLAG_MAIN;
837 else
838 hdev->flag &= ~HCLGE_FLAG_MAIN;
839
ded45d40 840 hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
841 return 0;
842}
843
844static int hclge_query_function_status(struct hclge_dev *hdev)
845{
846#define HCLGE_QUERY_MAX_CNT 5
847
d44f9b63 848 struct hclge_func_status_cmd *req;
849 struct hclge_desc desc;
850 int timeout = 0;
851 int ret;
852
853 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
d44f9b63 854 req = (struct hclge_func_status_cmd *)desc.data;
855
856 do {
857 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
858 if (ret) {
859 dev_err(&hdev->pdev->dev,
9b2f3477 860 "query function status failed %d.\n", ret);
861 return ret;
862 }
863
864 /* Check pf reset is done */
865 if (req->pf_state)
866 break;
867 usleep_range(1000, 2000);
b37ce587 868 } while (timeout++ < HCLGE_QUERY_MAX_CNT);
46a3df9f 869
60df7e91 870 return hclge_parse_func_status(hdev, req);
871}
872
873static int hclge_query_pf_resource(struct hclge_dev *hdev)
874{
d44f9b63 875 struct hclge_pf_res_cmd *req;
876 struct hclge_desc desc;
877 int ret;
878
879 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
880 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
881 if (ret) {
882 dev_err(&hdev->pdev->dev,
883 "query pf resource failed %d.\n", ret);
884 return ret;
885 }
886
d44f9b63 887 req = (struct hclge_pf_res_cmd *)desc.data;
888 hdev->num_tqps = le16_to_cpu(req->tqp_num);
889 hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
46a3df9f 890
891 if (req->tx_buf_size)
892 hdev->tx_buf_size =
60df7e91 893 le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
894 else
895 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
896
897 hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
898
899 if (req->dv_buf_size)
900 hdev->dv_buf_size =
60df7e91 901 le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
902 else
903 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
904
905 hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
906
e92a0843 907 if (hnae3_dev_roce_supported(hdev)) {
375dd5e4 908 hdev->roce_base_msix_offset =
60df7e91 909 hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
375dd5e4 910 HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
887c3820 911 hdev->num_roce_msi =
60df7e91 912 hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
e4e87715 913 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
46a3df9f 914
 915 /* the nic's msix number is always equal to the roce's. */
916 hdev->num_nic_msi = hdev->num_roce_msi;
917
918 /* PF should have NIC vectors and Roce vectors,
919 * NIC vectors are queued before Roce vectors.
920 */
9b2f3477 921 hdev->num_msi = hdev->num_roce_msi +
375dd5e4 922 hdev->roce_base_msix_offset;
923 } else {
924 hdev->num_msi =
60df7e91 925 hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
e4e87715 926 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
927
928 hdev->num_nic_msi = hdev->num_msi;
929 }
930
931 if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
932 dev_err(&hdev->pdev->dev,
933 "Just %u msi resources, not enough for pf(min:2).\n",
934 hdev->num_nic_msi);
935 return -EINVAL;
936 }
937
938 return 0;
939}
940
941static int hclge_parse_speed(int speed_cmd, int *speed)
942{
943 switch (speed_cmd) {
944 case 6:
945 *speed = HCLGE_MAC_SPEED_10M;
946 break;
947 case 7:
948 *speed = HCLGE_MAC_SPEED_100M;
949 break;
950 case 0:
951 *speed = HCLGE_MAC_SPEED_1G;
952 break;
953 case 1:
954 *speed = HCLGE_MAC_SPEED_10G;
955 break;
956 case 2:
957 *speed = HCLGE_MAC_SPEED_25G;
958 break;
959 case 3:
960 *speed = HCLGE_MAC_SPEED_40G;
961 break;
962 case 4:
963 *speed = HCLGE_MAC_SPEED_50G;
964 break;
965 case 5:
966 *speed = HCLGE_MAC_SPEED_100G;
967 break;
968 default:
969 return -EINVAL;
970 }
971
972 return 0;
973}
974
975static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
976{
977 struct hclge_vport *vport = hclge_get_vport(handle);
978 struct hclge_dev *hdev = vport->back;
979 u32 speed_ability = hdev->hw.mac.speed_ability;
980 u32 speed_bit = 0;
981
982 switch (speed) {
983 case HCLGE_MAC_SPEED_10M:
984 speed_bit = HCLGE_SUPPORT_10M_BIT;
985 break;
986 case HCLGE_MAC_SPEED_100M:
987 speed_bit = HCLGE_SUPPORT_100M_BIT;
988 break;
989 case HCLGE_MAC_SPEED_1G:
990 speed_bit = HCLGE_SUPPORT_1G_BIT;
991 break;
992 case HCLGE_MAC_SPEED_10G:
993 speed_bit = HCLGE_SUPPORT_10G_BIT;
994 break;
995 case HCLGE_MAC_SPEED_25G:
996 speed_bit = HCLGE_SUPPORT_25G_BIT;
997 break;
998 case HCLGE_MAC_SPEED_40G:
999 speed_bit = HCLGE_SUPPORT_40G_BIT;
1000 break;
1001 case HCLGE_MAC_SPEED_50G:
1002 speed_bit = HCLGE_SUPPORT_50G_BIT;
1003 break;
1004 case HCLGE_MAC_SPEED_100G:
1005 speed_bit = HCLGE_SUPPORT_100G_BIT;
1006 break;
1007 default:
1008 return -EINVAL;
1009 }
1010
1011 if (speed_bit & speed_ability)
1012 return 0;
1013
1014 return -EINVAL;
1015}
1016
88d10bd6 1017static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
0979aa0b 1018{
0979aa0b 1019 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
db68ca0e 1020 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1021 mac->supported);
1022 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1023 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1024 mac->supported);
1025 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1026 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1027 mac->supported);
1028 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1029 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1030 mac->supported);
1031 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1032 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1033 mac->supported);
1034}
0979aa0b 1035
1036static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
1037{
1038 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1039 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1040 mac->supported);
0979aa0b 1041 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
db68ca0e 1042 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1043 mac->supported);
1044 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1045 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1046 mac->supported);
1047 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1048 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1049 mac->supported);
1050 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1051 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1052 mac->supported);
1053}
0979aa0b 1054
1055static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
1056{
1057 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1058 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1059 mac->supported);
1060 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1061 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1062 mac->supported);
1063 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1064 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1065 mac->supported);
0979aa0b 1066 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1067 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1068 mac->supported);
1069 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1070 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1071 mac->supported);
1072}
0979aa0b 1073
1074static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
1075{
1076 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1077 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1078 mac->supported);
1079 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1080 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1081 mac->supported);
1082 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1083 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1084 mac->supported);
1085 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1086 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1087 mac->supported);
1088 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1089 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1090 mac->supported);
0979aa0b 1091 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1092 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1093 mac->supported);
1094}
0979aa0b 1095
1096static void hclge_convert_setting_fec(struct hclge_mac *mac)
1097{
1098 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1099 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1100
1101 switch (mac->speed) {
1102 case HCLGE_MAC_SPEED_10G:
1103 case HCLGE_MAC_SPEED_40G:
1104 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1105 mac->supported);
1106 mac->fec_ability =
1107 BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1108 break;
1109 case HCLGE_MAC_SPEED_25G:
1110 case HCLGE_MAC_SPEED_50G:
1111 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1112 mac->supported);
1113 mac->fec_ability =
1114 BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1115 BIT(HNAE3_FEC_AUTO);
1116 break;
1117 case HCLGE_MAC_SPEED_100G:
1118 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1119 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1120 break;
1121 default:
1122 mac->fec_ability = 0;
1123 break;
1124 }
1125}
1126
1127static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1128 u8 speed_ability)
1129{
1130 struct hclge_mac *mac = &hdev->hw.mac;
1131
1132 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1133 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1134 mac->supported);
1135
1136 hclge_convert_setting_sr(mac, speed_ability);
1137 hclge_convert_setting_lr(mac, speed_ability);
1138 hclge_convert_setting_cr(mac, speed_ability);
1139 if (hdev->pdev->revision >= 0x21)
1140 hclge_convert_setting_fec(mac);
1141
1142 linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1143 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
7e6ec914 1144 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1145}
1146
1147static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1148 u8 speed_ability)
1149{
1150 struct hclge_mac *mac = &hdev->hw.mac;
1151
1152 hclge_convert_setting_kr(mac, speed_ability);
1153 if (hdev->pdev->revision >= 0x21)
1154 hclge_convert_setting_fec(mac);
1155 linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1156 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
7e6ec914 1157 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1158}
1159
1160static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1161 u8 speed_ability)
1162{
1163 unsigned long *supported = hdev->hw.mac.supported;
1164
 1165 /* default to support all speeds for GE port */
1166 if (!speed_ability)
1167 speed_ability = HCLGE_SUPPORT_GE;
1168
1169 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1170 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1171 supported);
1172
1173 if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1174 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1175 supported);
1176 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1177 supported);
1178 }
1179
1180 if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1181 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1182 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1183 }
1184
1185 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1186 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1187 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
bc3781ed 1188 linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1189}
1190
1191static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
1192{
1193 u8 media_type = hdev->hw.mac.media_type;
1194
1195 if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1196 hclge_parse_fiber_link_mode(hdev, speed_ability);
1197 else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1198 hclge_parse_copper_link_mode(hdev, speed_ability);
1199 else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1200 hclge_parse_backplane_link_mode(hdev, speed_ability);
0979aa0b 1201}
37417c66 1202
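/* return the highest speed set in speed_ability, defaulting to 1G */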
1203static u32 hclge_get_max_speed(u8 speed_ability)
1204{
1205 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1206 return HCLGE_MAC_SPEED_100G;
1207
1208 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1209 return HCLGE_MAC_SPEED_50G;
1210
1211 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1212 return HCLGE_MAC_SPEED_40G;
1213
1214 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1215 return HCLGE_MAC_SPEED_25G;
1216
1217 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1218 return HCLGE_MAC_SPEED_10G;
1219
1220 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1221 return HCLGE_MAC_SPEED_1G;
1222
1223 if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1224 return HCLGE_MAC_SPEED_100M;
1225
1226 if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1227 return HCLGE_MAC_SPEED_10M;
1228
1229 return HCLGE_MAC_SPEED_1G;
1230}
1231
1232static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1233{
d44f9b63 1234 struct hclge_cfg_param_cmd *req;
1235 u64 mac_addr_tmp_high;
1236 u64 mac_addr_tmp;
ebaf1908 1237 unsigned int i;
46a3df9f 1238
d44f9b63 1239 req = (struct hclge_cfg_param_cmd *)desc[0].data;
1240
1241 /* get the configuration */
1242 cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1243 HCLGE_CFG_VMDQ_M,
1244 HCLGE_CFG_VMDQ_S);
1245 cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1246 HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1247 cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1248 HCLGE_CFG_TQP_DESC_N_M,
1249 HCLGE_CFG_TQP_DESC_N_S);
1250
1251 cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1252 HCLGE_CFG_PHY_ADDR_M,
1253 HCLGE_CFG_PHY_ADDR_S);
1254 cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1255 HCLGE_CFG_MEDIA_TP_M,
1256 HCLGE_CFG_MEDIA_TP_S);
1257 cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1258 HCLGE_CFG_RX_BUF_LEN_M,
1259 HCLGE_CFG_RX_BUF_LEN_S);
1260 /* get mac_address */
1261 mac_addr_tmp = __le32_to_cpu(req->param[2]);
1262 mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1263 HCLGE_CFG_MAC_ADDR_H_M,
1264 HCLGE_CFG_MAC_ADDR_H_S);
1265
1266 mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1267
1268 cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1269 HCLGE_CFG_DEFAULT_SPEED_M,
1270 HCLGE_CFG_DEFAULT_SPEED_S);
1271 cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1272 HCLGE_CFG_RSS_SIZE_M,
1273 HCLGE_CFG_RSS_SIZE_S);
0e7a40cd 1274
46a3df9f
S
1275 for (i = 0; i < ETH_ALEN; i++)
1276 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1277
d44f9b63 1278 req = (struct hclge_cfg_param_cmd *)desc[1].data;
46a3df9f 1279 cfg->numa_node_map = __le32_to_cpu(req->param[0]);
0979aa0b 1280
1281 cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1282 HCLGE_CFG_SPEED_ABILITY_M,
1283 HCLGE_CFG_SPEED_ABILITY_S);
1284 cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1285 HCLGE_CFG_UMV_TBL_SPACE_M,
1286 HCLGE_CFG_UMV_TBL_SPACE_S);
1287 if (!cfg->umv_space)
1288 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1289}
1290
 1291/* hclge_get_cfg: query the static parameters from flash
 1292 * @hdev: pointer to struct hclge_dev
 1293 * @hcfg: the config structure to be filled
1294 */
1295static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1296{
1297 struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
d44f9b63 1298 struct hclge_cfg_param_cmd *req;
1299 unsigned int i;
1300 int ret;
1301
1302 for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1303 u32 offset = 0;
1304
d44f9b63 1305 req = (struct hclge_cfg_param_cmd *)desc[i].data;
1306 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1307 true);
1308 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1309 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
46a3df9f 1310 /* Length should be in units of 4 bytes when sent to hardware */
1311 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1312 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
a90bb9a5 1313 req->offset = cpu_to_le32(offset);
1314 }
1315
1316 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1317 if (ret) {
3f639907 1318 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1319 return ret;
1320 }
1321
1322 hclge_parse_cfg(hcfg, desc);
3f639907 1323
1324 return 0;
1325}
1326
1327static int hclge_get_cap(struct hclge_dev *hdev)
1328{
1329 int ret;
1330
1331 ret = hclge_query_function_status(hdev);
1332 if (ret) {
1333 dev_err(&hdev->pdev->dev,
1334 "query function status error %d.\n", ret);
1335 return ret;
1336 }
1337
1338 /* get pf resource */
60df7e91 1339 return hclge_query_pf_resource(hdev);
1340}
1341
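/* reduce resource usage when running in a kdump (crash capture) kernel */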
1342static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1343{
1344#define HCLGE_MIN_TX_DESC 64
1345#define HCLGE_MIN_RX_DESC 64
1346
1347 if (!is_kdump_kernel())
1348 return;
1349
1350 dev_info(&hdev->pdev->dev,
1351 "Running kdump kernel. Using minimal resources\n");
1352
 1353 /* the minimal number of queue pairs equals the number of vports */
1354 hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1355 hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1356 hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1357}
1358
1359static int hclge_configure(struct hclge_dev *hdev)
1360{
1361 struct hclge_cfg cfg;
1362 unsigned int i;
1363 int ret;
1364
1365 ret = hclge_get_cfg(hdev, &cfg);
727f514b 1366 if (ret)
46a3df9f 1367 return ret;
1368
1369 hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1370 hdev->base_tqp_pid = 0;
0e7a40cd 1371 hdev->rss_size_max = cfg.rss_size_max;
46a3df9f 1372 hdev->rx_buf_len = cfg.rx_buf_len;
fbbb1536 1373 ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
46a3df9f 1374 hdev->hw.mac.media_type = cfg.media_type;
2a4776e1 1375 hdev->hw.mac.phy_addr = cfg.phy_addr;
1376 hdev->num_tx_desc = cfg.tqp_desc_num;
1377 hdev->num_rx_desc = cfg.tqp_desc_num;
46a3df9f 1378 hdev->tm_info.num_pg = 1;
cacde272 1379 hdev->tc_max = cfg.tc_num;
46a3df9f 1380 hdev->tm_info.hw_pfc_map = 0;
39932473 1381 hdev->wanted_umv_size = cfg.umv_space;
46a3df9f 1382
44122887 1383 if (hnae3_dev_fd_supported(hdev)) {
9abeb7d8 1384 hdev->fd_en = true;
1385 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1386 }
9abeb7d8 1387
1388 ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1389 if (ret) {
1390 dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
1391 return ret;
1392 }
1393
1394 hclge_parse_link_mode(hdev, cfg.speed_ability);
1395
1396 hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1397
1398 if ((hdev->tc_max > HNAE3_MAX_TC) ||
1399 (hdev->tc_max < 1)) {
adcf738b 1400 dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
1401 hdev->tc_max);
1402 hdev->tc_max = 1;
1403 }
1404
1405 /* Dev does not support DCB */
1406 if (!hnae3_dev_dcb_supported(hdev)) {
1407 hdev->tc_max = 1;
1408 hdev->pfc_max = 0;
1409 } else {
1410 hdev->pfc_max = hdev->tc_max;
1411 }
1412
a2987975 1413 hdev->tm_info.num_tc = 1;
cacde272 1414
46a3df9f 1415 /* Currently, non-contiguous TCs are not supported */
cacde272 1416 for (i = 0; i < hdev->tm_info.num_tc; i++)
e4e87715 1417 hnae3_set_bit(hdev->hw_tc_map, i, 1);
46a3df9f 1418
71b83869 1419 hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
46a3df9f 1420
1421 hclge_init_kdump_kernel_config(hdev);
1422
1423 /* Set the init affinity based on pci func number */
1424 i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
1425 i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
1426 cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
1427 &hdev->affinity_mask);
1428
1429 return ret;
1430}
1431
1432static int hclge_config_tso(struct hclge_dev *hdev, unsigned int tso_mss_min,
1433 unsigned int tso_mss_max)
46a3df9f 1434{
d44f9b63 1435 struct hclge_cfg_tso_status_cmd *req;
46a3df9f 1436 struct hclge_desc desc;
a90bb9a5 1437 u16 tso_mss;
1438
1439 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1440
d44f9b63 1441 req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1442
1443 tso_mss = 0;
1444 hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1445 HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1446 req->tso_mss_min = cpu_to_le16(tso_mss);
1447
1448 tso_mss = 0;
1449 hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1450 HCLGE_TSO_MSS_MIN_S, tso_mss_max);
a90bb9a5 1451 req->tso_mss_max = cpu_to_le16(tso_mss);
1452
1453 return hclge_cmd_send(&hdev->hw, &desc, 1);
1454}
1455
1456static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1457{
1458 struct hclge_cfg_gro_status_cmd *req;
1459 struct hclge_desc desc;
1460 int ret;
1461
1462 if (!hnae3_dev_gro_supported(hdev))
1463 return 0;
1464
1465 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1466 req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1467
1468 req->gro_en = cpu_to_le16(en ? 1 : 0);
1469
1470 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1471 if (ret)
1472 dev_err(&hdev->pdev->dev,
1473 "GRO hardware config cmd failed, ret = %d\n", ret);
1474
1475 return ret;
1476}
1477
1478static int hclge_alloc_tqps(struct hclge_dev *hdev)
1479{
1480 struct hclge_tqp *tqp;
1481 int i;
1482
1483 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1484 sizeof(struct hclge_tqp), GFP_KERNEL);
1485 if (!hdev->htqp)
1486 return -ENOMEM;
1487
1488 tqp = hdev->htqp;
1489
1490 for (i = 0; i < hdev->num_tqps; i++) {
1491 tqp->dev = &hdev->pdev->dev;
1492 tqp->index = i;
1493
1494 tqp->q.ae_algo = &ae_algo;
1495 tqp->q.buf_size = hdev->rx_buf_len;
c0425944
PL
1496 tqp->q.tx_desc_num = hdev->num_tx_desc;
1497 tqp->q.rx_desc_num = hdev->num_rx_desc;
46a3df9f
S
1498 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1499 i * HCLGE_TQP_REG_SIZE;
1500
1501 tqp++;
1502 }
1503
1504 return 0;
1505}
1506
1507static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1508 u16 tqp_pid, u16 tqp_vid, bool is_pf)
1509{
d44f9b63 1510 struct hclge_tqp_map_cmd *req;
1511 struct hclge_desc desc;
1512 int ret;
1513
1514 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1515
d44f9b63 1516 req = (struct hclge_tqp_map_cmd *)desc.data;
46a3df9f 1517 req->tqp_id = cpu_to_le16(tqp_pid);
a90bb9a5 1518 req->tqp_vf = func_id;
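	/* enable the mapping; the TYPE bit appears to mark non-PF (VF) queues */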
1519 req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1520 if (!is_pf)
1521 req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1522 req->tqp_vid = cpu_to_le16(tqp_vid);
1523
1524 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1525 if (ret)
1526 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
46a3df9f 1527
3f639907 1528 return ret;
1529}
1530
672ad0ed 1531static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
46a3df9f 1532{
128b900d 1533 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
46a3df9f 1534 struct hclge_dev *hdev = vport->back;
7df7dad6 1535 int i, alloced;
1536
1537 for (i = 0, alloced = 0; i < hdev->num_tqps &&
672ad0ed 1538 alloced < num_tqps; i++) {
1539 if (!hdev->htqp[i].alloced) {
1540 hdev->htqp[i].q.handle = &vport->nic;
1541 hdev->htqp[i].q.tqp_index = alloced;
1542 hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1543 hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
128b900d 1544 kinfo->tqp[alloced] = &hdev->htqp[i].q;
46a3df9f 1545 hdev->htqp[i].alloced = true;
1546 alloced++;
1547 }
1548 }
1549 vport->alloc_tqps = alloced;
1550 kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1551 vport->alloc_tqps / hdev->tm_info.num_tc);
46a3df9f 1552
 1553 /* ensure a one-to-one mapping between irq and queue by default */
1554 kinfo->rss_size = min_t(u16, kinfo->rss_size,
1555 (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1556
1557 return 0;
1558}
1559
1560static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1561 u16 num_tx_desc, u16 num_rx_desc)
1562
1563{
1564 struct hnae3_handle *nic = &vport->nic;
1565 struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1566 struct hclge_dev *hdev = vport->back;
af958827 1567 int ret;
46a3df9f 1568
1569 kinfo->num_tx_desc = num_tx_desc;
1570 kinfo->num_rx_desc = num_rx_desc;
1571
46a3df9f 1572 kinfo->rx_buf_len = hdev->rx_buf_len;
46a3df9f 1573
672ad0ed 1574 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1575 sizeof(struct hnae3_queue *), GFP_KERNEL);
1576 if (!kinfo->tqp)
1577 return -ENOMEM;
1578
672ad0ed 1579 ret = hclge_assign_tqp(vport, num_tqps);
3f639907 1580 if (ret)
46a3df9f 1581 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
46a3df9f 1582
3f639907 1583 return ret;
1584}
1585
1586static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1587 struct hclge_vport *vport)
1588{
1589 struct hnae3_handle *nic = &vport->nic;
1590 struct hnae3_knic_private_info *kinfo;
1591 u16 i;
1592
1593 kinfo = &nic->kinfo;
205a24ca 1594 for (i = 0; i < vport->alloc_tqps; i++) {
7df7dad6
L
1595 struct hclge_tqp *q =
1596 container_of(kinfo->tqp[i], struct hclge_tqp, q);
1597 bool is_pf;
1598 int ret;
1599
1600 is_pf = !(vport->vport_id);
1601 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1602 i, is_pf);
1603 if (ret)
1604 return ret;
1605 }
1606
1607 return 0;
1608}
1609
1610static int hclge_map_tqp(struct hclge_dev *hdev)
1611{
1612 struct hclge_vport *vport = hdev->vport;
1613 u16 i, num_vport;
1614
1615 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1616 for (i = 0; i < num_vport; i++) {
1617 int ret;
1618
1619 ret = hclge_map_tqp_to_vport(hdev, vport);
1620 if (ret)
1621 return ret;
1622
1623 vport++;
1624 }
1625
1626 return 0;
1627}
1628
1629static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1630{
1631 struct hnae3_handle *nic = &vport->nic;
1632 struct hclge_dev *hdev = vport->back;
1633 int ret;
1634
1635 nic->pdev = hdev->pdev;
1636 nic->ae_algo = &ae_algo;
1637 nic->numa_node_mask = hdev->numa_node_mask;
1638
1639 ret = hclge_knic_setup(vport, num_tqps,
1640 hdev->num_tx_desc, hdev->num_rx_desc);
1641 if (ret)
1642 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
46a3df9f 1643
b69c9737 1644 return ret;
1645}
1646
1647static int hclge_alloc_vport(struct hclge_dev *hdev)
1648{
1649 struct pci_dev *pdev = hdev->pdev;
1650 struct hclge_vport *vport;
1651 u32 tqp_main_vport;
1652 u32 tqp_per_vport;
1653 int num_vport, i;
1654 int ret;
1655
1656 /* We need to alloc a vport for main NIC of PF */
1657 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1658
38e62046 1659 if (hdev->num_tqps < num_vport) {
adcf738b 1660 dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
1661 hdev->num_tqps, num_vport);
1662 return -EINVAL;
1663 }
1664
 1665 /* Alloc the same number of TQPs for every vport; the main vport takes the remainder */
1666 tqp_per_vport = hdev->num_tqps / num_vport;
1667 tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1668
1669 vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1670 GFP_KERNEL);
1671 if (!vport)
1672 return -ENOMEM;
1673
1674 hdev->vport = vport;
1675 hdev->num_alloc_vport = num_vport;
1676
1677 if (IS_ENABLED(CONFIG_PCI_IOV))
1678 hdev->num_alloc_vfs = hdev->num_req_vfs;
1679
1680 for (i = 0; i < num_vport; i++) {
1681 vport->back = hdev;
1682 vport->vport_id = i;
6430f744 1683 vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
818f1675 1684 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
741fca16
JS
1685 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1686 vport->rxvlan_cfg.rx_vlan_offload_en = true;
c6075b19 1687 INIT_LIST_HEAD(&vport->vlan_list);
6dd86902 1688 INIT_LIST_HEAD(&vport->uc_mac_list);
1689 INIT_LIST_HEAD(&vport->mc_mac_list);
ee4bcd3b 1690 spin_lock_init(&vport->mac_list_lock);
46a3df9f
S
1691
1692 if (i == 0)
1693 ret = hclge_vport_setup(vport, tqp_main_vport);
1694 else
1695 ret = hclge_vport_setup(vport, tqp_per_vport);
1696 if (ret) {
1697 dev_err(&pdev->dev,
1698 "vport setup failed for vport %d, %d\n",
1699 i, ret);
1700 return ret;
1701 }
1702
1703 vport++;
1704 }
1705
1706 return 0;
1707}
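/* Worked example of the TQP split above (numbers are purely illustrative):
 * with 64 TQPs, 8 requested VFs and the PF main vport (9 vports in total),
 * tqp_per_vport = 64 / 9 = 7, so each VF vport gets 7 TQPs and the main
 * vport gets 7 + 64 % 9 = 8 TQPs.
 */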
1708
acf61ecd
YL
1709static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1710 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f
S
1711{
1712/* TX buffer size is in units of 128 bytes */
1713#define HCLGE_BUF_SIZE_UNIT_SHIFT 7
1714#define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15)
d44f9b63 1715 struct hclge_tx_buff_alloc_cmd *req;
46a3df9f
S
1716 struct hclge_desc desc;
1717 int ret;
1718 u8 i;
1719
d44f9b63 1720 req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
46a3df9f
S
1721
1722 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
f9f07091 1723 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
acf61ecd 1724 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
9ffe79a9 1725
46a3df9f
S
1726 req->tx_pkt_buff[i] =
1727 cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1728 HCLGE_BUF_SIZE_UPDATE_EN_MSK);
9ffe79a9 1729 }
46a3df9f
S
1730
1731 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 1732 if (ret)
46a3df9f
S
1733 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1734 ret);
46a3df9f 1735
3f639907 1736 return ret;
46a3df9f
S
1737}
1738
acf61ecd
YL
1739static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1740 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f 1741{
acf61ecd 1742 int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
46a3df9f 1743
3f639907
JS
1744 if (ret)
1745 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
46a3df9f 1746
3f639907 1747 return ret;
46a3df9f
S
1748}
1749
1a49f3c6 1750static u32 hclge_get_tc_num(struct hclge_dev *hdev)
46a3df9f 1751{
ebaf1908
WL
1752 unsigned int i;
1753 u32 cnt = 0;
46a3df9f
S
1754
1755 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1756 if (hdev->hw_tc_map & BIT(i))
1757 cnt++;
1758 return cnt;
1759}
1760
46a3df9f 1761/* Get the number of PFC-enabled TCs, which have a private buffer */
acf61ecd
YL
1762static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1763 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f
S
1764{
1765 struct hclge_priv_buf *priv;
ebaf1908
WL
1766 unsigned int i;
1767 int cnt = 0;
46a3df9f
S
1768
1769 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
acf61ecd 1770 priv = &buf_alloc->priv_buf[i];
46a3df9f
S
1771 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1772 priv->enable)
1773 cnt++;
1774 }
1775
1776 return cnt;
1777}
1778
1779/* Get the number of PFC-disabled TCs, which have a private buffer */
acf61ecd
YL
1780static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1781 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f
S
1782{
1783 struct hclge_priv_buf *priv;
ebaf1908
WL
1784 unsigned int i;
1785 int cnt = 0;
46a3df9f
S
1786
1787 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
acf61ecd 1788 priv = &buf_alloc->priv_buf[i];
46a3df9f
S
1789 if (hdev->hw_tc_map & BIT(i) &&
1790 !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1791 priv->enable)
1792 cnt++;
1793 }
1794
1795 return cnt;
1796}
1797
acf61ecd 1798static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f
S
1799{
1800 struct hclge_priv_buf *priv;
1801 u32 rx_priv = 0;
1802 int i;
1803
1804 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
acf61ecd 1805 priv = &buf_alloc->priv_buf[i];
46a3df9f
S
1806 if (priv->enable)
1807 rx_priv += priv->buf_size;
1808 }
1809 return rx_priv;
1810}
1811
acf61ecd 1812static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
9ffe79a9
YL
1813{
1814 u32 i, total_tx_size = 0;
1815
1816 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
acf61ecd 1817 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
9ffe79a9
YL
1818
1819 return total_tx_size;
1820}
1821
acf61ecd
YL
1822static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1823 struct hclge_pkt_buf_alloc *buf_alloc,
1824 u32 rx_all)
46a3df9f 1825{
1a49f3c6
YL
1826 u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1827 u32 tc_num = hclge_get_tc_num(hdev);
b9a400ac 1828 u32 shared_buf, aligned_mps;
46a3df9f
S
1829 u32 rx_priv;
1830 int i;
1831
b9a400ac 1832 aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
46a3df9f 1833
d221df4e 1834 if (hnae3_dev_dcb_supported(hdev))
b37ce587
YM
1835 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1836 hdev->dv_buf_size;
d221df4e 1837 else
b9a400ac 1838 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
368686be 1839 + hdev->dv_buf_size;
d221df4e 1840
db5936db 1841 shared_buf_tc = tc_num * aligned_mps + aligned_mps;
af854724
YL
1842 shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1843 HCLGE_BUF_SIZE_UNIT);
46a3df9f 1844
acf61ecd 1845 rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
af854724 1846 if (rx_all < rx_priv + shared_std)
46a3df9f
S
1847 return false;
1848
b9a400ac 1849 shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
acf61ecd 1850 buf_alloc->s_buf.buf_size = shared_buf;
368686be
YL
1851 if (hnae3_dev_dcb_supported(hdev)) {
1852 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1853 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
b37ce587
YM
1854 - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1855 HCLGE_BUF_SIZE_UNIT);
368686be 1856 } else {
b9a400ac 1857 buf_alloc->s_buf.self.high = aligned_mps +
368686be 1858 HCLGE_NON_DCB_ADDITIONAL_BUF;
1a49f3c6
YL
1859 buf_alloc->s_buf.self.low = aligned_mps;
1860 }
1861
1862 if (hnae3_dev_dcb_supported(hdev)) {
9e15be90
YL
1863 hi_thrd = shared_buf - hdev->dv_buf_size;
1864
1865 if (tc_num <= NEED_RESERVE_TC_NUM)
1866 hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
1867 / BUF_MAX_PERCENT;
1868
1a49f3c6 1869 if (tc_num)
9e15be90 1870 hi_thrd = hi_thrd / tc_num;
1a49f3c6 1871
b37ce587 1872 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
1a49f3c6 1873 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
b37ce587 1874 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
1a49f3c6
YL
1875 } else {
1876 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1877 lo_thrd = aligned_mps;
368686be 1878 }
46a3df9f
S
1879
1880 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1a49f3c6
YL
1881 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1882 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
46a3df9f
S
1883 }
1884
1885 return true;
1886}
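/* hclge_is_rx_buf_ok() checks whether the packet buffer left over after the
 * per-TC private buffers (rx_all - rx_priv) can still hold a shared buffer
 * of at least shared_std bytes. If so, it programs the shared buffer size,
 * the self high/low waterlines and the per-TC thresholds derived from the
 * aligned MPS; otherwise the caller has to shrink the private buffers and
 * retry.
 */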
1887
acf61ecd
YL
1888static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1889 struct hclge_pkt_buf_alloc *buf_alloc)
9ffe79a9
YL
1890{
1891 u32 i, total_size;
1892
1893 total_size = hdev->pkt_buf_size;
1894
1895 /* alloc tx buffer for all enabled tc */
1896 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
acf61ecd 1897 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
9ffe79a9 1898
b6b4f987
HT
1899 if (hdev->hw_tc_map & BIT(i)) {
1900 if (total_size < hdev->tx_buf_size)
1901 return -ENOMEM;
9ffe79a9 1902
368686be 1903 priv->tx_buf_size = hdev->tx_buf_size;
b6b4f987 1904 } else {
9ffe79a9 1905 priv->tx_buf_size = 0;
b6b4f987 1906 }
9ffe79a9
YL
1907
1908 total_size -= priv->tx_buf_size;
1909 }
1910
1911 return 0;
1912}
1913
8ca754b1
YL
1914static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
1915 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f 1916{
8ca754b1
YL
1917 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1918 u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
ebaf1908 1919 unsigned int i;
46a3df9f 1920
46a3df9f 1921 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
8ca754b1 1922 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
46a3df9f 1923
bb1fe9ea
YL
1924 priv->enable = 0;
1925 priv->wl.low = 0;
1926 priv->wl.high = 0;
1927 priv->buf_size = 0;
1928
1929 if (!(hdev->hw_tc_map & BIT(i)))
1930 continue;
1931
1932 priv->enable = 1;
46a3df9f
S
1933
1934 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
b37ce587 1935 priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
8ca754b1
YL
1936 priv->wl.high = roundup(priv->wl.low + aligned_mps,
1937 HCLGE_BUF_SIZE_UNIT);
46a3df9f
S
1938 } else {
1939 priv->wl.low = 0;
b37ce587
YM
1940 priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
1941 aligned_mps;
46a3df9f 1942 }
8ca754b1
YL
1943
1944 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
46a3df9f
S
1945 }
1946
8ca754b1
YL
1947 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1948}
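/* hclge_rx_buf_calc_all() sizes each enabled TC's private rx buffer as its
 * waterline high value plus dv_buf_size. The "max" mode uses the larger
 * MPS-based waterlines, the other mode falls back to the minimum ones so
 * the layout is more likely to fit; both end by re-checking the shared
 * buffer with hclge_is_rx_buf_ok().
 */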
46a3df9f 1949
8ca754b1
YL
1950static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1951 struct hclge_pkt_buf_alloc *buf_alloc)
1952{
1953 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1954 int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1955 int i;
46a3df9f
S
1956
1957	/* clear TCs starting from the last one */
1958 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
8ca754b1 1959 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
ebaf1908 1960 unsigned int mask = BIT((unsigned int)i);
46a3df9f 1961
ebaf1908
WL
1962 if (hdev->hw_tc_map & mask &&
1963 !(hdev->tm_info.hw_pfc_map & mask)) {
46a3df9f
S
1964	/* Clear the private buffer of TCs without PFC */
1965 priv->wl.low = 0;
1966 priv->wl.high = 0;
1967 priv->buf_size = 0;
1968 priv->enable = 0;
1969 no_pfc_priv_num--;
1970 }
1971
acf61ecd 1972 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
46a3df9f
S
1973 no_pfc_priv_num == 0)
1974 break;
1975 }
1976
8ca754b1
YL
1977 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1978}
46a3df9f 1979
8ca754b1
YL
1980static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1981 struct hclge_pkt_buf_alloc *buf_alloc)
1982{
1983 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1984 int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1985 int i;
46a3df9f
S
1986
1987	/* clear TCs starting from the last one */
1988 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
8ca754b1 1989 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
ebaf1908 1990 unsigned int mask = BIT((unsigned int)i);
46a3df9f 1991
ebaf1908
WL
1992 if (hdev->hw_tc_map & mask &&
1993 hdev->tm_info.hw_pfc_map & mask) {
46a3df9f
S
1994	/* Reduce the number of PFC TCs with a private buffer */
1995 priv->wl.low = 0;
1996 priv->enable = 0;
1997 priv->wl.high = 0;
1998 priv->buf_size = 0;
1999 pfc_priv_num--;
2000 }
2001
acf61ecd 2002 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
46a3df9f
S
2003 pfc_priv_num == 0)
2004 break;
2005 }
8ca754b1
YL
2006
2007 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2008}
2009
9e15be90
YL
2010static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2011 struct hclge_pkt_buf_alloc *buf_alloc)
2012{
2013#define COMPENSATE_BUFFER 0x3C00
2014#define COMPENSATE_HALF_MPS_NUM 5
2015#define PRIV_WL_GAP 0x1800
2016
2017 u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2018 u32 tc_num = hclge_get_tc_num(hdev);
2019 u32 half_mps = hdev->mps >> 1;
2020 u32 min_rx_priv;
2021 unsigned int i;
2022
2023 if (tc_num)
2024 rx_priv = rx_priv / tc_num;
2025
2026 if (tc_num <= NEED_RESERVE_TC_NUM)
2027 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2028
2029 min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2030 COMPENSATE_HALF_MPS_NUM * half_mps;
2031 min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2032 rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2033
2034 if (rx_priv < min_rx_priv)
2035 return false;
2036
2037 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2038 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2039
2040 priv->enable = 0;
2041 priv->wl.low = 0;
2042 priv->wl.high = 0;
2043 priv->buf_size = 0;
2044
2045 if (!(hdev->hw_tc_map & BIT(i)))
2046 continue;
2047
2048 priv->enable = 1;
2049 priv->buf_size = rx_priv;
2050 priv->wl.high = rx_priv - hdev->dv_buf_size;
2051 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2052 }
2053
2054 buf_alloc->s_buf.buf_size = 0;
2055
2056 return true;
2057}
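/* hclge_only_alloc_priv_buff() tries to hand the whole remaining packet
 * buffer out as per-TC private buffers with no shared buffer. Each enabled
 * TC gets an equal share (reduced to 90% when no more than two TCs are
 * enabled), and the attempt is rejected if that share drops below
 * dv_buf_size + COMPENSATE_BUFFER + 5 * (mps / 2).
 */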
2058
8ca754b1
YL
2059/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2060 * @hdev: pointer to struct hclge_dev
2061 * @buf_alloc: pointer to buffer calculation data
2062 * @return: 0: calculation successful, negative: fail
2063 */
2064static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2065 struct hclge_pkt_buf_alloc *buf_alloc)
2066{
2067 /* When DCB is not supported, rx private buffer is not allocated. */
2068 if (!hnae3_dev_dcb_supported(hdev)) {
2069 u32 rx_all = hdev->pkt_buf_size;
2070
2071 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2072 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2073 return -ENOMEM;
2074
2075 return 0;
2076 }
2077
9e15be90
YL
2078 if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2079 return 0;
2080
8ca754b1
YL
2081 if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2082 return 0;
2083
2084 /* try to decrease the buffer size */
2085 if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2086 return 0;
2087
2088 if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2089 return 0;
2090
2091 if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
46a3df9f
S
2092 return 0;
2093
2094 return -ENOMEM;
2095}
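/* Rx buffer strategy on DCB capable devices, tried from most to least
 * generous:
 * 1) private buffers only, no shared buffer,
 * 2) private buffers with the larger waterlines plus a shared buffer,
 * 3) the same with minimum waterlines,
 * 4) drop the private buffers of non-PFC TCs,
 * 5) drop the private buffers of PFC TCs as well.
 * The first layout that fits wins; on non-DCB devices only the shared
 * buffer is checked.
 */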
2096
acf61ecd
YL
2097static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2098 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f 2099{
d44f9b63 2100 struct hclge_rx_priv_buff_cmd *req;
46a3df9f
S
2101 struct hclge_desc desc;
2102 int ret;
2103 int i;
2104
2105 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
d44f9b63 2106 req = (struct hclge_rx_priv_buff_cmd *)desc.data;
46a3df9f
S
2107
2108 /* Alloc private buffer TCs */
2109 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
acf61ecd 2110 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
46a3df9f
S
2111
2112 req->buf_num[i] =
2113 cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2114 req->buf_num[i] |=
5bca3b94 2115 cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
46a3df9f
S
2116 }
2117
b8c8bf47 2118 req->shared_buf =
acf61ecd 2119 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
b8c8bf47
YL
2120 (1 << HCLGE_TC0_PRI_BUF_EN_B));
2121
46a3df9f 2122 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 2123 if (ret)
46a3df9f
S
2124 dev_err(&hdev->pdev->dev,
2125 "rx private buffer alloc cmd failed %d\n", ret);
46a3df9f 2126
3f639907 2127 return ret;
46a3df9f
S
2128}
2129
acf61ecd
YL
2130static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2131 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f
S
2132{
2133 struct hclge_rx_priv_wl_buf *req;
2134 struct hclge_priv_buf *priv;
2135 struct hclge_desc desc[2];
2136 int i, j;
2137 int ret;
2138
2139 for (i = 0; i < 2; i++) {
2140 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2141 false);
2142 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2143
2144 /* The first descriptor set the NEXT bit to 1 */
2145 if (i == 0)
2146 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2147 else
2148 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2149
2150 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
acf61ecd
YL
2151 u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2152
2153 priv = &buf_alloc->priv_buf[idx];
46a3df9f
S
2154 req->tc_wl[j].high =
2155 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2156 req->tc_wl[j].high |=
3738287c 2157 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
46a3df9f
S
2158 req->tc_wl[j].low =
2159 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2160 req->tc_wl[j].low |=
3738287c 2161 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
46a3df9f
S
2162 }
2163 }
2164
2165	/* Send 2 descriptors at one time */
2166 ret = hclge_cmd_send(&hdev->hw, desc, 2);
3f639907 2167 if (ret)
46a3df9f
S
2168 dev_err(&hdev->pdev->dev,
2169 "rx private waterline config cmd failed %d\n",
2170 ret);
3f639907 2171 return ret;
46a3df9f
S
2172}
2173
acf61ecd
YL
2174static int hclge_common_thrd_config(struct hclge_dev *hdev,
2175 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f 2176{
acf61ecd 2177 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
46a3df9f
S
2178 struct hclge_rx_com_thrd *req;
2179 struct hclge_desc desc[2];
2180 struct hclge_tc_thrd *tc;
2181 int i, j;
2182 int ret;
2183
2184 for (i = 0; i < 2; i++) {
2185 hclge_cmd_setup_basic_desc(&desc[i],
2186 HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2187 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2188
2189 /* The first descriptor set the NEXT bit to 1 */
2190 if (i == 0)
2191 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2192 else
2193 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2194
2195 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2196 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2197
2198 req->com_thrd[j].high =
2199 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2200 req->com_thrd[j].high |=
3738287c 2201 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
46a3df9f
S
2202 req->com_thrd[j].low =
2203 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2204 req->com_thrd[j].low |=
3738287c 2205 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
46a3df9f
S
2206 }
2207 }
2208
2209 /* Send 2 descriptors at one time */
2210 ret = hclge_cmd_send(&hdev->hw, desc, 2);
3f639907 2211 if (ret)
46a3df9f
S
2212 dev_err(&hdev->pdev->dev,
2213 "common threshold config cmd failed %d\n", ret);
3f639907 2214 return ret;
46a3df9f
S
2215}
2216
acf61ecd
YL
2217static int hclge_common_wl_config(struct hclge_dev *hdev,
2218 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f 2219{
acf61ecd 2220 struct hclge_shared_buf *buf = &buf_alloc->s_buf;
46a3df9f
S
2221 struct hclge_rx_com_wl *req;
2222 struct hclge_desc desc;
2223 int ret;
2224
2225 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2226
2227 req = (struct hclge_rx_com_wl *)desc.data;
2228 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
3738287c 2229 req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
46a3df9f
S
2230
2231 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
3738287c 2232 req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
46a3df9f
S
2233
2234 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 2235 if (ret)
46a3df9f
S
2236 dev_err(&hdev->pdev->dev,
2237 "common waterline config cmd failed %d\n", ret);
46a3df9f 2238
3f639907 2239 return ret;
46a3df9f
S
2240}
2241
2242int hclge_buffer_alloc(struct hclge_dev *hdev)
2243{
acf61ecd 2244 struct hclge_pkt_buf_alloc *pkt_buf;
46a3df9f
S
2245 int ret;
2246
acf61ecd
YL
2247 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2248 if (!pkt_buf)
46a3df9f
S
2249 return -ENOMEM;
2250
acf61ecd 2251 ret = hclge_tx_buffer_calc(hdev, pkt_buf);
9ffe79a9
YL
2252 if (ret) {
2253 dev_err(&hdev->pdev->dev,
2254 "could not calc tx buffer size for all TCs %d\n", ret);
acf61ecd 2255 goto out;
9ffe79a9
YL
2256 }
2257
acf61ecd 2258 ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
46a3df9f
S
2259 if (ret) {
2260 dev_err(&hdev->pdev->dev,
2261 "could not alloc tx buffers %d\n", ret);
acf61ecd 2262 goto out;
46a3df9f
S
2263 }
2264
acf61ecd 2265 ret = hclge_rx_buffer_calc(hdev, pkt_buf);
46a3df9f
S
2266 if (ret) {
2267 dev_err(&hdev->pdev->dev,
2268 "could not calc rx priv buffer size for all TCs %d\n",
2269 ret);
acf61ecd 2270 goto out;
46a3df9f
S
2271 }
2272
acf61ecd 2273 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
46a3df9f
S
2274 if (ret) {
2275 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2276 ret);
acf61ecd 2277 goto out;
46a3df9f
S
2278 }
2279
2daf4a65 2280 if (hnae3_dev_dcb_supported(hdev)) {
acf61ecd 2281 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2daf4a65
YL
2282 if (ret) {
2283 dev_err(&hdev->pdev->dev,
2284 "could not configure rx private waterline %d\n",
2285 ret);
acf61ecd 2286 goto out;
2daf4a65 2287 }
46a3df9f 2288
acf61ecd 2289 ret = hclge_common_thrd_config(hdev, pkt_buf);
2daf4a65
YL
2290 if (ret) {
2291 dev_err(&hdev->pdev->dev,
2292 "could not configure common threshold %d\n",
2293 ret);
acf61ecd 2294 goto out;
2daf4a65 2295 }
46a3df9f
S
2296 }
2297
acf61ecd
YL
2298 ret = hclge_common_wl_config(hdev, pkt_buf);
2299 if (ret)
46a3df9f
S
2300 dev_err(&hdev->pdev->dev,
2301 "could not configure common waterline %d\n", ret);
46a3df9f 2302
acf61ecd
YL
2303out:
2304 kfree(pkt_buf);
2305 return ret;
46a3df9f
S
2306}
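/* hclge_buffer_alloc() is the single entry point that pushes the whole
 * buffer layout to hardware: tx buffer sizes, rx private buffers and, on
 * DCB capable devices, the per-TC waterlines and common thresholds,
 * followed by the common waterline. The scratch hclge_pkt_buf_alloc
 * structure only lives for the duration of this call.
 */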
2307
2308static int hclge_init_roce_base_info(struct hclge_vport *vport)
2309{
2310 struct hnae3_handle *roce = &vport->roce;
2311 struct hnae3_handle *nic = &vport->nic;
2312
887c3820 2313 roce->rinfo.num_vectors = vport->back->num_roce_msi;
46a3df9f
S
2314
2315 if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2316 vport->back->num_msi_left == 0)
2317 return -EINVAL;
2318
2319 roce->rinfo.base_vector = vport->back->roce_base_vector;
2320
2321 roce->rinfo.netdev = nic->kinfo.netdev;
2322 roce->rinfo.roce_io_base = vport->back->hw.io_base;
2323
2324 roce->pdev = nic->pdev;
2325 roce->ae_algo = nic->ae_algo;
2326 roce->numa_node_mask = nic->numa_node_mask;
2327
2328 return 0;
2329}
2330
887c3820 2331static int hclge_init_msi(struct hclge_dev *hdev)
46a3df9f
S
2332{
2333 struct pci_dev *pdev = hdev->pdev;
887c3820
SM
2334 int vectors;
2335 int i;
46a3df9f 2336
580a05f9
YL
2337 vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2338 hdev->num_msi,
887c3820
SM
2339 PCI_IRQ_MSI | PCI_IRQ_MSIX);
2340 if (vectors < 0) {
2341 dev_err(&pdev->dev,
2342 "failed(%d) to allocate MSI/MSI-X vectors\n",
2343 vectors);
2344 return vectors;
46a3df9f 2345 }
887c3820
SM
2346 if (vectors < hdev->num_msi)
2347 dev_warn(&hdev->pdev->dev,
adcf738b 2348 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
887c3820 2349 hdev->num_msi, vectors);
46a3df9f 2350
887c3820
SM
2351 hdev->num_msi = vectors;
2352 hdev->num_msi_left = vectors;
580a05f9 2353
887c3820 2354 hdev->base_msi_vector = pdev->irq;
46a3df9f 2355 hdev->roce_base_vector = hdev->base_msi_vector +
375dd5e4 2356 hdev->roce_base_msix_offset;
46a3df9f 2357
46a3df9f
S
2358 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2359 sizeof(u16), GFP_KERNEL);
887c3820
SM
2360 if (!hdev->vector_status) {
2361 pci_free_irq_vectors(pdev);
46a3df9f 2362 return -ENOMEM;
887c3820 2363 }
46a3df9f
S
2364
2365 for (i = 0; i < hdev->num_msi; i++)
2366 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2367
887c3820
SM
2368 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2369 sizeof(int), GFP_KERNEL);
2370 if (!hdev->vector_irq) {
2371 pci_free_irq_vectors(pdev);
2372 return -ENOMEM;
46a3df9f 2373 }
46a3df9f
S
2374
2375 return 0;
2376}
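/* After hclge_init_msi(), hdev->num_msi holds the number of vectors
 * actually granted by the PCI core (which may be fewer than requested),
 * and vector_status[]/vector_irq[] track per-vector ownership. Vector 0 is
 * claimed later for the misc (reset/mailbox) interrupt by
 * hclge_get_misc_vector().
 */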
2377
2d03eacc 2378static u8 hclge_check_speed_dup(u8 duplex, int speed)
46a3df9f 2379{
2d03eacc
YL
2380 if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2381 duplex = HCLGE_MAC_FULL;
46a3df9f 2382
2d03eacc 2383 return duplex;
46a3df9f
S
2384}
2385
2d03eacc
YL
2386static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2387 u8 duplex)
46a3df9f 2388{
d44f9b63 2389 struct hclge_config_mac_speed_dup_cmd *req;
46a3df9f
S
2390 struct hclge_desc desc;
2391 int ret;
2392
d44f9b63 2393 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
46a3df9f
S
2394
2395 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2396
63cbf7a9
YM
2397 if (duplex)
2398 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
46a3df9f
S
2399
2400 switch (speed) {
2401 case HCLGE_MAC_SPEED_10M:
e4e87715
PL
2402 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2403 HCLGE_CFG_SPEED_S, 6);
46a3df9f
S
2404 break;
2405 case HCLGE_MAC_SPEED_100M:
e4e87715
PL
2406 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2407 HCLGE_CFG_SPEED_S, 7);
46a3df9f
S
2408 break;
2409 case HCLGE_MAC_SPEED_1G:
e4e87715
PL
2410 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2411 HCLGE_CFG_SPEED_S, 0);
46a3df9f
S
2412 break;
2413 case HCLGE_MAC_SPEED_10G:
e4e87715
PL
2414 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2415 HCLGE_CFG_SPEED_S, 1);
46a3df9f
S
2416 break;
2417 case HCLGE_MAC_SPEED_25G:
e4e87715
PL
2418 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2419 HCLGE_CFG_SPEED_S, 2);
46a3df9f
S
2420 break;
2421 case HCLGE_MAC_SPEED_40G:
e4e87715
PL
2422 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2423 HCLGE_CFG_SPEED_S, 3);
46a3df9f
S
2424 break;
2425 case HCLGE_MAC_SPEED_50G:
e4e87715
PL
2426 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2427 HCLGE_CFG_SPEED_S, 4);
46a3df9f
S
2428 break;
2429 case HCLGE_MAC_SPEED_100G:
e4e87715
PL
2430 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2431 HCLGE_CFG_SPEED_S, 5);
46a3df9f
S
2432 break;
2433 default:
d7629e74 2434 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
46a3df9f
S
2435 return -EINVAL;
2436 }
2437
e4e87715
PL
2438 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2439 1);
46a3df9f
S
2440
2441 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2442 if (ret) {
2443 dev_err(&hdev->pdev->dev,
2444 "mac speed/duplex config cmd failed %d.\n", ret);
2445 return ret;
2446 }
2447
2d03eacc
YL
2448 return 0;
2449}
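/* Speed encoding written to the HCLGE_CFG_SPEED field by the switch above:
 * 0 = 1G, 1 = 10G, 2 = 25G, 3 = 40G, 4 = 50G, 5 = 100G, 6 = 10M, 7 = 100M.
 */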
2450
2451int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2452{
68e1006f 2453 struct hclge_mac *mac = &hdev->hw.mac;
2d03eacc
YL
2454 int ret;
2455
2456 duplex = hclge_check_speed_dup(duplex, speed);
68e1006f
JS
2457 if (!mac->support_autoneg && mac->speed == speed &&
2458 mac->duplex == duplex)
2d03eacc
YL
2459 return 0;
2460
2461 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2462 if (ret)
2463 return ret;
2464
2465 hdev->hw.mac.speed = speed;
2466 hdev->hw.mac.duplex = duplex;
46a3df9f
S
2467
2468 return 0;
2469}
2470
2471static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2472 u8 duplex)
2473{
2474 struct hclge_vport *vport = hclge_get_vport(handle);
2475 struct hclge_dev *hdev = vport->back;
2476
2477 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2478}
2479
46a3df9f
S
2480static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2481{
d44f9b63 2482 struct hclge_config_auto_neg_cmd *req;
46a3df9f 2483 struct hclge_desc desc;
a90bb9a5 2484 u32 flag = 0;
46a3df9f
S
2485 int ret;
2486
2487 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2488
d44f9b63 2489 req = (struct hclge_config_auto_neg_cmd *)desc.data;
b9a8f883
YL
2490 if (enable)
2491 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
a90bb9a5 2492 req->cfg_an_cmd_flag = cpu_to_le32(flag);
46a3df9f
S
2493
2494 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 2495 if (ret)
46a3df9f
S
2496 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2497 ret);
46a3df9f 2498
3f639907 2499 return ret;
46a3df9f
S
2500}
2501
2502static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2503{
2504 struct hclge_vport *vport = hclge_get_vport(handle);
2505 struct hclge_dev *hdev = vport->back;
2506
22f48e24
JS
2507 if (!hdev->hw.mac.support_autoneg) {
2508 if (enable) {
2509 dev_err(&hdev->pdev->dev,
2510 "autoneg is not supported by current port\n");
2511 return -EOPNOTSUPP;
2512 } else {
2513 return 0;
2514 }
2515 }
2516
46a3df9f
S
2517 return hclge_set_autoneg_en(hdev, enable);
2518}
2519
2520static int hclge_get_autoneg(struct hnae3_handle *handle)
2521{
2522 struct hclge_vport *vport = hclge_get_vport(handle);
2523 struct hclge_dev *hdev = vport->back;
27b5bf49
FL
2524 struct phy_device *phydev = hdev->hw.mac.phydev;
2525
2526 if (phydev)
2527 return phydev->autoneg;
46a3df9f
S
2528
2529 return hdev->hw.mac.autoneg;
2530}
2531
22f48e24
JS
2532static int hclge_restart_autoneg(struct hnae3_handle *handle)
2533{
2534 struct hclge_vport *vport = hclge_get_vport(handle);
2535 struct hclge_dev *hdev = vport->back;
2536 int ret;
2537
2538 dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2539
2540 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2541 if (ret)
2542 return ret;
2543 return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2544}
2545
7786a996
JS
2546static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2547{
2548 struct hclge_vport *vport = hclge_get_vport(handle);
2549 struct hclge_dev *hdev = vport->back;
2550
2551 if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2552 return hclge_set_autoneg_en(hdev, !halt);
2553
2554 return 0;
2555}
2556
7e6ec914
JS
2557static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2558{
2559 struct hclge_config_fec_cmd *req;
2560 struct hclge_desc desc;
2561 int ret;
2562
2563 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2564
2565 req = (struct hclge_config_fec_cmd *)desc.data;
2566 if (fec_mode & BIT(HNAE3_FEC_AUTO))
2567 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2568 if (fec_mode & BIT(HNAE3_FEC_RS))
2569 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2570 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2571 if (fec_mode & BIT(HNAE3_FEC_BASER))
2572 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2573 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2574
2575 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2576 if (ret)
2577 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2578
2579 return ret;
2580}
2581
2582static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2583{
2584 struct hclge_vport *vport = hclge_get_vport(handle);
2585 struct hclge_dev *hdev = vport->back;
2586 struct hclge_mac *mac = &hdev->hw.mac;
2587 int ret;
2588
2589 if (fec_mode && !(mac->fec_ability & fec_mode)) {
2590 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2591 return -EINVAL;
2592 }
2593
2594 ret = hclge_set_fec_hw(hdev, fec_mode);
2595 if (ret)
2596 return ret;
2597
2598 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2599 return 0;
2600}
2601
2602static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2603 u8 *fec_mode)
2604{
2605 struct hclge_vport *vport = hclge_get_vport(handle);
2606 struct hclge_dev *hdev = vport->back;
2607 struct hclge_mac *mac = &hdev->hw.mac;
2608
2609 if (fec_ability)
2610 *fec_ability = mac->fec_ability;
2611 if (fec_mode)
2612 *fec_mode = mac->fec_mode;
2613}
2614
46a3df9f
S
2615static int hclge_mac_init(struct hclge_dev *hdev)
2616{
2617 struct hclge_mac *mac = &hdev->hw.mac;
2618 int ret;
2619
5d497936 2620 hdev->support_sfp_query = true;
2d03eacc
YL
2621 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2622 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2623 hdev->hw.mac.duplex);
60df7e91 2624 if (ret)
46a3df9f 2625 return ret;
46a3df9f 2626
d736fc6c
JS
2627 if (hdev->hw.mac.support_autoneg) {
2628 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
60df7e91 2629 if (ret)
d736fc6c 2630 return ret;
d736fc6c
JS
2631 }
2632
46a3df9f
S
2633 mac->link = 0;
2634
7e6ec914
JS
2635 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2636 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
60df7e91 2637 if (ret)
7e6ec914 2638 return ret;
7e6ec914
JS
2639 }
2640
e6d7d79d
YL
2641 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2642 if (ret) {
2643 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2644 return ret;
2645 }
f9fd82a9 2646
1cbc662d
YM
2647 ret = hclge_set_default_loopback(hdev);
2648 if (ret)
2649 return ret;
2650
e6d7d79d 2651 ret = hclge_buffer_alloc(hdev);
3f639907 2652 if (ret)
f9fd82a9 2653 dev_err(&hdev->pdev->dev,
e6d7d79d 2654 "allocate buffer fail, ret=%d\n", ret);
f9fd82a9 2655
3f639907 2656 return ret;
46a3df9f
S
2657}
2658
c1a81619
SM
2659static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2660{
1c6dfe6f 2661 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
18e24888 2662 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
1c6dfe6f 2663 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
0ea68902 2664 hclge_wq, &hdev->service_task, 0);
c1a81619
SM
2665}
2666
cb1b9f77
SM
2667static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2668{
acfc3d55
HT
2669 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2670 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
1c6dfe6f 2671 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
0ea68902 2672 hclge_wq, &hdev->service_task, 0);
cb1b9f77
SM
2673}
2674
ed8fb4b2 2675void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
46a3df9f 2676{
d5432455
GL
2677 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2678 !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
08125454 2679 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
0ea68902 2680 hclge_wq, &hdev->service_task,
ed8fb4b2 2681 delay_time);
46a3df9f
S
2682}
2683
2684static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2685{
d44f9b63 2686 struct hclge_link_status_cmd *req;
46a3df9f
S
2687 struct hclge_desc desc;
2688 int link_status;
2689 int ret;
2690
2691 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2692 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2693 if (ret) {
2694 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2695 ret);
2696 return ret;
2697 }
2698
d44f9b63 2699 req = (struct hclge_link_status_cmd *)desc.data;
c79301d8 2700 link_status = req->status & HCLGE_LINK_STATUS_UP_M;
46a3df9f
S
2701
2702 return !!link_status;
2703}
2704
2705static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2706{
ebaf1908 2707 unsigned int mac_state;
46a3df9f
S
2708 int link_stat;
2709
582d37bb
PL
2710 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2711 return 0;
2712
46a3df9f
S
2713 mac_state = hclge_get_mac_link_status(hdev);
2714
2715 if (hdev->hw.mac.phydev) {
fd813314 2716 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
46a3df9f
S
2717 link_stat = mac_state &
2718 hdev->hw.mac.phydev->link;
2719 else
2720 link_stat = 0;
2721
2722 } else {
2723 link_stat = mac_state;
2724 }
2725
2726 return !!link_stat;
2727}
2728
2729static void hclge_update_link_status(struct hclge_dev *hdev)
2730{
45e92b7e 2731 struct hnae3_client *rclient = hdev->roce_client;
46a3df9f 2732 struct hnae3_client *client = hdev->nic_client;
45e92b7e 2733 struct hnae3_handle *rhandle;
46a3df9f
S
2734 struct hnae3_handle *handle;
2735 int state;
2736 int i;
2737
2738 if (!client)
2739 return;
1c6dfe6f
YL
2740
2741 if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2742 return;
2743
46a3df9f
S
2744 state = hclge_get_mac_phy_link(hdev);
2745 if (state != hdev->hw.mac.link) {
2746 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2747 handle = &hdev->vport[i].nic;
2748 client->ops->link_status_change(handle, state);
a6345787 2749 hclge_config_mac_tnl_int(hdev, state);
45e92b7e
PL
2750 rhandle = &hdev->vport[i].roce;
2751 if (rclient && rclient->ops->link_status_change)
2752 rclient->ops->link_status_change(rhandle,
2753 state);
46a3df9f
S
2754 }
2755 hdev->hw.mac.link = state;
2756 }
1c6dfe6f
YL
2757
2758 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
46a3df9f
S
2759}
2760
88d10bd6
JS
2761static void hclge_update_port_capability(struct hclge_mac *mac)
2762{
f438bfe9
JS
2763 /* update fec ability by speed */
2764 hclge_convert_setting_fec(mac);
2765
88d10bd6
JS
2766 /* firmware can not identify back plane type, the media type
2767 * read from configuration can help deal it
2768 */
2769 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2770 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2771 mac->module_type = HNAE3_MODULE_TYPE_KR;
2772 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2773 mac->module_type = HNAE3_MODULE_TYPE_TP;
2774
db4d3d55 2775 if (mac->support_autoneg) {
88d10bd6
JS
2776 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2777 linkmode_copy(mac->advertising, mac->supported);
2778 } else {
2779 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2780 mac->supported);
2781 linkmode_zero(mac->advertising);
2782 }
2783}
2784
5d497936
PL
2785static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2786{
63cbf7a9 2787 struct hclge_sfp_info_cmd *resp;
5d497936
PL
2788 struct hclge_desc desc;
2789 int ret;
2790
88d10bd6
JS
2791 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2792 resp = (struct hclge_sfp_info_cmd *)desc.data;
5d497936
PL
2793 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2794 if (ret == -EOPNOTSUPP) {
2795 dev_warn(&hdev->pdev->dev,
2796				 "IMP does not support getting SFP speed %d\n", ret);
2797 return ret;
2798 } else if (ret) {
2799 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2800 return ret;
2801 }
2802
88d10bd6 2803 *speed = le32_to_cpu(resp->speed);
5d497936
PL
2804
2805 return 0;
2806}
2807
88d10bd6 2808static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
46a3df9f 2809{
88d10bd6
JS
2810 struct hclge_sfp_info_cmd *resp;
2811 struct hclge_desc desc;
46a3df9f
S
2812 int ret;
2813
88d10bd6
JS
2814 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2815 resp = (struct hclge_sfp_info_cmd *)desc.data;
2816
2817 resp->query_type = QUERY_ACTIVE_SPEED;
2818
2819 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2820 if (ret == -EOPNOTSUPP) {
2821 dev_warn(&hdev->pdev->dev,
2822			 "IMP does not support getting SFP info %d\n", ret);
2823 return ret;
2824 } else if (ret) {
2825 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2826 return ret;
2827 }
2828
2af8cb61
GL
2829	/* In some cases, the MAC speed got from IMP may be 0; it shouldn't be
2830 * set to mac->speed.
2831 */
2832 if (!le32_to_cpu(resp->speed))
2833 return 0;
2834
88d10bd6
JS
2835 mac->speed = le32_to_cpu(resp->speed);
2836 /* if resp->speed_ability is 0, it means it's an old version
2837 * firmware, do not update these params
46a3df9f 2838 */
88d10bd6
JS
2839 if (resp->speed_ability) {
2840 mac->module_type = le32_to_cpu(resp->module_type);
2841 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2842 mac->autoneg = resp->autoneg;
2843 mac->support_autoneg = resp->autoneg_ability;
49b12556 2844 mac->speed_type = QUERY_ACTIVE_SPEED;
f438bfe9
JS
2845 if (!resp->active_fec)
2846 mac->fec_mode = 0;
2847 else
2848 mac->fec_mode = BIT(resp->active_fec);
88d10bd6
JS
2849 } else {
2850 mac->speed_type = QUERY_SFP_SPEED;
2851 }
2852
2853 return 0;
2854}
2855
2856static int hclge_update_port_info(struct hclge_dev *hdev)
2857{
2858 struct hclge_mac *mac = &hdev->hw.mac;
2859 int speed = HCLGE_MAC_SPEED_UNKNOWN;
2860 int ret;
2861
2862 /* get the port info from SFP cmd if not copper port */
2863 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
46a3df9f
S
2864 return 0;
2865
88d10bd6 2866 /* if IMP does not support get SFP/qSFP info, return directly */
5d497936
PL
2867 if (!hdev->support_sfp_query)
2868 return 0;
46a3df9f 2869
88d10bd6
JS
2870 if (hdev->pdev->revision >= 0x21)
2871 ret = hclge_get_sfp_info(hdev, mac);
2872 else
2873 ret = hclge_get_sfp_speed(hdev, &speed);
2874
5d497936
PL
2875 if (ret == -EOPNOTSUPP) {
2876 hdev->support_sfp_query = false;
2877 return ret;
2878 } else if (ret) {
2d03eacc 2879 return ret;
46a3df9f
S
2880 }
2881
88d10bd6
JS
2882 if (hdev->pdev->revision >= 0x21) {
2883 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2884 hclge_update_port_capability(mac);
2885 return 0;
2886 }
2887 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2888 HCLGE_MAC_FULL);
2889 } else {
2890 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2891 return 0; /* do nothing if no SFP */
46a3df9f 2892
88d10bd6
JS
2893 /* must config full duplex for SFP */
2894 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2895 }
46a3df9f
S
2896}
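/* On HW revision 0x21 and later the firmware reports full SFP info (speed,
 * module type, autoneg and FEC capability), so the port capability can be
 * refreshed directly; older revisions only report the SFP speed, and the
 * MAC is simply forced to that speed at full duplex (or left alone if no
 * SFP speed is reported).
 */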
2897
2898static int hclge_get_status(struct hnae3_handle *handle)
2899{
2900 struct hclge_vport *vport = hclge_get_vport(handle);
2901 struct hclge_dev *hdev = vport->back;
2902
2903 hclge_update_link_status(hdev);
2904
2905 return hdev->hw.mac.link;
2906}
2907
6430f744
YM
2908static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
2909{
60df7e91 2910 if (!pci_num_vf(hdev->pdev)) {
6430f744
YM
2911 dev_err(&hdev->pdev->dev,
2912 "SRIOV is disabled, can not get vport(%d) info.\n", vf);
2913 return NULL;
2914 }
2915
2916 if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
2917 dev_err(&hdev->pdev->dev,
2918 "vf id(%d) is out of range(0 <= vfid < %d)\n",
2919 vf, pci_num_vf(hdev->pdev));
2920 return NULL;
2921 }
2922
2923 /* VF start from 1 in vport */
2924 vf += HCLGE_VF_VPORT_START_NUM;
2925 return &hdev->vport[vf];
2926}
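/* The vf index passed to these handle ops is zero based, while vport 0 is
 * always the PF itself, so VF n lives in hdev->vport[n + 1]
 * (HCLGE_VF_VPORT_START_NUM).
 */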
2927
2928static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
2929 struct ifla_vf_info *ivf)
2930{
2931 struct hclge_vport *vport = hclge_get_vport(handle);
2932 struct hclge_dev *hdev = vport->back;
2933
2934 vport = hclge_get_vf_vport(hdev, vf);
2935 if (!vport)
2936 return -EINVAL;
2937
2938 ivf->vf = vf;
2939 ivf->linkstate = vport->vf_info.link_state;
22044f95 2940 ivf->spoofchk = vport->vf_info.spoofchk;
e196ec75 2941 ivf->trusted = vport->vf_info.trusted;
ee9e4424
YL
2942 ivf->min_tx_rate = 0;
2943 ivf->max_tx_rate = vport->vf_info.max_tx_rate;
89b40c7f
HT
2944 ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
2945 ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
2946 ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
6430f744
YM
2947 ether_addr_copy(ivf->mac, vport->vf_info.mac);
2948
2949 return 0;
2950}
2951
2952static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
2953 int link_state)
2954{
2955 struct hclge_vport *vport = hclge_get_vport(handle);
2956 struct hclge_dev *hdev = vport->back;
2957
2958 vport = hclge_get_vf_vport(hdev, vf);
2959 if (!vport)
2960 return -EINVAL;
2961
2962 vport->vf_info.link_state = link_state;
2963
2964 return 0;
2965}
2966
ca1d7669
SM
2967static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2968{
5705b451 2969 u32 cmdq_src_reg, msix_src_reg;
ca1d7669
SM
2970
2971 /* fetch the events from their corresponding regs */
c1a81619 2972 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
5705b451 2973 msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
c1a81619
SM
2974
2975 /* Assumption: If by any chance reset and mailbox events are reported
2976	 * together, then we will only process the reset event in this go and will
2977	 * defer the processing of the mailbox events. Since we would not have
2978	 * cleared the RX CMDQ event this time, we would receive another
2979 * interrupt from H/W just for the mailbox.
46ee7350
GL
2980 *
2981 * check for vector0 reset event sources
c1a81619 2982 */
5705b451 2983 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
6dd22bbc
HT
2984 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2985 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2986 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2987 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
f02eb82d 2988 hdev->rst_stats.imp_rst_cnt++;
6dd22bbc
HT
2989 return HCLGE_VECTOR0_EVENT_RST;
2990 }
2991
5705b451 2992 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
65e41e7e 2993 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
8d40854f 2994 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
ca1d7669
SM
2995 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2996 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
f02eb82d 2997 hdev->rst_stats.global_rst_cnt++;
ca1d7669
SM
2998 return HCLGE_VECTOR0_EVENT_RST;
2999 }
3000
f6162d44 3001 /* check for vector0 msix event source */
147175c9 3002 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
9bc6ac91 3003 *clearval = msix_src_reg;
f6162d44 3004 return HCLGE_VECTOR0_EVENT_ERR;
147175c9 3005 }
f6162d44 3006
c1a81619
SM
3007 /* check for vector0 mailbox(=CMDQ RX) event source */
3008 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3009 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3010 *clearval = cmdq_src_reg;
3011 return HCLGE_VECTOR0_EVENT_MBX;
3012 }
ca1d7669 3013
147175c9 3014 /* print other vector0 event source */
9bc6ac91
HT
3015 dev_info(&hdev->pdev->dev,
3016 "CMDQ INT status:0x%x, other INT status:0x%x\n",
3017 cmdq_src_reg, msix_src_reg);
3018 *clearval = msix_src_reg;
3019
ca1d7669
SM
3020 return HCLGE_VECTOR0_EVENT_OTHER;
3021}
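/* Event priority in hclge_check_event_cause(): IMP reset, then global
 * reset, then MSI-X hardware errors, then mailbox (CMDQ RX). Reporting at
 * most one event per interrupt keeps a lower-priority source such as the
 * mailbox pending, so it raises a fresh interrupt once the current event
 * has been handled.
 */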
3022
3023static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3024 u32 regclr)
3025{
c1a81619
SM
3026 switch (event_type) {
3027 case HCLGE_VECTOR0_EVENT_RST:
ca1d7669 3028 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
c1a81619
SM
3029 break;
3030 case HCLGE_VECTOR0_EVENT_MBX:
3031 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3032 break;
fa7a4bd5
JS
3033 default:
3034 break;
c1a81619 3035 }
ca1d7669
SM
3036}
3037
8e52a602
XW
3038static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3039{
3040 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3041 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3042 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3043 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3044 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3045}
3046
466b0c00
L
3047static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3048{
3049 writel(enable ? 1 : 0, vector->addr);
3050}
3051
3052static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3053{
3054 struct hclge_dev *hdev = data;
ebaf1908 3055 u32 clearval = 0;
ca1d7669 3056 u32 event_cause;
466b0c00
L
3057
3058 hclge_enable_vector(&hdev->misc_vector, false);
ca1d7669
SM
3059 event_cause = hclge_check_event_cause(hdev, &clearval);
3060
c1a81619 3061 /* vector 0 interrupt is shared with reset and mailbox source events.*/
ca1d7669 3062 switch (event_cause) {
f6162d44
SM
3063 case HCLGE_VECTOR0_EVENT_ERR:
3064 /* we do not know what type of reset is required now. This could
3065 * only be decided after we fetch the type of errors which
3066		 * caused this event. Therefore, we will do the following for now:
3067		 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
3068		 *    have deferred the type of reset to be used.
3069		 * 2. Schedule the reset service task.
3070		 * 3. When the service task receives the HNAE3_UNKNOWN_RESET type, it
3071 * will fetch the correct type of reset. This would be done
3072 * by first decoding the types of errors.
3073 */
3074 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3075 /* fall through */
ca1d7669 3076 case HCLGE_VECTOR0_EVENT_RST:
cb1b9f77 3077 hclge_reset_task_schedule(hdev);
ca1d7669 3078 break;
c1a81619
SM
3079 case HCLGE_VECTOR0_EVENT_MBX:
3080 /* If we are here then,
3081 * 1. Either we are not handling any mbx task and we are not
3082 * scheduled as well
3083 * OR
3084		 * 2. We could be handling an mbx task but nothing more is
3085		 *    scheduled.
3086		 * In both cases, we should schedule the mbx task as there are more
3087 * mbx messages reported by this interrupt.
3088 */
3089 hclge_mbx_task_schedule(hdev);
f0ad97ac 3090 break;
ca1d7669 3091 default:
f0ad97ac
YL
3092 dev_warn(&hdev->pdev->dev,
3093 "received unknown or unhandled event of vector0\n");
ca1d7669
SM
3094 break;
3095 }
3096
72e2fb07
HT
3097 hclge_clear_event_cause(hdev, event_cause, clearval);
3098
3099	/* Enable the interrupt if it is not caused by reset. And when
3100	 * clearval is equal to 0, it means the interrupt status may be
3101	 * cleared by hardware before the driver reads the status register.
3102	 * In this case, the vector0 interrupt should also be enabled.
3103 */
9bc6ac91
HT
3104 if (!clearval ||
3105 event_cause == HCLGE_VECTOR0_EVENT_MBX) {
cd8c5c26
YL
3106 hclge_enable_vector(&hdev->misc_vector, true);
3107 }
466b0c00
L
3108
3109 return IRQ_HANDLED;
3110}
3111
3112static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3113{
36cbbdf6
PL
3114 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3115 dev_warn(&hdev->pdev->dev,
3116 "vector(vector_id %d) has been freed.\n", vector_id);
3117 return;
3118 }
3119
466b0c00
L
3120 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3121 hdev->num_msi_left += 1;
3122 hdev->num_msi_used -= 1;
3123}
3124
3125static void hclge_get_misc_vector(struct hclge_dev *hdev)
3126{
3127 struct hclge_misc_vector *vector = &hdev->misc_vector;
3128
3129 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3130
3131 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3132 hdev->vector_status[0] = 0;
3133
3134 hdev->num_msi_left -= 1;
3135 hdev->num_msi_used += 1;
3136}
3137
08125454
YL
3138static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3139 const cpumask_t *mask)
3140{
3141 struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3142 affinity_notify);
3143
3144 cpumask_copy(&hdev->affinity_mask, mask);
3145}
3146
3147static void hclge_irq_affinity_release(struct kref *ref)
3148{
3149}
3150
3151static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3152{
3153 irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3154 &hdev->affinity_mask);
3155
3156 hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3157 hdev->affinity_notify.release = hclge_irq_affinity_release;
3158 irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3159 &hdev->affinity_notify);
3160}
3161
3162static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3163{
3164 irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3165 irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3166}
3167
466b0c00
L
3168static int hclge_misc_irq_init(struct hclge_dev *hdev)
3169{
3170 int ret;
3171
3172 hclge_get_misc_vector(hdev);
3173
ca1d7669 3174 /* this would be explicitly freed in the end */
f97c4d82
YL
3175 snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3176 HCLGE_NAME, pci_name(hdev->pdev));
ca1d7669 3177 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
f97c4d82 3178 0, hdev->misc_vector.name, hdev);
466b0c00
L
3179 if (ret) {
3180 hclge_free_vector(hdev, 0);
3181 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3182 hdev->misc_vector.vector_irq);
3183 }
3184
3185 return ret;
3186}
3187
ca1d7669
SM
3188static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3189{
3190 free_irq(hdev->misc_vector.vector_irq, hdev);
3191 hclge_free_vector(hdev, 0);
3192}
3193
af013903
HT
3194int hclge_notify_client(struct hclge_dev *hdev,
3195 enum hnae3_reset_notify_type type)
4ed340ab
L
3196{
3197 struct hnae3_client *client = hdev->nic_client;
3198 u16 i;
3199
9b2f3477 3200 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
bd9109c9
HT
3201 return 0;
3202
4ed340ab
L
3203 if (!client->ops->reset_notify)
3204 return -EOPNOTSUPP;
3205
3206 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3207 struct hnae3_handle *handle = &hdev->vport[i].nic;
3208 int ret;
3209
3210 ret = client->ops->reset_notify(handle, type);
65e41e7e
HT
3211 if (ret) {
3212 dev_err(&hdev->pdev->dev,
3213 "notify nic client failed %d(%d)\n", type, ret);
4ed340ab 3214 return ret;
65e41e7e 3215 }
4ed340ab
L
3216 }
3217
3218 return 0;
3219}
3220
f403a84f
HT
3221static int hclge_notify_roce_client(struct hclge_dev *hdev,
3222 enum hnae3_reset_notify_type type)
3223{
3224 struct hnae3_client *client = hdev->roce_client;
3225 int ret = 0;
3226 u16 i;
3227
9b2f3477 3228 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
f403a84f
HT
3229 return 0;
3230
3231 if (!client->ops->reset_notify)
3232 return -EOPNOTSUPP;
3233
3234 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3235 struct hnae3_handle *handle = &hdev->vport[i].roce;
3236
3237 ret = client->ops->reset_notify(handle, type);
3238 if (ret) {
3239 dev_err(&hdev->pdev->dev,
3240 "notify roce client failed %d(%d)",
3241 type, ret);
3242 return ret;
3243 }
3244 }
3245
3246 return ret;
3247}
3248
4ed340ab
L
3249static int hclge_reset_wait(struct hclge_dev *hdev)
3250{
3251#define HCLGE_RESET_WATI_MS 100
5bb784e9
HT
3252#define HCLGE_RESET_WAIT_CNT 350
3253
4ed340ab
L
3254 u32 val, reg, reg_bit;
3255 u32 cnt = 0;
3256
3257 switch (hdev->reset_type) {
6dd22bbc
HT
3258 case HNAE3_IMP_RESET:
3259 reg = HCLGE_GLOBAL_RESET_REG;
3260 reg_bit = HCLGE_IMP_RESET_BIT;
3261 break;
4ed340ab
L
3262 case HNAE3_GLOBAL_RESET:
3263 reg = HCLGE_GLOBAL_RESET_REG;
3264 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3265 break;
4ed340ab
L
3266 case HNAE3_FUNC_RESET:
3267 reg = HCLGE_FUN_RST_ING;
3268 reg_bit = HCLGE_FUN_RST_ING_B;
3269 break;
3270 default:
3271 dev_err(&hdev->pdev->dev,
3272 "Wait for unsupported reset type: %d\n",
3273 hdev->reset_type);
3274 return -EINVAL;
3275 }
3276
3277 val = hclge_read_dev(&hdev->hw, reg);
e4e87715 3278 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
4ed340ab
L
3279 msleep(HCLGE_RESET_WATI_MS);
3280 val = hclge_read_dev(&hdev->hw, reg);
3281 cnt++;
3282 }
3283
4ed340ab
L
3284 if (cnt >= HCLGE_RESET_WAIT_CNT) {
3285 dev_warn(&hdev->pdev->dev,
3286 "Wait for reset timeout: %d\n", hdev->reset_type);
3287 return -EBUSY;
3288 }
3289
3290 return 0;
3291}
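/* hclge_reset_wait() polls the relevant reset status bit every
 * HCLGE_RESET_WATI_MS (100 ms) for up to HCLGE_RESET_WAIT_CNT (350)
 * iterations, i.e. roughly 35 seconds, before giving up with -EBUSY.
 */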
3292
aa5c4f17
HT
3293static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3294{
3295 struct hclge_vf_rst_cmd *req;
3296 struct hclge_desc desc;
3297
3298 req = (struct hclge_vf_rst_cmd *)desc.data;
3299 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3300 req->dest_vfid = func_id;
3301
3302 if (reset)
3303 req->vf_rst = 0x1;
3304
3305 return hclge_cmd_send(&hdev->hw, &desc, 1);
3306}
3307
e511f17b 3308static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
aa5c4f17
HT
3309{
3310 int i;
3311
3312 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3313 struct hclge_vport *vport = &hdev->vport[i];
3314 int ret;
3315
3316 /* Send cmd to set/clear VF's FUNC_RST_ING */
3317 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3318 if (ret) {
3319 dev_err(&hdev->pdev->dev,
adcf738b 3320 "set vf(%u) rst failed %d!\n",
aa5c4f17
HT
3321 vport->vport_id, ret);
3322 return ret;
3323 }
3324
cc645dfa 3325 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
aa5c4f17
HT
3326 continue;
3327
3328 /* Inform VF to process the reset.
3329 * hclge_inform_reset_assert_to_vf may fail if VF
3330 * driver is not loaded.
3331 */
3332 ret = hclge_inform_reset_assert_to_vf(vport);
3333 if (ret)
3334 dev_warn(&hdev->pdev->dev,
adcf738b 3335 "inform reset to vf(%u) failed %d!\n",
aa5c4f17
HT
3336 vport->vport_id, ret);
3337 }
3338
3339 return 0;
3340}
3341
1c6dfe6f
YL
3342static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3343{
3344 if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3345 test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3346 test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3347 return;
3348
3349 hclge_mbx_handler(hdev);
3350
3351 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3352}
3353
c3106cac 3354static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
427a7bff
HT
3355{
3356 struct hclge_pf_rst_sync_cmd *req;
3357 struct hclge_desc desc;
3358 int cnt = 0;
3359 int ret;
3360
3361 req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3362 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3363
3364 do {
1c6dfe6f
YL
3365		/* VF needs to bring the netdev down by mbx during PF or FLR reset */
3366 hclge_mailbox_service_task(hdev);
3367
427a7bff
HT
3368 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3369		/* for compatibility with old firmware, wait
3370 * 100 ms for VF to stop IO
3371 */
3372 if (ret == -EOPNOTSUPP) {
3373 msleep(HCLGE_RESET_SYNC_TIME);
c3106cac 3374 return;
427a7bff 3375 } else if (ret) {
c3106cac
HT
3376 dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3377 ret);
3378 return;
427a7bff 3379 } else if (req->all_vf_ready) {
c3106cac 3380 return;
427a7bff
HT
3381 }
3382 msleep(HCLGE_PF_RESET_SYNC_TIME);
3383 hclge_cmd_reuse_desc(&desc, true);
3384 } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3385
c3106cac 3386 dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
427a7bff
HT
3387}
3388
a83d2961
WL
3389void hclge_report_hw_error(struct hclge_dev *hdev,
3390 enum hnae3_hw_error_type type)
3391{
3392 struct hnae3_client *client = hdev->nic_client;
3393 u16 i;
3394
3395 if (!client || !client->ops->process_hw_error ||
3396 !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3397 return;
3398
3399 for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3400 client->ops->process_hw_error(&hdev->vport[i].nic, type);
3401}
3402
3403static void hclge_handle_imp_error(struct hclge_dev *hdev)
3404{
3405 u32 reg_val;
3406
3407 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3408 if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3409 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3410 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3411 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3412 }
3413
3414 if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3415 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3416 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3417 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3418 }
3419}
3420
2bfbd35d 3421int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
4ed340ab
L
3422{
3423 struct hclge_desc desc;
3424 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3425 int ret;
3426
3427 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
e4e87715 3428 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
4ed340ab
L
3429 req->fun_reset_vfid = func_id;
3430
3431 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3432 if (ret)
3433 dev_err(&hdev->pdev->dev,
3434 "send function reset cmd fail, status =%d\n", ret);
3435
3436 return ret;
3437}
3438
f2f432f2 3439static void hclge_do_reset(struct hclge_dev *hdev)
4ed340ab 3440{
4f765d3e 3441 struct hnae3_handle *handle = &hdev->vport[0].nic;
4ed340ab
L
3442 struct pci_dev *pdev = hdev->pdev;
3443 u32 val;
3444
4f765d3e 3445 if (hclge_get_hw_reset_stat(handle)) {
8de91e92 3446 dev_info(&pdev->dev, "hardware reset not finished\n");
4f765d3e
HT
3447 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3448 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3449 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3450 return;
3451 }
3452
f2f432f2 3453 switch (hdev->reset_type) {
4ed340ab 3454 case HNAE3_GLOBAL_RESET:
8de91e92 3455 dev_info(&pdev->dev, "global reset requested\n");
4ed340ab 3456 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
e4e87715 3457 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
4ed340ab 3458 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
4ed340ab 3459 break;
4ed340ab 3460 case HNAE3_FUNC_RESET:
8de91e92 3461 dev_info(&pdev->dev, "PF reset requested\n");
cb1b9f77
SM
3462 /* schedule again to check later */
3463 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3464 hclge_reset_task_schedule(hdev);
4ed340ab
L
3465 break;
3466 default:
3467 dev_warn(&pdev->dev,
8de91e92 3468 "unsupported reset type: %d\n", hdev->reset_type);
4ed340ab
L
3469 break;
3470 }
3471}
3472
123297b7 3473static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
f2f432f2
SM
3474 unsigned long *addr)
3475{
3476 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
123297b7 3477 struct hclge_dev *hdev = ae_dev->priv;
f2f432f2 3478
f6162d44
SM
3479 /* first, resolve any unknown reset type to the known type(s) */
3480 if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
d9b81c96 3481 u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
5705b451 3482 HCLGE_MISC_VECTOR_INT_STS);
f6162d44
SM
3483 /* we will intentionally ignore any errors from this function
3484 * as we will end up in *some* reset request in any case
3485 */
d9b81c96
HT
3486 if (hclge_handle_hw_msix_error(hdev, addr))
3487 dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3488 msix_sts_reg);
3489
f6162d44
SM
3490 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3491 /* We deferred the clearing of the error event which caused the
3492 * interrupt, since it was not possible to do that in
3493 * interrupt context (and this is the reason we introduced the
3494 * new UNKNOWN reset type). Now that the errors have been
3495 * handled and cleared in hardware, we can safely enable
3496 * interrupts. This is an exception to the norm.
3497 */
3498 hclge_enable_vector(&hdev->misc_vector, true);
3499 }
3500
f2f432f2 3501 /* return the highest priority reset level amongst all */
7cea834d
HT
3502 if (test_bit(HNAE3_IMP_RESET, addr)) {
3503 rst_level = HNAE3_IMP_RESET;
3504 clear_bit(HNAE3_IMP_RESET, addr);
3505 clear_bit(HNAE3_GLOBAL_RESET, addr);
7cea834d
HT
3506 clear_bit(HNAE3_FUNC_RESET, addr);
3507 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
f2f432f2 3508 rst_level = HNAE3_GLOBAL_RESET;
7cea834d 3509 clear_bit(HNAE3_GLOBAL_RESET, addr);
7cea834d
HT
3510 clear_bit(HNAE3_FUNC_RESET, addr);
3511 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
f2f432f2 3512 rst_level = HNAE3_FUNC_RESET;
7cea834d 3513 clear_bit(HNAE3_FUNC_RESET, addr);
6b9a97ee
HT
3514 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3515 rst_level = HNAE3_FLR_RESET;
3516 clear_bit(HNAE3_FLR_RESET, addr);
7cea834d 3517 }
f2f432f2 3518
0fdf4d30
HT
3519 if (hdev->reset_type != HNAE3_NONE_RESET &&
3520 rst_level < hdev->reset_type)
3521 return HNAE3_NONE_RESET;
3522
f2f432f2
SM
3523 return rst_level;
3524}
3525
cd8c5c26
YL
3526static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3527{
3528 u32 clearval = 0;
3529
3530 switch (hdev->reset_type) {
3531 case HNAE3_IMP_RESET:
3532 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3533 break;
3534 case HNAE3_GLOBAL_RESET:
3535 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3536 break;
cd8c5c26 3537 default:
cd8c5c26
YL
3538 break;
3539 }
3540
3541 if (!clearval)
3542 return;
3543
72e2fb07
HT
3544 /* For revision 0x20, the reset interrupt source
3545 * can only be cleared after the hardware reset is done
3546 */
3547 if (hdev->pdev->revision == 0x20)
3548 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3549 clearval);
3550
cd8c5c26
YL
3551 hclge_enable_vector(&hdev->misc_vector, true);
3552}
3553
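/* Reset handshake with the hardware: setting HCLGE_NIC_SW_RST_RDY in the
 * CSQ depth register tells the hardware that the driver has finished its
 * preparatory work and is ready for the reset to proceed; clearing it
 * withdraws that indication.
 */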
6b428b4f
HT
3554static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3555{
3556 u32 reg_val;
3557
3558 reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3559 if (enable)
3560 reg_val |= HCLGE_NIC_SW_RST_RDY;
3561 else
3562 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3563
3564 hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3565}
3566
c7554dcd
HT
3567static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3568{
3569 int ret;
3570
3571 ret = hclge_set_all_vf_rst(hdev, true);
3572 if (ret)
3573 return ret;
3574
3575 hclge_func_reset_sync_vf(hdev);
3576
3577 return 0;
3578}
3579
35d93a30
HT
3580static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3581{
6dd22bbc 3582 u32 reg_val;
35d93a30
HT
3583 int ret = 0;
3584
3585 switch (hdev->reset_type) {
3586 case HNAE3_FUNC_RESET:
c7554dcd
HT
3587 ret = hclge_func_reset_notify_vf(hdev);
3588 if (ret)
3589 return ret;
427a7bff 3590
35d93a30
HT
3591 ret = hclge_func_reset_cmd(hdev, 0);
3592 if (ret) {
3593 dev_err(&hdev->pdev->dev,
141b95d5 3594 "asserting function reset fail %d!\n", ret);
35d93a30
HT
3595 return ret;
3596 }
3597
3598 /* After performing PF reset, it is not necessary to do the
3599 * mailbox handling or send any command to firmware, because
3600 * any mailbox handling or command to firmware is only valid
3601 * after hclge_cmd_init is called.
3602 */
3603 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
f02eb82d 3604 hdev->rst_stats.pf_rst_cnt++;
35d93a30 3605 break;
6b9a97ee 3606 case HNAE3_FLR_RESET:
c7554dcd
HT
3607 ret = hclge_func_reset_notify_vf(hdev);
3608 if (ret)
3609 return ret;
6b9a97ee 3610 break;
6dd22bbc 3611 case HNAE3_IMP_RESET:
a83d2961 3612 hclge_handle_imp_error(hdev);
6dd22bbc
HT
3613 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3614 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3615 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3616 break;
35d93a30
HT
3617 default:
3618 break;
3619 }
3620
ada13ee3
HT
3621 /* inform hardware that preparatory work is done */
3622 msleep(HCLGE_RESET_SYNC_TIME);
6b428b4f 3623 hclge_reset_handshake(hdev, true);
35d93a30
HT
3624 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3625
3626 return ret;
3627}
3628
8e9eee78 3629static bool hclge_reset_err_handle(struct hclge_dev *hdev)
65e41e7e
HT
3630{
3631#define MAX_RESET_FAIL_CNT 5
65e41e7e
HT
3632
3633 if (hdev->reset_pending) {
3634 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3635 hdev->reset_pending);
3636 return true;
2336f19d
HT
3637 } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3638 HCLGE_RESET_INT_M) {
65e41e7e 3639 dev_info(&hdev->pdev->dev,
2336f19d 3640 "reset failed because new reset interrupt\n");
65e41e7e
HT
3641 hclge_clear_reset_cause(hdev);
3642 return false;
0ecf1f7b
HT
3643 } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3644 hdev->rst_stats.reset_fail_cnt++;
8e9eee78
HT
3645 set_bit(hdev->reset_type, &hdev->reset_pending);
3646 dev_info(&hdev->pdev->dev,
adcf738b 3647 "re-schedule reset task(%u)\n",
0ecf1f7b 3648 hdev->rst_stats.reset_fail_cnt);
8e9eee78 3649 return true;
65e41e7e
HT
3650 }
3651
3652 hclge_clear_reset_cause(hdev);
6b428b4f
HT
3653
3654 /* recover the handshake status when the reset fails */
3655 hclge_reset_handshake(hdev, true);
3656
65e41e7e 3657 dev_err(&hdev->pdev->dev, "Reset fail!\n");
3d77d0cb
HT
3658
3659 hclge_dbg_dump_rst_info(hdev);
3660
d5432455
GL
3661 set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3662
65e41e7e
HT
3663 return false;
3664}
3665
72e2fb07
HT
3666static int hclge_set_rst_done(struct hclge_dev *hdev)
3667{
3668 struct hclge_pf_rst_done_cmd *req;
3669 struct hclge_desc desc;
648db051 3670 int ret;
72e2fb07
HT
3671
3672 req = (struct hclge_pf_rst_done_cmd *)desc.data;
3673 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3674 req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3675
648db051
HT
3676 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3677 /* To be compatible with the old firmware, which does not support
3678 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
3679 * return success
3680 */
3681 if (ret == -EOPNOTSUPP) {
3682 dev_warn(&hdev->pdev->dev,
3683 "current firmware does not support command(0x%x)!\n",
3684 HCLGE_OPC_PF_RST_DONE);
3685 return 0;
3686 } else if (ret) {
3687 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3688 ret);
3689 }
3690
3691 return ret;
72e2fb07
HT
3692}
3693
aa5c4f17
HT
3694static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3695{
3696 int ret = 0;
3697
3698 switch (hdev->reset_type) {
3699 case HNAE3_FUNC_RESET:
6b9a97ee
HT
3700 /* fall through */
3701 case HNAE3_FLR_RESET:
aa5c4f17
HT
3702 ret = hclge_set_all_vf_rst(hdev, false);
3703 break;
72e2fb07
HT
3704 case HNAE3_GLOBAL_RESET:
3705 /* fall through */
3706 case HNAE3_IMP_RESET:
3707 ret = hclge_set_rst_done(hdev);
3708 break;
aa5c4f17
HT
3709 default:
3710 break;
3711 }
3712
6b428b4f
HT
3713 /* clear the handshake status after re-initialization is done */
3714 hclge_reset_handshake(hdev, false);
3715
aa5c4f17
HT
3716 return ret;
3717}
3718
63cbf7a9
YM
3719static int hclge_reset_stack(struct hclge_dev *hdev)
3720{
3721 int ret;
3722
3723 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3724 if (ret)
3725 return ret;
3726
3727 ret = hclge_reset_ae_dev(hdev->ae_dev);
3728 if (ret)
3729 return ret;
3730
039ba863 3731 return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
63cbf7a9
YM
3732}
3733
d4fa0656 3734static int hclge_reset_prepare(struct hclge_dev *hdev)
f2f432f2 3735{
65e41e7e 3736 int ret;
9de0b86f 3737
f02eb82d 3738 hdev->rst_stats.reset_cnt++;
f2f432f2 3739 /* perform reset of the stack & ae device for a client */
65e41e7e
HT
3740 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3741 if (ret)
d4fa0656 3742 return ret;
65e41e7e 3743
6d4fab39 3744 rtnl_lock();
65e41e7e 3745 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
65e41e7e 3746 rtnl_unlock();
65e41e7e 3747 if (ret)
d4fa0656 3748 return ret;
cd8c5c26 3749
d4fa0656
HT
3750 return hclge_reset_prepare_wait(hdev);
3751}
3752
3753static int hclge_reset_rebuild(struct hclge_dev *hdev)
3754{
3755 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3756 enum hnae3_reset_type reset_level;
3757 int ret;
f2f432f2 3758
f02eb82d
HT
3759 hdev->rst_stats.hw_reset_done_cnt++;
3760
65e41e7e
HT
3761 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3762 if (ret)
d4fa0656 3763 return ret;
65e41e7e
HT
3764
3765 rtnl_lock();
63cbf7a9 3766 ret = hclge_reset_stack(hdev);
d4fa0656 3767 rtnl_unlock();
1f609492 3768 if (ret)
d4fa0656 3769 return ret;
1f609492 3770
65e41e7e
HT
3771 hclge_clear_reset_cause(hdev);
3772
aa5c4f17
HT
3773 ret = hclge_reset_prepare_up(hdev);
3774 if (ret)
d4fa0656 3775 return ret;
aa5c4f17 3776
63cbf7a9
YM
3777
3778 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3779 /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
3780 * times
3781 */
0ecf1f7b
HT
3782 if (ret &&
3783 hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
d4fa0656 3784 return ret;
63cbf7a9
YM
3785
3786 rtnl_lock();
65e41e7e 3787 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
6d4fab39 3788 rtnl_unlock();
d4fa0656
HT
3789 if (ret)
3790 return ret;
f403a84f 3791
65e41e7e
HT
3792 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3793 if (ret)
d4fa0656 3794 return ret;
65e41e7e 3795
b644a8d4 3796 hdev->last_reset_time = jiffies;
0ecf1f7b 3797 hdev->rst_stats.reset_fail_cnt = 0;
f02eb82d 3798 hdev->rst_stats.reset_done_cnt++;
d5432455 3799 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
012fcb52
HT
3800
3801 /* if default_reset_request has a higher level reset request,
3802 * it should be handled as soon as possible, since some errors
3803 * need this kind of reset to be fixed.
3804 */
525a294e
HT
3805 reset_level = hclge_get_reset_level(ae_dev,
3806 &hdev->default_reset_request);
3807 if (reset_level != HNAE3_NONE_RESET)
3808 set_bit(reset_level, &hdev->reset_request);
b644a8d4 3809
d4fa0656
HT
3810 return 0;
3811}
3812
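/* The reset is split into three stages: prepare (notify the clients and
 * assert the reset), wait for the hardware to finish, then rebuild
 * (re-initialize the ae device and clients). Any failure falls through to
 * the error handler, which may re-schedule the reset task.
 */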
3813static void hclge_reset(struct hclge_dev *hdev)
3814{
3815 if (hclge_reset_prepare(hdev))
3816 goto err_reset;
3817
3818 if (hclge_reset_wait(hdev))
3819 goto err_reset;
3820
3821 if (hclge_reset_rebuild(hdev))
3822 goto err_reset;
3823
65e41e7e
HT
3824 return;
3825
65e41e7e 3826err_reset:
8e9eee78 3827 if (hclge_reset_err_handle(hdev))
65e41e7e 3828 hclge_reset_task_schedule(hdev);
f2f432f2
SM
3829}
3830
6ae4e733
SJ
3831static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3832{
3833 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3834 struct hclge_dev *hdev = ae_dev->priv;
3835
3836 /* We might end up getting called broadly because of the 2 cases below:
3837 * 1. A recoverable error was conveyed through APEI and the only way to
3838 * restore normalcy is to reset.
3839 * 2. A new reset request from the stack due to timeout
3840 *
3841 * For the first case, the error event might not have an ae handle available.
3842 * Check if this is a new reset request and we are not here just because
6d4c3981
SM
3843 * last reset attempt did not succeed and watchdog hit us again. We will
3844 * know this if last reset request did not occur very recently (watchdog
3845 * timer = 5*HZ, let us check after a sufficiently large time, say 4*5*HZ).
3846 * In case of a new request we reset the "reset level" to PF reset.
9de0b86f
HT
3847 * And if it is a repeat reset request of the most recent one then we
3848 * want to make sure we throttle the reset request. Therefore, we will
3849 * not allow it again before 3*HZ times.
6d4c3981 3850 */
6ae4e733
SJ
3851 if (!handle)
3852 handle = &hdev->vport[0].nic;
3853
b37ce587 3854 if (time_before(jiffies, (hdev->last_reset_time +
012fcb52
HT
3855 HCLGE_RESET_INTERVAL))) {
3856 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
9de0b86f 3857 return;
db4d3d55 3858 } else if (hdev->default_reset_request) {
0742ed7c 3859 hdev->reset_level =
123297b7 3860 hclge_get_reset_level(ae_dev,
720bd583 3861 &hdev->default_reset_request);
db4d3d55 3862 } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
0742ed7c 3863 hdev->reset_level = HNAE3_FUNC_RESET;
db4d3d55 3864 }
4ed340ab 3865
96e65abb 3866 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
0742ed7c 3867 hdev->reset_level);
6d4c3981
SM
3868
3869 /* request reset & schedule reset task */
0742ed7c 3870 set_bit(hdev->reset_level, &hdev->reset_request);
6d4c3981
SM
3871 hclge_reset_task_schedule(hdev);
3872
0742ed7c
HT
3873 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3874 hdev->reset_level++;
4ed340ab
L
3875}
3876
720bd583
HT
3877static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3878 enum hnae3_reset_type rst_type)
3879{
3880 struct hclge_dev *hdev = ae_dev->priv;
3881
3882 set_bit(rst_type, &hdev->default_reset_request);
3883}
3884
65e41e7e
HT
3885static void hclge_reset_timer(struct timer_list *t)
3886{
3887 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3888
012fcb52
HT
3889 /* if default_reset_request has no value, it means that this reset
3890 * request has already been handled, so just return here
3891 */
3892 if (!hdev->default_reset_request)
3893 return;
3894
65e41e7e 3895 dev_info(&hdev->pdev->dev,
e3b84ed2 3896 "triggering reset in reset timer\n");
65e41e7e
HT
3897 hclge_reset_event(hdev->pdev, NULL);
3898}
3899
4ed340ab
L
3900static void hclge_reset_subtask(struct hclge_dev *hdev)
3901{
123297b7
SJ
3902 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3903
f2f432f2
SM
3904 /* check if there is any ongoing reset in the hardware. This status can
3905 * be checked from reset_pending. If there is, then we need to wait for
3906 * hardware to complete reset.
3907 * a. If we are able to figure out in a reasonable time that the hardware
3908 * has fully reset, then we can proceed with the driver and client
3909 * reset.
3910 * b. else, we can come back later to check this status so re-sched
3911 * now.
3912 */
0742ed7c 3913 hdev->last_reset_time = jiffies;
123297b7 3914 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
f2f432f2
SM
3915 if (hdev->reset_type != HNAE3_NONE_RESET)
3916 hclge_reset(hdev);
4ed340ab 3917
f2f432f2 3918 /* check if we got any *new* reset requests to be honored */
123297b7 3919 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
f2f432f2
SM
3920 if (hdev->reset_type != HNAE3_NONE_RESET)
3921 hclge_do_reset(hdev);
4ed340ab 3922
4ed340ab
L
3923 hdev->reset_type = HNAE3_NONE_RESET;
3924}
3925
1c6dfe6f 3926static void hclge_reset_service_task(struct hclge_dev *hdev)
466b0c00 3927{
1c6dfe6f
YL
3928 if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
3929 return;
cb1b9f77 3930
8627bded
HT
3931 down(&hdev->reset_sem);
3932 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
cb1b9f77 3933
4ed340ab 3934 hclge_reset_subtask(hdev);
cb1b9f77
SM
3935
3936 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
8627bded 3937 up(&hdev->reset_sem);
466b0c00
L
3938}
3939
a6d818e3
YL
3940static void hclge_update_vport_alive(struct hclge_dev *hdev)
3941{
3942 int i;
3943
3944 /* start from vport 1 because the PF is always alive */
3945 for (i = 1; i < hdev->num_alloc_vport; i++) {
3946 struct hclge_vport *vport = &hdev->vport[i];
3947
3948 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3949 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
818f1675
YL
3950
3951 /* If the VF is not alive, set its MPS to the default value */
3952 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3953 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
a6d818e3
YL
3954 }
3955}
3956
1c6dfe6f 3957static void hclge_periodic_service_task(struct hclge_dev *hdev)
46a3df9f 3958{
1c6dfe6f 3959 unsigned long delta = round_jiffies_relative(HZ);
7be1b9f3 3960
1c6dfe6f
YL
3961 /* Always handle the link updating to make sure link state is
3962 * updated when it is triggered by mbx.
3963 */
3964 hclge_update_link_status(hdev);
ee4bcd3b 3965 hclge_sync_mac_table(hdev);
c631c696 3966 hclge_sync_promisc_mode(hdev);
46a3df9f 3967
1c6dfe6f
YL
3968 if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
3969 delta = jiffies - hdev->last_serv_processed;
3970
3971 if (delta < round_jiffies_relative(HZ)) {
3972 delta = round_jiffies_relative(HZ) - delta;
3973 goto out;
3974 }
c5f65480
JS
3975 }
3976
1c6dfe6f 3977 hdev->serv_processed_cnt++;
a6d818e3 3978 hclge_update_vport_alive(hdev);
1c6dfe6f
YL
3979
3980 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
3981 hdev->last_serv_processed = jiffies;
3982 goto out;
3983 }
3984
3985 if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
3986 hclge_update_stats_for_all(hdev);
3987
3988 hclge_update_port_info(hdev);
fe4144d4 3989 hclge_sync_vlan_filter(hdev);
db4d3d55 3990
1c6dfe6f 3991 if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
d93ed94f 3992 hclge_rfs_filter_expire(hdev);
7be1b9f3 3993
1c6dfe6f
YL
3994 hdev->last_serv_processed = jiffies;
3995
3996out:
3997 hclge_task_schedule(hdev, delta);
3998}
3999
4000static void hclge_service_task(struct work_struct *work)
4001{
4002 struct hclge_dev *hdev =
4003 container_of(work, struct hclge_dev, service_task.work);
4004
4005 hclge_reset_service_task(hdev);
4006 hclge_mailbox_service_task(hdev);
4007 hclge_periodic_service_task(hdev);
4008
4009 /* Handle reset and mbx again in case the periodic task delayed the
4010 * handling by calling hclge_task_schedule() in
4011 * hclge_periodic_service_task().
4012 */
4013 hclge_reset_service_task(hdev);
4014 hclge_mailbox_service_task(hdev);
46a3df9f
S
4015}
4016
46a3df9f
S
4017struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4018{
4019 /* VF handle has no client */
4020 if (!handle->client)
4021 return container_of(handle, struct hclge_vport, nic);
4022 else if (handle->client->type == HNAE3_CLIENT_ROCE)
4023 return container_of(handle, struct hclge_vport, roce);
4024 else
4025 return container_of(handle, struct hclge_vport, nic);
4026}
4027
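/* Allocate up to vector_num MSI-X vectors for the requesting vport. Index 0
 * is skipped (it is left for the misc interrupt vector used elsewhere in
 * this file); free slots are those still marked HCLGE_INVALID_VPORT in
 * vector_status. Returns the number of vectors actually allocated.
 */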
4028static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4029 struct hnae3_vector_info *vector_info)
4030{
4031 struct hclge_vport *vport = hclge_get_vport(handle);
4032 struct hnae3_vector_info *vector = vector_info;
4033 struct hclge_dev *hdev = vport->back;
4034 int alloc = 0;
4035 int i, j;
4036
580a05f9 4037 vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
46a3df9f
S
4038 vector_num = min(hdev->num_msi_left, vector_num);
4039
4040 for (j = 0; j < vector_num; j++) {
4041 for (i = 1; i < hdev->num_msi; i++) {
4042 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4043 vector->vector = pci_irq_vector(hdev->pdev, i);
4044 vector->io_addr = hdev->hw.io_base +
4045 HCLGE_VECTOR_REG_BASE +
4046 (i - 1) * HCLGE_VECTOR_REG_OFFSET +
4047 vport->vport_id *
4048 HCLGE_VECTOR_VF_OFFSET;
4049 hdev->vector_status[i] = vport->vport_id;
887c3820 4050 hdev->vector_irq[i] = vector->vector;
46a3df9f
S
4051
4052 vector++;
4053 alloc++;
4054
4055 break;
4056 }
4057 }
4058 }
4059 hdev->num_msi_left -= alloc;
4060 hdev->num_msi_used += alloc;
4061
4062 return alloc;
4063}
4064
4065static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4066{
4067 int i;
4068
887c3820
SM
4069 for (i = 0; i < hdev->num_msi; i++)
4070 if (vector == hdev->vector_irq[i])
4071 return i;
4072
46a3df9f
S
4073 return -EINVAL;
4074}
4075
0d3e6631
YL
4076static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4077{
4078 struct hclge_vport *vport = hclge_get_vport(handle);
4079 struct hclge_dev *hdev = vport->back;
4080 int vector_id;
4081
4082 vector_id = hclge_get_vector_index(hdev, vector);
4083 if (vector_id < 0) {
4084 dev_err(&hdev->pdev->dev,
6f8e330d 4085 "Get vector index fail. vector = %d\n", vector);
0d3e6631
YL
4086 return vector_id;
4087 }
4088
4089 hclge_free_vector(hdev, vector_id);
4090
4091 return 0;
4092}
4093
46a3df9f
S
4094static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4095{
4096 return HCLGE_RSS_KEY_SIZE;
4097}
4098
4099static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
4100{
4101 return HCLGE_RSS_IND_TBL_SIZE;
4102}
4103
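/* Program the RSS hash algorithm and hash key. The key does not fit into a
 * single descriptor, so it is written in chunks of HCLGE_RSS_HASH_KEY_NUM
 * bytes, with the chunk index carried in the hash_config offset field.
 */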
46a3df9f
S
4104static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4105 const u8 hfunc, const u8 *key)
4106{
d44f9b63 4107 struct hclge_rss_config_cmd *req;
ebaf1908 4108 unsigned int key_offset = 0;
46a3df9f 4109 struct hclge_desc desc;
3caf772b 4110 int key_counts;
46a3df9f
S
4111 int key_size;
4112 int ret;
4113
3caf772b 4114 key_counts = HCLGE_RSS_KEY_SIZE;
d44f9b63 4115 req = (struct hclge_rss_config_cmd *)desc.data;
46a3df9f 4116
3caf772b 4117 while (key_counts) {
46a3df9f
S
4118 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4119 false);
4120
4121 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4122 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4123
3caf772b 4124 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
46a3df9f
S
4125 memcpy(req->hash_key,
4126 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4127
3caf772b
YM
4128 key_counts -= key_size;
4129 key_offset++;
46a3df9f
S
4130 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4131 if (ret) {
4132 dev_err(&hdev->pdev->dev,
4133 "Configure RSS config fail, status = %d\n",
4134 ret);
4135 return ret;
4136 }
4137 }
4138 return 0;
4139}
4140
89523cfa 4141static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
46a3df9f 4142{
d44f9b63 4143 struct hclge_rss_indirection_table_cmd *req;
46a3df9f
S
4144 struct hclge_desc desc;
4145 int i, j;
4146 int ret;
4147
d44f9b63 4148 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
46a3df9f
S
4149
4150 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
4151 hclge_cmd_setup_basic_desc
4152 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4153
a90bb9a5
YL
4154 req->start_table_index =
4155 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4156 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
46a3df9f
S
4157
4158 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
4159 req->rss_result[j] =
4160 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4161
4162 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4163 if (ret) {
4164 dev_err(&hdev->pdev->dev,
4165 "Configure rss indir table fail,status = %d\n",
4166 ret);
4167 return ret;
4168 }
4169 }
4170 return 0;
4171}
4172
4173static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4174 u16 *tc_size, u16 *tc_offset)
4175{
d44f9b63 4176 struct hclge_rss_tc_mode_cmd *req;
46a3df9f
S
4177 struct hclge_desc desc;
4178 int ret;
4179 int i;
4180
4181 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
d44f9b63 4182 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
46a3df9f
S
4183
4184 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
a90bb9a5
YL
4185 u16 mode = 0;
4186
e4e87715
PL
4187 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4188 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4189 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4190 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4191 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
a90bb9a5
YL
4192
4193 req->rss_tc_mode[i] = cpu_to_le16(mode);
46a3df9f
S
4194 }
4195
4196 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 4197 if (ret)
46a3df9f
S
4198 dev_err(&hdev->pdev->dev,
4199 "Configure rss tc mode fail, status = %d\n", ret);
46a3df9f 4200
3f639907 4201 return ret;
46a3df9f
S
4202}
4203
232fc64b
PL
4204static void hclge_get_rss_type(struct hclge_vport *vport)
4205{
4206 if (vport->rss_tuple_sets.ipv4_tcp_en ||
4207 vport->rss_tuple_sets.ipv4_udp_en ||
4208 vport->rss_tuple_sets.ipv4_sctp_en ||
4209 vport->rss_tuple_sets.ipv6_tcp_en ||
4210 vport->rss_tuple_sets.ipv6_udp_en ||
4211 vport->rss_tuple_sets.ipv6_sctp_en)
4212 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4213 else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4214 vport->rss_tuple_sets.ipv6_fragment_en)
4215 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4216 else
4217 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4218}
4219
46a3df9f
S
4220static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4221{
d44f9b63 4222 struct hclge_rss_input_tuple_cmd *req;
46a3df9f
S
4223 struct hclge_desc desc;
4224 int ret;
4225
4226 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4227
d44f9b63 4228 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
6f2af429
YL
4229
4230 /* Get the tuple cfg from pf */
4231 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4232 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4233 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4234 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4235 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4236 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4237 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4238 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
232fc64b 4239 hclge_get_rss_type(&hdev->vport[0]);
46a3df9f 4240 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 4241 if (ret)
46a3df9f
S
4242 dev_err(&hdev->pdev->dev,
4243 "Configure rss input fail, status = %d\n", ret);
3f639907 4244 return ret;
46a3df9f
S
4245}
4246
4247static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4248 u8 *key, u8 *hfunc)
4249{
4250 struct hclge_vport *vport = hclge_get_vport(handle);
46a3df9f
S
4251 int i;
4252
4253 /* Get hash algorithm */
775501a1
JS
4254 if (hfunc) {
4255 switch (vport->rss_algo) {
4256 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4257 *hfunc = ETH_RSS_HASH_TOP;
4258 break;
4259 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4260 *hfunc = ETH_RSS_HASH_XOR;
4261 break;
4262 default:
4263 *hfunc = ETH_RSS_HASH_UNKNOWN;
4264 break;
4265 }
4266 }
46a3df9f
S
4267
4268 /* Get the RSS Key required by the user */
4269 if (key)
4270 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4271
4272 /* Get indirect table */
4273 if (indir)
4274 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4275 indir[i] = vport->rss_indirection_tbl[i];
4276
4277 return 0;
4278}
4279
4280static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4281 const u8 *key, const u8 hfunc)
4282{
4283 struct hclge_vport *vport = hclge_get_vport(handle);
4284 struct hclge_dev *hdev = vport->back;
4285 u8 hash_algo;
4286 int ret, i;
4287
4288 /* Set the RSS Hash Key if specified by the user */
4289 if (key) {
775501a1
JS
4290 switch (hfunc) {
4291 case ETH_RSS_HASH_TOP:
46a3df9f 4292 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
775501a1
JS
4293 break;
4294 case ETH_RSS_HASH_XOR:
4295 hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4296 break;
4297 case ETH_RSS_HASH_NO_CHANGE:
4298 hash_algo = vport->rss_algo;
4299 break;
4300 default:
46a3df9f 4301 return -EINVAL;
775501a1
JS
4302 }
4303
46a3df9f
S
4304 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4305 if (ret)
4306 return ret;
89523cfa
YL
4307
4308 /* Update the shadow RSS key with the user specified key */
4309 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4310 vport->rss_algo = hash_algo;
46a3df9f
S
4311 }
4312
4313 /* Update the shadow RSS table with user specified qids */
4314 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4315 vport->rss_indirection_tbl[i] = indir[i];
4316
4317 /* Update the hardware */
89523cfa 4318 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
46a3df9f
S
4319}
4320
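/* Translate the ethtool RXH_* flags in nfc->data into the driver's hash
 * bit-field. For example, RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 |
 * RXH_L4_B_2_3 maps to HCLGE_S_IP_BIT | HCLGE_D_IP_BIT | HCLGE_S_PORT_BIT |
 * HCLGE_D_PORT_BIT; SCTP flows additionally get HCLGE_V_TAG_BIT.
 */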
f7db940a
L
4321static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4322{
4323 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4324
4325 if (nfc->data & RXH_L4_B_2_3)
4326 hash_sets |= HCLGE_D_PORT_BIT;
4327 else
4328 hash_sets &= ~HCLGE_D_PORT_BIT;
4329
4330 if (nfc->data & RXH_IP_SRC)
4331 hash_sets |= HCLGE_S_IP_BIT;
4332 else
4333 hash_sets &= ~HCLGE_S_IP_BIT;
4334
4335 if (nfc->data & RXH_IP_DST)
4336 hash_sets |= HCLGE_D_IP_BIT;
4337 else
4338 hash_sets &= ~HCLGE_D_IP_BIT;
4339
4340 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4341 hash_sets |= HCLGE_V_TAG_BIT;
4342
4343 return hash_sets;
4344}
4345
4346static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4347 struct ethtool_rxnfc *nfc)
4348{
4349 struct hclge_vport *vport = hclge_get_vport(handle);
4350 struct hclge_dev *hdev = vport->back;
4351 struct hclge_rss_input_tuple_cmd *req;
4352 struct hclge_desc desc;
4353 u8 tuple_sets;
4354 int ret;
4355
4356 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4357 RXH_L4_B_0_1 | RXH_L4_B_2_3))
4358 return -EINVAL;
4359
4360 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
6f2af429 4361 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
f7db940a 4362
6f2af429
YL
4363 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4364 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4365 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4366 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4367 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4368 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4369 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4370 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
f7db940a
L
4371
4372 tuple_sets = hclge_get_rss_hash_bits(nfc);
4373 switch (nfc->flow_type) {
4374 case TCP_V4_FLOW:
4375 req->ipv4_tcp_en = tuple_sets;
4376 break;
4377 case TCP_V6_FLOW:
4378 req->ipv6_tcp_en = tuple_sets;
4379 break;
4380 case UDP_V4_FLOW:
4381 req->ipv4_udp_en = tuple_sets;
4382 break;
4383 case UDP_V6_FLOW:
4384 req->ipv6_udp_en = tuple_sets;
4385 break;
4386 case SCTP_V4_FLOW:
4387 req->ipv4_sctp_en = tuple_sets;
4388 break;
4389 case SCTP_V6_FLOW:
4390 if ((nfc->data & RXH_L4_B_0_1) ||
4391 (nfc->data & RXH_L4_B_2_3))
4392 return -EINVAL;
4393
4394 req->ipv6_sctp_en = tuple_sets;
4395 break;
4396 case IPV4_FLOW:
4397 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4398 break;
4399 case IPV6_FLOW:
4400 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4401 break;
4402 default:
4403 return -EINVAL;
4404 }
4405
4406 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6f2af429 4407 if (ret) {
f7db940a
L
4408 dev_err(&hdev->pdev->dev,
4409 "Set rss tuple fail, status = %d\n", ret);
6f2af429
YL
4410 return ret;
4411 }
f7db940a 4412
6f2af429
YL
4413 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4414 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4415 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4416 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4417 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4418 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4419 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4420 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
232fc64b 4421 hclge_get_rss_type(vport);
6f2af429 4422 return 0;
f7db940a
L
4423}
4424
07d29954
L
4425static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4426 struct ethtool_rxnfc *nfc)
4427{
4428 struct hclge_vport *vport = hclge_get_vport(handle);
07d29954 4429 u8 tuple_sets;
07d29954
L
4430
4431 nfc->data = 0;
4432
07d29954
L
4433 switch (nfc->flow_type) {
4434 case TCP_V4_FLOW:
6f2af429 4435 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
07d29954
L
4436 break;
4437 case UDP_V4_FLOW:
6f2af429 4438 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
07d29954
L
4439 break;
4440 case TCP_V6_FLOW:
6f2af429 4441 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
07d29954
L
4442 break;
4443 case UDP_V6_FLOW:
6f2af429 4444 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
07d29954
L
4445 break;
4446 case SCTP_V4_FLOW:
6f2af429 4447 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
07d29954
L
4448 break;
4449 case SCTP_V6_FLOW:
6f2af429 4450 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
07d29954
L
4451 break;
4452 case IPV4_FLOW:
4453 case IPV6_FLOW:
4454 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4455 break;
4456 default:
4457 return -EINVAL;
4458 }
4459
4460 if (!tuple_sets)
4461 return 0;
4462
4463 if (tuple_sets & HCLGE_D_PORT_BIT)
4464 nfc->data |= RXH_L4_B_2_3;
4465 if (tuple_sets & HCLGE_S_PORT_BIT)
4466 nfc->data |= RXH_L4_B_0_1;
4467 if (tuple_sets & HCLGE_D_IP_BIT)
4468 nfc->data |= RXH_IP_DST;
4469 if (tuple_sets & HCLGE_S_IP_BIT)
4470 nfc->data |= RXH_IP_SRC;
4471
4472 return 0;
4473}
4474
46a3df9f
S
4475static int hclge_get_tc_size(struct hnae3_handle *handle)
4476{
4477 struct hclge_vport *vport = hclge_get_vport(handle);
4478 struct hclge_dev *hdev = vport->back;
4479
4480 return hdev->rss_size_max;
4481}
4482
77f255c1 4483int hclge_rss_init_hw(struct hclge_dev *hdev)
46a3df9f 4484{
46a3df9f 4485 struct hclge_vport *vport = hdev->vport;
268f5dfa
YL
4486 u8 *rss_indir = vport[0].rss_indirection_tbl;
4487 u16 rss_size = vport[0].alloc_rss_size;
354d0fab
PL
4488 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4489 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
268f5dfa
YL
4490 u8 *key = vport[0].rss_hash_key;
4491 u8 hfunc = vport[0].rss_algo;
46a3df9f 4492 u16 tc_valid[HCLGE_MAX_TC_NUM];
268f5dfa 4493 u16 roundup_size;
ebaf1908
WL
4494 unsigned int i;
4495 int ret;
68ece54e 4496
46a3df9f
S
4497 ret = hclge_set_rss_indir_table(hdev, rss_indir);
4498 if (ret)
268f5dfa 4499 return ret;
46a3df9f 4500
46a3df9f
S
4501 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4502 if (ret)
268f5dfa 4503 return ret;
46a3df9f
S
4504
4505 ret = hclge_set_rss_input_tuple(hdev);
4506 if (ret)
268f5dfa 4507 return ret;
46a3df9f 4508
68ece54e
YL
4509 /* Each TC has the same queue size, and the tc_size set to hardware is
4510 * the log2 of rss_size rounded up to a power of two; the actual queue
4511 * size is limited by the indirection table.
4512 */
4513 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4514 dev_err(&hdev->pdev->dev,
adcf738b 4515 "Configure rss tc size failed, invalid TC_SIZE = %u\n",
68ece54e 4516 rss_size);
268f5dfa 4517 return -EINVAL;
68ece54e
YL
4518 }
4519
4520 roundup_size = roundup_pow_of_two(rss_size);
4521 roundup_size = ilog2(roundup_size);
4522
46a3df9f 4523 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
68ece54e 4524 tc_valid[i] = 0;
46a3df9f 4525
68ece54e
YL
4526 if (!(hdev->hw_tc_map & BIT(i)))
4527 continue;
4528
4529 tc_valid[i] = 1;
4530 tc_size[i] = roundup_size;
4531 tc_offset[i] = rss_size * i;
46a3df9f 4532 }
68ece54e 4533
268f5dfa
YL
4534 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4535}
46a3df9f 4536
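/* Fill each vport's shadow RSS indirection table with a round-robin
 * mapping: entry i points to queue (i % alloc_rss_size), e.g. with an
 * rss_size of 4 the table repeats 0, 1, 2, 3, 0, 1, ...
 */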
268f5dfa
YL
4537void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4538{
4539 struct hclge_vport *vport = hdev->vport;
4540 int i, j;
46a3df9f 4541
268f5dfa
YL
4542 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4543 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4544 vport[j].rss_indirection_tbl[i] =
4545 i % vport[j].alloc_rss_size;
4546 }
4547}
4548
4549static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4550{
472d7ece 4551 int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
268f5dfa 4552 struct hclge_vport *vport = hdev->vport;
472d7ece
JS
4553
4554 if (hdev->pdev->revision >= 0x21)
4555 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
268f5dfa 4556
268f5dfa
YL
4557 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4558 vport[i].rss_tuple_sets.ipv4_tcp_en =
4559 HCLGE_RSS_INPUT_TUPLE_OTHER;
4560 vport[i].rss_tuple_sets.ipv4_udp_en =
4561 HCLGE_RSS_INPUT_TUPLE_OTHER;
4562 vport[i].rss_tuple_sets.ipv4_sctp_en =
4563 HCLGE_RSS_INPUT_TUPLE_SCTP;
4564 vport[i].rss_tuple_sets.ipv4_fragment_en =
4565 HCLGE_RSS_INPUT_TUPLE_OTHER;
4566 vport[i].rss_tuple_sets.ipv6_tcp_en =
4567 HCLGE_RSS_INPUT_TUPLE_OTHER;
4568 vport[i].rss_tuple_sets.ipv6_udp_en =
4569 HCLGE_RSS_INPUT_TUPLE_OTHER;
4570 vport[i].rss_tuple_sets.ipv6_sctp_en =
4571 HCLGE_RSS_INPUT_TUPLE_SCTP;
4572 vport[i].rss_tuple_sets.ipv6_fragment_en =
4573 HCLGE_RSS_INPUT_TUPLE_OTHER;
4574
472d7ece 4575 vport[i].rss_algo = rss_algo;
ea739c90 4576
472d7ece
JS
4577 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4578 HCLGE_RSS_KEY_SIZE);
268f5dfa
YL
4579 }
4580
4581 hclge_rss_indir_init_cfg(hdev);
46a3df9f
S
4582}
4583
84e095d6
SM
4584int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4585 int vector_id, bool en,
4586 struct hnae3_ring_chain_node *ring_chain)
46a3df9f
S
4587{
4588 struct hclge_dev *hdev = vport->back;
46a3df9f
S
4589 struct hnae3_ring_chain_node *node;
4590 struct hclge_desc desc;
37417c66
GL
4591 struct hclge_ctrl_vector_chain_cmd *req =
4592 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
84e095d6
SM
4593 enum hclge_cmd_status status;
4594 enum hclge_opcode_type op;
4595 u16 tqp_type_and_id;
46a3df9f
S
4596 int i;
4597
84e095d6
SM
4598 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4599 hclge_cmd_setup_basic_desc(&desc, op, false);
46a3df9f
S
4600 req->int_vector_id = vector_id;
4601
4602 i = 0;
4603 for (node = ring_chain; node; node = node->next) {
84e095d6 4604 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
e4e87715
PL
4605 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
4606 HCLGE_INT_TYPE_S,
4607 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4608 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4609 HCLGE_TQP_ID_S, node->tqp_index);
4610 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4611 HCLGE_INT_GL_IDX_S,
4612 hnae3_get_field(node->int_gl_idx,
4613 HNAE3_RING_GL_IDX_M,
4614 HNAE3_RING_GL_IDX_S));
84e095d6 4615 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
46a3df9f
S
4616 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4617 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
84e095d6 4618 req->vfid = vport->vport_id;
46a3df9f 4619
84e095d6
SM
4620 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4621 if (status) {
46a3df9f
S
4622 dev_err(&hdev->pdev->dev,
4623 "Map TQP fail, status is %d.\n",
84e095d6
SM
4624 status);
4625 return -EIO;
46a3df9f
S
4626 }
4627 i = 0;
4628
4629 hclge_cmd_setup_basic_desc(&desc,
84e095d6 4630 op,
46a3df9f
S
4631 false);
4632 req->int_vector_id = vector_id;
4633 }
4634 }
4635
4636 if (i > 0) {
4637 req->int_cause_num = i;
84e095d6
SM
4638 req->vfid = vport->vport_id;
4639 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4640 if (status) {
46a3df9f 4641 dev_err(&hdev->pdev->dev,
84e095d6
SM
4642 "Map TQP fail, status is %d.\n", status);
4643 return -EIO;
46a3df9f
S
4644 }
4645 }
4646
4647 return 0;
4648}
4649
9b2f3477 4650static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
84e095d6 4651 struct hnae3_ring_chain_node *ring_chain)
46a3df9f
S
4652{
4653 struct hclge_vport *vport = hclge_get_vport(handle);
4654 struct hclge_dev *hdev = vport->back;
4655 int vector_id;
4656
4657 vector_id = hclge_get_vector_index(hdev, vector);
4658 if (vector_id < 0) {
4659 dev_err(&hdev->pdev->dev,
7ab2b53e 4660 "failed to get vector index. vector=%d\n", vector);
46a3df9f
S
4661 return vector_id;
4662 }
4663
84e095d6 4664 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
46a3df9f
S
4665}
4666
9b2f3477 4667static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
84e095d6 4668 struct hnae3_ring_chain_node *ring_chain)
46a3df9f
S
4669{
4670 struct hclge_vport *vport = hclge_get_vport(handle);
4671 struct hclge_dev *hdev = vport->back;
84e095d6 4672 int vector_id, ret;
46a3df9f 4673
b50ae26c
PL
4674 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4675 return 0;
4676
46a3df9f
S
4677 vector_id = hclge_get_vector_index(hdev, vector);
4678 if (vector_id < 0) {
4679 dev_err(&handle->pdev->dev,
4680 "Get vector index fail. ret =%d\n", vector_id);
4681 return vector_id;
4682 }
4683
84e095d6 4684 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
0d3e6631 4685 if (ret)
84e095d6
SM
4686 dev_err(&handle->pdev->dev,
4687 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
9b2f3477 4688 vector_id, ret);
46a3df9f 4689
0d3e6631 4690 return ret;
46a3df9f
S
4691}
4692
e196ec75
JS
4693static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4694 struct hclge_promisc_param *param)
46a3df9f 4695{
d44f9b63 4696 struct hclge_promisc_cfg_cmd *req;
46a3df9f
S
4697 struct hclge_desc desc;
4698 int ret;
4699
4700 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4701
d44f9b63 4702 req = (struct hclge_promisc_cfg_cmd *)desc.data;
46a3df9f 4703 req->vf_id = param->vf_id;
96c0e861
PL
4704
4705 /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4706 * pdev revision(0x20); newer revisions support them. The
4707 * values of these two fields will not cause an error when the driver
4708 * sends the command to firmware on revision(0x20).
4709 */
4710 req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4711 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
46a3df9f
S
4712
4713 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 4714 if (ret)
46a3df9f 4715 dev_err(&hdev->pdev->dev,
c631c696
JS
4716 "failed to set vport %d promisc mode, ret = %d.\n",
4717 param->vf_id, ret);
3f639907
JS
4718
4719 return ret;
46a3df9f
S
4720}
4721
e196ec75
JS
4722static void hclge_promisc_param_init(struct hclge_promisc_param *param,
4723 bool en_uc, bool en_mc, bool en_bc,
4724 int vport_id)
46a3df9f
S
4725{
4726 if (!param)
4727 return;
4728
4729 memset(param, 0, sizeof(struct hclge_promisc_param));
4730 if (en_uc)
4731 param->enable = HCLGE_PROMISC_EN_UC;
4732 if (en_mc)
4733 param->enable |= HCLGE_PROMISC_EN_MC;
4734 if (en_bc)
4735 param->enable |= HCLGE_PROMISC_EN_BC;
4736 param->vf_id = vport_id;
4737}
4738
e196ec75
JS
4739int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
4740 bool en_mc_pmc, bool en_bc_pmc)
4741{
4742 struct hclge_dev *hdev = vport->back;
4743 struct hclge_promisc_param param;
4744
4745 hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4746 vport->vport_id);
4747 return hclge_cmd_set_promisc_mode(hdev, &param);
4748}
4749
7fa6be4f
HT
4750static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4751 bool en_mc_pmc)
46a3df9f
S
4752{
4753 struct hclge_vport *vport = hclge_get_vport(handle);
28673b33 4754 bool en_bc_pmc = true;
46a3df9f 4755
28673b33
JS
4756 /* For revision 0x20, if broadcast promisc is enabled, the vlan filter
4757 * is always bypassed. So broadcast promisc should be disabled until
4758 * the user enables promisc mode
4759 */
4760 if (handle->pdev->revision == 0x20)
4761 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4762
e196ec75
JS
4763 return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
4764 en_bc_pmc);
46a3df9f
S
4765}
4766
c631c696
JS
4767static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
4768{
4769 struct hclge_vport *vport = hclge_get_vport(handle);
4770 struct hclge_dev *hdev = vport->back;
4771
4772 set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
4773}
4774
d695964d
JS
4775static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4776{
4777 struct hclge_get_fd_mode_cmd *req;
4778 struct hclge_desc desc;
4779 int ret;
4780
4781 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4782
4783 req = (struct hclge_get_fd_mode_cmd *)desc.data;
4784
4785 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4786 if (ret) {
4787 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4788 return ret;
4789 }
4790
4791 *fd_mode = req->mode;
4792
4793 return ret;
4794}
4795
4796static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4797 u32 *stage1_entry_num,
4798 u32 *stage2_entry_num,
4799 u16 *stage1_counter_num,
4800 u16 *stage2_counter_num)
4801{
4802 struct hclge_get_fd_allocation_cmd *req;
4803 struct hclge_desc desc;
4804 int ret;
4805
4806 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4807
4808 req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4809
4810 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4811 if (ret) {
4812 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4813 ret);
4814 return ret;
4815 }
4816
4817 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4818 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4819 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4820 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4821
4822 return ret;
4823}
4824
84944d5c
GL
4825static int hclge_set_fd_key_config(struct hclge_dev *hdev,
4826 enum HCLGE_FD_STAGE stage_num)
d695964d
JS
4827{
4828 struct hclge_set_fd_key_config_cmd *req;
4829 struct hclge_fd_key_cfg *stage;
4830 struct hclge_desc desc;
4831 int ret;
4832
4833 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4834
4835 req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4836 stage = &hdev->fd_cfg.key_cfg[stage_num];
4837 req->stage = stage_num;
4838 req->key_select = stage->key_sel;
4839 req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4840 req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4841 req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4842 req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4843 req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4844 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4845
4846 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4847 if (ret)
4848 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4849
4850 return ret;
4851}
4852
4853static int hclge_init_fd_config(struct hclge_dev *hdev)
4854{
4855#define LOW_2_WORDS 0x03
4856 struct hclge_fd_key_cfg *key_cfg;
4857 int ret;
4858
4859 if (!hnae3_dev_fd_supported(hdev))
4860 return 0;
4861
4862 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4863 if (ret)
4864 return ret;
4865
4866 switch (hdev->fd_cfg.fd_mode) {
4867 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4868 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4869 break;
4870 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4871 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4872 break;
4873 default:
4874 dev_err(&hdev->pdev->dev,
adcf738b 4875 "Unsupported flow director mode %u\n",
d695964d
JS
4876 hdev->fd_cfg.fd_mode);
4877 return -EOPNOTSUPP;
4878 }
4879
d695964d
JS
4880 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4881 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4882 key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4883 key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4884 key_cfg->outer_sipv6_word_en = 0;
4885 key_cfg->outer_dipv6_word_en = 0;
4886
4887 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4888 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4889 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4890 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4891
4892 /* If using the max 400-bit key, we can support tuples for ether type */
16505f87 4893 if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1)
d695964d
JS
4894 key_cfg->tuple_active |=
4895 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
d695964d
JS
4896
4897 /* roce_type is used to filter roce frames
4898 * dst_vport is used to specify the rule
4899 */
4900 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4901
4902 ret = hclge_get_fd_allocation(hdev,
4903 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4904 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4905 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4906 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4907 if (ret)
4908 return ret;
4909
4910 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4911}
4912
11732868
JS
4913static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4914 int loc, u8 *key, bool is_add)
4915{
4916 struct hclge_fd_tcam_config_1_cmd *req1;
4917 struct hclge_fd_tcam_config_2_cmd *req2;
4918 struct hclge_fd_tcam_config_3_cmd *req3;
4919 struct hclge_desc desc[3];
4920 int ret;
4921
4922 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4923 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4924 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4925 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4926 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4927
4928 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4929 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4930 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4931
4932 req1->stage = stage;
4933 req1->xy_sel = sel_x ? 1 : 0;
4934 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4935 req1->index = cpu_to_le32(loc);
4936 req1->entry_vld = sel_x ? is_add : 0;
4937
4938 if (key) {
4939 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4940 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4941 sizeof(req2->tcam_data));
4942 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4943 sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4944 }
4945
4946 ret = hclge_cmd_send(&hdev->hw, desc, 3);
4947 if (ret)
4948 dev_err(&hdev->pdev->dev,
4949 "config tcam key fail, ret=%d\n",
4950 ret);
4951
4952 return ret;
4953}
4954
4955static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4956 struct hclge_fd_ad_data *action)
4957{
4958 struct hclge_fd_ad_config_cmd *req;
4959 struct hclge_desc desc;
4960 u64 ad_data = 0;
4961 int ret;
4962
4963 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4964
4965 req = (struct hclge_fd_ad_config_cmd *)desc.data;
4966 req->index = cpu_to_le32(loc);
4967 req->stage = stage;
4968
4969 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4970 action->write_rule_id_to_bd);
4971 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4972 action->rule_id);
4973 ad_data <<= 32;
4974 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4975 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4976 action->forward_to_direct_queue);
4977 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4978 action->queue_id);
4979 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4980 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4981 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4982 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4983 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4984 action->counter_id);
4985
4986 req->ad_data = cpu_to_le64(ad_data);
4987 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4988 if (ret)
4989 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4990
4991 return ret;
4992}
4993
4994static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4995 struct hclge_fd_rule *rule)
4996{
4997 u16 tmp_x_s, tmp_y_s;
4998 u32 tmp_x_l, tmp_y_l;
4999 int i;
5000
5001 if (rule->unused_tuple & tuple_bit)
5002 return true;
5003
5004 switch (tuple_bit) {
11732868 5005 case BIT(INNER_DST_MAC):
e91e388c
JS
5006 for (i = 0; i < ETH_ALEN; i++) {
5007 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
11732868 5008 rule->tuples_mask.dst_mac[i]);
e91e388c 5009 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
11732868
JS
5010 rule->tuples_mask.dst_mac[i]);
5011 }
5012
5013 return true;
5014 case BIT(INNER_SRC_MAC):
e91e388c
JS
5015 for (i = 0; i < ETH_ALEN; i++) {
5016 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
11732868 5017 rule->tuples.src_mac[i]);
e91e388c 5018 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
11732868
JS
5019 rule->tuples.src_mac[i]);
5020 }
5021
5022 return true;
5023 case BIT(INNER_VLAN_TAG_FST):
5024 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
5025 rule->tuples_mask.vlan_tag1);
5026 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
5027 rule->tuples_mask.vlan_tag1);
5028 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5029 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5030
5031 return true;
5032 case BIT(INNER_ETH_TYPE):
5033 calc_x(tmp_x_s, rule->tuples.ether_proto,
5034 rule->tuples_mask.ether_proto);
5035 calc_y(tmp_y_s, rule->tuples.ether_proto,
5036 rule->tuples_mask.ether_proto);
5037 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5038 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5039
5040 return true;
5041 case BIT(INNER_IP_TOS):
5042 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5043 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5044
5045 return true;
5046 case BIT(INNER_IP_PROTO):
5047 calc_x(*key_x, rule->tuples.ip_proto,
5048 rule->tuples_mask.ip_proto);
5049 calc_y(*key_y, rule->tuples.ip_proto,
5050 rule->tuples_mask.ip_proto);
5051
5052 return true;
5053 case BIT(INNER_SRC_IP):
e91e388c
JS
5054 calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
5055 rule->tuples_mask.src_ip[IPV4_INDEX]);
5056 calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
5057 rule->tuples_mask.src_ip[IPV4_INDEX]);
11732868
JS
5058 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5059 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5060
5061 return true;
5062 case BIT(INNER_DST_IP):
e91e388c
JS
5063 calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
5064 rule->tuples_mask.dst_ip[IPV4_INDEX]);
5065 calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
5066 rule->tuples_mask.dst_ip[IPV4_INDEX]);
11732868
JS
5067 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5068 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5069
5070 return true;
5071 case BIT(INNER_SRC_PORT):
5072 calc_x(tmp_x_s, rule->tuples.src_port,
5073 rule->tuples_mask.src_port);
5074 calc_y(tmp_y_s, rule->tuples.src_port,
5075 rule->tuples_mask.src_port);
5076 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5077 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5078
5079 return true;
5080 case BIT(INNER_DST_PORT):
5081 calc_x(tmp_x_s, rule->tuples.dst_port,
5082 rule->tuples_mask.dst_port);
5083 calc_y(tmp_y_s, rule->tuples.dst_port,
5084 rule->tuples_mask.dst_port);
5085 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5086 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5087
5088 return true;
5089 default:
5090 return false;
5091 }
5092}
5093
5094static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5095 u8 vf_id, u8 network_port_id)
5096{
5097 u32 port_number = 0;
5098
5099 if (port_type == HOST_PORT) {
5100 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5101 pf_id);
5102 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5103 vf_id);
5104 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5105 } else {
5106 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5107 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5108 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5109 }
5110
5111 return port_number;
5112}
5113
5114static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5115 __le32 *key_x, __le32 *key_y,
5116 struct hclge_fd_rule *rule)
5117{
5118 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5119 u8 cur_pos = 0, tuple_size, shift_bits;
ebaf1908 5120 unsigned int i;
11732868
JS
5121
5122 for (i = 0; i < MAX_META_DATA; i++) {
5123 tuple_size = meta_data_key_info[i].key_length;
5124 tuple_bit = key_cfg->meta_data_active & BIT(i);
5125
5126 switch (tuple_bit) {
5127 case BIT(ROCE_TYPE):
5128 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5129 cur_pos += tuple_size;
5130 break;
5131 case BIT(DST_VPORT):
5132 port_number = hclge_get_port_number(HOST_PORT, 0,
5133 rule->vf_id, 0);
5134 hnae3_set_field(meta_data,
5135 GENMASK(cur_pos + tuple_size, cur_pos),
5136 cur_pos, port_number);
5137 cur_pos += tuple_size;
5138 break;
5139 default:
5140 break;
5141 }
5142 }
5143
5144 calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5145 calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5146 shift_bits = sizeof(meta_data) * 8 - cur_pos;
5147
5148 *key_x = cpu_to_le32(tmp_x << shift_bits);
5149 *key_y = cpu_to_le32(tmp_y << shift_bits);
5150}
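/* Editorial sketch (made-up field widths): the shift at the end of
 * hclge_fd_convert_meta_data() left-aligns the bits that were actually
 * consumed so they land in the MSB region of the 32-bit word. If, say,
 * one packet-type bit plus an 8-bit vport field were active, cur_pos
 * would be 9 and the value is shifted up by 32 - 9 = 23.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t meta_data = 0x1a5;	/* 9 bits of example meta data */
	uint8_t cur_pos = 9;		/* bits actually consumed */
	uint8_t shift_bits = sizeof(meta_data) * 8 - cur_pos;

	printf("MSB-aligned meta data: 0x%08x\n",
	       (unsigned int)(meta_data << shift_bits));
	return 0;
}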
5151
5152/* A complete key is combined with meta data key and tuple key.
5153 * Meta data key is stored at the MSB region, and tuple key is stored at
 5154 * the LSB region, and unused bits are filled with 0.
5155 */
5156static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5157 struct hclge_fd_rule *rule)
5158{
5159 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5160 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5161 u8 *cur_key_x, *cur_key_y;
11732868 5162 u8 meta_data_region;
84944d5c
GL
5163 u8 tuple_size;
5164 int ret;
5165 u32 i;
11732868
JS
5166
5167 memset(key_x, 0, sizeof(key_x));
5168 memset(key_y, 0, sizeof(key_y));
5169 cur_key_x = key_x;
5170 cur_key_y = key_y;
5171
 5172 for (i = 0; i < MAX_TUPLE; i++) {
5173 bool tuple_valid;
5174 u32 check_tuple;
5175
5176 tuple_size = tuple_key_info[i].key_length / 8;
5177 check_tuple = key_cfg->tuple_active & BIT(i);
5178
5179 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5180 cur_key_y, rule);
5181 if (tuple_valid) {
5182 cur_key_x += tuple_size;
5183 cur_key_y += tuple_size;
5184 }
5185 }
5186
5187 meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5188 MAX_META_DATA_LENGTH / 8;
5189
5190 hclge_fd_convert_meta_data(key_cfg,
5191 (__le32 *)(key_x + meta_data_region),
5192 (__le32 *)(key_y + meta_data_region),
5193 rule);
5194
5195 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5196 true);
5197 if (ret) {
5198 dev_err(&hdev->pdev->dev,
adcf738b 5199 "fd key_y config fail, loc=%u, ret=%d\n",
11732868
JS
5200 rule->queue_id, ret);
5201 return ret;
5202 }
5203
5204 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5205 true);
5206 if (ret)
5207 dev_err(&hdev->pdev->dev,
adcf738b 5208 "fd key_x config fail, loc=%u, ret=%d\n",
11732868
JS
5209 rule->queue_id, ret);
5210 return ret;
5211}
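/* Editorial sketch (hypothetical lengths): how the meta_data_region offset
 * computed in hclge_config_key() relates to the layout described above.
 * Active tuples are written from the start (LSB region) of the key buffer,
 * the meta data always occupies the last MAX_META_DATA_LENGTH bits (MSB
 * region), and the bytes in between stay zero. The numbers below are for
 * illustration only, not the hardware's actual key geometry.
 */
#include <stdio.h>

#define EX_MAX_KEY_LENGTH	400U	/* bits, hypothetical */
#define EX_META_DATA_LENGTH	16U	/* bits, hypothetical */

int main(void)
{
	unsigned int meta_data_region = EX_MAX_KEY_LENGTH / 8 -
					EX_META_DATA_LENGTH / 8;

	printf("tuple key: bytes [0, %u), meta data: bytes [%u, %u)\n",
	       meta_data_region, meta_data_region, EX_MAX_KEY_LENGTH / 8);
	return 0;
}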
5212
5213static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5214 struct hclge_fd_rule *rule)
5215{
5216 struct hclge_fd_ad_data ad_data;
5217
5218 ad_data.ad_id = rule->location;
5219
5220 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5221 ad_data.drop_packet = true;
5222 ad_data.forward_to_direct_queue = false;
5223 ad_data.queue_id = 0;
5224 } else {
5225 ad_data.drop_packet = false;
5226 ad_data.forward_to_direct_queue = true;
5227 ad_data.queue_id = rule->queue_id;
5228 }
5229
5230 ad_data.use_counter = false;
5231 ad_data.counter_id = 0;
5232
5233 ad_data.use_next_stage = false;
5234 ad_data.next_input_key = 0;
5235
5236 ad_data.write_rule_id_to_bd = true;
5237 ad_data.rule_id = rule->location;
5238
5239 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5240}
5241
736fc0e1
JS
5242static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
5243 u32 *unused_tuple)
dd74f815 5244{
736fc0e1 5245 if (!spec || !unused_tuple)
dd74f815
JS
5246 return -EINVAL;
5247
736fc0e1 5248 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
dd74f815 5249
736fc0e1
JS
5250 if (!spec->ip4src)
5251 *unused_tuple |= BIT(INNER_SRC_IP);
dd74f815 5252
736fc0e1
JS
5253 if (!spec->ip4dst)
5254 *unused_tuple |= BIT(INNER_DST_IP);
dd74f815 5255
736fc0e1
JS
5256 if (!spec->psrc)
5257 *unused_tuple |= BIT(INNER_SRC_PORT);
dd74f815 5258
736fc0e1
JS
5259 if (!spec->pdst)
5260 *unused_tuple |= BIT(INNER_DST_PORT);
dd74f815 5261
736fc0e1
JS
5262 if (!spec->tos)
5263 *unused_tuple |= BIT(INNER_IP_TOS);
dd74f815 5264
736fc0e1
JS
5265 return 0;
5266}
dd74f815 5267
736fc0e1
JS
5268static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
5269 u32 *unused_tuple)
5270{
5271 if (!spec || !unused_tuple)
5272 return -EINVAL;
dd74f815 5273
736fc0e1
JS
5274 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5275 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
dd74f815 5276
736fc0e1
JS
5277 if (!spec->ip4src)
5278 *unused_tuple |= BIT(INNER_SRC_IP);
dd74f815 5279
736fc0e1
JS
5280 if (!spec->ip4dst)
5281 *unused_tuple |= BIT(INNER_DST_IP);
dd74f815 5282
736fc0e1
JS
5283 if (!spec->tos)
5284 *unused_tuple |= BIT(INNER_IP_TOS);
dd74f815 5285
736fc0e1
JS
5286 if (!spec->proto)
5287 *unused_tuple |= BIT(INNER_IP_PROTO);
dd74f815 5288
736fc0e1
JS
5289 if (spec->l4_4_bytes)
5290 return -EOPNOTSUPP;
dd74f815 5291
736fc0e1
JS
5292 if (spec->ip_ver != ETH_RX_NFC_IP4)
5293 return -EOPNOTSUPP;
dd74f815 5294
736fc0e1
JS
5295 return 0;
5296}
dd74f815 5297
736fc0e1
JS
5298static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
5299 u32 *unused_tuple)
5300{
5301 if (!spec || !unused_tuple)
5302 return -EINVAL;
dd74f815 5303
736fc0e1
JS
5304 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5305 BIT(INNER_IP_TOS);
dd74f815 5306
736fc0e1
JS
 5307 /* check whether src/dst ip address is used */
5308 if (!spec->ip6src[0] && !spec->ip6src[1] &&
5309 !spec->ip6src[2] && !spec->ip6src[3])
5310 *unused_tuple |= BIT(INNER_SRC_IP);
dd74f815 5311
736fc0e1
JS
5312 if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
5313 !spec->ip6dst[2] && !spec->ip6dst[3])
5314 *unused_tuple |= BIT(INNER_DST_IP);
dd74f815 5315
736fc0e1
JS
5316 if (!spec->psrc)
5317 *unused_tuple |= BIT(INNER_SRC_PORT);
dd74f815 5318
736fc0e1
JS
5319 if (!spec->pdst)
5320 *unused_tuple |= BIT(INNER_DST_PORT);
dd74f815 5321
736fc0e1
JS
5322 if (spec->tclass)
5323 return -EOPNOTSUPP;
dd74f815 5324
736fc0e1
JS
5325 return 0;
5326}
dd74f815 5327
736fc0e1
JS
5328static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
5329 u32 *unused_tuple)
5330{
5331 if (!spec || !unused_tuple)
5332 return -EINVAL;
dd74f815 5333
736fc0e1
JS
5334 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5335 BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
dd74f815 5336
736fc0e1
JS
 5337 /* check whether src/dst ip address is used */
5338 if (!spec->ip6src[0] && !spec->ip6src[1] &&
5339 !spec->ip6src[2] && !spec->ip6src[3])
5340 *unused_tuple |= BIT(INNER_SRC_IP);
dd74f815 5341
736fc0e1
JS
5342 if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
5343 !spec->ip6dst[2] && !spec->ip6dst[3])
5344 *unused_tuple |= BIT(INNER_DST_IP);
dd74f815 5345
736fc0e1
JS
5346 if (!spec->l4_proto)
5347 *unused_tuple |= BIT(INNER_IP_PROTO);
dd74f815 5348
736fc0e1
JS
5349 if (spec->tclass)
5350 return -EOPNOTSUPP;
dd74f815 5351
736fc0e1 5352 if (spec->l4_4_bytes)
dd74f815 5353 return -EOPNOTSUPP;
dd74f815 5354
736fc0e1
JS
5355 return 0;
5356}
5357
5358static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
5359{
5360 if (!spec || !unused_tuple)
5361 return -EINVAL;
5362
5363 *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5364 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5365 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5366
5367 if (is_zero_ether_addr(spec->h_source))
5368 *unused_tuple |= BIT(INNER_SRC_MAC);
5369
5370 if (is_zero_ether_addr(spec->h_dest))
5371 *unused_tuple |= BIT(INNER_DST_MAC);
5372
5373 if (!spec->h_proto)
5374 *unused_tuple |= BIT(INNER_ETH_TYPE);
5375
5376 return 0;
5377}
5378
5379static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
5380 struct ethtool_rx_flow_spec *fs,
5381 u32 *unused_tuple)
5382{
0b4bdc55 5383 if (fs->flow_type & FLOW_EXT) {
a3ca5e90
GL
5384 if (fs->h_ext.vlan_etype) {
5385 dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
dd74f815 5386 return -EOPNOTSUPP;
a3ca5e90
GL
5387 }
5388
dd74f815 5389 if (!fs->h_ext.vlan_tci)
736fc0e1 5390 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
dd74f815 5391
736fc0e1 5392 if (fs->m_ext.vlan_tci &&
a3ca5e90
GL
5393 be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
5394 dev_err(&hdev->pdev->dev,
5395 "failed to config vlan_tci, invalid vlan_tci: %u, max is %u.\n",
5396 ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
736fc0e1 5397 return -EINVAL;
a3ca5e90 5398 }
dd74f815 5399 } else {
736fc0e1 5400 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
dd74f815
JS
5401 }
5402
5403 if (fs->flow_type & FLOW_MAC_EXT) {
16505f87 5404 if (hdev->fd_cfg.fd_mode !=
a3ca5e90
GL
5405 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5406 dev_err(&hdev->pdev->dev,
5407 "FLOW_MAC_EXT is not supported in current fd mode!\n");
dd74f815 5408 return -EOPNOTSUPP;
a3ca5e90 5409 }
dd74f815
JS
5410
5411 if (is_zero_ether_addr(fs->h_ext.h_dest))
736fc0e1 5412 *unused_tuple |= BIT(INNER_DST_MAC);
dd74f815 5413 else
0b4bdc55 5414 *unused_tuple &= ~BIT(INNER_DST_MAC);
dd74f815
JS
5415 }
5416
5417 return 0;
5418}
5419
736fc0e1
JS
5420static int hclge_fd_check_spec(struct hclge_dev *hdev,
5421 struct ethtool_rx_flow_spec *fs,
5422 u32 *unused_tuple)
5423{
16505f87 5424 u32 flow_type;
736fc0e1
JS
5425 int ret;
5426
a3ca5e90
GL
5427 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5428 dev_err(&hdev->pdev->dev,
5429 "failed to config fd rules, invalid rule location: %u, max is %u\n.",
5430 fs->location,
5431 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
736fc0e1 5432 return -EINVAL;
a3ca5e90 5433 }
736fc0e1 5434
736fc0e1
JS
5435 if ((fs->flow_type & FLOW_EXT) &&
5436 (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5437 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5438 return -EOPNOTSUPP;
5439 }
5440
16505f87
GL
5441 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5442 switch (flow_type) {
736fc0e1
JS
5443 case SCTP_V4_FLOW:
5444 case TCP_V4_FLOW:
5445 case UDP_V4_FLOW:
5446 ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
5447 unused_tuple);
5448 break;
5449 case IP_USER_FLOW:
5450 ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
5451 unused_tuple);
5452 break;
5453 case SCTP_V6_FLOW:
5454 case TCP_V6_FLOW:
5455 case UDP_V6_FLOW:
5456 ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
5457 unused_tuple);
5458 break;
5459 case IPV6_USER_FLOW:
5460 ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
5461 unused_tuple);
5462 break;
5463 case ETHER_FLOW:
5464 if (hdev->fd_cfg.fd_mode !=
5465 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5466 dev_err(&hdev->pdev->dev,
5467 "ETHER_FLOW is not supported in current fd mode!\n");
5468 return -EOPNOTSUPP;
5469 }
5470
5471 ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
5472 unused_tuple);
5473 break;
5474 default:
a3ca5e90
GL
5475 dev_err(&hdev->pdev->dev,
5476 "unsupported protocol type, protocol type = %#x\n",
5477 flow_type);
736fc0e1
JS
5478 return -EOPNOTSUPP;
5479 }
5480
a3ca5e90
GL
5481 if (ret) {
5482 dev_err(&hdev->pdev->dev,
5483 "failed to check flow union tuple, ret = %d\n",
5484 ret);
736fc0e1 5485 return ret;
a3ca5e90 5486 }
736fc0e1
JS
5487
5488 return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
5489}
5490
dd74f815
JS
5491static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5492{
5493 struct hclge_fd_rule *rule = NULL;
5494 struct hlist_node *node2;
5495
44122887 5496 spin_lock_bh(&hdev->fd_rule_lock);
dd74f815
JS
5497 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5498 if (rule->location >= location)
5499 break;
5500 }
5501
44122887
JS
5502 spin_unlock_bh(&hdev->fd_rule_lock);
5503
dd74f815
JS
5504 return rule && rule->location == location;
5505}
5506
44122887 5507/* make sure this is called with fd_rule_lock held */
dd74f815
JS
5508static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5509 struct hclge_fd_rule *new_rule,
5510 u16 location,
5511 bool is_add)
5512{
5513 struct hclge_fd_rule *rule = NULL, *parent = NULL;
5514 struct hlist_node *node2;
5515
5516 if (is_add && !new_rule)
5517 return -EINVAL;
5518
5519 hlist_for_each_entry_safe(rule, node2,
5520 &hdev->fd_rule_list, rule_node) {
5521 if (rule->location >= location)
5522 break;
5523 parent = rule;
5524 }
5525
5526 if (rule && rule->location == location) {
5527 hlist_del(&rule->rule_node);
5528 kfree(rule);
5529 hdev->hclge_fd_rule_num--;
5530
44122887
JS
5531 if (!is_add) {
5532 if (!hdev->hclge_fd_rule_num)
5533 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5534 clear_bit(location, hdev->fd_bmap);
dd74f815 5535
44122887
JS
5536 return 0;
5537 }
dd74f815
JS
5538 } else if (!is_add) {
5539 dev_err(&hdev->pdev->dev,
adcf738b 5540 "delete fail, rule %u does not exist\n",
dd74f815
JS
5541 location);
5542 return -EINVAL;
5543 }
5544
5545 INIT_HLIST_NODE(&new_rule->rule_node);
5546
5547 if (parent)
5548 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5549 else
5550 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5551
44122887 5552 set_bit(location, hdev->fd_bmap);
dd74f815 5553 hdev->hclge_fd_rule_num++;
44122887 5554 hdev->fd_active_type = new_rule->rule_type;
dd74f815
JS
5555
5556 return 0;
5557}
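/* Editorial sketch in plain C (the driver uses the kernel hlist helpers):
 * the fd rule list above is kept sorted by 'location'. The insertion
 * pattern is to walk the list, remember the last node whose location is
 * still smaller ('parent'), and link the new node behind it, or at the
 * head when no smaller node exists. Replacing an entry that already has
 * the same location is omitted here for brevity.
 */
#include <stddef.h>

struct ex_rule {
	unsigned int location;
	struct ex_rule *next;
};

static void ex_insert_sorted(struct ex_rule **head, struct ex_rule *new_rule)
{
	struct ex_rule *cur = *head, *parent = NULL;

	while (cur && cur->location < new_rule->location) {
		parent = cur;
		cur = cur->next;
	}

	if (parent) {
		new_rule->next = parent->next;
		parent->next = new_rule;
	} else {
		new_rule->next = *head;
		*head = new_rule;
	}
}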
5558
5559static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5560 struct ethtool_rx_flow_spec *fs,
5561 struct hclge_fd_rule *rule)
5562{
5563 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5564
5565 switch (flow_type) {
5566 case SCTP_V4_FLOW:
5567 case TCP_V4_FLOW:
5568 case UDP_V4_FLOW:
e91e388c 5569 rule->tuples.src_ip[IPV4_INDEX] =
dd74f815 5570 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
e91e388c 5571 rule->tuples_mask.src_ip[IPV4_INDEX] =
dd74f815
JS
5572 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5573
e91e388c 5574 rule->tuples.dst_ip[IPV4_INDEX] =
dd74f815 5575 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
e91e388c 5576 rule->tuples_mask.dst_ip[IPV4_INDEX] =
dd74f815
JS
5577 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5578
5579 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5580 rule->tuples_mask.src_port =
5581 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5582
5583 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5584 rule->tuples_mask.dst_port =
5585 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5586
5587 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5588 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5589
5590 rule->tuples.ether_proto = ETH_P_IP;
5591 rule->tuples_mask.ether_proto = 0xFFFF;
5592
5593 break;
5594 case IP_USER_FLOW:
e91e388c 5595 rule->tuples.src_ip[IPV4_INDEX] =
dd74f815 5596 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
e91e388c 5597 rule->tuples_mask.src_ip[IPV4_INDEX] =
dd74f815
JS
5598 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5599
e91e388c 5600 rule->tuples.dst_ip[IPV4_INDEX] =
dd74f815 5601 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
e91e388c 5602 rule->tuples_mask.dst_ip[IPV4_INDEX] =
dd74f815
JS
5603 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5604
5605 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5606 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5607
5608 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5609 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5610
5611 rule->tuples.ether_proto = ETH_P_IP;
5612 rule->tuples_mask.ether_proto = 0xFFFF;
5613
5614 break;
5615 case SCTP_V6_FLOW:
5616 case TCP_V6_FLOW:
5617 case UDP_V6_FLOW:
5618 be32_to_cpu_array(rule->tuples.src_ip,
e91e388c 5619 fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
dd74f815 5620 be32_to_cpu_array(rule->tuples_mask.src_ip,
e91e388c 5621 fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
dd74f815
JS
5622
5623 be32_to_cpu_array(rule->tuples.dst_ip,
e91e388c 5624 fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
dd74f815 5625 be32_to_cpu_array(rule->tuples_mask.dst_ip,
e91e388c 5626 fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
dd74f815
JS
5627
5628 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5629 rule->tuples_mask.src_port =
5630 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5631
5632 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5633 rule->tuples_mask.dst_port =
5634 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5635
5636 rule->tuples.ether_proto = ETH_P_IPV6;
5637 rule->tuples_mask.ether_proto = 0xFFFF;
5638
5639 break;
5640 case IPV6_USER_FLOW:
5641 be32_to_cpu_array(rule->tuples.src_ip,
e91e388c 5642 fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
dd74f815 5643 be32_to_cpu_array(rule->tuples_mask.src_ip,
e91e388c 5644 fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
dd74f815
JS
5645
5646 be32_to_cpu_array(rule->tuples.dst_ip,
e91e388c 5647 fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
dd74f815 5648 be32_to_cpu_array(rule->tuples_mask.dst_ip,
e91e388c 5649 fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
dd74f815
JS
5650
5651 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5652 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5653
5654 rule->tuples.ether_proto = ETH_P_IPV6;
5655 rule->tuples_mask.ether_proto = 0xFFFF;
5656
5657 break;
5658 case ETHER_FLOW:
5659 ether_addr_copy(rule->tuples.src_mac,
5660 fs->h_u.ether_spec.h_source);
5661 ether_addr_copy(rule->tuples_mask.src_mac,
5662 fs->m_u.ether_spec.h_source);
5663
5664 ether_addr_copy(rule->tuples.dst_mac,
5665 fs->h_u.ether_spec.h_dest);
5666 ether_addr_copy(rule->tuples_mask.dst_mac,
5667 fs->m_u.ether_spec.h_dest);
5668
5669 rule->tuples.ether_proto =
5670 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5671 rule->tuples_mask.ether_proto =
5672 be16_to_cpu(fs->m_u.ether_spec.h_proto);
5673
5674 break;
5675 default:
5676 return -EOPNOTSUPP;
5677 }
5678
5679 switch (flow_type) {
5680 case SCTP_V4_FLOW:
5681 case SCTP_V6_FLOW:
5682 rule->tuples.ip_proto = IPPROTO_SCTP;
5683 rule->tuples_mask.ip_proto = 0xFF;
5684 break;
5685 case TCP_V4_FLOW:
5686 case TCP_V6_FLOW:
5687 rule->tuples.ip_proto = IPPROTO_TCP;
5688 rule->tuples_mask.ip_proto = 0xFF;
5689 break;
5690 case UDP_V4_FLOW:
5691 case UDP_V6_FLOW:
5692 rule->tuples.ip_proto = IPPROTO_UDP;
5693 rule->tuples_mask.ip_proto = 0xFF;
5694 break;
5695 default:
5696 break;
5697 }
5698
0b4bdc55 5699 if (fs->flow_type & FLOW_EXT) {
dd74f815
JS
5700 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5701 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5702 }
5703
5704 if (fs->flow_type & FLOW_MAC_EXT) {
5705 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5706 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5707 }
5708
5709 return 0;
5710}
5711
44122887
JS
5712/* make sure this is called with fd_rule_lock held */
5713static int hclge_fd_config_rule(struct hclge_dev *hdev,
5714 struct hclge_fd_rule *rule)
5715{
5716 int ret;
5717
5718 if (!rule) {
5719 dev_err(&hdev->pdev->dev,
5720 "The flow director rule is NULL\n");
5721 return -EINVAL;
5722 }
5723
 5724 /* it never fails here, so there is no need to check the return value */
5725 hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5726
5727 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5728 if (ret)
5729 goto clear_rule;
5730
5731 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5732 if (ret)
5733 goto clear_rule;
5734
5735 return 0;
5736
5737clear_rule:
5738 hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5739 return ret;
5740}
5741
dd74f815
JS
5742static int hclge_add_fd_entry(struct hnae3_handle *handle,
5743 struct ethtool_rxnfc *cmd)
5744{
5745 struct hclge_vport *vport = hclge_get_vport(handle);
5746 struct hclge_dev *hdev = vport->back;
5747 u16 dst_vport_id = 0, q_index = 0;
5748 struct ethtool_rx_flow_spec *fs;
5749 struct hclge_fd_rule *rule;
5750 u32 unused = 0;
5751 u8 action;
5752 int ret;
5753
a3ca5e90
GL
5754 if (!hnae3_dev_fd_supported(hdev)) {
5755 dev_err(&hdev->pdev->dev,
5756 "flow table director is not supported\n");
dd74f815 5757 return -EOPNOTSUPP;
a3ca5e90 5758 }
dd74f815 5759
9abeb7d8 5760 if (!hdev->fd_en) {
a3ca5e90
GL
5761 dev_err(&hdev->pdev->dev,
5762 "please enable flow director first\n");
dd74f815
JS
5763 return -EOPNOTSUPP;
5764 }
5765
5766 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5767
5768 ret = hclge_fd_check_spec(hdev, fs, &unused);
a3ca5e90 5769 if (ret)
dd74f815 5770 return ret;
dd74f815
JS
5771
5772 if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5773 action = HCLGE_FD_ACTION_DROP_PACKET;
5774 } else {
5775 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5776 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5777 u16 tqps;
5778
0285dbae
JS
5779 if (vf > hdev->num_req_vfs) {
5780 dev_err(&hdev->pdev->dev,
adcf738b 5781 "Error: vf id (%u) > max vf num (%u)\n",
0285dbae
JS
5782 vf, hdev->num_req_vfs);
5783 return -EINVAL;
5784 }
5785
dd74f815
JS
5786 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5787 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5788
5789 if (ring >= tqps) {
5790 dev_err(&hdev->pdev->dev,
adcf738b 5791 "Error: queue id (%u) > max tqp num (%u)\n",
dd74f815
JS
5792 ring, tqps - 1);
5793 return -EINVAL;
5794 }
5795
dd74f815
JS
5796 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5797 q_index = ring;
5798 }
5799
5800 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5801 if (!rule)
5802 return -ENOMEM;
5803
5804 ret = hclge_fd_get_tuple(hdev, fs, rule);
44122887
JS
5805 if (ret) {
5806 kfree(rule);
5807 return ret;
5808 }
dd74f815
JS
5809
5810 rule->flow_type = fs->flow_type;
dd74f815
JS
5811 rule->location = fs->location;
5812 rule->unused_tuple = unused;
5813 rule->vf_id = dst_vport_id;
5814 rule->queue_id = q_index;
5815 rule->action = action;
44122887 5816 rule->rule_type = HCLGE_FD_EP_ACTIVE;
dd74f815 5817
d93ed94f
JS
 5818 /* to avoid rule conflicts, we need to clear all arfs rules when the
 5819 * user configures a rule via ethtool
5820 */
5821 hclge_clear_arfs_rules(handle);
5822
44122887
JS
5823 spin_lock_bh(&hdev->fd_rule_lock);
5824 ret = hclge_fd_config_rule(hdev, rule);
dd74f815 5825
44122887 5826 spin_unlock_bh(&hdev->fd_rule_lock);
dd74f815 5827
dd74f815
JS
5828 return ret;
5829}
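/* Editorial sketch only: hclge_add_fd_entry() is reached from user space
 * through ethtool's ntuple interface, e.g. something along the lines of
 *   ethtool -N <dev> flow-type tcp4 src-ip 192.168.1.1 dst-port 80 action 3
 * The userspace snippet below (hypothetical helper name) shows roughly
 * what such a request looks like as an ethtool_rx_flow_spec before the
 * driver sees it: TCP over IPv4, matching on source IP and destination
 * port, steered to rx queue 3.
 */
#include <linux/ethtool.h>
#include <arpa/inet.h>
#include <string.h>

static void ex_fill_tcp4_rule(struct ethtool_rx_flow_spec *fs)
{
	memset(fs, 0, sizeof(*fs));

	fs->flow_type = TCP_V4_FLOW;
	fs->location = 0;			/* rule index in the table */
	fs->ring_cookie = 3;			/* forward to rx queue 3 */

	fs->h_u.tcp_ip4_spec.ip4src = inet_addr("192.168.1.1");
	fs->m_u.tcp_ip4_spec.ip4src = htonl(0xffffffff);

	fs->h_u.tcp_ip4_spec.pdst = htons(80);
	fs->m_u.tcp_ip4_spec.pdst = htons(0xffff);
}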
5830
5831static int hclge_del_fd_entry(struct hnae3_handle *handle,
5832 struct ethtool_rxnfc *cmd)
5833{
5834 struct hclge_vport *vport = hclge_get_vport(handle);
5835 struct hclge_dev *hdev = vport->back;
5836 struct ethtool_rx_flow_spec *fs;
5837 int ret;
5838
5839 if (!hnae3_dev_fd_supported(hdev))
5840 return -EOPNOTSUPP;
5841
5842 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5843
5844 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5845 return -EINVAL;
5846
5847 if (!hclge_fd_rule_exist(hdev, fs->location)) {
5848 dev_err(&hdev->pdev->dev,
39edaf24 5849 "Delete fail, rule %u does not exist\n", fs->location);
dd74f815
JS
5850 return -ENOENT;
5851 }
5852
9b2f3477
WL
5853 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
5854 NULL, false);
dd74f815
JS
5855 if (ret)
5856 return ret;
5857
44122887
JS
5858 spin_lock_bh(&hdev->fd_rule_lock);
5859 ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5860
5861 spin_unlock_bh(&hdev->fd_rule_lock);
5862
5863 return ret;
dd74f815
JS
5864}
5865
6871af29
JS
5866static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5867 bool clear_list)
5868{
5869 struct hclge_vport *vport = hclge_get_vport(handle);
5870 struct hclge_dev *hdev = vport->back;
5871 struct hclge_fd_rule *rule;
5872 struct hlist_node *node;
44122887 5873 u16 location;
6871af29
JS
5874
5875 if (!hnae3_dev_fd_supported(hdev))
5876 return;
5877
44122887
JS
5878 spin_lock_bh(&hdev->fd_rule_lock);
5879 for_each_set_bit(location, hdev->fd_bmap,
5880 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5881 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5882 NULL, false);
5883
6871af29
JS
5884 if (clear_list) {
5885 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5886 rule_node) {
6871af29
JS
5887 hlist_del(&rule->rule_node);
5888 kfree(rule);
6871af29 5889 }
44122887
JS
5890 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5891 hdev->hclge_fd_rule_num = 0;
5892 bitmap_zero(hdev->fd_bmap,
5893 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6871af29 5894 }
44122887
JS
5895
5896 spin_unlock_bh(&hdev->fd_rule_lock);
6871af29
JS
5897}
5898
5899static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5900{
5901 struct hclge_vport *vport = hclge_get_vport(handle);
5902 struct hclge_dev *hdev = vport->back;
5903 struct hclge_fd_rule *rule;
5904 struct hlist_node *node;
5905 int ret;
5906
65e41e7e
HT
5907 /* Return ok here, because reset error handling will check this
5908 * return value. If error is returned here, the reset process will
5909 * fail.
5910 */
6871af29 5911 if (!hnae3_dev_fd_supported(hdev))
65e41e7e 5912 return 0;
6871af29 5913
8edc2285 5914 /* if fd is disabled, the rules should not be restored during reset */
9abeb7d8 5915 if (!hdev->fd_en)
8edc2285
JS
5916 return 0;
5917
44122887 5918 spin_lock_bh(&hdev->fd_rule_lock);
6871af29
JS
5919 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5920 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5921 if (!ret)
5922 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5923
5924 if (ret) {
5925 dev_warn(&hdev->pdev->dev,
adcf738b 5926 "Restore rule %u failed, remove it\n",
6871af29 5927 rule->location);
44122887 5928 clear_bit(rule->location, hdev->fd_bmap);
6871af29
JS
5929 hlist_del(&rule->rule_node);
5930 kfree(rule);
5931 hdev->hclge_fd_rule_num--;
5932 }
5933 }
44122887
JS
5934
5935 if (hdev->hclge_fd_rule_num)
5936 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5937
5938 spin_unlock_bh(&hdev->fd_rule_lock);
5939
6871af29
JS
5940 return 0;
5941}
5942
05c2314f
JS
5943static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5944 struct ethtool_rxnfc *cmd)
5945{
5946 struct hclge_vport *vport = hclge_get_vport(handle);
5947 struct hclge_dev *hdev = vport->back;
5948
5949 if (!hnae3_dev_fd_supported(hdev))
5950 return -EOPNOTSUPP;
5951
5952 cmd->rule_cnt = hdev->hclge_fd_rule_num;
5953 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5954
5955 return 0;
5956}
5957
fa663c09
JS
5958static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
5959 struct ethtool_tcpip4_spec *spec,
5960 struct ethtool_tcpip4_spec *spec_mask)
5961{
5962 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5963 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
5964 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5965
5966 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5967 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
5968 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5969
5970 spec->psrc = cpu_to_be16(rule->tuples.src_port);
5971 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5972 0 : cpu_to_be16(rule->tuples_mask.src_port);
5973
5974 spec->pdst = cpu_to_be16(rule->tuples.dst_port);
5975 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
5976 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5977
5978 spec->tos = rule->tuples.ip_tos;
5979 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
5980 0 : rule->tuples_mask.ip_tos;
5981}
5982
5983static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
5984 struct ethtool_usrip4_spec *spec,
5985 struct ethtool_usrip4_spec *spec_mask)
5986{
5987 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5988 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
5989 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5990
5991 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5992 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
5993 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5994
5995 spec->tos = rule->tuples.ip_tos;
5996 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
5997 0 : rule->tuples_mask.ip_tos;
5998
5999 spec->proto = rule->tuples.ip_proto;
6000 spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6001 0 : rule->tuples_mask.ip_proto;
6002
6003 spec->ip_ver = ETH_RX_NFC_IP4;
6004}
6005
6006static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6007 struct ethtool_tcpip6_spec *spec,
6008 struct ethtool_tcpip6_spec *spec_mask)
6009{
6010 cpu_to_be32_array(spec->ip6src,
6011 rule->tuples.src_ip, IPV6_SIZE);
6012 cpu_to_be32_array(spec->ip6dst,
6013 rule->tuples.dst_ip, IPV6_SIZE);
6014 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6015 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6016 else
6017 cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6018 IPV6_SIZE);
6019
6020 if (rule->unused_tuple & BIT(INNER_DST_IP))
6021 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6022 else
6023 cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6024 IPV6_SIZE);
6025
6026 spec->psrc = cpu_to_be16(rule->tuples.src_port);
6027 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6028 0 : cpu_to_be16(rule->tuples_mask.src_port);
6029
6030 spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6031 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6032 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6033}
6034
6035static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6036 struct ethtool_usrip6_spec *spec,
6037 struct ethtool_usrip6_spec *spec_mask)
6038{
6039 cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6040 cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6041 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6042 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6043 else
6044 cpu_to_be32_array(spec_mask->ip6src,
6045 rule->tuples_mask.src_ip, IPV6_SIZE);
6046
6047 if (rule->unused_tuple & BIT(INNER_DST_IP))
6048 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6049 else
6050 cpu_to_be32_array(spec_mask->ip6dst,
6051 rule->tuples_mask.dst_ip, IPV6_SIZE);
6052
6053 spec->l4_proto = rule->tuples.ip_proto;
6054 spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6055 0 : rule->tuples_mask.ip_proto;
6056}
6057
6058static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6059 struct ethhdr *spec,
6060 struct ethhdr *spec_mask)
6061{
6062 ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6063 ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6064
6065 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6066 eth_zero_addr(spec_mask->h_source);
6067 else
6068 ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6069
6070 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6071 eth_zero_addr(spec_mask->h_dest);
6072 else
6073 ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6074
6075 spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6076 spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6077 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6078}
6079
6080static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
6081 struct hclge_fd_rule *rule)
6082{
6083 if (fs->flow_type & FLOW_EXT) {
6084 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6085 fs->m_ext.vlan_tci =
6086 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6087 cpu_to_be16(VLAN_VID_MASK) :
6088 cpu_to_be16(rule->tuples_mask.vlan_tag1);
6089 }
6090
6091 if (fs->flow_type & FLOW_MAC_EXT) {
6092 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6093 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6094 eth_zero_addr(fs->m_u.ether_spec.h_dest);
6095 else
6096 ether_addr_copy(fs->m_u.ether_spec.h_dest,
6097 rule->tuples_mask.dst_mac);
6098 }
6099}
6100
05c2314f
JS
6101static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
6102 struct ethtool_rxnfc *cmd)
6103{
6104 struct hclge_vport *vport = hclge_get_vport(handle);
6105 struct hclge_fd_rule *rule = NULL;
6106 struct hclge_dev *hdev = vport->back;
6107 struct ethtool_rx_flow_spec *fs;
6108 struct hlist_node *node2;
6109
6110 if (!hnae3_dev_fd_supported(hdev))
6111 return -EOPNOTSUPP;
6112
6113 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6114
44122887
JS
6115 spin_lock_bh(&hdev->fd_rule_lock);
6116
05c2314f
JS
6117 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
6118 if (rule->location >= fs->location)
6119 break;
6120 }
6121
44122887
JS
6122 if (!rule || fs->location != rule->location) {
6123 spin_unlock_bh(&hdev->fd_rule_lock);
6124
05c2314f 6125 return -ENOENT;
44122887 6126 }
05c2314f
JS
6127
6128 fs->flow_type = rule->flow_type;
6129 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
6130 case SCTP_V4_FLOW:
6131 case TCP_V4_FLOW:
6132 case UDP_V4_FLOW:
fa663c09
JS
6133 hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
6134 &fs->m_u.tcp_ip4_spec);
05c2314f
JS
6135 break;
6136 case IP_USER_FLOW:
fa663c09
JS
6137 hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
6138 &fs->m_u.usr_ip4_spec);
05c2314f
JS
6139 break;
6140 case SCTP_V6_FLOW:
6141 case TCP_V6_FLOW:
6142 case UDP_V6_FLOW:
fa663c09
JS
6143 hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
6144 &fs->m_u.tcp_ip6_spec);
05c2314f
JS
6145 break;
6146 case IPV6_USER_FLOW:
fa663c09
JS
6147 hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
6148 &fs->m_u.usr_ip6_spec);
05c2314f 6149 break;
fa663c09
JS
 6150 /* The flow type of the fd rule has been checked before adding it to the
 6151 * rule list. As the other flow types have been handled above, it must be
 6152 * ETHER_FLOW for the default case
6153 */
05c2314f 6154 default:
fa663c09
JS
6155 hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
6156 &fs->m_u.ether_spec);
6157 break;
05c2314f
JS
6158 }
6159
fa663c09 6160 hclge_fd_get_ext_info(fs, rule);
05c2314f
JS
6161
6162 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6163 fs->ring_cookie = RX_CLS_FLOW_DISC;
6164 } else {
6165 u64 vf_id;
6166
6167 fs->ring_cookie = rule->queue_id;
6168 vf_id = rule->vf_id;
6169 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6170 fs->ring_cookie |= vf_id;
6171 }
6172
44122887
JS
6173 spin_unlock_bh(&hdev->fd_rule_lock);
6174
05c2314f
JS
6175 return 0;
6176}
6177
6178static int hclge_get_all_rules(struct hnae3_handle *handle,
6179 struct ethtool_rxnfc *cmd, u32 *rule_locs)
6180{
6181 struct hclge_vport *vport = hclge_get_vport(handle);
6182 struct hclge_dev *hdev = vport->back;
6183 struct hclge_fd_rule *rule;
6184 struct hlist_node *node2;
6185 int cnt = 0;
6186
6187 if (!hnae3_dev_fd_supported(hdev))
6188 return -EOPNOTSUPP;
6189
6190 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6191
44122887 6192 spin_lock_bh(&hdev->fd_rule_lock);
05c2314f
JS
6193 hlist_for_each_entry_safe(rule, node2,
6194 &hdev->fd_rule_list, rule_node) {
44122887
JS
6195 if (cnt == cmd->rule_cnt) {
6196 spin_unlock_bh(&hdev->fd_rule_lock);
05c2314f 6197 return -EMSGSIZE;
44122887 6198 }
05c2314f
JS
6199
6200 rule_locs[cnt] = rule->location;
6201 cnt++;
6202 }
6203
44122887
JS
6204 spin_unlock_bh(&hdev->fd_rule_lock);
6205
05c2314f
JS
6206 cmd->rule_cnt = cnt;
6207
6208 return 0;
6209}
6210
d93ed94f
JS
6211static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6212 struct hclge_fd_rule_tuples *tuples)
6213{
47327c93
GH
6214#define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
6215#define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
6216
d93ed94f
JS
6217 tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6218 tuples->ip_proto = fkeys->basic.ip_proto;
6219 tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6220
6221 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6222 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6223 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6224 } else {
47327c93
GH
6225 int i;
6226
6227 for (i = 0; i < IPV6_SIZE; i++) {
6228 tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
6229 tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
6230 }
d93ed94f
JS
6231 }
6232}
6233
 6234/* traverse all rules, check whether an existing rule has the same tuples */
6235static struct hclge_fd_rule *
6236hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6237 const struct hclge_fd_rule_tuples *tuples)
6238{
6239 struct hclge_fd_rule *rule = NULL;
6240 struct hlist_node *node;
6241
6242 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6243 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6244 return rule;
6245 }
6246
6247 return NULL;
6248}
6249
6250static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6251 struct hclge_fd_rule *rule)
6252{
6253 rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6254 BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6255 BIT(INNER_SRC_PORT);
6256 rule->action = 0;
6257 rule->vf_id = 0;
6258 rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6259 if (tuples->ether_proto == ETH_P_IP) {
6260 if (tuples->ip_proto == IPPROTO_TCP)
6261 rule->flow_type = TCP_V4_FLOW;
6262 else
6263 rule->flow_type = UDP_V4_FLOW;
6264 } else {
6265 if (tuples->ip_proto == IPPROTO_TCP)
6266 rule->flow_type = TCP_V6_FLOW;
6267 else
6268 rule->flow_type = UDP_V6_FLOW;
6269 }
6270 memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6271 memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
6272}
6273
6274static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6275 u16 flow_id, struct flow_keys *fkeys)
6276{
d93ed94f
JS
6277 struct hclge_vport *vport = hclge_get_vport(handle);
6278 struct hclge_fd_rule_tuples new_tuples;
6279 struct hclge_dev *hdev = vport->back;
6280 struct hclge_fd_rule *rule;
6281 u16 tmp_queue_id;
6282 u16 bit_id;
6283 int ret;
6284
6285 if (!hnae3_dev_fd_supported(hdev))
6286 return -EOPNOTSUPP;
6287
6288 memset(&new_tuples, 0, sizeof(new_tuples));
6289 hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6290
6291 spin_lock_bh(&hdev->fd_rule_lock);
6292
 6293 /* when there is already an fd rule added by the user,
 6294 * arfs should not work
6295 */
6296 if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6297 spin_unlock_bh(&hdev->fd_rule_lock);
d93ed94f
JS
6298 return -EOPNOTSUPP;
6299 }
6300
 6301 /* check whether a flow director filter already exists for this flow;
 6302 * if not, create a new filter for it;
 6303 * if a filter exists with a different queue id, modify the filter;
 6304 * if a filter exists with the same queue id, do nothing
6305 */
6306 rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6307 if (!rule) {
6308 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6309 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6310 spin_unlock_bh(&hdev->fd_rule_lock);
d93ed94f
JS
6311 return -ENOSPC;
6312 }
6313
d659f9f6 6314 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
d93ed94f
JS
6315 if (!rule) {
6316 spin_unlock_bh(&hdev->fd_rule_lock);
d93ed94f
JS
6317 return -ENOMEM;
6318 }
6319
6320 set_bit(bit_id, hdev->fd_bmap);
6321 rule->location = bit_id;
6322 rule->flow_id = flow_id;
6323 rule->queue_id = queue_id;
6324 hclge_fd_build_arfs_rule(&new_tuples, rule);
6325 ret = hclge_fd_config_rule(hdev, rule);
6326
6327 spin_unlock_bh(&hdev->fd_rule_lock);
6328
6329 if (ret)
6330 return ret;
6331
6332 return rule->location;
6333 }
6334
6335 spin_unlock_bh(&hdev->fd_rule_lock);
6336
6337 if (rule->queue_id == queue_id)
6338 return rule->location;
6339
6340 tmp_queue_id = rule->queue_id;
6341 rule->queue_id = queue_id;
6342 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6343 if (ret) {
6344 rule->queue_id = tmp_queue_id;
6345 return ret;
6346 }
6347
6348 return rule->location;
d93ed94f
JS
6349}
6350
6351static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6352{
6353#ifdef CONFIG_RFS_ACCEL
6354 struct hnae3_handle *handle = &hdev->vport[0].nic;
6355 struct hclge_fd_rule *rule;
6356 struct hlist_node *node;
6357 HLIST_HEAD(del_list);
6358
6359 spin_lock_bh(&hdev->fd_rule_lock);
6360 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6361 spin_unlock_bh(&hdev->fd_rule_lock);
6362 return;
6363 }
6364 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6365 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6366 rule->flow_id, rule->location)) {
6367 hlist_del_init(&rule->rule_node);
6368 hlist_add_head(&rule->rule_node, &del_list);
6369 hdev->hclge_fd_rule_num--;
6370 clear_bit(rule->location, hdev->fd_bmap);
6371 }
6372 }
6373 spin_unlock_bh(&hdev->fd_rule_lock);
6374
6375 hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6376 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6377 rule->location, NULL, false);
6378 kfree(rule);
6379 }
6380#endif
6381}
6382
6383static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6384{
6385#ifdef CONFIG_RFS_ACCEL
6386 struct hclge_vport *vport = hclge_get_vport(handle);
6387 struct hclge_dev *hdev = vport->back;
6388
6389 if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6390 hclge_del_all_fd_entries(handle, true);
6391#endif
6392}
6393
4d60291b
HT
6394static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6395{
6396 struct hclge_vport *vport = hclge_get_vport(handle);
6397 struct hclge_dev *hdev = vport->back;
6398
6399 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6400 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6401}
6402
a4de0228
HT
6403static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
6404{
6405 struct hclge_vport *vport = hclge_get_vport(handle);
6406 struct hclge_dev *hdev = vport->back;
6407
6408 return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
6409}
6410
4d60291b
HT
6411static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6412{
6413 struct hclge_vport *vport = hclge_get_vport(handle);
6414 struct hclge_dev *hdev = vport->back;
6415
6416 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6417}
6418
6419static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6420{
6421 struct hclge_vport *vport = hclge_get_vport(handle);
6422 struct hclge_dev *hdev = vport->back;
6423
f02eb82d 6424 return hdev->rst_stats.hw_reset_done_cnt;
4d60291b
HT
6425}
6426
c17852a8
JS
6427static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
6428{
6429 struct hclge_vport *vport = hclge_get_vport(handle);
6430 struct hclge_dev *hdev = vport->back;
44122887 6431 bool clear;
c17852a8 6432
9abeb7d8 6433 hdev->fd_en = enable;
1483fa49 6434 clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
c17852a8 6435 if (!enable)
44122887 6436 hclge_del_all_fd_entries(handle, clear);
c17852a8
JS
6437 else
6438 hclge_restore_fd_entries(handle);
6439}
6440
46a3df9f
S
6441static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
6442{
6443 struct hclge_desc desc;
d44f9b63
YL
6444 struct hclge_config_mac_mode_cmd *req =
6445 (struct hclge_config_mac_mode_cmd *)desc.data;
a90bb9a5 6446 u32 loop_en = 0;
46a3df9f
S
6447 int ret;
6448
6449 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
b9a8f883
YL
6450
6451 if (enable) {
6452 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
6453 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
6454 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
6455 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
6456 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
6457 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
6458 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
6459 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
6460 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
6461 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
6462 }
6463
a90bb9a5 6464 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
46a3df9f
S
6465
6466 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6467 if (ret)
6468 dev_err(&hdev->pdev->dev,
6469 "mac enable fail, ret =%d.\n", ret);
6470}
6471
dd2956ea
YM
6472static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
6473 u8 switch_param, u8 param_mask)
6474{
6475 struct hclge_mac_vlan_switch_cmd *req;
6476 struct hclge_desc desc;
6477 u32 func_id;
6478 int ret;
6479
6480 func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
6481 req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
71c5e83b
GH
6482
6483 /* read current config parameter */
dd2956ea 6484 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
71c5e83b 6485 true);
dd2956ea
YM
6486 req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
6487 req->func_id = cpu_to_le32(func_id);
71c5e83b
GH
6488
6489 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6490 if (ret) {
6491 dev_err(&hdev->pdev->dev,
6492 "read mac vlan switch parameter fail, ret = %d\n", ret);
6493 return ret;
6494 }
6495
6496 /* modify and write new config parameter */
6497 hclge_cmd_reuse_desc(&desc, false);
6498 req->switch_param = (req->switch_param & param_mask) | switch_param;
dd2956ea
YM
6499 req->param_mask = param_mask;
6500
6501 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6502 if (ret)
6503 dev_err(&hdev->pdev->dev,
6504 "set mac vlan switch parameter fail, ret = %d\n", ret);
6505 return ret;
6506}
6507
c9765a89
YM
6508static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
6509 int link_ret)
6510{
6511#define HCLGE_PHY_LINK_STATUS_NUM 200
6512
6513 struct phy_device *phydev = hdev->hw.mac.phydev;
6514 int i = 0;
6515 int ret;
6516
6517 do {
6518 ret = phy_read_status(phydev);
6519 if (ret) {
6520 dev_err(&hdev->pdev->dev,
6521 "phy update link status fail, ret = %d\n", ret);
6522 return;
6523 }
6524
6525 if (phydev->link == link_ret)
6526 break;
6527
6528 msleep(HCLGE_LINK_STATUS_MS);
6529 } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
6530}
6531
6532static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
6533{
6534#define HCLGE_MAC_LINK_STATUS_NUM 100
6535
6536 int i = 0;
6537 int ret;
6538
6539 do {
6540 ret = hclge_get_mac_link_status(hdev);
6541 if (ret < 0)
6542 return ret;
6543 else if (ret == link_ret)
6544 return 0;
6545
6546 msleep(HCLGE_LINK_STATUS_MS);
6547 } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
6548 return -EBUSY;
6549}
6550
6551static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
6552 bool is_phy)
6553{
6554#define HCLGE_LINK_STATUS_DOWN 0
6555#define HCLGE_LINK_STATUS_UP 1
6556
6557 int link_ret;
6558
6559 link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
6560
6561 if (is_phy)
6562 hclge_phy_link_status_wait(hdev, link_ret);
6563
6564 return hclge_mac_link_status_wait(hdev, link_ret);
6565}
6566
eb66d503 6567static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
c39c4d98 6568{
c39c4d98 6569 struct hclge_config_mac_mode_cmd *req;
c39c4d98
YL
6570 struct hclge_desc desc;
6571 u32 loop_en;
6572 int ret;
6573
e4d68dae
YL
6574 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
6575 /* 1 Read out the MAC mode config at first */
6576 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
6577 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6578 if (ret) {
6579 dev_err(&hdev->pdev->dev,
6580 "mac loopback get fail, ret =%d.\n", ret);
6581 return ret;
6582 }
c39c4d98 6583
e4d68dae
YL
6584 /* 2 Then setup the loopback flag */
6585 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
e4e87715 6586 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
0f29fc23
YL
6587 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
6588 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
e4d68dae
YL
6589
6590 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
c39c4d98 6591
e4d68dae
YL
6592 /* 3 Config mac work mode with loopback flag
6593 * and its original configure parameters
6594 */
6595 hclge_cmd_reuse_desc(&desc, false);
6596 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6597 if (ret)
6598 dev_err(&hdev->pdev->dev,
6599 "mac loopback set fail, ret =%d.\n", ret);
6600 return ret;
6601}
c39c4d98 6602
1cbc662d 6603static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
4dc13b96 6604 enum hnae3_loop loop_mode)
5fd50ac3
PL
6605{
6606#define HCLGE_SERDES_RETRY_MS 10
6607#define HCLGE_SERDES_RETRY_NUM 100
350fda0a 6608
5fd50ac3
PL
6609 struct hclge_serdes_lb_cmd *req;
6610 struct hclge_desc desc;
6611 int ret, i = 0;
4dc13b96 6612 u8 loop_mode_b;
5fd50ac3 6613
d0d72bac 6614 req = (struct hclge_serdes_lb_cmd *)desc.data;
5fd50ac3
PL
6615 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
6616
4dc13b96
FL
6617 switch (loop_mode) {
6618 case HNAE3_LOOP_SERIAL_SERDES:
6619 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6620 break;
6621 case HNAE3_LOOP_PARALLEL_SERDES:
6622 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
6623 break;
6624 default:
6625 dev_err(&hdev->pdev->dev,
6626 "unsupported serdes loopback mode %d\n", loop_mode);
6627 return -ENOTSUPP;
6628 }
6629
5fd50ac3 6630 if (en) {
4dc13b96
FL
6631 req->enable = loop_mode_b;
6632 req->mask = loop_mode_b;
5fd50ac3 6633 } else {
4dc13b96 6634 req->mask = loop_mode_b;
5fd50ac3
PL
6635 }
6636
6637 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6638 if (ret) {
6639 dev_err(&hdev->pdev->dev,
6640 "serdes loopback set fail, ret = %d\n", ret);
6641 return ret;
6642 }
6643
6644 do {
6645 msleep(HCLGE_SERDES_RETRY_MS);
6646 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
6647 true);
6648 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6649 if (ret) {
6650 dev_err(&hdev->pdev->dev,
6651 "serdes loopback get, ret = %d\n", ret);
6652 return ret;
6653 }
6654 } while (++i < HCLGE_SERDES_RETRY_NUM &&
6655 !(req->result & HCLGE_CMD_SERDES_DONE_B));
6656
6657 if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
6658 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
6659 return -EBUSY;
6660 } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
6661 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
6662 return -EIO;
6663 }
1cbc662d
YM
6664 return ret;
6665}
6666
6667static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
6668 enum hnae3_loop loop_mode)
6669{
6670 int ret;
6671
6672 ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
6673 if (ret)
6674 return ret;
5fd50ac3 6675
0f29fc23 6676 hclge_cfg_mac_mode(hdev, en);
350fda0a 6677
60df7e91 6678 ret = hclge_mac_phy_link_status_wait(hdev, en, false);
c9765a89
YM
6679 if (ret)
6680 dev_err(&hdev->pdev->dev,
6681 "serdes loopback config mac mode timeout\n");
6682
6683 return ret;
6684}
350fda0a 6685
c9765a89
YM
6686static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
6687 struct phy_device *phydev)
6688{
6689 int ret;
350fda0a 6690
c9765a89
YM
6691 if (!phydev->suspended) {
6692 ret = phy_suspend(phydev);
6693 if (ret)
6694 return ret;
6695 }
6696
6697 ret = phy_resume(phydev);
6698 if (ret)
6699 return ret;
6700
6701 return phy_loopback(phydev, true);
6702}
6703
6704static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
6705 struct phy_device *phydev)
6706{
6707 int ret;
6708
6709 ret = phy_loopback(phydev, false);
6710 if (ret)
6711 return ret;
6712
6713 return phy_suspend(phydev);
6714}
6715
6716static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
6717{
6718 struct phy_device *phydev = hdev->hw.mac.phydev;
6719 int ret;
6720
6721 if (!phydev)
6722 return -ENOTSUPP;
6723
6724 if (en)
6725 ret = hclge_enable_phy_loopback(hdev, phydev);
6726 else
6727 ret = hclge_disable_phy_loopback(hdev, phydev);
6728 if (ret) {
6729 dev_err(&hdev->pdev->dev,
6730 "set phy loopback fail, ret = %d\n", ret);
6731 return ret;
6732 }
6733
6734 hclge_cfg_mac_mode(hdev, en);
6735
60df7e91 6736 ret = hclge_mac_phy_link_status_wait(hdev, en, true);
c9765a89
YM
6737 if (ret)
6738 dev_err(&hdev->pdev->dev,
6739 "phy loopback config mac mode timeout\n");
6740
6741 return ret;
5fd50ac3
PL
6742}
6743
ebaf1908 6744static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
0f29fc23
YL
6745 int stream_id, bool enable)
6746{
6747 struct hclge_desc desc;
6748 struct hclge_cfg_com_tqp_queue_cmd *req =
6749 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6750 int ret;
6751
6752 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6753 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6754 req->stream_id = cpu_to_le16(stream_id);
ebaf1908
WL
6755 if (enable)
6756 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
0f29fc23
YL
6757
6758 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6759 if (ret)
6760 dev_err(&hdev->pdev->dev,
6761 "Tqp enable fail, status =%d.\n", ret);
6762 return ret;
6763}
6764
e4d68dae
YL
6765static int hclge_set_loopback(struct hnae3_handle *handle,
6766 enum hnae3_loop loop_mode, bool en)
6767{
6768 struct hclge_vport *vport = hclge_get_vport(handle);
205a24ca 6769 struct hnae3_knic_private_info *kinfo;
e4d68dae 6770 struct hclge_dev *hdev = vport->back;
0f29fc23 6771 int i, ret;
e4d68dae 6772
dd2956ea
YM
6773 /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
6774 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
6775 * the same, the packets are looped back in the SSU. If SSU loopback
6776 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
6777 */
6778 if (hdev->pdev->revision >= 0x21) {
6779 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
6780
6781 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
6782 HCLGE_SWITCH_ALW_LPBK_MASK);
6783 if (ret)
6784 return ret;
6785 }
6786
e4d68dae 6787 switch (loop_mode) {
eb66d503
FL
6788 case HNAE3_LOOP_APP:
6789 ret = hclge_set_app_loopback(hdev, en);
c39c4d98 6790 break;
4dc13b96
FL
6791 case HNAE3_LOOP_SERIAL_SERDES:
6792 case HNAE3_LOOP_PARALLEL_SERDES:
6793 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
5fd50ac3 6794 break;
c9765a89
YM
6795 case HNAE3_LOOP_PHY:
6796 ret = hclge_set_phy_loopback(hdev, en);
6797 break;
c39c4d98
YL
6798 default:
6799 ret = -ENOTSUPP;
6800 dev_err(&hdev->pdev->dev,
6801 "loop_mode %d is not supported\n", loop_mode);
6802 break;
6803 }
6804
47ef6dec
JS
6805 if (ret)
6806 return ret;
6807
205a24ca
HT
6808 kinfo = &vport->nic.kinfo;
6809 for (i = 0; i < kinfo->num_tqps; i++) {
0f29fc23
YL
6810 ret = hclge_tqp_enable(hdev, i, 0, en);
6811 if (ret)
6812 return ret;
6813 }
46a3df9f 6814
0f29fc23 6815 return 0;
46a3df9f
S
6816}
6817
1cbc662d
YM
6818static int hclge_set_default_loopback(struct hclge_dev *hdev)
6819{
6820 int ret;
6821
6822 ret = hclge_set_app_loopback(hdev, false);
6823 if (ret)
6824 return ret;
6825
6826 ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
6827 if (ret)
6828 return ret;
6829
6830 return hclge_cfg_serdes_loopback(hdev, false,
6831 HNAE3_LOOP_PARALLEL_SERDES);
6832}
6833
46a3df9f
S
6834static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6835{
6836 struct hclge_vport *vport = hclge_get_vport(handle);
205a24ca 6837 struct hnae3_knic_private_info *kinfo;
46a3df9f
S
6838 struct hnae3_queue *queue;
6839 struct hclge_tqp *tqp;
6840 int i;
6841
205a24ca
HT
6842 kinfo = &vport->nic.kinfo;
6843 for (i = 0; i < kinfo->num_tqps; i++) {
46a3df9f
S
6844 queue = handle->kinfo.tqp[i];
6845 tqp = container_of(queue, struct hclge_tqp, q);
6846 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6847 }
6848}
6849
1c6dfe6f
YL
6850static void hclge_flush_link_update(struct hclge_dev *hdev)
6851{
6852#define HCLGE_FLUSH_LINK_TIMEOUT 100000
6853
6854 unsigned long last = hdev->serv_processed_cnt;
6855 int i = 0;
6856
6857 while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
6858 i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
6859 last == hdev->serv_processed_cnt)
6860 usleep_range(1, 1);
6861}
6862
8cdb992f
JS
6863static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6864{
6865 struct hclge_vport *vport = hclge_get_vport(handle);
6866 struct hclge_dev *hdev = vport->back;
6867
6868 if (enable) {
a9775bb6 6869 hclge_task_schedule(hdev, 0);
8cdb992f 6870 } else {
1c6dfe6f 6871 /* Set the DOWN flag here to disable link updating */
7be1b9f3 6872 set_bit(HCLGE_STATE_DOWN, &hdev->state);
1c6dfe6f
YL
6873
6874 /* flush memory to make sure DOWN is seen by service task */
6875 smp_mb__before_atomic();
6876 hclge_flush_link_update(hdev);
8cdb992f
JS
6877 }
6878}
6879
46a3df9f
S
6880static int hclge_ae_start(struct hnae3_handle *handle)
6881{
6882 struct hclge_vport *vport = hclge_get_vport(handle);
6883 struct hclge_dev *hdev = vport->back;
46a3df9f 6884
46a3df9f
S
6885 /* mac enable */
6886 hclge_cfg_mac_mode(hdev, true);
6887 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
be8d8cdb 6888 hdev->hw.mac.link = 0;
46a3df9f 6889
b50ae26c
PL
6890 /* reset tqp stats */
6891 hclge_reset_tqp_stats(handle);
6892
b01b7cf1 6893 hclge_mac_start_phy(hdev);
46a3df9f 6894
46a3df9f
S
6895 return 0;
6896}
6897
6898static void hclge_ae_stop(struct hnae3_handle *handle)
6899{
6900 struct hclge_vport *vport = hclge_get_vport(handle);
6901 struct hclge_dev *hdev = vport->back;
39cfbc9c 6902 int i;
46a3df9f 6903
2f7e4896
FL
6904 set_bit(HCLGE_STATE_DOWN, &hdev->state);
6905
d93ed94f
JS
6906 hclge_clear_arfs_rules(handle);
6907
35d93a30
HT
6908 /* If it is not PF reset, the firmware will disable the MAC,
 6909 * so we only need to stop the phy here.
6910 */
6911 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6912 hdev->reset_type != HNAE3_FUNC_RESET) {
9617f668 6913 hclge_mac_stop_phy(hdev);
ed8fb4b2 6914 hclge_update_link_status(hdev);
b50ae26c 6915 return;
9617f668 6916 }
b50ae26c 6917
39cfbc9c
HT
6918 for (i = 0; i < handle->kinfo.num_tqps; i++)
6919 hclge_reset_tqp(handle, i);
6920
20981a1e
HT
6921 hclge_config_mac_tnl_int(hdev, false);
6922
46a3df9f
S
6923 /* Mac disable */
6924 hclge_cfg_mac_mode(hdev, false);
6925
6926 hclge_mac_stop_phy(hdev);
6927
6928 /* reset tqp stats */
6929 hclge_reset_tqp_stats(handle);
f30dfddc 6930 hclge_update_link_status(hdev);
46a3df9f
S
6931}
6932
a6d818e3
YL
6933int hclge_vport_start(struct hclge_vport *vport)
6934{
ee4bcd3b
JS
6935 struct hclge_dev *hdev = vport->back;
6936
a6d818e3
YL
6937 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6938 vport->last_active_jiffies = jiffies;
ee4bcd3b 6939
039ba863
JS
6940 if (test_bit(vport->vport_id, hdev->vport_config_block)) {
6941 if (vport->vport_id) {
6942 hclge_restore_mac_table_common(vport);
6943 hclge_restore_vport_vlan_table(vport);
6944 } else {
6945 hclge_restore_hw_table(hdev);
6946 }
6947 }
ee4bcd3b
JS
6948
6949 clear_bit(vport->vport_id, hdev->vport_config_block);
6950
a6d818e3
YL
6951 return 0;
6952}
6953
6954void hclge_vport_stop(struct hclge_vport *vport)
6955{
6956 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6957}
6958
6959static int hclge_client_start(struct hnae3_handle *handle)
6960{
6961 struct hclge_vport *vport = hclge_get_vport(handle);
6962
6963 return hclge_vport_start(vport);
6964}
6965
6966static void hclge_client_stop(struct hnae3_handle *handle)
6967{
6968 struct hclge_vport *vport = hclge_get_vport(handle);
6969
6970 hclge_vport_stop(vport);
6971}
6972
46a3df9f
S
6973static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6974 u16 cmdq_resp, u8 resp_code,
6975 enum hclge_mac_vlan_tbl_opcode op)
6976{
6977 struct hclge_dev *hdev = vport->back;
46a3df9f
S
6978
6979 if (cmdq_resp) {
6980 dev_err(&hdev->pdev->dev,
adcf738b 6981 "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
46a3df9f
S
6982 cmdq_resp);
6983 return -EIO;
6984 }
6985
6986 if (op == HCLGE_MAC_VLAN_ADD) {
c631c696 6987 if (!resp_code || resp_code == 1)
6e4139f6 6988 return 0;
c631c696
JS
6989 else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
6990 resp_code == HCLGE_ADD_MC_OVERFLOW)
6e4139f6 6991 return -ENOSPC;
6e4139f6
JS
6992
6993 dev_err(&hdev->pdev->dev,
6994 "add mac addr failed for undefined, code=%u.\n",
6995 resp_code);
6996 return -EIO;
46a3df9f
S
6997 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
6998 if (!resp_code) {
6e4139f6 6999 return 0;
46a3df9f 7000 } else if (resp_code == 1) {
46a3df9f
S
7001 dev_dbg(&hdev->pdev->dev,
7002 "remove mac addr failed for miss.\n");
6e4139f6 7003 return -ENOENT;
46a3df9f 7004 }
6e4139f6
JS
7005
7006 dev_err(&hdev->pdev->dev,
7007 "remove mac addr failed for undefined, code=%u.\n",
7008 resp_code);
7009 return -EIO;
46a3df9f
S
7010 } else if (op == HCLGE_MAC_VLAN_LKUP) {
7011 if (!resp_code) {
6e4139f6 7012 return 0;
46a3df9f 7013 } else if (resp_code == 1) {
46a3df9f
S
7014 dev_dbg(&hdev->pdev->dev,
7015 "lookup mac addr failed for miss.\n");
6e4139f6 7016 return -ENOENT;
46a3df9f 7017 }
6e4139f6 7018
46a3df9f 7019 dev_err(&hdev->pdev->dev,
6e4139f6
JS
7020 "lookup mac addr failed for undefined, code=%u.\n",
7021 resp_code);
7022 return -EIO;
46a3df9f
S
7023 }
7024
6e4139f6
JS
7025 dev_err(&hdev->pdev->dev,
7026 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
7027
7028 return -EINVAL;
46a3df9f
S
7029}
7030
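/* Set or clear the bit for @vfid in the function bitmap carried by the
 * MAC/VLAN table descriptors: the first 192 functions live in the data
 * words of desc[1], the remaining ones in desc[2].
 */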
7031static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
7032{
b37ce587
YM
7033#define HCLGE_VF_NUM_IN_FIRST_DESC 192
7034
b9a8f883
YL
7035 unsigned int word_num;
7036 unsigned int bit_num;
46a3df9f
S
7037
7038 if (vfid > 255 || vfid < 0)
7039 return -EIO;
7040
b37ce587 7041 if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
46a3df9f
S
7042 word_num = vfid / 32;
7043 bit_num = vfid % 32;
7044 if (clr)
a90bb9a5 7045 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
46a3df9f 7046 else
a90bb9a5 7047 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
46a3df9f 7048 } else {
b37ce587 7049 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
46a3df9f
S
7050 bit_num = vfid % 32;
7051 if (clr)
a90bb9a5 7052 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
46a3df9f 7053 else
a90bb9a5 7054 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
46a3df9f
S
7055 }
7056
7057 return 0;
7058}
7059
7060static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
7061{
7062#define HCLGE_DESC_NUMBER 3
7063#define HCLGE_FUNC_NUMBER_PER_DESC 6
7064 int i, j;
7065
6c39d527 7066 for (i = 1; i < HCLGE_DESC_NUMBER; i++)
46a3df9f
S
7067 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
7068 if (desc[i].data[j])
7069 return false;
7070
7071 return true;
7072}
7073
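/* Pack the 6-byte MAC address into the table entry: bytes 0-3 go into
 * the 32-bit high field and bytes 4-5 into the 16-bit low field, both
 * little-endian. Multicast entries additionally set the entry type and
 * mc_mac_en bits.
 */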
d44f9b63 7074static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
3a586422 7075 const u8 *addr, bool is_mc)
46a3df9f
S
7076{
7077 const unsigned char *mac_addr = addr;
7078 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
7079 (mac_addr[0]) | (mac_addr[1] << 8);
7080 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
7081
3a586422
WL
7082 hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7083 if (is_mc) {
7084 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
7085 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7086 }
7087
46a3df9f
S
7088 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
7089 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
7090}
7091
46a3df9f 7092static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
d44f9b63 7093 struct hclge_mac_vlan_tbl_entry_cmd *req)
46a3df9f
S
7094{
7095 struct hclge_dev *hdev = vport->back;
7096 struct hclge_desc desc;
7097 u8 resp_code;
a90bb9a5 7098 u16 retval;
46a3df9f
S
7099 int ret;
7100
7101 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
7102
d44f9b63 7103 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
46a3df9f
S
7104
7105 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7106 if (ret) {
7107 dev_err(&hdev->pdev->dev,
7108 "del mac addr failed for cmd_send, ret =%d.\n",
7109 ret);
7110 return ret;
7111 }
a90bb9a5
YL
7112 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7113 retval = le16_to_cpu(desc.retval);
46a3df9f 7114
a90bb9a5 7115 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
46a3df9f
S
7116 HCLGE_MAC_VLAN_REMOVE);
7117}
7118
7119static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
d44f9b63 7120 struct hclge_mac_vlan_tbl_entry_cmd *req,
46a3df9f
S
7121 struct hclge_desc *desc,
7122 bool is_mc)
7123{
7124 struct hclge_dev *hdev = vport->back;
7125 u8 resp_code;
a90bb9a5 7126 u16 retval;
46a3df9f
S
7127 int ret;
7128
7129 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
7130 if (is_mc) {
7131 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7132 memcpy(desc[0].data,
7133 req,
d44f9b63 7134 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
46a3df9f
S
7135 hclge_cmd_setup_basic_desc(&desc[1],
7136 HCLGE_OPC_MAC_VLAN_ADD,
7137 true);
7138 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7139 hclge_cmd_setup_basic_desc(&desc[2],
7140 HCLGE_OPC_MAC_VLAN_ADD,
7141 true);
7142 ret = hclge_cmd_send(&hdev->hw, desc, 3);
7143 } else {
7144 memcpy(desc[0].data,
7145 req,
d44f9b63 7146 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
46a3df9f
S
7147 ret = hclge_cmd_send(&hdev->hw, desc, 1);
7148 }
7149 if (ret) {
7150 dev_err(&hdev->pdev->dev,
7151 "lookup mac addr failed for cmd_send, ret =%d.\n",
7152 ret);
7153 return ret;
7154 }
a90bb9a5
YL
7155 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
7156 retval = le16_to_cpu(desc[0].retval);
46a3df9f 7157
a90bb9a5 7158 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
46a3df9f
S
7159 HCLGE_MAC_VLAN_LKUP);
7160}
7161
7162static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
d44f9b63 7163 struct hclge_mac_vlan_tbl_entry_cmd *req,
46a3df9f
S
7164 struct hclge_desc *mc_desc)
7165{
7166 struct hclge_dev *hdev = vport->back;
7167 int cfg_status;
7168 u8 resp_code;
a90bb9a5 7169 u16 retval;
46a3df9f
S
7170 int ret;
7171
7172 if (!mc_desc) {
7173 struct hclge_desc desc;
7174
7175 hclge_cmd_setup_basic_desc(&desc,
7176 HCLGE_OPC_MAC_VLAN_ADD,
7177 false);
d44f9b63
YL
7178 memcpy(desc.data, req,
7179 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
46a3df9f 7180 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
a90bb9a5
YL
7181 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7182 retval = le16_to_cpu(desc.retval);
7183
7184 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
46a3df9f
S
7185 resp_code,
7186 HCLGE_MAC_VLAN_ADD);
7187 } else {
c3b6f755 7188 hclge_cmd_reuse_desc(&mc_desc[0], false);
46a3df9f 7189 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
c3b6f755 7190 hclge_cmd_reuse_desc(&mc_desc[1], false);
46a3df9f 7191 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
c3b6f755 7192 hclge_cmd_reuse_desc(&mc_desc[2], false);
46a3df9f
S
7193 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
7194 memcpy(mc_desc[0].data, req,
d44f9b63 7195 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
46a3df9f 7196 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
a90bb9a5
YL
7197 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
7198 retval = le16_to_cpu(mc_desc[0].retval);
7199
7200 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
46a3df9f
S
7201 resp_code,
7202 HCLGE_MAC_VLAN_ADD);
7203 }
7204
7205 if (ret) {
7206 dev_err(&hdev->pdev->dev,
7207 "add mac addr failed for cmd_send, ret =%d.\n",
7208 ret);
7209 return ret;
7210 }
7211
7212 return cfg_status;
7213}
7214
39932473 7215static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
c1c5f66e 7216 u16 *allocated_size)
39932473
JS
7217{
7218 struct hclge_umv_spc_alc_cmd *req;
7219 struct hclge_desc desc;
7220 int ret;
7221
7222 req = (struct hclge_umv_spc_alc_cmd *)desc.data;
7223 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
63cbf7a9 7224
39932473
JS
7225 req->space_size = cpu_to_le32(space_size);
7226
7227 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7228 if (ret) {
c1c5f66e
JS
7229 dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
7230 ret);
39932473
JS
7231 return ret;
7232 }
7233
3fd8dc26 7234 *allocated_size = le32_to_cpu(desc.data[1]);
39932473
JS
7235
7236 return 0;
7237}
7238
1ac0e6c2
JS
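/* Request the wanted unicast MAC (UMV) table space from firmware and
 * split what was actually allocated into a per-vport private quota
 * plus a shared pool (one quota plus the remainder).
 */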
7239static int hclge_init_umv_space(struct hclge_dev *hdev)
7240{
7241 u16 allocated_size = 0;
7242 int ret;
7243
c1c5f66e 7244 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
1ac0e6c2
JS
7245 if (ret)
7246 return ret;
7247
7248 if (allocated_size < hdev->wanted_umv_size)
7249 dev_warn(&hdev->pdev->dev,
7250 "failed to alloc umv space, want %u, get %u\n",
7251 hdev->wanted_umv_size, allocated_size);
7252
1ac0e6c2
JS
7253 hdev->max_umv_size = allocated_size;
7254 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
7255 hdev->share_umv_size = hdev->priv_umv_size +
7256 hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7257
7258 return 0;
7259}
7260
39932473
JS
7261static void hclge_reset_umv_space(struct hclge_dev *hdev)
7262{
7263 struct hclge_vport *vport;
7264 int i;
7265
7266 for (i = 0; i < hdev->num_alloc_vport; i++) {
7267 vport = &hdev->vport[i];
7268 vport->used_umv_num = 0;
7269 }
7270
7d0b3451 7271 mutex_lock(&hdev->vport_lock);
39932473 7272 hdev->share_umv_size = hdev->priv_umv_size +
4c58f592 7273 hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7d0b3451 7274 mutex_unlock(&hdev->vport_lock);
39932473
JS
7275}
7276
7d0b3451 7277static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
39932473
JS
7278{
7279 struct hclge_dev *hdev = vport->back;
7280 bool is_full;
7281
7d0b3451
JS
7282 if (need_lock)
7283 mutex_lock(&hdev->vport_lock);
7284
39932473
JS
7285 is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
7286 hdev->share_umv_size == 0);
7d0b3451
JS
7287
7288 if (need_lock)
7289 mutex_unlock(&hdev->vport_lock);
39932473
JS
7290
7291 return is_full;
7292}
7293
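/* Account for one unicast MAC entry being added or freed by @vport:
 * entries beyond the private quota are counted against the shared
 * pool, so frees above the quota return space to the pool and adds
 * beyond the quota consume it.
 */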
7294static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
7295{
7296 struct hclge_dev *hdev = vport->back;
7297
39932473
JS
7298 if (is_free) {
7299 if (vport->used_umv_num > hdev->priv_umv_size)
7300 hdev->share_umv_size++;
54a395b6 7301
7302 if (vport->used_umv_num > 0)
7303 vport->used_umv_num--;
39932473 7304 } else {
54a395b6 7305 if (vport->used_umv_num >= hdev->priv_umv_size &&
7306 hdev->share_umv_size > 0)
39932473
JS
7307 hdev->share_umv_size--;
7308 vport->used_umv_num++;
7309 }
39932473
JS
7310}
7311
ee4bcd3b
JS
7312static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
7313 const u8 *mac_addr)
7314{
7315 struct hclge_mac_node *mac_node, *tmp;
7316
7317 list_for_each_entry_safe(mac_node, tmp, list, node)
7318 if (ether_addr_equal(mac_addr, mac_node->mac_addr))
7319 return mac_node;
7320
7321 return NULL;
7322}
7323
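/* Merge a newly requested state into an existing mac node: a TO_ADD
 * request on a node pending deletion revives it to ACTIVE; a TO_DEL
 * request removes a node that was only pending addition, otherwise
 * marks it TO_DEL; an ACTIVE result promotes a TO_ADD node to ACTIVE.
 */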
7324static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
7325 enum HCLGE_MAC_NODE_STATE state)
7326{
7327 switch (state) {
7328 /* from set_rx_mode or tmp_add_list */
7329 case HCLGE_MAC_TO_ADD:
7330 if (mac_node->state == HCLGE_MAC_TO_DEL)
7331 mac_node->state = HCLGE_MAC_ACTIVE;
7332 break;
7333 /* only from set_rx_mode */
7334 case HCLGE_MAC_TO_DEL:
7335 if (mac_node->state == HCLGE_MAC_TO_ADD) {
7336 list_del(&mac_node->node);
7337 kfree(mac_node);
7338 } else {
7339 mac_node->state = HCLGE_MAC_TO_DEL;
7340 }
7341 break;
7342 /* only from tmp_add_list, the mac_node->state won't be
7343 * ACTIVE.
7344 */
7345 case HCLGE_MAC_ACTIVE:
7346 if (mac_node->state == HCLGE_MAC_TO_ADD)
7347 mac_node->state = HCLGE_MAC_ACTIVE;
7348
7349 break;
7350 }
7351}
7352
7353int hclge_update_mac_list(struct hclge_vport *vport,
7354 enum HCLGE_MAC_NODE_STATE state,
7355 enum HCLGE_MAC_ADDR_TYPE mac_type,
7356 const unsigned char *addr)
7357{
7358 struct hclge_dev *hdev = vport->back;
7359 struct hclge_mac_node *mac_node;
7360 struct list_head *list;
7361
7362 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7363 &vport->uc_mac_list : &vport->mc_mac_list;
7364
7365 spin_lock_bh(&vport->mac_list_lock);
7366
7367 /* if the mac addr is already in the mac list, no need to add a new
7368	 * one into it, just check the mac addr state, convert it to a
7369	 * new state, or just remove it, or do nothing.
7370 */
7371 mac_node = hclge_find_mac_node(list, addr);
7372 if (mac_node) {
7373 hclge_update_mac_node(mac_node, state);
7374 spin_unlock_bh(&vport->mac_list_lock);
7375 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
7376 return 0;
7377 }
7378
7379	/* if this address has never been added, it is unnecessary to delete */
7380 if (state == HCLGE_MAC_TO_DEL) {
7381 spin_unlock_bh(&vport->mac_list_lock);
7382 dev_err(&hdev->pdev->dev,
7383 "failed to delete address %pM from mac list\n",
7384 addr);
7385 return -ENOENT;
7386 }
7387
7388 mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
7389 if (!mac_node) {
7390 spin_unlock_bh(&vport->mac_list_lock);
7391 return -ENOMEM;
7392 }
7393
7394 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
7395
7396 mac_node->state = state;
7397 ether_addr_copy(mac_node->mac_addr, addr);
7398 list_add_tail(&mac_node->node, list);
7399
7400 spin_unlock_bh(&vport->mac_list_lock);
7401
7402 return 0;
7403}
7404
46a3df9f
S
7405static int hclge_add_uc_addr(struct hnae3_handle *handle,
7406 const unsigned char *addr)
7407{
7408 struct hclge_vport *vport = hclge_get_vport(handle);
7409
ee4bcd3b
JS
7410 return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
7411 addr);
46a3df9f
S
7412}
7413
7414int hclge_add_uc_addr_common(struct hclge_vport *vport,
7415 const unsigned char *addr)
7416{
7417 struct hclge_dev *hdev = vport->back;
d44f9b63 7418 struct hclge_mac_vlan_tbl_entry_cmd req;
d07b6bb4 7419 struct hclge_desc desc;
a90bb9a5 7420 u16 egress_port = 0;
aa7a795e 7421 int ret;
46a3df9f
S
7422
7423 /* mac addr check */
7424 if (is_zero_ether_addr(addr) ||
7425 is_broadcast_ether_addr(addr) ||
7426 is_multicast_ether_addr(addr)) {
7427 dev_err(&hdev->pdev->dev,
7428 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
9b2f3477 7429 addr, is_zero_ether_addr(addr),
46a3df9f
S
7430 is_broadcast_ether_addr(addr),
7431 is_multicast_ether_addr(addr));
7432 return -EINVAL;
7433 }
7434
7435 memset(&req, 0, sizeof(req));
a90bb9a5 7436
e4e87715
PL
7437 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7438 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
a90bb9a5
YL
7439
7440 req.egress_port = cpu_to_le16(egress_port);
46a3df9f 7441
3a586422 7442 hclge_prepare_mac_addr(&req, addr, false);
46a3df9f 7443
d07b6bb4
JS
7444 /* Lookup the mac address in the mac_vlan table, and add
7445	 * it if the entry does not exist. A duplicate unicast entry
7446	 * is not allowed in the mac vlan table.
7447 */
7448 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
39932473 7449 if (ret == -ENOENT) {
7d0b3451
JS
7450 mutex_lock(&hdev->vport_lock);
7451 if (!hclge_is_umv_space_full(vport, false)) {
39932473
JS
7452 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
7453 if (!ret)
7454 hclge_update_umv_space(vport, false);
7d0b3451 7455 mutex_unlock(&hdev->vport_lock);
39932473
JS
7456 return ret;
7457 }
7d0b3451 7458 mutex_unlock(&hdev->vport_lock);
39932473 7459
c631c696
JS
7460 if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
7461 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
7462 hdev->priv_umv_size);
39932473
JS
7463
7464 return -ENOSPC;
7465 }
d07b6bb4
JS
7466
7467 /* check if we just hit the duplicate */
72110b56 7468 if (!ret) {
adcf738b 7469 dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
72110b56
PL
7470 vport->vport_id, addr);
7471 return 0;
7472 }
d07b6bb4
JS
7473
7474 dev_err(&hdev->pdev->dev,
7475 "PF failed to add unicast entry(%pM) in the MAC table\n",
7476 addr);
46a3df9f 7477
aa7a795e 7478 return ret;
46a3df9f
S
7479}
7480
7481static int hclge_rm_uc_addr(struct hnae3_handle *handle,
7482 const unsigned char *addr)
7483{
7484 struct hclge_vport *vport = hclge_get_vport(handle);
7485
ee4bcd3b
JS
7486 return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
7487 addr);
46a3df9f
S
7488}
7489
7490int hclge_rm_uc_addr_common(struct hclge_vport *vport,
7491 const unsigned char *addr)
7492{
7493 struct hclge_dev *hdev = vport->back;
d44f9b63 7494 struct hclge_mac_vlan_tbl_entry_cmd req;
aa7a795e 7495 int ret;
46a3df9f
S
7496
7497 /* mac addr check */
7498 if (is_zero_ether_addr(addr) ||
7499 is_broadcast_ether_addr(addr) ||
7500 is_multicast_ether_addr(addr)) {
9b2f3477
WL
7501 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
7502 addr);
46a3df9f
S
7503 return -EINVAL;
7504 }
7505
7506 memset(&req, 0, sizeof(req));
e4e87715 7507 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
3a586422 7508 hclge_prepare_mac_addr(&req, addr, false);
aa7a795e 7509 ret = hclge_remove_mac_vlan_tbl(vport, &req);
7d0b3451
JS
7510 if (!ret) {
7511 mutex_lock(&hdev->vport_lock);
39932473 7512 hclge_update_umv_space(vport, true);
7d0b3451
JS
7513 mutex_unlock(&hdev->vport_lock);
7514 } else if (ret == -ENOENT) {
ee4bcd3b 7515 ret = 0;
7d0b3451 7516 }
46a3df9f 7517
aa7a795e 7518 return ret;
46a3df9f
S
7519}
7520
7521static int hclge_add_mc_addr(struct hnae3_handle *handle,
7522 const unsigned char *addr)
7523{
7524 struct hclge_vport *vport = hclge_get_vport(handle);
7525
ee4bcd3b
JS
7526 return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
7527 addr);
46a3df9f
S
7528}
7529
7530int hclge_add_mc_addr_common(struct hclge_vport *vport,
7531 const unsigned char *addr)
7532{
7533 struct hclge_dev *hdev = vport->back;
d44f9b63 7534 struct hclge_mac_vlan_tbl_entry_cmd req;
46a3df9f 7535 struct hclge_desc desc[3];
46a3df9f
S
7536 int status;
7537
7538 /* mac addr check */
7539 if (!is_multicast_ether_addr(addr)) {
7540 dev_err(&hdev->pdev->dev,
7541 "Add mc mac err! invalid mac:%pM.\n",
7542 addr);
7543 return -EINVAL;
7544 }
7545 memset(&req, 0, sizeof(req));
3a586422 7546 hclge_prepare_mac_addr(&req, addr, true);
46a3df9f 7547 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
63cbf7a9 7548 if (status) {
46a3df9f
S
7549		/* This mac addr does not exist, add a new entry for it */
7550 memset(desc[0].data, 0, sizeof(desc[0].data));
7551 memset(desc[1].data, 0, sizeof(desc[0].data));
7552 memset(desc[2].data, 0, sizeof(desc[0].data));
46a3df9f 7553 }
63cbf7a9
YM
7554 status = hclge_update_desc_vfid(desc, vport->vport_id, false);
7555 if (status)
7556 return status;
7557 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
46a3df9f 7558
c631c696
JS
7559	/* if already overflowed, do not print each time */
7560 if (status == -ENOSPC &&
7561 !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
1f6db589 7562 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
46a3df9f
S
7563
7564 return status;
7565}
7566
7567static int hclge_rm_mc_addr(struct hnae3_handle *handle,
7568 const unsigned char *addr)
7569{
7570 struct hclge_vport *vport = hclge_get_vport(handle);
7571
ee4bcd3b
JS
7572 return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
7573 addr);
46a3df9f
S
7574}
7575
7576int hclge_rm_mc_addr_common(struct hclge_vport *vport,
7577 const unsigned char *addr)
7578{
7579 struct hclge_dev *hdev = vport->back;
d44f9b63 7580 struct hclge_mac_vlan_tbl_entry_cmd req;
46a3df9f
S
7581 enum hclge_cmd_status status;
7582 struct hclge_desc desc[3];
46a3df9f
S
7583
7584 /* mac addr check */
7585 if (!is_multicast_ether_addr(addr)) {
7586 dev_dbg(&hdev->pdev->dev,
7587 "Remove mc mac err! invalid mac:%pM.\n",
7588 addr);
7589 return -EINVAL;
7590 }
7591
7592 memset(&req, 0, sizeof(req));
3a586422 7593 hclge_prepare_mac_addr(&req, addr, true);
46a3df9f
S
7594 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7595 if (!status) {
7596 /* This mac addr exist, remove this handle's VFID for it */
63cbf7a9
YM
7597 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
7598 if (status)
7599 return status;
46a3df9f
S
7600
7601 if (hclge_is_all_function_id_zero(desc))
7602			/* All the vfids are zero, so we need to delete this entry */
7603 status = hclge_remove_mac_vlan_tbl(vport, &req);
7604 else
7605			/* Not all the vfids are zero, update the vfid */
7606 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7607
ee4bcd3b 7608 } else if (status == -ENOENT) {
40cca1c5 7609 status = 0;
46a3df9f
S
7610 }
7611
46a3df9f
S
7612 return status;
7613}
7614
ee4bcd3b
JS
7615static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
7616 struct list_head *list,
7617 int (*sync)(struct hclge_vport *,
7618 const unsigned char *))
6dd86902 7619{
ee4bcd3b
JS
7620 struct hclge_mac_node *mac_node, *tmp;
7621 int ret;
6dd86902 7622
ee4bcd3b
JS
7623 list_for_each_entry_safe(mac_node, tmp, list, node) {
7624 ret = sync(vport, mac_node->mac_addr);
7625 if (!ret) {
7626 mac_node->state = HCLGE_MAC_ACTIVE;
7627 } else {
7628 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
7629 &vport->state);
7630 break;
7631 }
7632 }
7633}
6dd86902 7634
ee4bcd3b
JS
7635static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
7636 struct list_head *list,
7637 int (*unsync)(struct hclge_vport *,
7638 const unsigned char *))
7639{
7640 struct hclge_mac_node *mac_node, *tmp;
7641 int ret;
6dd86902 7642
ee4bcd3b
JS
7643 list_for_each_entry_safe(mac_node, tmp, list, node) {
7644 ret = unsync(vport, mac_node->mac_addr);
7645 if (!ret || ret == -ENOENT) {
7646 list_del(&mac_node->node);
7647 kfree(mac_node);
7648 } else {
7649 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
7650 &vport->state);
7651 break;
7652 }
7653 }
7654}
6dd86902 7655
c631c696 7656static bool hclge_sync_from_add_list(struct list_head *add_list,
ee4bcd3b
JS
7657 struct list_head *mac_list)
7658{
7659 struct hclge_mac_node *mac_node, *tmp, *new_node;
c631c696 7660 bool all_added = true;
6dd86902 7661
ee4bcd3b 7662 list_for_each_entry_safe(mac_node, tmp, add_list, node) {
c631c696
JS
7663 if (mac_node->state == HCLGE_MAC_TO_ADD)
7664 all_added = false;
7665
ee4bcd3b
JS
7666		/* if the mac address from tmp_add_list is not in the
7667		 * uc/mc_mac_list, it means a TO_DEL request was received
7668		 * during the time window of adding the mac address into the
7669		 * mac table. if the mac_node state is ACTIVE, change it to
7670		 * TO_DEL so it will be removed next time. else it must be
7671		 * TO_ADD, which means this address hasn't been added into
7672		 * the mac table, so just remove the mac node.
7673 */
7674 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
7675 if (new_node) {
7676 hclge_update_mac_node(new_node, mac_node->state);
7677 list_del(&mac_node->node);
7678 kfree(mac_node);
7679 } else if (mac_node->state == HCLGE_MAC_ACTIVE) {
7680 mac_node->state = HCLGE_MAC_TO_DEL;
7681 list_del(&mac_node->node);
7682 list_add_tail(&mac_node->node, mac_list);
7683 } else {
7684 list_del(&mac_node->node);
7685 kfree(mac_node);
7686 }
7687 }
c631c696
JS
7688
7689 return all_added;
6dd86902 7690}
7691
ee4bcd3b
JS
7692static void hclge_sync_from_del_list(struct list_head *del_list,
7693 struct list_head *mac_list)
6dd86902 7694{
ee4bcd3b 7695 struct hclge_mac_node *mac_node, *tmp, *new_node;
6dd86902 7696
ee4bcd3b
JS
7697 list_for_each_entry_safe(mac_node, tmp, del_list, node) {
7698 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
7699 if (new_node) {
7700			/* If the mac addr exists in the mac list, it means a
7701			 * new TO_ADD request was received during the time window
7702			 * of configuring the mac address. The mac node state is
7703			 * TO_ADD, and the address is already in the hardware
7704			 * (because the delete failed), so we just need
7705			 * to change the mac node state to ACTIVE.
7706 */
7707 new_node->state = HCLGE_MAC_ACTIVE;
7708 list_del(&mac_node->node);
7709 kfree(mac_node);
7710 } else {
7711 list_del(&mac_node->node);
7712 list_add_tail(&mac_node->node, mac_list);
7713 }
7714 }
7715}
6dd86902 7716
c631c696
JS
7717static void hclge_update_overflow_flags(struct hclge_vport *vport,
7718 enum HCLGE_MAC_ADDR_TYPE mac_type,
7719 bool is_all_added)
7720{
7721 if (mac_type == HCLGE_MAC_ADDR_UC) {
7722 if (is_all_added)
7723 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
7724 else
7725 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
7726 } else {
7727 if (is_all_added)
7728 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
7729 else
7730 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
7731 }
7732}
7733
ee4bcd3b
JS
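/* Push the pending changes of a vport's uc/mc mac list to hardware:
 * under mac_list_lock, split the pending nodes into temporary add and
 * delete lists, then issue the table commands outside the lock
 * (deletes first to free table space), and finally merge the results
 * back and update the overflow promisc flags.
 */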
7734static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
7735 enum HCLGE_MAC_ADDR_TYPE mac_type)
7736{
7737 struct hclge_mac_node *mac_node, *tmp, *new_node;
7738 struct list_head tmp_add_list, tmp_del_list;
7739 struct list_head *list;
c631c696 7740 bool all_added;
6dd86902 7741
ee4bcd3b
JS
7742 INIT_LIST_HEAD(&tmp_add_list);
7743 INIT_LIST_HEAD(&tmp_del_list);
6dd86902 7744
ee4bcd3b
JS
7745	/* move the mac addrs to the tmp_add_list and tmp_del_list, then
7746	 * we can add/delete these mac addrs outside the spin lock
7747 */
7748 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7749 &vport->uc_mac_list : &vport->mc_mac_list;
6dd86902 7750
ee4bcd3b
JS
7751 spin_lock_bh(&vport->mac_list_lock);
7752
7753 list_for_each_entry_safe(mac_node, tmp, list, node) {
7754 switch (mac_node->state) {
7755 case HCLGE_MAC_TO_DEL:
7756 list_del(&mac_node->node);
7757 list_add_tail(&mac_node->node, &tmp_del_list);
7758 break;
7759 case HCLGE_MAC_TO_ADD:
7760 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
7761 if (!new_node)
7762 goto stop_traverse;
7763 ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
7764 new_node->state = mac_node->state;
7765 list_add_tail(&new_node->node, &tmp_add_list);
7766 break;
7767 default:
6dd86902 7768 break;
7769 }
7770 }
ee4bcd3b
JS
7771
7772stop_traverse:
7773 spin_unlock_bh(&vport->mac_list_lock);
7774
7775 /* delete first, in order to get max mac table space for adding */
7776 if (mac_type == HCLGE_MAC_ADDR_UC) {
7777 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
7778 hclge_rm_uc_addr_common);
7779 hclge_sync_vport_mac_list(vport, &tmp_add_list,
7780 hclge_add_uc_addr_common);
7781 } else {
7782 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
7783 hclge_rm_mc_addr_common);
7784 hclge_sync_vport_mac_list(vport, &tmp_add_list,
7785 hclge_add_mc_addr_common);
7786 }
7787
7788	/* if some mac addresses failed to be added/deleted, move them back
7789	 * to the mac_list, and retry next time.
7790 */
7791 spin_lock_bh(&vport->mac_list_lock);
7792
7793 hclge_sync_from_del_list(&tmp_del_list, list);
c631c696 7794 all_added = hclge_sync_from_add_list(&tmp_add_list, list);
ee4bcd3b
JS
7795
7796 spin_unlock_bh(&vport->mac_list_lock);
c631c696
JS
7797
7798 hclge_update_overflow_flags(vport, mac_type, all_added);
ee4bcd3b
JS
7799}
7800
7801static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
7802{
7803 struct hclge_dev *hdev = vport->back;
7804
7805 if (test_bit(vport->vport_id, hdev->vport_config_block))
7806 return false;
7807
7808 if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
7809 return true;
7810
7811 return false;
7812}
7813
7814static void hclge_sync_mac_table(struct hclge_dev *hdev)
7815{
7816 int i;
7817
7818 for (i = 0; i < hdev->num_alloc_vport; i++) {
7819 struct hclge_vport *vport = &hdev->vport[i];
7820
7821 if (!hclge_need_sync_mac_table(vport))
7822 continue;
7823
7824 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
7825 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
7826 }
6dd86902 7827}
7828
7829void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
7830 enum HCLGE_MAC_ADDR_TYPE mac_type)
7831{
ee4bcd3b
JS
7832 int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
7833 struct hclge_mac_node *mac_cfg, *tmp;
7834 struct hclge_dev *hdev = vport->back;
7835 struct list_head tmp_del_list, *list;
7836 int ret;
6dd86902 7837
ee4bcd3b
JS
7838 if (mac_type == HCLGE_MAC_ADDR_UC) {
7839 list = &vport->uc_mac_list;
7840 unsync = hclge_rm_uc_addr_common;
7841 } else {
7842 list = &vport->mc_mac_list;
7843 unsync = hclge_rm_mc_addr_common;
7844 }
6dd86902 7845
ee4bcd3b 7846 INIT_LIST_HEAD(&tmp_del_list);
6dd86902 7847
ee4bcd3b
JS
7848 if (!is_del_list)
7849 set_bit(vport->vport_id, hdev->vport_config_block);
6dd86902 7850
ee4bcd3b
JS
7851 spin_lock_bh(&vport->mac_list_lock);
7852
7853 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7854 switch (mac_cfg->state) {
7855 case HCLGE_MAC_TO_DEL:
7856 case HCLGE_MAC_ACTIVE:
6dd86902 7857 list_del(&mac_cfg->node);
ee4bcd3b
JS
7858 list_add_tail(&mac_cfg->node, &tmp_del_list);
7859 break;
7860 case HCLGE_MAC_TO_ADD:
7861 if (is_del_list) {
7862 list_del(&mac_cfg->node);
7863 kfree(mac_cfg);
7864 }
7865 break;
6dd86902 7866 }
7867 }
ee4bcd3b
JS
7868
7869 spin_unlock_bh(&vport->mac_list_lock);
7870
7871 list_for_each_entry_safe(mac_cfg, tmp, &tmp_del_list, node) {
7872 ret = unsync(vport, mac_cfg->mac_addr);
7873 if (!ret || ret == -ENOENT) {
7874			/* clear all mac addr from hardware, but keep these
7875			 * mac addrs in the mac list, and restore them after
7876			 * the vf reset is finished.
7877 */
7878 if (!is_del_list &&
7879 mac_cfg->state == HCLGE_MAC_ACTIVE) {
7880 mac_cfg->state = HCLGE_MAC_TO_ADD;
7881 } else {
7882 list_del(&mac_cfg->node);
7883 kfree(mac_cfg);
7884 }
7885 } else if (is_del_list) {
7886 mac_cfg->state = HCLGE_MAC_TO_DEL;
7887 }
7888 }
7889
7890 spin_lock_bh(&vport->mac_list_lock);
7891
7892 hclge_sync_from_del_list(&tmp_del_list, list);
7893
7894 spin_unlock_bh(&vport->mac_list_lock);
7895}
7896
7897/* remove all mac addresses when uninitializing */
7898static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
7899 enum HCLGE_MAC_ADDR_TYPE mac_type)
7900{
7901 struct hclge_mac_node *mac_node, *tmp;
7902 struct hclge_dev *hdev = vport->back;
7903 struct list_head tmp_del_list, *list;
7904
7905 INIT_LIST_HEAD(&tmp_del_list);
7906
7907 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7908 &vport->uc_mac_list : &vport->mc_mac_list;
7909
7910 spin_lock_bh(&vport->mac_list_lock);
7911
7912 list_for_each_entry_safe(mac_node, tmp, list, node) {
7913 switch (mac_node->state) {
7914 case HCLGE_MAC_TO_DEL:
7915 case HCLGE_MAC_ACTIVE:
7916 list_del(&mac_node->node);
7917 list_add_tail(&mac_node->node, &tmp_del_list);
7918 break;
7919 case HCLGE_MAC_TO_ADD:
7920 list_del(&mac_node->node);
7921 kfree(mac_node);
7922 break;
7923 }
7924 }
7925
7926 spin_unlock_bh(&vport->mac_list_lock);
7927
7928 if (mac_type == HCLGE_MAC_ADDR_UC)
7929 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
7930 hclge_rm_uc_addr_common);
7931 else
7932 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
7933 hclge_rm_mc_addr_common);
7934
7935 if (!list_empty(&tmp_del_list))
7936 dev_warn(&hdev->pdev->dev,
7937 "uninit %s mac list for vport %u not completely.\n",
7938 mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
7939 vport->vport_id);
7940
7941 list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
7942 list_del(&mac_node->node);
7943 kfree(mac_node);
7944 }
6dd86902 7945}
7946
ee4bcd3b 7947static void hclge_uninit_mac_table(struct hclge_dev *hdev)
6dd86902 7948{
6dd86902 7949 struct hclge_vport *vport;
7950 int i;
7951
6dd86902 7952 for (i = 0; i < hdev->num_alloc_vport; i++) {
7953 vport = &hdev->vport[i];
ee4bcd3b
JS
7954 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
7955 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
6dd86902 7956 }
6dd86902 7957}
7958
f5aac71c
FL
7959static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
7960 u16 cmdq_resp, u8 resp_code)
7961{
7962#define HCLGE_ETHERTYPE_SUCCESS_ADD 0
7963#define HCLGE_ETHERTYPE_ALREADY_ADD 1
7964#define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
7965#define HCLGE_ETHERTYPE_KEY_CONFLICT 3
7966
7967 int return_status;
7968
7969 if (cmdq_resp) {
7970 dev_err(&hdev->pdev->dev,
adcf738b 7971 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
f5aac71c
FL
7972 cmdq_resp);
7973 return -EIO;
7974 }
7975
7976 switch (resp_code) {
7977 case HCLGE_ETHERTYPE_SUCCESS_ADD:
7978 case HCLGE_ETHERTYPE_ALREADY_ADD:
7979 return_status = 0;
7980 break;
7981 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
7982 dev_err(&hdev->pdev->dev,
7983 "add mac ethertype failed for manager table overflow.\n");
7984 return_status = -EIO;
7985 break;
7986 case HCLGE_ETHERTYPE_KEY_CONFLICT:
7987 dev_err(&hdev->pdev->dev,
7988 "add mac ethertype failed for key conflict.\n");
7989 return_status = -EIO;
7990 break;
7991 default:
7992 dev_err(&hdev->pdev->dev,
adcf738b 7993 "add mac ethertype failed for undefined, code=%u.\n",
f5aac71c
FL
7994 resp_code);
7995 return_status = -EIO;
7996 }
7997
7998 return return_status;
7999}
8000
8e6de441
HT
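/* Return true if @mac_addr is already programmed in the MAC/VLAN table
 * or already assigned to another VF of this PF.
 */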
8001static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
8002 u8 *mac_addr)
8003{
8004 struct hclge_mac_vlan_tbl_entry_cmd req;
8005 struct hclge_dev *hdev = vport->back;
8006 struct hclge_desc desc;
8007 u16 egress_port = 0;
8008 int i;
8009
8010 if (is_zero_ether_addr(mac_addr))
8011 return false;
8012
8013 memset(&req, 0, sizeof(req));
8014 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8015 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8016 req.egress_port = cpu_to_le16(egress_port);
8017 hclge_prepare_mac_addr(&req, mac_addr, false);
8018
8019 if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
8020 return true;
8021
8022 vf_idx += HCLGE_VF_VPORT_START_NUM;
8023 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
8024 if (i != vf_idx &&
8025 ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
8026 return true;
8027
8028 return false;
8029}
8030
8031static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
8032 u8 *mac_addr)
8033{
8034 struct hclge_vport *vport = hclge_get_vport(handle);
8035 struct hclge_dev *hdev = vport->back;
8036
8037 vport = hclge_get_vf_vport(hdev, vf);
8038 if (!vport)
8039 return -EINVAL;
8040
8041 if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
8042 dev_info(&hdev->pdev->dev,
8043 "Specified MAC(=%pM) is same as before, no change committed!\n",
8044 mac_addr);
8045 return 0;
8046 }
8047
8048 if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
8049 dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
8050 mac_addr);
8051 return -EEXIST;
8052 }
8053
8054 ether_addr_copy(vport->vf_info.mac, mac_addr);
8e6de441 8055
90913670
YL
8056 if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
8057 dev_info(&hdev->pdev->dev,
8058 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
8059 vf, mac_addr);
8060 return hclge_inform_reset_assert_to_vf(vport);
8061 }
8062
8063 dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
8064 vf, mac_addr);
8065 return 0;
8e6de441
HT
8066}
8067
f5aac71c
FL
8068static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
8069 const struct hclge_mac_mgr_tbl_entry_cmd *req)
8070{
8071 struct hclge_desc desc;
8072 u8 resp_code;
8073 u16 retval;
8074 int ret;
8075
8076 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
8077 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
8078
8079 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8080 if (ret) {
8081 dev_err(&hdev->pdev->dev,
8082 "add mac ethertype failed for cmd_send, ret =%d.\n",
8083 ret);
8084 return ret;
8085 }
8086
8087 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8088 retval = le16_to_cpu(desc.retval);
8089
8090 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
8091}
8092
8093static int init_mgr_tbl(struct hclge_dev *hdev)
8094{
8095 int ret;
8096 int i;
8097
8098 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
8099 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
8100 if (ret) {
8101 dev_err(&hdev->pdev->dev,
8102 "add mac ethertype failed, ret =%d.\n",
8103 ret);
8104 return ret;
8105 }
8106 }
8107
8108 return 0;
8109}
8110
46a3df9f
S
8111static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
8112{
8113 struct hclge_vport *vport = hclge_get_vport(handle);
8114 struct hclge_dev *hdev = vport->back;
8115
8116 ether_addr_copy(p, hdev->hw.mac.mac_addr);
8117}
8118
ee4bcd3b
JS
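/* Update the unicast mac list when the device address changes: add (or
 * revive) the new address and keep it at the list head so it is
 * programmed first, and mark the old address for deletion unless it
 * was never written to hardware, in which case the node is dropped.
 */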
8119int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
8120 const u8 *old_addr, const u8 *new_addr)
8121{
8122 struct list_head *list = &vport->uc_mac_list;
8123 struct hclge_mac_node *old_node, *new_node;
8124
8125 new_node = hclge_find_mac_node(list, new_addr);
8126 if (!new_node) {
8127 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8128 if (!new_node)
8129 return -ENOMEM;
8130
8131 new_node->state = HCLGE_MAC_TO_ADD;
8132 ether_addr_copy(new_node->mac_addr, new_addr);
8133 list_add(&new_node->node, list);
8134 } else {
8135 if (new_node->state == HCLGE_MAC_TO_DEL)
8136 new_node->state = HCLGE_MAC_ACTIVE;
8137
8138		/* make sure the new addr is at the list head, otherwise the dev
8139		 * addr may not be re-added into the mac table due to the umv space
8140		 * limitation after a global/imp reset, which clears the mac
8141		 * table in hardware.
8142 */
8143 list_move(&new_node->node, list);
8144 }
8145
8146 if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
8147 old_node = hclge_find_mac_node(list, old_addr);
8148 if (old_node) {
8149 if (old_node->state == HCLGE_MAC_TO_ADD) {
8150 list_del(&old_node->node);
8151 kfree(old_node);
8152 } else {
8153 old_node->state = HCLGE_MAC_TO_DEL;
8154 }
8155 }
8156 }
8157
8158 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8159
8160 return 0;
8161}
8162
59098055
FL
8163static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
8164 bool is_first)
46a3df9f
S
8165{
8166 const unsigned char *new_addr = (const unsigned char *)p;
8167 struct hclge_vport *vport = hclge_get_vport(handle);
8168 struct hclge_dev *hdev = vport->back;
ee4bcd3b 8169 unsigned char *old_addr = NULL;
18838d0c 8170 int ret;
46a3df9f
S
8171
8172 /* mac addr check */
8173 if (is_zero_ether_addr(new_addr) ||
8174 is_broadcast_ether_addr(new_addr) ||
8175 is_multicast_ether_addr(new_addr)) {
8176 dev_err(&hdev->pdev->dev,
ee4bcd3b 8177 "change uc mac err! invalid mac: %pM.\n",
46a3df9f
S
8178 new_addr);
8179 return -EINVAL;
8180 }
8181
ee4bcd3b 8182 ret = hclge_pause_addr_cfg(hdev, new_addr);
18838d0c
FL
8183 if (ret) {
8184 dev_err(&hdev->pdev->dev,
ee4bcd3b 8185 "failed to configure mac pause address, ret = %d\n",
18838d0c 8186 ret);
ee4bcd3b 8187 return ret;
46a3df9f
S
8188 }
8189
ee4bcd3b
JS
8190 if (!is_first)
8191 old_addr = hdev->hw.mac.mac_addr;
8192
8193 spin_lock_bh(&vport->mac_list_lock);
8194 ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
18838d0c
FL
8195 if (ret) {
8196 dev_err(&hdev->pdev->dev,
ee4bcd3b
JS
8197 "failed to change the mac addr:%pM, ret = %d\n",
8198 new_addr, ret);
8199 spin_unlock_bh(&vport->mac_list_lock);
8200
8201 if (!is_first)
8202 hclge_pause_addr_cfg(hdev, old_addr);
18838d0c 8203
ee4bcd3b
JS
8204 return ret;
8205 }
8206	/* we must update the dev addr under spin lock protection, preventing the
8207	 * dev addr from being removed by the set_rx_mode path.
8208 */
18838d0c 8209 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
ee4bcd3b
JS
8210 spin_unlock_bh(&vport->mac_list_lock);
8211
8212 hclge_task_schedule(hdev, 0);
18838d0c
FL
8213
8214 return 0;
46a3df9f
S
8215}
8216
26483246
XW
8217static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
8218 int cmd)
8219{
8220 struct hclge_vport *vport = hclge_get_vport(handle);
8221 struct hclge_dev *hdev = vport->back;
8222
8223 if (!hdev->hw.mac.phydev)
8224 return -EOPNOTSUPP;
8225
8226 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
8227}
8228
46a3df9f 8229static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
30ebc576 8230 u8 fe_type, bool filter_en, u8 vf_id)
46a3df9f 8231{
d44f9b63 8232 struct hclge_vlan_filter_ctrl_cmd *req;
46a3df9f
S
8233 struct hclge_desc desc;
8234 int ret;
8235
903b85d3
JS
8236 /* read current vlan filter parameter */
8237 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
d44f9b63 8238 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
46a3df9f 8239 req->vlan_type = vlan_type;
30ebc576 8240 req->vf_id = vf_id;
46a3df9f 8241
903b85d3
JS
8242 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8243 if (ret) {
8244 dev_err(&hdev->pdev->dev,
8245 "failed to get vlan filter config, ret = %d.\n", ret);
8246 return ret;
8247 }
8248
8249 /* modify and write new config parameter */
8250 hclge_cmd_reuse_desc(&desc, false);
8251 req->vlan_fe = filter_en ?
8252 (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
8253
46a3df9f 8254 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 8255 if (ret)
903b85d3 8256 dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
46a3df9f 8257 ret);
46a3df9f 8258
3f639907 8259 return ret;
46a3df9f
S
8260}
8261
391b5e93
JS
8262#define HCLGE_FILTER_TYPE_VF 0
8263#define HCLGE_FILTER_TYPE_PORT 1
64d114f0
ZL
8264#define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0)
8265#define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0)
8266#define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1)
8267#define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
8268#define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3)
8269#define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \
8270 | HCLGE_FILTER_FE_ROCE_EGRESS_B)
8271#define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \
8272 | HCLGE_FILTER_FE_ROCE_INGRESS_B)
391b5e93
JS
8273
8274static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
8275{
8276 struct hclge_vport *vport = hclge_get_vport(handle);
8277 struct hclge_dev *hdev = vport->back;
8278
64d114f0
ZL
8279 if (hdev->pdev->revision >= 0x21) {
8280 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
30ebc576 8281 HCLGE_FILTER_FE_EGRESS, enable, 0);
64d114f0 8282 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
30ebc576 8283 HCLGE_FILTER_FE_INGRESS, enable, 0);
64d114f0
ZL
8284 } else {
8285 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
30ebc576
JS
8286 HCLGE_FILTER_FE_EGRESS_V1_B, enable,
8287 0);
64d114f0 8288 }
c60edc17
JS
8289 if (enable)
8290 handle->netdev_flags |= HNAE3_VLAN_FLTR;
8291 else
8292 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
391b5e93
JS
8293}
8294
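/* Add or kill one vlan id in the VF vlan filter table. The target
 * function is selected by a bitmap spread over two descriptors; when
 * firmware reports the table is full, the full flag is recorded, a
 * warning is printed, and the request returns success since the
 * filter is then disabled by firmware.
 */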
ebaf1908 8295static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
70a21490 8296 bool is_kill, u16 vlan,
dc8131d8 8297 __be16 proto)
46a3df9f 8298{
22044f95 8299 struct hclge_vport *vport = &hdev->vport[vfid];
d44f9b63
YL
8300 struct hclge_vlan_filter_vf_cfg_cmd *req0;
8301 struct hclge_vlan_filter_vf_cfg_cmd *req1;
46a3df9f
S
8302 struct hclge_desc desc[2];
8303 u8 vf_byte_val;
8304 u8 vf_byte_off;
8305 int ret;
8306
81a9255e	8307	/* if the vf vlan table is full, firmware will close the vf vlan filter; it
22044f95
JS
8308	 * is impossible and unnecessary to add a new vlan id to the vf vlan filter.
8309	 * If spoof check is enabled, and the vf vlan table is full, it shouldn't add a
8310	 * new vlan, because tx packets with these vlan ids will be dropped.
81a9255e 8311 */
22044f95
JS
8312 if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
8313 if (vport->vf_info.spoofchk && vlan) {
8314 dev_err(&hdev->pdev->dev,
8315 "Can't add vlan due to spoof check is on and vf vlan table is full\n");
8316 return -EPERM;
8317 }
81a9255e 8318 return 0;
22044f95 8319 }
81a9255e 8320
46a3df9f
S
8321 hclge_cmd_setup_basic_desc(&desc[0],
8322 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
8323 hclge_cmd_setup_basic_desc(&desc[1],
8324 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
8325
8326 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8327
8328 vf_byte_off = vfid / 8;
8329 vf_byte_val = 1 << (vfid % 8);
8330
d44f9b63
YL
8331 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
8332 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
46a3df9f 8333
a90bb9a5 8334 req0->vlan_id = cpu_to_le16(vlan);
46a3df9f
S
8335 req0->vlan_cfg = is_kill;
8336
8337 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
8338 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
8339 else
8340 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
8341
8342 ret = hclge_cmd_send(&hdev->hw, desc, 2);
8343 if (ret) {
8344 dev_err(&hdev->pdev->dev,
8345 "Send vf vlan command fail, ret =%d.\n",
8346 ret);
8347 return ret;
8348 }
8349
8350 if (!is_kill) {
6c251711 8351#define HCLGE_VF_VLAN_NO_ENTRY 2
46a3df9f
S
8352 if (!req0->resp_code || req0->resp_code == 1)
8353 return 0;
8354
6c251711 8355 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
81a9255e 8356 set_bit(vfid, hdev->vf_vlan_full);
6c251711
YL
8357 dev_warn(&hdev->pdev->dev,
8358 "vf vlan table is full, vf vlan filter is disabled\n");
8359 return 0;
8360 }
8361
46a3df9f 8362 dev_err(&hdev->pdev->dev,
adcf738b 8363 "Add vf vlan filter fail, ret =%u.\n",
46a3df9f
S
8364 req0->resp_code);
8365 } else {
41dafea2 8366#define HCLGE_VF_VLAN_DEL_NO_FOUND 1
46a3df9f
S
8367 if (!req0->resp_code)
8368 return 0;
8369
d0c31df2
JS
8370		/* vf vlan filter is disabled when the vf vlan table is full,
8371		 * so a new vlan id will not be added into the vf vlan table.
8372		 * Just return 0 without warning, to avoid massive verbose
8373		 * print logs when unloading.
8374 */
8375 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
41dafea2 8376 return 0;
41dafea2 8377
46a3df9f 8378 dev_err(&hdev->pdev->dev,
adcf738b 8379 "Kill vf vlan filter fail, ret =%u.\n",
46a3df9f
S
8380 req0->resp_code);
8381 }
8382
8383 return -EIO;
8384}
8385
dc8131d8
YL
8386static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
8387 u16 vlan_id, bool is_kill)
46a3df9f 8388{
d44f9b63 8389 struct hclge_vlan_filter_pf_cfg_cmd *req;
46a3df9f
S
8390 struct hclge_desc desc;
8391 u8 vlan_offset_byte_val;
8392 u8 vlan_offset_byte;
8393 u8 vlan_offset_160;
8394 int ret;
8395
8396 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
8397
d6ad7c53
GL
8398 vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
8399 vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
8400 HCLGE_VLAN_BYTE_SIZE;
8401 vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
46a3df9f 8402
d44f9b63 8403 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
46a3df9f
S
8404 req->vlan_offset = vlan_offset_160;
8405 req->vlan_cfg = is_kill;
8406 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
8407
8408 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
dc8131d8
YL
8409 if (ret)
8410 dev_err(&hdev->pdev->dev,
8411 "port vlan command, send fail, ret =%d.\n", ret);
8412 return ret;
8413}
8414
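/* Apply a vlan filter change for @vport_id: always update the VF vlan
 * table, track membership in the per-vlan vport bitmap, and touch the
 * port vlan table only when the first vport joins or the last vport
 * leaves that vlan. Killing vlan 0 is a no-op.
 */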
8415static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
70a21490 8416 u16 vport_id, u16 vlan_id,
dc8131d8
YL
8417 bool is_kill)
8418{
8419 u16 vport_idx, vport_num = 0;
8420 int ret;
8421
daaa8521
YL
8422 if (is_kill && !vlan_id)
8423 return 0;
8424
dc8131d8 8425 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
70a21490 8426 proto);
46a3df9f
S
8427 if (ret) {
8428 dev_err(&hdev->pdev->dev,
adcf738b 8429 "Set %u vport vlan filter config fail, ret =%d.\n",
dc8131d8 8430 vport_id, ret);
46a3df9f
S
8431 return ret;
8432 }
8433
dc8131d8
YL
8434 /* vlan 0 may be added twice when 8021q module is enabled */
8435 if (!is_kill && !vlan_id &&
8436 test_bit(vport_id, hdev->vlan_table[vlan_id]))
8437 return 0;
8438
8439 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
46a3df9f 8440 dev_err(&hdev->pdev->dev,
adcf738b 8441 "Add port vlan failed, vport %u is already in vlan %u\n",
dc8131d8
YL
8442 vport_id, vlan_id);
8443 return -EINVAL;
46a3df9f
S
8444 }
8445
dc8131d8
YL
8446 if (is_kill &&
8447 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
8448 dev_err(&hdev->pdev->dev,
adcf738b 8449 "Delete port vlan failed, vport %u is not in vlan %u\n",
dc8131d8
YL
8450 vport_id, vlan_id);
8451 return -EINVAL;
8452 }
8453
54e97d11 8454 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
dc8131d8
YL
8455 vport_num++;
8456
8457 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
8458 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
8459 is_kill);
8460
8461 return ret;
8462}
8463
5f6ea83f
PL
8464static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
8465{
8466 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
8467 struct hclge_vport_vtag_tx_cfg_cmd *req;
8468 struct hclge_dev *hdev = vport->back;
8469 struct hclge_desc desc;
d9c0f275 8470 u16 bmap_index;
5f6ea83f
PL
8471 int status;
8472
8473 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
8474
8475 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
8476 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
8477 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
e4e87715
PL
8478 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
8479 vcfg->accept_tag1 ? 1 : 0);
8480 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
8481 vcfg->accept_untag1 ? 1 : 0);
8482 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
8483 vcfg->accept_tag2 ? 1 : 0);
8484 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
8485 vcfg->accept_untag2 ? 1 : 0);
8486 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
8487 vcfg->insert_tag1_en ? 1 : 0);
8488 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
8489 vcfg->insert_tag2_en ? 1 : 0);
8490 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
5f6ea83f
PL
8491
8492 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
d9c0f275
JS
8493 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8494 HCLGE_VF_NUM_PER_BYTE;
8495 req->vf_bitmap[bmap_index] =
8496 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
5f6ea83f
PL
8497
8498 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8499 if (status)
8500 dev_err(&hdev->pdev->dev,
8501 "Send port txvlan cfg command fail, ret =%d\n",
8502 status);
8503
8504 return status;
8505}
8506
8507static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
8508{
8509 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
8510 struct hclge_vport_vtag_rx_cfg_cmd *req;
8511 struct hclge_dev *hdev = vport->back;
8512 struct hclge_desc desc;
d9c0f275 8513 u16 bmap_index;
5f6ea83f
PL
8514 int status;
8515
8516 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
8517
8518 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
e4e87715
PL
8519 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
8520 vcfg->strip_tag1_en ? 1 : 0);
8521 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
8522 vcfg->strip_tag2_en ? 1 : 0);
8523 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
8524 vcfg->vlan1_vlan_prionly ? 1 : 0);
8525 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
8526 vcfg->vlan2_vlan_prionly ? 1 : 0);
5f6ea83f
PL
8527
8528 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
d9c0f275
JS
8529 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8530 HCLGE_VF_NUM_PER_BYTE;
8531 req->vf_bitmap[bmap_index] =
8532 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
5f6ea83f
PL
8533
8534 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8535 if (status)
8536 dev_err(&hdev->pdev->dev,
8537 "Send port rxvlan cfg command fail, ret =%d\n",
8538 status);
8539
8540 return status;
8541}
8542
741fca16
JS
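/* Configure TX vlan tag insertion/acceptance and RX vlan tag stripping
 * for the vport according to its port-based vlan state: with a
 * port-based vlan, the default tag1 is inserted on TX and the stack's
 * tag1 is not accepted; without it, the stack's tag1 is accepted and
 * no tag1 is inserted. The RX strip bits are set to match, driven by
 * the vport's rx_vlan_offload_en setting.
 */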
8543static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
8544 u16 port_base_vlan_state,
8545 u16 vlan_tag)
8546{
8547 int ret;
8548
8549 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8550 vport->txvlan_cfg.accept_tag1 = true;
8551 vport->txvlan_cfg.insert_tag1_en = false;
8552 vport->txvlan_cfg.default_tag1 = 0;
8553 } else {
8554 vport->txvlan_cfg.accept_tag1 = false;
8555 vport->txvlan_cfg.insert_tag1_en = true;
8556 vport->txvlan_cfg.default_tag1 = vlan_tag;
8557 }
8558
8559 vport->txvlan_cfg.accept_untag1 = true;
8560
8561 /* accept_tag2 and accept_untag2 are not supported on
8562	 * pdev revision(0x20); newer revisions support them, but
8563	 * these two fields cannot be configured by the user.
8564 */
8565 vport->txvlan_cfg.accept_tag2 = true;
8566 vport->txvlan_cfg.accept_untag2 = true;
8567 vport->txvlan_cfg.insert_tag2_en = false;
8568 vport->txvlan_cfg.default_tag2 = 0;
8569
8570 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8571 vport->rxvlan_cfg.strip_tag1_en = false;
8572 vport->rxvlan_cfg.strip_tag2_en =
8573 vport->rxvlan_cfg.rx_vlan_offload_en;
8574 } else {
8575 vport->rxvlan_cfg.strip_tag1_en =
8576 vport->rxvlan_cfg.rx_vlan_offload_en;
8577 vport->rxvlan_cfg.strip_tag2_en = true;
8578 }
8579 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8580 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8581
8582 ret = hclge_set_vlan_tx_offload_cfg(vport);
8583 if (ret)
8584 return ret;
8585
8586 return hclge_set_vlan_rx_offload_cfg(vport);
8587}
8588
5f6ea83f
PL
8589static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
8590{
8591 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
8592 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
8593 struct hclge_desc desc;
8594 int status;
8595
8596 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
8597 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
8598 rx_req->ot_fst_vlan_type =
8599 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
8600 rx_req->ot_sec_vlan_type =
8601 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
8602 rx_req->in_fst_vlan_type =
8603 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
8604 rx_req->in_sec_vlan_type =
8605 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
8606
8607 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8608 if (status) {
8609 dev_err(&hdev->pdev->dev,
8610 "Send rxvlan protocol type command fail, ret =%d\n",
8611 status);
8612 return status;
8613 }
8614
8615 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
8616
d0d72bac 8617 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
5f6ea83f
PL
8618 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
8619 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
8620
8621 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8622 if (status)
8623 dev_err(&hdev->pdev->dev,
8624 "Send txvlan protocol type command fail, ret =%d\n",
8625 status);
8626
8627 return status;
8628}
8629
46a3df9f
S
8630static int hclge_init_vlan_config(struct hclge_dev *hdev)
8631{
5f6ea83f
PL
8632#define HCLGE_DEF_VLAN_TYPE 0x8100
8633
c60edc17 8634 struct hnae3_handle *handle = &hdev->vport[0].nic;
5f6ea83f 8635 struct hclge_vport *vport;
46a3df9f 8636 int ret;
5f6ea83f
PL
8637 int i;
8638
64d114f0 8639 if (hdev->pdev->revision >= 0x21) {
30ebc576
JS
8640 /* for revision 0x21, vf vlan filter is per function */
8641 for (i = 0; i < hdev->num_alloc_vport; i++) {
8642 vport = &hdev->vport[i];
8643 ret = hclge_set_vlan_filter_ctrl(hdev,
8644 HCLGE_FILTER_TYPE_VF,
8645 HCLGE_FILTER_FE_EGRESS,
8646 true,
8647 vport->vport_id);
8648 if (ret)
8649 return ret;
8650 }
46a3df9f 8651
64d114f0 8652 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
30ebc576
JS
8653 HCLGE_FILTER_FE_INGRESS, true,
8654 0);
64d114f0
ZL
8655 if (ret)
8656 return ret;
8657 } else {
8658 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8659 HCLGE_FILTER_FE_EGRESS_V1_B,
30ebc576 8660 true, 0);
64d114f0
ZL
8661 if (ret)
8662 return ret;
8663 }
46a3df9f 8664
c60edc17
JS
8665 handle->netdev_flags |= HNAE3_VLAN_FLTR;
8666
5f6ea83f
PL
8667 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8668 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8669 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8670 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8671 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
8672 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
8673
8674 ret = hclge_set_vlan_protocol_type(hdev);
5e43aef8
L
8675 if (ret)
8676 return ret;
46a3df9f 8677
5f6ea83f 8678 for (i = 0; i < hdev->num_alloc_vport; i++) {
741fca16 8679 u16 vlan_tag;
dcb35cce 8680
741fca16
JS
8681 vport = &hdev->vport[i];
8682 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
5f6ea83f 8683
741fca16
JS
8684 ret = hclge_vlan_offload_cfg(vport,
8685 vport->port_base_vlan_cfg.state,
8686 vlan_tag);
5f6ea83f
PL
8687 if (ret)
8688 return ret;
8689 }
8690
dc8131d8 8691 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
46a3df9f
S
8692}
8693
21e043cd
JS
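/* Add a VLAN id to the vport's software VLAN list, recording whether the
 * entry has already been written to the hardware filter table.
 */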
8694static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8695 bool writen_to_tbl)
c6075b19 8696{
8697 struct hclge_vport_vlan_cfg *vlan;
8698
c6075b19 8699 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
8700 if (!vlan)
8701 return;
8702
21e043cd 8703 vlan->hd_tbl_status = writen_to_tbl;
c6075b19 8704 vlan->vlan_id = vlan_id;
8705
8706 list_add_tail(&vlan->node, &vport->vlan_list);
8707}
8708
21e043cd
JS
8709static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
8710{
8711 struct hclge_vport_vlan_cfg *vlan, *tmp;
8712 struct hclge_dev *hdev = vport->back;
8713 int ret;
8714
8715 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8716 if (!vlan->hd_tbl_status) {
8717 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8718 vport->vport_id,
70a21490 8719 vlan->vlan_id, false);
21e043cd
JS
8720 if (ret) {
8721 dev_err(&hdev->pdev->dev,
8722 "restore vport vlan list failed, ret=%d\n",
8723 ret);
8724 return ret;
8725 }
8726 }
8727 vlan->hd_tbl_status = true;
8728 }
8729
8730 return 0;
8731}
8732
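/* Remove a VLAN id from the vport's software VLAN list, optionally deleting
 * it from the hardware filter table as well.
 */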
8733static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8734 bool is_write_tbl)
c6075b19 8735{
8736 struct hclge_vport_vlan_cfg *vlan, *tmp;
8737 struct hclge_dev *hdev = vport->back;
8738
8739 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8740 if (vlan->vlan_id == vlan_id) {
8741 if (is_write_tbl && vlan->hd_tbl_status)
8742 hclge_set_vlan_filter_hw(hdev,
8743 htons(ETH_P_8021Q),
8744 vport->vport_id,
70a21490 8745 vlan_id,
c6075b19 8746 true);
8747
8748 list_del(&vlan->node);
8749 kfree(vlan);
8750 break;
8751 }
8752 }
8753}
8754
8755void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
8756{
8757 struct hclge_vport_vlan_cfg *vlan, *tmp;
8758 struct hclge_dev *hdev = vport->back;
8759
8760 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8761 if (vlan->hd_tbl_status)
8762 hclge_set_vlan_filter_hw(hdev,
8763 htons(ETH_P_8021Q),
8764 vport->vport_id,
70a21490 8765 vlan->vlan_id,
c6075b19 8766 true);
8767
8768 vlan->hd_tbl_status = false;
8769 if (is_del_list) {
8770 list_del(&vlan->node);
8771 kfree(vlan);
8772 }
8773 }
23b4201d 8774 clear_bit(vport->vport_id, hdev->vf_vlan_full);
c6075b19 8775}
8776
8777void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
8778{
8779 struct hclge_vport_vlan_cfg *vlan, *tmp;
8780 struct hclge_vport *vport;
8781 int i;
8782
c6075b19 8783 for (i = 0; i < hdev->num_alloc_vport; i++) {
8784 vport = &hdev->vport[i];
8785 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8786 list_del(&vlan->node);
8787 kfree(vlan);
8788 }
8789 }
c6075b19 8790}
8791
039ba863 8792void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
b524b38f 8793{
b524b38f
JS
8794 struct hclge_vport_vlan_cfg *vlan, *tmp;
8795 struct hclge_dev *hdev = vport->back;
b943e033 8796 u16 vlan_proto;
039ba863
JS
8797 u16 vlan_id;
8798 u16 state;
8799 int ret;
b524b38f 8800
039ba863
JS
8801 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
8802 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8803 state = vport->port_base_vlan_cfg.state;
b524b38f 8804
039ba863
JS
8805 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
8806 clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
8807 hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
8808 vport->vport_id, vlan_id,
8809 false);
8810 return;
8811 }
22044f95 8812
039ba863
JS
8813 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8814 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8815 vport->vport_id,
8816 vlan->vlan_id, false);
8817 if (ret)
8818 break;
8819 vlan->hd_tbl_status = true;
b524b38f 8820 }
b524b38f
JS
8821}
8822
ee4bcd3b
JS
8823/* For global reset and imp reset, hardware will clear the mac table,
 8824 * so we change the mac address state from ACTIVE to TO_ADD, then they
 8825 * can be restored in the service task after the reset completes. Further,
 8826 * the mac addresses with state TO_DEL or DEL_FAIL do not need to be
 8827 * restored after reset, so just remove these mac nodes from mac_list.
 8828 */
8829static void hclge_mac_node_convert_for_reset(struct list_head *list)
8830{
8831 struct hclge_mac_node *mac_node, *tmp;
8832
8833 list_for_each_entry_safe(mac_node, tmp, list, node) {
8834 if (mac_node->state == HCLGE_MAC_ACTIVE) {
8835 mac_node->state = HCLGE_MAC_TO_ADD;
8836 } else if (mac_node->state == HCLGE_MAC_TO_DEL) {
8837 list_del(&mac_node->node);
8838 kfree(mac_node);
8839 }
8840 }
8841}
8842
8843void hclge_restore_mac_table_common(struct hclge_vport *vport)
8844{
8845 spin_lock_bh(&vport->mac_list_lock);
8846
8847 hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
8848 hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
8849 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8850
8851 spin_unlock_bh(&vport->mac_list_lock);
8852}
8853
039ba863
JS
8854static void hclge_restore_hw_table(struct hclge_dev *hdev)
8855{
8856 struct hclge_vport *vport = &hdev->vport[0];
8857 struct hnae3_handle *handle = &vport->nic;
8858
8859 hclge_restore_mac_table_common(vport);
8860 hclge_restore_vport_vlan_table(vport);
8861 set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
8862
8863 hclge_restore_fd_entries(handle);
8864}
8865
b2641e2a 8866int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
052ece6d
PL
8867{
8868 struct hclge_vport *vport = hclge_get_vport(handle);
8869
44e626f7
JS
8870 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8871 vport->rxvlan_cfg.strip_tag1_en = false;
8872 vport->rxvlan_cfg.strip_tag2_en = enable;
8873 } else {
8874 vport->rxvlan_cfg.strip_tag1_en = enable;
8875 vport->rxvlan_cfg.strip_tag2_en = true;
8876 }
052ece6d
PL
8877 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8878 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
44e626f7 8879 vport->rxvlan_cfg.rx_vlan_offload_en = enable;
052ece6d
PL
8880
8881 return hclge_set_vlan_rx_offload_cfg(vport);
8882}
8883
21e043cd
JS
8884static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
8885 u16 port_base_vlan_state,
8886 struct hclge_vlan_info *new_info,
8887 struct hclge_vlan_info *old_info)
8888{
8889 struct hclge_dev *hdev = vport->back;
8890 int ret;
8891
8892 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
8893 hclge_rm_vport_all_vlan_table(vport, false);
8894 return hclge_set_vlan_filter_hw(hdev,
8895 htons(new_info->vlan_proto),
8896 vport->vport_id,
8897 new_info->vlan_tag,
70a21490 8898 false);
21e043cd
JS
8899 }
8900
8901 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
8902 vport->vport_id, old_info->vlan_tag,
70a21490 8903 true);
21e043cd
JS
8904 if (ret)
8905 return ret;
8906
8907 return hclge_add_vport_all_vlan_table(vport);
8908}
8909
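/* Apply a port based VLAN change: update the TX/RX VLAN offload config,
 * adjust the hardware filter entries, then record the new state and info.
 */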
8910int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
8911 struct hclge_vlan_info *vlan_info)
8912{
8913 struct hnae3_handle *nic = &vport->nic;
8914 struct hclge_vlan_info *old_vlan_info;
8915 struct hclge_dev *hdev = vport->back;
8916 int ret;
8917
8918 old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
8919
8920 ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
8921 if (ret)
8922 return ret;
8923
8924 if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
8925 /* add new VLAN tag */
8a9a654b
JS
8926 ret = hclge_set_vlan_filter_hw(hdev,
8927 htons(vlan_info->vlan_proto),
21e043cd
JS
8928 vport->vport_id,
8929 vlan_info->vlan_tag,
70a21490 8930 false);
21e043cd
JS
8931 if (ret)
8932 return ret;
8933
8934 /* remove old VLAN tag */
8a9a654b
JS
8935 ret = hclge_set_vlan_filter_hw(hdev,
8936 htons(old_vlan_info->vlan_proto),
21e043cd
JS
8937 vport->vport_id,
8938 old_vlan_info->vlan_tag,
70a21490 8939 true);
21e043cd
JS
8940 if (ret)
8941 return ret;
8942
8943 goto update;
8944 }
8945
8946 ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
8947 old_vlan_info);
8948 if (ret)
8949 return ret;
8950
8951	/* update state only when disabling/enabling port based VLAN */
8952 vport->port_base_vlan_cfg.state = state;
8953 if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
8954 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
8955 else
8956 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
8957
8958update:
8959 vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
8960 vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
8961 vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
8962
8963 return 0;
8964}
8965
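/* Work out how a new VF VLAN request changes the port based VLAN state:
 * enable it, disable it, modify the tag, or leave it unchanged.
 */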
8966static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
8967 enum hnae3_port_base_vlan_state state,
8968 u16 vlan)
8969{
8970 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8971 if (!vlan)
8972 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8973 else
8974 return HNAE3_PORT_BASE_VLAN_ENABLE;
8975 } else {
8976 if (!vlan)
8977 return HNAE3_PORT_BASE_VLAN_DISABLE;
8978 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
8979 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8980 else
8981 return HNAE3_PORT_BASE_VLAN_MODIFY;
8982 }
8983}
8984
8985static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
8986 u16 vlan, u8 qos, __be16 proto)
8987{
8988 struct hclge_vport *vport = hclge_get_vport(handle);
8989 struct hclge_dev *hdev = vport->back;
8990 struct hclge_vlan_info vlan_info;
8991 u16 state;
8992 int ret;
8993
8994 if (hdev->pdev->revision == 0x20)
8995 return -EOPNOTSUPP;
8996
1c985508
JS
8997 vport = hclge_get_vf_vport(hdev, vfid);
8998 if (!vport)
8999 return -EINVAL;
9000
21e043cd 9001	/* qos is a 3-bit value, so it cannot be bigger than 7 */
1c985508 9002 if (vlan > VLAN_N_VID - 1 || qos > 7)
21e043cd
JS
9003 return -EINVAL;
9004 if (proto != htons(ETH_P_8021Q))
9005 return -EPROTONOSUPPORT;
9006
21e043cd
JS
9007 state = hclge_get_port_base_vlan_state(vport,
9008 vport->port_base_vlan_cfg.state,
9009 vlan);
9010 if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
9011 return 0;
9012
9013 vlan_info.vlan_tag = vlan;
9014 vlan_info.qos = qos;
9015 vlan_info.vlan_proto = ntohs(proto);
9016
92f11ea1
JS
9017 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
9018 return hclge_update_port_base_vlan_cfg(vport, state,
9019 &vlan_info);
9020 } else {
9021 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
1c985508 9022 vport->vport_id, state,
92f11ea1
JS
9023 vlan, qos,
9024 ntohs(proto));
9025 return ret;
9026 }
21e043cd
JS
9027}
9028
59359fc8
JS
9029static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
9030{
9031 struct hclge_vlan_info *vlan_info;
9032 struct hclge_vport *vport;
9033 int ret;
9034 int vf;
9035
9036 /* clear port base vlan for all vf */
9037 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
9038 vport = &hdev->vport[vf];
9039 vlan_info = &vport->port_base_vlan_cfg.vlan_info;
9040
9041 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9042 vport->vport_id,
9043 vlan_info->vlan_tag, true);
9044 if (ret)
9045 dev_err(&hdev->pdev->dev,
9046 "failed to clear vf vlan for vf%d, ret = %d\n",
9047 vf - HCLGE_VF_VPORT_START_NUM, ret);
9048 }
9049}
9050
21e043cd
JS
9051int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
9052 u16 vlan_id, bool is_kill)
9053{
9054 struct hclge_vport *vport = hclge_get_vport(handle);
9055 struct hclge_dev *hdev = vport->back;
9056 bool writen_to_tbl = false;
9057 int ret = 0;
9058
fe4144d4
JS
9059	/* When the device is resetting, firmware is unable to handle the
 9060	 * mailbox. Just record the vlan id, and remove it after the
 9061	 * reset is finished.
9062 */
9063 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
9064 set_bit(vlan_id, vport->vlan_del_fail_bmap);
9065 return -EBUSY;
9066 }
9067
46ee7350 9068	/* when port based vlan is enabled, we use the port based vlan as the
fe4144d4
JS
9069	 * vlan filter entry. In this case, we don't update the vlan filter table
9070	 * when the user adds a new vlan or removes an existing one, we just
9071	 * update the vport vlan list. The vlan ids in the vlan list will not be
9072	 * written to the vlan filter table until the port based vlan is disabled
21e043cd
JS
9073 */
9074 if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9075 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
70a21490 9076 vlan_id, is_kill);
21e043cd
JS
9077 writen_to_tbl = true;
9078 }
9079
fe4144d4
JS
9080 if (!ret) {
9081 if (is_kill)
9082 hclge_rm_vport_vlan_table(vport, vlan_id, false);
9083 else
9084 hclge_add_vport_vlan_table(vport, vlan_id,
9085 writen_to_tbl);
9086 } else if (is_kill) {
46ee7350 9087		/* when removing the hw vlan filter failed, record the vlan id,
fe4144d4
JS
9088		 * and try to remove it from hw later, to be consistent
9089		 * with the stack
9090 */
9091 set_bit(vlan_id, vport->vlan_del_fail_bmap);
9092 }
9093 return ret;
9094}
21e043cd 9095
fe4144d4
JS
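/* Retry the VLAN deletions that previously failed (recorded in
 * vlan_del_fail_bmap), handling at most HCLGE_MAX_SYNC_COUNT entries
 * per invocation.
 */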
9096static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
9097{
9098#define HCLGE_MAX_SYNC_COUNT 60
21e043cd 9099
fe4144d4
JS
9100 int i, ret, sync_cnt = 0;
9101 u16 vlan_id;
9102
9103 /* start from vport 1 for PF is always alive */
9104 for (i = 0; i < hdev->num_alloc_vport; i++) {
9105 struct hclge_vport *vport = &hdev->vport[i];
9106
9107 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9108 VLAN_N_VID);
9109 while (vlan_id != VLAN_N_VID) {
9110 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9111 vport->vport_id, vlan_id,
70a21490 9112 true);
fe4144d4
JS
9113 if (ret && ret != -EINVAL)
9114 return;
9115
9116 clear_bit(vlan_id, vport->vlan_del_fail_bmap);
9117 hclge_rm_vport_vlan_table(vport, vlan_id, false);
9118
9119 sync_cnt++;
9120 if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
9121 return;
9122
9123 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9124 VLAN_N_VID);
9125 }
9126 }
21e043cd
JS
9127}
9128
e6d7d79d 9129static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
46a3df9f 9130{
d44f9b63 9131 struct hclge_config_max_frm_size_cmd *req;
46a3df9f 9132 struct hclge_desc desc;
46a3df9f 9133
46a3df9f
S
9134 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
9135
d44f9b63 9136 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
e6d7d79d 9137 req->max_frm_size = cpu_to_le16(new_mps);
8fc7346c 9138 req->min_frm_size = HCLGE_MAC_MIN_FRAME;
46a3df9f 9139
e6d7d79d 9140 return hclge_cmd_send(&hdev->hw, &desc, 1);
46a3df9f
S
9141}
9142
dd72140c
FL
9143static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
9144{
9145 struct hclge_vport *vport = hclge_get_vport(handle);
818f1675
YL
9146
9147 return hclge_set_vport_mtu(vport, new_mtu);
9148}
9149
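/* Convert the requested MTU to a max frame size (L2 header, FCS and two
 * VLAN tags included), validate it against the PF/VF limits, then program
 * the MAC and re-allocate the packet buffers for the PF.
 */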
9150int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
9151{
dd72140c 9152 struct hclge_dev *hdev = vport->back;
63cbf7a9 9153 int i, max_frm_size, ret;
dd72140c 9154
9e690456 9155	/* HW supports 2 layers of vlan */
e6d7d79d
YL
9156 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
9157 if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
9158 max_frm_size > HCLGE_MAC_MAX_FRAME)
9159 return -EINVAL;
9160
818f1675
YL
9161 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
9162 mutex_lock(&hdev->vport_lock);
9163 /* VF's mps must fit within hdev->mps */
9164 if (vport->vport_id && max_frm_size > hdev->mps) {
9165 mutex_unlock(&hdev->vport_lock);
9166 return -EINVAL;
9167 } else if (vport->vport_id) {
9168 vport->mps = max_frm_size;
9169 mutex_unlock(&hdev->vport_lock);
9170 return 0;
9171 }
9172
9173	/* PF's mps must be greater than VF's mps */
9174 for (i = 1; i < hdev->num_alloc_vport; i++)
9175 if (max_frm_size < hdev->vport[i].mps) {
9176 mutex_unlock(&hdev->vport_lock);
9177 return -EINVAL;
9178 }
9179
cdca4c48
YL
9180 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
9181
e6d7d79d 9182 ret = hclge_set_mac_mtu(hdev, max_frm_size);
dd72140c
FL
9183 if (ret) {
9184 dev_err(&hdev->pdev->dev,
9185 "Change mtu fail, ret =%d\n", ret);
818f1675 9186 goto out;
dd72140c
FL
9187 }
9188
e6d7d79d 9189 hdev->mps = max_frm_size;
818f1675 9190 vport->mps = max_frm_size;
e6d7d79d 9191
dd72140c
FL
9192 ret = hclge_buffer_alloc(hdev);
9193 if (ret)
9194 dev_err(&hdev->pdev->dev,
9195 "Allocate buffer fail, ret =%d\n", ret);
9196
818f1675 9197out:
cdca4c48 9198 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
818f1675 9199 mutex_unlock(&hdev->vport_lock);
dd72140c
FL
9200 return ret;
9201}
9202
46a3df9f
S
9203static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
9204 bool enable)
9205{
d44f9b63 9206 struct hclge_reset_tqp_queue_cmd *req;
46a3df9f
S
9207 struct hclge_desc desc;
9208 int ret;
9209
9210 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
9211
d44f9b63 9212 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
46a3df9f 9213 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
b9a8f883
YL
9214 if (enable)
9215 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
46a3df9f
S
9216
9217 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9218 if (ret) {
9219 dev_err(&hdev->pdev->dev,
9220 "Send tqp reset cmd error, status =%d\n", ret);
9221 return ret;
9222 }
9223
9224 return 0;
9225}
9226
9227static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
9228{
d44f9b63 9229 struct hclge_reset_tqp_queue_cmd *req;
46a3df9f
S
9230 struct hclge_desc desc;
9231 int ret;
9232
9233 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
9234
d44f9b63 9235 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
46a3df9f
S
9236 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
9237
9238 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9239 if (ret) {
9240 dev_err(&hdev->pdev->dev,
9241 "Get reset status error, status =%d\n", ret);
9242 return ret;
9243 }
9244
e4e87715 9245 return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
46a3df9f
S
9246}
9247
0c29d191 9248u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
814e0274
PL
9249{
9250 struct hnae3_queue *queue;
9251 struct hclge_tqp *tqp;
9252
9253 queue = handle->kinfo.tqp[queue_id];
9254 tqp = container_of(queue, struct hclge_tqp, q);
9255
9256 return tqp->index;
9257}
9258
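/* Reset a single TQP: disable the queue, assert the queue reset, poll until
 * hardware reports it is ready, then deassert the reset.
 */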
7fa6be4f 9259int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
46a3df9f
S
9260{
9261 struct hclge_vport *vport = hclge_get_vport(handle);
9262 struct hclge_dev *hdev = vport->back;
9263 int reset_try_times = 0;
9264 int reset_status;
814e0274 9265 u16 queue_gid;
63cbf7a9 9266 int ret;
46a3df9f 9267
814e0274
PL
9268 queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
9269
46a3df9f
S
9270 ret = hclge_tqp_enable(hdev, queue_id, 0, false);
9271 if (ret) {
7fa6be4f
HT
9272 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
9273 return ret;
46a3df9f
S
9274 }
9275
814e0274 9276 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
46a3df9f 9277 if (ret) {
7fa6be4f
HT
9278 dev_err(&hdev->pdev->dev,
9279 "Send reset tqp cmd fail, ret = %d\n", ret);
9280 return ret;
46a3df9f
S
9281 }
9282
46a3df9f 9283 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
814e0274 9284 reset_status = hclge_get_reset_status(hdev, queue_gid);
46a3df9f
S
9285 if (reset_status)
9286 break;
e8df45c2
ZL
9287
9288 /* Wait for tqp hw reset */
9289 usleep_range(1000, 1200);
46a3df9f
S
9290 }
9291
9292 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
7fa6be4f
HT
9293 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
9294 return ret;
46a3df9f
S
9295 }
9296
814e0274 9297 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7fa6be4f
HT
9298 if (ret)
9299 dev_err(&hdev->pdev->dev,
9300 "Deassert the soft reset fail, ret = %d\n", ret);
9301
9302 return ret;
46a3df9f
S
9303}
9304
1a426f8b
PL
9305void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
9306{
9307 struct hclge_dev *hdev = vport->back;
9308 int reset_try_times = 0;
9309 int reset_status;
9310 u16 queue_gid;
9311 int ret;
9312
9313 queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
9314
9315 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
9316 if (ret) {
9317 dev_warn(&hdev->pdev->dev,
9318 "Send reset tqp cmd fail, ret = %d\n", ret);
9319 return;
9320 }
9321
1a426f8b 9322 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
1a426f8b
PL
9323 reset_status = hclge_get_reset_status(hdev, queue_gid);
9324 if (reset_status)
9325 break;
e8df45c2
ZL
9326
9327 /* Wait for tqp hw reset */
9328 usleep_range(1000, 1200);
1a426f8b
PL
9329 }
9330
9331 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
9332 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
9333 return;
9334 }
9335
9336 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
9337 if (ret)
9338 dev_warn(&hdev->pdev->dev,
9339 "Deassert the soft reset fail, ret = %d\n", ret);
9340}
9341
46a3df9f
S
9342static u32 hclge_get_fw_version(struct hnae3_handle *handle)
9343{
9344 struct hclge_vport *vport = hclge_get_vport(handle);
9345 struct hclge_dev *hdev = vport->back;
9346
9347 return hdev->fw_version;
9348}
9349
61387774
PL
9350static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
9351{
9352 struct phy_device *phydev = hdev->hw.mac.phydev;
9353
9354 if (!phydev)
9355 return;
9356
70814e81 9357 phy_set_asym_pause(phydev, rx_en, tx_en);
61387774
PL
9358}
9359
9360static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
9361{
61387774
PL
9362 int ret;
9363
40173a2e 9364 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
61387774 9365 return 0;
61387774
PL
9366
9367 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
aacbe27e
YL
9368 if (ret)
9369 dev_err(&hdev->pdev->dev,
9370 "configure pauseparam error, ret = %d.\n", ret);
61387774 9371
aacbe27e 9372 return ret;
61387774
PL
9373}
9374
1770a7a3
PL
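/* Resolve the pause configuration from the local and link partner autoneg
 * advertisements and apply it to the MAC.
 */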
9375int hclge_cfg_flowctrl(struct hclge_dev *hdev)
9376{
9377 struct phy_device *phydev = hdev->hw.mac.phydev;
9378 u16 remote_advertising = 0;
63cbf7a9 9379 u16 local_advertising;
1770a7a3
PL
9380 u32 rx_pause, tx_pause;
9381 u8 flowctl;
9382
9383 if (!phydev->link || !phydev->autoneg)
9384 return 0;
9385
3c1bcc86 9386 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
1770a7a3
PL
9387
9388 if (phydev->pause)
9389 remote_advertising = LPA_PAUSE_CAP;
9390
9391 if (phydev->asym_pause)
9392 remote_advertising |= LPA_PAUSE_ASYM;
9393
9394 flowctl = mii_resolve_flowctrl_fdx(local_advertising,
9395 remote_advertising);
9396 tx_pause = flowctl & FLOW_CTRL_TX;
9397 rx_pause = flowctl & FLOW_CTRL_RX;
9398
9399 if (phydev->duplex == HCLGE_MAC_HALF) {
9400 tx_pause = 0;
9401 rx_pause = 0;
9402 }
9403
9404 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
9405}
9406
46a3df9f
S
9407static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
9408 u32 *rx_en, u32 *tx_en)
9409{
9410 struct hclge_vport *vport = hclge_get_vport(handle);
9411 struct hclge_dev *hdev = vport->back;
fb89629f 9412 struct phy_device *phydev = hdev->hw.mac.phydev;
46a3df9f 9413
fb89629f 9414 *auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
46a3df9f
S
9415
9416 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
9417 *rx_en = 0;
9418 *tx_en = 0;
9419 return;
9420 }
9421
9422 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
9423 *rx_en = 1;
9424 *tx_en = 0;
9425 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
9426 *tx_en = 1;
9427 *rx_en = 0;
9428 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
9429 *rx_en = 1;
9430 *tx_en = 1;
9431 } else {
9432 *rx_en = 0;
9433 *tx_en = 0;
9434 }
9435}
9436
aacbe27e
YL
9437static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
9438 u32 rx_en, u32 tx_en)
9439{
9440 if (rx_en && tx_en)
9441 hdev->fc_mode_last_time = HCLGE_FC_FULL;
9442 else if (rx_en && !tx_en)
9443 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
9444 else if (!rx_en && tx_en)
9445 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
9446 else
9447 hdev->fc_mode_last_time = HCLGE_FC_NONE;
9448
9449 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
9450}
9451
61387774
PL
9452static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
9453 u32 rx_en, u32 tx_en)
9454{
9455 struct hclge_vport *vport = hclge_get_vport(handle);
9456 struct hclge_dev *hdev = vport->back;
9457 struct phy_device *phydev = hdev->hw.mac.phydev;
9458 u32 fc_autoneg;
9459
fb89629f
JS
9460 if (phydev) {
9461 fc_autoneg = hclge_get_autoneg(handle);
9462 if (auto_neg != fc_autoneg) {
9463 dev_info(&hdev->pdev->dev,
9464 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
9465 return -EOPNOTSUPP;
9466 }
61387774
PL
9467 }
9468
9469 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
9470 dev_info(&hdev->pdev->dev,
9471 "Priority flow control enabled. Cannot set link flow control.\n");
9472 return -EOPNOTSUPP;
9473 }
9474
9475 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
9476
aacbe27e
YL
9477 hclge_record_user_pauseparam(hdev, rx_en, tx_en);
9478
fb89629f 9479 if (!auto_neg)
61387774
PL
9480 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
9481
22f48e24
JS
9482 if (phydev)
9483 return phy_start_aneg(phydev);
9484
fb89629f 9485 return -EOPNOTSUPP;
61387774
PL
9486}
9487
46a3df9f
S
9488static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
9489 u8 *auto_neg, u32 *speed, u8 *duplex)
9490{
9491 struct hclge_vport *vport = hclge_get_vport(handle);
9492 struct hclge_dev *hdev = vport->back;
9493
9494 if (speed)
9495 *speed = hdev->hw.mac.speed;
9496 if (duplex)
9497 *duplex = hdev->hw.mac.duplex;
9498 if (auto_neg)
9499 *auto_neg = hdev->hw.mac.autoneg;
9500}
9501
88d10bd6
JS
9502static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
9503 u8 *module_type)
46a3df9f
S
9504{
9505 struct hclge_vport *vport = hclge_get_vport(handle);
9506 struct hclge_dev *hdev = vport->back;
9507
a9775bb6
GH
9508	/* When the nic is down, the service task is not running and does not
 9509	 * update the port information periodically. Query the port information
 9510	 * before returning the media type, to ensure the information is correct.
9511 */
9512 hclge_update_port_info(hdev);
9513
46a3df9f
S
9514 if (media_type)
9515 *media_type = hdev->hw.mac.media_type;
88d10bd6
JS
9516
9517 if (module_type)
9518 *module_type = hdev->hw.mac.module_type;
46a3df9f
S
9519}
9520
9521static void hclge_get_mdix_mode(struct hnae3_handle *handle,
9522 u8 *tp_mdix_ctrl, u8 *tp_mdix)
9523{
9524 struct hclge_vport *vport = hclge_get_vport(handle);
9525 struct hclge_dev *hdev = vport->back;
9526 struct phy_device *phydev = hdev->hw.mac.phydev;
ebaf1908
WL
9527 int mdix_ctrl, mdix, is_resolved;
9528 unsigned int retval;
46a3df9f
S
9529
9530 if (!phydev) {
9531 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
9532 *tp_mdix = ETH_TP_MDI_INVALID;
9533 return;
9534 }
9535
9536 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
9537
9538 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
e4e87715
PL
9539 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
9540 HCLGE_PHY_MDIX_CTRL_S);
46a3df9f
S
9541
9542 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
e4e87715
PL
9543 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
9544 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
46a3df9f
S
9545
9546 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
9547
9548 switch (mdix_ctrl) {
9549 case 0x0:
9550 *tp_mdix_ctrl = ETH_TP_MDI;
9551 break;
9552 case 0x1:
9553 *tp_mdix_ctrl = ETH_TP_MDI_X;
9554 break;
9555 case 0x3:
9556 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
9557 break;
9558 default:
9559 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
9560 break;
9561 }
9562
9563 if (!is_resolved)
9564 *tp_mdix = ETH_TP_MDI_INVALID;
9565 else if (mdix)
9566 *tp_mdix = ETH_TP_MDI_X;
9567 else
9568 *tp_mdix = ETH_TP_MDI;
9569}
9570
bb87be87
YL
9571static void hclge_info_show(struct hclge_dev *hdev)
9572{
9573 struct device *dev = &hdev->pdev->dev;
9574
9575 dev_info(dev, "PF info begin:\n");
9576
adcf738b
GL
9577 dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
9578 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
9579 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
9580 dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
9581	dev_info(dev, "Numbers of vmdq vports: %u\n", hdev->num_vmdq_vport);
9582 dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
9583 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
9584 dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
9585 dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
9586 dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
bb87be87
YL
9587 dev_info(dev, "This is %s PF\n",
9588 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
9589 dev_info(dev, "DCB %s\n",
9590 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
9591 dev_info(dev, "MQPRIO %s\n",
9592 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
9593
9594 dev_info(dev, "PF info end.\n");
9595}
9596
994e04f1
HT
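/* Initialize the NIC client instance; if a reset starts or completes while
 * the instance is being set up, roll back and return -EBUSY.
 */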
9597static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
9598 struct hclge_vport *vport)
9599{
9600 struct hnae3_client *client = vport->nic.client;
9601 struct hclge_dev *hdev = ae_dev->priv;
0bfdf286 9602 int rst_cnt = hdev->rst_stats.reset_cnt;
994e04f1
HT
9603 int ret;
9604
9605 ret = client->ops->init_instance(&vport->nic);
9606 if (ret)
9607 return ret;
9608
9609 set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
7cf9c069
HT
9610 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9611 rst_cnt != hdev->rst_stats.reset_cnt) {
9612 ret = -EBUSY;
9613 goto init_nic_err;
9614 }
9615
00ea6e5f
WL
9616 /* Enable nic hw error interrupts */
9617 ret = hclge_config_nic_hw_error(hdev, true);
bcf643c5 9618 if (ret) {
00ea6e5f
WL
9619 dev_err(&ae_dev->pdev->dev,
9620 "fail(%d) to enable hw error interrupts\n", ret);
bcf643c5
WL
9621 goto init_nic_err;
9622 }
9623
9624 hnae3_set_client_init_flag(client, ae_dev, 1);
00ea6e5f 9625
994e04f1
HT
9626 if (netif_msg_drv(&hdev->vport->nic))
9627 hclge_info_show(hdev);
9628
00ea6e5f 9629 return ret;
7cf9c069
HT
9630
9631init_nic_err:
9632 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9633 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9634 msleep(HCLGE_WAIT_RESET_DONE);
9635
9636 client->ops->uninit_instance(&vport->nic, 0);
9637
9638 return ret;
994e04f1
HT
9639}
9640
9641static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
9642 struct hclge_vport *vport)
9643{
994e04f1 9644 struct hclge_dev *hdev = ae_dev->priv;
31a57fde 9645 struct hnae3_client *client;
7cf9c069 9646 int rst_cnt;
994e04f1
HT
9647 int ret;
9648
9649 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
9650 !hdev->nic_client)
9651 return 0;
9652
9653 client = hdev->roce_client;
9654 ret = hclge_init_roce_base_info(vport);
9655 if (ret)
9656 return ret;
9657
7cf9c069 9658 rst_cnt = hdev->rst_stats.reset_cnt;
994e04f1
HT
9659 ret = client->ops->init_instance(&vport->roce);
9660 if (ret)
9661 return ret;
9662
9663 set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
7cf9c069
HT
9664 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9665 rst_cnt != hdev->rst_stats.reset_cnt) {
9666 ret = -EBUSY;
9667 goto init_roce_err;
9668 }
9669
72fcd2be
HT
9670 /* Enable roce ras interrupts */
9671 ret = hclge_config_rocee_ras_interrupt(hdev, true);
9672 if (ret) {
9673 dev_err(&ae_dev->pdev->dev,
9674 "fail(%d) to enable roce ras interrupts\n", ret);
9675 goto init_roce_err;
9676 }
9677
994e04f1
HT
9678 hnae3_set_client_init_flag(client, ae_dev, 1);
9679
9680 return 0;
7cf9c069
HT
9681
9682init_roce_err:
9683 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9684 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9685 msleep(HCLGE_WAIT_RESET_DONE);
9686
9687 hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
9688
9689 return ret;
994e04f1
HT
9690}
9691
46a3df9f
S
9692static int hclge_init_client_instance(struct hnae3_client *client,
9693 struct hnae3_ae_dev *ae_dev)
9694{
9695 struct hclge_dev *hdev = ae_dev->priv;
9696 struct hclge_vport *vport;
9697 int i, ret;
9698
9699 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9700 vport = &hdev->vport[i];
9701
9702 switch (client->type) {
9703 case HNAE3_CLIENT_KNIC:
46a3df9f
S
9704 hdev->nic_client = client;
9705 vport->nic.client = client;
994e04f1 9706 ret = hclge_init_nic_client_instance(ae_dev, vport);
46a3df9f 9707 if (ret)
49dd8054 9708 goto clear_nic;
46a3df9f 9709
994e04f1
HT
9710 ret = hclge_init_roce_client_instance(ae_dev, vport);
9711 if (ret)
9712 goto clear_roce;
46a3df9f 9713
46a3df9f
S
9714 break;
9715 case HNAE3_CLIENT_ROCE:
e92a0843 9716 if (hnae3_dev_roce_supported(hdev)) {
46a3df9f
S
9717 hdev->roce_client = client;
9718 vport->roce.client = client;
9719 }
9720
994e04f1
HT
9721 ret = hclge_init_roce_client_instance(ae_dev, vport);
9722 if (ret)
9723 goto clear_roce;
fa7a4bd5
JS
9724
9725 break;
9726 default:
9727 return -EINVAL;
46a3df9f
S
9728 }
9729 }
9730
37417c66 9731 return 0;
49dd8054
JS
9732
9733clear_nic:
9734 hdev->nic_client = NULL;
9735 vport->nic.client = NULL;
9736 return ret;
9737clear_roce:
9738 hdev->roce_client = NULL;
9739 vport->roce.client = NULL;
9740 return ret;
46a3df9f
S
9741}
9742
9743static void hclge_uninit_client_instance(struct hnae3_client *client,
9744 struct hnae3_ae_dev *ae_dev)
9745{
9746 struct hclge_dev *hdev = ae_dev->priv;
9747 struct hclge_vport *vport;
9748 int i;
9749
9750 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9751 vport = &hdev->vport[i];
a17dcf3f 9752 if (hdev->roce_client) {
2a0bfc36 9753 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
7cf9c069
HT
9754 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9755 msleep(HCLGE_WAIT_RESET_DONE);
9756
46a3df9f
S
9757 hdev->roce_client->ops->uninit_instance(&vport->roce,
9758 0);
a17dcf3f
L
9759 hdev->roce_client = NULL;
9760 vport->roce.client = NULL;
9761 }
46a3df9f
S
9762 if (client->type == HNAE3_CLIENT_ROCE)
9763 return;
49dd8054 9764 if (hdev->nic_client && client->ops->uninit_instance) {
bd9109c9 9765 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
7cf9c069
HT
9766 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9767 msleep(HCLGE_WAIT_RESET_DONE);
9768
46a3df9f 9769 client->ops->uninit_instance(&vport->nic, 0);
a17dcf3f
L
9770 hdev->nic_client = NULL;
9771 vport->nic.client = NULL;
9772 }
46a3df9f
S
9773 }
9774}
9775
9776static int hclge_pci_init(struct hclge_dev *hdev)
9777{
9778 struct pci_dev *pdev = hdev->pdev;
9779 struct hclge_hw *hw;
9780 int ret;
9781
9782 ret = pci_enable_device(pdev);
9783 if (ret) {
9784 dev_err(&pdev->dev, "failed to enable PCI device\n");
3e249d3b 9785 return ret;
46a3df9f
S
9786 }
9787
9788 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9789 if (ret) {
9790 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9791 if (ret) {
9792 dev_err(&pdev->dev,
9793 "can't set consistent PCI DMA");
9794 goto err_disable_device;
9795 }
9796 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
9797 }
9798
9799 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
9800 if (ret) {
9801 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
9802 goto err_disable_device;
9803 }
9804
9805 pci_set_master(pdev);
9806 hw = &hdev->hw;
46a3df9f
S
9807 hw->io_base = pcim_iomap(pdev, 2, 0);
9808 if (!hw->io_base) {
9809 dev_err(&pdev->dev, "Can't map configuration register space\n");
9810 ret = -ENOMEM;
9811 goto err_clr_master;
9812 }
9813
709eb41a
L
9814 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
9815
46a3df9f
S
9816 return 0;
9817err_clr_master:
9818 pci_clear_master(pdev);
9819 pci_release_regions(pdev);
9820err_disable_device:
9821 pci_disable_device(pdev);
46a3df9f
S
9822
9823 return ret;
9824}
9825
9826static void hclge_pci_uninit(struct hclge_dev *hdev)
9827{
9828 struct pci_dev *pdev = hdev->pdev;
9829
6a814413 9830 pcim_iounmap(pdev, hdev->hw.io_base);
887c3820 9831 pci_free_irq_vectors(pdev);
46a3df9f
S
9832 pci_clear_master(pdev);
9833 pci_release_mem_regions(pdev);
9834 pci_disable_device(pdev);
9835}
9836
48569cda
PL
9837static void hclge_state_init(struct hclge_dev *hdev)
9838{
9839 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
9840 set_bit(HCLGE_STATE_DOWN, &hdev->state);
9841 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
9842 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
d5432455 9843 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
48569cda
PL
9844 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
9845 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
9846}
9847
9848static void hclge_state_uninit(struct hclge_dev *hdev)
9849{
9850 set_bit(HCLGE_STATE_DOWN, &hdev->state);
acfc3d55 9851 set_bit(HCLGE_STATE_REMOVING, &hdev->state);
48569cda 9852
65e41e7e
HT
9853 if (hdev->reset_timer.function)
9854 del_timer_sync(&hdev->reset_timer);
7be1b9f3
YL
9855 if (hdev->service_task.work.func)
9856 cancel_delayed_work_sync(&hdev->service_task);
48569cda
PL
9857}
9858
6b9a97ee
HT
9859static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
9860{
8627bded
HT
9861#define HCLGE_FLR_RETRY_WAIT_MS 500
9862#define HCLGE_FLR_RETRY_CNT 5
6b9a97ee 9863
8627bded
HT
9864 struct hclge_dev *hdev = ae_dev->priv;
9865 int retry_cnt = 0;
9866 int ret;
6b9a97ee 9867
8627bded
HT
9868retry:
9869 down(&hdev->reset_sem);
9870 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9871 hdev->reset_type = HNAE3_FLR_RESET;
9872 ret = hclge_reset_prepare(hdev);
9873 if (ret) {
9874 dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
9875 ret);
9876 if (hdev->reset_pending ||
9877 retry_cnt++ < HCLGE_FLR_RETRY_CNT) {
9878 dev_err(&hdev->pdev->dev,
9879 "reset_pending:0x%lx, retry_cnt:%d\n",
9880 hdev->reset_pending, retry_cnt);
9881 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9882 up(&hdev->reset_sem);
9883 msleep(HCLGE_FLR_RETRY_WAIT_MS);
9884 goto retry;
9885 }
9886 }
6b9a97ee 9887
8627bded
HT
9888 /* disable misc vector before FLR done */
9889 hclge_enable_vector(&hdev->misc_vector, false);
9890 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
9891 hdev->rst_stats.flr_rst_cnt++;
6b9a97ee
HT
9892}
9893
9894static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
9895{
9896 struct hclge_dev *hdev = ae_dev->priv;
8627bded
HT
9897 int ret;
9898
9899 hclge_enable_vector(&hdev->misc_vector, true);
6b9a97ee 9900
8627bded
HT
9901 ret = hclge_reset_rebuild(hdev);
9902 if (ret)
9903 dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
9904
9905 hdev->reset_type = HNAE3_NONE_RESET;
9906 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9907 up(&hdev->reset_sem);
6b9a97ee
HT
9908}
9909
31bb229d
PL
9910static void hclge_clear_resetting_state(struct hclge_dev *hdev)
9911{
9912 u16 i;
9913
9914 for (i = 0; i < hdev->num_alloc_vport; i++) {
9915 struct hclge_vport *vport = &hdev->vport[i];
9916 int ret;
9917
9918 /* Send cmd to clear VF's FUNC_RST_ING */
9919 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
9920 if (ret)
9921 dev_warn(&hdev->pdev->dev,
adcf738b 9922 "clear vf(%u) rst failed %d!\n",
31bb229d
PL
9923 vport->vport_id, ret);
9924 }
9925}
9926
46a3df9f
S
9927static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
9928{
9929 struct pci_dev *pdev = ae_dev->pdev;
46a3df9f
S
9930 struct hclge_dev *hdev;
9931 int ret;
9932
9933 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
9934 if (!hdev) {
9935 ret = -ENOMEM;
ffd5656e 9936 goto out;
46a3df9f
S
9937 }
9938
46a3df9f
S
9939 hdev->pdev = pdev;
9940 hdev->ae_dev = ae_dev;
4ed340ab 9941 hdev->reset_type = HNAE3_NONE_RESET;
0742ed7c 9942 hdev->reset_level = HNAE3_FUNC_RESET;
46a3df9f 9943 ae_dev->priv = hdev;
9e690456
GH
9944
9945	/* HW supports 2 layers of vlan */
e6d7d79d 9946 hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
46a3df9f 9947
818f1675 9948 mutex_init(&hdev->vport_lock);
44122887 9949 spin_lock_init(&hdev->fd_rule_lock);
8627bded 9950 sema_init(&hdev->reset_sem, 1);
818f1675 9951
46a3df9f 9952 ret = hclge_pci_init(hdev);
60df7e91 9953 if (ret)
ffd5656e 9954 goto out;
46a3df9f 9955
3efb960f
L
9956 /* Firmware command queue initialize */
9957 ret = hclge_cmd_queue_init(hdev);
60df7e91 9958 if (ret)
ffd5656e 9959 goto err_pci_uninit;
3efb960f
L
9960
9961 /* Firmware command initialize */
46a3df9f
S
9962 ret = hclge_cmd_init(hdev);
9963 if (ret)
ffd5656e 9964 goto err_cmd_uninit;
46a3df9f
S
9965
9966 ret = hclge_get_cap(hdev);
60df7e91 9967 if (ret)
ffd5656e 9968 goto err_cmd_uninit;
46a3df9f
S
9969
9970 ret = hclge_configure(hdev);
9971 if (ret) {
9972 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
ffd5656e 9973 goto err_cmd_uninit;
46a3df9f
S
9974 }
9975
887c3820 9976 ret = hclge_init_msi(hdev);
46a3df9f 9977 if (ret) {
887c3820 9978 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
ffd5656e 9979 goto err_cmd_uninit;
46a3df9f
S
9980 }
9981
466b0c00 9982 ret = hclge_misc_irq_init(hdev);
60df7e91 9983 if (ret)
ffd5656e 9984 goto err_msi_uninit;
466b0c00 9985
46a3df9f
S
9986 ret = hclge_alloc_tqps(hdev);
9987 if (ret) {
9988 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
ffd5656e 9989 goto err_msi_irq_uninit;
46a3df9f
S
9990 }
9991
9992 ret = hclge_alloc_vport(hdev);
60df7e91 9993 if (ret)
ffd5656e 9994 goto err_msi_irq_uninit;
46a3df9f 9995
7df7dad6 9996 ret = hclge_map_tqp(hdev);
60df7e91 9997 if (ret)
2312e050 9998 goto err_msi_irq_uninit;
7df7dad6 9999
c5ef83cb
HT
10000 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
10001 ret = hclge_mac_mdio_config(hdev);
60df7e91 10002 if (ret)
2312e050 10003 goto err_msi_irq_uninit;
cf9cca2d 10004 }
10005
39932473 10006 ret = hclge_init_umv_space(hdev);
60df7e91 10007 if (ret)
9fc55413 10008 goto err_mdiobus_unreg;
39932473 10009
46a3df9f
S
10010 ret = hclge_mac_init(hdev);
10011 if (ret) {
10012 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
ffd5656e 10013 goto err_mdiobus_unreg;
46a3df9f 10014 }
46a3df9f
S
10015
10016 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
10017 if (ret) {
10018 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
ffd5656e 10019 goto err_mdiobus_unreg;
46a3df9f
S
10020 }
10021
b26a6fea
PL
10022 ret = hclge_config_gro(hdev, true);
10023 if (ret)
10024 goto err_mdiobus_unreg;
10025
46a3df9f
S
10026 ret = hclge_init_vlan_config(hdev);
10027 if (ret) {
10028 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
ffd5656e 10029 goto err_mdiobus_unreg;
46a3df9f
S
10030 }
10031
10032 ret = hclge_tm_schd_init(hdev);
10033 if (ret) {
10034 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
ffd5656e 10035 goto err_mdiobus_unreg;
68ece54e
YL
10036 }
10037
268f5dfa 10038 hclge_rss_init_cfg(hdev);
68ece54e
YL
10039 ret = hclge_rss_init_hw(hdev);
10040 if (ret) {
10041 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
ffd5656e 10042 goto err_mdiobus_unreg;
46a3df9f
S
10043 }
10044
f5aac71c
FL
10045 ret = init_mgr_tbl(hdev);
10046 if (ret) {
10047 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
ffd5656e 10048 goto err_mdiobus_unreg;
f5aac71c
FL
10049 }
10050
d695964d
JS
10051 ret = hclge_init_fd_config(hdev);
10052 if (ret) {
10053 dev_err(&pdev->dev,
10054 "fd table init fail, ret=%d\n", ret);
10055 goto err_mdiobus_unreg;
10056 }
10057
a6345787
WL
10058 INIT_KFIFO(hdev->mac_tnl_log);
10059
cacde272
YL
10060 hclge_dcb_ops_set(hdev);
10061
65e41e7e 10062 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
7be1b9f3 10063 INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
46a3df9f 10064
08125454
YL
10065 /* Setup affinity after service timer setup because add_timer_on
10066 * is called in affinity notify.
10067 */
10068 hclge_misc_affinity_setup(hdev);
10069
8e52a602 10070 hclge_clear_all_event_cause(hdev);
31bb229d 10071 hclge_clear_resetting_state(hdev);
8e52a602 10072
e4193e24
SJ
10073	/* Log and clear the hw errors that already occurred */
10074 hclge_handle_all_hns_hw_errors(ae_dev);
10075
e3b84ed2
SJ
10076	/* request delayed reset for the error recovery because an immediate
 10077	 * global reset on a PF may affect pending initialization of other PFs
10078 */
10079 if (ae_dev->hw_err_reset_req) {
10080 enum hnae3_reset_type reset_level;
10081
10082 reset_level = hclge_get_reset_level(ae_dev,
10083 &ae_dev->hw_err_reset_req);
10084 hclge_set_def_reset_request(ae_dev, reset_level);
10085 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
10086 }
10087
466b0c00
L
10088 /* Enable MISC vector(vector0) */
10089 hclge_enable_vector(&hdev->misc_vector, true);
10090
48569cda 10091 hclge_state_init(hdev);
0742ed7c 10092 hdev->last_reset_time = jiffies;
46a3df9f 10093
08d80a4c
HT
10094 dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
10095 HCLGE_DRIVER_NAME);
10096
1c6dfe6f
YL
10097 hclge_task_schedule(hdev, round_jiffies_relative(HZ));
10098
46a3df9f
S
10099 return 0;
10100
ffd5656e
HT
10101err_mdiobus_unreg:
10102 if (hdev->hw.mac.phydev)
10103 mdiobus_unregister(hdev->hw.mac.mdio_bus);
ffd5656e
HT
10104err_msi_irq_uninit:
10105 hclge_misc_irq_uninit(hdev);
10106err_msi_uninit:
10107 pci_free_irq_vectors(pdev);
10108err_cmd_uninit:
232d0d55 10109 hclge_cmd_uninit(hdev);
ffd5656e 10110err_pci_uninit:
6a814413 10111 pcim_iounmap(pdev, hdev->hw.io_base);
ffd5656e 10112 pci_clear_master(pdev);
46a3df9f 10113 pci_release_regions(pdev);
ffd5656e 10114 pci_disable_device(pdev);
ffd5656e 10115out:
46a3df9f
S
10116 return ret;
10117}
10118
c6dc5213 10119static void hclge_stats_clear(struct hclge_dev *hdev)
10120{
1c6dfe6f 10121 memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
c6dc5213 10122}
10123
22044f95
JS
10124static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10125{
10126 return hclge_config_switch_param(hdev, vf, enable,
10127 HCLGE_SWITCH_ANTI_SPOOF_MASK);
10128}
10129
10130static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10131{
10132 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
10133 HCLGE_FILTER_FE_NIC_INGRESS_B,
10134 enable, vf);
10135}
10136
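/* Enable or disable both MAC and VLAN spoof checking for the given VF. */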
10137static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
10138{
10139 int ret;
10140
10141 ret = hclge_set_mac_spoofchk(hdev, vf, enable);
10142 if (ret) {
10143 dev_err(&hdev->pdev->dev,
10144 "Set vf %d mac spoof check %s failed, ret=%d\n",
10145 vf, enable ? "on" : "off", ret);
10146 return ret;
10147 }
10148
10149 ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
10150 if (ret)
10151 dev_err(&hdev->pdev->dev,
10152 "Set vf %d vlan spoof check %s failed, ret=%d\n",
10153 vf, enable ? "on" : "off", ret);
10154
10155 return ret;
10156}
10157
10158static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
10159 bool enable)
10160{
10161 struct hclge_vport *vport = hclge_get_vport(handle);
10162 struct hclge_dev *hdev = vport->back;
10163 u32 new_spoofchk = enable ? 1 : 0;
10164 int ret;
10165
10166 if (hdev->pdev->revision == 0x20)
10167 return -EOPNOTSUPP;
10168
10169 vport = hclge_get_vf_vport(hdev, vf);
10170 if (!vport)
10171 return -EINVAL;
10172
10173 if (vport->vf_info.spoofchk == new_spoofchk)
10174 return 0;
10175
10176 if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
10177 dev_warn(&hdev->pdev->dev,
10178 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
10179 vf);
7d0b3451 10180 else if (enable && hclge_is_umv_space_full(vport, true))
22044f95
JS
10181 dev_warn(&hdev->pdev->dev,
10182 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
10183 vf);
10184
10185 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
10186 if (ret)
10187 return ret;
10188
10189 vport->vf_info.spoofchk = new_spoofchk;
10190 return 0;
10191}
10192
10193static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
10194{
10195 struct hclge_vport *vport = hdev->vport;
10196 int ret;
10197 int i;
10198
10199 if (hdev->pdev->revision == 0x20)
10200 return 0;
10201
10202 /* resume the vf spoof check state after reset */
10203 for (i = 0; i < hdev->num_alloc_vport; i++) {
10204 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
10205 vport->vf_info.spoofchk);
10206 if (ret)
10207 return ret;
10208
10209 vport++;
10210 }
10211
10212 return 0;
10213}
10214
e196ec75
JS
10215static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
10216{
10217 struct hclge_vport *vport = hclge_get_vport(handle);
10218 struct hclge_dev *hdev = vport->back;
10219 u32 new_trusted = enable ? 1 : 0;
10220 bool en_bc_pmc;
10221 int ret;
10222
10223 vport = hclge_get_vf_vport(hdev, vf);
10224 if (!vport)
10225 return -EINVAL;
10226
10227 if (vport->vf_info.trusted == new_trusted)
10228 return 0;
10229
10230 /* Disable promisc mode for VF if it is not trusted any more. */
10231 if (!enable && vport->vf_info.promisc_enable) {
10232 en_bc_pmc = hdev->pdev->revision != 0x20;
10233 ret = hclge_set_vport_promisc_mode(vport, false, false,
10234 en_bc_pmc);
10235 if (ret)
10236 return ret;
10237 vport->vf_info.promisc_enable = 0;
10238 hclge_inform_vf_promisc_info(vport);
10239 }
10240
10241 vport->vf_info.trusted = new_trusted;
10242
10243 return 0;
10244}
10245
ee9e4424
YL
10246static void hclge_reset_vf_rate(struct hclge_dev *hdev)
10247{
10248 int ret;
10249 int vf;
10250
10251 /* reset vf rate to default value */
10252 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10253 struct hclge_vport *vport = &hdev->vport[vf];
10254
10255 vport->vf_info.max_tx_rate = 0;
10256 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
10257 if (ret)
10258 dev_err(&hdev->pdev->dev,
10259 "vf%d failed to reset to default, ret=%d\n",
10260 vf - HCLGE_VF_VPORT_START_NUM, ret);
10261 }
10262}
10263
10264static int hclge_vf_rate_param_check(struct hclge_dev *hdev, int vf,
10265 int min_tx_rate, int max_tx_rate)
10266{
10267 if (min_tx_rate != 0 ||
10268 max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
10269 dev_err(&hdev->pdev->dev,
10270 "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
10271 min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
10272 return -EINVAL;
10273 }
10274
10275 return 0;
10276}
10277
10278static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
10279 int min_tx_rate, int max_tx_rate, bool force)
10280{
10281 struct hclge_vport *vport = hclge_get_vport(handle);
10282 struct hclge_dev *hdev = vport->back;
10283 int ret;
10284
10285 ret = hclge_vf_rate_param_check(hdev, vf, min_tx_rate, max_tx_rate);
10286 if (ret)
10287 return ret;
10288
10289 vport = hclge_get_vf_vport(hdev, vf);
10290 if (!vport)
10291 return -EINVAL;
10292
10293 if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
10294 return 0;
10295
10296 ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
10297 if (ret)
10298 return ret;
10299
10300 vport->vf_info.max_tx_rate = max_tx_rate;
10301
10302 return 0;
10303}
10304
10305static int hclge_resume_vf_rate(struct hclge_dev *hdev)
10306{
10307 struct hnae3_handle *handle = &hdev->vport->nic;
10308 struct hclge_vport *vport;
10309 int ret;
10310 int vf;
10311
10312 /* resume the vf max_tx_rate after reset */
10313 for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
10314 vport = hclge_get_vf_vport(hdev, vf);
10315 if (!vport)
10316 return -EINVAL;
10317
10318		/* zero means max rate; after reset, the firmware has already set it
 10319		 * to max rate, so just continue.
10320 */
10321 if (!vport->vf_info.max_tx_rate)
10322 continue;
10323
10324 ret = hclge_set_vf_rate(handle, vf, 0,
10325 vport->vf_info.max_tx_rate, true);
10326 if (ret) {
10327 dev_err(&hdev->pdev->dev,
10328 "vf%d failed to resume tx_rate:%u, ret=%d\n",
10329 vf, vport->vf_info.max_tx_rate, ret);
10330 return ret;
10331 }
10332 }
10333
10334 return 0;
10335}
10336
a6d818e3
YL
10337static void hclge_reset_vport_state(struct hclge_dev *hdev)
10338{
10339 struct hclge_vport *vport = hdev->vport;
10340 int i;
10341
10342 for (i = 0; i < hdev->num_alloc_vport; i++) {
0f14c5b1 10343 hclge_vport_stop(vport);
a6d818e3
YL
10344 vport++;
10345 }
10346}
10347
4ed340ab
L
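/* Re-initialize the hardware after a reset: command queue, MAC, VLAN, TM,
 * RSS, flow director and error interrupts, then restore the per-VF state.
 */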
10348static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
10349{
10350 struct hclge_dev *hdev = ae_dev->priv;
10351 struct pci_dev *pdev = ae_dev->pdev;
10352 int ret;
10353
10354 set_bit(HCLGE_STATE_DOWN, &hdev->state);
10355
c6dc5213 10356 hclge_stats_clear(hdev);
ee4bcd3b
JS
10357	/* NOTE: pf reset doesn't need to clear or restore the pf and vf table
 10358	 * entries, so the tables in memory should not be cleaned here.
10359 */
10360 if (hdev->reset_type == HNAE3_IMP_RESET ||
10361 hdev->reset_type == HNAE3_GLOBAL_RESET) {
039ba863
JS
10362 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
10363 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
ee4bcd3b
JS
10364 bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
10365 hclge_reset_umv_space(hdev);
10366 }
10367
4ed340ab
L
10368 ret = hclge_cmd_init(hdev);
10369 if (ret) {
10370 dev_err(&pdev->dev, "Cmd queue init failed\n");
10371 return ret;
10372 }
10373
4ed340ab
L
10374 ret = hclge_map_tqp(hdev);
10375 if (ret) {
10376 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
10377 return ret;
10378 }
10379
10380 ret = hclge_mac_init(hdev);
10381 if (ret) {
10382 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
10383 return ret;
10384 }
10385
4ed340ab
L
10386 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
10387 if (ret) {
10388 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
10389 return ret;
10390 }
10391
b26a6fea
PL
10392 ret = hclge_config_gro(hdev, true);
10393 if (ret)
10394 return ret;
10395
4ed340ab
L
10396 ret = hclge_init_vlan_config(hdev);
10397 if (ret) {
10398 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
10399 return ret;
10400 }
10401
44e59e37 10402 ret = hclge_tm_init_hw(hdev, true);
4ed340ab 10403 if (ret) {
f31c1ba6 10404 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
4ed340ab
L
10405 return ret;
10406 }
10407
10408 ret = hclge_rss_init_hw(hdev);
10409 if (ret) {
10410 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
10411 return ret;
10412 }
10413
d0db7ed3
YM
10414 ret = init_mgr_tbl(hdev);
10415 if (ret) {
10416 dev_err(&pdev->dev,
10417 "failed to reinit manager table, ret = %d\n", ret);
10418 return ret;
10419 }
10420
d695964d
JS
10421 ret = hclge_init_fd_config(hdev);
10422 if (ret) {
9b2f3477 10423 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
d695964d
JS
10424 return ret;
10425 }
10426
4fdd0bca
JS
10427	/* Log and clear the hw errors that already occurred */
10428 hclge_handle_all_hns_hw_errors(ae_dev);
10429
f3fa4a94 10430 /* Re-enable the hw error interrupts because
00ea6e5f 10431 * the interrupts get disabled on global reset.
01865a50 10432 */
00ea6e5f 10433 ret = hclge_config_nic_hw_error(hdev, true);
f3fa4a94
SJ
10434 if (ret) {
10435 dev_err(&pdev->dev,
00ea6e5f
WL
10436 "fail(%d) to re-enable NIC hw error interrupts\n",
10437 ret);
f3fa4a94
SJ
10438 return ret;
10439 }
01865a50 10440
00ea6e5f
WL
10441 if (hdev->roce_client) {
10442 ret = hclge_config_rocee_ras_interrupt(hdev, true);
10443 if (ret) {
10444 dev_err(&pdev->dev,
10445 "fail(%d) to re-enable roce ras interrupts\n",
10446 ret);
10447 return ret;
10448 }
10449 }
10450
a6d818e3 10451 hclge_reset_vport_state(hdev);
22044f95
JS
10452 ret = hclge_reset_vport_spoofchk(hdev);
10453 if (ret)
10454 return ret;
a6d818e3 10455
ee9e4424
YL
10456 ret = hclge_resume_vf_rate(hdev);
10457 if (ret)
10458 return ret;
10459
4ed340ab
L
10460 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
10461 HCLGE_DRIVER_NAME);
10462
10463 return 0;
10464}
10465
46a3df9f
S
10466static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
10467{
10468 struct hclge_dev *hdev = ae_dev->priv;
10469 struct hclge_mac *mac = &hdev->hw.mac;
10470
ee9e4424 10471 hclge_reset_vf_rate(hdev);
59359fc8 10472 hclge_clear_vf_vlan(hdev);
08125454 10473 hclge_misc_affinity_teardown(hdev);
48569cda 10474 hclge_state_uninit(hdev);
ee4bcd3b 10475 hclge_uninit_mac_table(hdev);
46a3df9f
S
10476
10477 if (mac->phydev)
10478 mdiobus_unregister(mac->mdio_bus);
10479
466b0c00
L
10480 /* Disable MISC vector(vector0) */
10481 hclge_enable_vector(&hdev->misc_vector, false);
8e52a602
XW
10482 synchronize_irq(hdev->misc_vector.vector_irq);
10483
00ea6e5f 10484 /* Disable all hw interrupts */
a6345787 10485 hclge_config_mac_tnl_int(hdev, false);
00ea6e5f
WL
10486 hclge_config_nic_hw_error(hdev, false);
10487 hclge_config_rocee_ras_interrupt(hdev, false);
10488
232d0d55 10489 hclge_cmd_uninit(hdev);
ca1d7669 10490 hclge_misc_irq_uninit(hdev);
46a3df9f 10491 hclge_pci_uninit(hdev);
818f1675 10492 mutex_destroy(&hdev->vport_lock);
c6075b19 10493 hclge_uninit_vport_vlan_table(hdev);
46a3df9f
S
10494 ae_dev->priv = NULL;
10495}
10496
482d2e9c
PL
10497static u32 hclge_get_max_channels(struct hnae3_handle *handle)
10498{
10499 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10500 struct hclge_vport *vport = hclge_get_vport(handle);
10501 struct hclge_dev *hdev = vport->back;
10502
c3b9c50d
HT
10503 return min_t(u32, hdev->rss_size_max,
10504 vport->alloc_tqps / kinfo->num_tc);
482d2e9c
PL
10505}
10506
10507static void hclge_get_channels(struct hnae3_handle *handle,
10508 struct ethtool_channels *ch)
10509{
482d2e9c
PL
10510 ch->max_combined = hclge_get_max_channels(handle);
10511 ch->other_count = 1;
10512 ch->max_other = 1;
c3b9c50d 10513 ch->combined_count = handle->kinfo.rss_size;
482d2e9c
PL
10514}
10515
09f2af64 10516static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
0d43bf45 10517 u16 *alloc_tqps, u16 *max_rss_size)
09f2af64
PL
10518{
10519 struct hclge_vport *vport = hclge_get_vport(handle);
10520 struct hclge_dev *hdev = vport->back;
09f2af64 10521
0d43bf45 10522 *alloc_tqps = vport->alloc_tqps;
09f2af64
PL
10523 *max_rss_size = hdev->rss_size_max;
10524}
10525
90c68a41
YL
10526static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
10527 bool rxfh_configured)
09f2af64
PL
10528{
10529 struct hclge_vport *vport = hclge_get_vport(handle);
10530 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
354d0fab 10531 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
09f2af64 10532 struct hclge_dev *hdev = vport->back;
354d0fab 10533 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
adcf738b
GL
10534 u16 cur_rss_size = kinfo->rss_size;
10535 u16 cur_tqps = kinfo->num_tqps;
09f2af64 10536 u16 tc_valid[HCLGE_MAX_TC_NUM];
09f2af64
PL
10537 u16 roundup_size;
10538 u32 *rss_indir;
ebaf1908
WL
10539 unsigned int i;
10540 int ret;
09f2af64 10541
672ad0ed 10542 kinfo->req_rss_size = new_tqps_num;
09f2af64 10543
672ad0ed 10544 ret = hclge_tm_vport_map_update(hdev);
09f2af64 10545 if (ret) {
672ad0ed 10546 		dev_err(&hdev->pdev->dev, "tm vport map fail, ret = %d\n", ret);
09f2af64
PL
10547 return ret;
10548 }
10549
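	/* round rss_size up to a power of two and convert it to a log2
	 * value; this is what the per-TC size fields below carry
	 */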
10550 roundup_size = roundup_pow_of_two(kinfo->rss_size);
10551 roundup_size = ilog2(roundup_size);
10552 /* Set the RSS TC mode according to the new RSS size */
10553 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
10554 tc_valid[i] = 0;
10555
10556 if (!(hdev->hw_tc_map & BIT(i)))
10557 continue;
10558
10559 tc_valid[i] = 1;
10560 tc_size[i] = roundup_size;
10561 tc_offset[i] = kinfo->rss_size * i;
10562 }
10563 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
10564 if (ret)
10565 return ret;
10566
90c68a41
YL
10567 	/* RSS indirection table has been configured by the user */
10568 if (rxfh_configured)
10569 goto out;
10570
09f2af64
PL
10571 	/* Reinitialize the RSS indirection table according to the new RSS size */
10572 rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
10573 if (!rss_indir)
10574 return -ENOMEM;
10575
10576 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
10577 rss_indir[i] = i % kinfo->rss_size;
10578
10579 ret = hclge_set_rss(handle, rss_indir, NULL, 0);
10580 if (ret)
10581 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
10582 ret);
10583
10584 kfree(rss_indir);
10585
90c68a41 10586out:
09f2af64
PL
10587 if (!ret)
10588 dev_info(&hdev->pdev->dev,
adcf738b 10589 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
09f2af64
PL
10590 cur_rss_size, kinfo->rss_size,
10591 cur_tqps, kinfo->rss_size * kinfo->num_tc);
10592
10593 return ret;
10594}
10595
77b34110
FL
10596static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
10597 u32 *regs_num_64_bit)
10598{
10599 struct hclge_desc desc;
10600 u32 total_num;
10601 int ret;
10602
10603 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
10604 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10605 if (ret) {
10606 dev_err(&hdev->pdev->dev,
10607 "Query register number cmd failed, ret = %d.\n", ret);
10608 return ret;
10609 }
10610
10611 *regs_num_32_bit = le32_to_cpu(desc.data[0]);
10612 *regs_num_64_bit = le32_to_cpu(desc.data[1]);
10613
10614 total_num = *regs_num_32_bit + *regs_num_64_bit;
10615 if (!total_num)
10616 return -EINVAL;
10617
10618 return 0;
10619}
10620
10621static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10622 void *data)
10623{
10624#define HCLGE_32_BIT_REG_RTN_DATANUM 8
b37ce587 10625#define HCLGE_32_BIT_DESC_NODATA_LEN 2
77b34110
FL
10626
10627 struct hclge_desc *desc;
10628 u32 *reg_val = data;
10629 __le32 *desc_data;
b37ce587 10630 int nodata_num;
77b34110
FL
10631 int cmd_num;
10632 int i, k, n;
10633 int ret;
10634
10635 if (regs_num == 0)
10636 return 0;
10637
b37ce587
YM
10638 nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
10639 cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
10640 HCLGE_32_BIT_REG_RTN_DATANUM);
77b34110
FL
10641 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10642 if (!desc)
10643 return -ENOMEM;
10644
10645 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
10646 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10647 if (ret) {
10648 dev_err(&hdev->pdev->dev,
10649 "Query 32 bit register cmd failed, ret = %d.\n", ret);
10650 kfree(desc);
10651 return ret;
10652 }
10653
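	/* BD0 still holds the command header, so it carries nodata_num
	 * fewer register values; the following BDs are read as raw data
	 */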
10654 for (i = 0; i < cmd_num; i++) {
10655 if (i == 0) {
10656 desc_data = (__le32 *)(&desc[i].data[0]);
b37ce587 10657 n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
77b34110
FL
10658 } else {
10659 desc_data = (__le32 *)(&desc[i]);
10660 n = HCLGE_32_BIT_REG_RTN_DATANUM;
10661 }
10662 for (k = 0; k < n; k++) {
10663 *reg_val++ = le32_to_cpu(*desc_data++);
10664
10665 regs_num--;
10666 if (!regs_num)
10667 break;
10668 }
10669 }
10670
10671 kfree(desc);
10672 return 0;
10673}
10674
10675static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10676 void *data)
10677{
10678#define HCLGE_64_BIT_REG_RTN_DATANUM 4
b37ce587 10679#define HCLGE_64_BIT_DESC_NODATA_LEN 1
77b34110
FL
10680
10681 struct hclge_desc *desc;
10682 u64 *reg_val = data;
10683 __le64 *desc_data;
b37ce587 10684 int nodata_len;
77b34110
FL
10685 int cmd_num;
10686 int i, k, n;
10687 int ret;
10688
10689 if (regs_num == 0)
10690 return 0;
10691
b37ce587
YM
10692 nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
10693 cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
10694 HCLGE_64_BIT_REG_RTN_DATANUM);
77b34110
FL
10695 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10696 if (!desc)
10697 return -ENOMEM;
10698
10699 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
10700 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10701 if (ret) {
10702 dev_err(&hdev->pdev->dev,
10703 "Query 64 bit register cmd failed, ret = %d.\n", ret);
10704 kfree(desc);
10705 return ret;
10706 }
10707
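	/* same BD layout as the 32 bit query: BD0 keeps its command
	 * header, the remaining BDs are raw register data
	 */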
10708 for (i = 0; i < cmd_num; i++) {
10709 if (i == 0) {
10710 desc_data = (__le64 *)(&desc[i].data[0]);
b37ce587 10711 n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
77b34110
FL
10712 } else {
10713 desc_data = (__le64 *)(&desc[i]);
10714 n = HCLGE_64_BIT_REG_RTN_DATANUM;
10715 }
10716 for (k = 0; k < n; k++) {
10717 *reg_val++ = le64_to_cpu(*desc_data++);
10718
10719 regs_num--;
10720 if (!regs_num)
10721 break;
10722 }
10723 }
10724
10725 kfree(desc);
10726 return 0;
10727}
10728
ea4750ca 10729#define MAX_SEPARATE_NUM 4
ddb54554 10730#define SEPARATOR_VALUE 0xFDFCFBFA
ea4750ca
JS
10731#define REG_NUM_PER_LINE 4
10732#define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
ddb54554
GH
10733#define REG_SEPARATOR_LINE 1
10734#define REG_NUM_REMAIN_MASK 3
10735#define BD_LIST_MAX_NUM 30
ea4750ca 10736
ddb54554 10737int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
77b34110 10738{
ddb54554
GH
10739 	/* prepare 4 commands to query DFX BD number */
10740 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true);
10741 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10742 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true);
10743 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10744 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true);
10745 desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10746 hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true);
10747
10748 return hclge_cmd_send(&hdev->hw, desc, 4);
10749}
10750
10751static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
10752 int *bd_num_list,
10753 u32 type_num)
10754{
ddb54554 10755 u32 entries_per_desc, desc_index, index, offset, i;
9027d043 10756 struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
77b34110
FL
10757 int ret;
10758
ddb54554 10759 ret = hclge_query_bd_num_cmd_send(hdev, desc);
77b34110
FL
10760 if (ret) {
10761 dev_err(&hdev->pdev->dev,
ddb54554
GH
10762 "Get dfx bd num fail, status is %d.\n", ret);
10763 return ret;
77b34110
FL
10764 }
10765
ddb54554
GH
10766 entries_per_desc = ARRAY_SIZE(desc[0].data);
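	/* each DFX register type's BD count sits at a fixed offset in the
	 * response; map that offset to a (descriptor, data word) pair
	 */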
10767 for (i = 0; i < type_num; i++) {
10768 offset = hclge_dfx_bd_offset_list[i];
10769 index = offset % entries_per_desc;
10770 desc_index = offset / entries_per_desc;
10771 bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
10772 }
ea4750ca 10773
ddb54554 10774 return ret;
77b34110
FL
10775}
10776
ddb54554
GH
10777static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
10778 struct hclge_desc *desc_src, int bd_num,
10779 enum hclge_opcode_type cmd)
77b34110 10780{
ddb54554
GH
10781 struct hclge_desc *desc = desc_src;
10782 int i, ret;
10783
10784 hclge_cmd_setup_basic_desc(desc, cmd, true);
10785 for (i = 0; i < bd_num - 1; i++) {
10786 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10787 desc++;
10788 hclge_cmd_setup_basic_desc(desc, cmd, true);
10789 }
10790
10791 desc = desc_src;
10792 ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
10793 if (ret)
10794 dev_err(&hdev->pdev->dev,
10795 "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
10796 cmd, ret);
10797
10798 return ret;
10799}
10800
10801static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
10802 void *data)
10803{
10804 int entries_per_desc, reg_num, separator_num, desc_index, index, i;
10805 struct hclge_desc *desc = desc_src;
ea4750ca 10806 u32 *reg = data;
ddb54554
GH
10807
10808 entries_per_desc = ARRAY_SIZE(desc->data);
10809 reg_num = entries_per_desc * bd_num;
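	/* append 1 to REG_NUM_PER_LINE SEPARATOR_VALUE entries so the block
	 * consumes the whole number of dump lines reserved for it
	 */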
10810 separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
10811 for (i = 0; i < reg_num; i++) {
10812 index = i % entries_per_desc;
10813 desc_index = i / entries_per_desc;
10814 *reg++ = le32_to_cpu(desc[desc_index].data[index]);
10815 }
10816 for (i = 0; i < separator_num; i++)
10817 *reg++ = SEPARATOR_VALUE;
10818
10819 return reg_num + separator_num;
10820}
10821
10822static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
10823{
10824 u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
72fa4904 10825 int data_len_per_desc, bd_num, i;
ddb54554 10826 int bd_num_list[BD_LIST_MAX_NUM];
72fa4904 10827 u32 data_len;
77b34110
FL
10828 int ret;
10829
ddb54554
GH
10830 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10831 if (ret) {
10832 dev_err(&hdev->pdev->dev,
10833 "Get dfx reg bd num fail, status is %d.\n", ret);
10834 return ret;
10835 }
77b34110 10836
c593642c 10837 data_len_per_desc = sizeof_field(struct hclge_desc, data);
ddb54554
GH
10838 *len = 0;
10839 for (i = 0; i < dfx_reg_type_num; i++) {
10840 bd_num = bd_num_list[i];
10841 data_len = data_len_per_desc * bd_num;
10842 *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
10843 }
10844
10845 return ret;
10846}
10847
10848static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
10849{
10850 u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10851 int bd_num, bd_num_max, buf_len, i;
10852 int bd_num_list[BD_LIST_MAX_NUM];
10853 struct hclge_desc *desc_src;
10854 u32 *reg = data;
10855 int ret;
10856
10857 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
77b34110
FL
10858 if (ret) {
10859 dev_err(&hdev->pdev->dev,
ddb54554
GH
10860 "Get dfx reg bd num fail, status is %d.\n", ret);
10861 return ret;
10862 }
10863
10864 bd_num_max = bd_num_list[0];
10865 for (i = 1; i < dfx_reg_type_num; i++)
10866 bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
10867
10868 buf_len = sizeof(*desc_src) * bd_num_max;
10869 desc_src = kzalloc(buf_len, GFP_KERNEL);
322cb97c 10870 if (!desc_src)
ddb54554 10871 return -ENOMEM;
77b34110 10872
ddb54554
GH
10873 for (i = 0; i < dfx_reg_type_num; i++) {
10874 bd_num = bd_num_list[i];
10875 ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
10876 hclge_dfx_reg_opcode_list[i]);
10877 if (ret) {
10878 dev_err(&hdev->pdev->dev,
10879 "Get dfx reg fail, status is %d.\n", ret);
10880 break;
10881 }
10882
10883 reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
10884 }
10885
10886 kfree(desc_src);
10887 return ret;
10888}
10889
10890static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
10891 struct hnae3_knic_private_info *kinfo)
10892{
10893#define HCLGE_RING_REG_OFFSET 0x200
10894#define HCLGE_RING_INT_REG_OFFSET 0x4
10895
10896 int i, j, reg_num, separator_num;
10897 int data_num_sum;
10898 u32 *reg = data;
10899
ea4750ca 10900 	/* fetch per-PF register values from the PF PCIe register space */
ddb54554
GH
10901 reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
10902 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10903 for (i = 0; i < reg_num; i++)
ea4750ca
JS
10904 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
10905 for (i = 0; i < separator_num; i++)
10906 *reg++ = SEPARATOR_VALUE;
ddb54554 10907 data_num_sum = reg_num + separator_num;
ea4750ca 10908
ddb54554
GH
10909 reg_num = ARRAY_SIZE(common_reg_addr_list);
10910 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10911 for (i = 0; i < reg_num; i++)
ea4750ca
JS
10912 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
10913 for (i = 0; i < separator_num; i++)
10914 *reg++ = SEPARATOR_VALUE;
ddb54554 10915 data_num_sum += reg_num + separator_num;
ea4750ca 10916
ddb54554
GH
10917 reg_num = ARRAY_SIZE(ring_reg_addr_list);
10918 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
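	/* ring registers are replicated per TQP at a stride of
	 * HCLGE_RING_REG_OFFSET, so dump one block per TQP
	 */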
ea4750ca 10919 for (j = 0; j < kinfo->num_tqps; j++) {
ddb54554 10920 for (i = 0; i < reg_num; i++)
ea4750ca
JS
10921 *reg++ = hclge_read_dev(&hdev->hw,
10922 ring_reg_addr_list[i] +
ddb54554 10923 HCLGE_RING_REG_OFFSET * j);
ea4750ca
JS
10924 for (i = 0; i < separator_num; i++)
10925 *reg++ = SEPARATOR_VALUE;
10926 }
ddb54554 10927 data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
ea4750ca 10928
ddb54554
GH
10929 reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
10930 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
ea4750ca 10931 for (j = 0; j < hdev->num_msi_used - 1; j++) {
ddb54554 10932 for (i = 0; i < reg_num; i++)
ea4750ca
JS
10933 *reg++ = hclge_read_dev(&hdev->hw,
10934 tqp_intr_reg_addr_list[i] +
ddb54554 10935 HCLGE_RING_INT_REG_OFFSET * j);
ea4750ca
JS
10936 for (i = 0; i < separator_num; i++)
10937 *reg++ = SEPARATOR_VALUE;
10938 }
ddb54554
GH
10939 data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
10940
10941 return data_num_sum;
10942}
10943
10944static int hclge_get_regs_len(struct hnae3_handle *handle)
10945{
10946 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
10947 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10948 struct hclge_vport *vport = hclge_get_vport(handle);
10949 struct hclge_dev *hdev = vport->back;
10950 int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
10951 int regs_lines_32_bit, regs_lines_64_bit;
10952 int ret;
10953
10954 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
10955 if (ret) {
10956 dev_err(&hdev->pdev->dev,
10957 "Get register number failed, ret = %d.\n", ret);
10958 return ret;
10959 }
10960
10961 ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
10962 if (ret) {
10963 dev_err(&hdev->pdev->dev,
10964 "Get dfx reg len failed, ret = %d.\n", ret);
10965 return ret;
10966 }
10967
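	/* every register block is sized in whole dump lines, plus one
	 * extra separator line per block
	 */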
10968 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
10969 REG_SEPARATOR_LINE;
10970 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
10971 REG_SEPARATOR_LINE;
10972 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
10973 REG_SEPARATOR_LINE;
10974 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
10975 REG_SEPARATOR_LINE;
10976 regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
10977 REG_SEPARATOR_LINE;
10978 regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
10979 REG_SEPARATOR_LINE;
10980
10981 return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
10982 tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
10983 regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
10984}
10985
10986static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
10987 void *data)
10988{
10989 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10990 struct hclge_vport *vport = hclge_get_vport(handle);
10991 struct hclge_dev *hdev = vport->back;
10992 u32 regs_num_32_bit, regs_num_64_bit;
10993 int i, reg_num, separator_num, ret;
10994 u32 *reg = data;
10995
10996 *version = hdev->fw_version;
10997
10998 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
10999 if (ret) {
11000 dev_err(&hdev->pdev->dev,
11001 "Get register number failed, ret = %d.\n", ret);
11002 return;
11003 }
11004
11005 reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
ea4750ca 11006
ea4750ca 11007 ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
77b34110
FL
11008 if (ret) {
11009 dev_err(&hdev->pdev->dev,
11010 "Get 32 bit register failed, ret = %d.\n", ret);
11011 return;
11012 }
ddb54554
GH
11013 reg_num = regs_num_32_bit;
11014 reg += reg_num;
11015 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11016 for (i = 0; i < separator_num; i++)
11017 *reg++ = SEPARATOR_VALUE;
77b34110 11018
ea4750ca 11019 ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
ddb54554 11020 if (ret) {
77b34110
FL
11021 dev_err(&hdev->pdev->dev,
11022 "Get 64 bit register failed, ret = %d.\n", ret);
ddb54554
GH
11023 return;
11024 }
11025 reg_num = regs_num_64_bit * 2;
11026 reg += reg_num;
11027 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11028 for (i = 0; i < separator_num; i++)
11029 *reg++ = SEPARATOR_VALUE;
11030
11031 ret = hclge_get_dfx_reg(hdev, reg);
11032 if (ret)
11033 dev_err(&hdev->pdev->dev,
11034 "Get dfx register failed, ret = %d.\n", ret);
77b34110
FL
11035}
11036
f6f75abc 11037static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
07f8e940
JS
11038{
11039 struct hclge_set_led_state_cmd *req;
11040 struct hclge_desc desc;
11041 int ret;
11042
11043 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
11044
11045 req = (struct hclge_set_led_state_cmd *)desc.data;
e4e87715
PL
11046 hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
11047 HCLGE_LED_LOCATE_STATE_S, locate_led_status);
07f8e940
JS
11048
11049 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11050 if (ret)
11051 dev_err(&hdev->pdev->dev,
11052 "Send set led state cmd error, ret =%d\n", ret);
11053
11054 return ret;
11055}
11056
11057enum hclge_led_status {
11058 HCLGE_LED_OFF,
11059 HCLGE_LED_ON,
11060 HCLGE_LED_NO_CHANGE = 0xFF,
11061};
11062
11063static int hclge_set_led_id(struct hnae3_handle *handle,
11064 enum ethtool_phys_id_state status)
11065{
07f8e940
JS
11066 struct hclge_vport *vport = hclge_get_vport(handle);
11067 struct hclge_dev *hdev = vport->back;
07f8e940
JS
11068
11069 switch (status) {
11070 case ETHTOOL_ID_ACTIVE:
f6f75abc 11071 return hclge_set_led_status(hdev, HCLGE_LED_ON);
07f8e940 11072 case ETHTOOL_ID_INACTIVE:
f6f75abc 11073 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
07f8e940 11074 default:
f6f75abc 11075 return -EINVAL;
07f8e940 11076 }
07f8e940
JS
11077}
11078
0979aa0b
FL
11079static void hclge_get_link_mode(struct hnae3_handle *handle,
11080 unsigned long *supported,
11081 unsigned long *advertising)
11082{
11083 unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
11084 struct hclge_vport *vport = hclge_get_vport(handle);
11085 struct hclge_dev *hdev = vport->back;
11086 unsigned int idx = 0;
11087
11088 for (; idx < size; idx++) {
11089 supported[idx] = hdev->hw.mac.supported[idx];
11090 advertising[idx] = hdev->hw.mac.advertising[idx];
11091 }
11092}
11093
1731be4c 11094static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
5c9f6b39
PL
11095{
11096 struct hclge_vport *vport = hclge_get_vport(handle);
11097 struct hclge_dev *hdev = vport->back;
11098
11099 return hclge_config_gro(hdev, enable);
11100}
11101
c631c696
JS
11102static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
11103{
11104 struct hclge_vport *vport = &hdev->vport[0];
11105 struct hnae3_handle *handle = &vport->nic;
11106 u8 tmp_flags = 0;
11107 int ret;
11108
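	/* pick up any change in the overflow promiscuous flags first, then
	 * reprogram the promiscuous mode and VLAN filter if an update is
	 * still pending
	 */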
11109 if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
11110 set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11111 vport->last_promisc_flags = vport->overflow_promisc_flags;
11112 }
11113
11114 if (test_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state)) {
11115 tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
11116 ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
11117 tmp_flags & HNAE3_MPE);
11118 if (!ret) {
11119 clear_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11120 hclge_enable_vlan_filter(handle,
11121 tmp_flags & HNAE3_VLAN_FLTR);
11122 }
11123 }
11124}
11125
cb10228d
YL
11126static bool hclge_module_existed(struct hclge_dev *hdev)
11127{
11128 struct hclge_desc desc;
11129 u32 existed;
11130 int ret;
11131
11132 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
11133 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11134 if (ret) {
11135 dev_err(&hdev->pdev->dev,
11136 "failed to get SFP exist state, ret = %d\n", ret);
11137 return false;
11138 }
11139
11140 existed = le32_to_cpu(desc.data[0]);
11141
11142 return existed != 0;
11143}
11144
11145/* need 6 BDs (140 bytes in total) in one read,
11146 * return the number of bytes actually read, 0 means the read failed.
11147 */
11148static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
11149 u32 len, u8 *data)
11150{
11151 struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
11152 struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
11153 u16 read_len;
11154 u16 copy_len;
11155 int ret;
11156 int i;
11157
11158 /* setup all 6 bds to read module eeprom info. */
11159 for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
11160 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
11161 true);
11162
11163 /* bd0~bd4 need next flag */
11164 if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
11165 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
11166 }
11167
11168 /* setup bd0, this bd contains offset and read length. */
11169 sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
11170 sfp_info_bd0->offset = cpu_to_le16((u16)offset);
11171 read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
11172 sfp_info_bd0->read_len = cpu_to_le16(read_len);
11173
11174 ret = hclge_cmd_send(&hdev->hw, desc, i);
11175 if (ret) {
11176 dev_err(&hdev->pdev->dev,
11177 "failed to get SFP eeprom info, ret = %d\n", ret);
11178 return 0;
11179 }
11180
11181 /* copy sfp info from bd0 to out buffer. */
11182 copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
11183 memcpy(data, sfp_info_bd0->data, copy_len);
11184 read_len = copy_len;
11185
11186 /* copy sfp info from bd1~bd5 to out buffer if needed. */
11187 for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
11188 if (read_len >= len)
11189 return read_len;
11190
11191 copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
11192 memcpy(data + read_len, desc[i].data, copy_len);
11193 read_len += copy_len;
11194 }
11195
11196 return read_len;
11197}
11198
11199static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
11200 u32 len, u8 *data)
11201{
11202 struct hclge_vport *vport = hclge_get_vport(handle);
11203 struct hclge_dev *hdev = vport->back;
11204 u32 read_len = 0;
11205 u16 data_len;
11206
11207 if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
11208 return -EOPNOTSUPP;
11209
11210 if (!hclge_module_existed(hdev))
11211 return -ENXIO;
11212
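	/* read the module eeprom in chunks of at most
	 * HCLGE_SFP_INFO_MAX_LEN bytes until len bytes have been copied
	 */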
11213 while (read_len < len) {
11214 data_len = hclge_get_sfp_eeprom_info(hdev,
11215 offset + read_len,
11216 len - read_len,
11217 data + read_len);
11218 if (!data_len)
11219 return -EIO;
11220
11221 read_len += data_len;
11222 }
11223
11224 return 0;
11225}
11226
46a3df9f
S
11227static const struct hnae3_ae_ops hclge_ops = {
11228 .init_ae_dev = hclge_init_ae_dev,
11229 .uninit_ae_dev = hclge_uninit_ae_dev,
6b9a97ee
HT
11230 .flr_prepare = hclge_flr_prepare,
11231 .flr_done = hclge_flr_done,
46a3df9f
S
11232 .init_client_instance = hclge_init_client_instance,
11233 .uninit_client_instance = hclge_uninit_client_instance,
84e095d6
SM
11234 .map_ring_to_vector = hclge_map_ring_to_vector,
11235 .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
46a3df9f 11236 .get_vector = hclge_get_vector,
0d3e6631 11237 .put_vector = hclge_put_vector,
46a3df9f 11238 .set_promisc_mode = hclge_set_promisc_mode,
c631c696 11239 .request_update_promisc_mode = hclge_request_update_promisc_mode,
c39c4d98 11240 .set_loopback = hclge_set_loopback,
46a3df9f
S
11241 .start = hclge_ae_start,
11242 .stop = hclge_ae_stop,
a6d818e3
YL
11243 .client_start = hclge_client_start,
11244 .client_stop = hclge_client_stop,
46a3df9f
S
11245 .get_status = hclge_get_status,
11246 .get_ksettings_an_result = hclge_get_ksettings_an_result,
46a3df9f
S
11247 .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
11248 .get_media_type = hclge_get_media_type,
22f48e24 11249 .check_port_speed = hclge_check_port_speed,
7e6ec914
JS
11250 .get_fec = hclge_get_fec,
11251 .set_fec = hclge_set_fec,
46a3df9f
S
11252 .get_rss_key_size = hclge_get_rss_key_size,
11253 .get_rss_indir_size = hclge_get_rss_indir_size,
11254 .get_rss = hclge_get_rss,
11255 .set_rss = hclge_set_rss,
f7db940a 11256 .set_rss_tuple = hclge_set_rss_tuple,
07d29954 11257 .get_rss_tuple = hclge_get_rss_tuple,
46a3df9f
S
11258 .get_tc_size = hclge_get_tc_size,
11259 .get_mac_addr = hclge_get_mac_addr,
11260 .set_mac_addr = hclge_set_mac_addr,
26483246 11261 .do_ioctl = hclge_do_ioctl,
46a3df9f
S
11262 .add_uc_addr = hclge_add_uc_addr,
11263 .rm_uc_addr = hclge_rm_uc_addr,
11264 .add_mc_addr = hclge_add_mc_addr,
11265 .rm_mc_addr = hclge_rm_mc_addr,
11266 .set_autoneg = hclge_set_autoneg,
11267 .get_autoneg = hclge_get_autoneg,
22f48e24 11268 .restart_autoneg = hclge_restart_autoneg,
7786a996 11269 .halt_autoneg = hclge_halt_autoneg,
46a3df9f 11270 .get_pauseparam = hclge_get_pauseparam,
61387774 11271 .set_pauseparam = hclge_set_pauseparam,
46a3df9f
S
11272 .set_mtu = hclge_set_mtu,
11273 .reset_queue = hclge_reset_tqp,
11274 .get_stats = hclge_get_stats,
615466ce 11275 .get_mac_stats = hclge_get_mac_stat,
46a3df9f
S
11276 .update_stats = hclge_update_stats,
11277 .get_strings = hclge_get_strings,
11278 .get_sset_count = hclge_get_sset_count,
11279 .get_fw_version = hclge_get_fw_version,
11280 .get_mdix_mode = hclge_get_mdix_mode,
391b5e93 11281 .enable_vlan_filter = hclge_enable_vlan_filter,
dc8131d8 11282 .set_vlan_filter = hclge_set_vlan_filter,
46a3df9f 11283 .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
052ece6d 11284 .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
4ed340ab 11285 .reset_event = hclge_reset_event,
123297b7 11286 .get_reset_level = hclge_get_reset_level,
720bd583 11287 .set_default_reset_request = hclge_set_def_reset_request,
09f2af64
PL
11288 .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
11289 .set_channels = hclge_set_channels,
482d2e9c 11290 .get_channels = hclge_get_channels,
77b34110
FL
11291 .get_regs_len = hclge_get_regs_len,
11292 .get_regs = hclge_get_regs,
07f8e940 11293 .set_led_id = hclge_set_led_id,
0979aa0b 11294 .get_link_mode = hclge_get_link_mode,
dd74f815
JS
11295 .add_fd_entry = hclge_add_fd_entry,
11296 .del_fd_entry = hclge_del_fd_entry,
6871af29 11297 .del_all_fd_entries = hclge_del_all_fd_entries,
05c2314f
JS
11298 .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
11299 .get_fd_rule_info = hclge_get_fd_rule_info,
11300 .get_fd_all_rules = hclge_get_all_rules,
c17852a8 11301 .enable_fd = hclge_enable_fd,
d93ed94f 11302 .add_arfs_entry = hclge_add_fd_entry_by_arfs,
3c666b58 11303 .dbg_run_cmd = hclge_dbg_run_cmd,
381c356e 11304 .handle_hw_ras_error = hclge_handle_hw_ras_error,
4d60291b
HT
11305 .get_hw_reset_stat = hclge_get_hw_reset_stat,
11306 .ae_dev_resetting = hclge_ae_dev_resetting,
11307 .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
5c9f6b39 11308 .set_gro_en = hclge_gro_en,
0c29d191 11309 .get_global_queue_id = hclge_covert_handle_qid_global,
8cdb992f 11310 .set_timer_task = hclge_set_timer_task,
c8a8045b
HT
11311 .mac_connect_phy = hclge_mac_connect_phy,
11312 .mac_disconnect_phy = hclge_mac_disconnect_phy,
6430f744
YM
11313 .get_vf_config = hclge_get_vf_config,
11314 .set_vf_link_state = hclge_set_vf_link_state,
22044f95 11315 .set_vf_spoofchk = hclge_set_vf_spoofchk,
e196ec75 11316 .set_vf_trust = hclge_set_vf_trust,
ee9e4424 11317 .set_vf_rate = hclge_set_vf_rate,
8e6de441 11318 .set_vf_mac = hclge_set_vf_mac,
cb10228d 11319 .get_module_eeprom = hclge_get_module_eeprom,
a4de0228 11320 .get_cmdq_stat = hclge_get_cmdq_stat,
46a3df9f
S
11321};
11322
11323static struct hnae3_ae_algo ae_algo = {
11324 .ops = &hclge_ops,
46a3df9f
S
11325 .pdev_id_table = ae_algo_pci_tbl,
11326};
11327
11328static int hclge_init(void)
11329{
11330 pr_info("%s is initializing\n", HCLGE_NAME);
11331
16deaef2 11332 hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
0ea68902
YL
11333 if (!hclge_wq) {
11334 pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
11335 return -ENOMEM;
11336 }
11337
854cf33a
FL
11338 hnae3_register_ae_algo(&ae_algo);
11339
11340 return 0;
46a3df9f
S
11341}
11342
11343static void hclge_exit(void)
11344{
11345 hnae3_unregister_ae_algo(&ae_algo);
0ea68902 11346 destroy_workqueue(hclge_wq);
46a3df9f
S
11347}
11348module_init(hclge_init);
11349module_exit(hclge_exit);
11350
11351MODULE_LICENSE("GPL");
11352MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
11353MODULE_DESCRIPTION("HCLGE Driver");
11354MODULE_VERSION(HCLGE_MOD_VERSION);