net: hns3: Fix -Wunused-const-variable warning
[linux-block.git] drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1// SPDX-License-Identifier: GPL-2.0+
2// Copyright (c) 2016-2017 Hisilicon Limited.
3
4#include <linux/acpi.h>
5#include <linux/device.h>
6#include <linux/etherdevice.h>
7#include <linux/init.h>
8#include <linux/interrupt.h>
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/netdevice.h>
12#include <linux/pci.h>
13#include <linux/platform_device.h>
2866ccb2 14#include <linux/if_vlan.h>
962e31bd 15#include <linux/crash_dump.h>
f2f432f2 16#include <net/rtnetlink.h>
46a3df9f 17#include "hclge_cmd.h"
cacde272 18#include "hclge_dcb.h"
46a3df9f 19#include "hclge_main.h"
dde1a86e 20#include "hclge_mbx.h"
21#include "hclge_mdio.h"
22#include "hclge_tm.h"
5a9f0eac 23#include "hclge_err.h"
24#include "hnae3.h"
25
26#define HCLGE_NAME "hclge"
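/* HCLGE_STATS_READ below reads a u64 counter located 'offset' bytes into
 * the statistics structure pointed to by 'p'.
 */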
27#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
46a3df9f 29
ebaf1908 30#define HCLGE_BUF_SIZE_UNIT 256U
31#define HCLGE_BUF_MUL_BY 2
32#define HCLGE_BUF_DIV_BY 2
33#define NEED_RESERVE_TC_NUM 2
34#define BUF_MAX_PERCENT 100
35#define BUF_RESERVE_PERCENT 90
b9a400ac 36
63cbf7a9 37#define HCLGE_RESET_MAX_FAIL_CNT 5
38#define HCLGE_RESET_SYNC_TIME 100
39#define HCLGE_PF_RESET_SYNC_TIME 20
40#define HCLGE_PF_RESET_SYNC_CNT 1500
63cbf7a9 41
42/* Get DFX BD number offset */
43#define HCLGE_DFX_BIOS_BD_OFFSET 1
44#define HCLGE_DFX_SSU_0_BD_OFFSET 2
45#define HCLGE_DFX_SSU_1_BD_OFFSET 3
46#define HCLGE_DFX_IGU_BD_OFFSET 4
47#define HCLGE_DFX_RPU_0_BD_OFFSET 5
48#define HCLGE_DFX_RPU_1_BD_OFFSET 6
49#define HCLGE_DFX_NCSI_BD_OFFSET 7
50#define HCLGE_DFX_RTC_BD_OFFSET 8
51#define HCLGE_DFX_PPP_BD_OFFSET 9
52#define HCLGE_DFX_RCB_BD_OFFSET 10
53#define HCLGE_DFX_TQP_BD_OFFSET 11
54#define HCLGE_DFX_SSU_2_BD_OFFSET 12
55
e6d7d79d 56static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
46a3df9f 57static int hclge_init_vlan_config(struct hclge_dev *hdev);
fe4144d4 58static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
4ed340ab 59static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
4f765d3e 60static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
61static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
62 u16 *allocated_size, bool is_alloc);
63static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
64static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
65static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
66 unsigned long *addr);
67
68static struct hnae3_ae_algo ae_algo;
69
70static const struct pci_device_id ae_algo_pci_tbl[] = {
71 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
72 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
73 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
74 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
75 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
76 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
77 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
e92a0843 78 /* required last entry */
79 {0, }
80};
81
82MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
83
84static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
85 HCLGE_CMDQ_TX_ADDR_H_REG,
86 HCLGE_CMDQ_TX_DEPTH_REG,
87 HCLGE_CMDQ_TX_TAIL_REG,
88 HCLGE_CMDQ_TX_HEAD_REG,
89 HCLGE_CMDQ_RX_ADDR_L_REG,
90 HCLGE_CMDQ_RX_ADDR_H_REG,
91 HCLGE_CMDQ_RX_DEPTH_REG,
92 HCLGE_CMDQ_RX_TAIL_REG,
93 HCLGE_CMDQ_RX_HEAD_REG,
94 HCLGE_VECTOR0_CMDQ_SRC_REG,
95 HCLGE_CMDQ_INTR_STS_REG,
96 HCLGE_CMDQ_INTR_EN_REG,
97 HCLGE_CMDQ_INTR_GEN_REG};
98
99static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
100 HCLGE_VECTOR0_OTER_EN_REG,
101 HCLGE_MISC_RESET_STS_REG,
102 HCLGE_MISC_VECTOR_INT_STS,
103 HCLGE_GLOBAL_RESET_REG,
104 HCLGE_FUN_RST_ING,
105 HCLGE_GRO_EN_REG};
106
107static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
108 HCLGE_RING_RX_ADDR_H_REG,
109 HCLGE_RING_RX_BD_NUM_REG,
110 HCLGE_RING_RX_BD_LENGTH_REG,
111 HCLGE_RING_RX_MERGE_EN_REG,
112 HCLGE_RING_RX_TAIL_REG,
113 HCLGE_RING_RX_HEAD_REG,
114 HCLGE_RING_RX_FBD_NUM_REG,
115 HCLGE_RING_RX_OFFSET_REG,
116 HCLGE_RING_RX_FBD_OFFSET_REG,
117 HCLGE_RING_RX_STASH_REG,
118 HCLGE_RING_RX_BD_ERR_REG,
119 HCLGE_RING_TX_ADDR_L_REG,
120 HCLGE_RING_TX_ADDR_H_REG,
121 HCLGE_RING_TX_BD_NUM_REG,
122 HCLGE_RING_TX_PRIORITY_REG,
123 HCLGE_RING_TX_TC_REG,
124 HCLGE_RING_TX_MERGE_EN_REG,
125 HCLGE_RING_TX_TAIL_REG,
126 HCLGE_RING_TX_HEAD_REG,
127 HCLGE_RING_TX_FBD_NUM_REG,
128 HCLGE_RING_TX_OFFSET_REG,
129 HCLGE_RING_TX_EBD_NUM_REG,
130 HCLGE_RING_TX_EBD_OFFSET_REG,
131 HCLGE_RING_TX_BD_ERR_REG,
132 HCLGE_RING_EN_REG};
133
134static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
135 HCLGE_TQP_INTR_GL0_REG,
136 HCLGE_TQP_INTR_GL1_REG,
137 HCLGE_TQP_INTR_GL2_REG,
138 HCLGE_TQP_INTR_RL_REG};
139
46a3df9f 140static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
eb66d503 141 "App Loopback test",
142 "Serdes serial Loopback test",
143 "Serdes parallel Loopback test",
144 "Phy Loopback test"
145};
146
147static const struct hclge_comm_stats_str g_mac_stats_string[] = {
148 {"mac_tx_mac_pause_num",
149 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
150 {"mac_rx_mac_pause_num",
151 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
d174ea75 152 {"mac_tx_control_pkt_num",
153 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
154 {"mac_rx_control_pkt_num",
155 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
156 {"mac_tx_pfc_pkt_num",
157 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
158 {"mac_tx_pfc_pri0_pkt_num",
159 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
160 {"mac_tx_pfc_pri1_pkt_num",
161 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
162 {"mac_tx_pfc_pri2_pkt_num",
163 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
164 {"mac_tx_pfc_pri3_pkt_num",
165 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
166 {"mac_tx_pfc_pri4_pkt_num",
167 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
168 {"mac_tx_pfc_pri5_pkt_num",
169 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
170 {"mac_tx_pfc_pri6_pkt_num",
171 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
172 {"mac_tx_pfc_pri7_pkt_num",
173 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
d174ea75 174 {"mac_rx_pfc_pkt_num",
175 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
176 {"mac_rx_pfc_pri0_pkt_num",
177 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
178 {"mac_rx_pfc_pri1_pkt_num",
179 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
180 {"mac_rx_pfc_pri2_pkt_num",
181 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
182 {"mac_rx_pfc_pri3_pkt_num",
183 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
184 {"mac_rx_pfc_pri4_pkt_num",
185 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
186 {"mac_rx_pfc_pri5_pkt_num",
187 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
188 {"mac_rx_pfc_pri6_pkt_num",
189 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
190 {"mac_rx_pfc_pri7_pkt_num",
191 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
192 {"mac_tx_total_pkt_num",
193 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
194 {"mac_tx_total_oct_num",
195 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
196 {"mac_tx_good_pkt_num",
197 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
198 {"mac_tx_bad_pkt_num",
199 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
200 {"mac_tx_good_oct_num",
201 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
202 {"mac_tx_bad_oct_num",
203 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
204 {"mac_tx_uni_pkt_num",
205 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
206 {"mac_tx_multi_pkt_num",
207 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
208 {"mac_tx_broad_pkt_num",
209 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
210 {"mac_tx_undersize_pkt_num",
211 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
212 {"mac_tx_oversize_pkt_num",
213 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
214 {"mac_tx_64_oct_pkt_num",
215 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
216 {"mac_tx_65_127_oct_pkt_num",
217 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
218 {"mac_tx_128_255_oct_pkt_num",
219 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
220 {"mac_tx_256_511_oct_pkt_num",
221 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
222 {"mac_tx_512_1023_oct_pkt_num",
223 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
224 {"mac_tx_1024_1518_oct_pkt_num",
225 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
226 {"mac_tx_1519_2047_oct_pkt_num",
227 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
228 {"mac_tx_2048_4095_oct_pkt_num",
229 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
230 {"mac_tx_4096_8191_oct_pkt_num",
231 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
232 {"mac_tx_8192_9216_oct_pkt_num",
233 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
234 {"mac_tx_9217_12287_oct_pkt_num",
235 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
236 {"mac_tx_12288_16383_oct_pkt_num",
237 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
238 {"mac_tx_1519_max_good_pkt_num",
239 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
240 {"mac_tx_1519_max_bad_pkt_num",
241 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
242 {"mac_rx_total_pkt_num",
243 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
244 {"mac_rx_total_oct_num",
245 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
246 {"mac_rx_good_pkt_num",
247 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
248 {"mac_rx_bad_pkt_num",
249 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
250 {"mac_rx_good_oct_num",
251 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
252 {"mac_rx_bad_oct_num",
253 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
254 {"mac_rx_uni_pkt_num",
255 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
256 {"mac_rx_multi_pkt_num",
257 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
258 {"mac_rx_broad_pkt_num",
259 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
260 {"mac_rx_undersize_pkt_num",
261 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
262 {"mac_rx_oversize_pkt_num",
263 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
264 {"mac_rx_64_oct_pkt_num",
265 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
266 {"mac_rx_65_127_oct_pkt_num",
267 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
268 {"mac_rx_128_255_oct_pkt_num",
269 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
270 {"mac_rx_256_511_oct_pkt_num",
271 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
272 {"mac_rx_512_1023_oct_pkt_num",
273 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
274 {"mac_rx_1024_1518_oct_pkt_num",
275 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
276 {"mac_rx_1519_2047_oct_pkt_num",
277 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
278 {"mac_rx_2048_4095_oct_pkt_num",
279 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
280 {"mac_rx_4096_8191_oct_pkt_num",
281 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
282 {"mac_rx_8192_9216_oct_pkt_num",
283 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
284 {"mac_rx_9217_12287_oct_pkt_num",
285 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
286 {"mac_rx_12288_16383_oct_pkt_num",
287 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
288 {"mac_rx_1519_max_good_pkt_num",
289 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
290 {"mac_rx_1519_max_bad_pkt_num",
291 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
46a3df9f 292
293 {"mac_tx_fragment_pkt_num",
294 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
295 {"mac_tx_undermin_pkt_num",
296 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
297 {"mac_tx_jabber_pkt_num",
298 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
299 {"mac_tx_err_all_pkt_num",
300 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
301 {"mac_tx_from_app_good_pkt_num",
302 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
303 {"mac_tx_from_app_bad_pkt_num",
304 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
305 {"mac_rx_fragment_pkt_num",
306 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
307 {"mac_rx_undermin_pkt_num",
308 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
309 {"mac_rx_jabber_pkt_num",
310 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
311 {"mac_rx_fcs_err_pkt_num",
312 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
313 {"mac_rx_send_app_good_pkt_num",
314 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
315 {"mac_rx_send_app_bad_pkt_num",
316 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
317};
318
319static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
320 {
321 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
7efffc64 322 .ethter_type = cpu_to_le16(ETH_P_LLDP),
323 .mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
324 .mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
325 .i_port_bitmap = 0x1,
326 },
327};
328
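/* Default RSS hash key; the byte pattern matches the widely used default
 * Toeplitz key, so RSS spreading behaves like most other NIC drivers.
 */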
329static const u8 hclge_hash_key[] = {
330 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
331 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
332 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
333 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
334 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
335};
336
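/* The two tables below are parallel arrays: entry i of the BD-offset list
 * and entry i of the opcode list refer to the same DFX register block.
 */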
337static const u32 hclge_dfx_bd_offset_list[] = {
338 HCLGE_DFX_BIOS_BD_OFFSET,
339 HCLGE_DFX_SSU_0_BD_OFFSET,
340 HCLGE_DFX_SSU_1_BD_OFFSET,
341 HCLGE_DFX_IGU_BD_OFFSET,
342 HCLGE_DFX_RPU_0_BD_OFFSET,
343 HCLGE_DFX_RPU_1_BD_OFFSET,
344 HCLGE_DFX_NCSI_BD_OFFSET,
345 HCLGE_DFX_RTC_BD_OFFSET,
346 HCLGE_DFX_PPP_BD_OFFSET,
347 HCLGE_DFX_RCB_BD_OFFSET,
348 HCLGE_DFX_TQP_BD_OFFSET,
349 HCLGE_DFX_SSU_2_BD_OFFSET
350};
351
352static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
353 HCLGE_OPC_DFX_BIOS_COMMON_REG,
354 HCLGE_OPC_DFX_SSU_REG_0,
355 HCLGE_OPC_DFX_SSU_REG_1,
356 HCLGE_OPC_DFX_IGU_EGU_REG,
357 HCLGE_OPC_DFX_RPU_REG_0,
358 HCLGE_OPC_DFX_RPU_REG_1,
359 HCLGE_OPC_DFX_NCSI_REG,
360 HCLGE_OPC_DFX_RTC_REG,
361 HCLGE_OPC_DFX_PPP_REG,
362 HCLGE_OPC_DFX_RCB_REG,
363 HCLGE_OPC_DFX_TQP_REG,
364 HCLGE_OPC_DFX_SSU_REG_2
365};
366
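/* Each entry below pairs a key field with its width in bits inside the
 * flow director lookup key.
 */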
367static const struct key_info meta_data_key_info[] = {
368 { PACKET_TYPE_ID, 6},
369 { IP_FRAGEMENT, 1},
370 { ROCE_TYPE, 1},
371 { NEXT_KEY, 5},
372 { VLAN_NUMBER, 2},
373 { SRC_VPORT, 12},
374 { DST_VPORT, 12},
375 { TUNNEL_PACKET, 1},
376};
377
378static const struct key_info tuple_key_info[] = {
379 { OUTER_DST_MAC, 48},
380 { OUTER_SRC_MAC, 48},
381 { OUTER_VLAN_TAG_FST, 16},
382 { OUTER_VLAN_TAG_SEC, 16},
383 { OUTER_ETH_TYPE, 16},
384 { OUTER_L2_RSV, 16},
385 { OUTER_IP_TOS, 8},
386 { OUTER_IP_PROTO, 8},
387 { OUTER_SRC_IP, 32},
388 { OUTER_DST_IP, 32},
389 { OUTER_L3_RSV, 16},
390 { OUTER_SRC_PORT, 16},
391 { OUTER_DST_PORT, 16},
392 { OUTER_L4_RSV, 32},
393 { OUTER_TUN_VNI, 24},
394 { OUTER_TUN_FLOW_ID, 8},
395 { INNER_DST_MAC, 48},
396 { INNER_SRC_MAC, 48},
397 { INNER_VLAN_TAG_FST, 16},
398 { INNER_VLAN_TAG_SEC, 16},
399 { INNER_ETH_TYPE, 16},
400 { INNER_L2_RSV, 16},
401 { INNER_IP_TOS, 8},
402 { INNER_IP_PROTO, 8},
403 { INNER_SRC_IP, 32},
404 { INNER_DST_IP, 32},
405 { INNER_L3_RSV, 16},
406 { INNER_SRC_PORT, 16},
407 { INNER_DST_PORT, 16},
408 { INNER_L4_RSV, 32},
409};
410
d174ea75 411static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
46a3df9f 412{
91f384f6 413#define HCLGE_MAC_CMD_NUM 21
414
415 u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
416 struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
a90bb9a5 417 __le64 *desc_data;
418 int i, k, n;
419 int ret;
420
421 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
422 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
423 if (ret) {
424 dev_err(&hdev->pdev->dev,
425 "Get MAC pkt stats fail, status = %d.\n", ret);
426
427 return ret;
428 }
429
430 for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
d174ea75 431 /* for special opcode 0032, only the first desc has the head */
46a3df9f 432 if (unlikely(i == 0)) {
a90bb9a5 433 desc_data = (__le64 *)(&desc[i].data[0]);
d174ea75 434 n = HCLGE_RD_FIRST_STATS_NUM;
46a3df9f 435 } else {
a90bb9a5 436 desc_data = (__le64 *)(&desc[i]);
d174ea75 437 n = HCLGE_RD_OTHER_STATS_NUM;
46a3df9f 438 }
d174ea75 439
46a3df9f 440 for (k = 0; k < n; k++) {
d174ea75 441 *data += le64_to_cpu(*desc_data);
442 data++;
443 desc_data++;
444 }
445 }
446
447 return 0;
448}
449
d174ea75 450static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
451{
452 u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
453 struct hclge_desc *desc;
454 __le64 *desc_data;
455 u16 i, k, n;
456 int ret;
457
458 /* This may be called inside atomic sections,
459 * so GFP_ATOMIC is more suitable here
460 */
461 desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
462 if (!desc)
463 return -ENOMEM;
9e6717af 464
d174ea75 465 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
466 ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
467 if (ret) {
468 kfree(desc);
469 return ret;
470 }
471
472 for (i = 0; i < desc_num; i++) {
473 /* for special opcode 0034, only the first desc has the head */
474 if (i == 0) {
475 desc_data = (__le64 *)(&desc[i].data[0]);
476 n = HCLGE_RD_FIRST_STATS_NUM;
477 } else {
478 desc_data = (__le64 *)(&desc[i]);
479 n = HCLGE_RD_OTHER_STATS_NUM;
480 }
481
482 for (k = 0; k < n; k++) {
483 *data += le64_to_cpu(*desc_data);
484 data++;
485 desc_data++;
486 }
487 }
488
489 kfree(desc);
490
491 return 0;
492}
493
494static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
495{
496 struct hclge_desc desc;
497 __le32 *desc_data;
498 u32 reg_num;
499 int ret;
500
501 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
502 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
503 if (ret)
504 return ret;
505
506 desc_data = (__le32 *)(&desc.data[0]);
507 reg_num = le32_to_cpu(*desc_data);
508
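 /* One descriptor covers the first three registers; each group of four
  * remaining registers needs one more descriptor, i.e.
  * desc_num = 1 + DIV_ROUND_UP(reg_num - 3, 4).
  */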
509 *desc_num = 1 + ((reg_num - 3) >> 2) +
510 (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
511
512 return 0;
513}
514
515static int hclge_mac_update_stats(struct hclge_dev *hdev)
516{
517 u32 desc_num;
518 int ret;
519
520 ret = hclge_mac_query_reg_num(hdev, &desc_num);
521
522 /* The firmware supports the new statistics acquisition method */
523 if (!ret)
524 ret = hclge_mac_update_stats_complete(hdev, desc_num);
525 else if (ret == -EOPNOTSUPP)
526 ret = hclge_mac_update_stats_defective(hdev);
527 else
528 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
529
530 return ret;
531}
532
533static int hclge_tqps_update_stats(struct hnae3_handle *handle)
534{
535 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
536 struct hclge_vport *vport = hclge_get_vport(handle);
537 struct hclge_dev *hdev = vport->back;
538 struct hnae3_queue *queue;
539 struct hclge_desc desc[1];
540 struct hclge_tqp *tqp;
541 int ret, i;
542
543 for (i = 0; i < kinfo->num_tqps; i++) {
544 queue = handle->kinfo.tqp[i];
545 tqp = container_of(queue, struct hclge_tqp, q);
546 /* command : HCLGE_OPC_QUERY_IGU_STAT */
9b2f3477 547 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
548 true);
549
a90bb9a5 550 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
551 ret = hclge_cmd_send(&hdev->hw, desc, 1);
552 if (ret) {
553 dev_err(&hdev->pdev->dev,
554 "Query tqp stat fail, status = %d,queue = %d\n",
9b2f3477 555 ret, i);
556 return ret;
557 }
558 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
cf72fa63 559 le32_to_cpu(desc[0].data[1]);
560 }
561
562 for (i = 0; i < kinfo->num_tqps; i++) {
563 queue = handle->kinfo.tqp[i];
564 tqp = container_of(queue, struct hclge_tqp, q);
565 /* command : HCLGE_OPC_QUERY_IGU_STAT */
566 hclge_cmd_setup_basic_desc(&desc[0],
567 HCLGE_OPC_QUERY_TX_STATUS,
568 true);
569
a90bb9a5 570 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
571 ret = hclge_cmd_send(&hdev->hw, desc, 1);
572 if (ret) {
573 dev_err(&hdev->pdev->dev,
574 "Query tqp stat fail, status = %d,queue = %d\n",
575 ret, i);
576 return ret;
577 }
578 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
cf72fa63 579 le32_to_cpu(desc[0].data[1]);
580 }
581
582 return 0;
583}
584
585static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
586{
587 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
588 struct hclge_tqp *tqp;
589 u64 *buff = data;
590 int i;
591
592 for (i = 0; i < kinfo->num_tqps; i++) {
593 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
a90bb9a5 594 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
595 }
596
597 for (i = 0; i < kinfo->num_tqps; i++) {
598 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
a90bb9a5 599 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
600 }
601
602 return buff;
603}
604
605static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
606{
607 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
608
9b2f3477 609 /* each tqp has TX & RX two queues */
610 return kinfo->num_tqps * (2);
611}
612
613static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
614{
615 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
616 u8 *buff = data;
617 int i = 0;
618
619 for (i = 0; i < kinfo->num_tqps; i++) {
620 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
621 struct hclge_tqp, q);
0c218123 622 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
623 tqp->index);
624 buff = buff + ETH_GSTRING_LEN;
625 }
626
627 for (i = 0; i < kinfo->num_tqps; i++) {
628 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
629 struct hclge_tqp, q);
0c218123 630 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
631 tqp->index);
632 buff = buff + ETH_GSTRING_LEN;
633 }
634
635 return buff;
636}
637
ebaf1908 638static u64 *hclge_comm_get_stats(const void *comm_stats,
639 const struct hclge_comm_stats_str strs[],
640 int size, u64 *data)
641{
642 u64 *buf = data;
643 u32 i;
644
645 for (i = 0; i < size; i++)
646 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
647
648 return buf + size;
649}
650
651static u8 *hclge_comm_get_strings(u32 stringset,
652 const struct hclge_comm_stats_str strs[],
653 int size, u8 *data)
654{
655 char *buff = (char *)data;
656 u32 i;
657
658 if (stringset != ETH_SS_STATS)
659 return buff;
660
661 for (i = 0; i < size; i++) {
18d219b7 662 snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
663 buff = buff + ETH_GSTRING_LEN;
664 }
665
666 return (u8 *)buff;
667}
668
669static void hclge_update_stats_for_all(struct hclge_dev *hdev)
670{
671 struct hnae3_handle *handle;
672 int status;
673
674 handle = &hdev->vport[0].nic;
675 if (handle->client) {
676 status = hclge_tqps_update_stats(handle);
677 if (status) {
678 dev_err(&hdev->pdev->dev,
679 "Update TQPS stats fail, status = %d.\n",
680 status);
681 }
682 }
683
684 status = hclge_mac_update_stats(hdev);
685 if (status)
686 dev_err(&hdev->pdev->dev,
687 "Update MAC stats fail, status = %d.\n", status);
688}
689
690static void hclge_update_stats(struct hnae3_handle *handle,
691 struct net_device_stats *net_stats)
692{
693 struct hclge_vport *vport = hclge_get_vport(handle);
694 struct hclge_dev *hdev = vport->back;
695 int status;
696
697 if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
698 return;
699
700 status = hclge_mac_update_stats(hdev);
701 if (status)
702 dev_err(&hdev->pdev->dev,
703 "Update MAC stats fail, status = %d.\n",
704 status);
705
706 status = hclge_tqps_update_stats(handle);
707 if (status)
708 dev_err(&hdev->pdev->dev,
709 "Update TQPS stats fail, status = %d.\n",
710 status);
711
c5f65480 712 clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
713}
714
715static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
716{
717#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
718 HNAE3_SUPPORT_PHY_LOOPBACK |\
719 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
720 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
721
722 struct hclge_vport *vport = hclge_get_vport(handle);
723 struct hclge_dev *hdev = vport->back;
724 int count = 0;
725
726 /* Loopback test support rules:
727 * mac: only GE mode supports it
728 * serdes: all mac modes support it, including GE/XGE/LGE/CGE
729 * phy: only supported when a phy device exists on the board
730 */
731 if (stringset == ETH_SS_TEST) {
732 /* clear loopback bit flags at first */
733 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
3ff6cde8 734 if (hdev->pdev->revision >= 0x21 ||
4dc13b96 735 hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
736 hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
737 hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
738 count += 1;
eb66d503 739 handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
46a3df9f 740 }
5fd50ac3 741
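 /* serdes serial and serdes parallel loopback are always reported */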
742 count += 2;
743 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
744 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
745 } else if (stringset == ETH_SS_STATS) {
746 count = ARRAY_SIZE(g_mac_stats_string) +
747 hclge_tqps_get_sset_count(handle, stringset);
748 }
749
750 return count;
751}
752
9b2f3477 753static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
754 u8 *data)
755{
756 u8 *p = (char *)data;
757 int size;
758
759 if (stringset == ETH_SS_STATS) {
760 size = ARRAY_SIZE(g_mac_stats_string);
761 p = hclge_comm_get_strings(stringset, g_mac_stats_string,
762 size, p);
763 p = hclge_tqps_get_strings(handle, p);
764 } else if (stringset == ETH_SS_TEST) {
eb66d503 765 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
9b2f3477 766 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
767 ETH_GSTRING_LEN);
768 p += ETH_GSTRING_LEN;
769 }
4dc13b96 770 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
9b2f3477 771 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
772 ETH_GSTRING_LEN);
773 p += ETH_GSTRING_LEN;
774 }
775 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
776 memcpy(p,
777 hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
778 ETH_GSTRING_LEN);
779 p += ETH_GSTRING_LEN;
780 }
781 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
9b2f3477 782 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
783 ETH_GSTRING_LEN);
784 p += ETH_GSTRING_LEN;
785 }
786 }
787}
788
789static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
790{
791 struct hclge_vport *vport = hclge_get_vport(handle);
792 struct hclge_dev *hdev = vport->back;
793 u64 *p;
794
795 p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats, g_mac_stats_string,
796 ARRAY_SIZE(g_mac_stats_string), data);
797 p = hclge_tqps_get_stats(handle, p);
798}
799
800static void hclge_get_mac_stat(struct hnae3_handle *handle,
801 struct hns3_mac_stats *mac_stats)
802{
803 struct hclge_vport *vport = hclge_get_vport(handle);
804 struct hclge_dev *hdev = vport->back;
805
806 hclge_update_stats(handle, NULL);
807
808 mac_stats->tx_pause_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
809 mac_stats->rx_pause_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
810}
811
46a3df9f 812static int hclge_parse_func_status(struct hclge_dev *hdev,
d44f9b63 813 struct hclge_func_status_cmd *status)
814{
815 if (!(status->pf_state & HCLGE_PF_STATE_DONE))
816 return -EINVAL;
817
818 /* Set the pf to main pf */
819 if (status->pf_state & HCLGE_PF_STATE_MAIN)
820 hdev->flag |= HCLGE_FLAG_MAIN;
821 else
822 hdev->flag &= ~HCLGE_FLAG_MAIN;
823
824 return 0;
825}
826
827static int hclge_query_function_status(struct hclge_dev *hdev)
828{
829#define HCLGE_QUERY_MAX_CNT 5
830
d44f9b63 831 struct hclge_func_status_cmd *req;
832 struct hclge_desc desc;
833 int timeout = 0;
834 int ret;
835
836 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
d44f9b63 837 req = (struct hclge_func_status_cmd *)desc.data;
838
839 do {
840 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
841 if (ret) {
842 dev_err(&hdev->pdev->dev,
9b2f3477 843 "query function status failed %d.\n", ret);
844 return ret;
845 }
846
847 /* Check pf reset is done */
848 if (req->pf_state)
849 break;
850 usleep_range(1000, 2000);
b37ce587 851 } while (timeout++ < HCLGE_QUERY_MAX_CNT);
852
853 ret = hclge_parse_func_status(hdev, req);
854
855 return ret;
856}
857
858static int hclge_query_pf_resource(struct hclge_dev *hdev)
859{
d44f9b63 860 struct hclge_pf_res_cmd *req;
861 struct hclge_desc desc;
862 int ret;
863
864 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
865 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
866 if (ret) {
867 dev_err(&hdev->pdev->dev,
868 "query pf resource failed %d.\n", ret);
869 return ret;
870 }
871
d44f9b63 872 req = (struct hclge_pf_res_cmd *)desc.data;
873 hdev->num_tqps = __le16_to_cpu(req->tqp_num);
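 /* buffer sizes are reported by firmware in units of 1 << HCLGE_BUF_UNIT_S bytes */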
874 hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
875
876 if (req->tx_buf_size)
877 hdev->tx_buf_size =
878 __le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
879 else
880 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
881
882 hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
883
884 if (req->dv_buf_size)
885 hdev->dv_buf_size =
886 __le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
887 else
888 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
889
890 hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
891
e92a0843 892 if (hnae3_dev_roce_supported(hdev)) {
893 hdev->roce_base_msix_offset =
894 hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
895 HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
887c3820 896 hdev->num_roce_msi =
897 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
898 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
899
900 /* PF should have NIC vectors and Roce vectors,
901 * NIC vectors are queued before Roce vectors.
902 */
9b2f3477 903 hdev->num_msi = hdev->num_roce_msi +
375dd5e4 904 hdev->roce_base_msix_offset;
905 } else {
906 hdev->num_msi =
907 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
908 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
909 }
910
911 return 0;
912}
913
914static int hclge_parse_speed(int speed_cmd, int *speed)
915{
916 switch (speed_cmd) {
917 case 6:
918 *speed = HCLGE_MAC_SPEED_10M;
919 break;
920 case 7:
921 *speed = HCLGE_MAC_SPEED_100M;
922 break;
923 case 0:
924 *speed = HCLGE_MAC_SPEED_1G;
925 break;
926 case 1:
927 *speed = HCLGE_MAC_SPEED_10G;
928 break;
929 case 2:
930 *speed = HCLGE_MAC_SPEED_25G;
931 break;
932 case 3:
933 *speed = HCLGE_MAC_SPEED_40G;
934 break;
935 case 4:
936 *speed = HCLGE_MAC_SPEED_50G;
937 break;
938 case 5:
939 *speed = HCLGE_MAC_SPEED_100G;
940 break;
941 default:
942 return -EINVAL;
943 }
944
945 return 0;
946}
947
948static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
949{
950 struct hclge_vport *vport = hclge_get_vport(handle);
951 struct hclge_dev *hdev = vport->back;
952 u32 speed_ability = hdev->hw.mac.speed_ability;
953 u32 speed_bit = 0;
954
955 switch (speed) {
956 case HCLGE_MAC_SPEED_10M:
957 speed_bit = HCLGE_SUPPORT_10M_BIT;
958 break;
959 case HCLGE_MAC_SPEED_100M:
960 speed_bit = HCLGE_SUPPORT_100M_BIT;
961 break;
962 case HCLGE_MAC_SPEED_1G:
963 speed_bit = HCLGE_SUPPORT_1G_BIT;
964 break;
965 case HCLGE_MAC_SPEED_10G:
966 speed_bit = HCLGE_SUPPORT_10G_BIT;
967 break;
968 case HCLGE_MAC_SPEED_25G:
969 speed_bit = HCLGE_SUPPORT_25G_BIT;
970 break;
971 case HCLGE_MAC_SPEED_40G:
972 speed_bit = HCLGE_SUPPORT_40G_BIT;
973 break;
974 case HCLGE_MAC_SPEED_50G:
975 speed_bit = HCLGE_SUPPORT_50G_BIT;
976 break;
977 case HCLGE_MAC_SPEED_100G:
978 speed_bit = HCLGE_SUPPORT_100G_BIT;
979 break;
980 default:
981 return -EINVAL;
982 }
983
984 if (speed_bit & speed_ability)
985 return 0;
986
987 return -EINVAL;
988}
989
88d10bd6 990static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
0979aa0b 991{
0979aa0b 992 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
db68ca0e 993 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
994 mac->supported);
995 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
996 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
997 mac->supported);
998 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
999 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1000 mac->supported);
1001 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1002 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1003 mac->supported);
1004 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1005 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1006 mac->supported);
1007}
0979aa0b 1008
1009static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
1010{
1011 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1012 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1013 mac->supported);
0979aa0b 1014 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
db68ca0e 1015 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1016 mac->supported);
1017 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1018 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1019 mac->supported);
1020 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1021 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1022 mac->supported);
1023 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1024 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1025 mac->supported);
1026}
0979aa0b 1027
1028static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
1029{
1030 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1031 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1032 mac->supported);
1033 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1034 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1035 mac->supported);
1036 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1037 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1038 mac->supported);
0979aa0b 1039 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1040 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1041 mac->supported);
1042 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1043 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1044 mac->supported);
1045}
0979aa0b 1046
1047static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
1048{
1049 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1050 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1051 mac->supported);
1052 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1053 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1054 mac->supported);
1055 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1056 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1057 mac->supported);
1058 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1059 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1060 mac->supported);
1061 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1062 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1063 mac->supported);
0979aa0b 1064 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1065 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1066 mac->supported);
1067}
0979aa0b 1068
1069static void hclge_convert_setting_fec(struct hclge_mac *mac)
1070{
1071 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1072 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1073
1074 switch (mac->speed) {
1075 case HCLGE_MAC_SPEED_10G:
1076 case HCLGE_MAC_SPEED_40G:
1077 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1078 mac->supported);
1079 mac->fec_ability =
1080 BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1081 break;
1082 case HCLGE_MAC_SPEED_25G:
1083 case HCLGE_MAC_SPEED_50G:
1084 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1085 mac->supported);
1086 mac->fec_ability =
1087 BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1088 BIT(HNAE3_FEC_AUTO);
1089 break;
1090 case HCLGE_MAC_SPEED_100G:
1091 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1092 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1093 break;
1094 default:
1095 mac->fec_ability = 0;
1096 break;
1097 }
1098}
1099
1100static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1101 u8 speed_ability)
1102{
1103 struct hclge_mac *mac = &hdev->hw.mac;
1104
1105 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1106 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1107 mac->supported);
1108
1109 hclge_convert_setting_sr(mac, speed_ability);
1110 hclge_convert_setting_lr(mac, speed_ability);
1111 hclge_convert_setting_cr(mac, speed_ability);
1112 if (hdev->pdev->revision >= 0x21)
1113 hclge_convert_setting_fec(mac);
1114
1115 linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1116 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
7e6ec914 1117 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1118}
1119
1120static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1121 u8 speed_ability)
1122{
1123 struct hclge_mac *mac = &hdev->hw.mac;
1124
1125 hclge_convert_setting_kr(mac, speed_ability);
1126 if (hdev->pdev->revision >= 0x21)
1127 hclge_convert_setting_fec(mac);
1128 linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1129 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
7e6ec914 1130 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1131}
1132
1133static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1134 u8 speed_ability)
1135{
1136 unsigned long *supported = hdev->hw.mac.supported;
1137
1138 /* default to support all speed for GE port */
1139 if (!speed_ability)
1140 speed_ability = HCLGE_SUPPORT_GE;
1141
1142 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1143 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1144 supported);
1145
1146 if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1147 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1148 supported);
1149 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1150 supported);
1151 }
1152
1153 if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1154 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1155 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1156 }
1157
1158 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1159 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1160 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
bc3781ed 1161 linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1162}
1163
1164static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
1165{
1166 u8 media_type = hdev->hw.mac.media_type;
1167
1168 if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1169 hclge_parse_fiber_link_mode(hdev, speed_ability);
1170 else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1171 hclge_parse_copper_link_mode(hdev, speed_ability);
1172 else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1173 hclge_parse_backplane_link_mode(hdev, speed_ability);
0979aa0b 1174}
37417c66 1175
1176static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1177{
d44f9b63 1178 struct hclge_cfg_param_cmd *req;
1179 u64 mac_addr_tmp_high;
1180 u64 mac_addr_tmp;
ebaf1908 1181 unsigned int i;
46a3df9f 1182
d44f9b63 1183 req = (struct hclge_cfg_param_cmd *)desc[0].data;
1184
1185 /* get the configuration */
1186 cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1187 HCLGE_CFG_VMDQ_M,
1188 HCLGE_CFG_VMDQ_S);
1189 cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1190 HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1191 cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1192 HCLGE_CFG_TQP_DESC_N_M,
1193 HCLGE_CFG_TQP_DESC_N_S);
1194
1195 cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1196 HCLGE_CFG_PHY_ADDR_M,
1197 HCLGE_CFG_PHY_ADDR_S);
1198 cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1199 HCLGE_CFG_MEDIA_TP_M,
1200 HCLGE_CFG_MEDIA_TP_S);
1201 cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1202 HCLGE_CFG_RX_BUF_LEN_M,
1203 HCLGE_CFG_RX_BUF_LEN_S);
1204 /* get mac_address */
1205 mac_addr_tmp = __le32_to_cpu(req->param[2]);
1206 mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1207 HCLGE_CFG_MAC_ADDR_H_M,
1208 HCLGE_CFG_MAC_ADDR_H_S);
1209
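 /* fold the high bits in above bit 31; (x << 31) << 1 is simply x << 32 */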
1210 mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1211
1212 cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1213 HCLGE_CFG_DEFAULT_SPEED_M,
1214 HCLGE_CFG_DEFAULT_SPEED_S);
1215 cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1216 HCLGE_CFG_RSS_SIZE_M,
1217 HCLGE_CFG_RSS_SIZE_S);
0e7a40cd 1218
1219 for (i = 0; i < ETH_ALEN; i++)
1220 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1221
d44f9b63 1222 req = (struct hclge_cfg_param_cmd *)desc[1].data;
46a3df9f 1223 cfg->numa_node_map = __le32_to_cpu(req->param[0]);
0979aa0b 1224
1225 cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1226 HCLGE_CFG_SPEED_ABILITY_M,
1227 HCLGE_CFG_SPEED_ABILITY_S);
1228 cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1229 HCLGE_CFG_UMV_TBL_SPACE_M,
1230 HCLGE_CFG_UMV_TBL_SPACE_S);
1231 if (!cfg->umv_space)
1232 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1233}
1234
1235/* hclge_get_cfg: query the static parameters from flash
1236 * @hdev: pointer to struct hclge_dev
1237 * @hcfg: the config structure to be filled
1238 */
1239static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1240{
1241 struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
d44f9b63 1242 struct hclge_cfg_param_cmd *req;
1243 unsigned int i;
1244 int ret;
1245
1246 for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1247 u32 offset = 0;
1248
d44f9b63 1249 req = (struct hclge_cfg_param_cmd *)desc[i].data;
1250 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1251 true);
1252 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1253 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
46a3df9f 1254 /* The read length is passed to hardware in units of 4 bytes */
1255 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1256 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
a90bb9a5 1257 req->offset = cpu_to_le32(offset);
1258 }
1259
1260 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1261 if (ret) {
3f639907 1262 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1263 return ret;
1264 }
1265
1266 hclge_parse_cfg(hcfg, desc);
3f639907 1267
1268 return 0;
1269}
1270
1271static int hclge_get_cap(struct hclge_dev *hdev)
1272{
1273 int ret;
1274
1275 ret = hclge_query_function_status(hdev);
1276 if (ret) {
1277 dev_err(&hdev->pdev->dev,
1278 "query function status error %d.\n", ret);
1279 return ret;
1280 }
1281
1282 /* get pf resource */
1283 ret = hclge_query_pf_resource(hdev);
1284 if (ret)
1285 dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
46a3df9f 1286
3f639907 1287 return ret;
1288}
1289
1290static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1291{
1292#define HCLGE_MIN_TX_DESC 64
1293#define HCLGE_MIN_RX_DESC 64
1294
1295 if (!is_kdump_kernel())
1296 return;
1297
1298 dev_info(&hdev->pdev->dev,
1299 "Running kdump kernel. Using minimal resources\n");
1300
1301 /* minimal queue pairs equals to the number of vports */
1302 hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1303 hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1304 hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1305}
1306
1307static int hclge_configure(struct hclge_dev *hdev)
1308{
1309 struct hclge_cfg cfg;
1310 unsigned int i;
1311 int ret;
1312
1313 ret = hclge_get_cfg(hdev, &cfg);
1314 if (ret) {
1315 dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
1316 return ret;
1317 }
1318
1319 hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1320 hdev->base_tqp_pid = 0;
0e7a40cd 1321 hdev->rss_size_max = cfg.rss_size_max;
46a3df9f 1322 hdev->rx_buf_len = cfg.rx_buf_len;
fbbb1536 1323 ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
46a3df9f 1324 hdev->hw.mac.media_type = cfg.media_type;
2a4776e1 1325 hdev->hw.mac.phy_addr = cfg.phy_addr;
1326 hdev->num_tx_desc = cfg.tqp_desc_num;
1327 hdev->num_rx_desc = cfg.tqp_desc_num;
46a3df9f 1328 hdev->tm_info.num_pg = 1;
cacde272 1329 hdev->tc_max = cfg.tc_num;
46a3df9f 1330 hdev->tm_info.hw_pfc_map = 0;
39932473 1331 hdev->wanted_umv_size = cfg.umv_space;
46a3df9f 1332
44122887 1333 if (hnae3_dev_fd_supported(hdev)) {
9abeb7d8 1334 hdev->fd_en = true;
1335 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1336 }
9abeb7d8 1337
1338 ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1339 if (ret) {
1340 dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
1341 return ret;
1342 }
1343
1344 hclge_parse_link_mode(hdev, cfg.speed_ability);
1345
1346 if ((hdev->tc_max > HNAE3_MAX_TC) ||
1347 (hdev->tc_max < 1)) {
46a3df9f 1348 dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
1349 hdev->tc_max);
1350 hdev->tc_max = 1;
1351 }
1352
1353 /* Dev does not support DCB */
1354 if (!hnae3_dev_dcb_supported(hdev)) {
1355 hdev->tc_max = 1;
1356 hdev->pfc_max = 0;
1357 } else {
1358 hdev->pfc_max = hdev->tc_max;
1359 }
1360
a2987975 1361 hdev->tm_info.num_tc = 1;
cacde272 1362
46a3df9f 1363 /* currently non-contiguous TCs are not supported */
cacde272 1364 for (i = 0; i < hdev->tm_info.num_tc; i++)
e4e87715 1365 hnae3_set_bit(hdev->hw_tc_map, i, 1);
46a3df9f 1366
71b83869 1367 hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
46a3df9f 1368
1369 hclge_init_kdump_kernel_config(hdev);
1370
1371 /* Set the init affinity based on pci func number */
1372 i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
1373 i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
1374 cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
1375 &hdev->affinity_mask);
1376
46a3df9f
S
1377 return ret;
1378}
1379
1380static int hclge_config_tso(struct hclge_dev *hdev, unsigned int tso_mss_min,
1381 unsigned int tso_mss_max)
46a3df9f 1382{
d44f9b63 1383 struct hclge_cfg_tso_status_cmd *req;
46a3df9f 1384 struct hclge_desc desc;
a90bb9a5 1385 u16 tso_mss;
1386
1387 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1388
d44f9b63 1389 req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1390
1391 tso_mss = 0;
1392 hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1393 HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1394 req->tso_mss_min = cpu_to_le16(tso_mss);
1395
1396 tso_mss = 0;
1397 hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1398 HCLGE_TSO_MSS_MIN_S, tso_mss_max);
a90bb9a5 1399 req->tso_mss_max = cpu_to_le16(tso_mss);
1400
1401 return hclge_cmd_send(&hdev->hw, &desc, 1);
1402}
1403
1404static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1405{
1406 struct hclge_cfg_gro_status_cmd *req;
1407 struct hclge_desc desc;
1408 int ret;
1409
1410 if (!hnae3_dev_gro_supported(hdev))
1411 return 0;
1412
1413 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1414 req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1415
1416 req->gro_en = cpu_to_le16(en ? 1 : 0);
1417
1418 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1419 if (ret)
1420 dev_err(&hdev->pdev->dev,
1421 "GRO hardware config cmd failed, ret = %d\n", ret);
1422
1423 return ret;
1424}
1425
1426static int hclge_alloc_tqps(struct hclge_dev *hdev)
1427{
1428 struct hclge_tqp *tqp;
1429 int i;
1430
1431 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1432 sizeof(struct hclge_tqp), GFP_KERNEL);
1433 if (!hdev->htqp)
1434 return -ENOMEM;
1435
1436 tqp = hdev->htqp;
1437
1438 for (i = 0; i < hdev->num_tqps; i++) {
1439 tqp->dev = &hdev->pdev->dev;
1440 tqp->index = i;
1441
1442 tqp->q.ae_algo = &ae_algo;
1443 tqp->q.buf_size = hdev->rx_buf_len;
1444 tqp->q.tx_desc_num = hdev->num_tx_desc;
1445 tqp->q.rx_desc_num = hdev->num_rx_desc;
1446 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1447 i * HCLGE_TQP_REG_SIZE;
1448
1449 tqp++;
1450 }
1451
1452 return 0;
1453}
1454
1455static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1456 u16 tqp_pid, u16 tqp_vid, bool is_pf)
1457{
d44f9b63 1458 struct hclge_tqp_map_cmd *req;
1459 struct hclge_desc desc;
1460 int ret;
1461
1462 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1463
d44f9b63 1464 req = (struct hclge_tqp_map_cmd *)desc.data;
46a3df9f 1465 req->tqp_id = cpu_to_le16(tqp_pid);
a90bb9a5 1466 req->tqp_vf = func_id;
1467 req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1468 if (!is_pf)
1469 req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1470 req->tqp_vid = cpu_to_le16(tqp_vid);
1471
1472 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1473 if (ret)
1474 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
46a3df9f 1475
3f639907 1476 return ret;
1477}
1478
672ad0ed 1479static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
46a3df9f 1480{
128b900d 1481 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
46a3df9f 1482 struct hclge_dev *hdev = vport->back;
7df7dad6 1483 int i, alloced;
1484
1485 for (i = 0, alloced = 0; i < hdev->num_tqps &&
672ad0ed 1486 alloced < num_tqps; i++) {
1487 if (!hdev->htqp[i].alloced) {
1488 hdev->htqp[i].q.handle = &vport->nic;
1489 hdev->htqp[i].q.tqp_index = alloced;
1490 hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1491 hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
128b900d 1492 kinfo->tqp[alloced] = &hdev->htqp[i].q;
46a3df9f 1493 hdev->htqp[i].alloced = true;
1494 alloced++;
1495 }
1496 }
1497 vport->alloc_tqps = alloced;
1498 kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1499 vport->alloc_tqps / hdev->tm_info.num_tc);
1500
1501 return 0;
1502}
1503
1504static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1505 u16 num_tx_desc, u16 num_rx_desc)
1506
1507{
1508 struct hnae3_handle *nic = &vport->nic;
1509 struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1510 struct hclge_dev *hdev = vport->back;
af958827 1511 int ret;
46a3df9f 1512
1513 kinfo->num_tx_desc = num_tx_desc;
1514 kinfo->num_rx_desc = num_rx_desc;
1515
46a3df9f 1516 kinfo->rx_buf_len = hdev->rx_buf_len;
46a3df9f 1517
672ad0ed 1518 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1519 sizeof(struct hnae3_queue *), GFP_KERNEL);
1520 if (!kinfo->tqp)
1521 return -ENOMEM;
1522
672ad0ed 1523 ret = hclge_assign_tqp(vport, num_tqps);
3f639907 1524 if (ret)
46a3df9f 1525 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
46a3df9f 1526
3f639907 1527 return ret;
1528}
1529
1530static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1531 struct hclge_vport *vport)
1532{
1533 struct hnae3_handle *nic = &vport->nic;
1534 struct hnae3_knic_private_info *kinfo;
1535 u16 i;
1536
1537 kinfo = &nic->kinfo;
205a24ca 1538 for (i = 0; i < vport->alloc_tqps; i++) {
1539 struct hclge_tqp *q =
1540 container_of(kinfo->tqp[i], struct hclge_tqp, q);
1541 bool is_pf;
1542 int ret;
1543
1544 is_pf = !(vport->vport_id);
1545 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1546 i, is_pf);
1547 if (ret)
1548 return ret;
1549 }
1550
1551 return 0;
1552}
1553
1554static int hclge_map_tqp(struct hclge_dev *hdev)
1555{
1556 struct hclge_vport *vport = hdev->vport;
1557 u16 i, num_vport;
1558
1559 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1560 for (i = 0; i < num_vport; i++) {
1561 int ret;
1562
1563 ret = hclge_map_tqp_to_vport(hdev, vport);
1564 if (ret)
1565 return ret;
1566
1567 vport++;
1568 }
1569
1570 return 0;
1571}
1572
1573static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1574{
1575 struct hnae3_handle *nic = &vport->nic;
1576 struct hclge_dev *hdev = vport->back;
1577 int ret;
1578
1579 nic->pdev = hdev->pdev;
1580 nic->ae_algo = &ae_algo;
1581 nic->numa_node_mask = hdev->numa_node_mask;
1582
1583 ret = hclge_knic_setup(vport, num_tqps,
1584 hdev->num_tx_desc, hdev->num_rx_desc);
1585 if (ret)
1586 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
46a3df9f 1587
b69c9737 1588 return ret;
1589}
1590
1591static int hclge_alloc_vport(struct hclge_dev *hdev)
1592{
1593 struct pci_dev *pdev = hdev->pdev;
1594 struct hclge_vport *vport;
1595 u32 tqp_main_vport;
1596 u32 tqp_per_vport;
1597 int num_vport, i;
1598 int ret;
1599
1600 /* We need to alloc a vport for main NIC of PF */
1601 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1602
1603 if (hdev->num_tqps < num_vport) {
1604 dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
1605 hdev->num_tqps, num_vport);
1606 return -EINVAL;
1607 }
1608
1609 /* Alloc the same number of TQPs for every vport */
1610 tqp_per_vport = hdev->num_tqps / num_vport;
1611 tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1612
1613 vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1614 GFP_KERNEL);
1615 if (!vport)
1616 return -ENOMEM;
1617
1618 hdev->vport = vport;
1619 hdev->num_alloc_vport = num_vport;
1620
1621 if (IS_ENABLED(CONFIG_PCI_IOV))
1622 hdev->num_alloc_vfs = hdev->num_req_vfs;
1623
1624 for (i = 0; i < num_vport; i++) {
1625 vport->back = hdev;
1626 vport->vport_id = i;
818f1675 1627 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1628 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1629 vport->rxvlan_cfg.rx_vlan_offload_en = true;
c6075b19 1630 INIT_LIST_HEAD(&vport->vlan_list);
6dd86902 1631 INIT_LIST_HEAD(&vport->uc_mac_list);
1632 INIT_LIST_HEAD(&vport->mc_mac_list);
1633
1634 if (i == 0)
1635 ret = hclge_vport_setup(vport, tqp_main_vport);
1636 else
1637 ret = hclge_vport_setup(vport, tqp_per_vport);
1638 if (ret) {
1639 dev_err(&pdev->dev,
1640 "vport setup failed for vport %d, %d\n",
1641 i, ret);
1642 return ret;
1643 }
1644
1645 vport++;
1646 }
1647
1648 return 0;
1649}
1650
1651static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1652 struct hclge_pkt_buf_alloc *buf_alloc)
1653{
1654/* TX buffer size is specified in units of 128 bytes */
1655#define HCLGE_BUF_SIZE_UNIT_SHIFT 7
1656#define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15)
d44f9b63 1657 struct hclge_tx_buff_alloc_cmd *req;
1658 struct hclge_desc desc;
1659 int ret;
1660 u8 i;
1661
d44f9b63 1662 req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1663
1664 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
f9f07091 1665 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
acf61ecd 1666 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
9ffe79a9 1667
1668 req->tx_pkt_buff[i] =
1669 cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1670 HCLGE_BUF_SIZE_UPDATE_EN_MSK);
9ffe79a9 1671 }
1672
1673 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 1674 if (ret)
1675 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1676 ret);
46a3df9f 1677
3f639907 1678 return ret;
1679}
1680
1681static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1682 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f 1683{
acf61ecd 1684 int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
46a3df9f 1685
1686 if (ret)
1687 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
46a3df9f 1688
3f639907 1689 return ret;
1690}
1691
1a49f3c6 1692static u32 hclge_get_tc_num(struct hclge_dev *hdev)
46a3df9f 1693{
ebaf1908
WL
1694 unsigned int i;
1695 u32 cnt = 0;
46a3df9f
S
1696
1697 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1698 if (hdev->hw_tc_map & BIT(i))
1699 cnt++;
1700 return cnt;
1701}
1702
46a3df9f 1703/* Get the number of pfc enabled TCs, which have private buffer */
acf61ecd
YL
1704static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1705 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f
S
1706{
1707 struct hclge_priv_buf *priv;
ebaf1908
WL
1708 unsigned int i;
1709 int cnt = 0;
46a3df9f
S
1710
1711 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
acf61ecd 1712 priv = &buf_alloc->priv_buf[i];
46a3df9f
S
1713 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1714 priv->enable)
1715 cnt++;
1716 }
1717
1718 return cnt;
1719}
1720
1721/* Get the number of pfc disabled TCs, which have private buffer */
acf61ecd
YL
1722static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1723 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f
S
1724{
1725 struct hclge_priv_buf *priv;
ebaf1908
WL
1726 unsigned int i;
1727 int cnt = 0;
46a3df9f
S
1728
1729 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
acf61ecd 1730 priv = &buf_alloc->priv_buf[i];
46a3df9f
S
1731 if (hdev->hw_tc_map & BIT(i) &&
1732 !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1733 priv->enable)
1734 cnt++;
1735 }
1736
1737 return cnt;
1738}
1739
acf61ecd 1740static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f
S
1741{
1742 struct hclge_priv_buf *priv;
1743 u32 rx_priv = 0;
1744 int i;
1745
1746 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
acf61ecd 1747 priv = &buf_alloc->priv_buf[i];
46a3df9f
S
1748 if (priv->enable)
1749 rx_priv += priv->buf_size;
1750 }
1751 return rx_priv;
1752}
1753
acf61ecd 1754static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
9ffe79a9
YL
1755{
1756 u32 i, total_tx_size = 0;
1757
1758 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
acf61ecd 1759 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
9ffe79a9
YL
1760
1761 return total_tx_size;
1762}
1763
acf61ecd
YL
1764static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1765 struct hclge_pkt_buf_alloc *buf_alloc,
1766 u32 rx_all)
46a3df9f 1767{
1a49f3c6
YL
1768 u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1769 u32 tc_num = hclge_get_tc_num(hdev);
b9a400ac 1770 u32 shared_buf, aligned_mps;
46a3df9f
S
1771 u32 rx_priv;
1772 int i;
1773
b9a400ac 1774 aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
46a3df9f 1775
d221df4e 1776 if (hnae3_dev_dcb_supported(hdev))
b37ce587
YM
1777 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1778 hdev->dv_buf_size;
d221df4e 1779 else
b9a400ac 1780 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
368686be 1781 + hdev->dv_buf_size;
d221df4e 1782
db5936db 1783 shared_buf_tc = tc_num * aligned_mps + aligned_mps;
af854724
YL
1784 shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1785 HCLGE_BUF_SIZE_UNIT);
46a3df9f 1786
acf61ecd 1787 rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
af854724 1788 if (rx_all < rx_priv + shared_std)
46a3df9f
S
1789 return false;
1790
b9a400ac 1791 shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
acf61ecd 1792 buf_alloc->s_buf.buf_size = shared_buf;
368686be
YL
1793 if (hnae3_dev_dcb_supported(hdev)) {
1794 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1795 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
b37ce587
YM
1796 - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1797 HCLGE_BUF_SIZE_UNIT);
368686be 1798 } else {
b9a400ac 1799 buf_alloc->s_buf.self.high = aligned_mps +
368686be 1800 HCLGE_NON_DCB_ADDITIONAL_BUF;
1a49f3c6
YL
1801 buf_alloc->s_buf.self.low = aligned_mps;
1802 }
1803
1804 if (hnae3_dev_dcb_supported(hdev)) {
9e15be90
YL
1805 hi_thrd = shared_buf - hdev->dv_buf_size;
1806
1807 if (tc_num <= NEED_RESERVE_TC_NUM)
1808 hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
1809 / BUF_MAX_PERCENT;
1810
1a49f3c6 1811 if (tc_num)
9e15be90 1812 hi_thrd = hi_thrd / tc_num;
1a49f3c6 1813
b37ce587 1814 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
1a49f3c6 1815 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
b37ce587 1816 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
1a49f3c6
YL
1817 } else {
1818 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1819 lo_thrd = aligned_mps;
368686be 1820 }
46a3df9f
S
1821
1822 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1a49f3c6
YL
1823 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1824 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
46a3df9f
S
1825 }
1826
1827 return true;
1828}
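/* Editorial note, with purely illustrative numbers (not the driver
 * defaults): assuming aligned_mps = 1536 (mps = 1500 rounded up to 256
 * bytes), dv_buf_size = 8192 and tc_num = 4 on a DCB-capable device,
 * shared_buf_min = 2 * 1536 + 8192 = 11264 and
 * shared_buf_tc  = 4 * 1536 + 1536 = 7680, so shared_std = 11264.
 * The layout is accepted only if rx_all covers the private buffers plus
 * this shared standard size.
 */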
1829
acf61ecd
YL
1830static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1831 struct hclge_pkt_buf_alloc *buf_alloc)
9ffe79a9
YL
1832{
1833 u32 i, total_size;
1834
1835 total_size = hdev->pkt_buf_size;
1836
1837 /* alloc tx buffer for all enabled tc */
1838 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
acf61ecd 1839 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
9ffe79a9 1840
b6b4f987
HT
1841 if (hdev->hw_tc_map & BIT(i)) {
1842 if (total_size < hdev->tx_buf_size)
1843 return -ENOMEM;
9ffe79a9 1844
368686be 1845 priv->tx_buf_size = hdev->tx_buf_size;
b6b4f987 1846 } else {
9ffe79a9 1847 priv->tx_buf_size = 0;
b6b4f987 1848 }
9ffe79a9
YL
1849
1850 total_size -= priv->tx_buf_size;
1851 }
1852
1853 return 0;
1854}
1855
8ca754b1
YL
1856static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
1857 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f 1858{
8ca754b1
YL
1859 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1860 u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
ebaf1908 1861 unsigned int i;
46a3df9f 1862
46a3df9f 1863 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
8ca754b1 1864 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
46a3df9f 1865
bb1fe9ea
YL
1866 priv->enable = 0;
1867 priv->wl.low = 0;
1868 priv->wl.high = 0;
1869 priv->buf_size = 0;
1870
1871 if (!(hdev->hw_tc_map & BIT(i)))
1872 continue;
1873
1874 priv->enable = 1;
46a3df9f
S
1875
1876 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
b37ce587 1877 priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
8ca754b1
YL
1878 priv->wl.high = roundup(priv->wl.low + aligned_mps,
1879 HCLGE_BUF_SIZE_UNIT);
46a3df9f
S
1880 } else {
1881 priv->wl.low = 0;
b37ce587
YM
1882 priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
1883 aligned_mps;
46a3df9f 1884 }
8ca754b1
YL
1885
1886 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
46a3df9f
S
1887 }
1888
8ca754b1
YL
1889 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1890}
46a3df9f 1891
8ca754b1
YL
1892static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1893 struct hclge_pkt_buf_alloc *buf_alloc)
1894{
1895 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1896 int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1897 int i;
46a3df9f
S
1898
 1899	/* let the last TC be cleared first */
1900 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
8ca754b1 1901 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
ebaf1908 1902 unsigned int mask = BIT((unsigned int)i);
46a3df9f 1903
ebaf1908
WL
1904 if (hdev->hw_tc_map & mask &&
1905 !(hdev->tm_info.hw_pfc_map & mask)) {
46a3df9f
S
1906 /* Clear the no pfc TC private buffer */
1907 priv->wl.low = 0;
1908 priv->wl.high = 0;
1909 priv->buf_size = 0;
1910 priv->enable = 0;
1911 no_pfc_priv_num--;
1912 }
1913
acf61ecd 1914 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
46a3df9f
S
1915 no_pfc_priv_num == 0)
1916 break;
1917 }
1918
8ca754b1
YL
1919 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1920}
46a3df9f 1921
8ca754b1
YL
1922static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1923 struct hclge_pkt_buf_alloc *buf_alloc)
1924{
1925 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1926 int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1927 int i;
46a3df9f
S
1928
 1929	/* let the last TC be cleared first */
1930 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
8ca754b1 1931 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
ebaf1908 1932 unsigned int mask = BIT((unsigned int)i);
46a3df9f 1933
ebaf1908
WL
1934 if (hdev->hw_tc_map & mask &&
1935 hdev->tm_info.hw_pfc_map & mask) {
46a3df9f
S
1936 /* Reduce the number of pfc TC with private buffer */
1937 priv->wl.low = 0;
1938 priv->enable = 0;
1939 priv->wl.high = 0;
1940 priv->buf_size = 0;
1941 pfc_priv_num--;
1942 }
1943
acf61ecd 1944 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
46a3df9f
S
1945 pfc_priv_num == 0)
1946 break;
1947 }
8ca754b1
YL
1948
1949 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1950}
1951
9e15be90
YL
1952static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
1953 struct hclge_pkt_buf_alloc *buf_alloc)
1954{
1955#define COMPENSATE_BUFFER 0x3C00
1956#define COMPENSATE_HALF_MPS_NUM 5
1957#define PRIV_WL_GAP 0x1800
1958
1959 u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1960 u32 tc_num = hclge_get_tc_num(hdev);
1961 u32 half_mps = hdev->mps >> 1;
1962 u32 min_rx_priv;
1963 unsigned int i;
1964
1965 if (tc_num)
1966 rx_priv = rx_priv / tc_num;
1967
1968 if (tc_num <= NEED_RESERVE_TC_NUM)
1969 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
1970
1971 min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
1972 COMPENSATE_HALF_MPS_NUM * half_mps;
1973 min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
1974 rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
1975
1976 if (rx_priv < min_rx_priv)
1977 return false;
1978
1979 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1980 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1981
1982 priv->enable = 0;
1983 priv->wl.low = 0;
1984 priv->wl.high = 0;
1985 priv->buf_size = 0;
1986
1987 if (!(hdev->hw_tc_map & BIT(i)))
1988 continue;
1989
1990 priv->enable = 1;
1991 priv->buf_size = rx_priv;
1992 priv->wl.high = rx_priv - hdev->dv_buf_size;
1993 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
1994 }
1995
1996 buf_alloc->s_buf.buf_size = 0;
1997
1998 return true;
1999}
2000
8ca754b1
YL
2001/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2002 * @hdev: pointer to struct hclge_dev
2003 * @buf_alloc: pointer to buffer calculation data
 2004 * @return: 0: calculation successful, negative: fail
2005 */
2006static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2007 struct hclge_pkt_buf_alloc *buf_alloc)
2008{
2009 /* When DCB is not supported, rx private buffer is not allocated. */
2010 if (!hnae3_dev_dcb_supported(hdev)) {
2011 u32 rx_all = hdev->pkt_buf_size;
2012
2013 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2014 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2015 return -ENOMEM;
2016
2017 return 0;
2018 }
2019
9e15be90
YL
2020 if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2021 return 0;
2022
8ca754b1
YL
2023 if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2024 return 0;
2025
2026 /* try to decrease the buffer size */
2027 if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2028 return 0;
2029
2030 if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2031 return 0;
2032
2033 if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
46a3df9f
S
2034 return 0;
2035
2036 return -ENOMEM;
2037}
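/* Editorial note: on DCB-capable devices the calculation above tries, in
 * order, (1) private buffers only with no shared buffer, (2) the "max"
 * waterlines for every enabled TC, (3) the reduced waterlines, (4) dropping
 * the private buffers of TCs without PFC, and finally (5) dropping the
 * private buffers of PFC TCs as well; -ENOMEM is returned only if none of
 * these layouts fit in the packet buffer.
 */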
2038
acf61ecd
YL
2039static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2040 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f 2041{
d44f9b63 2042 struct hclge_rx_priv_buff_cmd *req;
46a3df9f
S
2043 struct hclge_desc desc;
2044 int ret;
2045 int i;
2046
2047 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
d44f9b63 2048 req = (struct hclge_rx_priv_buff_cmd *)desc.data;
46a3df9f
S
2049
2050 /* Alloc private buffer TCs */
2051 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
acf61ecd 2052 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
46a3df9f
S
2053
2054 req->buf_num[i] =
2055 cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2056 req->buf_num[i] |=
5bca3b94 2057 cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
46a3df9f
S
2058 }
2059
b8c8bf47 2060 req->shared_buf =
acf61ecd 2061 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
b8c8bf47
YL
2062 (1 << HCLGE_TC0_PRI_BUF_EN_B));
2063
46a3df9f 2064 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 2065 if (ret)
46a3df9f
S
2066 dev_err(&hdev->pdev->dev,
2067 "rx private buffer alloc cmd failed %d\n", ret);
46a3df9f 2068
3f639907 2069 return ret;
46a3df9f
S
2070}
2071
acf61ecd
YL
2072static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2073 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f
S
2074{
2075 struct hclge_rx_priv_wl_buf *req;
2076 struct hclge_priv_buf *priv;
2077 struct hclge_desc desc[2];
2078 int i, j;
2079 int ret;
2080
2081 for (i = 0; i < 2; i++) {
2082 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2083 false);
2084 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2085
 2086		/* The first descriptor sets the NEXT bit to 1 */
2087 if (i == 0)
2088 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2089 else
2090 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2091
2092 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
acf61ecd
YL
2093 u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2094
2095 priv = &buf_alloc->priv_buf[idx];
46a3df9f
S
2096 req->tc_wl[j].high =
2097 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2098 req->tc_wl[j].high |=
3738287c 2099 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
46a3df9f
S
2100 req->tc_wl[j].low =
2101 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2102 req->tc_wl[j].low |=
3738287c 2103 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
46a3df9f
S
2104 }
2105 }
2106
 2107	/* Send 2 descriptors at one time */
2108 ret = hclge_cmd_send(&hdev->hw, desc, 2);
3f639907 2109 if (ret)
46a3df9f
S
2110 dev_err(&hdev->pdev->dev,
2111 "rx private waterline config cmd failed %d\n",
2112 ret);
3f639907 2113 return ret;
46a3df9f
S
2114}
2115
acf61ecd
YL
2116static int hclge_common_thrd_config(struct hclge_dev *hdev,
2117 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f 2118{
acf61ecd 2119 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
46a3df9f
S
2120 struct hclge_rx_com_thrd *req;
2121 struct hclge_desc desc[2];
2122 struct hclge_tc_thrd *tc;
2123 int i, j;
2124 int ret;
2125
2126 for (i = 0; i < 2; i++) {
2127 hclge_cmd_setup_basic_desc(&desc[i],
2128 HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2129 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2130
 2131		/* The first descriptor sets the NEXT bit to 1 */
2132 if (i == 0)
2133 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2134 else
2135 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2136
2137 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2138 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2139
2140 req->com_thrd[j].high =
2141 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2142 req->com_thrd[j].high |=
3738287c 2143 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
46a3df9f
S
2144 req->com_thrd[j].low =
2145 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2146 req->com_thrd[j].low |=
3738287c 2147 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
46a3df9f
S
2148 }
2149 }
2150
2151 /* Send 2 descriptors at one time */
2152 ret = hclge_cmd_send(&hdev->hw, desc, 2);
3f639907 2153 if (ret)
46a3df9f
S
2154 dev_err(&hdev->pdev->dev,
2155 "common threshold config cmd failed %d\n", ret);
3f639907 2156 return ret;
46a3df9f
S
2157}
2158
acf61ecd
YL
2159static int hclge_common_wl_config(struct hclge_dev *hdev,
2160 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f 2161{
acf61ecd 2162 struct hclge_shared_buf *buf = &buf_alloc->s_buf;
46a3df9f
S
2163 struct hclge_rx_com_wl *req;
2164 struct hclge_desc desc;
2165 int ret;
2166
2167 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2168
2169 req = (struct hclge_rx_com_wl *)desc.data;
2170 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
3738287c 2171 req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
46a3df9f
S
2172
2173 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
3738287c 2174 req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
46a3df9f
S
2175
2176 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 2177 if (ret)
46a3df9f
S
2178 dev_err(&hdev->pdev->dev,
2179 "common waterline config cmd failed %d\n", ret);
46a3df9f 2180
3f639907 2181 return ret;
46a3df9f
S
2182}
2183
2184int hclge_buffer_alloc(struct hclge_dev *hdev)
2185{
acf61ecd 2186 struct hclge_pkt_buf_alloc *pkt_buf;
46a3df9f
S
2187 int ret;
2188
acf61ecd
YL
2189 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2190 if (!pkt_buf)
46a3df9f
S
2191 return -ENOMEM;
2192
acf61ecd 2193 ret = hclge_tx_buffer_calc(hdev, pkt_buf);
9ffe79a9
YL
2194 if (ret) {
2195 dev_err(&hdev->pdev->dev,
2196 "could not calc tx buffer size for all TCs %d\n", ret);
acf61ecd 2197 goto out;
9ffe79a9
YL
2198 }
2199
acf61ecd 2200 ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
46a3df9f
S
2201 if (ret) {
2202 dev_err(&hdev->pdev->dev,
2203 "could not alloc tx buffers %d\n", ret);
acf61ecd 2204 goto out;
46a3df9f
S
2205 }
2206
acf61ecd 2207 ret = hclge_rx_buffer_calc(hdev, pkt_buf);
46a3df9f
S
2208 if (ret) {
2209 dev_err(&hdev->pdev->dev,
2210 "could not calc rx priv buffer size for all TCs %d\n",
2211 ret);
acf61ecd 2212 goto out;
46a3df9f
S
2213 }
2214
acf61ecd 2215 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
46a3df9f
S
2216 if (ret) {
2217 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2218 ret);
acf61ecd 2219 goto out;
46a3df9f
S
2220 }
2221
2daf4a65 2222 if (hnae3_dev_dcb_supported(hdev)) {
acf61ecd 2223 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2daf4a65
YL
2224 if (ret) {
2225 dev_err(&hdev->pdev->dev,
2226 "could not configure rx private waterline %d\n",
2227 ret);
acf61ecd 2228 goto out;
2daf4a65 2229 }
46a3df9f 2230
acf61ecd 2231 ret = hclge_common_thrd_config(hdev, pkt_buf);
2daf4a65
YL
2232 if (ret) {
2233 dev_err(&hdev->pdev->dev,
2234 "could not configure common threshold %d\n",
2235 ret);
acf61ecd 2236 goto out;
2daf4a65 2237 }
46a3df9f
S
2238 }
2239
acf61ecd
YL
2240 ret = hclge_common_wl_config(hdev, pkt_buf);
2241 if (ret)
46a3df9f
S
2242 dev_err(&hdev->pdev->dev,
2243 "could not configure common waterline %d\n", ret);
46a3df9f 2244
acf61ecd
YL
2245out:
2246 kfree(pkt_buf);
2247 return ret;
46a3df9f
S
2248}
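/* Editorial summary (not in the original source): hclge_buffer_alloc() runs
 * the whole packet-buffer pipeline: calculate and commit the TX buffers,
 * calculate and commit the RX private buffers, then, on DCB-capable
 * hardware only, program the per-TC RX waterlines and common thresholds,
 * and finally program the common (shared buffer) waterline.
 */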
2249
2250static int hclge_init_roce_base_info(struct hclge_vport *vport)
2251{
2252 struct hnae3_handle *roce = &vport->roce;
2253 struct hnae3_handle *nic = &vport->nic;
2254
887c3820 2255 roce->rinfo.num_vectors = vport->back->num_roce_msi;
46a3df9f
S
2256
2257 if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2258 vport->back->num_msi_left == 0)
2259 return -EINVAL;
2260
2261 roce->rinfo.base_vector = vport->back->roce_base_vector;
2262
2263 roce->rinfo.netdev = nic->kinfo.netdev;
2264 roce->rinfo.roce_io_base = vport->back->hw.io_base;
2265
2266 roce->pdev = nic->pdev;
2267 roce->ae_algo = nic->ae_algo;
2268 roce->numa_node_mask = nic->numa_node_mask;
2269
2270 return 0;
2271}
2272
887c3820 2273static int hclge_init_msi(struct hclge_dev *hdev)
46a3df9f
S
2274{
2275 struct pci_dev *pdev = hdev->pdev;
887c3820
SM
2276 int vectors;
2277 int i;
46a3df9f 2278
887c3820
SM
2279 vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
2280 PCI_IRQ_MSI | PCI_IRQ_MSIX);
2281 if (vectors < 0) {
2282 dev_err(&pdev->dev,
2283 "failed(%d) to allocate MSI/MSI-X vectors\n",
2284 vectors);
2285 return vectors;
46a3df9f 2286 }
887c3820
SM
2287 if (vectors < hdev->num_msi)
2288 dev_warn(&hdev->pdev->dev,
2289 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2290 hdev->num_msi, vectors);
46a3df9f 2291
887c3820
SM
2292 hdev->num_msi = vectors;
2293 hdev->num_msi_left = vectors;
2294 hdev->base_msi_vector = pdev->irq;
46a3df9f 2295 hdev->roce_base_vector = hdev->base_msi_vector +
375dd5e4 2296 hdev->roce_base_msix_offset;
46a3df9f 2297
46a3df9f
S
2298 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2299 sizeof(u16), GFP_KERNEL);
887c3820
SM
2300 if (!hdev->vector_status) {
2301 pci_free_irq_vectors(pdev);
46a3df9f 2302 return -ENOMEM;
887c3820 2303 }
46a3df9f
S
2304
2305 for (i = 0; i < hdev->num_msi; i++)
2306 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2307
887c3820
SM
2308 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2309 sizeof(int), GFP_KERNEL);
2310 if (!hdev->vector_irq) {
2311 pci_free_irq_vectors(pdev);
2312 return -ENOMEM;
46a3df9f 2313 }
46a3df9f
S
2314
2315 return 0;
2316}
2317
2d03eacc 2318static u8 hclge_check_speed_dup(u8 duplex, int speed)
46a3df9f 2319{
2d03eacc
YL
2320 if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2321 duplex = HCLGE_MAC_FULL;
46a3df9f 2322
2d03eacc 2323 return duplex;
46a3df9f
S
2324}
2325
2d03eacc
YL
2326static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2327 u8 duplex)
46a3df9f 2328{
d44f9b63 2329 struct hclge_config_mac_speed_dup_cmd *req;
46a3df9f
S
2330 struct hclge_desc desc;
2331 int ret;
2332
d44f9b63 2333 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
46a3df9f
S
2334
2335 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2336
63cbf7a9
YM
2337 if (duplex)
2338 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
46a3df9f
S
2339
2340 switch (speed) {
2341 case HCLGE_MAC_SPEED_10M:
e4e87715
PL
2342 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2343 HCLGE_CFG_SPEED_S, 6);
46a3df9f
S
2344 break;
2345 case HCLGE_MAC_SPEED_100M:
e4e87715
PL
2346 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2347 HCLGE_CFG_SPEED_S, 7);
46a3df9f
S
2348 break;
2349 case HCLGE_MAC_SPEED_1G:
e4e87715
PL
2350 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2351 HCLGE_CFG_SPEED_S, 0);
46a3df9f
S
2352 break;
2353 case HCLGE_MAC_SPEED_10G:
e4e87715
PL
2354 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2355 HCLGE_CFG_SPEED_S, 1);
46a3df9f
S
2356 break;
2357 case HCLGE_MAC_SPEED_25G:
e4e87715
PL
2358 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2359 HCLGE_CFG_SPEED_S, 2);
46a3df9f
S
2360 break;
2361 case HCLGE_MAC_SPEED_40G:
e4e87715
PL
2362 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2363 HCLGE_CFG_SPEED_S, 3);
46a3df9f
S
2364 break;
2365 case HCLGE_MAC_SPEED_50G:
e4e87715
PL
2366 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2367 HCLGE_CFG_SPEED_S, 4);
46a3df9f
S
2368 break;
2369 case HCLGE_MAC_SPEED_100G:
e4e87715
PL
2370 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2371 HCLGE_CFG_SPEED_S, 5);
46a3df9f
S
2372 break;
2373 default:
d7629e74 2374 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
46a3df9f
S
2375 return -EINVAL;
2376 }
2377
e4e87715
PL
2378 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2379 1);
46a3df9f
S
2380
2381 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2382 if (ret) {
2383 dev_err(&hdev->pdev->dev,
2384 "mac speed/duplex config cmd failed %d.\n", ret);
2385 return ret;
2386 }
2387
2d03eacc
YL
2388 return 0;
2389}
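/* Editorial note: HCLGE_CFG_SPEED_S encoding used by the switch above:
 * 1G = 0, 10G = 1, 25G = 2, 40G = 3, 50G = 4, 100G = 5, 10M = 6, 100M = 7.
 */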
2390
2391int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2392{
2393 int ret;
2394
2395 duplex = hclge_check_speed_dup(duplex, speed);
2396 if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2397 return 0;
2398
2399 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2400 if (ret)
2401 return ret;
2402
2403 hdev->hw.mac.speed = speed;
2404 hdev->hw.mac.duplex = duplex;
46a3df9f
S
2405
2406 return 0;
2407}
2408
2409static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2410 u8 duplex)
2411{
2412 struct hclge_vport *vport = hclge_get_vport(handle);
2413 struct hclge_dev *hdev = vport->back;
2414
2415 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2416}
2417
46a3df9f
S
2418static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2419{
d44f9b63 2420 struct hclge_config_auto_neg_cmd *req;
46a3df9f 2421 struct hclge_desc desc;
a90bb9a5 2422 u32 flag = 0;
46a3df9f
S
2423 int ret;
2424
2425 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2426
d44f9b63 2427 req = (struct hclge_config_auto_neg_cmd *)desc.data;
b9a8f883
YL
2428 if (enable)
2429 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
a90bb9a5 2430 req->cfg_an_cmd_flag = cpu_to_le32(flag);
46a3df9f
S
2431
2432 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 2433 if (ret)
46a3df9f
S
2434 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2435 ret);
46a3df9f 2436
3f639907 2437 return ret;
46a3df9f
S
2438}
2439
2440static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2441{
2442 struct hclge_vport *vport = hclge_get_vport(handle);
2443 struct hclge_dev *hdev = vport->back;
2444
22f48e24
JS
2445 if (!hdev->hw.mac.support_autoneg) {
2446 if (enable) {
2447 dev_err(&hdev->pdev->dev,
2448 "autoneg is not supported by current port\n");
2449 return -EOPNOTSUPP;
2450 } else {
2451 return 0;
2452 }
2453 }
2454
46a3df9f
S
2455 return hclge_set_autoneg_en(hdev, enable);
2456}
2457
2458static int hclge_get_autoneg(struct hnae3_handle *handle)
2459{
2460 struct hclge_vport *vport = hclge_get_vport(handle);
2461 struct hclge_dev *hdev = vport->back;
27b5bf49
FL
2462 struct phy_device *phydev = hdev->hw.mac.phydev;
2463
2464 if (phydev)
2465 return phydev->autoneg;
46a3df9f
S
2466
2467 return hdev->hw.mac.autoneg;
2468}
2469
22f48e24
JS
2470static int hclge_restart_autoneg(struct hnae3_handle *handle)
2471{
2472 struct hclge_vport *vport = hclge_get_vport(handle);
2473 struct hclge_dev *hdev = vport->back;
2474 int ret;
2475
2476 dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2477
2478 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2479 if (ret)
2480 return ret;
2481 return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2482}
2483
7786a996
JS
2484static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2485{
2486 struct hclge_vport *vport = hclge_get_vport(handle);
2487 struct hclge_dev *hdev = vport->back;
2488
2489 if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2490 return hclge_set_autoneg_en(hdev, !halt);
2491
2492 return 0;
2493}
2494
7e6ec914
JS
2495static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2496{
2497 struct hclge_config_fec_cmd *req;
2498 struct hclge_desc desc;
2499 int ret;
2500
2501 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2502
2503 req = (struct hclge_config_fec_cmd *)desc.data;
2504 if (fec_mode & BIT(HNAE3_FEC_AUTO))
2505 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2506 if (fec_mode & BIT(HNAE3_FEC_RS))
2507 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2508 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2509 if (fec_mode & BIT(HNAE3_FEC_BASER))
2510 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2511 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2512
2513 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2514 if (ret)
2515 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2516
2517 return ret;
2518}
2519
2520static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2521{
2522 struct hclge_vport *vport = hclge_get_vport(handle);
2523 struct hclge_dev *hdev = vport->back;
2524 struct hclge_mac *mac = &hdev->hw.mac;
2525 int ret;
2526
2527 if (fec_mode && !(mac->fec_ability & fec_mode)) {
2528 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2529 return -EINVAL;
2530 }
2531
2532 ret = hclge_set_fec_hw(hdev, fec_mode);
2533 if (ret)
2534 return ret;
2535
2536 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2537 return 0;
2538}
2539
2540static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2541 u8 *fec_mode)
2542{
2543 struct hclge_vport *vport = hclge_get_vport(handle);
2544 struct hclge_dev *hdev = vport->back;
2545 struct hclge_mac *mac = &hdev->hw.mac;
2546
2547 if (fec_ability)
2548 *fec_ability = mac->fec_ability;
2549 if (fec_mode)
2550 *fec_mode = mac->fec_mode;
2551}
2552
46a3df9f
S
2553static int hclge_mac_init(struct hclge_dev *hdev)
2554{
2555 struct hclge_mac *mac = &hdev->hw.mac;
2556 int ret;
2557
5d497936 2558 hdev->support_sfp_query = true;
2d03eacc
YL
2559 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2560 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2561 hdev->hw.mac.duplex);
46a3df9f
S
2562 if (ret) {
2563 dev_err(&hdev->pdev->dev,
2564 "Config mac speed dup fail ret=%d\n", ret);
2565 return ret;
2566 }
2567
d736fc6c
JS
2568 if (hdev->hw.mac.support_autoneg) {
2569 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2570 if (ret) {
2571 dev_err(&hdev->pdev->dev,
2572 "Config mac autoneg fail ret=%d\n", ret);
2573 return ret;
2574 }
2575 }
2576
46a3df9f
S
2577 mac->link = 0;
2578
7e6ec914
JS
2579 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2580 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2581 if (ret) {
2582 dev_err(&hdev->pdev->dev,
2583 "Fec mode init fail, ret = %d\n", ret);
2584 return ret;
2585 }
2586 }
2587
e6d7d79d
YL
2588 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2589 if (ret) {
2590 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2591 return ret;
2592 }
f9fd82a9 2593
e6d7d79d 2594 ret = hclge_buffer_alloc(hdev);
3f639907 2595 if (ret)
f9fd82a9 2596 dev_err(&hdev->pdev->dev,
e6d7d79d 2597 "allocate buffer fail, ret=%d\n", ret);
f9fd82a9 2598
3f639907 2599 return ret;
46a3df9f
S
2600}
2601
c1a81619
SM
2602static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2603{
18e24888
HT
2604 if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
2605 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
08125454
YL
2606 queue_work_on(cpumask_first(&hdev->affinity_mask), system_wq,
2607 &hdev->mbx_service_task);
c1a81619
SM
2608}
2609
cb1b9f77
SM
2610static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2611{
acfc3d55
HT
2612 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2613 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
08125454
YL
2614 queue_work_on(cpumask_first(&hdev->affinity_mask), system_wq,
2615 &hdev->rst_service_task);
cb1b9f77
SM
2616}
2617
ed8fb4b2 2618void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
46a3df9f
S
2619{
2620 if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2621 !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
7be1b9f3
YL
2622 !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)) {
2623 hdev->hw_stats.stats_timer++;
2624 hdev->fd_arfs_expire_timer++;
08125454
YL
2625 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2626 system_wq, &hdev->service_task,
ed8fb4b2 2627 delay_time);
7be1b9f3 2628 }
46a3df9f
S
2629}
2630
2631static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2632{
d44f9b63 2633 struct hclge_link_status_cmd *req;
46a3df9f
S
2634 struct hclge_desc desc;
2635 int link_status;
2636 int ret;
2637
2638 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2639 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2640 if (ret) {
2641 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2642 ret);
2643 return ret;
2644 }
2645
d44f9b63 2646 req = (struct hclge_link_status_cmd *)desc.data;
c79301d8 2647 link_status = req->status & HCLGE_LINK_STATUS_UP_M;
46a3df9f
S
2648
2649 return !!link_status;
2650}
2651
2652static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2653{
ebaf1908 2654 unsigned int mac_state;
46a3df9f
S
2655 int link_stat;
2656
582d37bb
PL
2657 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2658 return 0;
2659
46a3df9f
S
2660 mac_state = hclge_get_mac_link_status(hdev);
2661
2662 if (hdev->hw.mac.phydev) {
fd813314 2663 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
46a3df9f
S
2664 link_stat = mac_state &
2665 hdev->hw.mac.phydev->link;
2666 else
2667 link_stat = 0;
2668
2669 } else {
2670 link_stat = mac_state;
2671 }
2672
2673 return !!link_stat;
2674}
2675
2676static void hclge_update_link_status(struct hclge_dev *hdev)
2677{
45e92b7e 2678 struct hnae3_client *rclient = hdev->roce_client;
46a3df9f 2679 struct hnae3_client *client = hdev->nic_client;
45e92b7e 2680 struct hnae3_handle *rhandle;
46a3df9f
S
2681 struct hnae3_handle *handle;
2682 int state;
2683 int i;
2684
2685 if (!client)
2686 return;
2687 state = hclge_get_mac_phy_link(hdev);
2688 if (state != hdev->hw.mac.link) {
2689 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2690 handle = &hdev->vport[i].nic;
2691 client->ops->link_status_change(handle, state);
a6345787 2692 hclge_config_mac_tnl_int(hdev, state);
45e92b7e
PL
2693 rhandle = &hdev->vport[i].roce;
2694 if (rclient && rclient->ops->link_status_change)
2695 rclient->ops->link_status_change(rhandle,
2696 state);
46a3df9f
S
2697 }
2698 hdev->hw.mac.link = state;
2699 }
2700}
2701
88d10bd6
JS
2702static void hclge_update_port_capability(struct hclge_mac *mac)
2703{
f438bfe9
JS
2704 /* update fec ability by speed */
2705 hclge_convert_setting_fec(mac);
2706
88d10bd6
JS
 2707	/* firmware can not identify the backplane type, the media type
 2708	 * read from the configuration helps to deal with it
2709 */
2710 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2711 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2712 mac->module_type = HNAE3_MODULE_TYPE_KR;
2713 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2714 mac->module_type = HNAE3_MODULE_TYPE_TP;
2715
 2716	if (mac->support_autoneg) {
2717 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2718 linkmode_copy(mac->advertising, mac->supported);
2719 } else {
2720 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2721 mac->supported);
2722 linkmode_zero(mac->advertising);
2723 }
2724}
2725
5d497936
PL
2726static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2727{
63cbf7a9 2728 struct hclge_sfp_info_cmd *resp;
5d497936
PL
2729 struct hclge_desc desc;
2730 int ret;
2731
88d10bd6
JS
2732 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2733 resp = (struct hclge_sfp_info_cmd *)desc.data;
5d497936
PL
2734 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2735 if (ret == -EOPNOTSUPP) {
2736 dev_warn(&hdev->pdev->dev,
2737 "IMP do not support get SFP speed %d\n", ret);
2738 return ret;
2739 } else if (ret) {
2740 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2741 return ret;
2742 }
2743
88d10bd6 2744 *speed = le32_to_cpu(resp->speed);
5d497936
PL
2745
2746 return 0;
2747}
2748
88d10bd6 2749static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
46a3df9f 2750{
88d10bd6
JS
2751 struct hclge_sfp_info_cmd *resp;
2752 struct hclge_desc desc;
46a3df9f
S
2753 int ret;
2754
88d10bd6
JS
2755 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2756 resp = (struct hclge_sfp_info_cmd *)desc.data;
2757
2758 resp->query_type = QUERY_ACTIVE_SPEED;
2759
2760 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2761 if (ret == -EOPNOTSUPP) {
2762 dev_warn(&hdev->pdev->dev,
2763 "IMP does not support get SFP info %d\n", ret);
2764 return ret;
2765 } else if (ret) {
2766 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2767 return ret;
2768 }
2769
2770 mac->speed = le32_to_cpu(resp->speed);
2771 /* if resp->speed_ability is 0, it means it's an old version
2772 * firmware, do not update these params
46a3df9f 2773 */
88d10bd6
JS
2774 if (resp->speed_ability) {
2775 mac->module_type = le32_to_cpu(resp->module_type);
2776 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2777 mac->autoneg = resp->autoneg;
2778 mac->support_autoneg = resp->autoneg_ability;
49b12556 2779 mac->speed_type = QUERY_ACTIVE_SPEED;
f438bfe9
JS
2780 if (!resp->active_fec)
2781 mac->fec_mode = 0;
2782 else
2783 mac->fec_mode = BIT(resp->active_fec);
88d10bd6
JS
2784 } else {
2785 mac->speed_type = QUERY_SFP_SPEED;
2786 }
2787
2788 return 0;
2789}
2790
2791static int hclge_update_port_info(struct hclge_dev *hdev)
2792{
2793 struct hclge_mac *mac = &hdev->hw.mac;
2794 int speed = HCLGE_MAC_SPEED_UNKNOWN;
2795 int ret;
2796
2797 /* get the port info from SFP cmd if not copper port */
2798 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
46a3df9f
S
2799 return 0;
2800
88d10bd6	 2801	/* if IMP does not support getting SFP/qSFP info, return directly */
5d497936
PL
2802 if (!hdev->support_sfp_query)
2803 return 0;
46a3df9f 2804
88d10bd6
JS
2805 if (hdev->pdev->revision >= 0x21)
2806 ret = hclge_get_sfp_info(hdev, mac);
2807 else
2808 ret = hclge_get_sfp_speed(hdev, &speed);
2809
5d497936
PL
2810 if (ret == -EOPNOTSUPP) {
2811 hdev->support_sfp_query = false;
2812 return ret;
2813 } else if (ret) {
2d03eacc 2814 return ret;
46a3df9f
S
2815 }
2816
88d10bd6
JS
2817 if (hdev->pdev->revision >= 0x21) {
2818 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2819 hclge_update_port_capability(mac);
2820 return 0;
2821 }
2822 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2823 HCLGE_MAC_FULL);
2824 } else {
2825 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2826 return 0; /* do nothing if no SFP */
46a3df9f 2827
88d10bd6
JS
2828 /* must config full duplex for SFP */
2829 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2830 }
46a3df9f
S
2831}
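/* Editorial note: on HW revision 0x21 and later the full SFP info query is
 * used above (speed, module type, speed/autoneg ability and active FEC),
 * while older revisions can only query the SFP speed and are forced to
 * full duplex; copper ports and firmware without SFP query support are
 * skipped entirely.
 */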
2832
2833static int hclge_get_status(struct hnae3_handle *handle)
2834{
2835 struct hclge_vport *vport = hclge_get_vport(handle);
2836 struct hclge_dev *hdev = vport->back;
2837
2838 hclge_update_link_status(hdev);
2839
2840 return hdev->hw.mac.link;
2841}
2842
ca1d7669
SM
2843static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2844{
f6162d44 2845 u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
ca1d7669
SM
2846
2847 /* fetch the events from their corresponding regs */
9ca8d1a7 2848 rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
c1a81619 2849 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
f6162d44
SM
2850 msix_src_reg = hclge_read_dev(&hdev->hw,
2851 HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
c1a81619
SM
2852
2853 /* Assumption: If by any chance reset and mailbox events are reported
2854 * together then we will only process reset event in this go and will
 2855	 * defer the processing of the mailbox events. Since we would not have
 2856	 * cleared the RX CMDQ event this time, we would receive another
 2857	 * interrupt from H/W just for the mailbox.
46ee7350
GL
2858 *
2859 * check for vector0 reset event sources
c1a81619 2860 */
6dd22bbc
HT
2861 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2862 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2863 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2864 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2865 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
f02eb82d 2866 hdev->rst_stats.imp_rst_cnt++;
6dd22bbc
HT
2867 return HCLGE_VECTOR0_EVENT_RST;
2868 }
2869
ca1d7669 2870 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
65e41e7e 2871 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
8d40854f 2872 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
ca1d7669
SM
2873 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2874 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
f02eb82d 2875 hdev->rst_stats.global_rst_cnt++;
ca1d7669
SM
2876 return HCLGE_VECTOR0_EVENT_RST;
2877 }
2878
f6162d44 2879 /* check for vector0 msix event source */
147175c9 2880 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
9bc6ac91
HT
2881 dev_info(&hdev->pdev->dev, "received event 0x%x\n",
2882 msix_src_reg);
2883 *clearval = msix_src_reg;
f6162d44 2884 return HCLGE_VECTOR0_EVENT_ERR;
147175c9 2885 }
f6162d44 2886
c1a81619
SM
2887 /* check for vector0 mailbox(=CMDQ RX) event source */
2888 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2889 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2890 *clearval = cmdq_src_reg;
2891 return HCLGE_VECTOR0_EVENT_MBX;
2892 }
ca1d7669 2893
147175c9 2894 /* print other vector0 event source */
9bc6ac91
HT
2895 dev_info(&hdev->pdev->dev,
2896 "CMDQ INT status:0x%x, other INT status:0x%x\n",
2897 cmdq_src_reg, msix_src_reg);
2898 *clearval = msix_src_reg;
2899
ca1d7669
SM
2900 return HCLGE_VECTOR0_EVENT_OTHER;
2901}
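/* Editorial note: the checks above effectively prioritise the vector0 event
 * sources as IMP reset > global reset > MSI-X/hardware error > mailbox,
 * and the reset cases also set HCLGE_STATE_CMD_DISABLE so that no further
 * commands are issued while the reset is pending.
 */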
2902
2903static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2904 u32 regclr)
2905{
c1a81619
SM
2906 switch (event_type) {
2907 case HCLGE_VECTOR0_EVENT_RST:
ca1d7669 2908 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
c1a81619
SM
2909 break;
2910 case HCLGE_VECTOR0_EVENT_MBX:
2911 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2912 break;
fa7a4bd5
JS
2913 default:
2914 break;
c1a81619 2915 }
ca1d7669
SM
2916}
2917
8e52a602
XW
2918static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2919{
2920 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2921 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2922 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2923 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2924 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
2925}
2926
466b0c00
L
2927static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2928{
2929 writel(enable ? 1 : 0, vector->addr);
2930}
2931
2932static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2933{
2934 struct hclge_dev *hdev = data;
ebaf1908 2935 u32 clearval = 0;
ca1d7669 2936 u32 event_cause;
466b0c00
L
2937
2938 hclge_enable_vector(&hdev->misc_vector, false);
ca1d7669
SM
2939 event_cause = hclge_check_event_cause(hdev, &clearval);
2940
c1a81619 2941 /* vector 0 interrupt is shared with reset and mailbox source events.*/
ca1d7669 2942 switch (event_cause) {
f6162d44
SM
2943 case HCLGE_VECTOR0_EVENT_ERR:
2944 /* we do not know what type of reset is required now. This could
2945 * only be decided after we fetch the type of errors which
2946 * caused this event. Therefore, we will do below for now:
2947 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
2948 * have defered type of reset to be used.
2949 * 2. Schedule the reset serivce task.
2950 * 3. When service task receives HNAE3_UNKNOWN_RESET type it
2951 * will fetch the correct type of reset. This would be done
2952 * by first decoding the types of errors.
2953 */
2954 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
2955 /* fall through */
ca1d7669 2956 case HCLGE_VECTOR0_EVENT_RST:
cb1b9f77 2957 hclge_reset_task_schedule(hdev);
ca1d7669 2958 break;
c1a81619
SM
2959 case HCLGE_VECTOR0_EVENT_MBX:
2960 /* If we are here then,
2961 * 1. Either we are not handling any mbx task and we are not
2962 * scheduled as well
2963 * OR
2964 * 2. We could be handling a mbx task but nothing more is
2965 * scheduled.
2966 * In both cases, we should schedule mbx task as there are more
2967 * mbx messages reported by this interrupt.
2968 */
2969 hclge_mbx_task_schedule(hdev);
f0ad97ac 2970 break;
ca1d7669 2971 default:
f0ad97ac
YL
2972 dev_warn(&hdev->pdev->dev,
2973 "received unknown or unhandled event of vector0\n");
ca1d7669
SM
2974 break;
2975 }
2976
72e2fb07
HT
2977 hclge_clear_event_cause(hdev, event_cause, clearval);
2978
 2979	/* Enable interrupt if it is not caused by reset. And when
 2980	 * clearval equals 0, it means the interrupt status may be
2981 * cleared by hardware before driver reads status register.
2982 * For this case, vector0 interrupt also should be enabled.
2983 */
9bc6ac91
HT
2984 if (!clearval ||
2985 event_cause == HCLGE_VECTOR0_EVENT_MBX) {
cd8c5c26
YL
2986 hclge_enable_vector(&hdev->misc_vector, true);
2987 }
466b0c00
L
2988
2989 return IRQ_HANDLED;
2990}
2991
2992static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2993{
36cbbdf6
PL
2994 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
2995 dev_warn(&hdev->pdev->dev,
2996 "vector(vector_id %d) has been freed.\n", vector_id);
2997 return;
2998 }
2999
466b0c00
L
3000 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3001 hdev->num_msi_left += 1;
3002 hdev->num_msi_used -= 1;
3003}
3004
3005static void hclge_get_misc_vector(struct hclge_dev *hdev)
3006{
3007 struct hclge_misc_vector *vector = &hdev->misc_vector;
3008
3009 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3010
3011 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3012 hdev->vector_status[0] = 0;
3013
3014 hdev->num_msi_left -= 1;
3015 hdev->num_msi_used += 1;
3016}
3017
08125454
YL
3018static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3019 const cpumask_t *mask)
3020{
3021 struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3022 affinity_notify);
3023
3024 cpumask_copy(&hdev->affinity_mask, mask);
3025}
3026
3027static void hclge_irq_affinity_release(struct kref *ref)
3028{
3029}
3030
3031static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3032{
3033 irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3034 &hdev->affinity_mask);
3035
3036 hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3037 hdev->affinity_notify.release = hclge_irq_affinity_release;
3038 irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3039 &hdev->affinity_notify);
3040}
3041
3042static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3043{
3044 irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3045 irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3046}
3047
466b0c00
L
3048static int hclge_misc_irq_init(struct hclge_dev *hdev)
3049{
3050 int ret;
3051
3052 hclge_get_misc_vector(hdev);
3053
ca1d7669
SM
3054 /* this would be explicitly freed in the end */
3055 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3056 0, "hclge_misc", hdev);
466b0c00
L
3057 if (ret) {
3058 hclge_free_vector(hdev, 0);
3059 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3060 hdev->misc_vector.vector_irq);
3061 }
3062
3063 return ret;
3064}
3065
ca1d7669
SM
3066static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3067{
3068 free_irq(hdev->misc_vector.vector_irq, hdev);
3069 hclge_free_vector(hdev, 0);
3070}
3071
af013903
HT
3072int hclge_notify_client(struct hclge_dev *hdev,
3073 enum hnae3_reset_notify_type type)
4ed340ab
L
3074{
3075 struct hnae3_client *client = hdev->nic_client;
3076 u16 i;
3077
9b2f3477 3078 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
bd9109c9
HT
3079 return 0;
3080
4ed340ab
L
3081 if (!client->ops->reset_notify)
3082 return -EOPNOTSUPP;
3083
3084 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3085 struct hnae3_handle *handle = &hdev->vport[i].nic;
3086 int ret;
3087
3088 ret = client->ops->reset_notify(handle, type);
65e41e7e
HT
3089 if (ret) {
3090 dev_err(&hdev->pdev->dev,
3091 "notify nic client failed %d(%d)\n", type, ret);
4ed340ab 3092 return ret;
65e41e7e 3093 }
4ed340ab
L
3094 }
3095
3096 return 0;
3097}
3098
f403a84f
HT
3099static int hclge_notify_roce_client(struct hclge_dev *hdev,
3100 enum hnae3_reset_notify_type type)
3101{
3102 struct hnae3_client *client = hdev->roce_client;
3103 int ret = 0;
3104 u16 i;
3105
9b2f3477 3106 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
f403a84f
HT
3107 return 0;
3108
3109 if (!client->ops->reset_notify)
3110 return -EOPNOTSUPP;
3111
3112 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3113 struct hnae3_handle *handle = &hdev->vport[i].roce;
3114
3115 ret = client->ops->reset_notify(handle, type);
3116 if (ret) {
3117 dev_err(&hdev->pdev->dev,
3118 "notify roce client failed %d(%d)",
3119 type, ret);
3120 return ret;
3121 }
3122 }
3123
3124 return ret;
3125}
3126
4ed340ab
L
3127static int hclge_reset_wait(struct hclge_dev *hdev)
3128{
3129#define HCLGE_RESET_WATI_MS 100
6dd22bbc 3130#define HCLGE_RESET_WAIT_CNT 200
4ed340ab
L
3131 u32 val, reg, reg_bit;
3132 u32 cnt = 0;
3133
3134 switch (hdev->reset_type) {
6dd22bbc
HT
3135 case HNAE3_IMP_RESET:
3136 reg = HCLGE_GLOBAL_RESET_REG;
3137 reg_bit = HCLGE_IMP_RESET_BIT;
3138 break;
4ed340ab
L
3139 case HNAE3_GLOBAL_RESET:
3140 reg = HCLGE_GLOBAL_RESET_REG;
3141 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3142 break;
4ed340ab
L
3143 case HNAE3_FUNC_RESET:
3144 reg = HCLGE_FUN_RST_ING;
3145 reg_bit = HCLGE_FUN_RST_ING_B;
3146 break;
6b9a97ee
HT
3147 case HNAE3_FLR_RESET:
3148 break;
4ed340ab
L
3149 default:
3150 dev_err(&hdev->pdev->dev,
3151 "Wait for unsupported reset type: %d\n",
3152 hdev->reset_type);
3153 return -EINVAL;
3154 }
3155
6b9a97ee
HT
3156 if (hdev->reset_type == HNAE3_FLR_RESET) {
3157 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
3158 cnt++ < HCLGE_RESET_WAIT_CNT)
3159 msleep(HCLGE_RESET_WATI_MS);
3160
3161 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
3162 dev_err(&hdev->pdev->dev,
3163 "flr wait timeout: %d\n", cnt);
3164 return -EBUSY;
3165 }
3166
3167 return 0;
3168 }
3169
4ed340ab 3170 val = hclge_read_dev(&hdev->hw, reg);
e4e87715 3171 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
4ed340ab
L
3172 msleep(HCLGE_RESET_WATI_MS);
3173 val = hclge_read_dev(&hdev->hw, reg);
3174 cnt++;
3175 }
3176
4ed340ab
L
3177 if (cnt >= HCLGE_RESET_WAIT_CNT) {
3178 dev_warn(&hdev->pdev->dev,
3179 "Wait for reset timeout: %d\n", hdev->reset_type);
3180 return -EBUSY;
3181 }
3182
3183 return 0;
3184}
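/* Editorial note: with HCLGE_RESET_WAIT_CNT = 200 and HCLGE_RESET_WATI_MS =
 * 100, the polling loop above waits for at most roughly 20 seconds before
 * declaring the reset (or FLR) timed out.
 */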
3185
aa5c4f17
HT
3186static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3187{
3188 struct hclge_vf_rst_cmd *req;
3189 struct hclge_desc desc;
3190
3191 req = (struct hclge_vf_rst_cmd *)desc.data;
3192 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3193 req->dest_vfid = func_id;
3194
3195 if (reset)
3196 req->vf_rst = 0x1;
3197
3198 return hclge_cmd_send(&hdev->hw, &desc, 1);
3199}
3200
e511f17b 3201static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
aa5c4f17
HT
3202{
3203 int i;
3204
3205 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3206 struct hclge_vport *vport = &hdev->vport[i];
3207 int ret;
3208
3209 /* Send cmd to set/clear VF's FUNC_RST_ING */
3210 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3211 if (ret) {
3212 dev_err(&hdev->pdev->dev,
790cd1a8 3213 "set vf(%d) rst failed %d!\n",
aa5c4f17
HT
3214 vport->vport_id, ret);
3215 return ret;
3216 }
3217
cc645dfa 3218 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
aa5c4f17
HT
3219 continue;
3220
3221 /* Inform VF to process the reset.
3222 * hclge_inform_reset_assert_to_vf may fail if VF
3223 * driver is not loaded.
3224 */
3225 ret = hclge_inform_reset_assert_to_vf(vport);
3226 if (ret)
3227 dev_warn(&hdev->pdev->dev,
790cd1a8 3228 "inform reset to vf(%d) failed %d!\n",
aa5c4f17
HT
3229 vport->vport_id, ret);
3230 }
3231
3232 return 0;
3233}
3234
a9a96760 3235static int hclge_func_reset_sync_vf(struct hclge_dev *hdev)
427a7bff
HT
3236{
3237 struct hclge_pf_rst_sync_cmd *req;
3238 struct hclge_desc desc;
3239 int cnt = 0;
3240 int ret;
3241
3242 req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3243 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3244
3245 do {
3246 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
 3247		/* for compatibility with old firmware, wait
 3248		 * 100 ms for the VF to stop IO
3249 */
3250 if (ret == -EOPNOTSUPP) {
3251 msleep(HCLGE_RESET_SYNC_TIME);
3252 return 0;
3253 } else if (ret) {
3254 dev_err(&hdev->pdev->dev, "sync with VF fail %d!\n",
3255 ret);
3256 return ret;
3257 } else if (req->all_vf_ready) {
3258 return 0;
3259 }
3260 msleep(HCLGE_PF_RESET_SYNC_TIME);
3261 hclge_cmd_reuse_desc(&desc, true);
3262 } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3263
3264 dev_err(&hdev->pdev->dev, "sync with VF timeout!\n");
3265 return -ETIME;
3266}
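/* Editorial note: with the HCLGE_PF_RESET_SYNC_* values defined near the
 * top of this file (a 20 ms poll interval repeated up to 1500 times), the
 * sync loop above gives the VFs roughly 30 seconds to report ready before
 * returning -ETIME.
 */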
3267
2bfbd35d 3268int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
4ed340ab
L
3269{
3270 struct hclge_desc desc;
3271 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3272 int ret;
3273
3274 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
e4e87715 3275 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
4ed340ab
L
3276 req->fun_reset_vfid = func_id;
3277
3278 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3279 if (ret)
3280 dev_err(&hdev->pdev->dev,
3281 "send function reset cmd fail, status =%d\n", ret);
3282
3283 return ret;
3284}
3285
f2f432f2 3286static void hclge_do_reset(struct hclge_dev *hdev)
4ed340ab 3287{
4f765d3e 3288 struct hnae3_handle *handle = &hdev->vport[0].nic;
4ed340ab
L
3289 struct pci_dev *pdev = hdev->pdev;
3290 u32 val;
3291
4f765d3e
HT
3292 if (hclge_get_hw_reset_stat(handle)) {
3293 dev_info(&pdev->dev, "Hardware reset not finish\n");
3294 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3295 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3296 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3297 return;
3298 }
3299
f2f432f2 3300 switch (hdev->reset_type) {
4ed340ab
L
3301 case HNAE3_GLOBAL_RESET:
3302 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
e4e87715 3303 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
4ed340ab
L
3304 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3305 dev_info(&pdev->dev, "Global Reset requested\n");
3306 break;
4ed340ab
L
3307 case HNAE3_FUNC_RESET:
3308 dev_info(&pdev->dev, "PF Reset requested\n");
cb1b9f77
SM
3309 /* schedule again to check later */
3310 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3311 hclge_reset_task_schedule(hdev);
4ed340ab 3312 break;
6b9a97ee
HT
3313 case HNAE3_FLR_RESET:
3314 dev_info(&pdev->dev, "FLR requested\n");
3315 /* schedule again to check later */
3316 set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
3317 hclge_reset_task_schedule(hdev);
3318 break;
4ed340ab
L
3319 default:
3320 dev_warn(&pdev->dev,
f2f432f2 3321 "Unsupported reset type: %d\n", hdev->reset_type);
4ed340ab
L
3322 break;
3323 }
3324}
3325
123297b7 3326static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
f2f432f2
SM
3327 unsigned long *addr)
3328{
3329 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
123297b7 3330 struct hclge_dev *hdev = ae_dev->priv;
f2f432f2 3331
f6162d44
SM
3332 /* first, resolve any unknown reset type to the known type(s) */
3333 if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3334 /* we will intentionally ignore any errors from this function
3335 * as we will end up in *some* reset request in any case
3336 */
3337 hclge_handle_hw_msix_error(hdev, addr);
3338 clear_bit(HNAE3_UNKNOWN_RESET, addr);
 3339		/* We deferred the clearing of the error event which caused
 3340		 * the interrupt since it was not possible to do that in
 3341		 * interrupt context (and this is the reason we introduced the
 3342		 * new UNKNOWN reset type). Now that the errors have been
 3343		 * handled and cleared in hardware, we can safely enable
3344 * interrupts. This is an exception to the norm.
3345 */
3346 hclge_enable_vector(&hdev->misc_vector, true);
3347 }
3348
f2f432f2 3349 /* return the highest priority reset level amongst all */
7cea834d
HT
3350 if (test_bit(HNAE3_IMP_RESET, addr)) {
3351 rst_level = HNAE3_IMP_RESET;
3352 clear_bit(HNAE3_IMP_RESET, addr);
3353 clear_bit(HNAE3_GLOBAL_RESET, addr);
7cea834d
HT
3354 clear_bit(HNAE3_FUNC_RESET, addr);
3355 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
f2f432f2 3356 rst_level = HNAE3_GLOBAL_RESET;
7cea834d 3357 clear_bit(HNAE3_GLOBAL_RESET, addr);
7cea834d
HT
3358 clear_bit(HNAE3_FUNC_RESET, addr);
3359 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
f2f432f2 3360 rst_level = HNAE3_FUNC_RESET;
7cea834d 3361 clear_bit(HNAE3_FUNC_RESET, addr);
6b9a97ee
HT
3362 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3363 rst_level = HNAE3_FLR_RESET;
3364 clear_bit(HNAE3_FLR_RESET, addr);
7cea834d 3365 }
f2f432f2 3366
0fdf4d30
HT
3367 if (hdev->reset_type != HNAE3_NONE_RESET &&
3368 rst_level < hdev->reset_type)
3369 return HNAE3_NONE_RESET;
3370
f2f432f2
SM
3371 return rst_level;
3372}
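/* Editorial note: the priority order resolved above is IMP > global >
 * function > FLR reset; when a higher-priority reset is selected, the
 * lower-priority pending bits it covers are cleared, and a level below the
 * reset already in progress is reported as HNAE3_NONE_RESET.
 */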
3373
cd8c5c26
YL
3374static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3375{
3376 u32 clearval = 0;
3377
3378 switch (hdev->reset_type) {
3379 case HNAE3_IMP_RESET:
3380 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3381 break;
3382 case HNAE3_GLOBAL_RESET:
3383 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3384 break;
cd8c5c26 3385 default:
cd8c5c26
YL
3386 break;
3387 }
3388
3389 if (!clearval)
3390 return;
3391
72e2fb07
HT
3392 /* For revision 0x20, the reset interrupt source
3393 * can only be cleared after hardware reset done
3394 */
3395 if (hdev->pdev->revision == 0x20)
3396 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3397 clearval);
3398
cd8c5c26
YL
3399 hclge_enable_vector(&hdev->misc_vector, true);
3400}
3401
aa5c4f17
HT
3402static int hclge_reset_prepare_down(struct hclge_dev *hdev)
3403{
3404 int ret = 0;
3405
3406 switch (hdev->reset_type) {
3407 case HNAE3_FUNC_RESET:
6b9a97ee
HT
3408 /* fall through */
3409 case HNAE3_FLR_RESET:
aa5c4f17
HT
3410 ret = hclge_set_all_vf_rst(hdev, true);
3411 break;
3412 default:
3413 break;
3414 }
3415
3416 return ret;
3417}
3418
6b428b4f
HT
3419static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3420{
3421 u32 reg_val;
3422
3423 reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3424 if (enable)
3425 reg_val |= HCLGE_NIC_SW_RST_RDY;
3426 else
3427 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3428
3429 hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3430}
3431
35d93a30
HT
3432static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3433{
6dd22bbc 3434 u32 reg_val;
35d93a30
HT
3435 int ret = 0;
3436
3437 switch (hdev->reset_type) {
3438 case HNAE3_FUNC_RESET:
427a7bff
HT
 3439		/* to confirm whether all running VFs are ready
 3440		 * before requesting the PF reset
aa5c4f17 3441 */
427a7bff
HT
3442 ret = hclge_func_reset_sync_vf(hdev);
3443 if (ret)
3444 return ret;
3445
35d93a30
HT
3446 ret = hclge_func_reset_cmd(hdev, 0);
3447 if (ret) {
3448 dev_err(&hdev->pdev->dev,
141b95d5 3449 "asserting function reset fail %d!\n", ret);
35d93a30
HT
3450 return ret;
3451 }
3452
 3453 		/* After performing PF reset, it is not necessary to do the
3454 * mailbox handling or send any command to firmware, because
3455 * any mailbox handling or command to firmware is only valid
3456 * after hclge_cmd_init is called.
3457 */
3458 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
f02eb82d 3459 hdev->rst_stats.pf_rst_cnt++;
35d93a30 3460 break;
6b9a97ee 3461 case HNAE3_FLR_RESET:
427a7bff
HT
 3462 		/* to confirm whether all running VFs are ready
 3463 		 * before requesting PF reset
6b9a97ee 3464 */
427a7bff
HT
3465 ret = hclge_func_reset_sync_vf(hdev);
3466 if (ret)
3467 return ret;
3468
6b9a97ee
HT
3469 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3470 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
f02eb82d 3471 hdev->rst_stats.flr_rst_cnt++;
6b9a97ee 3472 break;
6dd22bbc
HT
3473 case HNAE3_IMP_RESET:
3474 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3475 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3476 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3477 break;
35d93a30
HT
3478 default:
3479 break;
3480 }
3481
ada13ee3
HT
3482 /* inform hardware that preparatory work is done */
3483 msleep(HCLGE_RESET_SYNC_TIME);
6b428b4f 3484 hclge_reset_handshake(hdev, true);
35d93a30
HT
3485 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3486
3487 return ret;
3488}
3489
8e9eee78 3490static bool hclge_reset_err_handle(struct hclge_dev *hdev)
65e41e7e
HT
3491{
3492#define MAX_RESET_FAIL_CNT 5
65e41e7e
HT
3493
3494 if (hdev->reset_pending) {
3495 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3496 hdev->reset_pending);
3497 return true;
3498 } else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
3499 (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
3500 BIT(HCLGE_IMP_RESET_BIT))) {
3501 dev_info(&hdev->pdev->dev,
3502 "reset failed because IMP Reset is pending\n");
3503 hclge_clear_reset_cause(hdev);
3504 return false;
3505 } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3506 hdev->reset_fail_cnt++;
8e9eee78
HT
3507 set_bit(hdev->reset_type, &hdev->reset_pending);
3508 dev_info(&hdev->pdev->dev,
3509 "re-schedule reset task(%d)\n",
3510 hdev->reset_fail_cnt);
3511 return true;
65e41e7e
HT
3512 }
3513
3514 hclge_clear_reset_cause(hdev);
6b428b4f
HT
3515
 3516 	/* recover the handshake status when reset fails */
3517 hclge_reset_handshake(hdev, true);
3518
65e41e7e
HT
3519 dev_err(&hdev->pdev->dev, "Reset fail!\n");
3520 return false;
3521}
3522
72e2fb07
HT
3523static int hclge_set_rst_done(struct hclge_dev *hdev)
3524{
3525 struct hclge_pf_rst_done_cmd *req;
3526 struct hclge_desc desc;
3527
3528 req = (struct hclge_pf_rst_done_cmd *)desc.data;
3529 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3530 req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3531
3532 return hclge_cmd_send(&hdev->hw, &desc, 1);
3533}
3534
aa5c4f17
HT
3535static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3536{
3537 int ret = 0;
3538
3539 switch (hdev->reset_type) {
3540 case HNAE3_FUNC_RESET:
6b9a97ee
HT
3541 /* fall through */
3542 case HNAE3_FLR_RESET:
aa5c4f17
HT
3543 ret = hclge_set_all_vf_rst(hdev, false);
3544 break;
72e2fb07
HT
3545 case HNAE3_GLOBAL_RESET:
3546 /* fall through */
3547 case HNAE3_IMP_RESET:
3548 ret = hclge_set_rst_done(hdev);
3549 break;
aa5c4f17
HT
3550 default:
3551 break;
3552 }
3553
6b428b4f
HT
 3554 	/* clear up the handshake status after re-initialization is done */
3555 hclge_reset_handshake(hdev, false);
3556
aa5c4f17
HT
3557 return ret;
3558}
3559
63cbf7a9
YM
3560static int hclge_reset_stack(struct hclge_dev *hdev)
3561{
3562 int ret;
3563
3564 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3565 if (ret)
3566 return ret;
3567
3568 ret = hclge_reset_ae_dev(hdev->ae_dev);
3569 if (ret)
3570 return ret;
3571
3572 ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3573 if (ret)
3574 return ret;
3575
3576 return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3577}
3578
f2f432f2
SM
3579static void hclge_reset(struct hclge_dev *hdev)
3580{
6871af29 3581 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
65e41e7e 3582 int ret;
9de0b86f 3583
6871af29
JS
3584 /* Initialize ae_dev reset status as well, in case enet layer wants to
3585 * know if device is undergoing reset
3586 */
3587 ae_dev->reset_type = hdev->reset_type;
f02eb82d 3588 hdev->rst_stats.reset_cnt++;
f2f432f2 3589 /* perform reset of the stack & ae device for a client */
65e41e7e
HT
3590 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3591 if (ret)
3592 goto err_reset;
3593
aa5c4f17
HT
3594 ret = hclge_reset_prepare_down(hdev);
3595 if (ret)
3596 goto err_reset;
3597
6d4fab39 3598 rtnl_lock();
65e41e7e
HT
3599 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3600 if (ret)
3601 goto err_reset_lock;
f2f432f2 3602
65e41e7e 3603 rtnl_unlock();
35d93a30 3604
65e41e7e
HT
3605 ret = hclge_reset_prepare_wait(hdev);
3606 if (ret)
3607 goto err_reset;
cd8c5c26 3608
8e9eee78 3609 if (hclge_reset_wait(hdev))
65e41e7e 3610 goto err_reset;
f2f432f2 3611
f02eb82d
HT
3612 hdev->rst_stats.hw_reset_done_cnt++;
3613
65e41e7e
HT
3614 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3615 if (ret)
3616 goto err_reset;
3617
3618 rtnl_lock();
65e41e7e 3619
63cbf7a9 3620 ret = hclge_reset_stack(hdev);
1f609492
YL
3621 if (ret)
3622 goto err_reset_lock;
3623
65e41e7e
HT
3624 hclge_clear_reset_cause(hdev);
3625
aa5c4f17
HT
3626 ret = hclge_reset_prepare_up(hdev);
3627 if (ret)
3628 goto err_reset_lock;
3629
63cbf7a9
YM
3630 rtnl_unlock();
3631
3632 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3633 /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
3634 * times
3635 */
3636 if (ret && hdev->reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3637 goto err_reset;
3638
3639 rtnl_lock();
3640
65e41e7e
HT
3641 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3642 if (ret)
3643 goto err_reset_lock;
3644
6d4fab39 3645 rtnl_unlock();
f403a84f 3646
65e41e7e
HT
3647 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3648 if (ret)
3649 goto err_reset;
3650
b644a8d4
HT
3651 hdev->last_reset_time = jiffies;
3652 hdev->reset_fail_cnt = 0;
f02eb82d 3653 hdev->rst_stats.reset_done_cnt++;
b644a8d4 3654 ae_dev->reset_type = HNAE3_NONE_RESET;
012fcb52
HT
3655
3656 /* if default_reset_request has a higher level reset request,
 3657 	 * it should be handled as soon as possible, since some errors
 3658 	 * need this kind of reset to be fixed.
3659 */
3660 hdev->reset_level = hclge_get_reset_level(ae_dev,
3661 &hdev->default_reset_request);
3662 if (hdev->reset_level != HNAE3_NONE_RESET)
3663 set_bit(hdev->reset_level, &hdev->reset_request);
b644a8d4 3664
65e41e7e
HT
3665 return;
3666
3667err_reset_lock:
3668 rtnl_unlock();
3669err_reset:
8e9eee78 3670 if (hclge_reset_err_handle(hdev))
65e41e7e 3671 hclge_reset_task_schedule(hdev);
f2f432f2
SM
3672}
3673
6ae4e733
SJ
3674static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3675{
3676 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3677 struct hclge_dev *hdev = ae_dev->priv;
3678
 3679 	/* We might end up getting called broadly because of the 2 cases below:
3680 * 1. Recoverable error was conveyed through APEI and only way to bring
3681 * normalcy is to reset.
3682 * 2. A new reset request from the stack due to timeout
3683 *
 3684 	 * For the first case, the error event might not have an ae handle available.
 3685 	 * Check if this is a new reset request and we are not here just because
6d4c3981
SM
3686 * last reset attempt did not succeed and watchdog hit us again. We will
3687 * know this if last reset request did not occur very recently (watchdog
 3688 	 * timer = 5*HZ, let us check after a sufficiently large time, say 4*5*HZ).
 3689 	 * In case of a new request we reset the "reset level" to PF reset.
9de0b86f
HT
3690 * And if it is a repeat reset request of the most recent one then we
3691 * want to make sure we throttle the reset request. Therefore, we will
3692 * not allow it again before 3*HZ times.
6d4c3981 3693 */
6ae4e733
SJ
3694 if (!handle)
3695 handle = &hdev->vport[0].nic;
3696
b37ce587 3697 if (time_before(jiffies, (hdev->last_reset_time +
012fcb52
HT
3698 HCLGE_RESET_INTERVAL))) {
3699 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
9de0b86f 3700 return;
012fcb52 3701 } else if (hdev->default_reset_request)
0742ed7c 3702 hdev->reset_level =
123297b7 3703 hclge_get_reset_level(ae_dev,
720bd583 3704 &hdev->default_reset_request);
0742ed7c
HT
3705 else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
3706 hdev->reset_level = HNAE3_FUNC_RESET;
4ed340ab 3707
6d4c3981 3708 	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d",
0742ed7c 3709 hdev->reset_level);
6d4c3981
SM
3710
3711 /* request reset & schedule reset task */
0742ed7c 3712 set_bit(hdev->reset_level, &hdev->reset_request);
6d4c3981
SM
3713 hclge_reset_task_schedule(hdev);
3714
0742ed7c
HT
3715 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3716 hdev->reset_level++;
4ed340ab
L
3717}
3718
720bd583
HT
3719static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3720 enum hnae3_reset_type rst_type)
3721{
3722 struct hclge_dev *hdev = ae_dev->priv;
3723
3724 set_bit(rst_type, &hdev->default_reset_request);
3725}
3726
65e41e7e
HT
3727static void hclge_reset_timer(struct timer_list *t)
3728{
3729 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3730
012fcb52
HT
3731 /* if default_reset_request has no value, it means that this reset
 3732 	 * request has already been handled, so just return here
3733 */
3734 if (!hdev->default_reset_request)
3735 return;
3736
65e41e7e 3737 dev_info(&hdev->pdev->dev,
e3b84ed2 3738 "triggering reset in reset timer\n");
65e41e7e
HT
3739 hclge_reset_event(hdev->pdev, NULL);
3740}
3741
4ed340ab
L
3742static void hclge_reset_subtask(struct hclge_dev *hdev)
3743{
123297b7
SJ
3744 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3745
f2f432f2
SM
3746 /* check if there is any ongoing reset in the hardware. This status can
 3747 	 * be checked from reset_pending. If there is, then we need to wait for
3748 * hardware to complete reset.
3749 * a. If we are able to figure out in reasonable time that hardware
 3750 	 *       has fully reset, then we can proceed with driver and client
3751 * reset.
3752 * b. else, we can come back later to check this status so re-sched
3753 * now.
3754 */
0742ed7c 3755 hdev->last_reset_time = jiffies;
123297b7 3756 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
f2f432f2
SM
3757 if (hdev->reset_type != HNAE3_NONE_RESET)
3758 hclge_reset(hdev);
4ed340ab 3759
f2f432f2 3760 /* check if we got any *new* reset requests to be honored */
123297b7 3761 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
f2f432f2
SM
3762 if (hdev->reset_type != HNAE3_NONE_RESET)
3763 hclge_do_reset(hdev);
4ed340ab 3764
4ed340ab
L
3765 hdev->reset_type = HNAE3_NONE_RESET;
3766}
3767
cb1b9f77 3768static void hclge_reset_service_task(struct work_struct *work)
466b0c00 3769{
cb1b9f77
SM
3770 struct hclge_dev *hdev =
3771 container_of(work, struct hclge_dev, rst_service_task);
3772
3773 if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3774 return;
3775
3776 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
3777
4ed340ab 3778 hclge_reset_subtask(hdev);
cb1b9f77
SM
3779
3780 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
466b0c00
L
3781}
3782
c1a81619
SM
3783static void hclge_mailbox_service_task(struct work_struct *work)
3784{
3785 struct hclge_dev *hdev =
3786 container_of(work, struct hclge_dev, mbx_service_task);
3787
3788 if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3789 return;
3790
3791 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
3792
3793 hclge_mbx_handler(hdev);
3794
3795 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3796}
3797
a6d818e3
YL
3798static void hclge_update_vport_alive(struct hclge_dev *hdev)
3799{
3800 int i;
3801
 3802 	/* start from vport 1 because the PF is always alive */
3803 for (i = 1; i < hdev->num_alloc_vport; i++) {
3804 struct hclge_vport *vport = &hdev->vport[i];
3805
3806 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3807 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
818f1675
YL
3808
3809 /* If vf is not alive, set to default value */
3810 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3811 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
a6d818e3
YL
3812 }
3813}
3814
46a3df9f
S
3815static void hclge_service_task(struct work_struct *work)
3816{
3817 struct hclge_dev *hdev =
7be1b9f3
YL
3818 container_of(work, struct hclge_dev, service_task.work);
3819
3820 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
46a3df9f 3821
c5f65480
JS
3822 if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
3823 hclge_update_stats_for_all(hdev);
3824 hdev->hw_stats.stats_timer = 0;
3825 }
3826
88d10bd6 3827 hclge_update_port_info(hdev);
46a3df9f 3828 hclge_update_link_status(hdev);
a6d818e3 3829 hclge_update_vport_alive(hdev);
fe4144d4 3830 hclge_sync_vlan_filter(hdev);
d93ed94f
JS
3831 if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) {
3832 hclge_rfs_filter_expire(hdev);
3833 hdev->fd_arfs_expire_timer = 0;
3834 }
7be1b9f3 3835
ed8fb4b2 3836 hclge_task_schedule(hdev, round_jiffies_relative(HZ));
46a3df9f
S
3837}
3838
46a3df9f
S
3839struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
3840{
3841 /* VF handle has no client */
3842 if (!handle->client)
3843 return container_of(handle, struct hclge_vport, nic);
3844 else if (handle->client->type == HNAE3_CLIENT_ROCE)
3845 return container_of(handle, struct hclge_vport, roce);
3846 else
3847 return container_of(handle, struct hclge_vport, nic);
3848}
3849
3850static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
3851 struct hnae3_vector_info *vector_info)
3852{
3853 struct hclge_vport *vport = hclge_get_vport(handle);
3854 struct hnae3_vector_info *vector = vector_info;
3855 struct hclge_dev *hdev = vport->back;
3856 int alloc = 0;
3857 int i, j;
3858
3859 vector_num = min(hdev->num_msi_left, vector_num);
3860
3861 for (j = 0; j < vector_num; j++) {
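		/* vector 0 is reserved for the misc (reset/mailbox) interrupt,
		 * so ring vectors are allocated starting from index 1
		 */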
3862 for (i = 1; i < hdev->num_msi; i++) {
3863 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
3864 vector->vector = pci_irq_vector(hdev->pdev, i);
3865 vector->io_addr = hdev->hw.io_base +
3866 HCLGE_VECTOR_REG_BASE +
3867 (i - 1) * HCLGE_VECTOR_REG_OFFSET +
3868 vport->vport_id *
3869 HCLGE_VECTOR_VF_OFFSET;
3870 hdev->vector_status[i] = vport->vport_id;
887c3820 3871 hdev->vector_irq[i] = vector->vector;
46a3df9f
S
3872
3873 vector++;
3874 alloc++;
3875
3876 break;
3877 }
3878 }
3879 }
3880 hdev->num_msi_left -= alloc;
3881 hdev->num_msi_used += alloc;
3882
3883 return alloc;
3884}
3885
3886static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
3887{
3888 int i;
3889
887c3820
SM
3890 for (i = 0; i < hdev->num_msi; i++)
3891 if (vector == hdev->vector_irq[i])
3892 return i;
3893
46a3df9f
S
3894 return -EINVAL;
3895}
3896
0d3e6631
YL
3897static int hclge_put_vector(struct hnae3_handle *handle, int vector)
3898{
3899 struct hclge_vport *vport = hclge_get_vport(handle);
3900 struct hclge_dev *hdev = vport->back;
3901 int vector_id;
3902
3903 vector_id = hclge_get_vector_index(hdev, vector);
3904 if (vector_id < 0) {
3905 dev_err(&hdev->pdev->dev,
3906 "Get vector index fail. vector_id =%d\n", vector_id);
3907 return vector_id;
3908 }
3909
3910 hclge_free_vector(hdev, vector_id);
3911
3912 return 0;
3913}
3914
46a3df9f
S
3915static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
3916{
3917 return HCLGE_RSS_KEY_SIZE;
3918}
3919
3920static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
3921{
3922 return HCLGE_RSS_IND_TBL_SIZE;
3923}
3924
46a3df9f
S
3925static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
3926 const u8 hfunc, const u8 *key)
3927{
d44f9b63 3928 struct hclge_rss_config_cmd *req;
ebaf1908 3929 unsigned int key_offset = 0;
46a3df9f 3930 struct hclge_desc desc;
3caf772b 3931 int key_counts;
46a3df9f
S
3932 int key_size;
3933 int ret;
3934
3caf772b 3935 key_counts = HCLGE_RSS_KEY_SIZE;
d44f9b63 3936 req = (struct hclge_rss_config_cmd *)desc.data;
46a3df9f 3937
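	/* The hash key is longer than a single command descriptor can carry,
	 * so it is written in chunks of HCLGE_RSS_HASH_KEY_NUM bytes;
	 * key_offset tells the hardware which chunk this descriptor holds.
	 */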
3caf772b 3938 while (key_counts) {
46a3df9f
S
3939 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
3940 false);
3941
3942 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
3943 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
3944
3caf772b 3945 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
46a3df9f
S
3946 memcpy(req->hash_key,
3947 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
3948
3caf772b
YM
3949 key_counts -= key_size;
3950 key_offset++;
46a3df9f
S
3951 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3952 if (ret) {
3953 dev_err(&hdev->pdev->dev,
3954 "Configure RSS config fail, status = %d\n",
3955 ret);
3956 return ret;
3957 }
3958 }
3959 return 0;
3960}
3961
89523cfa 3962static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
46a3df9f 3963{
d44f9b63 3964 struct hclge_rss_indirection_table_cmd *req;
46a3df9f
S
3965 struct hclge_desc desc;
3966 int i, j;
3967 int ret;
3968
d44f9b63 3969 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
46a3df9f
S
3970
3971 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
3972 hclge_cmd_setup_basic_desc
3973 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
3974
a90bb9a5
YL
3975 req->start_table_index =
3976 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
3977 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
46a3df9f
S
3978
3979 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
3980 req->rss_result[j] =
3981 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
3982
3983 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3984 if (ret) {
3985 dev_err(&hdev->pdev->dev,
3986 "Configure rss indir table fail,status = %d\n",
3987 ret);
3988 return ret;
3989 }
3990 }
3991 return 0;
3992}
3993
3994static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
3995 u16 *tc_size, u16 *tc_offset)
3996{
d44f9b63 3997 struct hclge_rss_tc_mode_cmd *req;
46a3df9f
S
3998 struct hclge_desc desc;
3999 int ret;
4000 int i;
4001
4002 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
d44f9b63 4003 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
46a3df9f
S
4004
4005 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
a90bb9a5
YL
4006 u16 mode = 0;
4007
e4e87715
PL
4008 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4009 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4010 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4011 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4012 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
a90bb9a5
YL
4013
4014 req->rss_tc_mode[i] = cpu_to_le16(mode);
46a3df9f
S
4015 }
4016
4017 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 4018 if (ret)
46a3df9f
S
4019 dev_err(&hdev->pdev->dev,
4020 "Configure rss tc mode fail, status = %d\n", ret);
46a3df9f 4021
3f639907 4022 return ret;
46a3df9f
S
4023}
4024
232fc64b
PL
4025static void hclge_get_rss_type(struct hclge_vport *vport)
4026{
4027 if (vport->rss_tuple_sets.ipv4_tcp_en ||
4028 vport->rss_tuple_sets.ipv4_udp_en ||
4029 vport->rss_tuple_sets.ipv4_sctp_en ||
4030 vport->rss_tuple_sets.ipv6_tcp_en ||
4031 vport->rss_tuple_sets.ipv6_udp_en ||
4032 vport->rss_tuple_sets.ipv6_sctp_en)
4033 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4034 else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4035 vport->rss_tuple_sets.ipv6_fragment_en)
4036 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4037 else
4038 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4039}
4040
46a3df9f
S
4041static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4042{
d44f9b63 4043 struct hclge_rss_input_tuple_cmd *req;
46a3df9f
S
4044 struct hclge_desc desc;
4045 int ret;
4046
4047 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4048
d44f9b63 4049 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
6f2af429
YL
4050
4051 /* Get the tuple cfg from pf */
4052 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4053 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4054 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4055 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4056 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4057 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4058 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4059 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
232fc64b 4060 hclge_get_rss_type(&hdev->vport[0]);
46a3df9f 4061 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 4062 if (ret)
46a3df9f
S
4063 dev_err(&hdev->pdev->dev,
4064 "Configure rss input fail, status = %d\n", ret);
3f639907 4065 return ret;
46a3df9f
S
4066}
4067
4068static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4069 u8 *key, u8 *hfunc)
4070{
4071 struct hclge_vport *vport = hclge_get_vport(handle);
46a3df9f
S
4072 int i;
4073
4074 /* Get hash algorithm */
775501a1
JS
4075 if (hfunc) {
4076 switch (vport->rss_algo) {
4077 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4078 *hfunc = ETH_RSS_HASH_TOP;
4079 break;
4080 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4081 *hfunc = ETH_RSS_HASH_XOR;
4082 break;
4083 default:
4084 *hfunc = ETH_RSS_HASH_UNKNOWN;
4085 break;
4086 }
4087 }
46a3df9f
S
4088
4089 /* Get the RSS Key required by the user */
4090 if (key)
4091 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4092
4093 /* Get indirect table */
4094 if (indir)
4095 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4096 indir[i] = vport->rss_indirection_tbl[i];
4097
4098 return 0;
4099}
4100
4101static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4102 const u8 *key, const u8 hfunc)
4103{
4104 struct hclge_vport *vport = hclge_get_vport(handle);
4105 struct hclge_dev *hdev = vport->back;
4106 u8 hash_algo;
4107 int ret, i;
4108
 4109 	/* Set the RSS Hash Key if specified by the user */
4110 if (key) {
775501a1
JS
4111 switch (hfunc) {
4112 case ETH_RSS_HASH_TOP:
46a3df9f 4113 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
775501a1
JS
4114 break;
4115 case ETH_RSS_HASH_XOR:
4116 hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4117 break;
4118 case ETH_RSS_HASH_NO_CHANGE:
4119 hash_algo = vport->rss_algo;
4120 break;
4121 default:
46a3df9f 4122 return -EINVAL;
775501a1
JS
4123 }
4124
46a3df9f
S
4125 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4126 if (ret)
4127 return ret;
89523cfa
YL
4128
 4129 		/* Update the shadow RSS key with the user specified key */
4130 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4131 vport->rss_algo = hash_algo;
46a3df9f
S
4132 }
4133
4134 /* Update the shadow RSS table with user specified qids */
4135 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4136 vport->rss_indirection_tbl[i] = indir[i];
4137
4138 /* Update the hardware */
89523cfa 4139 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
46a3df9f
S
4140}
4141
f7db940a
L
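/* Translate the ethtool RXH_* hash flags into the per-flow tuple enable
 * bits (src/dst IP, src/dst L4 port and, for SCTP, the verification tag)
 * used by the RSS input tuple command.
 */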
4142static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4143{
4144 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4145
4146 if (nfc->data & RXH_L4_B_2_3)
4147 hash_sets |= HCLGE_D_PORT_BIT;
4148 else
4149 hash_sets &= ~HCLGE_D_PORT_BIT;
4150
4151 if (nfc->data & RXH_IP_SRC)
4152 hash_sets |= HCLGE_S_IP_BIT;
4153 else
4154 hash_sets &= ~HCLGE_S_IP_BIT;
4155
4156 if (nfc->data & RXH_IP_DST)
4157 hash_sets |= HCLGE_D_IP_BIT;
4158 else
4159 hash_sets &= ~HCLGE_D_IP_BIT;
4160
4161 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4162 hash_sets |= HCLGE_V_TAG_BIT;
4163
4164 return hash_sets;
4165}
4166
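/* Reached from userspace through the ethtool rx-flow-hash interface, for
 * example (illustrative command, not part of this file):
 *   ethtool -N eth0 rx-flow-hash tcp4 sdfn
 * which requests hashing on src/dst IP plus src/dst port for IPv4 TCP.
 */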
4167static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4168 struct ethtool_rxnfc *nfc)
4169{
4170 struct hclge_vport *vport = hclge_get_vport(handle);
4171 struct hclge_dev *hdev = vport->back;
4172 struct hclge_rss_input_tuple_cmd *req;
4173 struct hclge_desc desc;
4174 u8 tuple_sets;
4175 int ret;
4176
4177 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4178 RXH_L4_B_0_1 | RXH_L4_B_2_3))
4179 return -EINVAL;
4180
4181 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
6f2af429 4182 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
f7db940a 4183
6f2af429
YL
4184 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4185 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4186 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4187 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4188 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4189 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4190 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4191 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
f7db940a
L
4192
4193 tuple_sets = hclge_get_rss_hash_bits(nfc);
4194 switch (nfc->flow_type) {
4195 case TCP_V4_FLOW:
4196 req->ipv4_tcp_en = tuple_sets;
4197 break;
4198 case TCP_V6_FLOW:
4199 req->ipv6_tcp_en = tuple_sets;
4200 break;
4201 case UDP_V4_FLOW:
4202 req->ipv4_udp_en = tuple_sets;
4203 break;
4204 case UDP_V6_FLOW:
4205 req->ipv6_udp_en = tuple_sets;
4206 break;
4207 case SCTP_V4_FLOW:
4208 req->ipv4_sctp_en = tuple_sets;
4209 break;
4210 case SCTP_V6_FLOW:
4211 if ((nfc->data & RXH_L4_B_0_1) ||
4212 (nfc->data & RXH_L4_B_2_3))
4213 return -EINVAL;
4214
4215 req->ipv6_sctp_en = tuple_sets;
4216 break;
4217 case IPV4_FLOW:
4218 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4219 break;
4220 case IPV6_FLOW:
4221 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4222 break;
4223 default:
4224 return -EINVAL;
4225 }
4226
4227 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6f2af429 4228 if (ret) {
f7db940a
L
4229 dev_err(&hdev->pdev->dev,
4230 "Set rss tuple fail, status = %d\n", ret);
6f2af429
YL
4231 return ret;
4232 }
f7db940a 4233
6f2af429
YL
4234 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4235 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4236 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4237 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4238 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4239 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4240 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4241 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
232fc64b 4242 hclge_get_rss_type(vport);
6f2af429 4243 return 0;
f7db940a
L
4244}
4245
07d29954
L
4246static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4247 struct ethtool_rxnfc *nfc)
4248{
4249 struct hclge_vport *vport = hclge_get_vport(handle);
07d29954 4250 u8 tuple_sets;
07d29954
L
4251
4252 nfc->data = 0;
4253
07d29954
L
4254 switch (nfc->flow_type) {
4255 case TCP_V4_FLOW:
6f2af429 4256 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
07d29954
L
4257 break;
4258 case UDP_V4_FLOW:
6f2af429 4259 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
07d29954
L
4260 break;
4261 case TCP_V6_FLOW:
6f2af429 4262 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
07d29954
L
4263 break;
4264 case UDP_V6_FLOW:
6f2af429 4265 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
07d29954
L
4266 break;
4267 case SCTP_V4_FLOW:
6f2af429 4268 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
07d29954
L
4269 break;
4270 case SCTP_V6_FLOW:
6f2af429 4271 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
07d29954
L
4272 break;
4273 case IPV4_FLOW:
4274 case IPV6_FLOW:
4275 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4276 break;
4277 default:
4278 return -EINVAL;
4279 }
4280
4281 if (!tuple_sets)
4282 return 0;
4283
4284 if (tuple_sets & HCLGE_D_PORT_BIT)
4285 nfc->data |= RXH_L4_B_2_3;
4286 if (tuple_sets & HCLGE_S_PORT_BIT)
4287 nfc->data |= RXH_L4_B_0_1;
4288 if (tuple_sets & HCLGE_D_IP_BIT)
4289 nfc->data |= RXH_IP_DST;
4290 if (tuple_sets & HCLGE_S_IP_BIT)
4291 nfc->data |= RXH_IP_SRC;
4292
4293 return 0;
4294}
4295
46a3df9f
S
4296static int hclge_get_tc_size(struct hnae3_handle *handle)
4297{
4298 struct hclge_vport *vport = hclge_get_vport(handle);
4299 struct hclge_dev *hdev = vport->back;
4300
4301 return hdev->rss_size_max;
4302}
4303
77f255c1 4304int hclge_rss_init_hw(struct hclge_dev *hdev)
46a3df9f 4305{
46a3df9f 4306 struct hclge_vport *vport = hdev->vport;
268f5dfa
YL
4307 u8 *rss_indir = vport[0].rss_indirection_tbl;
4308 u16 rss_size = vport[0].alloc_rss_size;
354d0fab
PL
4309 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4310 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
268f5dfa
YL
4311 u8 *key = vport[0].rss_hash_key;
4312 u8 hfunc = vport[0].rss_algo;
46a3df9f 4313 u16 tc_valid[HCLGE_MAX_TC_NUM];
268f5dfa 4314 u16 roundup_size;
ebaf1908
WL
4315 unsigned int i;
4316 int ret;
68ece54e 4317
46a3df9f
S
4318 ret = hclge_set_rss_indir_table(hdev, rss_indir);
4319 if (ret)
268f5dfa 4320 return ret;
46a3df9f 4321
46a3df9f
S
4322 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4323 if (ret)
268f5dfa 4324 return ret;
46a3df9f
S
4325
4326 ret = hclge_set_rss_input_tuple(hdev);
4327 if (ret)
268f5dfa 4328 return ret;
46a3df9f 4329
68ece54e
YL
 4330 	/* Each TC has the same queue size, and the tc_size set to hardware is
 4331 	 * the log2 of the roundup power of two of rss_size; the actual queue
 4332 	 * size is limited by the indirection table.
4333 */
4334 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4335 dev_err(&hdev->pdev->dev,
4336 "Configure rss tc size failed, invalid TC_SIZE = %d\n",
4337 rss_size);
268f5dfa 4338 return -EINVAL;
68ece54e
YL
4339 }
4340
4341 roundup_size = roundup_pow_of_two(rss_size);
4342 roundup_size = ilog2(roundup_size);
4343
46a3df9f 4344 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
68ece54e 4345 tc_valid[i] = 0;
46a3df9f 4346
68ece54e
YL
4347 if (!(hdev->hw_tc_map & BIT(i)))
4348 continue;
4349
4350 tc_valid[i] = 1;
4351 tc_size[i] = roundup_size;
4352 tc_offset[i] = rss_size * i;
46a3df9f 4353 }
68ece54e 4354
268f5dfa
YL
4355 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4356}
46a3df9f 4357
268f5dfa
YL
4358void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4359{
4360 struct hclge_vport *vport = hdev->vport;
4361 int i, j;
46a3df9f 4362
268f5dfa
YL
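	/* build the default indirection table by mapping the table entries
	 * round-robin onto each vport's allocated RSS queues
	 */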
4363 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4364 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4365 vport[j].rss_indirection_tbl[i] =
4366 i % vport[j].alloc_rss_size;
4367 }
4368}
4369
4370static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4371{
472d7ece 4372 int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
268f5dfa 4373 struct hclge_vport *vport = hdev->vport;
472d7ece
JS
4374
4375 if (hdev->pdev->revision >= 0x21)
4376 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
268f5dfa 4377
268f5dfa
YL
4378 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4379 vport[i].rss_tuple_sets.ipv4_tcp_en =
4380 HCLGE_RSS_INPUT_TUPLE_OTHER;
4381 vport[i].rss_tuple_sets.ipv4_udp_en =
4382 HCLGE_RSS_INPUT_TUPLE_OTHER;
4383 vport[i].rss_tuple_sets.ipv4_sctp_en =
4384 HCLGE_RSS_INPUT_TUPLE_SCTP;
4385 vport[i].rss_tuple_sets.ipv4_fragment_en =
4386 HCLGE_RSS_INPUT_TUPLE_OTHER;
4387 vport[i].rss_tuple_sets.ipv6_tcp_en =
4388 HCLGE_RSS_INPUT_TUPLE_OTHER;
4389 vport[i].rss_tuple_sets.ipv6_udp_en =
4390 HCLGE_RSS_INPUT_TUPLE_OTHER;
4391 vport[i].rss_tuple_sets.ipv6_sctp_en =
4392 HCLGE_RSS_INPUT_TUPLE_SCTP;
4393 vport[i].rss_tuple_sets.ipv6_fragment_en =
4394 HCLGE_RSS_INPUT_TUPLE_OTHER;
4395
472d7ece 4396 vport[i].rss_algo = rss_algo;
ea739c90 4397
472d7ece
JS
4398 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4399 HCLGE_RSS_KEY_SIZE);
268f5dfa
YL
4400 }
4401
4402 hclge_rss_indir_init_cfg(hdev);
46a3df9f
S
4403}
4404
84e095d6
SM
4405int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4406 int vector_id, bool en,
4407 struct hnae3_ring_chain_node *ring_chain)
46a3df9f
S
4408{
4409 struct hclge_dev *hdev = vport->back;
46a3df9f
S
4410 struct hnae3_ring_chain_node *node;
4411 struct hclge_desc desc;
37417c66
GL
4412 struct hclge_ctrl_vector_chain_cmd *req =
4413 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
84e095d6
SM
4414 enum hclge_cmd_status status;
4415 enum hclge_opcode_type op;
4416 u16 tqp_type_and_id;
46a3df9f
S
4417 int i;
4418
84e095d6
SM
4419 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4420 hclge_cmd_setup_basic_desc(&desc, op, false);
46a3df9f
S
4421 req->int_vector_id = vector_id;
4422
4423 i = 0;
4424 for (node = ring_chain; node; node = node->next) {
84e095d6 4425 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
e4e87715
PL
4426 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
4427 HCLGE_INT_TYPE_S,
4428 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4429 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4430 HCLGE_TQP_ID_S, node->tqp_index);
4431 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4432 HCLGE_INT_GL_IDX_S,
4433 hnae3_get_field(node->int_gl_idx,
4434 HNAE3_RING_GL_IDX_M,
4435 HNAE3_RING_GL_IDX_S));
84e095d6 4436 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
46a3df9f
S
4437 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4438 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
84e095d6 4439 req->vfid = vport->vport_id;
46a3df9f 4440
84e095d6
SM
4441 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4442 if (status) {
46a3df9f
S
4443 dev_err(&hdev->pdev->dev,
4444 "Map TQP fail, status is %d.\n",
84e095d6
SM
4445 status);
4446 return -EIO;
46a3df9f
S
4447 }
4448 i = 0;
4449
4450 hclge_cmd_setup_basic_desc(&desc,
84e095d6 4451 op,
46a3df9f
S
4452 false);
4453 req->int_vector_id = vector_id;
4454 }
4455 }
4456
4457 if (i > 0) {
4458 req->int_cause_num = i;
84e095d6
SM
4459 req->vfid = vport->vport_id;
4460 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4461 if (status) {
46a3df9f 4462 dev_err(&hdev->pdev->dev,
84e095d6
SM
4463 "Map TQP fail, status is %d.\n", status);
4464 return -EIO;
46a3df9f
S
4465 }
4466 }
4467
4468 return 0;
4469}
4470
9b2f3477 4471static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
84e095d6 4472 struct hnae3_ring_chain_node *ring_chain)
46a3df9f
S
4473{
4474 struct hclge_vport *vport = hclge_get_vport(handle);
4475 struct hclge_dev *hdev = vport->back;
4476 int vector_id;
4477
4478 vector_id = hclge_get_vector_index(hdev, vector);
4479 if (vector_id < 0) {
4480 dev_err(&hdev->pdev->dev,
84e095d6 4481 "Get vector index fail. vector_id =%d\n", vector_id);
46a3df9f
S
4482 return vector_id;
4483 }
4484
84e095d6 4485 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
46a3df9f
S
4486}
4487
9b2f3477 4488static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
84e095d6 4489 struct hnae3_ring_chain_node *ring_chain)
46a3df9f
S
4490{
4491 struct hclge_vport *vport = hclge_get_vport(handle);
4492 struct hclge_dev *hdev = vport->back;
84e095d6 4493 int vector_id, ret;
46a3df9f 4494
b50ae26c
PL
4495 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4496 return 0;
4497
46a3df9f
S
4498 vector_id = hclge_get_vector_index(hdev, vector);
4499 if (vector_id < 0) {
4500 dev_err(&handle->pdev->dev,
4501 "Get vector index fail. ret =%d\n", vector_id);
4502 return vector_id;
4503 }
4504
84e095d6 4505 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
0d3e6631 4506 if (ret)
84e095d6
SM
4507 dev_err(&handle->pdev->dev,
4508 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
9b2f3477 4509 vector_id, ret);
46a3df9f 4510
0d3e6631 4511 return ret;
46a3df9f
S
4512}
4513
4514int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4515 struct hclge_promisc_param *param)
4516{
d44f9b63 4517 struct hclge_promisc_cfg_cmd *req;
46a3df9f
S
4518 struct hclge_desc desc;
4519 int ret;
4520
4521 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4522
d44f9b63 4523 req = (struct hclge_promisc_cfg_cmd *)desc.data;
46a3df9f 4524 req->vf_id = param->vf_id;
96c0e861
PL
4525
4526 /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
 4527 	 * pdev revision(0x20); newer revisions support them. The
 4528 	 * value of these two fields will not return an error when the driver
 4529 	 * sends commands to firmware in revision(0x20).
4530 */
4531 req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4532 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
46a3df9f
S
4533
4534 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 4535 if (ret)
46a3df9f
S
4536 dev_err(&hdev->pdev->dev,
4537 "Set promisc mode fail, status is %d.\n", ret);
3f639907
JS
4538
4539 return ret;
46a3df9f
S
4540}
4541
4542void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
4543 bool en_mc, bool en_bc, int vport_id)
4544{
4545 if (!param)
4546 return;
4547
4548 memset(param, 0, sizeof(struct hclge_promisc_param));
4549 if (en_uc)
4550 param->enable = HCLGE_PROMISC_EN_UC;
4551 if (en_mc)
4552 param->enable |= HCLGE_PROMISC_EN_MC;
4553 if (en_bc)
4554 param->enable |= HCLGE_PROMISC_EN_BC;
4555 param->vf_id = vport_id;
4556}
4557
7fa6be4f
HT
4558static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4559 bool en_mc_pmc)
46a3df9f
S
4560{
4561 struct hclge_vport *vport = hclge_get_vport(handle);
4562 struct hclge_dev *hdev = vport->back;
4563 struct hclge_promisc_param param;
28673b33 4564 bool en_bc_pmc = true;
46a3df9f 4565
28673b33
JS
 4566 	/* For revision 0x20, if broadcast promisc is enabled, the vlan filter
 4567 	 * is always bypassed. So broadcast promisc should be disabled until
 4568 	 * the user enables promisc mode
4569 */
4570 if (handle->pdev->revision == 0x20)
4571 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4572
4573 hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
3b75c3df 4574 vport->vport_id);
7fa6be4f 4575 return hclge_cmd_set_promisc_mode(hdev, &param);
46a3df9f
S
4576}
4577
d695964d
JS
4578static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4579{
4580 struct hclge_get_fd_mode_cmd *req;
4581 struct hclge_desc desc;
4582 int ret;
4583
4584 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4585
4586 req = (struct hclge_get_fd_mode_cmd *)desc.data;
4587
4588 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4589 if (ret) {
4590 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4591 return ret;
4592 }
4593
4594 *fd_mode = req->mode;
4595
4596 return ret;
4597}
4598
4599static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4600 u32 *stage1_entry_num,
4601 u32 *stage2_entry_num,
4602 u16 *stage1_counter_num,
4603 u16 *stage2_counter_num)
4604{
4605 struct hclge_get_fd_allocation_cmd *req;
4606 struct hclge_desc desc;
4607 int ret;
4608
4609 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4610
4611 req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4612
4613 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4614 if (ret) {
4615 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4616 ret);
4617 return ret;
4618 }
4619
4620 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4621 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4622 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4623 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4624
4625 return ret;
4626}
4627
4628static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4629{
4630 struct hclge_set_fd_key_config_cmd *req;
4631 struct hclge_fd_key_cfg *stage;
4632 struct hclge_desc desc;
4633 int ret;
4634
4635 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4636
4637 req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4638 stage = &hdev->fd_cfg.key_cfg[stage_num];
4639 req->stage = stage_num;
4640 req->key_select = stage->key_sel;
4641 req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4642 req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4643 req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4644 req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4645 req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4646 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4647
4648 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4649 if (ret)
4650 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4651
4652 return ret;
4653}
4654
4655static int hclge_init_fd_config(struct hclge_dev *hdev)
4656{
4657#define LOW_2_WORDS 0x03
4658 struct hclge_fd_key_cfg *key_cfg;
4659 int ret;
4660
4661 if (!hnae3_dev_fd_supported(hdev))
4662 return 0;
4663
4664 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4665 if (ret)
4666 return ret;
4667
4668 switch (hdev->fd_cfg.fd_mode) {
4669 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4670 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4671 break;
4672 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4673 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4674 break;
4675 default:
4676 dev_err(&hdev->pdev->dev,
4677 "Unsupported flow director mode %d\n",
4678 hdev->fd_cfg.fd_mode);
4679 return -EOPNOTSUPP;
4680 }
4681
d695964d
JS
4682 hdev->fd_cfg.proto_support =
4683 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4684 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4685 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4686 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE,
4687 key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4688 key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4689 key_cfg->outer_sipv6_word_en = 0;
4690 key_cfg->outer_dipv6_word_en = 0;
4691
4692 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4693 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4694 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4695 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4696
4697 /* If use max 400bit key, we can support tuples for ether type */
4698 if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4699 hdev->fd_cfg.proto_support |= ETHER_FLOW;
4700 key_cfg->tuple_active |=
4701 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4702 }
4703
4704 /* roce_type is used to filter roce frames
4705 * dst_vport is used to specify the rule
4706 */
4707 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4708
4709 ret = hclge_get_fd_allocation(hdev,
4710 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4711 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4712 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4713 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4714 if (ret)
4715 return ret;
4716
4717 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4718}
4719
11732868
JS
4720static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4721 int loc, u8 *key, bool is_add)
4722{
4723 struct hclge_fd_tcam_config_1_cmd *req1;
4724 struct hclge_fd_tcam_config_2_cmd *req2;
4725 struct hclge_fd_tcam_config_3_cmd *req3;
4726 struct hclge_desc desc[3];
4727 int ret;
4728
4729 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4730 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4731 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4732 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4733 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4734
4735 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4736 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4737 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4738
4739 req1->stage = stage;
4740 req1->xy_sel = sel_x ? 1 : 0;
4741 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4742 req1->index = cpu_to_le32(loc);
4743 req1->entry_vld = sel_x ? is_add : 0;
4744
4745 if (key) {
4746 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4747 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4748 sizeof(req2->tcam_data));
4749 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4750 sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4751 }
4752
4753 ret = hclge_cmd_send(&hdev->hw, desc, 3);
4754 if (ret)
4755 dev_err(&hdev->pdev->dev,
4756 "config tcam key fail, ret=%d\n",
4757 ret);
4758
4759 return ret;
4760}
4761
4762static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4763 struct hclge_fd_ad_data *action)
4764{
4765 struct hclge_fd_ad_config_cmd *req;
4766 struct hclge_desc desc;
4767 u64 ad_data = 0;
4768 int ret;
4769
4770 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4771
4772 req = (struct hclge_fd_ad_config_cmd *)desc.data;
4773 req->index = cpu_to_le32(loc);
4774 req->stage = stage;
4775
4776 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4777 action->write_rule_id_to_bd);
4778 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4779 action->rule_id);
4780 ad_data <<= 32;
4781 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4782 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4783 action->forward_to_direct_queue);
4784 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4785 action->queue_id);
4786 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4787 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4788 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4789 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4790 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4791 action->counter_id);
4792
4793 req->ad_data = cpu_to_le64(ad_data);
4794 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4795 if (ret)
4796 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4797
4798 return ret;
4799}
4800
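/* Convert one tuple of a flow director rule into its TCAM key_x/key_y
 * form. A tuple marked as unused is skipped, leaving its key bits zeroed
 * so that the hardware treats it as a wildcard.
 */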
4801static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4802 struct hclge_fd_rule *rule)
4803{
4804 u16 tmp_x_s, tmp_y_s;
4805 u32 tmp_x_l, tmp_y_l;
4806 int i;
4807
4808 if (rule->unused_tuple & tuple_bit)
4809 return true;
4810
4811 switch (tuple_bit) {
4812 case 0:
4813 return false;
4814 case BIT(INNER_DST_MAC):
e91e388c
JS
4815 for (i = 0; i < ETH_ALEN; i++) {
4816 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
11732868 4817 rule->tuples_mask.dst_mac[i]);
e91e388c 4818 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
11732868
JS
4819 rule->tuples_mask.dst_mac[i]);
4820 }
4821
4822 return true;
4823 case BIT(INNER_SRC_MAC):
e91e388c
JS
4824 for (i = 0; i < ETH_ALEN; i++) {
4825 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
11732868 4826 rule->tuples.src_mac[i]);
e91e388c 4827 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
11732868
JS
4828 rule->tuples.src_mac[i]);
4829 }
4830
4831 return true;
4832 case BIT(INNER_VLAN_TAG_FST):
4833 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
4834 rule->tuples_mask.vlan_tag1);
4835 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
4836 rule->tuples_mask.vlan_tag1);
4837 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4838 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4839
4840 return true;
4841 case BIT(INNER_ETH_TYPE):
4842 calc_x(tmp_x_s, rule->tuples.ether_proto,
4843 rule->tuples_mask.ether_proto);
4844 calc_y(tmp_y_s, rule->tuples.ether_proto,
4845 rule->tuples_mask.ether_proto);
4846 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4847 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4848
4849 return true;
4850 case BIT(INNER_IP_TOS):
4851 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4852 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4853
4854 return true;
4855 case BIT(INNER_IP_PROTO):
4856 calc_x(*key_x, rule->tuples.ip_proto,
4857 rule->tuples_mask.ip_proto);
4858 calc_y(*key_y, rule->tuples.ip_proto,
4859 rule->tuples_mask.ip_proto);
4860
4861 return true;
4862 case BIT(INNER_SRC_IP):
e91e388c
JS
4863 calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
4864 rule->tuples_mask.src_ip[IPV4_INDEX]);
4865 calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
4866 rule->tuples_mask.src_ip[IPV4_INDEX]);
11732868
JS
4867 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4868 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4869
4870 return true;
4871 case BIT(INNER_DST_IP):
e91e388c
JS
4872 calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
4873 rule->tuples_mask.dst_ip[IPV4_INDEX]);
4874 calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
4875 rule->tuples_mask.dst_ip[IPV4_INDEX]);
11732868
JS
4876 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4877 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4878
4879 return true;
4880 case BIT(INNER_SRC_PORT):
4881 calc_x(tmp_x_s, rule->tuples.src_port,
4882 rule->tuples_mask.src_port);
4883 calc_y(tmp_y_s, rule->tuples.src_port,
4884 rule->tuples_mask.src_port);
4885 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4886 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4887
4888 return true;
4889 case BIT(INNER_DST_PORT):
4890 calc_x(tmp_x_s, rule->tuples.dst_port,
4891 rule->tuples_mask.dst_port);
4892 calc_y(tmp_y_s, rule->tuples.dst_port,
4893 rule->tuples_mask.dst_port);
4894 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4895 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4896
4897 return true;
4898 default:
4899 return false;
4900 }
4901}
4902
4903static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
4904 u8 vf_id, u8 network_port_id)
4905{
4906 u32 port_number = 0;
4907
4908 if (port_type == HOST_PORT) {
4909 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
4910 pf_id);
4911 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
4912 vf_id);
4913 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
4914 } else {
4915 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
4916 HCLGE_NETWORK_PORT_ID_S, network_port_id);
4917 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
4918 }
4919
4920 return port_number;
4921}
4922
4923static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
4924 __le32 *key_x, __le32 *key_y,
4925 struct hclge_fd_rule *rule)
4926{
4927 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
4928 u8 cur_pos = 0, tuple_size, shift_bits;
ebaf1908 4929 unsigned int i;
11732868
JS
4930
4931 for (i = 0; i < MAX_META_DATA; i++) {
4932 tuple_size = meta_data_key_info[i].key_length;
4933 tuple_bit = key_cfg->meta_data_active & BIT(i);
4934
4935 switch (tuple_bit) {
4936 case BIT(ROCE_TYPE):
4937 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
4938 cur_pos += tuple_size;
4939 break;
4940 case BIT(DST_VPORT):
4941 port_number = hclge_get_port_number(HOST_PORT, 0,
4942 rule->vf_id, 0);
4943 hnae3_set_field(meta_data,
4944 GENMASK(cur_pos + tuple_size, cur_pos),
4945 cur_pos, port_number);
4946 cur_pos += tuple_size;
4947 break;
4948 default:
4949 break;
4950 }
4951 }
4952
4953 calc_x(tmp_x, meta_data, 0xFFFFFFFF);
4954 calc_y(tmp_y, meta_data, 0xFFFFFFFF);
4955 shift_bits = sizeof(meta_data) * 8 - cur_pos;
4956
4957 *key_x = cpu_to_le32(tmp_x << shift_bits);
4958 *key_y = cpu_to_le32(tmp_y << shift_bits);
4959}
4960
 4961 /* A complete key consists of a meta data key and a tuple key.
 4962  * The meta data key is stored in the MSB region, the tuple key in the
 4963  * LSB region, and unused bits are filled with 0.
4964 */
4965static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
4966 struct hclge_fd_rule *rule)
4967{
4968 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
4969 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
4970 u8 *cur_key_x, *cur_key_y;
ebaf1908
WL
4971 unsigned int i;
4972 int ret, tuple_size;
11732868
JS
4973 u8 meta_data_region;
4974
4975 memset(key_x, 0, sizeof(key_x));
4976 memset(key_y, 0, sizeof(key_y));
4977 cur_key_x = key_x;
4978 cur_key_y = key_y;
4979
4980 for (i = 0 ; i < MAX_TUPLE; i++) {
4981 bool tuple_valid;
4982 u32 check_tuple;
4983
4984 tuple_size = tuple_key_info[i].key_length / 8;
4985 check_tuple = key_cfg->tuple_active & BIT(i);
4986
4987 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
4988 cur_key_y, rule);
4989 if (tuple_valid) {
4990 cur_key_x += tuple_size;
4991 cur_key_y += tuple_size;
4992 }
4993 }
4994
4995 meta_data_region = hdev->fd_cfg.max_key_length / 8 -
4996 MAX_META_DATA_LENGTH / 8;
4997
4998 hclge_fd_convert_meta_data(key_cfg,
4999 (__le32 *)(key_x + meta_data_region),
5000 (__le32 *)(key_y + meta_data_region),
5001 rule);
5002
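	/* program key_y first and key_x last: entry_vld is only set on the
	 * key_x write, so the TCAM entry becomes valid only after both halves
	 * have been written
	 */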
5003 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5004 true);
5005 if (ret) {
5006 dev_err(&hdev->pdev->dev,
5007 "fd key_y config fail, loc=%d, ret=%d\n",
5008 rule->queue_id, ret);
5009 return ret;
5010 }
5011
5012 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5013 true);
5014 if (ret)
5015 dev_err(&hdev->pdev->dev,
5016 "fd key_x config fail, loc=%d, ret=%d\n",
5017 rule->queue_id, ret);
5018 return ret;
5019}
5020
5021static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5022 struct hclge_fd_rule *rule)
5023{
5024 struct hclge_fd_ad_data ad_data;
5025
5026 ad_data.ad_id = rule->location;
5027
5028 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5029 ad_data.drop_packet = true;
5030 ad_data.forward_to_direct_queue = false;
5031 ad_data.queue_id = 0;
5032 } else {
5033 ad_data.drop_packet = false;
5034 ad_data.forward_to_direct_queue = true;
5035 ad_data.queue_id = rule->queue_id;
5036 }
5037
5038 ad_data.use_counter = false;
5039 ad_data.counter_id = 0;
5040
5041 ad_data.use_next_stage = false;
5042 ad_data.next_input_key = 0;
5043
5044 ad_data.write_rule_id_to_bd = true;
5045 ad_data.rule_id = rule->location;
5046
5047 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5048}
5049
dd74f815
JS
5050static int hclge_fd_check_spec(struct hclge_dev *hdev,
5051 struct ethtool_rx_flow_spec *fs, u32 *unused)
5052{
5053 struct ethtool_tcpip4_spec *tcp_ip4_spec;
5054 struct ethtool_usrip4_spec *usr_ip4_spec;
5055 struct ethtool_tcpip6_spec *tcp_ip6_spec;
5056 struct ethtool_usrip6_spec *usr_ip6_spec;
5057 struct ethhdr *ether_spec;
5058
5059 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5060 return -EINVAL;
5061
5062 if (!(fs->flow_type & hdev->fd_cfg.proto_support))
5063 return -EOPNOTSUPP;
5064
5065 if ((fs->flow_type & FLOW_EXT) &&
5066 (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5067 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5068 return -EOPNOTSUPP;
5069 }
5070
5071 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5072 case SCTP_V4_FLOW:
5073 case TCP_V4_FLOW:
5074 case UDP_V4_FLOW:
5075 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
5076 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5077
5078 if (!tcp_ip4_spec->ip4src)
5079 *unused |= BIT(INNER_SRC_IP);
5080
5081 if (!tcp_ip4_spec->ip4dst)
5082 *unused |= BIT(INNER_DST_IP);
5083
5084 if (!tcp_ip4_spec->psrc)
5085 *unused |= BIT(INNER_SRC_PORT);
5086
5087 if (!tcp_ip4_spec->pdst)
5088 *unused |= BIT(INNER_DST_PORT);
5089
5090 if (!tcp_ip4_spec->tos)
5091 *unused |= BIT(INNER_IP_TOS);
5092
5093 break;
5094 case IP_USER_FLOW:
5095 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
5096 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5097 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5098
5099 if (!usr_ip4_spec->ip4src)
5100 *unused |= BIT(INNER_SRC_IP);
5101
5102 if (!usr_ip4_spec->ip4dst)
5103 *unused |= BIT(INNER_DST_IP);
5104
5105 if (!usr_ip4_spec->tos)
5106 *unused |= BIT(INNER_IP_TOS);
5107
5108 if (!usr_ip4_spec->proto)
5109 *unused |= BIT(INNER_IP_PROTO);
5110
5111 if (usr_ip4_spec->l4_4_bytes)
5112 return -EOPNOTSUPP;
5113
5114 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
5115 return -EOPNOTSUPP;
5116
5117 break;
5118 case SCTP_V6_FLOW:
5119 case TCP_V6_FLOW:
5120 case UDP_V6_FLOW:
5121 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
5122 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5123 BIT(INNER_IP_TOS);
5124
e91e388c 5125 		/* check whether the src/dst ip address is used */
dd74f815
JS
5126 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
5127 !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
5128 *unused |= BIT(INNER_SRC_IP);
5129
5130 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
5131 !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
5132 *unused |= BIT(INNER_DST_IP);
5133
5134 if (!tcp_ip6_spec->psrc)
5135 *unused |= BIT(INNER_SRC_PORT);
5136
5137 if (!tcp_ip6_spec->pdst)
5138 *unused |= BIT(INNER_DST_PORT);
5139
5140 if (tcp_ip6_spec->tclass)
5141 return -EOPNOTSUPP;
5142
5143 break;
5144 case IPV6_USER_FLOW:
5145 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
5146 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5147 BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
5148 BIT(INNER_DST_PORT);
5149
e91e388c 5150 		/* check whether the src/dst ip address is used */
dd74f815
JS
5151 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
5152 !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
5153 *unused |= BIT(INNER_SRC_IP);
5154
5155 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
5156 !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
5157 *unused |= BIT(INNER_DST_IP);
5158
5159 if (!usr_ip6_spec->l4_proto)
5160 *unused |= BIT(INNER_IP_PROTO);
5161
5162 if (usr_ip6_spec->tclass)
5163 return -EOPNOTSUPP;
5164
5165 if (usr_ip6_spec->l4_4_bytes)
5166 return -EOPNOTSUPP;
5167
5168 break;
5169 case ETHER_FLOW:
5170 ether_spec = &fs->h_u.ether_spec;
5171 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5172 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5173 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5174
5175 if (is_zero_ether_addr(ether_spec->h_source))
5176 *unused |= BIT(INNER_SRC_MAC);
5177
5178 if (is_zero_ether_addr(ether_spec->h_dest))
5179 *unused |= BIT(INNER_DST_MAC);
5180
5181 if (!ether_spec->h_proto)
5182 *unused |= BIT(INNER_ETH_TYPE);
5183
5184 break;
5185 default:
5186 return -EOPNOTSUPP;
5187 }
5188
5189 if ((fs->flow_type & FLOW_EXT)) {
5190 if (fs->h_ext.vlan_etype)
5191 return -EOPNOTSUPP;
5192 if (!fs->h_ext.vlan_tci)
5193 *unused |= BIT(INNER_VLAN_TAG_FST);
5194
5195 if (fs->m_ext.vlan_tci) {
5196 if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
5197 return -EINVAL;
5198 }
5199 } else {
5200 *unused |= BIT(INNER_VLAN_TAG_FST);
5201 }
5202
5203 if (fs->flow_type & FLOW_MAC_EXT) {
5204 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
5205 return -EOPNOTSUPP;
5206
5207 if (is_zero_ether_addr(fs->h_ext.h_dest))
5208 *unused |= BIT(INNER_DST_MAC);
5209 else
5210 *unused &= ~(BIT(INNER_DST_MAC));
5211 }
5212
5213 return 0;
5214}
5215
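/* hclge_fd_rule_exist - check whether a flow director rule is already
 * configured at @location. The fd rule list is kept sorted by location,
 * so the walk can stop at the first entry whose location is not smaller
 * than the requested one; fd_rule_lock protects the list.
 */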
5216static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5217{
5218 struct hclge_fd_rule *rule = NULL;
5219 struct hlist_node *node2;
5220
44122887 5221 spin_lock_bh(&hdev->fd_rule_lock);
5222 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5223 if (rule->location >= location)
5224 break;
5225 }
5226
5227 spin_unlock_bh(&hdev->fd_rule_lock);
5228
5229 return rule && rule->location == location;
5230}
5231
5232 /* make sure this is called with fd_rule_lock held */
5233static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5234 struct hclge_fd_rule *new_rule,
5235 u16 location,
5236 bool is_add)
5237{
5238 struct hclge_fd_rule *rule = NULL, *parent = NULL;
5239 struct hlist_node *node2;
5240
5241 if (is_add && !new_rule)
5242 return -EINVAL;
5243
5244 hlist_for_each_entry_safe(rule, node2,
5245 &hdev->fd_rule_list, rule_node) {
5246 if (rule->location >= location)
5247 break;
5248 parent = rule;
5249 }
5250
5251 if (rule && rule->location == location) {
5252 hlist_del(&rule->rule_node);
5253 kfree(rule);
5254 hdev->hclge_fd_rule_num--;
5255
5256 if (!is_add) {
5257 if (!hdev->hclge_fd_rule_num)
5258 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5259 clear_bit(location, hdev->fd_bmap);
dd74f815 5260
5261 return 0;
5262 }
5263 } else if (!is_add) {
5264 dev_err(&hdev->pdev->dev,
5265 "delete fail, rule %d is inexistent\n",
5266 location);
5267 return -EINVAL;
5268 }
5269
5270 INIT_HLIST_NODE(&new_rule->rule_node);
5271
5272 if (parent)
5273 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5274 else
5275 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5276
44122887 5277 set_bit(location, hdev->fd_bmap);
dd74f815 5278 hdev->hclge_fd_rule_num++;
44122887 5279 hdev->fd_active_type = new_rule->rule_type;
5280
5281 return 0;
5282}
5283
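/* hclge_fd_get_tuple - translate an ethtool_rx_flow_spec into the driver's
 * hclge_fd_rule tuples/masks. IPv4 addresses are stored in the IPV4_INDEX
 * word of the address arrays, IPv6 addresses fill all IPV6_SIZE words, and
 * addresses, ports and ethertype are converted to host byte order.
 */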
5284static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5285 struct ethtool_rx_flow_spec *fs,
5286 struct hclge_fd_rule *rule)
5287{
5288 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5289
5290 switch (flow_type) {
5291 case SCTP_V4_FLOW:
5292 case TCP_V4_FLOW:
5293 case UDP_V4_FLOW:
e91e388c 5294 rule->tuples.src_ip[IPV4_INDEX] =
dd74f815 5295 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
e91e388c 5296 rule->tuples_mask.src_ip[IPV4_INDEX] =
5297 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5298
e91e388c 5299 rule->tuples.dst_ip[IPV4_INDEX] =
dd74f815 5300 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
e91e388c 5301 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5302 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5303
5304 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5305 rule->tuples_mask.src_port =
5306 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5307
5308 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5309 rule->tuples_mask.dst_port =
5310 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5311
5312 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5313 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5314
5315 rule->tuples.ether_proto = ETH_P_IP;
5316 rule->tuples_mask.ether_proto = 0xFFFF;
5317
5318 break;
5319 case IP_USER_FLOW:
e91e388c 5320 rule->tuples.src_ip[IPV4_INDEX] =
dd74f815 5321 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
e91e388c 5322 rule->tuples_mask.src_ip[IPV4_INDEX] =
5323 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5324
e91e388c 5325 rule->tuples.dst_ip[IPV4_INDEX] =
dd74f815 5326 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
e91e388c 5327 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5328 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5329
5330 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5331 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5332
5333 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5334 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5335
5336 rule->tuples.ether_proto = ETH_P_IP;
5337 rule->tuples_mask.ether_proto = 0xFFFF;
5338
5339 break;
5340 case SCTP_V6_FLOW:
5341 case TCP_V6_FLOW:
5342 case UDP_V6_FLOW:
5343 be32_to_cpu_array(rule->tuples.src_ip,
e91e388c 5344 fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
dd74f815 5345 be32_to_cpu_array(rule->tuples_mask.src_ip,
e91e388c 5346 fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5347
5348 be32_to_cpu_array(rule->tuples.dst_ip,
e91e388c 5349 fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
dd74f815 5350 be32_to_cpu_array(rule->tuples_mask.dst_ip,
e91e388c 5351 fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5352
5353 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5354 rule->tuples_mask.src_port =
5355 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5356
5357 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5358 rule->tuples_mask.dst_port =
5359 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5360
5361 rule->tuples.ether_proto = ETH_P_IPV6;
5362 rule->tuples_mask.ether_proto = 0xFFFF;
5363
5364 break;
5365 case IPV6_USER_FLOW:
5366 be32_to_cpu_array(rule->tuples.src_ip,
e91e388c 5367 fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
dd74f815 5368 be32_to_cpu_array(rule->tuples_mask.src_ip,
e91e388c 5369 fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5370
5371 be32_to_cpu_array(rule->tuples.dst_ip,
e91e388c 5372 fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
dd74f815 5373 be32_to_cpu_array(rule->tuples_mask.dst_ip,
e91e388c 5374 fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5375
5376 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5377 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5378
5379 rule->tuples.ether_proto = ETH_P_IPV6;
5380 rule->tuples_mask.ether_proto = 0xFFFF;
5381
5382 break;
5383 case ETHER_FLOW:
5384 ether_addr_copy(rule->tuples.src_mac,
5385 fs->h_u.ether_spec.h_source);
5386 ether_addr_copy(rule->tuples_mask.src_mac,
5387 fs->m_u.ether_spec.h_source);
5388
5389 ether_addr_copy(rule->tuples.dst_mac,
5390 fs->h_u.ether_spec.h_dest);
5391 ether_addr_copy(rule->tuples_mask.dst_mac,
5392 fs->m_u.ether_spec.h_dest);
5393
5394 rule->tuples.ether_proto =
5395 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5396 rule->tuples_mask.ether_proto =
5397 be16_to_cpu(fs->m_u.ether_spec.h_proto);
5398
5399 break;
5400 default:
5401 return -EOPNOTSUPP;
5402 }
5403
5404 switch (flow_type) {
5405 case SCTP_V4_FLOW:
5406 case SCTP_V6_FLOW:
5407 rule->tuples.ip_proto = IPPROTO_SCTP;
5408 rule->tuples_mask.ip_proto = 0xFF;
5409 break;
5410 case TCP_V4_FLOW:
5411 case TCP_V6_FLOW:
5412 rule->tuples.ip_proto = IPPROTO_TCP;
5413 rule->tuples_mask.ip_proto = 0xFF;
5414 break;
5415 case UDP_V4_FLOW:
5416 case UDP_V6_FLOW:
5417 rule->tuples.ip_proto = IPPROTO_UDP;
5418 rule->tuples_mask.ip_proto = 0xFF;
5419 break;
5420 default:
5421 break;
5422 }
5423
5424 if ((fs->flow_type & FLOW_EXT)) {
5425 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5426 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5427 }
5428
5429 if (fs->flow_type & FLOW_MAC_EXT) {
5430 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5431 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5432 }
5433
5434 return 0;
5435}
5436
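/* hclge_fd_config_rule - add @rule to the fd rule list and program its
 * action and key into the stage-1 TCAM; if either hardware write fails,
 * the rule is removed from the list again before returning the error.
 */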
5437 /* make sure this is called with fd_rule_lock held */
5438static int hclge_fd_config_rule(struct hclge_dev *hdev,
5439 struct hclge_fd_rule *rule)
5440{
5441 int ret;
5442
5443 if (!rule) {
5444 dev_err(&hdev->pdev->dev,
5445 "The flow director rule is NULL\n");
5446 return -EINVAL;
5447 }
5448
5449 /* this never fails here, so there is no need to check the return value */
5450 hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5451
5452 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5453 if (ret)
5454 goto clear_rule;
5455
5456 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5457 if (ret)
5458 goto clear_rule;
5459
5460 return 0;
5461
5462clear_rule:
5463 hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5464 return ret;
5465}
5466
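/* hclge_add_fd_entry - ethtool entry point for adding a flow director rule:
 * validate the spec, map the ring_cookie to a drop action or a destination
 * vport/queue, clear any aRFS rules to avoid conflicts, then program the
 * new rule under fd_rule_lock.
 */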
5467static int hclge_add_fd_entry(struct hnae3_handle *handle,
5468 struct ethtool_rxnfc *cmd)
5469{
5470 struct hclge_vport *vport = hclge_get_vport(handle);
5471 struct hclge_dev *hdev = vport->back;
5472 u16 dst_vport_id = 0, q_index = 0;
5473 struct ethtool_rx_flow_spec *fs;
5474 struct hclge_fd_rule *rule;
5475 u32 unused = 0;
5476 u8 action;
5477 int ret;
5478
5479 if (!hnae3_dev_fd_supported(hdev))
5480 return -EOPNOTSUPP;
5481
9abeb7d8 5482 if (!hdev->fd_en) {
5483 dev_warn(&hdev->pdev->dev,
5484 "Please enable flow director first\n");
5485 return -EOPNOTSUPP;
5486 }
5487
5488 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5489
5490 ret = hclge_fd_check_spec(hdev, fs, &unused);
5491 if (ret) {
5492 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
5493 return ret;
5494 }
5495
5496 if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5497 action = HCLGE_FD_ACTION_DROP_PACKET;
5498 } else {
5499 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5500 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5501 u16 tqps;
5502
5503 if (vf > hdev->num_req_vfs) {
5504 dev_err(&hdev->pdev->dev,
5505 "Error: vf id (%d) > max vf num (%d)\n",
5506 vf, hdev->num_req_vfs);
5507 return -EINVAL;
5508 }
5509
5510 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5511 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5512
5513 if (ring >= tqps) {
5514 dev_err(&hdev->pdev->dev,
5515 "Error: queue id (%d) > max tqp num (%d)\n",
5516 ring, tqps - 1);
5517 return -EINVAL;
5518 }
5519
5520 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5521 q_index = ring;
5522 }
5523
5524 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5525 if (!rule)
5526 return -ENOMEM;
5527
5528 ret = hclge_fd_get_tuple(hdev, fs, rule);
5529 if (ret) {
5530 kfree(rule);
5531 return ret;
5532 }
5533
5534 rule->flow_type = fs->flow_type;
5535
5536 rule->location = fs->location;
5537 rule->unused_tuple = unused;
5538 rule->vf_id = dst_vport_id;
5539 rule->queue_id = q_index;
5540 rule->action = action;
44122887 5541 rule->rule_type = HCLGE_FD_EP_ACTIVE;
dd74f815 5542
5543 /* to avoid rule conflicts, when the user configures a rule by ethtool,
5544 * we need to clear all arfs rules
5545 */
5546 hclge_clear_arfs_rules(handle);
5547
5548 spin_lock_bh(&hdev->fd_rule_lock);
5549 ret = hclge_fd_config_rule(hdev, rule);
dd74f815 5550
44122887 5551 spin_unlock_bh(&hdev->fd_rule_lock);
dd74f815 5552
5553 return ret;
5554}
5555
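/* hclge_del_fd_entry - remove the rule at fs->location: the stage-1 TCAM
 * entry is torn down first, then the software rule list is updated under
 * fd_rule_lock.
 */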
5556static int hclge_del_fd_entry(struct hnae3_handle *handle,
5557 struct ethtool_rxnfc *cmd)
5558{
5559 struct hclge_vport *vport = hclge_get_vport(handle);
5560 struct hclge_dev *hdev = vport->back;
5561 struct ethtool_rx_flow_spec *fs;
5562 int ret;
5563
5564 if (!hnae3_dev_fd_supported(hdev))
5565 return -EOPNOTSUPP;
5566
5567 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5568
5569 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5570 return -EINVAL;
5571
5572 if (!hclge_fd_rule_exist(hdev, fs->location)) {
5573 dev_err(&hdev->pdev->dev,
9b2f3477 5574 "Delete fail, rule %d is inexistent\n", fs->location);
5575 return -ENOENT;
5576 }
5577
5578 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
5579 NULL, false);
5580 if (ret)
5581 return ret;
5582
5583 spin_lock_bh(&hdev->fd_rule_lock);
5584 ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5585
5586 spin_unlock_bh(&hdev->fd_rule_lock);
5587
5588 return ret;
5589}
5590
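/* hclge_del_all_fd_entries - disable every location set in fd_bmap in the
 * stage-1 TCAM; when @clear_list is true also free the software rule list
 * and reset the rule count, active type and bitmap.
 */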
5591static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5592 bool clear_list)
5593{
5594 struct hclge_vport *vport = hclge_get_vport(handle);
5595 struct hclge_dev *hdev = vport->back;
5596 struct hclge_fd_rule *rule;
5597 struct hlist_node *node;
44122887 5598 u16 location;
5599
5600 if (!hnae3_dev_fd_supported(hdev))
5601 return;
5602
5603 spin_lock_bh(&hdev->fd_rule_lock);
5604 for_each_set_bit(location, hdev->fd_bmap,
5605 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5606 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5607 NULL, false);
5608
5609 if (clear_list) {
5610 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5611 rule_node) {
5612 hlist_del(&rule->rule_node);
5613 kfree(rule);
6871af29 5614 }
5615 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5616 hdev->hclge_fd_rule_num = 0;
5617 bitmap_zero(hdev->fd_bmap,
5618 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6871af29 5619 }
5620
5621 spin_unlock_bh(&hdev->fd_rule_lock);
5622}
5623
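/* hclge_restore_fd_entries - re-program all remembered rules after a reset.
 * Rules that can no longer be written to hardware are dropped from the
 * list; the function always returns 0 so that a missing or disabled flow
 * director is not treated as a reset failure.
 */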
5624static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5625{
5626 struct hclge_vport *vport = hclge_get_vport(handle);
5627 struct hclge_dev *hdev = vport->back;
5628 struct hclge_fd_rule *rule;
5629 struct hlist_node *node;
5630 int ret;
5631
5632 /* Return ok here, because reset error handling will check this
5633 * return value. If error is returned here, the reset process will
5634 * fail.
5635 */
6871af29 5636 if (!hnae3_dev_fd_supported(hdev))
65e41e7e 5637 return 0;
6871af29 5638
8edc2285 5639 /* if fd is disabled, should not restore it when reset */
9abeb7d8 5640 if (!hdev->fd_en)
5641 return 0;
5642
44122887 5643 spin_lock_bh(&hdev->fd_rule_lock);
5644 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5645 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5646 if (!ret)
5647 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5648
5649 if (ret) {
5650 dev_warn(&hdev->pdev->dev,
5651 "Restore rule %d failed, remove it\n",
5652 rule->location);
44122887 5653 clear_bit(rule->location, hdev->fd_bmap);
5654 hlist_del(&rule->rule_node);
5655 kfree(rule);
5656 hdev->hclge_fd_rule_num--;
5657 }
5658 }
5659
5660 if (hdev->hclge_fd_rule_num)
5661 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5662
5663 spin_unlock_bh(&hdev->fd_rule_lock);
5664
5665 return 0;
5666}
5667
5668static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5669 struct ethtool_rxnfc *cmd)
5670{
5671 struct hclge_vport *vport = hclge_get_vport(handle);
5672 struct hclge_dev *hdev = vport->back;
5673
5674 if (!hnae3_dev_fd_supported(hdev))
5675 return -EOPNOTSUPP;
5676
5677 cmd->rule_cnt = hdev->hclge_fd_rule_num;
5678 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5679
5680 return 0;
5681}
5682
5683static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5684 struct ethtool_rxnfc *cmd)
5685{
5686 struct hclge_vport *vport = hclge_get_vport(handle);
5687 struct hclge_fd_rule *rule = NULL;
5688 struct hclge_dev *hdev = vport->back;
5689 struct ethtool_rx_flow_spec *fs;
5690 struct hlist_node *node2;
5691
5692 if (!hnae3_dev_fd_supported(hdev))
5693 return -EOPNOTSUPP;
5694
5695 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5696
5697 spin_lock_bh(&hdev->fd_rule_lock);
5698
5699 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5700 if (rule->location >= fs->location)
5701 break;
5702 }
5703
5704 if (!rule || fs->location != rule->location) {
5705 spin_unlock_bh(&hdev->fd_rule_lock);
5706
05c2314f 5707 return -ENOENT;
44122887 5708 }
5709
5710 fs->flow_type = rule->flow_type;
5711 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5712 case SCTP_V4_FLOW:
5713 case TCP_V4_FLOW:
5714 case UDP_V4_FLOW:
5715 fs->h_u.tcp_ip4_spec.ip4src =
e91e388c 5716 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
05c2314f 5717 fs->m_u.tcp_ip4_spec.ip4src =
5718 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5719 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5720
5721 fs->h_u.tcp_ip4_spec.ip4dst =
e91e388c 5722 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
05c2314f 5723 fs->m_u.tcp_ip4_spec.ip4dst =
5724 rule->unused_tuple & BIT(INNER_DST_IP) ?
5725 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5726
5727 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5728 fs->m_u.tcp_ip4_spec.psrc =
5729 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5730 0 : cpu_to_be16(rule->tuples_mask.src_port);
5731
5732 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5733 fs->m_u.tcp_ip4_spec.pdst =
5734 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5735 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5736
5737 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5738 fs->m_u.tcp_ip4_spec.tos =
5739 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5740 0 : rule->tuples_mask.ip_tos;
5741
5742 break;
5743 case IP_USER_FLOW:
5744 fs->h_u.usr_ip4_spec.ip4src =
e91e388c 5745 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5746 fs->m_u.usr_ip4_spec.ip4src =
5747 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5748 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5749
5750 fs->h_u.usr_ip4_spec.ip4dst =
e91e388c 5751 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
05c2314f 5752 fs->m_u.usr_ip4_spec.ip4dst =
5753 rule->unused_tuple & BIT(INNER_DST_IP) ?
5754 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5755
5756 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5757 fs->m_u.usr_ip4_spec.tos =
5758 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5759 0 : rule->tuples_mask.ip_tos;
5760
5761 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5762 fs->m_u.usr_ip4_spec.proto =
5763 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5764 0 : rule->tuples_mask.ip_proto;
5765
5766 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5767
5768 break;
5769 case SCTP_V6_FLOW:
5770 case TCP_V6_FLOW:
5771 case UDP_V6_FLOW:
5772 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
e91e388c 5773 rule->tuples.src_ip, IPV6_SIZE);
05c2314f 5774 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5775 memset(fs->m_u.tcp_ip6_spec.ip6src, 0,
5776 sizeof(int) * IPV6_SIZE);
5777 else
5778 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
e91e388c 5779 rule->tuples_mask.src_ip, IPV6_SIZE);
05c2314f
JS
5780
5781 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
e91e388c 5782 rule->tuples.dst_ip, IPV6_SIZE);
05c2314f 5783 if (rule->unused_tuple & BIT(INNER_DST_IP))
5784 memset(fs->m_u.tcp_ip6_spec.ip6dst, 0,
5785 sizeof(int) * IPV6_SIZE);
5786 else
5787 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
e91e388c 5788 rule->tuples_mask.dst_ip, IPV6_SIZE);
5789
5790 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5791 fs->m_u.tcp_ip6_spec.psrc =
5792 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5793 0 : cpu_to_be16(rule->tuples_mask.src_port);
5794
5795 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5796 fs->m_u.tcp_ip6_spec.pdst =
5797 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5798 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5799
5800 break;
5801 case IPV6_USER_FLOW:
5802 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
e91e388c 5803 rule->tuples.src_ip, IPV6_SIZE);
05c2314f 5804 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5805 memset(fs->m_u.usr_ip6_spec.ip6src, 0,
5806 sizeof(int) * IPV6_SIZE);
5807 else
5808 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
e91e388c 5809 rule->tuples_mask.src_ip, IPV6_SIZE);
05c2314f
JS
5810
5811 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
e91e388c 5812 rule->tuples.dst_ip, IPV6_SIZE);
05c2314f 5813 if (rule->unused_tuple & BIT(INNER_DST_IP))
5814 memset(fs->m_u.usr_ip6_spec.ip6dst, 0,
5815 sizeof(int) * IPV6_SIZE);
5816 else
5817 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
e91e388c 5818 rule->tuples_mask.dst_ip, IPV6_SIZE);
5819
5820 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
5821 fs->m_u.usr_ip6_spec.l4_proto =
5822 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5823 0 : rule->tuples_mask.ip_proto;
5824
5825 break;
5826 case ETHER_FLOW:
5827 ether_addr_copy(fs->h_u.ether_spec.h_source,
5828 rule->tuples.src_mac);
5829 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
5830 eth_zero_addr(fs->m_u.ether_spec.h_source);
5831 else
5832 ether_addr_copy(fs->m_u.ether_spec.h_source,
5833 rule->tuples_mask.src_mac);
5834
5835 ether_addr_copy(fs->h_u.ether_spec.h_dest,
5836 rule->tuples.dst_mac);
5837 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5838 eth_zero_addr(fs->m_u.ether_spec.h_dest);
5839 else
5840 ether_addr_copy(fs->m_u.ether_spec.h_dest,
5841 rule->tuples_mask.dst_mac);
5842
5843 fs->h_u.ether_spec.h_proto =
5844 cpu_to_be16(rule->tuples.ether_proto);
5845 fs->m_u.ether_spec.h_proto =
5846 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
5847 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
5848
5849 break;
5850 default:
44122887 5851 spin_unlock_bh(&hdev->fd_rule_lock);
5852 return -EOPNOTSUPP;
5853 }
5854
5855 if (fs->flow_type & FLOW_EXT) {
5856 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
5857 fs->m_ext.vlan_tci =
5858 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
5859 cpu_to_be16(VLAN_VID_MASK) :
5860 cpu_to_be16(rule->tuples_mask.vlan_tag1);
5861 }
5862
5863 if (fs->flow_type & FLOW_MAC_EXT) {
5864 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
5865 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5866 eth_zero_addr(fs->m_ext.h_dest);
5867 else
5868 ether_addr_copy(fs->m_ext.h_dest,
5869 rule->tuples_mask.dst_mac);
5870 }
5871
5872 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5873 fs->ring_cookie = RX_CLS_FLOW_DISC;
5874 } else {
5875 u64 vf_id;
5876
5877 fs->ring_cookie = rule->queue_id;
5878 vf_id = rule->vf_id;
5879 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
5880 fs->ring_cookie |= vf_id;
5881 }
5882
5883 spin_unlock_bh(&hdev->fd_rule_lock);
5884
5885 return 0;
5886}
5887
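/* hclge_get_all_rules - fill @rule_locs with the location of every rule in
 * the fd rule list for an ethtool rule dump; returns -EMSGSIZE when there
 * are more rules than cmd->rule_cnt allows.
 */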
5888static int hclge_get_all_rules(struct hnae3_handle *handle,
5889 struct ethtool_rxnfc *cmd, u32 *rule_locs)
5890{
5891 struct hclge_vport *vport = hclge_get_vport(handle);
5892 struct hclge_dev *hdev = vport->back;
5893 struct hclge_fd_rule *rule;
5894 struct hlist_node *node2;
5895 int cnt = 0;
5896
5897 if (!hnae3_dev_fd_supported(hdev))
5898 return -EOPNOTSUPP;
5899
5900 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5901
44122887 5902 spin_lock_bh(&hdev->fd_rule_lock);
5903 hlist_for_each_entry_safe(rule, node2,
5904 &hdev->fd_rule_list, rule_node) {
5905 if (cnt == cmd->rule_cnt) {
5906 spin_unlock_bh(&hdev->fd_rule_lock);
05c2314f 5907 return -EMSGSIZE;
44122887 5908 }
5909
5910 rule_locs[cnt] = rule->location;
5911 cnt++;
5912 }
5913
5914 spin_unlock_bh(&hdev->fd_rule_lock);
5915
5916 cmd->rule_cnt = cnt;
5917
5918 return 0;
5919}
5920
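/* hclge_fd_get_flow_tuples - build hclge_fd_rule_tuples from dissected
 * flow_keys: ethertype, L4 protocol and destination port are converted to
 * host order; IPv4 addresses go into src_ip[3]/dst_ip[3], IPv6 addresses
 * are copied in as-is.
 */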
5921static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
5922 struct hclge_fd_rule_tuples *tuples)
5923{
5924 tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
5925 tuples->ip_proto = fkeys->basic.ip_proto;
5926 tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
5927
5928 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
5929 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
5930 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
5931 } else {
5932 memcpy(tuples->src_ip,
5933 fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
5934 sizeof(tuples->src_ip));
5935 memcpy(tuples->dst_ip,
5936 fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
5937 sizeof(tuples->dst_ip));
5938 }
5939}
5940
5941/* traverse all rules, check whether an existed rule has the same tuples */
5942static struct hclge_fd_rule *
5943hclge_fd_search_flow_keys(struct hclge_dev *hdev,
5944 const struct hclge_fd_rule_tuples *tuples)
5945{
5946 struct hclge_fd_rule *rule = NULL;
5947 struct hlist_node *node;
5948
5949 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5950 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
5951 return rule;
5952 }
5953
5954 return NULL;
5955}
5956
5957static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
5958 struct hclge_fd_rule *rule)
5959{
5960 rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5961 BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
5962 BIT(INNER_SRC_PORT);
5963 rule->action = 0;
5964 rule->vf_id = 0;
5965 rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
5966 if (tuples->ether_proto == ETH_P_IP) {
5967 if (tuples->ip_proto == IPPROTO_TCP)
5968 rule->flow_type = TCP_V4_FLOW;
5969 else
5970 rule->flow_type = UDP_V4_FLOW;
5971 } else {
5972 if (tuples->ip_proto == IPPROTO_TCP)
5973 rule->flow_type = TCP_V6_FLOW;
5974 else
5975 rule->flow_type = UDP_V6_FLOW;
5976 }
5977 memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
5978 memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
5979}
5980
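/* hclge_add_fd_entry_by_arfs - aRFS callback: look up a rule with the same
 * tuples; if none exists, allocate a free location from fd_bmap and program
 * a new rule, otherwise just retarget the existing rule to the new queue.
 * User-configured (EP_ACTIVE) rules take precedence and disable aRFS.
 */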
5981static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
5982 u16 flow_id, struct flow_keys *fkeys)
5983{
5984 struct hclge_vport *vport = hclge_get_vport(handle);
5985 struct hclge_fd_rule_tuples new_tuples;
5986 struct hclge_dev *hdev = vport->back;
5987 struct hclge_fd_rule *rule;
5988 u16 tmp_queue_id;
5989 u16 bit_id;
5990 int ret;
5991
5992 if (!hnae3_dev_fd_supported(hdev))
5993 return -EOPNOTSUPP;
5994
5995 memset(&new_tuples, 0, sizeof(new_tuples));
5996 hclge_fd_get_flow_tuples(fkeys, &new_tuples);
5997
5998 spin_lock_bh(&hdev->fd_rule_lock);
5999
6000 /* when there is already an fd rule added by the user,
6001 * arfs should not work
6002 */
6003 if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6004 spin_unlock_bh(&hdev->fd_rule_lock);
6005
6006 return -EOPNOTSUPP;
6007 }
6008
6009 /* check whether a flow director filter already exists for this flow:
6010 * if not, create a new filter for it;
6011 * if a filter exists with a different queue id, modify the filter;
6012 * if a filter exists with the same queue id, do nothing
6013 */
6014 rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6015 if (!rule) {
6016 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6017 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6018 spin_unlock_bh(&hdev->fd_rule_lock);
6019
6020 return -ENOSPC;
6021 }
6022
d659f9f6 6023 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
6024 if (!rule) {
6025 spin_unlock_bh(&hdev->fd_rule_lock);
6026
6027 return -ENOMEM;
6028 }
6029
6030 set_bit(bit_id, hdev->fd_bmap);
6031 rule->location = bit_id;
6032 rule->flow_id = flow_id;
6033 rule->queue_id = queue_id;
6034 hclge_fd_build_arfs_rule(&new_tuples, rule);
6035 ret = hclge_fd_config_rule(hdev, rule);
6036
6037 spin_unlock_bh(&hdev->fd_rule_lock);
6038
6039 if (ret)
6040 return ret;
6041
6042 return rule->location;
6043 }
6044
6045 spin_unlock_bh(&hdev->fd_rule_lock);
6046
6047 if (rule->queue_id == queue_id)
6048 return rule->location;
6049
6050 tmp_queue_id = rule->queue_id;
6051 rule->queue_id = queue_id;
6052 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6053 if (ret) {
6054 rule->queue_id = tmp_queue_id;
6055 return ret;
6056 }
6057
6058 return rule->location;
6059}
6060
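/* hclge_rfs_filter_expire - let rps_may_expire_flow() decide which aRFS
 * rules are stale; expired rules are unlinked under fd_rule_lock and their
 * TCAM entries are released after the lock is dropped.
 */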
6061static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6062{
6063#ifdef CONFIG_RFS_ACCEL
6064 struct hnae3_handle *handle = &hdev->vport[0].nic;
6065 struct hclge_fd_rule *rule;
6066 struct hlist_node *node;
6067 HLIST_HEAD(del_list);
6068
6069 spin_lock_bh(&hdev->fd_rule_lock);
6070 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6071 spin_unlock_bh(&hdev->fd_rule_lock);
6072 return;
6073 }
6074 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6075 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6076 rule->flow_id, rule->location)) {
6077 hlist_del_init(&rule->rule_node);
6078 hlist_add_head(&rule->rule_node, &del_list);
6079 hdev->hclge_fd_rule_num--;
6080 clear_bit(rule->location, hdev->fd_bmap);
6081 }
6082 }
6083 spin_unlock_bh(&hdev->fd_rule_lock);
6084
6085 hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6086 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6087 rule->location, NULL, false);
6088 kfree(rule);
6089 }
6090#endif
6091}
6092
6093static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6094{
6095#ifdef CONFIG_RFS_ACCEL
6096 struct hclge_vport *vport = hclge_get_vport(handle);
6097 struct hclge_dev *hdev = vport->back;
6098
6099 if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6100 hclge_del_all_fd_entries(handle, true);
6101#endif
6102}
6103
6104static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6105{
6106 struct hclge_vport *vport = hclge_get_vport(handle);
6107 struct hclge_dev *hdev = vport->back;
6108
6109 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6110 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6111}
6112
6113static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6114{
6115 struct hclge_vport *vport = hclge_get_vport(handle);
6116 struct hclge_dev *hdev = vport->back;
6117
6118 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6119}
6120
6121static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6122{
6123 struct hclge_vport *vport = hclge_get_vport(handle);
6124 struct hclge_dev *hdev = vport->back;
6125
f02eb82d 6126 return hdev->rst_stats.hw_reset_done_cnt;
6127}
6128
6129static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
6130{
6131 struct hclge_vport *vport = hclge_get_vport(handle);
6132 struct hclge_dev *hdev = vport->back;
44122887 6133 bool clear;
c17852a8 6134
9abeb7d8 6135 hdev->fd_en = enable;
44122887 6136 clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE ? true : false;
c17852a8 6137 if (!enable)
44122887 6138 hclge_del_all_fd_entries(handle, clear);
6139 else
6140 hclge_restore_fd_entries(handle);
6141}
6142
6143static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
6144{
6145 struct hclge_desc desc;
d44f9b63
YL
6146 struct hclge_config_mac_mode_cmd *req =
6147 (struct hclge_config_mac_mode_cmd *)desc.data;
a90bb9a5 6148 u32 loop_en = 0;
46a3df9f
S
6149 int ret;
6150
6151 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
b9a8f883
YL
6152
6153 if (enable) {
6154 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
6155 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
6156 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
6157 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
6158 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
6159 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
6160 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
6161 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
6162 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
6163 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
6164 }
6165
a90bb9a5 6166 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
46a3df9f
S
6167
6168 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6169 if (ret)
6170 dev_err(&hdev->pdev->dev,
6171 "mac enable fail, ret =%d.\n", ret);
6172}
6173
eb66d503 6174static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
c39c4d98 6175{
c39c4d98 6176 struct hclge_config_mac_mode_cmd *req;
6177 struct hclge_desc desc;
6178 u32 loop_en;
6179 int ret;
6180
6181 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
6182 /* 1 Read out the MAC mode config at first */
6183 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
6184 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6185 if (ret) {
6186 dev_err(&hdev->pdev->dev,
6187 "mac loopback get fail, ret =%d.\n", ret);
6188 return ret;
6189 }
c39c4d98 6190
e4d68dae
YL
6191 /* 2 Then setup the loopback flag */
6192 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
e4e87715 6193 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
0f29fc23
YL
6194 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
6195 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
e4d68dae
YL
6196
6197 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
c39c4d98 6198
e4d68dae
YL
6199 /* 3 Config mac work mode with loopback flag
6200 * and its original configure parameters
6201 */
6202 hclge_cmd_reuse_desc(&desc, false);
6203 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6204 if (ret)
6205 dev_err(&hdev->pdev->dev,
6206 "mac loopback set fail, ret =%d.\n", ret);
6207 return ret;
6208}
c39c4d98 6209
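/* hclge_set_serdes_loopback - enable/disable serial or parallel serdes
 * loopback via the HCLGE_OPC_SERDES_LOOPBACK command, poll until the
 * firmware reports completion, then wait for the MAC link status to match
 * the requested loopback state.
 */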
6210static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
6211 enum hnae3_loop loop_mode)
5fd50ac3
PL
6212{
6213#define HCLGE_SERDES_RETRY_MS 10
6214#define HCLGE_SERDES_RETRY_NUM 100
350fda0a 6215
fd85717d 6216#define HCLGE_MAC_LINK_STATUS_MS 10
6217#define HCLGE_MAC_LINK_STATUS_NUM 100
350fda0a 6218#define HCLGE_MAC_LINK_STATUS_DOWN 0
6219#define HCLGE_MAC_LINK_STATUS_UP 1
6220
5fd50ac3
PL
6221 struct hclge_serdes_lb_cmd *req;
6222 struct hclge_desc desc;
350fda0a 6223 int mac_link_ret = 0;
5fd50ac3 6224 int ret, i = 0;
4dc13b96 6225 u8 loop_mode_b;
5fd50ac3 6226
d0d72bac 6227 req = (struct hclge_serdes_lb_cmd *)desc.data;
6228 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
6229
4dc13b96
FL
6230 switch (loop_mode) {
6231 case HNAE3_LOOP_SERIAL_SERDES:
6232 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6233 break;
6234 case HNAE3_LOOP_PARALLEL_SERDES:
6235 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
6236 break;
6237 default:
6238 dev_err(&hdev->pdev->dev,
6239 "unsupported serdes loopback mode %d\n", loop_mode);
6240 return -ENOTSUPP;
6241 }
6242
5fd50ac3 6243 if (en) {
6244 req->enable = loop_mode_b;
6245 req->mask = loop_mode_b;
350fda0a 6246 mac_link_ret = HCLGE_MAC_LINK_STATUS_UP;
5fd50ac3 6247 } else {
4dc13b96 6248 req->mask = loop_mode_b;
350fda0a 6249 mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN;
5fd50ac3
PL
6250 }
6251
6252 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6253 if (ret) {
6254 dev_err(&hdev->pdev->dev,
6255 "serdes loopback set fail, ret = %d\n", ret);
6256 return ret;
6257 }
6258
6259 do {
6260 msleep(HCLGE_SERDES_RETRY_MS);
6261 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
6262 true);
6263 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6264 if (ret) {
6265 dev_err(&hdev->pdev->dev,
6266 "serdes loopback get, ret = %d\n", ret);
6267 return ret;
6268 }
6269 } while (++i < HCLGE_SERDES_RETRY_NUM &&
6270 !(req->result & HCLGE_CMD_SERDES_DONE_B));
6271
6272 if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
6273 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
6274 return -EBUSY;
6275 } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
6276 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
6277 return -EIO;
6278 }
6279
0f29fc23 6280 hclge_cfg_mac_mode(hdev, en);
350fda0a 6281
6282 i = 0;
6283 do {
6284 /* serdes internal loopback, independent of the network cable. */
6285 msleep(HCLGE_MAC_LINK_STATUS_MS);
6286 ret = hclge_get_mac_link_status(hdev);
6287 if (ret == mac_link_ret)
6288 return 0;
6289 } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
6290
6291 dev_err(&hdev->pdev->dev, "config mac mode timeout\n");
6292
6293 return -EBUSY;
5fd50ac3
PL
6294}
6295
ebaf1908 6296static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
0f29fc23
YL
6297 int stream_id, bool enable)
6298{
6299 struct hclge_desc desc;
6300 struct hclge_cfg_com_tqp_queue_cmd *req =
6301 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6302 int ret;
6303
6304 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6305 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6306 req->stream_id = cpu_to_le16(stream_id);
ebaf1908
WL
6307 if (enable)
6308 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
0f29fc23
YL
6309
6310 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6311 if (ret)
6312 dev_err(&hdev->pdev->dev,
6313 "Tqp enable fail, status =%d.\n", ret);
6314 return ret;
6315}
6316
e4d68dae
YL
6317static int hclge_set_loopback(struct hnae3_handle *handle,
6318 enum hnae3_loop loop_mode, bool en)
6319{
6320 struct hclge_vport *vport = hclge_get_vport(handle);
205a24ca 6321 struct hnae3_knic_private_info *kinfo;
e4d68dae 6322 struct hclge_dev *hdev = vport->back;
0f29fc23 6323 int i, ret;
e4d68dae
YL
6324
6325 switch (loop_mode) {
eb66d503
FL
6326 case HNAE3_LOOP_APP:
6327 ret = hclge_set_app_loopback(hdev, en);
c39c4d98 6328 break;
4dc13b96
FL
6329 case HNAE3_LOOP_SERIAL_SERDES:
6330 case HNAE3_LOOP_PARALLEL_SERDES:
6331 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
5fd50ac3 6332 break;
c39c4d98
YL
6333 default:
6334 ret = -ENOTSUPP;
6335 dev_err(&hdev->pdev->dev,
6336 "loop_mode %d is not supported\n", loop_mode);
6337 break;
6338 }
6339
47ef6dec
JS
6340 if (ret)
6341 return ret;
6342
205a24ca
HT
6343 kinfo = &vport->nic.kinfo;
6344 for (i = 0; i < kinfo->num_tqps; i++) {
0f29fc23
YL
6345 ret = hclge_tqp_enable(hdev, i, 0, en);
6346 if (ret)
6347 return ret;
6348 }
46a3df9f 6349
0f29fc23 6350 return 0;
46a3df9f
S
6351}
6352
6353static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6354{
6355 struct hclge_vport *vport = hclge_get_vport(handle);
205a24ca 6356 struct hnae3_knic_private_info *kinfo;
46a3df9f
S
6357 struct hnae3_queue *queue;
6358 struct hclge_tqp *tqp;
6359 int i;
6360
205a24ca
HT
6361 kinfo = &vport->nic.kinfo;
6362 for (i = 0; i < kinfo->num_tqps; i++) {
46a3df9f
S
6363 queue = handle->kinfo.tqp[i];
6364 tqp = container_of(queue, struct hclge_tqp, q);
6365 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6366 }
6367}
6368
8cdb992f
JS
6369static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6370{
6371 struct hclge_vport *vport = hclge_get_vport(handle);
6372 struct hclge_dev *hdev = vport->back;
6373
6374 if (enable) {
ed8fb4b2 6375 hclge_task_schedule(hdev, round_jiffies_relative(HZ));
8cdb992f 6376 } else {
6377 /* Set the DOWN flag here to disable the service to be
6378 * scheduled again
6379 */
6380 set_bit(HCLGE_STATE_DOWN, &hdev->state);
6381 cancel_delayed_work_sync(&hdev->service_task);
8cdb992f
JS
6382 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
6383 }
6384}
6385
46a3df9f
S
6386static int hclge_ae_start(struct hnae3_handle *handle)
6387{
6388 struct hclge_vport *vport = hclge_get_vport(handle);
6389 struct hclge_dev *hdev = vport->back;
46a3df9f 6390
46a3df9f
S
6391 /* mac enable */
6392 hclge_cfg_mac_mode(hdev, true);
6393 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
be8d8cdb 6394 hdev->hw.mac.link = 0;
46a3df9f 6395
b50ae26c
PL
6396 /* reset tqp stats */
6397 hclge_reset_tqp_stats(handle);
6398
b01b7cf1 6399 hclge_mac_start_phy(hdev);
46a3df9f 6400
46a3df9f
S
6401 return 0;
6402}
6403
6404static void hclge_ae_stop(struct hnae3_handle *handle)
6405{
6406 struct hclge_vport *vport = hclge_get_vport(handle);
6407 struct hclge_dev *hdev = vport->back;
39cfbc9c 6408 int i;
46a3df9f 6409
2f7e4896
FL
6410 set_bit(HCLGE_STATE_DOWN, &hdev->state);
6411
d93ed94f
JS
6412 hclge_clear_arfs_rules(handle);
6413
6414 /* If it is not PF reset, the firmware will disable the MAC,
6415 * so it only need to stop phy here.
6416 */
6417 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6418 hdev->reset_type != HNAE3_FUNC_RESET) {
9617f668 6419 hclge_mac_stop_phy(hdev);
ed8fb4b2 6420 hclge_update_link_status(hdev);
b50ae26c 6421 return;
9617f668 6422 }
b50ae26c 6423
39cfbc9c
HT
6424 for (i = 0; i < handle->kinfo.num_tqps; i++)
6425 hclge_reset_tqp(handle, i);
6426
20981a1e
HT
6427 hclge_config_mac_tnl_int(hdev, false);
6428
46a3df9f
S
6429 /* Mac disable */
6430 hclge_cfg_mac_mode(hdev, false);
6431
6432 hclge_mac_stop_phy(hdev);
6433
6434 /* reset tqp stats */
6435 hclge_reset_tqp_stats(handle);
f30dfddc 6436 hclge_update_link_status(hdev);
46a3df9f
S
6437}
6438
a6d818e3
YL
6439int hclge_vport_start(struct hclge_vport *vport)
6440{
6441 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6442 vport->last_active_jiffies = jiffies;
6443 return 0;
6444}
6445
6446void hclge_vport_stop(struct hclge_vport *vport)
6447{
6448 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6449}
6450
6451static int hclge_client_start(struct hnae3_handle *handle)
6452{
6453 struct hclge_vport *vport = hclge_get_vport(handle);
6454
6455 return hclge_vport_start(vport);
6456}
6457
6458static void hclge_client_stop(struct hnae3_handle *handle)
6459{
6460 struct hclge_vport *vport = hclge_get_vport(handle);
6461
6462 hclge_vport_stop(vport);
6463}
6464
46a3df9f
S
6465static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6466 u16 cmdq_resp, u8 resp_code,
6467 enum hclge_mac_vlan_tbl_opcode op)
6468{
6469 struct hclge_dev *hdev = vport->back;
46a3df9f
S
6470
6471 if (cmdq_resp) {
6472 dev_err(&hdev->pdev->dev,
6473 "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
6474 cmdq_resp);
6475 return -EIO;
6476 }
6477
6478 if (op == HCLGE_MAC_VLAN_ADD) {
6479 if ((!resp_code) || (resp_code == 1)) {
6e4139f6 6480 return 0;
b37ce587 6481 } else if (resp_code == HCLGE_ADD_UC_OVERFLOW) {
46a3df9f
S
6482 dev_err(&hdev->pdev->dev,
6483 "add mac addr failed for uc_overflow.\n");
6e4139f6 6484 return -ENOSPC;
b37ce587 6485 } else if (resp_code == HCLGE_ADD_MC_OVERFLOW) {
46a3df9f
S
6486 dev_err(&hdev->pdev->dev,
6487 "add mac addr failed for mc_overflow.\n");
6e4139f6 6488 return -ENOSPC;
46a3df9f 6489 }
6e4139f6
JS
6490
6491 dev_err(&hdev->pdev->dev,
6492 "add mac addr failed for undefined, code=%u.\n",
6493 resp_code);
6494 return -EIO;
46a3df9f
S
6495 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
6496 if (!resp_code) {
6e4139f6 6497 return 0;
46a3df9f 6498 } else if (resp_code == 1) {
46a3df9f
S
6499 dev_dbg(&hdev->pdev->dev,
6500 "remove mac addr failed for miss.\n");
6e4139f6 6501 return -ENOENT;
46a3df9f 6502 }
6e4139f6
JS
6503
6504 dev_err(&hdev->pdev->dev,
6505 "remove mac addr failed for undefined, code=%u.\n",
6506 resp_code);
6507 return -EIO;
46a3df9f
S
6508 } else if (op == HCLGE_MAC_VLAN_LKUP) {
6509 if (!resp_code) {
6e4139f6 6510 return 0;
46a3df9f 6511 } else if (resp_code == 1) {
46a3df9f
S
6512 dev_dbg(&hdev->pdev->dev,
6513 "lookup mac addr failed for miss.\n");
6e4139f6 6514 return -ENOENT;
46a3df9f 6515 }
6e4139f6 6516
46a3df9f 6517 dev_err(&hdev->pdev->dev,
6e4139f6
JS
6518 "lookup mac addr failed for undefined, code=%u.\n",
6519 resp_code);
6520 return -EIO;
46a3df9f
S
6521 }
6522
6e4139f6
JS
6523 dev_err(&hdev->pdev->dev,
6524 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
6525
6526 return -EINVAL;
46a3df9f
S
6527}
6528
6529static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
6530{
b37ce587
YM
6531#define HCLGE_VF_NUM_IN_FIRST_DESC 192
6532
b9a8f883
YL
6533 unsigned int word_num;
6534 unsigned int bit_num;
46a3df9f
S
6535
6536 if (vfid > 255 || vfid < 0)
6537 return -EIO;
6538
b37ce587 6539 if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
46a3df9f
S
6540 word_num = vfid / 32;
6541 bit_num = vfid % 32;
6542 if (clr)
a90bb9a5 6543 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
46a3df9f 6544 else
a90bb9a5 6545 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
46a3df9f 6546 } else {
b37ce587 6547 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
46a3df9f
S
6548 bit_num = vfid % 32;
6549 if (clr)
a90bb9a5 6550 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
46a3df9f 6551 else
a90bb9a5 6552 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
46a3df9f
S
6553 }
6554
6555 return 0;
6556}
6557
6558static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
6559{
6560#define HCLGE_DESC_NUMBER 3
6561#define HCLGE_FUNC_NUMBER_PER_DESC 6
6562 int i, j;
6563
6c39d527 6564 for (i = 1; i < HCLGE_DESC_NUMBER; i++)
46a3df9f
S
6565 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
6566 if (desc[i].data[j])
6567 return false;
6568
6569 return true;
6570}
6571
d44f9b63 6572static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
3a586422 6573 const u8 *addr, bool is_mc)
46a3df9f
S
6574{
6575 const unsigned char *mac_addr = addr;
6576 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
6577 (mac_addr[0]) | (mac_addr[1] << 8);
6578 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
6579
3a586422
WL
6580 hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6581 if (is_mc) {
6582 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
6583 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6584 }
6585
46a3df9f
S
6586 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
6587 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
6588}
6589
46a3df9f 6590static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
d44f9b63 6591 struct hclge_mac_vlan_tbl_entry_cmd *req)
46a3df9f
S
6592{
6593 struct hclge_dev *hdev = vport->back;
6594 struct hclge_desc desc;
6595 u8 resp_code;
a90bb9a5 6596 u16 retval;
46a3df9f
S
6597 int ret;
6598
6599 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
6600
d44f9b63 6601 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
46a3df9f
S
6602
6603 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6604 if (ret) {
6605 dev_err(&hdev->pdev->dev,
6606 "del mac addr failed for cmd_send, ret =%d.\n",
6607 ret);
6608 return ret;
6609 }
a90bb9a5
YL
6610 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6611 retval = le16_to_cpu(desc.retval);
46a3df9f 6612
a90bb9a5 6613 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
46a3df9f
S
6614 HCLGE_MAC_VLAN_REMOVE);
6615}
6616
6617static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
d44f9b63 6618 struct hclge_mac_vlan_tbl_entry_cmd *req,
46a3df9f
S
6619 struct hclge_desc *desc,
6620 bool is_mc)
6621{
6622 struct hclge_dev *hdev = vport->back;
6623 u8 resp_code;
a90bb9a5 6624 u16 retval;
46a3df9f
S
6625 int ret;
6626
6627 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
6628 if (is_mc) {
6629 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6630 memcpy(desc[0].data,
6631 req,
d44f9b63 6632 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
46a3df9f
S
6633 hclge_cmd_setup_basic_desc(&desc[1],
6634 HCLGE_OPC_MAC_VLAN_ADD,
6635 true);
6636 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6637 hclge_cmd_setup_basic_desc(&desc[2],
6638 HCLGE_OPC_MAC_VLAN_ADD,
6639 true);
6640 ret = hclge_cmd_send(&hdev->hw, desc, 3);
6641 } else {
6642 memcpy(desc[0].data,
6643 req,
d44f9b63 6644 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
46a3df9f
S
6645 ret = hclge_cmd_send(&hdev->hw, desc, 1);
6646 }
6647 if (ret) {
6648 dev_err(&hdev->pdev->dev,
6649 "lookup mac addr failed for cmd_send, ret =%d.\n",
6650 ret);
6651 return ret;
6652 }
a90bb9a5
YL
6653 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
6654 retval = le16_to_cpu(desc[0].retval);
46a3df9f 6655
a90bb9a5 6656 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
46a3df9f
S
6657 HCLGE_MAC_VLAN_LKUP);
6658}
6659
6660static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
d44f9b63 6661 struct hclge_mac_vlan_tbl_entry_cmd *req,
46a3df9f
S
6662 struct hclge_desc *mc_desc)
6663{
6664 struct hclge_dev *hdev = vport->back;
6665 int cfg_status;
6666 u8 resp_code;
a90bb9a5 6667 u16 retval;
46a3df9f
S
6668 int ret;
6669
6670 if (!mc_desc) {
6671 struct hclge_desc desc;
6672
6673 hclge_cmd_setup_basic_desc(&desc,
6674 HCLGE_OPC_MAC_VLAN_ADD,
6675 false);
d44f9b63
YL
6676 memcpy(desc.data, req,
6677 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
46a3df9f 6678 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
a90bb9a5
YL
6679 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6680 retval = le16_to_cpu(desc.retval);
6681
6682 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
46a3df9f
S
6683 resp_code,
6684 HCLGE_MAC_VLAN_ADD);
6685 } else {
c3b6f755 6686 hclge_cmd_reuse_desc(&mc_desc[0], false);
46a3df9f 6687 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
c3b6f755 6688 hclge_cmd_reuse_desc(&mc_desc[1], false);
46a3df9f 6689 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
c3b6f755 6690 hclge_cmd_reuse_desc(&mc_desc[2], false);
46a3df9f
S
6691 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
6692 memcpy(mc_desc[0].data, req,
d44f9b63 6693 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
46a3df9f 6694 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
a90bb9a5
YL
6695 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
6696 retval = le16_to_cpu(mc_desc[0].retval);
6697
6698 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
46a3df9f
S
6699 resp_code,
6700 HCLGE_MAC_VLAN_ADD);
6701 }
6702
6703 if (ret) {
6704 dev_err(&hdev->pdev->dev,
6705 "add mac addr failed for cmd_send, ret =%d.\n",
6706 ret);
6707 return ret;
6708 }
6709
6710 return cfg_status;
6711}
6712
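/* hclge_init_umv_space - request unicast MAC-VLAN (UMV) table space from
 * firmware. The allocated space is split into a private quota per function
 * (priv_umv_size) plus a pool (share_umv_size) shared by the PF and its VFs.
 */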
6713static int hclge_init_umv_space(struct hclge_dev *hdev)
6714{
6715 u16 allocated_size = 0;
6716 int ret;
6717
6718 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
6719 true);
6720 if (ret)
6721 return ret;
6722
6723 if (allocated_size < hdev->wanted_umv_size)
6724 dev_warn(&hdev->pdev->dev,
6725 "Alloc umv space failed, want %d, get %d\n",
6726 hdev->wanted_umv_size, allocated_size);
6727
6728 mutex_init(&hdev->umv_mutex);
6729 hdev->max_umv_size = allocated_size;
6730 /* divide max_umv_size by (hdev->num_req_vfs + 2), in order to
6731 * preserve some unicast mac vlan table entries shared by pf
6732 * and its vfs.
6733 */
6734 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
6735 hdev->share_umv_size = hdev->priv_umv_size +
6736 hdev->max_umv_size % (hdev->num_req_vfs + 2);
6737
6738 return 0;
6739}
6740
6741static int hclge_uninit_umv_space(struct hclge_dev *hdev)
6742{
6743 int ret;
6744
6745 if (hdev->max_umv_size > 0) {
6746 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
6747 false);
6748 if (ret)
6749 return ret;
6750 hdev->max_umv_size = 0;
6751 }
6752 mutex_destroy(&hdev->umv_mutex);
6753
6754 return 0;
6755}
6756
6757static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
6758 u16 *allocated_size, bool is_alloc)
6759{
6760 struct hclge_umv_spc_alc_cmd *req;
6761 struct hclge_desc desc;
6762 int ret;
6763
6764 req = (struct hclge_umv_spc_alc_cmd *)desc.data;
6765 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
63cbf7a9
YM
6766 if (!is_alloc)
6767 hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, 1);
6768
39932473
JS
6769 req->space_size = cpu_to_le32(space_size);
6770
6771 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6772 if (ret) {
6773 dev_err(&hdev->pdev->dev,
6774 "%s umv space failed for cmd_send, ret =%d\n",
6775 is_alloc ? "allocate" : "free", ret);
6776 return ret;
6777 }
6778
6779 if (is_alloc && allocated_size)
6780 *allocated_size = le32_to_cpu(desc.data[1]);
6781
6782 return 0;
6783}
6784
6785static void hclge_reset_umv_space(struct hclge_dev *hdev)
6786{
6787 struct hclge_vport *vport;
6788 int i;
6789
6790 for (i = 0; i < hdev->num_alloc_vport; i++) {
6791 vport = &hdev->vport[i];
6792 vport->used_umv_num = 0;
6793 }
6794
6795 mutex_lock(&hdev->umv_mutex);
6796 hdev->share_umv_size = hdev->priv_umv_size +
6797 hdev->max_umv_size % (hdev->num_req_vfs + 2);
6798 mutex_unlock(&hdev->umv_mutex);
6799}
6800
6801static bool hclge_is_umv_space_full(struct hclge_vport *vport)
6802{
6803 struct hclge_dev *hdev = vport->back;
6804 bool is_full;
6805
6806 mutex_lock(&hdev->umv_mutex);
6807 is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
6808 hdev->share_umv_size == 0);
6809 mutex_unlock(&hdev->umv_mutex);
6810
6811 return is_full;
6812}
6813
6814static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
6815{
6816 struct hclge_dev *hdev = vport->back;
6817
6818 mutex_lock(&hdev->umv_mutex);
6819 if (is_free) {
6820 if (vport->used_umv_num > hdev->priv_umv_size)
6821 hdev->share_umv_size++;
54a395b6 6822
6823 if (vport->used_umv_num > 0)
6824 vport->used_umv_num--;
39932473 6825 } else {
54a395b6 6826 if (vport->used_umv_num >= hdev->priv_umv_size &&
6827 hdev->share_umv_size > 0)
39932473
JS
6828 hdev->share_umv_size--;
6829 vport->used_umv_num++;
6830 }
6831 mutex_unlock(&hdev->umv_mutex);
6832}
6833
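/* hclge_add_uc_addr_common - add a unicast entry for @vport: validate the
 * address, look it up in the MAC-VLAN table and only add it when it is not
 * already present and UMV space accounting still has room; a duplicate is
 * reported with a warning and treated as success.
 */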
6834static int hclge_add_uc_addr(struct hnae3_handle *handle,
6835 const unsigned char *addr)
6836{
6837 struct hclge_vport *vport = hclge_get_vport(handle);
6838
6839 return hclge_add_uc_addr_common(vport, addr);
6840}
6841
6842int hclge_add_uc_addr_common(struct hclge_vport *vport,
6843 const unsigned char *addr)
6844{
6845 struct hclge_dev *hdev = vport->back;
d44f9b63 6846 struct hclge_mac_vlan_tbl_entry_cmd req;
d07b6bb4 6847 struct hclge_desc desc;
a90bb9a5 6848 u16 egress_port = 0;
aa7a795e 6849 int ret;
46a3df9f
S
6850
6851 /* mac addr check */
6852 if (is_zero_ether_addr(addr) ||
6853 is_broadcast_ether_addr(addr) ||
6854 is_multicast_ether_addr(addr)) {
6855 dev_err(&hdev->pdev->dev,
6856 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
9b2f3477 6857 addr, is_zero_ether_addr(addr),
6858 is_broadcast_ether_addr(addr),
6859 is_multicast_ether_addr(addr));
6860 return -EINVAL;
6861 }
6862
6863 memset(&req, 0, sizeof(req));
a90bb9a5 6864
e4e87715
PL
6865 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
6866 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
a90bb9a5
YL
6867
6868 req.egress_port = cpu_to_le16(egress_port);
46a3df9f 6869
3a586422 6870 hclge_prepare_mac_addr(&req, addr, false);
46a3df9f 6871
6872 /* Look up the mac address in the mac_vlan table, and add
6873 * it if the entry does not exist. Duplicate unicast entries
6874 * are not allowed in the mac vlan table.
6875 */
6876 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
39932473
JS
6877 if (ret == -ENOENT) {
6878 if (!hclge_is_umv_space_full(vport)) {
6879 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
6880 if (!ret)
6881 hclge_update_umv_space(vport, false);
6882 return ret;
6883 }
6884
6885 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
6886 hdev->priv_umv_size);
6887
6888 return -ENOSPC;
6889 }
6890
6891 /* check if we just hit the duplicate */
6892 if (!ret) {
6893 dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
6894 vport->vport_id, addr);
6895 return 0;
6896 }
6897
6898 dev_err(&hdev->pdev->dev,
6899 "PF failed to add unicast entry(%pM) in the MAC table\n",
6900 addr);
46a3df9f 6901
aa7a795e 6902 return ret;
46a3df9f
S
6903}
6904
6905static int hclge_rm_uc_addr(struct hnae3_handle *handle,
6906 const unsigned char *addr)
6907{
6908 struct hclge_vport *vport = hclge_get_vport(handle);
6909
6910 return hclge_rm_uc_addr_common(vport, addr);
6911}
6912
6913int hclge_rm_uc_addr_common(struct hclge_vport *vport,
6914 const unsigned char *addr)
6915{
6916 struct hclge_dev *hdev = vport->back;
d44f9b63 6917 struct hclge_mac_vlan_tbl_entry_cmd req;
aa7a795e 6918 int ret;
46a3df9f
S
6919
6920 /* mac addr check */
6921 if (is_zero_ether_addr(addr) ||
6922 is_broadcast_ether_addr(addr) ||
6923 is_multicast_ether_addr(addr)) {
9b2f3477
WL
6924 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
6925 addr);
46a3df9f
S
6926 return -EINVAL;
6927 }
6928
6929 memset(&req, 0, sizeof(req));
e4e87715 6930 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
3a586422 6931 hclge_prepare_mac_addr(&req, addr, false);
aa7a795e 6932 ret = hclge_remove_mac_vlan_tbl(vport, &req);
39932473
JS
6933 if (!ret)
6934 hclge_update_umv_space(vport, true);
46a3df9f 6935
aa7a795e 6936 return ret;
46a3df9f
S
6937}
6938
6939static int hclge_add_mc_addr(struct hnae3_handle *handle,
6940 const unsigned char *addr)
6941{
6942 struct hclge_vport *vport = hclge_get_vport(handle);
6943
a10829c4 6944 return hclge_add_mc_addr_common(vport, addr);
46a3df9f
S
6945}
6946
6947int hclge_add_mc_addr_common(struct hclge_vport *vport,
6948 const unsigned char *addr)
6949{
6950 struct hclge_dev *hdev = vport->back;
d44f9b63 6951 struct hclge_mac_vlan_tbl_entry_cmd req;
46a3df9f 6952 struct hclge_desc desc[3];
46a3df9f
S
6953 int status;
6954
6955 /* mac addr check */
6956 if (!is_multicast_ether_addr(addr)) {
6957 dev_err(&hdev->pdev->dev,
6958 "Add mc mac err! invalid mac:%pM.\n",
6959 addr);
6960 return -EINVAL;
6961 }
6962 memset(&req, 0, sizeof(req));
e4e87715 6963 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
3a586422 6964 hclge_prepare_mac_addr(&req, addr, true);
46a3df9f 6965 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
63cbf7a9 6966 if (status) {
46a3df9f
S
6967 /* This mac addr do not exist, add new entry for it */
6968 memset(desc[0].data, 0, sizeof(desc[0].data));
6969 memset(desc[1].data, 0, sizeof(desc[0].data));
6970 memset(desc[2].data, 0, sizeof(desc[0].data));
46a3df9f 6971 }
63cbf7a9
YM
6972 status = hclge_update_desc_vfid(desc, vport->vport_id, false);
6973 if (status)
6974 return status;
6975 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
46a3df9f 6976
1f6db589
JS
6977 if (status == -ENOSPC)
6978 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
46a3df9f
S
6979
6980 return status;
6981}
6982
6983static int hclge_rm_mc_addr(struct hnae3_handle *handle,
6984 const unsigned char *addr)
6985{
6986 struct hclge_vport *vport = hclge_get_vport(handle);
6987
6988 return hclge_rm_mc_addr_common(vport, addr);
6989}
6990
6991int hclge_rm_mc_addr_common(struct hclge_vport *vport,
6992 const unsigned char *addr)
6993{
6994 struct hclge_dev *hdev = vport->back;
d44f9b63 6995 struct hclge_mac_vlan_tbl_entry_cmd req;
46a3df9f
S
6996 enum hclge_cmd_status status;
6997 struct hclge_desc desc[3];
46a3df9f
S
6998
6999 /* mac addr check */
7000 if (!is_multicast_ether_addr(addr)) {
7001 dev_dbg(&hdev->pdev->dev,
7002 "Remove mc mac err! invalid mac:%pM.\n",
7003 addr);
7004 return -EINVAL;
7005 }
7006
7007 memset(&req, 0, sizeof(req));
e4e87715 7008 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
3a586422 7009 hclge_prepare_mac_addr(&req, addr, true);
46a3df9f
S
7010 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7011 if (!status) {
7012 /* This mac addr exists, remove this handle's VFID from it */
63cbf7a9
YM
7013 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
7014 if (status)
7015 return status;
46a3df9f
S
7016
7017 if (hclge_is_all_function_id_zero(desc))
7018 /* All the vfids are zero, so we need to delete this entry */
7019 status = hclge_remove_mac_vlan_tbl(vport, &req);
7020 else
7021 /* Not all the vfids are zero, update the vfid */
7022 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7023
7024 } else {
40cca1c5
XW
7025 /* Maybe this mac address is in mta table, but it cannot be
7026 * deleted here because an entry of mta represents an address
7027 * range rather than a specific address. The delete action on
7028 * all entries will take effect in update_mta_status, called by
7029 * hns3_nic_set_rx_mode.
7030 */
7031 status = 0;
46a3df9f
S
7032 }
7033
46a3df9f
S
7034 return status;
7035}
7036
6dd86902 7037void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
7038 enum HCLGE_MAC_ADDR_TYPE mac_type)
7039{
7040 struct hclge_vport_mac_addr_cfg *mac_cfg;
7041 struct list_head *list;
7042
7043 if (!vport->vport_id)
7044 return;
7045
7046 mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
7047 if (!mac_cfg)
7048 return;
7049
7050 mac_cfg->hd_tbl_status = true;
7051 memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
7052
7053 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7054 &vport->uc_mac_list : &vport->mc_mac_list;
7055
7056 list_add_tail(&mac_cfg->node, list);
7057}
7058
7059void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
7060 bool is_write_tbl,
7061 enum HCLGE_MAC_ADDR_TYPE mac_type)
7062{
7063 struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7064 struct list_head *list;
7065 bool uc_flag, mc_flag;
7066
7067 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7068 &vport->uc_mac_list : &vport->mc_mac_list;
7069
7070 uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
7071 mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
7072
7073 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7074 if (strncmp(mac_cfg->mac_addr, mac_addr, ETH_ALEN) == 0) {
7075 if (uc_flag && mac_cfg->hd_tbl_status)
7076 hclge_rm_uc_addr_common(vport, mac_addr);
7077
7078 if (mc_flag && mac_cfg->hd_tbl_status)
7079 hclge_rm_mc_addr_common(vport, mac_addr);
7080
7081 list_del(&mac_cfg->node);
7082 kfree(mac_cfg);
7083 break;
7084 }
7085 }
7086}
7087
7088void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
7089 enum HCLGE_MAC_ADDR_TYPE mac_type)
7090{
7091 struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7092 struct list_head *list;
7093
7094 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7095 &vport->uc_mac_list : &vport->mc_mac_list;
7096
7097 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7098 if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
7099 hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
7100
7101 if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
7102 hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
7103
7104 mac_cfg->hd_tbl_status = false;
7105 if (is_del_list) {
7106 list_del(&mac_cfg->node);
7107 kfree(mac_cfg);
7108 }
7109 }
7110}
7111
7112void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
7113{
7114 struct hclge_vport_mac_addr_cfg *mac, *tmp;
7115 struct hclge_vport *vport;
7116 int i;
7117
7118 mutex_lock(&hdev->vport_cfg_mutex);
7119 for (i = 0; i < hdev->num_alloc_vport; i++) {
7120 vport = &hdev->vport[i];
7121 list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
7122 list_del(&mac->node);
7123 kfree(mac);
7124 }
7125
7126 list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
7127 list_del(&mac->node);
7128 kfree(mac);
7129 }
7130 }
7131 mutex_unlock(&hdev->vport_cfg_mutex);
7132}
7133
f5aac71c
FL
7134static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
7135 u16 cmdq_resp, u8 resp_code)
7136{
7137#define HCLGE_ETHERTYPE_SUCCESS_ADD 0
7138#define HCLGE_ETHERTYPE_ALREADY_ADD 1
7139#define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
7140#define HCLGE_ETHERTYPE_KEY_CONFLICT 3
7141
7142 int return_status;
7143
7144 if (cmdq_resp) {
7145 dev_err(&hdev->pdev->dev,
7146 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
7147 cmdq_resp);
7148 return -EIO;
7149 }
7150
7151 switch (resp_code) {
7152 case HCLGE_ETHERTYPE_SUCCESS_ADD:
7153 case HCLGE_ETHERTYPE_ALREADY_ADD:
7154 return_status = 0;
7155 break;
7156 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
7157 dev_err(&hdev->pdev->dev,
7158 "add mac ethertype failed for manager table overflow.\n");
7159 return_status = -EIO;
7160 break;
7161 case HCLGE_ETHERTYPE_KEY_CONFLICT:
7162 dev_err(&hdev->pdev->dev,
7163 "add mac ethertype failed for key conflict.\n");
7164 return_status = -EIO;
7165 break;
7166 default:
7167 dev_err(&hdev->pdev->dev,
7168 "add mac ethertype failed for undefined, code=%d.\n",
7169 resp_code);
7170 return_status = -EIO;
7171 }
7172
7173 return return_status;
7174}
7175
7176static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
7177 const struct hclge_mac_mgr_tbl_entry_cmd *req)
7178{
7179 struct hclge_desc desc;
7180 u8 resp_code;
7181 u16 retval;
7182 int ret;
7183
7184 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
7185 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
7186
7187 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7188 if (ret) {
7189 dev_err(&hdev->pdev->dev,
7190 "add mac ethertype failed for cmd_send, ret =%d.\n",
7191 ret);
7192 return ret;
7193 }
7194
7195 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7196 retval = le16_to_cpu(desc.retval);
7197
7198 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
7199}
7200
7201static int init_mgr_tbl(struct hclge_dev *hdev)
7202{
7203 int ret;
7204 int i;
7205
7206 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
7207 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
7208 if (ret) {
7209 dev_err(&hdev->pdev->dev,
7210 "add mac ethertype failed, ret =%d.\n",
7211 ret);
7212 return ret;
7213 }
7214 }
7215
7216 return 0;
7217}
7218
46a3df9f
S
7219static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
7220{
7221 struct hclge_vport *vport = hclge_get_vport(handle);
7222 struct hclge_dev *hdev = vport->back;
7223
7224 ether_addr_copy(p, hdev->hw.mac.mac_addr);
7225}
7226
59098055
FL
7227static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
7228 bool is_first)
46a3df9f
S
7229{
7230 const unsigned char *new_addr = (const unsigned char *)p;
7231 struct hclge_vport *vport = hclge_get_vport(handle);
7232 struct hclge_dev *hdev = vport->back;
18838d0c 7233 int ret;
46a3df9f
S
7234
7235 /* mac addr check */
7236 if (is_zero_ether_addr(new_addr) ||
7237 is_broadcast_ether_addr(new_addr) ||
7238 is_multicast_ether_addr(new_addr)) {
7239 dev_err(&hdev->pdev->dev,
7240 "Change uc mac err! invalid mac:%p.\n",
7241 new_addr);
7242 return -EINVAL;
7243 }
7244
962e31bd
YL
7245 if ((!is_first || is_kdump_kernel()) &&
7246 hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
18838d0c 7247 dev_warn(&hdev->pdev->dev,
59098055 7248 "remove old uc mac address fail.\n");
46a3df9f 7249
18838d0c
FL
7250 ret = hclge_add_uc_addr(handle, new_addr);
7251 if (ret) {
7252 dev_err(&hdev->pdev->dev,
7253 "add uc mac address fail, ret =%d.\n",
7254 ret);
7255
59098055
FL
7256 if (!is_first &&
7257 hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
18838d0c 7258 dev_err(&hdev->pdev->dev,
59098055 7259 "restore uc mac address fail.\n");
18838d0c
FL
7260
7261 return -EIO;
46a3df9f
S
7262 }
7263
e98d7183 7264 ret = hclge_pause_addr_cfg(hdev, new_addr);
18838d0c
FL
7265 if (ret) {
7266 dev_err(&hdev->pdev->dev,
7267 "configure mac pause address fail, ret =%d.\n",
7268 ret);
7269 return -EIO;
7270 }
7271
7272 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
7273
7274 return 0;
46a3df9f
S
7275}
7276
26483246
XW
7277static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
7278 int cmd)
7279{
7280 struct hclge_vport *vport = hclge_get_vport(handle);
7281 struct hclge_dev *hdev = vport->back;
7282
7283 if (!hdev->hw.mac.phydev)
7284 return -EOPNOTSUPP;
7285
7286 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
7287}
7288
46a3df9f 7289static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
30ebc576 7290 u8 fe_type, bool filter_en, u8 vf_id)
46a3df9f 7291{
d44f9b63 7292 struct hclge_vlan_filter_ctrl_cmd *req;
46a3df9f
S
7293 struct hclge_desc desc;
7294 int ret;
7295
7296 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
7297
d44f9b63 7298 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
46a3df9f 7299 req->vlan_type = vlan_type;
64d114f0 7300 req->vlan_fe = filter_en ? fe_type : 0;
30ebc576 7301 req->vf_id = vf_id;
46a3df9f
S
7302
7303 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 7304 if (ret)
46a3df9f
S
7305 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
7306 ret);
46a3df9f 7307
3f639907 7308 return ret;
46a3df9f
S
7309}
7310
391b5e93
JS
7311#define HCLGE_FILTER_TYPE_VF 0
7312#define HCLGE_FILTER_TYPE_PORT 1
64d114f0
ZL
7313#define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0)
7314#define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0)
7315#define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1)
7316#define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
7317#define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3)
7318#define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \
7319 | HCLGE_FILTER_FE_ROCE_EGRESS_B)
7320#define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \
7321 | HCLGE_FILTER_FE_ROCE_INGRESS_B)
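/* Illustrative values of the combined masks above: HCLGE_FILTER_FE_EGRESS is
 * BIT(1) | BIT(3) = 0x0A and HCLGE_FILTER_FE_INGRESS is BIT(0) | BIT(2) = 0x05,
 * while revision 0x20 hardware only understands the single
 * HCLGE_FILTER_FE_EGRESS_V1_B bit.
 */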
391b5e93
JS
7322
7323static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
7324{
7325 struct hclge_vport *vport = hclge_get_vport(handle);
7326 struct hclge_dev *hdev = vport->back;
7327
64d114f0
ZL
7328 if (hdev->pdev->revision >= 0x21) {
7329 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
30ebc576 7330 HCLGE_FILTER_FE_EGRESS, enable, 0);
64d114f0 7331 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
30ebc576 7332 HCLGE_FILTER_FE_INGRESS, enable, 0);
64d114f0
ZL
7333 } else {
7334 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
30ebc576
JS
7335 HCLGE_FILTER_FE_EGRESS_V1_B, enable,
7336 0);
64d114f0 7337 }
c60edc17
JS
7338 if (enable)
7339 handle->netdev_flags |= HNAE3_VLAN_FLTR;
7340 else
7341 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
391b5e93
JS
7342}
7343
ebaf1908 7344static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
dc8131d8
YL
7345 bool is_kill, u16 vlan, u8 qos,
7346 __be16 proto)
46a3df9f
S
7347{
7348#define HCLGE_MAX_VF_BYTES 16
d44f9b63
YL
7349 struct hclge_vlan_filter_vf_cfg_cmd *req0;
7350 struct hclge_vlan_filter_vf_cfg_cmd *req1;
46a3df9f
S
7351 struct hclge_desc desc[2];
7352 u8 vf_byte_val;
7353 u8 vf_byte_off;
7354 int ret;
7355
81a9255e
JS
7356 /* if the vf vlan table is full, the firmware closes the vf vlan filter;
7357 * it is then impossible and unnecessary to add a new vlan id to it
7358 */
7359 if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill)
7360 return 0;
7361
46a3df9f
S
7362 hclge_cmd_setup_basic_desc(&desc[0],
7363 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7364 hclge_cmd_setup_basic_desc(&desc[1],
7365 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7366
7367 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7368
7369 vf_byte_off = vfid / 8;
7370 vf_byte_val = 1 << (vfid % 8);
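	/* Worked example (illustrative): for vfid 10, vf_byte_off = 10 / 8 = 1
	 * and vf_byte_val = 1 << (10 % 8) = 0x04, so bit 2 of bitmap byte 1 is
	 * set in req0; for vfid 130, vf_byte_off = 16, which is not less than
	 * HCLGE_MAX_VF_BYTES, so the bit lands in byte 0 of req1 instead.
	 */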
7371
d44f9b63
YL
7372 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
7373 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
46a3df9f 7374
a90bb9a5 7375 req0->vlan_id = cpu_to_le16(vlan);
46a3df9f
S
7376 req0->vlan_cfg = is_kill;
7377
7378 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
7379 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
7380 else
7381 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
7382
7383 ret = hclge_cmd_send(&hdev->hw, desc, 2);
7384 if (ret) {
7385 dev_err(&hdev->pdev->dev,
7386 "Send vf vlan command fail, ret =%d.\n",
7387 ret);
7388 return ret;
7389 }
7390
7391 if (!is_kill) {
6c251711 7392#define HCLGE_VF_VLAN_NO_ENTRY 2
46a3df9f
S
7393 if (!req0->resp_code || req0->resp_code == 1)
7394 return 0;
7395
6c251711 7396 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
81a9255e 7397 set_bit(vfid, hdev->vf_vlan_full);
6c251711
YL
7398 dev_warn(&hdev->pdev->dev,
7399 "vf vlan table is full, vf vlan filter is disabled\n");
7400 return 0;
7401 }
7402
46a3df9f
S
7403 dev_err(&hdev->pdev->dev,
7404 "Add vf vlan filter fail, ret =%d.\n",
7405 req0->resp_code);
7406 } else {
41dafea2 7407#define HCLGE_VF_VLAN_DEL_NO_FOUND 1
46a3df9f
S
7408 if (!req0->resp_code)
7409 return 0;
7410
d0c31df2
JS
7411 /* The vf vlan filter is disabled when the vf vlan table is full,
7412 * so new vlan ids are not added to the vf vlan table.
7413 * Just return 0 without a warning, to avoid massive verbose
7414 * logs at unload time.
7415 */
7416 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
41dafea2 7417 return 0;
41dafea2 7418
46a3df9f
S
7419 dev_err(&hdev->pdev->dev,
7420 "Kill vf vlan filter fail, ret =%d.\n",
7421 req0->resp_code);
7422 }
7423
7424 return -EIO;
7425}
7426
dc8131d8
YL
7427static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
7428 u16 vlan_id, bool is_kill)
46a3df9f 7429{
d44f9b63 7430 struct hclge_vlan_filter_pf_cfg_cmd *req;
46a3df9f
S
7431 struct hclge_desc desc;
7432 u8 vlan_offset_byte_val;
7433 u8 vlan_offset_byte;
7434 u8 vlan_offset_160;
7435 int ret;
7436
7437 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
7438
7439 vlan_offset_160 = vlan_id / 160;
7440 vlan_offset_byte = (vlan_id % 160) / 8;
7441 vlan_offset_byte_val = 1 << (vlan_id % 8);
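	/* Worked example (illustrative): for vlan_id 200, vlan_offset_160 is
	 * 200 / 160 = 1, vlan_offset_byte is (200 % 160) / 8 = 5 and
	 * vlan_offset_byte_val is 1 << (200 % 8) = 0x01, i.e. bit 0 of byte 5
	 * in the second 160-vlan block of the port vlan bitmap.
	 */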
7442
d44f9b63 7443 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
46a3df9f
S
7444 req->vlan_offset = vlan_offset_160;
7445 req->vlan_cfg = is_kill;
7446 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
7447
7448 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
dc8131d8
YL
7449 if (ret)
7450 dev_err(&hdev->pdev->dev,
7451 "port vlan command, send fail, ret =%d.\n", ret);
7452 return ret;
7453}
7454
7455static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
7456 u16 vport_id, u16 vlan_id, u8 qos,
7457 bool is_kill)
7458{
7459 u16 vport_idx, vport_num = 0;
7460 int ret;
7461
daaa8521
YL
7462 if (is_kill && !vlan_id)
7463 return 0;
7464
dc8131d8
YL
7465 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
7466 0, proto);
46a3df9f
S
7467 if (ret) {
7468 dev_err(&hdev->pdev->dev,
dc8131d8
YL
7469 "Set %d vport vlan filter config fail, ret =%d.\n",
7470 vport_id, ret);
46a3df9f
S
7471 return ret;
7472 }
7473
dc8131d8
YL
7474 /* vlan 0 may be added twice when 8021q module is enabled */
7475 if (!is_kill && !vlan_id &&
7476 test_bit(vport_id, hdev->vlan_table[vlan_id]))
7477 return 0;
7478
7479 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
46a3df9f 7480 dev_err(&hdev->pdev->dev,
dc8131d8
YL
7481 "Add port vlan failed, vport %d is already in vlan %d\n",
7482 vport_id, vlan_id);
7483 return -EINVAL;
46a3df9f
S
7484 }
7485
dc8131d8
YL
7486 if (is_kill &&
7487 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
7488 dev_err(&hdev->pdev->dev,
7489 "Delete port vlan failed, vport %d is not in vlan %d\n",
7490 vport_id, vlan_id);
7491 return -EINVAL;
7492 }
7493
54e97d11 7494 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
dc8131d8
YL
7495 vport_num++;
7496
7497 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
7498 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
7499 is_kill);
7500
7501 return ret;
7502}
7503
5f6ea83f
PL
7504static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
7505{
7506 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
7507 struct hclge_vport_vtag_tx_cfg_cmd *req;
7508 struct hclge_dev *hdev = vport->back;
7509 struct hclge_desc desc;
7510 int status;
7511
7512 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
7513
7514 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
7515 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
7516 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
e4e87715
PL
7517 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
7518 vcfg->accept_tag1 ? 1 : 0);
7519 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
7520 vcfg->accept_untag1 ? 1 : 0);
7521 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
7522 vcfg->accept_tag2 ? 1 : 0);
7523 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
7524 vcfg->accept_untag2 ? 1 : 0);
7525 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
7526 vcfg->insert_tag1_en ? 1 : 0);
7527 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
7528 vcfg->insert_tag2_en ? 1 : 0);
7529 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
5f6ea83f
PL
7530
7531 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7532 req->vf_bitmap[req->vf_offset] =
7533 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7534
7535 status = hclge_cmd_send(&hdev->hw, &desc, 1);
7536 if (status)
7537 dev_err(&hdev->pdev->dev,
7538 "Send port txvlan cfg command fail, ret =%d\n",
7539 status);
7540
7541 return status;
7542}
7543
7544static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
7545{
7546 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
7547 struct hclge_vport_vtag_rx_cfg_cmd *req;
7548 struct hclge_dev *hdev = vport->back;
7549 struct hclge_desc desc;
7550 int status;
7551
7552 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
7553
7554 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
e4e87715
PL
7555 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
7556 vcfg->strip_tag1_en ? 1 : 0);
7557 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
7558 vcfg->strip_tag2_en ? 1 : 0);
7559 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
7560 vcfg->vlan1_vlan_prionly ? 1 : 0);
7561 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
7562 vcfg->vlan2_vlan_prionly ? 1 : 0);
5f6ea83f
PL
7563
7564 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7565 req->vf_bitmap[req->vf_offset] =
7566 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7567
7568 status = hclge_cmd_send(&hdev->hw, &desc, 1);
7569 if (status)
7570 dev_err(&hdev->pdev->dev,
7571 "Send port rxvlan cfg command fail, ret =%d\n",
7572 status);
7573
7574 return status;
7575}
7576
741fca16
JS
7577static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
7578 u16 port_base_vlan_state,
7579 u16 vlan_tag)
7580{
7581 int ret;
7582
7583 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7584 vport->txvlan_cfg.accept_tag1 = true;
7585 vport->txvlan_cfg.insert_tag1_en = false;
7586 vport->txvlan_cfg.default_tag1 = 0;
7587 } else {
7588 vport->txvlan_cfg.accept_tag1 = false;
7589 vport->txvlan_cfg.insert_tag1_en = true;
7590 vport->txvlan_cfg.default_tag1 = vlan_tag;
7591 }
7592
7593 vport->txvlan_cfg.accept_untag1 = true;
7594
7595 /* accept_tag2 and accept_untag2 are not supported on
7596 * pdev revision(0x20); newer revisions support them, but
7597 * these two fields cannot be configured by the user.
7598 */
7599 vport->txvlan_cfg.accept_tag2 = true;
7600 vport->txvlan_cfg.accept_untag2 = true;
7601 vport->txvlan_cfg.insert_tag2_en = false;
7602 vport->txvlan_cfg.default_tag2 = 0;
7603
7604 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7605 vport->rxvlan_cfg.strip_tag1_en = false;
7606 vport->rxvlan_cfg.strip_tag2_en =
7607 vport->rxvlan_cfg.rx_vlan_offload_en;
7608 } else {
7609 vport->rxvlan_cfg.strip_tag1_en =
7610 vport->rxvlan_cfg.rx_vlan_offload_en;
7611 vport->rxvlan_cfg.strip_tag2_en = true;
7612 }
7613 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7614 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7615
7616 ret = hclge_set_vlan_tx_offload_cfg(vport);
7617 if (ret)
7618 return ret;
7619
7620 return hclge_set_vlan_rx_offload_cfg(vport);
7621}
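/* Illustrative effect of the configuration above: with port based VLAN
 * disabled, tx accepts both tag1-tagged and untagged frames from the stack
 * and rx strips tag2 only when rx_vlan_offload_en is set; with port based
 * VLAN enabled, tx rejects stack tag1 frames and inserts default_tag1 (the
 * port VLAN tag) instead, while rx always strips tag2 and strips tag1 only
 * when rx_vlan_offload_en is set.
 */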
7622
5f6ea83f
PL
7623static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
7624{
7625 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
7626 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
7627 struct hclge_desc desc;
7628 int status;
7629
7630 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
7631 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
7632 rx_req->ot_fst_vlan_type =
7633 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
7634 rx_req->ot_sec_vlan_type =
7635 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
7636 rx_req->in_fst_vlan_type =
7637 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
7638 rx_req->in_sec_vlan_type =
7639 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
7640
7641 status = hclge_cmd_send(&hdev->hw, &desc, 1);
7642 if (status) {
7643 dev_err(&hdev->pdev->dev,
7644 "Send rxvlan protocol type command fail, ret =%d\n",
7645 status);
7646 return status;
7647 }
7648
7649 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
7650
d0d72bac 7651 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
5f6ea83f
PL
7652 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
7653 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
7654
7655 status = hclge_cmd_send(&hdev->hw, &desc, 1);
7656 if (status)
7657 dev_err(&hdev->pdev->dev,
7658 "Send txvlan protocol type command fail, ret =%d\n",
7659 status);
7660
7661 return status;
7662}
7663
46a3df9f
S
7664static int hclge_init_vlan_config(struct hclge_dev *hdev)
7665{
5f6ea83f
PL
7666#define HCLGE_DEF_VLAN_TYPE 0x8100
7667
c60edc17 7668 struct hnae3_handle *handle = &hdev->vport[0].nic;
5f6ea83f 7669 struct hclge_vport *vport;
46a3df9f 7670 int ret;
5f6ea83f
PL
7671 int i;
7672
64d114f0 7673 if (hdev->pdev->revision >= 0x21) {
30ebc576
JS
7674 /* for revision 0x21, vf vlan filter is per function */
7675 for (i = 0; i < hdev->num_alloc_vport; i++) {
7676 vport = &hdev->vport[i];
7677 ret = hclge_set_vlan_filter_ctrl(hdev,
7678 HCLGE_FILTER_TYPE_VF,
7679 HCLGE_FILTER_FE_EGRESS,
7680 true,
7681 vport->vport_id);
7682 if (ret)
7683 return ret;
7684 }
46a3df9f 7685
64d114f0 7686 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
30ebc576
JS
7687 HCLGE_FILTER_FE_INGRESS, true,
7688 0);
64d114f0
ZL
7689 if (ret)
7690 return ret;
7691 } else {
7692 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7693 HCLGE_FILTER_FE_EGRESS_V1_B,
30ebc576 7694 true, 0);
64d114f0
ZL
7695 if (ret)
7696 return ret;
7697 }
46a3df9f 7698
c60edc17
JS
7699 handle->netdev_flags |= HNAE3_VLAN_FLTR;
7700
5f6ea83f
PL
7701 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7702 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7703 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7704 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7705 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
7706 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
7707
7708 ret = hclge_set_vlan_protocol_type(hdev);
5e43aef8
L
7709 if (ret)
7710 return ret;
46a3df9f 7711
5f6ea83f 7712 for (i = 0; i < hdev->num_alloc_vport; i++) {
741fca16 7713 u16 vlan_tag;
dcb35cce 7714
741fca16
JS
7715 vport = &hdev->vport[i];
7716 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
5f6ea83f 7717
741fca16
JS
7718 ret = hclge_vlan_offload_cfg(vport,
7719 vport->port_base_vlan_cfg.state,
7720 vlan_tag);
5f6ea83f
PL
7721 if (ret)
7722 return ret;
7723 }
7724
dc8131d8 7725 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
46a3df9f
S
7726}
7727
21e043cd
JS
7728static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7729 bool writen_to_tbl)
c6075b19 7730{
7731 struct hclge_vport_vlan_cfg *vlan;
7732
c6075b19 7733 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
7734 if (!vlan)
7735 return;
7736
21e043cd 7737 vlan->hd_tbl_status = writen_to_tbl;
c6075b19 7738 vlan->vlan_id = vlan_id;
7739
7740 list_add_tail(&vlan->node, &vport->vlan_list);
7741}
7742
21e043cd
JS
7743static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
7744{
7745 struct hclge_vport_vlan_cfg *vlan, *tmp;
7746 struct hclge_dev *hdev = vport->back;
7747 int ret;
7748
7749 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7750 if (!vlan->hd_tbl_status) {
7751 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
7752 vport->vport_id,
7753 vlan->vlan_id, 0, false);
7754 if (ret) {
7755 dev_err(&hdev->pdev->dev,
7756 "restore vport vlan list failed, ret=%d\n",
7757 ret);
7758 return ret;
7759 }
7760 }
7761 vlan->hd_tbl_status = true;
7762 }
7763
7764 return 0;
7765}
7766
7767static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7768 bool is_write_tbl)
c6075b19 7769{
7770 struct hclge_vport_vlan_cfg *vlan, *tmp;
7771 struct hclge_dev *hdev = vport->back;
7772
7773 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7774 if (vlan->vlan_id == vlan_id) {
7775 if (is_write_tbl && vlan->hd_tbl_status)
7776 hclge_set_vlan_filter_hw(hdev,
7777 htons(ETH_P_8021Q),
7778 vport->vport_id,
7779 vlan_id, 0,
7780 true);
7781
7782 list_del(&vlan->node);
7783 kfree(vlan);
7784 break;
7785 }
7786 }
7787}
7788
7789void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
7790{
7791 struct hclge_vport_vlan_cfg *vlan, *tmp;
7792 struct hclge_dev *hdev = vport->back;
7793
7794 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7795 if (vlan->hd_tbl_status)
7796 hclge_set_vlan_filter_hw(hdev,
7797 htons(ETH_P_8021Q),
7798 vport->vport_id,
7799 vlan->vlan_id, 0,
7800 true);
7801
7802 vlan->hd_tbl_status = false;
7803 if (is_del_list) {
7804 list_del(&vlan->node);
7805 kfree(vlan);
7806 }
7807 }
7808}
7809
7810void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
7811{
7812 struct hclge_vport_vlan_cfg *vlan, *tmp;
7813 struct hclge_vport *vport;
7814 int i;
7815
7816 mutex_lock(&hdev->vport_cfg_mutex);
7817 for (i = 0; i < hdev->num_alloc_vport; i++) {
7818 vport = &hdev->vport[i];
7819 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7820 list_del(&vlan->node);
7821 kfree(vlan);
7822 }
7823 }
7824 mutex_unlock(&hdev->vport_cfg_mutex);
7825}
7826
b524b38f
JS
7827static void hclge_restore_vlan_table(struct hnae3_handle *handle)
7828{
7829 struct hclge_vport *vport = hclge_get_vport(handle);
7830 struct hclge_vport_vlan_cfg *vlan, *tmp;
7831 struct hclge_dev *hdev = vport->back;
7832 u16 vlan_proto, qos;
7833 u16 state, vlan_id;
7834 int i;
7835
7836 mutex_lock(&hdev->vport_cfg_mutex);
7837 for (i = 0; i < hdev->num_alloc_vport; i++) {
7838 vport = &hdev->vport[i];
7839 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
7840 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
7841 qos = vport->port_base_vlan_cfg.vlan_info.qos;
7842 state = vport->port_base_vlan_cfg.state;
7843
7844 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
7845 hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
7846 vport->vport_id, vlan_id, qos,
7847 false);
7848 continue;
7849 }
7850
7851 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7852 if (vlan->hd_tbl_status)
7853 hclge_set_vlan_filter_hw(hdev,
7854 htons(ETH_P_8021Q),
7855 vport->vport_id,
7856 vlan->vlan_id, 0,
7857 false);
7858 }
7859 }
7860
7861 mutex_unlock(&hdev->vport_cfg_mutex);
7862}
7863
b2641e2a 7864int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
052ece6d
PL
7865{
7866 struct hclge_vport *vport = hclge_get_vport(handle);
7867
44e626f7
JS
7868 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7869 vport->rxvlan_cfg.strip_tag1_en = false;
7870 vport->rxvlan_cfg.strip_tag2_en = enable;
7871 } else {
7872 vport->rxvlan_cfg.strip_tag1_en = enable;
7873 vport->rxvlan_cfg.strip_tag2_en = true;
7874 }
052ece6d
PL
7875 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7876 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
44e626f7 7877 vport->rxvlan_cfg.rx_vlan_offload_en = enable;
052ece6d
PL
7878
7879 return hclge_set_vlan_rx_offload_cfg(vport);
7880}
7881
21e043cd
JS
7882static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
7883 u16 port_base_vlan_state,
7884 struct hclge_vlan_info *new_info,
7885 struct hclge_vlan_info *old_info)
7886{
7887 struct hclge_dev *hdev = vport->back;
7888 int ret;
7889
7890 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
7891 hclge_rm_vport_all_vlan_table(vport, false);
7892 return hclge_set_vlan_filter_hw(hdev,
7893 htons(new_info->vlan_proto),
7894 vport->vport_id,
7895 new_info->vlan_tag,
7896 new_info->qos, false);
7897 }
7898
7899 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
7900 vport->vport_id, old_info->vlan_tag,
7901 old_info->qos, true);
7902 if (ret)
7903 return ret;
7904
7905 return hclge_add_vport_all_vlan_table(vport);
7906}
7907
7908int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
7909 struct hclge_vlan_info *vlan_info)
7910{
7911 struct hnae3_handle *nic = &vport->nic;
7912 struct hclge_vlan_info *old_vlan_info;
7913 struct hclge_dev *hdev = vport->back;
7914 int ret;
7915
7916 old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
7917
7918 ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
7919 if (ret)
7920 return ret;
7921
7922 if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
7923 /* add new VLAN tag */
8a9a654b
JS
7924 ret = hclge_set_vlan_filter_hw(hdev,
7925 htons(vlan_info->vlan_proto),
21e043cd
JS
7926 vport->vport_id,
7927 vlan_info->vlan_tag,
7928 vlan_info->qos, false);
7929 if (ret)
7930 return ret;
7931
7932 /* remove old VLAN tag */
8a9a654b
JS
7933 ret = hclge_set_vlan_filter_hw(hdev,
7934 htons(old_vlan_info->vlan_proto),
21e043cd
JS
7935 vport->vport_id,
7936 old_vlan_info->vlan_tag,
7937 old_vlan_info->qos, true);
7938 if (ret)
7939 return ret;
7940
7941 goto update;
7942 }
7943
7944 ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
7945 old_vlan_info);
7946 if (ret)
7947 return ret;
7948
7949 /* update state only when disable/enable port based VLAN */
7950 vport->port_base_vlan_cfg.state = state;
7951 if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
7952 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
7953 else
7954 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
7955
7956update:
7957 vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
7958 vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
7959 vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
7960
7961 return 0;
7962}
7963
7964static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
7965 enum hnae3_port_base_vlan_state state,
7966 u16 vlan)
7967{
7968 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7969 if (!vlan)
7970 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7971 else
7972 return HNAE3_PORT_BASE_VLAN_ENABLE;
7973 } else {
7974 if (!vlan)
7975 return HNAE3_PORT_BASE_VLAN_DISABLE;
7976 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
7977 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7978 else
7979 return HNAE3_PORT_BASE_VLAN_MODIFY;
7980 }
7981}
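/* Illustrative decision table for the helper above (current state, requested
 * vlan -> result): DISABLE + vlan 0 -> NOCHANGE, DISABLE + non-zero ->
 * ENABLE, ENABLE + vlan 0 -> DISABLE, ENABLE + same tag -> NOCHANGE,
 * ENABLE + different tag -> MODIFY.
 */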
7982
7983static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
7984 u16 vlan, u8 qos, __be16 proto)
7985{
7986 struct hclge_vport *vport = hclge_get_vport(handle);
7987 struct hclge_dev *hdev = vport->back;
7988 struct hclge_vlan_info vlan_info;
7989 u16 state;
7990 int ret;
7991
7992 if (hdev->pdev->revision == 0x20)
7993 return -EOPNOTSUPP;
7994
7995 /* qos is a 3-bit value, so it cannot be bigger than 7 */
7996 if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
7997 return -EINVAL;
7998 if (proto != htons(ETH_P_8021Q))
7999 return -EPROTONOSUPPORT;
8000
8001 vport = &hdev->vport[vfid];
8002 state = hclge_get_port_base_vlan_state(vport,
8003 vport->port_base_vlan_cfg.state,
8004 vlan);
8005 if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
8006 return 0;
8007
8008 vlan_info.vlan_tag = vlan;
8009 vlan_info.qos = qos;
8010 vlan_info.vlan_proto = ntohs(proto);
8011
8012 /* update port based VLAN for PF */
8013 if (!vfid) {
8014 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
8015 ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
8016 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
8017
8018 return ret;
8019 }
8020
92f11ea1
JS
8021 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
8022 return hclge_update_port_base_vlan_cfg(vport, state,
8023 &vlan_info);
8024 } else {
8025 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
8026 (u8)vfid, state,
8027 vlan, qos,
8028 ntohs(proto));
8029 return ret;
8030 }
21e043cd
JS
8031}
8032
8033int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
8034 u16 vlan_id, bool is_kill)
8035{
8036 struct hclge_vport *vport = hclge_get_vport(handle);
8037 struct hclge_dev *hdev = vport->back;
8038 bool writen_to_tbl = false;
8039 int ret = 0;
8040
fe4144d4
JS
8041 /* While the device is resetting, the firmware is unable to handle
8042 * the mailbox. Just record the vlan id, and remove it after the
8043 * reset has finished.
8044 */
8045 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
8046 set_bit(vlan_id, vport->vlan_del_fail_bmap);
8047 return -EBUSY;
8048 }
8049
46ee7350 8050 /* when port base vlan is enabled, we use the port base vlan as the vlan
fe4144d4
JS
8051 * filter entry. In this case, we don't update the vlan filter table
8052 * when the user adds a new vlan or removes an existing one, we just
8053 * update the vport vlan list. The vlan ids in the vlan list are not
8054 * written to the vlan filter table until port base vlan is disabled
21e043cd
JS
8055 */
8056 if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8057 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
8058 vlan_id, 0, is_kill);
8059 writen_to_tbl = true;
8060 }
8061
fe4144d4
JS
8062 if (!ret) {
8063 if (is_kill)
8064 hclge_rm_vport_vlan_table(vport, vlan_id, false);
8065 else
8066 hclge_add_vport_vlan_table(vport, vlan_id,
8067 writen_to_tbl);
8068 } else if (is_kill) {
46ee7350 8069 /* when removing the hw vlan filter fails, record the vlan id
fe4144d4
JS
8070 * and try to remove it from hw later, to be consistent
8071 * with the stack
8072 */
8073 set_bit(vlan_id, vport->vlan_del_fail_bmap);
8074 }
8075 return ret;
8076}
21e043cd 8077
fe4144d4
JS
8078static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
8079{
8080#define HCLGE_MAX_SYNC_COUNT 60
21e043cd 8081
fe4144d4
JS
8082 int i, ret, sync_cnt = 0;
8083 u16 vlan_id;
8084
8085 /* start from vport 1 for PF is always alive */
8086 for (i = 0; i < hdev->num_alloc_vport; i++) {
8087 struct hclge_vport *vport = &hdev->vport[i];
8088
8089 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
8090 VLAN_N_VID);
8091 while (vlan_id != VLAN_N_VID) {
8092 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8093 vport->vport_id, vlan_id,
8094 0, true);
8095 if (ret && ret != -EINVAL)
8096 return;
8097
8098 clear_bit(vlan_id, vport->vlan_del_fail_bmap);
8099 hclge_rm_vport_vlan_table(vport, vlan_id, false);
8100
8101 sync_cnt++;
8102 if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
8103 return;
8104
8105 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
8106 VLAN_N_VID);
8107 }
8108 }
21e043cd
JS
8109}
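/* Illustrative note on the sync loop above: each pass retries at most
 * HCLGE_MAX_SYNC_COUNT (60) failed vlan deletions across all vports;
 * anything left over stays in vlan_del_fail_bmap and is retried the next
 * time hclge_sync_vlan_filter() runs.
 */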
8110
e6d7d79d 8111static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
46a3df9f 8112{
d44f9b63 8113 struct hclge_config_max_frm_size_cmd *req;
46a3df9f 8114 struct hclge_desc desc;
46a3df9f 8115
46a3df9f
S
8116 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
8117
d44f9b63 8118 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
e6d7d79d 8119 req->max_frm_size = cpu_to_le16(new_mps);
8fc7346c 8120 req->min_frm_size = HCLGE_MAC_MIN_FRAME;
46a3df9f 8121
e6d7d79d 8122 return hclge_cmd_send(&hdev->hw, &desc, 1);
46a3df9f
S
8123}
8124
dd72140c
FL
8125static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
8126{
8127 struct hclge_vport *vport = hclge_get_vport(handle);
818f1675
YL
8128
8129 return hclge_set_vport_mtu(vport, new_mtu);
8130}
8131
8132int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
8133{
dd72140c 8134 struct hclge_dev *hdev = vport->back;
63cbf7a9 8135 int i, max_frm_size, ret;
dd72140c 8136
e6d7d79d
YL
8137 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
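	/* Worked example (illustrative): for a standard 1500 byte MTU,
	 * max_frm_size = 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) +
	 * 2 * VLAN_HLEN (4) = 1526, large enough for a double-tagged frame.
	 */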
8138 if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
8139 max_frm_size > HCLGE_MAC_MAX_FRAME)
8140 return -EINVAL;
8141
818f1675
YL
8142 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
8143 mutex_lock(&hdev->vport_lock);
8144 /* VF's mps must fit within hdev->mps */
8145 if (vport->vport_id && max_frm_size > hdev->mps) {
8146 mutex_unlock(&hdev->vport_lock);
8147 return -EINVAL;
8148 } else if (vport->vport_id) {
8149 vport->mps = max_frm_size;
8150 mutex_unlock(&hdev->vport_lock);
8151 return 0;
8152 }
8153
8154 /* PF's mps must be greater than the VF's mps */
8155 for (i = 1; i < hdev->num_alloc_vport; i++)
8156 if (max_frm_size < hdev->vport[i].mps) {
8157 mutex_unlock(&hdev->vport_lock);
8158 return -EINVAL;
8159 }
8160
cdca4c48
YL
8161 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
8162
e6d7d79d 8163 ret = hclge_set_mac_mtu(hdev, max_frm_size);
dd72140c
FL
8164 if (ret) {
8165 dev_err(&hdev->pdev->dev,
8166 "Change mtu fail, ret =%d\n", ret);
818f1675 8167 goto out;
dd72140c
FL
8168 }
8169
e6d7d79d 8170 hdev->mps = max_frm_size;
818f1675 8171 vport->mps = max_frm_size;
e6d7d79d 8172
dd72140c
FL
8173 ret = hclge_buffer_alloc(hdev);
8174 if (ret)
8175 dev_err(&hdev->pdev->dev,
8176 "Allocate buffer fail, ret =%d\n", ret);
8177
818f1675 8178out:
cdca4c48 8179 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
818f1675 8180 mutex_unlock(&hdev->vport_lock);
dd72140c
FL
8181 return ret;
8182}
8183
46a3df9f
S
8184static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
8185 bool enable)
8186{
d44f9b63 8187 struct hclge_reset_tqp_queue_cmd *req;
46a3df9f
S
8188 struct hclge_desc desc;
8189 int ret;
8190
8191 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
8192
d44f9b63 8193 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
46a3df9f 8194 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
b9a8f883
YL
8195 if (enable)
8196 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
46a3df9f
S
8197
8198 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8199 if (ret) {
8200 dev_err(&hdev->pdev->dev,
8201 "Send tqp reset cmd error, status =%d\n", ret);
8202 return ret;
8203 }
8204
8205 return 0;
8206}
8207
8208static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
8209{
d44f9b63 8210 struct hclge_reset_tqp_queue_cmd *req;
46a3df9f
S
8211 struct hclge_desc desc;
8212 int ret;
8213
8214 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
8215
d44f9b63 8216 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
46a3df9f
S
8217 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8218
8219 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8220 if (ret) {
8221 dev_err(&hdev->pdev->dev,
8222 "Get reset status error, status =%d\n", ret);
8223 return ret;
8224 }
8225
e4e87715 8226 return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
46a3df9f
S
8227}
8228
0c29d191 8229u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
814e0274
PL
8230{
8231 struct hnae3_queue *queue;
8232 struct hclge_tqp *tqp;
8233
8234 queue = handle->kinfo.tqp[queue_id];
8235 tqp = container_of(queue, struct hclge_tqp, q);
8236
8237 return tqp->index;
8238}
8239
7fa6be4f 8240int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
46a3df9f
S
8241{
8242 struct hclge_vport *vport = hclge_get_vport(handle);
8243 struct hclge_dev *hdev = vport->back;
8244 int reset_try_times = 0;
8245 int reset_status;
814e0274 8246 u16 queue_gid;
63cbf7a9 8247 int ret;
46a3df9f 8248
814e0274
PL
8249 queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
8250
46a3df9f
S
8251 ret = hclge_tqp_enable(hdev, queue_id, 0, false);
8252 if (ret) {
7fa6be4f
HT
8253 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
8254 return ret;
46a3df9f
S
8255 }
8256
814e0274 8257 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
46a3df9f 8258 if (ret) {
7fa6be4f
HT
8259 dev_err(&hdev->pdev->dev,
8260 "Send reset tqp cmd fail, ret = %d\n", ret);
8261 return ret;
46a3df9f
S
8262 }
8263
46a3df9f
S
8264 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8265 /* Wait for tqp hw reset */
8266 msleep(20);
814e0274 8267 reset_status = hclge_get_reset_status(hdev, queue_gid);
46a3df9f
S
8268 if (reset_status)
8269 break;
8270 }
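	/* Illustrative worst case: with a 20 ms poll interval the loop above
	 * waits up to HCLGE_TQP_RESET_TRY_TIMES * 20 ms for the hardware to
	 * report the queue reset as done before giving up.
	 */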
8271
8272 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
7fa6be4f
HT
8273 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
8274 return ret;
46a3df9f
S
8275 }
8276
814e0274 8277 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7fa6be4f
HT
8278 if (ret)
8279 dev_err(&hdev->pdev->dev,
8280 "Deassert the soft reset fail, ret = %d\n", ret);
8281
8282 return ret;
46a3df9f
S
8283}
8284
1a426f8b
PL
8285void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
8286{
8287 struct hclge_dev *hdev = vport->back;
8288 int reset_try_times = 0;
8289 int reset_status;
8290 u16 queue_gid;
8291 int ret;
8292
8293 queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
8294
8295 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8296 if (ret) {
8297 dev_warn(&hdev->pdev->dev,
8298 "Send reset tqp cmd fail, ret = %d\n", ret);
8299 return;
8300 }
8301
1a426f8b
PL
8302 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8303 /* Wait for tqp hw reset */
8304 msleep(20);
8305 reset_status = hclge_get_reset_status(hdev, queue_gid);
8306 if (reset_status)
8307 break;
8308 }
8309
8310 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8311 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
8312 return;
8313 }
8314
8315 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8316 if (ret)
8317 dev_warn(&hdev->pdev->dev,
8318 "Deassert the soft reset fail, ret = %d\n", ret);
8319}
8320
46a3df9f
S
8321static u32 hclge_get_fw_version(struct hnae3_handle *handle)
8322{
8323 struct hclge_vport *vport = hclge_get_vport(handle);
8324 struct hclge_dev *hdev = vport->back;
8325
8326 return hdev->fw_version;
8327}
8328
61387774
PL
8329static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8330{
8331 struct phy_device *phydev = hdev->hw.mac.phydev;
8332
8333 if (!phydev)
8334 return;
8335
70814e81 8336 phy_set_asym_pause(phydev, rx_en, tx_en);
61387774
PL
8337}
8338
8339static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8340{
61387774
PL
8341 int ret;
8342
40173a2e 8343 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
61387774 8344 return 0;
61387774
PL
8345
8346 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
aacbe27e
YL
8347 if (ret)
8348 dev_err(&hdev->pdev->dev,
8349 "configure pauseparam error, ret = %d.\n", ret);
61387774 8350
aacbe27e 8351 return ret;
61387774
PL
8352}
8353
1770a7a3
PL
8354int hclge_cfg_flowctrl(struct hclge_dev *hdev)
8355{
8356 struct phy_device *phydev = hdev->hw.mac.phydev;
8357 u16 remote_advertising = 0;
63cbf7a9 8358 u16 local_advertising;
1770a7a3
PL
8359 u32 rx_pause, tx_pause;
8360 u8 flowctl;
8361
8362 if (!phydev->link || !phydev->autoneg)
8363 return 0;
8364
3c1bcc86 8365 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
1770a7a3
PL
8366
8367 if (phydev->pause)
8368 remote_advertising = LPA_PAUSE_CAP;
8369
8370 if (phydev->asym_pause)
8371 remote_advertising |= LPA_PAUSE_ASYM;
8372
8373 flowctl = mii_resolve_flowctrl_fdx(local_advertising,
8374 remote_advertising);
8375 tx_pause = flowctl & FLOW_CTRL_TX;
8376 rx_pause = flowctl & FLOW_CTRL_RX;
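	/* Illustrative example: if both ends advertise symmetric pause
	 * (local ADVERTISE_PAUSE_CAP, partner LPA_PAUSE_CAP),
	 * mii_resolve_flowctrl_fdx() resolves to FLOW_CTRL_TX | FLOW_CTRL_RX,
	 * enabling pause frames in both directions.
	 */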
8377
8378 if (phydev->duplex == HCLGE_MAC_HALF) {
8379 tx_pause = 0;
8380 rx_pause = 0;
8381 }
8382
8383 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
8384}
8385
46a3df9f
S
8386static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
8387 u32 *rx_en, u32 *tx_en)
8388{
8389 struct hclge_vport *vport = hclge_get_vport(handle);
8390 struct hclge_dev *hdev = vport->back;
fb89629f 8391 struct phy_device *phydev = hdev->hw.mac.phydev;
46a3df9f 8392
fb89629f 8393 *auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
46a3df9f
S
8394
8395 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8396 *rx_en = 0;
8397 *tx_en = 0;
8398 return;
8399 }
8400
8401 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
8402 *rx_en = 1;
8403 *tx_en = 0;
8404 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
8405 *tx_en = 1;
8406 *rx_en = 0;
8407 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
8408 *rx_en = 1;
8409 *tx_en = 1;
8410 } else {
8411 *rx_en = 0;
8412 *tx_en = 0;
8413 }
8414}
8415
aacbe27e
YL
8416static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
8417 u32 rx_en, u32 tx_en)
8418{
8419 if (rx_en && tx_en)
8420 hdev->fc_mode_last_time = HCLGE_FC_FULL;
8421 else if (rx_en && !tx_en)
8422 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
8423 else if (!rx_en && tx_en)
8424 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
8425 else
8426 hdev->fc_mode_last_time = HCLGE_FC_NONE;
8427
8428 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
8429}
8430
61387774
PL
8431static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
8432 u32 rx_en, u32 tx_en)
8433{
8434 struct hclge_vport *vport = hclge_get_vport(handle);
8435 struct hclge_dev *hdev = vport->back;
8436 struct phy_device *phydev = hdev->hw.mac.phydev;
8437 u32 fc_autoneg;
8438
fb89629f
JS
8439 if (phydev) {
8440 fc_autoneg = hclge_get_autoneg(handle);
8441 if (auto_neg != fc_autoneg) {
8442 dev_info(&hdev->pdev->dev,
8443 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
8444 return -EOPNOTSUPP;
8445 }
61387774
PL
8446 }
8447
8448 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8449 dev_info(&hdev->pdev->dev,
8450 "Priority flow control enabled. Cannot set link flow control.\n");
8451 return -EOPNOTSUPP;
8452 }
8453
8454 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
8455
aacbe27e
YL
8456 hclge_record_user_pauseparam(hdev, rx_en, tx_en);
8457
fb89629f 8458 if (!auto_neg)
61387774
PL
8459 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
8460
22f48e24
JS
8461 if (phydev)
8462 return phy_start_aneg(phydev);
8463
fb89629f 8464 return -EOPNOTSUPP;
61387774
PL
8465}
8466
46a3df9f
S
8467static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
8468 u8 *auto_neg, u32 *speed, u8 *duplex)
8469{
8470 struct hclge_vport *vport = hclge_get_vport(handle);
8471 struct hclge_dev *hdev = vport->back;
8472
8473 if (speed)
8474 *speed = hdev->hw.mac.speed;
8475 if (duplex)
8476 *duplex = hdev->hw.mac.duplex;
8477 if (auto_neg)
8478 *auto_neg = hdev->hw.mac.autoneg;
8479}
8480
88d10bd6
JS
8481static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
8482 u8 *module_type)
46a3df9f
S
8483{
8484 struct hclge_vport *vport = hclge_get_vport(handle);
8485 struct hclge_dev *hdev = vport->back;
8486
8487 if (media_type)
8488 *media_type = hdev->hw.mac.media_type;
88d10bd6
JS
8489
8490 if (module_type)
8491 *module_type = hdev->hw.mac.module_type;
46a3df9f
S
8492}
8493
8494static void hclge_get_mdix_mode(struct hnae3_handle *handle,
8495 u8 *tp_mdix_ctrl, u8 *tp_mdix)
8496{
8497 struct hclge_vport *vport = hclge_get_vport(handle);
8498 struct hclge_dev *hdev = vport->back;
8499 struct phy_device *phydev = hdev->hw.mac.phydev;
ebaf1908
WL
8500 int mdix_ctrl, mdix, is_resolved;
8501 unsigned int retval;
46a3df9f
S
8502
8503 if (!phydev) {
8504 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8505 *tp_mdix = ETH_TP_MDI_INVALID;
8506 return;
8507 }
8508
8509 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
8510
8511 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
e4e87715
PL
8512 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
8513 HCLGE_PHY_MDIX_CTRL_S);
46a3df9f
S
8514
8515 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
e4e87715
PL
8516 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
8517 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
46a3df9f
S
8518
8519 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
8520
8521 switch (mdix_ctrl) {
8522 case 0x0:
8523 *tp_mdix_ctrl = ETH_TP_MDI;
8524 break;
8525 case 0x1:
8526 *tp_mdix_ctrl = ETH_TP_MDI_X;
8527 break;
8528 case 0x3:
8529 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
8530 break;
8531 default:
8532 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8533 break;
8534 }
8535
8536 if (!is_resolved)
8537 *tp_mdix = ETH_TP_MDI_INVALID;
8538 else if (mdix)
8539 *tp_mdix = ETH_TP_MDI_X;
8540 else
8541 *tp_mdix = ETH_TP_MDI;
8542}
8543
bb87be87
YL
8544static void hclge_info_show(struct hclge_dev *hdev)
8545{
8546 struct device *dev = &hdev->pdev->dev;
8547
8548 dev_info(dev, "PF info begin:\n");
8549
8550 dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps);
8551 dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
8552 dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
8553 dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport);
8554 dev_info(dev, "Numbers of vmdp vports: %d\n", hdev->num_vmdq_vport);
8555 dev_info(dev, "Numbers of VF for this PF: %d\n", hdev->num_req_vfs);
8556 dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
8557 dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size);
8558 dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size);
8559 dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size);
8560 dev_info(dev, "This is %s PF\n",
8561 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
8562 dev_info(dev, "DCB %s\n",
8563 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
8564 dev_info(dev, "MQPRIO %s\n",
8565 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
8566
8567 dev_info(dev, "PF info end.\n");
8568}
8569
994e04f1
HT
8570static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
8571 struct hclge_vport *vport)
8572{
8573 struct hnae3_client *client = vport->nic.client;
8574 struct hclge_dev *hdev = ae_dev->priv;
7cf9c069 8575 int rst_cnt;
994e04f1
HT
8576 int ret;
8577
7cf9c069 8578 rst_cnt = hdev->rst_stats.reset_cnt;
994e04f1
HT
8579 ret = client->ops->init_instance(&vport->nic);
8580 if (ret)
8581 return ret;
8582
8583 set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
7cf9c069
HT
8584 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
8585 rst_cnt != hdev->rst_stats.reset_cnt) {
8586 ret = -EBUSY;
8587 goto init_nic_err;
8588 }
8589
00ea6e5f
WL
8590 /* Enable nic hw error interrupts */
8591 ret = hclge_config_nic_hw_error(hdev, true);
bcf643c5 8592 if (ret) {
00ea6e5f
WL
8593 dev_err(&ae_dev->pdev->dev,
8594 "fail(%d) to enable hw error interrupts\n", ret);
bcf643c5
WL
8595 goto init_nic_err;
8596 }
8597
8598 hnae3_set_client_init_flag(client, ae_dev, 1);
00ea6e5f 8599
994e04f1
HT
8600 if (netif_msg_drv(&hdev->vport->nic))
8601 hclge_info_show(hdev);
8602
00ea6e5f 8603 return ret;
7cf9c069
HT
8604
8605init_nic_err:
8606 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8607 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
8608 msleep(HCLGE_WAIT_RESET_DONE);
8609
8610 client->ops->uninit_instance(&vport->nic, 0);
8611
8612 return ret;
994e04f1
HT
8613}
8614
8615static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
8616 struct hclge_vport *vport)
8617{
8618 struct hnae3_client *client = vport->roce.client;
8619 struct hclge_dev *hdev = ae_dev->priv;
7cf9c069 8620 int rst_cnt;
994e04f1
HT
8621 int ret;
8622
8623 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
8624 !hdev->nic_client)
8625 return 0;
8626
8627 client = hdev->roce_client;
8628 ret = hclge_init_roce_base_info(vport);
8629 if (ret)
8630 return ret;
8631
7cf9c069 8632 rst_cnt = hdev->rst_stats.reset_cnt;
994e04f1
HT
8633 ret = client->ops->init_instance(&vport->roce);
8634 if (ret)
8635 return ret;
8636
8637 set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
7cf9c069
HT
8638 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
8639 rst_cnt != hdev->rst_stats.reset_cnt) {
8640 ret = -EBUSY;
8641 goto init_roce_err;
8642 }
8643
72fcd2be
HT
8644 /* Enable roce ras interrupts */
8645 ret = hclge_config_rocee_ras_interrupt(hdev, true);
8646 if (ret) {
8647 dev_err(&ae_dev->pdev->dev,
8648 "fail(%d) to enable roce ras interrupts\n", ret);
8649 goto init_roce_err;
8650 }
8651
994e04f1
HT
8652 hnae3_set_client_init_flag(client, ae_dev, 1);
8653
8654 return 0;
7cf9c069
HT
8655
8656init_roce_err:
8657 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8658 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
8659 msleep(HCLGE_WAIT_RESET_DONE);
8660
8661 hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
8662
8663 return ret;
994e04f1
HT
8664}
8665
46a3df9f
S
8666static int hclge_init_client_instance(struct hnae3_client *client,
8667 struct hnae3_ae_dev *ae_dev)
8668{
8669 struct hclge_dev *hdev = ae_dev->priv;
8670 struct hclge_vport *vport;
8671 int i, ret;
8672
8673 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
8674 vport = &hdev->vport[i];
8675
8676 switch (client->type) {
8677 case HNAE3_CLIENT_KNIC:
8678
8679 hdev->nic_client = client;
8680 vport->nic.client = client;
994e04f1 8681 ret = hclge_init_nic_client_instance(ae_dev, vport);
46a3df9f 8682 if (ret)
49dd8054 8683 goto clear_nic;
46a3df9f 8684
994e04f1
HT
8685 ret = hclge_init_roce_client_instance(ae_dev, vport);
8686 if (ret)
8687 goto clear_roce;
46a3df9f 8688
46a3df9f
S
8689 break;
8690 case HNAE3_CLIENT_ROCE:
e92a0843 8691 if (hnae3_dev_roce_supported(hdev)) {
46a3df9f
S
8692 hdev->roce_client = client;
8693 vport->roce.client = client;
8694 }
8695
994e04f1
HT
8696 ret = hclge_init_roce_client_instance(ae_dev, vport);
8697 if (ret)
8698 goto clear_roce;
fa7a4bd5
JS
8699
8700 break;
8701 default:
8702 return -EINVAL;
46a3df9f
S
8703 }
8704 }
8705
37417c66 8706 return 0;
49dd8054
JS
8707
8708clear_nic:
8709 hdev->nic_client = NULL;
8710 vport->nic.client = NULL;
8711 return ret;
8712clear_roce:
8713 hdev->roce_client = NULL;
8714 vport->roce.client = NULL;
8715 return ret;
46a3df9f
S
8716}
8717
8718static void hclge_uninit_client_instance(struct hnae3_client *client,
8719 struct hnae3_ae_dev *ae_dev)
8720{
8721 struct hclge_dev *hdev = ae_dev->priv;
8722 struct hclge_vport *vport;
8723 int i;
8724
8725 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
8726 vport = &hdev->vport[i];
a17dcf3f 8727 if (hdev->roce_client) {
2a0bfc36 8728 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
7cf9c069
HT
8729 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
8730 msleep(HCLGE_WAIT_RESET_DONE);
8731
46a3df9f
S
8732 hdev->roce_client->ops->uninit_instance(&vport->roce,
8733 0);
a17dcf3f
L
8734 hdev->roce_client = NULL;
8735 vport->roce.client = NULL;
8736 }
46a3df9f
S
8737 if (client->type == HNAE3_CLIENT_ROCE)
8738 return;
49dd8054 8739 if (hdev->nic_client && client->ops->uninit_instance) {
bd9109c9 8740 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
7cf9c069
HT
8741 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
8742 msleep(HCLGE_WAIT_RESET_DONE);
8743
46a3df9f 8744 client->ops->uninit_instance(&vport->nic, 0);
a17dcf3f
L
8745 hdev->nic_client = NULL;
8746 vport->nic.client = NULL;
8747 }
46a3df9f
S
8748 }
8749}
8750
8751static int hclge_pci_init(struct hclge_dev *hdev)
8752{
8753 struct pci_dev *pdev = hdev->pdev;
8754 struct hclge_hw *hw;
8755 int ret;
8756
8757 ret = pci_enable_device(pdev);
8758 if (ret) {
8759 dev_err(&pdev->dev, "failed to enable PCI device\n");
3e249d3b 8760 return ret;
46a3df9f
S
8761 }
8762
8763 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
8764 if (ret) {
8765 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
8766 if (ret) {
8767 dev_err(&pdev->dev,
8768 "can't set consistent PCI DMA");
8769 goto err_disable_device;
8770 }
8771 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
8772 }
8773
8774 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
8775 if (ret) {
8776 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
8777 goto err_disable_device;
8778 }
8779
8780 pci_set_master(pdev);
8781 hw = &hdev->hw;
46a3df9f
S
8782 hw->io_base = pcim_iomap(pdev, 2, 0);
8783 if (!hw->io_base) {
8784 dev_err(&pdev->dev, "Can't map configuration register space\n");
8785 ret = -ENOMEM;
8786 goto err_clr_master;
8787 }
8788
709eb41a
L
8789 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
8790
46a3df9f
S
8791 return 0;
8792err_clr_master:
8793 pci_clear_master(pdev);
8794 pci_release_regions(pdev);
8795err_disable_device:
8796 pci_disable_device(pdev);
46a3df9f
S
8797
8798 return ret;
8799}
8800
8801static void hclge_pci_uninit(struct hclge_dev *hdev)
8802{
8803 struct pci_dev *pdev = hdev->pdev;
8804
6a814413 8805 pcim_iounmap(pdev, hdev->hw.io_base);
887c3820 8806 pci_free_irq_vectors(pdev);
46a3df9f
S
8807 pci_clear_master(pdev);
8808 pci_release_mem_regions(pdev);
8809 pci_disable_device(pdev);
8810}
8811
48569cda
PL
8812static void hclge_state_init(struct hclge_dev *hdev)
8813{
8814 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
8815 set_bit(HCLGE_STATE_DOWN, &hdev->state);
8816 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
8817 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
8818 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
8819 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
8820}
8821
8822static void hclge_state_uninit(struct hclge_dev *hdev)
8823{
8824 set_bit(HCLGE_STATE_DOWN, &hdev->state);
acfc3d55 8825 set_bit(HCLGE_STATE_REMOVING, &hdev->state);
48569cda 8826
65e41e7e
HT
8827 if (hdev->reset_timer.function)
8828 del_timer_sync(&hdev->reset_timer);
7be1b9f3
YL
8829 if (hdev->service_task.work.func)
8830 cancel_delayed_work_sync(&hdev->service_task);
48569cda
PL
8831 if (hdev->rst_service_task.func)
8832 cancel_work_sync(&hdev->rst_service_task);
8833 if (hdev->mbx_service_task.func)
8834 cancel_work_sync(&hdev->mbx_service_task);
8835}
8836
6b9a97ee
HT
8837static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
8838{
8839#define HCLGE_FLR_WAIT_MS 100
8840#define HCLGE_FLR_WAIT_CNT 50
8841 struct hclge_dev *hdev = ae_dev->priv;
8842 int cnt = 0;
8843
8844 clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
8845 clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
8846 set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
8847 hclge_reset_event(hdev->pdev, NULL);
8848
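 /* poll up to HCLGE_FLR_WAIT_CNT * HCLGE_FLR_WAIT_MS ms (5 seconds) for the reset handler to signal HNAE3_FLR_DOWN */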
8849 while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
8850 cnt++ < HCLGE_FLR_WAIT_CNT)
8851 msleep(HCLGE_FLR_WAIT_MS);
8852
8853 if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
8854 dev_err(&hdev->pdev->dev,
8855 "flr wait down timeout: %d\n", cnt);
8856}
8857
8858static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
8859{
8860 struct hclge_dev *hdev = ae_dev->priv;
8861
8862 set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
8863}
8864
31bb229d
PL
8865static void hclge_clear_resetting_state(struct hclge_dev *hdev)
8866{
8867 u16 i;
8868
8869 for (i = 0; i < hdev->num_alloc_vport; i++) {
8870 struct hclge_vport *vport = &hdev->vport[i];
8871 int ret;
8872
8873 /* Send cmd to clear VF's FUNC_RST_ING */
8874 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
8875 if (ret)
8876 dev_warn(&hdev->pdev->dev,
8877 "clear vf(%d) rst failed %d!\n",
8878 vport->vport_id, ret);
8879 }
8880}
8881
46a3df9f
S
8882static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
8883{
8884 struct pci_dev *pdev = ae_dev->pdev;
46a3df9f
S
8885 struct hclge_dev *hdev;
8886 int ret;
8887
8888 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
8889 if (!hdev) {
8890 ret = -ENOMEM;
ffd5656e 8891 goto out;
46a3df9f
S
8892 }
8893
46a3df9f
S
8894 hdev->pdev = pdev;
8895 hdev->ae_dev = ae_dev;
4ed340ab 8896 hdev->reset_type = HNAE3_NONE_RESET;
0742ed7c 8897 hdev->reset_level = HNAE3_FUNC_RESET;
46a3df9f 8898 ae_dev->priv = hdev;
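 /* default max packet size: a standard Ethernet frame plus FCS and two VLAN tags */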
e6d7d79d 8899 hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
46a3df9f 8900
818f1675 8901 mutex_init(&hdev->vport_lock);
6dd86902 8902 mutex_init(&hdev->vport_cfg_mutex);
44122887 8903 spin_lock_init(&hdev->fd_rule_lock);
818f1675 8904
46a3df9f
S
8905 ret = hclge_pci_init(hdev);
8906 if (ret) {
8907 dev_err(&pdev->dev, "PCI init failed\n");
ffd5656e 8908 goto out;
46a3df9f
S
8909 }
8910
3efb960f
L
8911 /* Firmware command queue initialization */
8912 ret = hclge_cmd_queue_init(hdev);
8913 if (ret) {
8914 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
ffd5656e 8915 goto err_pci_uninit;
3efb960f
L
8916 }
8917
8918 /* Firmware command initialization */
46a3df9f
S
8919 ret = hclge_cmd_init(hdev);
8920 if (ret)
ffd5656e 8921 goto err_cmd_uninit;
46a3df9f
S
8922
8923 ret = hclge_get_cap(hdev);
8924 if (ret) {
e00e2197
CIK
8925 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
8926 ret);
ffd5656e 8927 goto err_cmd_uninit;
46a3df9f
S
8928 }
8929
8930 ret = hclge_configure(hdev);
8931 if (ret) {
8932 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
ffd5656e 8933 goto err_cmd_uninit;
46a3df9f
S
8934 }
8935
887c3820 8936 ret = hclge_init_msi(hdev);
46a3df9f 8937 if (ret) {
887c3820 8938 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
ffd5656e 8939 goto err_cmd_uninit;
46a3df9f
S
8940 }
8941
466b0c00
L
8942 ret = hclge_misc_irq_init(hdev);
8943 if (ret) {
8944 dev_err(&pdev->dev,
8945 "Misc IRQ(vector0) init error, ret = %d.\n",
8946 ret);
ffd5656e 8947 goto err_msi_uninit;
466b0c00
L
8948 }
8949
46a3df9f
S
8950 ret = hclge_alloc_tqps(hdev);
8951 if (ret) {
8952 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
ffd5656e 8953 goto err_msi_irq_uninit;
46a3df9f
S
8954 }
8955
8956 ret = hclge_alloc_vport(hdev);
8957 if (ret) {
8958 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
ffd5656e 8959 goto err_msi_irq_uninit;
46a3df9f
S
8960 }
8961
7df7dad6
L
8962 ret = hclge_map_tqp(hdev);
8963 if (ret) {
8964 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
2312e050 8965 goto err_msi_irq_uninit;
7df7dad6
L
8966 }
8967
c5ef83cb
HT
8968 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
8969 ret = hclge_mac_mdio_config(hdev);
8970 if (ret) {
8971 dev_err(&hdev->pdev->dev,
8972 "mdio config fail ret=%d\n", ret);
2312e050 8973 goto err_msi_irq_uninit;
c5ef83cb 8974 }
cf9cca2d 8975 }
8976
39932473
JS
8977 ret = hclge_init_umv_space(hdev);
8978 if (ret) {
8979 dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
9fc55413 8980 goto err_mdiobus_unreg;
39932473
JS
8981 }
8982
46a3df9f
S
8983 ret = hclge_mac_init(hdev);
8984 if (ret) {
8985 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
ffd5656e 8986 goto err_mdiobus_unreg;
46a3df9f 8987 }
46a3df9f
S
8988
8989 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8990 if (ret) {
8991 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
ffd5656e 8992 goto err_mdiobus_unreg;
46a3df9f
S
8993 }
8994
b26a6fea
PL
8995 ret = hclge_config_gro(hdev, true);
8996 if (ret)
8997 goto err_mdiobus_unreg;
8998
46a3df9f
S
8999 ret = hclge_init_vlan_config(hdev);
9000 if (ret) {
9001 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
ffd5656e 9002 goto err_mdiobus_unreg;
46a3df9f
S
9003 }
9004
9005 ret = hclge_tm_schd_init(hdev);
9006 if (ret) {
9007 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
ffd5656e 9008 goto err_mdiobus_unreg;
68ece54e
YL
9009 }
9010
268f5dfa 9011 hclge_rss_init_cfg(hdev);
68ece54e
YL
9012 ret = hclge_rss_init_hw(hdev);
9013 if (ret) {
9014 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
ffd5656e 9015 goto err_mdiobus_unreg;
46a3df9f
S
9016 }
9017
f5aac71c
FL
9018 ret = init_mgr_tbl(hdev);
9019 if (ret) {
9020 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
ffd5656e 9021 goto err_mdiobus_unreg;
f5aac71c
FL
9022 }
9023
d695964d
JS
9024 ret = hclge_init_fd_config(hdev);
9025 if (ret) {
9026 dev_err(&pdev->dev,
9027 "fd table init fail, ret=%d\n", ret);
9028 goto err_mdiobus_unreg;
9029 }
9030
a6345787
WL
9031 INIT_KFIFO(hdev->mac_tnl_log);
9032
cacde272
YL
9033 hclge_dcb_ops_set(hdev);
9034
65e41e7e 9035 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
7be1b9f3 9036 INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
cb1b9f77 9037 INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
c1a81619 9038 INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
46a3df9f 9039
08125454
YL
9040 /* Set up affinity after the service timer setup because add_timer_on
9041 * is called from the affinity notify callback.
9042 */
9043 hclge_misc_affinity_setup(hdev);
9044
8e52a602 9045 hclge_clear_all_event_cause(hdev);
31bb229d 9046 hclge_clear_resetting_state(hdev);
8e52a602 9047
e4193e24
SJ
9048 /* Log and clear the hw errors that have already occurred */
9049 hclge_handle_all_hns_hw_errors(ae_dev);
9050
e3b84ed2
SJ
9051 /* request a delayed reset for error recovery, because an immediate
9052 * global reset on this PF would affect the pending initialization of other PFs
9053 */
9054 if (ae_dev->hw_err_reset_req) {
9055 enum hnae3_reset_type reset_level;
9056
9057 reset_level = hclge_get_reset_level(ae_dev,
9058 &ae_dev->hw_err_reset_req);
9059 hclge_set_def_reset_request(ae_dev, reset_level);
9060 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
9061 }
9062
466b0c00
L
9063 /* Enable MISC vector(vector0) */
9064 hclge_enable_vector(&hdev->misc_vector, true);
9065
48569cda 9066 hclge_state_init(hdev);
0742ed7c 9067 hdev->last_reset_time = jiffies;
46a3df9f 9068
08d80a4c
HT
9069 dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
9070 HCLGE_DRIVER_NAME);
9071
46a3df9f
S
9072 return 0;
9073
ffd5656e
HT
9074err_mdiobus_unreg:
9075 if (hdev->hw.mac.phydev)
9076 mdiobus_unregister(hdev->hw.mac.mdio_bus);
ffd5656e
HT
9077err_msi_irq_uninit:
9078 hclge_misc_irq_uninit(hdev);
9079err_msi_uninit:
9080 pci_free_irq_vectors(pdev);
9081err_cmd_uninit:
232d0d55 9082 hclge_cmd_uninit(hdev);
ffd5656e 9083err_pci_uninit:
6a814413 9084 pcim_iounmap(pdev, hdev->hw.io_base);
ffd5656e 9085 pci_clear_master(pdev);
46a3df9f 9086 pci_release_regions(pdev);
ffd5656e 9087 pci_disable_device(pdev);
ffd5656e 9088out:
46a3df9f
S
9089 return ret;
9090}
9091
c6dc5213 9092static void hclge_stats_clear(struct hclge_dev *hdev)
9093{
9094 memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
9095}
9096
a6d818e3
YL
9097static void hclge_reset_vport_state(struct hclge_dev *hdev)
9098{
9099 struct hclge_vport *vport = hdev->vport;
9100 int i;
9101
9102 for (i = 0; i < hdev->num_alloc_vport; i++) {
0f14c5b1 9103 hclge_vport_stop(vport);
a6d818e3
YL
9104 vport++;
9105 }
9106}
9107
4ed340ab
L
9108static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
9109{
9110 struct hclge_dev *hdev = ae_dev->priv;
9111 struct pci_dev *pdev = ae_dev->pdev;
9112 int ret;
9113
9114 set_bit(HCLGE_STATE_DOWN, &hdev->state);
9115
c6dc5213 9116 hclge_stats_clear(hdev);
dc8131d8 9117 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
81a9255e 9118 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
c6dc5213 9119
4ed340ab
L
9120 ret = hclge_cmd_init(hdev);
9121 if (ret) {
9122 dev_err(&pdev->dev, "Cmd queue init failed\n");
9123 return ret;
9124 }
9125
4ed340ab
L
9126 ret = hclge_map_tqp(hdev);
9127 if (ret) {
9128 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
9129 return ret;
9130 }
9131
39932473
JS
9132 hclge_reset_umv_space(hdev);
9133
4ed340ab
L
9134 ret = hclge_mac_init(hdev);
9135 if (ret) {
9136 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
9137 return ret;
9138 }
9139
4ed340ab
L
9140 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
9141 if (ret) {
9142 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
9143 return ret;
9144 }
9145
b26a6fea
PL
9146 ret = hclge_config_gro(hdev, true);
9147 if (ret)
9148 return ret;
9149
4ed340ab
L
9150 ret = hclge_init_vlan_config(hdev);
9151 if (ret) {
9152 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
9153 return ret;
9154 }
9155
44e59e37 9156 ret = hclge_tm_init_hw(hdev, true);
4ed340ab 9157 if (ret) {
f31c1ba6 9158 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
4ed340ab
L
9159 return ret;
9160 }
9161
9162 ret = hclge_rss_init_hw(hdev);
9163 if (ret) {
9164 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
9165 return ret;
9166 }
9167
d695964d
JS
9168 ret = hclge_init_fd_config(hdev);
9169 if (ret) {
9b2f3477 9170 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
d695964d
JS
9171 return ret;
9172 }
9173
f3fa4a94 9174 /* Re-enable the hw error interrupts because
00ea6e5f 9175 * the interrupts get disabled on global reset.
01865a50 9176 */
00ea6e5f 9177 ret = hclge_config_nic_hw_error(hdev, true);
f3fa4a94
SJ
9178 if (ret) {
9179 dev_err(&pdev->dev,
00ea6e5f
WL
9180 "fail(%d) to re-enable NIC hw error interrupts\n",
9181 ret);
f3fa4a94
SJ
9182 return ret;
9183 }
01865a50 9184
00ea6e5f
WL
9185 if (hdev->roce_client) {
9186 ret = hclge_config_rocee_ras_interrupt(hdev, true);
9187 if (ret) {
9188 dev_err(&pdev->dev,
9189 "fail(%d) to re-enable roce ras interrupts\n",
9190 ret);
9191 return ret;
9192 }
9193 }
9194
a6d818e3
YL
9195 hclge_reset_vport_state(hdev);
9196
4ed340ab
L
9197 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
9198 HCLGE_DRIVER_NAME);
9199
9200 return 0;
9201}
9202
46a3df9f
S
9203static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
9204{
9205 struct hclge_dev *hdev = ae_dev->priv;
9206 struct hclge_mac *mac = &hdev->hw.mac;
9207
08125454 9208 hclge_misc_affinity_teardown(hdev);
48569cda 9209 hclge_state_uninit(hdev);
46a3df9f
S
9210
9211 if (mac->phydev)
9212 mdiobus_unregister(mac->mdio_bus);
9213
39932473
JS
9214 hclge_uninit_umv_space(hdev);
9215
466b0c00
L
9216 /* Disable MISC vector(vector0) */
9217 hclge_enable_vector(&hdev->misc_vector, false);
8e52a602
XW
9218 synchronize_irq(hdev->misc_vector.vector_irq);
9219
00ea6e5f 9220 /* Disable all hw interrupts */
a6345787 9221 hclge_config_mac_tnl_int(hdev, false);
00ea6e5f
WL
9222 hclge_config_nic_hw_error(hdev, false);
9223 hclge_config_rocee_ras_interrupt(hdev, false);
9224
232d0d55 9225 hclge_cmd_uninit(hdev);
ca1d7669 9226 hclge_misc_irq_uninit(hdev);
46a3df9f 9227 hclge_pci_uninit(hdev);
818f1675 9228 mutex_destroy(&hdev->vport_lock);
6dd86902 9229 hclge_uninit_vport_mac_table(hdev);
c6075b19 9230 hclge_uninit_vport_vlan_table(hdev);
6dd86902 9231 mutex_destroy(&hdev->vport_cfg_mutex);
46a3df9f
S
9232 ae_dev->priv = NULL;
9233}
9234
482d2e9c
PL
9235static u32 hclge_get_max_channels(struct hnae3_handle *handle)
9236{
9237 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9238 struct hclge_vport *vport = hclge_get_vport(handle);
9239 struct hclge_dev *hdev = vport->back;
9240
c3b9c50d
HT
9241 return min_t(u32, hdev->rss_size_max,
9242 vport->alloc_tqps / kinfo->num_tc);
482d2e9c
PL
9243}
9244
9245static void hclge_get_channels(struct hnae3_handle *handle,
9246 struct ethtool_channels *ch)
9247{
482d2e9c
PL
9248 ch->max_combined = hclge_get_max_channels(handle);
9249 ch->other_count = 1;
9250 ch->max_other = 1;
c3b9c50d 9251 ch->combined_count = handle->kinfo.rss_size;
482d2e9c
PL
9252}
9253
09f2af64 9254static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
0d43bf45 9255 u16 *alloc_tqps, u16 *max_rss_size)
09f2af64
PL
9256{
9257 struct hclge_vport *vport = hclge_get_vport(handle);
9258 struct hclge_dev *hdev = vport->back;
09f2af64 9259
0d43bf45 9260 *alloc_tqps = vport->alloc_tqps;
09f2af64
PL
9261 *max_rss_size = hdev->rss_size_max;
9262}
9263
90c68a41
YL
9264static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
9265 bool rxfh_configured)
09f2af64
PL
9266{
9267 struct hclge_vport *vport = hclge_get_vport(handle);
9268 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
354d0fab 9269 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
09f2af64 9270 struct hclge_dev *hdev = vport->back;
354d0fab 9271 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
09f2af64
PL
9272 int cur_rss_size = kinfo->rss_size;
9273 int cur_tqps = kinfo->num_tqps;
09f2af64 9274 u16 tc_valid[HCLGE_MAX_TC_NUM];
09f2af64
PL
9275 u16 roundup_size;
9276 u32 *rss_indir;
ebaf1908
WL
9277 unsigned int i;
9278 int ret;
09f2af64 9279
672ad0ed 9280 kinfo->req_rss_size = new_tqps_num;
09f2af64 9281
672ad0ed 9282 ret = hclge_tm_vport_map_update(hdev);
09f2af64 9283 if (ret) {
672ad0ed 9284 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
09f2af64
PL
9285 return ret;
9286 }
9287
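 /* tc_size is programmed as the log2 of the per-TC queue count, so round rss_size up to a power of two first */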
9288 roundup_size = roundup_pow_of_two(kinfo->rss_size);
9289 roundup_size = ilog2(roundup_size);
9290 /* Set the RSS TC mode according to the new RSS size */
9291 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
9292 tc_valid[i] = 0;
9293
9294 if (!(hdev->hw_tc_map & BIT(i)))
9295 continue;
9296
9297 tc_valid[i] = 1;
9298 tc_size[i] = roundup_size;
9299 tc_offset[i] = kinfo->rss_size * i;
9300 }
9301 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
9302 if (ret)
9303 return ret;
9304
90c68a41
YL
9305 /* RSS indirection table has been configured by the user */
9306 if (rxfh_configured)
9307 goto out;
9308
09f2af64
PL
9309 /* Reinitialize the RSS indirection table according to the new RSS size */
9310 rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
9311 if (!rss_indir)
9312 return -ENOMEM;
9313
9314 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
9315 rss_indir[i] = i % kinfo->rss_size;
9316
9317 ret = hclge_set_rss(handle, rss_indir, NULL, 0);
9318 if (ret)
9319 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
9320 ret);
9321
9322 kfree(rss_indir);
9323
90c68a41 9324out:
09f2af64
PL
9325 if (!ret)
9326 dev_info(&hdev->pdev->dev,
9327 "Channels changed, rss_size from %d to %d, tqps from %d to %d",
9328 cur_rss_size, kinfo->rss_size,
9329 cur_tqps, kinfo->rss_size * kinfo->num_tc);
9330
9331 return ret;
9332}
9333
77b34110
FL
9334static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
9335 u32 *regs_num_64_bit)
9336{
9337 struct hclge_desc desc;
9338 u32 total_num;
9339 int ret;
9340
9341 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
9342 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9343 if (ret) {
9344 dev_err(&hdev->pdev->dev,
9345 "Query register number cmd failed, ret = %d.\n", ret);
9346 return ret;
9347 }
9348
9349 *regs_num_32_bit = le32_to_cpu(desc.data[0]);
9350 *regs_num_64_bit = le32_to_cpu(desc.data[1]);
9351
9352 total_num = *regs_num_32_bit + *regs_num_64_bit;
9353 if (!total_num)
9354 return -EINVAL;
9355
9356 return 0;
9357}
9358
9359static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
9360 void *data)
9361{
9362#define HCLGE_32_BIT_REG_RTN_DATANUM 8
b37ce587 9363#define HCLGE_32_BIT_DESC_NODATA_LEN 2
77b34110
FL
9364
9365 struct hclge_desc *desc;
9366 u32 *reg_val = data;
9367 __le32 *desc_data;
b37ce587 9368 int nodata_num;
77b34110
FL
9369 int cmd_num;
9370 int i, k, n;
9371 int ret;
9372
9373 if (regs_num == 0)
9374 return 0;
9375
b37ce587
YM
9376 nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
9377 cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
9378 HCLGE_32_BIT_REG_RTN_DATANUM);
77b34110
FL
9379 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
9380 if (!desc)
9381 return -ENOMEM;
9382
9383 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
9384 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
9385 if (ret) {
9386 dev_err(&hdev->pdev->dev,
9387 "Query 32 bit register cmd failed, ret = %d.\n", ret);
9388 kfree(desc);
9389 return ret;
9390 }
9391
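 /* the first descriptor returns nodata_num fewer register values than the following ones */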
9392 for (i = 0; i < cmd_num; i++) {
9393 if (i == 0) {
9394 desc_data = (__le32 *)(&desc[i].data[0]);
b37ce587 9395 n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
77b34110
FL
9396 } else {
9397 desc_data = (__le32 *)(&desc[i]);
9398 n = HCLGE_32_BIT_REG_RTN_DATANUM;
9399 }
9400 for (k = 0; k < n; k++) {
9401 *reg_val++ = le32_to_cpu(*desc_data++);
9402
9403 regs_num--;
9404 if (!regs_num)
9405 break;
9406 }
9407 }
9408
9409 kfree(desc);
9410 return 0;
9411}
9412
9413static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
9414 void *data)
9415{
9416#define HCLGE_64_BIT_REG_RTN_DATANUM 4
b37ce587 9417#define HCLGE_64_BIT_DESC_NODATA_LEN 1
77b34110
FL
9418
9419 struct hclge_desc *desc;
9420 u64 *reg_val = data;
9421 __le64 *desc_data;
b37ce587 9422 int nodata_len;
77b34110
FL
9423 int cmd_num;
9424 int i, k, n;
9425 int ret;
9426
9427 if (regs_num == 0)
9428 return 0;
9429
b37ce587
YM
9430 nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
9431 cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
9432 HCLGE_64_BIT_REG_RTN_DATANUM);
77b34110
FL
9433 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
9434 if (!desc)
9435 return -ENOMEM;
9436
9437 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
9438 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
9439 if (ret) {
9440 dev_err(&hdev->pdev->dev,
9441 "Query 64 bit register cmd failed, ret = %d.\n", ret);
9442 kfree(desc);
9443 return ret;
9444 }
9445
9446 for (i = 0; i < cmd_num; i++) {
9447 if (i == 0) {
9448 desc_data = (__le64 *)(&desc[i].data[0]);
b37ce587 9449 n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
77b34110
FL
9450 } else {
9451 desc_data = (__le64 *)(&desc[i]);
9452 n = HCLGE_64_BIT_REG_RTN_DATANUM;
9453 }
9454 for (k = 0; k < n; k++) {
9455 *reg_val++ = le64_to_cpu(*desc_data++);
9456
9457 regs_num--;
9458 if (!regs_num)
9459 break;
9460 }
9461 }
9462
9463 kfree(desc);
9464 return 0;
9465}
9466
ea4750ca 9467#define MAX_SEPARATE_NUM 4
ddb54554 9468#define SEPARATOR_VALUE 0xFDFCFBFA
ea4750ca
JS
9469#define REG_NUM_PER_LINE 4
9470#define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
ddb54554
GH
9471#define REG_SEPARATOR_LINE 1
9472#define REG_NUM_REMAIN_MASK 3
9473#define BD_LIST_MAX_NUM 30
ea4750ca 9474
ddb54554 9475int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
77b34110 9476{
ddb54554
GH
9477 /* prepare 4 commands to query DFX BD number */
9478 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true);
9479 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9480 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true);
9481 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9482 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true);
9483 desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9484 hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true);
9485
9486 return hclge_cmd_send(&hdev->hw, desc, 4);
9487}
9488
9489static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
9490 int *bd_num_list,
9491 u32 type_num)
9492{
9493#define HCLGE_DFX_REG_BD_NUM 4
9494
9495 u32 entries_per_desc, desc_index, index, offset, i;
9496 struct hclge_desc desc[HCLGE_DFX_REG_BD_NUM];
77b34110
FL
9497 int ret;
9498
ddb54554 9499 ret = hclge_query_bd_num_cmd_send(hdev, desc);
77b34110
FL
9500 if (ret) {
9501 dev_err(&hdev->pdev->dev,
ddb54554
GH
9502 "Get dfx bd num fail, status is %d.\n", ret);
9503 return ret;
77b34110
FL
9504 }
9505
ddb54554
GH
9506 entries_per_desc = ARRAY_SIZE(desc[0].data);
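 /* each BD number is a single data word; locate it by descriptor index and offset within that descriptor */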
9507 for (i = 0; i < type_num; i++) {
9508 offset = hclge_dfx_bd_offset_list[i];
9509 index = offset % entries_per_desc;
9510 desc_index = offset / entries_per_desc;
9511 bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
9512 }
ea4750ca 9513
ddb54554 9514 return ret;
77b34110
FL
9515}
9516
ddb54554
GH
9517static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
9518 struct hclge_desc *desc_src, int bd_num,
9519 enum hclge_opcode_type cmd)
77b34110 9520{
ddb54554
GH
9521 struct hclge_desc *desc = desc_src;
9522 int i, ret;
9523
9524 hclge_cmd_setup_basic_desc(desc, cmd, true);
9525 for (i = 0; i < bd_num - 1; i++) {
9526 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9527 desc++;
9528 hclge_cmd_setup_basic_desc(desc, cmd, true);
9529 }
9530
9531 desc = desc_src;
9532 ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
9533 if (ret)
9534 dev_err(&hdev->pdev->dev,
9535 "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
9536 cmd, ret);
9537
9538 return ret;
9539}
9540
9541static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
9542 void *data)
9543{
9544 int entries_per_desc, reg_num, separator_num, desc_index, index, i;
9545 struct hclge_desc *desc = desc_src;
ea4750ca 9546 u32 *reg = data;
ddb54554
GH
9547
9548 entries_per_desc = ARRAY_SIZE(desc->data);
9549 reg_num = entries_per_desc * bd_num;
9550 separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
9551 for (i = 0; i < reg_num; i++) {
9552 index = i % entries_per_desc;
9553 desc_index = i / entries_per_desc;
9554 *reg++ = le32_to_cpu(desc[desc_index].data[index]);
9555 }
9556 for (i = 0; i < separator_num; i++)
9557 *reg++ = SEPARATOR_VALUE;
9558
9559 return reg_num + separator_num;
9560}
9561
9562static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
9563{
9564 u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
9565 int data_len_per_desc, data_len, bd_num, i;
9566 int bd_num_list[BD_LIST_MAX_NUM];
77b34110
FL
9567 int ret;
9568
ddb54554
GH
9569 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
9570 if (ret) {
9571 dev_err(&hdev->pdev->dev,
9572 "Get dfx reg bd num fail, status is %d.\n", ret);
9573 return ret;
9574 }
77b34110 9575
ddb54554
GH
9576 data_len_per_desc = FIELD_SIZEOF(struct hclge_desc, data);
9577 *len = 0;
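 /* reserve whole REG_LEN_PER_LINE lines per register type; the extra line covers the separator padding */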
9578 for (i = 0; i < dfx_reg_type_num; i++) {
9579 bd_num = bd_num_list[i];
9580 data_len = data_len_per_desc * bd_num;
9581 *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
9582 }
9583
9584 return ret;
9585}
9586
9587static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
9588{
9589 u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
9590 int bd_num, bd_num_max, buf_len, i;
9591 int bd_num_list[BD_LIST_MAX_NUM];
9592 struct hclge_desc *desc_src;
9593 u32 *reg = data;
9594 int ret;
9595
9596 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
77b34110
FL
9597 if (ret) {
9598 dev_err(&hdev->pdev->dev,
ddb54554
GH
9599 "Get dfx reg bd num fail, status is %d.\n", ret);
9600 return ret;
9601 }
9602
9603 bd_num_max = bd_num_list[0];
9604 for (i = 1; i < dfx_reg_type_num; i++)
9605 bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
9606
9607 buf_len = sizeof(*desc_src) * bd_num_max;
9608 desc_src = kzalloc(buf_len, GFP_KERNEL);
9609 if (!desc_src) {
9610 dev_err(&hdev->pdev->dev, "%s kzalloc failed\n", __func__);
9611 return -ENOMEM;
77b34110
FL
9612 }
9613
ddb54554
GH
9614 for (i = 0; i < dfx_reg_type_num; i++) {
9615 bd_num = bd_num_list[i];
9616 ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
9617 hclge_dfx_reg_opcode_list[i]);
9618 if (ret) {
9619 dev_err(&hdev->pdev->dev,
9620 "Get dfx reg fail, status is %d.\n", ret);
9621 break;
9622 }
9623
9624 reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
9625 }
9626
9627 kfree(desc_src);
9628 return ret;
9629}
9630
9631static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
9632 struct hnae3_knic_private_info *kinfo)
9633{
9634#define HCLGE_RING_REG_OFFSET 0x200
9635#define HCLGE_RING_INT_REG_OFFSET 0x4
9636
9637 int i, j, reg_num, separator_num;
9638 int data_num_sum;
9639 u32 *reg = data;
9640
ea4750ca 9641 /* fetch the per-PF register values from the PF PCIe register space */
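 /* each register block is padded with SEPARATOR_VALUE words out to a REG_NUM_PER_LINE boundary; a full separator line is written when already aligned */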
ddb54554
GH
9642 reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
9643 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
9644 for (i = 0; i < reg_num; i++)
ea4750ca
JS
9645 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
9646 for (i = 0; i < separator_num; i++)
9647 *reg++ = SEPARATOR_VALUE;
ddb54554 9648 data_num_sum = reg_num + separator_num;
ea4750ca 9649
ddb54554
GH
9650 reg_num = ARRAY_SIZE(common_reg_addr_list);
9651 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
9652 for (i = 0; i < reg_num; i++)
ea4750ca
JS
9653 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
9654 for (i = 0; i < separator_num; i++)
9655 *reg++ = SEPARATOR_VALUE;
ddb54554 9656 data_num_sum += reg_num + separator_num;
ea4750ca 9657
ddb54554
GH
9658 reg_num = ARRAY_SIZE(ring_reg_addr_list);
9659 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
ea4750ca 9660 for (j = 0; j < kinfo->num_tqps; j++) {
ddb54554 9661 for (i = 0; i < reg_num; i++)
ea4750ca
JS
9662 *reg++ = hclge_read_dev(&hdev->hw,
9663 ring_reg_addr_list[i] +
ddb54554 9664 HCLGE_RING_REG_OFFSET * j);
ea4750ca
JS
9665 for (i = 0; i < separator_num; i++)
9666 *reg++ = SEPARATOR_VALUE;
9667 }
ddb54554 9668 data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
ea4750ca 9669
ddb54554
GH
9670 reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
9671 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
ea4750ca 9672 for (j = 0; j < hdev->num_msi_used - 1; j++) {
ddb54554 9673 for (i = 0; i < reg_num; i++)
ea4750ca
JS
9674 *reg++ = hclge_read_dev(&hdev->hw,
9675 tqp_intr_reg_addr_list[i] +
ddb54554 9676 HCLGE_RING_INT_REG_OFFSET * j);
ea4750ca
JS
9677 for (i = 0; i < separator_num; i++)
9678 *reg++ = SEPARATOR_VALUE;
9679 }
ddb54554
GH
9680 data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
9681
9682 return data_num_sum;
9683}
9684
9685static int hclge_get_regs_len(struct hnae3_handle *handle)
9686{
9687 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
9688 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9689 struct hclge_vport *vport = hclge_get_vport(handle);
9690 struct hclge_dev *hdev = vport->back;
9691 int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
9692 int regs_lines_32_bit, regs_lines_64_bit;
9693 int ret;
9694
9695 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
9696 if (ret) {
9697 dev_err(&hdev->pdev->dev,
9698 "Get register number failed, ret = %d.\n", ret);
9699 return ret;
9700 }
9701
9702 ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
9703 if (ret) {
9704 dev_err(&hdev->pdev->dev,
9705 "Get dfx reg len failed, ret = %d.\n", ret);
9706 return ret;
9707 }
9708
9709 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
9710 REG_SEPARATOR_LINE;
9711 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
9712 REG_SEPARATOR_LINE;
9713 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
9714 REG_SEPARATOR_LINE;
9715 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
9716 REG_SEPARATOR_LINE;
9717 regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
9718 REG_SEPARATOR_LINE;
9719 regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
9720 REG_SEPARATOR_LINE;
9721
9722 return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
9723 tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
9724 regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
9725}
9726
9727static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
9728 void *data)
9729{
9730 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9731 struct hclge_vport *vport = hclge_get_vport(handle);
9732 struct hclge_dev *hdev = vport->back;
9733 u32 regs_num_32_bit, regs_num_64_bit;
9734 int i, reg_num, separator_num, ret;
9735 u32 *reg = data;
9736
9737 *version = hdev->fw_version;
9738
9739 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
9740 if (ret) {
9741 dev_err(&hdev->pdev->dev,
9742 "Get register number failed, ret = %d.\n", ret);
9743 return;
9744 }
9745
9746 reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
ea4750ca 9747
ea4750ca 9748 ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
77b34110
FL
9749 if (ret) {
9750 dev_err(&hdev->pdev->dev,
9751 "Get 32 bit register failed, ret = %d.\n", ret);
9752 return;
9753 }
ddb54554
GH
9754 reg_num = regs_num_32_bit;
9755 reg += reg_num;
9756 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
9757 for (i = 0; i < separator_num; i++)
9758 *reg++ = SEPARATOR_VALUE;
77b34110 9759
ea4750ca 9760 ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
ddb54554 9761 if (ret) {
77b34110
FL
9762 dev_err(&hdev->pdev->dev,
9763 "Get 64 bit register failed, ret = %d.\n", ret);
ddb54554
GH
9764 return;
9765 }
9766 reg_num = regs_num_64_bit * 2;
9767 reg += reg_num;
9768 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
9769 for (i = 0; i < separator_num; i++)
9770 *reg++ = SEPARATOR_VALUE;
9771
9772 ret = hclge_get_dfx_reg(hdev, reg);
9773 if (ret)
9774 dev_err(&hdev->pdev->dev,
9775 "Get dfx register failed, ret = %d.\n", ret);
77b34110
FL
9776}
9777
f6f75abc 9778static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
07f8e940
JS
9779{
9780 struct hclge_set_led_state_cmd *req;
9781 struct hclge_desc desc;
9782 int ret;
9783
9784 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
9785
9786 req = (struct hclge_set_led_state_cmd *)desc.data;
e4e87715
PL
9787 hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
9788 HCLGE_LED_LOCATE_STATE_S, locate_led_status);
07f8e940
JS
9789
9790 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9791 if (ret)
9792 dev_err(&hdev->pdev->dev,
9793 "Send set led state cmd error, ret =%d\n", ret);
9794
9795 return ret;
9796}
9797
9798enum hclge_led_status {
9799 HCLGE_LED_OFF,
9800 HCLGE_LED_ON,
9801 HCLGE_LED_NO_CHANGE = 0xFF,
9802};
9803
9804static int hclge_set_led_id(struct hnae3_handle *handle,
9805 enum ethtool_phys_id_state status)
9806{
07f8e940
JS
9807 struct hclge_vport *vport = hclge_get_vport(handle);
9808 struct hclge_dev *hdev = vport->back;
07f8e940
JS
9809
9810 switch (status) {
9811 case ETHTOOL_ID_ACTIVE:
f6f75abc 9812 return hclge_set_led_status(hdev, HCLGE_LED_ON);
07f8e940 9813 case ETHTOOL_ID_INACTIVE:
f6f75abc 9814 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
07f8e940 9815 default:
f6f75abc 9816 return -EINVAL;
07f8e940 9817 }
07f8e940
JS
9818}
9819
0979aa0b
FL
9820static void hclge_get_link_mode(struct hnae3_handle *handle,
9821 unsigned long *supported,
9822 unsigned long *advertising)
9823{
9824 unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
9825 struct hclge_vport *vport = hclge_get_vport(handle);
9826 struct hclge_dev *hdev = vport->back;
9827 unsigned int idx = 0;
9828
9829 for (; idx < size; idx++) {
9830 supported[idx] = hdev->hw.mac.supported[idx];
9831 advertising[idx] = hdev->hw.mac.advertising[idx];
9832 }
9833}
9834
1731be4c 9835static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
5c9f6b39
PL
9836{
9837 struct hclge_vport *vport = hclge_get_vport(handle);
9838 struct hclge_dev *hdev = vport->back;
9839
9840 return hclge_config_gro(hdev, enable);
9841}
9842
46a3df9f
S
9843static const struct hnae3_ae_ops hclge_ops = {
9844 .init_ae_dev = hclge_init_ae_dev,
9845 .uninit_ae_dev = hclge_uninit_ae_dev,
6b9a97ee
HT
9846 .flr_prepare = hclge_flr_prepare,
9847 .flr_done = hclge_flr_done,
46a3df9f
S
9848 .init_client_instance = hclge_init_client_instance,
9849 .uninit_client_instance = hclge_uninit_client_instance,
84e095d6
SM
9850 .map_ring_to_vector = hclge_map_ring_to_vector,
9851 .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
46a3df9f 9852 .get_vector = hclge_get_vector,
0d3e6631 9853 .put_vector = hclge_put_vector,
46a3df9f 9854 .set_promisc_mode = hclge_set_promisc_mode,
c39c4d98 9855 .set_loopback = hclge_set_loopback,
46a3df9f
S
9856 .start = hclge_ae_start,
9857 .stop = hclge_ae_stop,
a6d818e3
YL
9858 .client_start = hclge_client_start,
9859 .client_stop = hclge_client_stop,
46a3df9f
S
9860 .get_status = hclge_get_status,
9861 .get_ksettings_an_result = hclge_get_ksettings_an_result,
46a3df9f
S
9862 .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
9863 .get_media_type = hclge_get_media_type,
22f48e24 9864 .check_port_speed = hclge_check_port_speed,
7e6ec914
JS
9865 .get_fec = hclge_get_fec,
9866 .set_fec = hclge_set_fec,
46a3df9f
S
9867 .get_rss_key_size = hclge_get_rss_key_size,
9868 .get_rss_indir_size = hclge_get_rss_indir_size,
9869 .get_rss = hclge_get_rss,
9870 .set_rss = hclge_set_rss,
f7db940a 9871 .set_rss_tuple = hclge_set_rss_tuple,
07d29954 9872 .get_rss_tuple = hclge_get_rss_tuple,
46a3df9f
S
9873 .get_tc_size = hclge_get_tc_size,
9874 .get_mac_addr = hclge_get_mac_addr,
9875 .set_mac_addr = hclge_set_mac_addr,
26483246 9876 .do_ioctl = hclge_do_ioctl,
46a3df9f
S
9877 .add_uc_addr = hclge_add_uc_addr,
9878 .rm_uc_addr = hclge_rm_uc_addr,
9879 .add_mc_addr = hclge_add_mc_addr,
9880 .rm_mc_addr = hclge_rm_mc_addr,
9881 .set_autoneg = hclge_set_autoneg,
9882 .get_autoneg = hclge_get_autoneg,
22f48e24 9883 .restart_autoneg = hclge_restart_autoneg,
7786a996 9884 .halt_autoneg = hclge_halt_autoneg,
46a3df9f 9885 .get_pauseparam = hclge_get_pauseparam,
61387774 9886 .set_pauseparam = hclge_set_pauseparam,
46a3df9f
S
9887 .set_mtu = hclge_set_mtu,
9888 .reset_queue = hclge_reset_tqp,
9889 .get_stats = hclge_get_stats,
615466ce 9890 .get_mac_stats = hclge_get_mac_stat,
46a3df9f
S
9891 .update_stats = hclge_update_stats,
9892 .get_strings = hclge_get_strings,
9893 .get_sset_count = hclge_get_sset_count,
9894 .get_fw_version = hclge_get_fw_version,
9895 .get_mdix_mode = hclge_get_mdix_mode,
391b5e93 9896 .enable_vlan_filter = hclge_enable_vlan_filter,
dc8131d8 9897 .set_vlan_filter = hclge_set_vlan_filter,
46a3df9f 9898 .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
052ece6d 9899 .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
4ed340ab 9900 .reset_event = hclge_reset_event,
123297b7 9901 .get_reset_level = hclge_get_reset_level,
720bd583 9902 .set_default_reset_request = hclge_set_def_reset_request,
09f2af64
PL
9903 .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
9904 .set_channels = hclge_set_channels,
482d2e9c 9905 .get_channels = hclge_get_channels,
77b34110
FL
9906 .get_regs_len = hclge_get_regs_len,
9907 .get_regs = hclge_get_regs,
07f8e940 9908 .set_led_id = hclge_set_led_id,
0979aa0b 9909 .get_link_mode = hclge_get_link_mode,
dd74f815
JS
9910 .add_fd_entry = hclge_add_fd_entry,
9911 .del_fd_entry = hclge_del_fd_entry,
6871af29 9912 .del_all_fd_entries = hclge_del_all_fd_entries,
05c2314f
JS
9913 .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
9914 .get_fd_rule_info = hclge_get_fd_rule_info,
9915 .get_fd_all_rules = hclge_get_all_rules,
6871af29 9916 .restore_fd_rules = hclge_restore_fd_entries,
c17852a8 9917 .enable_fd = hclge_enable_fd,
d93ed94f 9918 .add_arfs_entry = hclge_add_fd_entry_by_arfs,
3c666b58 9919 .dbg_run_cmd = hclge_dbg_run_cmd,
381c356e 9920 .handle_hw_ras_error = hclge_handle_hw_ras_error,
4d60291b
HT
9921 .get_hw_reset_stat = hclge_get_hw_reset_stat,
9922 .ae_dev_resetting = hclge_ae_dev_resetting,
9923 .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
5c9f6b39 9924 .set_gro_en = hclge_gro_en,
0c29d191 9925 .get_global_queue_id = hclge_covert_handle_qid_global,
8cdb992f 9926 .set_timer_task = hclge_set_timer_task,
c8a8045b
HT
9927 .mac_connect_phy = hclge_mac_connect_phy,
9928 .mac_disconnect_phy = hclge_mac_disconnect_phy,
b524b38f 9929 .restore_vlan_table = hclge_restore_vlan_table,
46a3df9f
S
9930};
9931
9932static struct hnae3_ae_algo ae_algo = {
9933 .ops = &hclge_ops,
46a3df9f
S
9934 .pdev_id_table = ae_algo_pci_tbl,
9935};
9936
9937static int hclge_init(void)
9938{
9939 pr_info("%s is initializing\n", HCLGE_NAME);
9940
854cf33a
FL
9941 hnae3_register_ae_algo(&ae_algo);
9942
9943 return 0;
46a3df9f
S
9944}
9945
9946static void hclge_exit(void)
9947{
9948 hnae3_unregister_ae_algo(&ae_algo);
9949}
9950module_init(hclge_init);
9951module_exit(hclge_exit);
9952
9953MODULE_LICENSE("GPL");
9954MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
9955MODULE_DESCRIPTION("HCLGE Driver");
9956MODULE_VERSION(HCLGE_MOD_VERSION);