net: hns3: add support for 1280 queues
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"

#define HCLGE_NAME			"hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
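/* The two macros above are used together: g_mac_stats_string[] below records
 * the byte offset of each counter inside struct hclge_mac_stats, and
 * hclge_comm_get_stats() later reads a counter back with something like
 *	HCLGE_STATS_READ(&hdev->mac_stats,
 *			 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num));
 * which simply dereferences a u64 at that offset.
 */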

#define HCLGE_BUF_SIZE_UNIT	256U
#define HCLGE_BUF_MUL_BY	2
#define HCLGE_BUF_DIV_BY	2
#define NEED_RESERVE_TC_NUM	2
#define BUF_MAX_PERCENT		100
#define BUF_RESERVE_PERCENT	90

#define HCLGE_RESET_MAX_FAIL_CNT	5
#define HCLGE_RESET_SYNC_TIME		100
#define HCLGE_PF_RESET_SYNC_TIME	20
#define HCLGE_PF_RESET_SYNC_CNT		1500

/* Get DFX BD number offset */
#define HCLGE_DFX_BIOS_BD_OFFSET	1
#define HCLGE_DFX_SSU_0_BD_OFFSET	2
#define HCLGE_DFX_SSU_1_BD_OFFSET	3
#define HCLGE_DFX_IGU_BD_OFFSET		4
#define HCLGE_DFX_RPU_0_BD_OFFSET	5
#define HCLGE_DFX_RPU_1_BD_OFFSET	6
#define HCLGE_DFX_NCSI_BD_OFFSET	7
#define HCLGE_DFX_RTC_BD_OFFSET		8
#define HCLGE_DFX_PPP_BD_OFFSET		9
#define HCLGE_DFX_RCB_BD_OFFSET		10
#define HCLGE_DFX_TQP_BD_OFFSET		11
#define HCLGE_DFX_SSU_2_BD_OFFSET	12

#define HCLGE_LINK_STATUS_MS	10

#define HCLGE_VF_VPORT_START_NUM	1

static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
						   unsigned long *addr);
static int hclge_set_default_loopback(struct hclge_dev *hdev);

static void hclge_sync_mac_table(struct hclge_dev *hdev);
static void hclge_restore_hw_table(struct hclge_dev *hdev);
static void hclge_sync_promisc_mode(struct hclge_dev *hdev);

static struct hnae3_ae_algo ae_algo;

static struct workqueue_struct *hclge_wq;

static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);

94static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
95 HCLGE_CMDQ_TX_ADDR_H_REG,
96 HCLGE_CMDQ_TX_DEPTH_REG,
97 HCLGE_CMDQ_TX_TAIL_REG,
98 HCLGE_CMDQ_TX_HEAD_REG,
99 HCLGE_CMDQ_RX_ADDR_L_REG,
100 HCLGE_CMDQ_RX_ADDR_H_REG,
101 HCLGE_CMDQ_RX_DEPTH_REG,
102 HCLGE_CMDQ_RX_TAIL_REG,
103 HCLGE_CMDQ_RX_HEAD_REG,
104 HCLGE_VECTOR0_CMDQ_SRC_REG,
105 HCLGE_CMDQ_INTR_STS_REG,
106 HCLGE_CMDQ_INTR_EN_REG,
107 HCLGE_CMDQ_INTR_GEN_REG};
108
109static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
110 HCLGE_VECTOR0_OTER_EN_REG,
111 HCLGE_MISC_RESET_STS_REG,
112 HCLGE_MISC_VECTOR_INT_STS,
113 HCLGE_GLOBAL_RESET_REG,
114 HCLGE_FUN_RST_ING,
115 HCLGE_GRO_EN_REG};
116
117static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
118 HCLGE_RING_RX_ADDR_H_REG,
119 HCLGE_RING_RX_BD_NUM_REG,
120 HCLGE_RING_RX_BD_LENGTH_REG,
121 HCLGE_RING_RX_MERGE_EN_REG,
122 HCLGE_RING_RX_TAIL_REG,
123 HCLGE_RING_RX_HEAD_REG,
124 HCLGE_RING_RX_FBD_NUM_REG,
125 HCLGE_RING_RX_OFFSET_REG,
126 HCLGE_RING_RX_FBD_OFFSET_REG,
127 HCLGE_RING_RX_STASH_REG,
128 HCLGE_RING_RX_BD_ERR_REG,
129 HCLGE_RING_TX_ADDR_L_REG,
130 HCLGE_RING_TX_ADDR_H_REG,
131 HCLGE_RING_TX_BD_NUM_REG,
132 HCLGE_RING_TX_PRIORITY_REG,
133 HCLGE_RING_TX_TC_REG,
134 HCLGE_RING_TX_MERGE_EN_REG,
135 HCLGE_RING_TX_TAIL_REG,
136 HCLGE_RING_TX_HEAD_REG,
137 HCLGE_RING_TX_FBD_NUM_REG,
138 HCLGE_RING_TX_OFFSET_REG,
139 HCLGE_RING_TX_EBD_NUM_REG,
140 HCLGE_RING_TX_EBD_OFFSET_REG,
141 HCLGE_RING_TX_BD_ERR_REG,
142 HCLGE_RING_EN_REG};
143
144static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
145 HCLGE_TQP_INTR_GL0_REG,
146 HCLGE_TQP_INTR_GL1_REG,
147 HCLGE_TQP_INTR_GL2_REG,
148 HCLGE_TQP_INTR_RL_REG};
149
46a3df9f 150static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
eb66d503 151 "App Loopback test",
152 "Serdes serial Loopback test",
153 "Serdes parallel Loopback test",
154 "Phy Loopback test"
155};
156
157static const struct hclge_comm_stats_str g_mac_stats_string[] = {
158 {"mac_tx_mac_pause_num",
159 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
160 {"mac_rx_mac_pause_num",
161 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
d174ea75 162 {"mac_tx_control_pkt_num",
163 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
164 {"mac_rx_control_pkt_num",
165 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
166 {"mac_tx_pfc_pkt_num",
167 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
168 {"mac_tx_pfc_pri0_pkt_num",
169 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
170 {"mac_tx_pfc_pri1_pkt_num",
171 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
172 {"mac_tx_pfc_pri2_pkt_num",
173 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
174 {"mac_tx_pfc_pri3_pkt_num",
175 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
176 {"mac_tx_pfc_pri4_pkt_num",
177 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
178 {"mac_tx_pfc_pri5_pkt_num",
179 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
180 {"mac_tx_pfc_pri6_pkt_num",
181 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
182 {"mac_tx_pfc_pri7_pkt_num",
183 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
d174ea75 184 {"mac_rx_pfc_pkt_num",
185 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
186 {"mac_rx_pfc_pri0_pkt_num",
187 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
188 {"mac_rx_pfc_pri1_pkt_num",
189 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
190 {"mac_rx_pfc_pri2_pkt_num",
191 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
192 {"mac_rx_pfc_pri3_pkt_num",
193 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
194 {"mac_rx_pfc_pri4_pkt_num",
195 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
196 {"mac_rx_pfc_pri5_pkt_num",
197 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
198 {"mac_rx_pfc_pri6_pkt_num",
199 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
200 {"mac_rx_pfc_pri7_pkt_num",
201 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
202 {"mac_tx_total_pkt_num",
203 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
204 {"mac_tx_total_oct_num",
205 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
206 {"mac_tx_good_pkt_num",
207 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
208 {"mac_tx_bad_pkt_num",
209 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
210 {"mac_tx_good_oct_num",
211 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
212 {"mac_tx_bad_oct_num",
213 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
214 {"mac_tx_uni_pkt_num",
215 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
216 {"mac_tx_multi_pkt_num",
217 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
218 {"mac_tx_broad_pkt_num",
219 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
220 {"mac_tx_undersize_pkt_num",
221 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
222 {"mac_tx_oversize_pkt_num",
223 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
224 {"mac_tx_64_oct_pkt_num",
225 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
226 {"mac_tx_65_127_oct_pkt_num",
227 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
228 {"mac_tx_128_255_oct_pkt_num",
229 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
230 {"mac_tx_256_511_oct_pkt_num",
231 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
232 {"mac_tx_512_1023_oct_pkt_num",
233 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
234 {"mac_tx_1024_1518_oct_pkt_num",
235 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
236 {"mac_tx_1519_2047_oct_pkt_num",
237 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
238 {"mac_tx_2048_4095_oct_pkt_num",
239 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
240 {"mac_tx_4096_8191_oct_pkt_num",
241 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
242 {"mac_tx_8192_9216_oct_pkt_num",
243 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
244 {"mac_tx_9217_12287_oct_pkt_num",
245 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
246 {"mac_tx_12288_16383_oct_pkt_num",
247 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
248 {"mac_tx_1519_max_good_pkt_num",
249 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
250 {"mac_tx_1519_max_bad_pkt_num",
251 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
252 {"mac_rx_total_pkt_num",
253 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
254 {"mac_rx_total_oct_num",
255 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
256 {"mac_rx_good_pkt_num",
257 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
258 {"mac_rx_bad_pkt_num",
259 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
260 {"mac_rx_good_oct_num",
261 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
262 {"mac_rx_bad_oct_num",
263 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
264 {"mac_rx_uni_pkt_num",
265 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
266 {"mac_rx_multi_pkt_num",
267 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
268 {"mac_rx_broad_pkt_num",
269 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
270 {"mac_rx_undersize_pkt_num",
271 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
272 {"mac_rx_oversize_pkt_num",
273 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
274 {"mac_rx_64_oct_pkt_num",
275 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
276 {"mac_rx_65_127_oct_pkt_num",
277 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
278 {"mac_rx_128_255_oct_pkt_num",
279 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
280 {"mac_rx_256_511_oct_pkt_num",
281 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
282 {"mac_rx_512_1023_oct_pkt_num",
283 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
284 {"mac_rx_1024_1518_oct_pkt_num",
285 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
286 {"mac_rx_1519_2047_oct_pkt_num",
287 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
288 {"mac_rx_2048_4095_oct_pkt_num",
289 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
290 {"mac_rx_4096_8191_oct_pkt_num",
291 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
292 {"mac_rx_8192_9216_oct_pkt_num",
293 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
294 {"mac_rx_9217_12287_oct_pkt_num",
295 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
296 {"mac_rx_12288_16383_oct_pkt_num",
297 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
298 {"mac_rx_1519_max_good_pkt_num",
299 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
300 {"mac_rx_1519_max_bad_pkt_num",
301 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
46a3df9f 302
303 {"mac_tx_fragment_pkt_num",
304 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
305 {"mac_tx_undermin_pkt_num",
306 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
307 {"mac_tx_jabber_pkt_num",
308 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
309 {"mac_tx_err_all_pkt_num",
310 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
311 {"mac_tx_from_app_good_pkt_num",
312 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
313 {"mac_tx_from_app_bad_pkt_num",
314 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
315 {"mac_rx_fragment_pkt_num",
316 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
317 {"mac_rx_undermin_pkt_num",
318 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
319 {"mac_rx_jabber_pkt_num",
320 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
321 {"mac_rx_fcs_err_pkt_num",
322 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
323 {"mac_rx_send_app_good_pkt_num",
324 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
325 {"mac_rx_send_app_bad_pkt_num",
326 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
327};
328
329static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
330 {
331 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
7efffc64 332 .ethter_type = cpu_to_le16(ETH_P_LLDP),
0e02a53d 333 .mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
334 .i_port_bitmap = 0x1,
335 },
336};
337
338static const u8 hclge_hash_key[] = {
339 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
340 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
341 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
342 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
343 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
344};
345
346static const u32 hclge_dfx_bd_offset_list[] = {
347 HCLGE_DFX_BIOS_BD_OFFSET,
348 HCLGE_DFX_SSU_0_BD_OFFSET,
349 HCLGE_DFX_SSU_1_BD_OFFSET,
350 HCLGE_DFX_IGU_BD_OFFSET,
351 HCLGE_DFX_RPU_0_BD_OFFSET,
352 HCLGE_DFX_RPU_1_BD_OFFSET,
353 HCLGE_DFX_NCSI_BD_OFFSET,
354 HCLGE_DFX_RTC_BD_OFFSET,
355 HCLGE_DFX_PPP_BD_OFFSET,
356 HCLGE_DFX_RCB_BD_OFFSET,
357 HCLGE_DFX_TQP_BD_OFFSET,
358 HCLGE_DFX_SSU_2_BD_OFFSET
359};
360
361static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
362 HCLGE_OPC_DFX_BIOS_COMMON_REG,
363 HCLGE_OPC_DFX_SSU_REG_0,
364 HCLGE_OPC_DFX_SSU_REG_1,
365 HCLGE_OPC_DFX_IGU_EGU_REG,
366 HCLGE_OPC_DFX_RPU_REG_0,
367 HCLGE_OPC_DFX_RPU_REG_1,
368 HCLGE_OPC_DFX_NCSI_REG,
369 HCLGE_OPC_DFX_RTC_REG,
370 HCLGE_OPC_DFX_PPP_REG,
371 HCLGE_OPC_DFX_RCB_REG,
372 HCLGE_OPC_DFX_TQP_REG,
373 HCLGE_OPC_DFX_SSU_REG_2
374};
375
376static const struct key_info meta_data_key_info[] = {
377 { PACKET_TYPE_ID, 6},
378 { IP_FRAGEMENT, 1},
379 { ROCE_TYPE, 1},
380 { NEXT_KEY, 5},
381 { VLAN_NUMBER, 2},
382 { SRC_VPORT, 12},
383 { DST_VPORT, 12},
384 { TUNNEL_PACKET, 1},
385};
386
387static const struct key_info tuple_key_info[] = {
388 { OUTER_DST_MAC, 48},
389 { OUTER_SRC_MAC, 48},
390 { OUTER_VLAN_TAG_FST, 16},
391 { OUTER_VLAN_TAG_SEC, 16},
392 { OUTER_ETH_TYPE, 16},
393 { OUTER_L2_RSV, 16},
394 { OUTER_IP_TOS, 8},
395 { OUTER_IP_PROTO, 8},
396 { OUTER_SRC_IP, 32},
397 { OUTER_DST_IP, 32},
398 { OUTER_L3_RSV, 16},
399 { OUTER_SRC_PORT, 16},
400 { OUTER_DST_PORT, 16},
401 { OUTER_L4_RSV, 32},
402 { OUTER_TUN_VNI, 24},
403 { OUTER_TUN_FLOW_ID, 8},
404 { INNER_DST_MAC, 48},
405 { INNER_SRC_MAC, 48},
406 { INNER_VLAN_TAG_FST, 16},
407 { INNER_VLAN_TAG_SEC, 16},
408 { INNER_ETH_TYPE, 16},
409 { INNER_L2_RSV, 16},
410 { INNER_IP_TOS, 8},
411 { INNER_IP_PROTO, 8},
412 { INNER_SRC_IP, 32},
413 { INNER_DST_IP, 32},
414 { INNER_L3_RSV, 16},
415 { INNER_SRC_PORT, 16},
416 { INNER_DST_PORT, 16},
417 { INNER_L4_RSV, 32},
418};
419
d174ea75 420static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
46a3df9f 421{
91f384f6 422#define HCLGE_MAC_CMD_NUM 21
46a3df9f 423
1c6dfe6f 424 u64 *data = (u64 *)(&hdev->mac_stats);
46a3df9f 425 struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
a90bb9a5 426 __le64 *desc_data;
427 int i, k, n;
428 int ret;
429
430 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
431 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
432 if (ret) {
433 dev_err(&hdev->pdev->dev,
434 "Get MAC pkt stats fail, status = %d.\n", ret);
435
436 return ret;
437 }
438
439 for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
d174ea75 440 /* for special opcode 0032, only the first desc has the head */
46a3df9f 441 if (unlikely(i == 0)) {
a90bb9a5 442 desc_data = (__le64 *)(&desc[i].data[0]);
d174ea75 443 n = HCLGE_RD_FIRST_STATS_NUM;
46a3df9f 444 } else {
a90bb9a5 445 desc_data = (__le64 *)(&desc[i]);
d174ea75 446 n = HCLGE_RD_OTHER_STATS_NUM;
46a3df9f 447 }
d174ea75 448
46a3df9f 449 for (k = 0; k < n; k++) {
d174ea75 450 *data += le64_to_cpu(*desc_data);
451 data++;
452 desc_data++;
453 }
454 }
455
456 return 0;
457}
458
d174ea75 459static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
460{
1c6dfe6f 461 u64 *data = (u64 *)(&hdev->mac_stats);
d174ea75 462 struct hclge_desc *desc;
463 __le64 *desc_data;
464 u16 i, k, n;
465 int ret;
466
	/* This may be called inside atomic sections,
	 * so GFP_ATOMIC is more suitable here
	 */
470 desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
471 if (!desc)
472 return -ENOMEM;
9e6717af 473
d174ea75 474 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
475 ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
476 if (ret) {
477 kfree(desc);
478 return ret;
479 }
480
481 for (i = 0; i < desc_num; i++) {
482 /* for special opcode 0034, only the first desc has the head */
483 if (i == 0) {
484 desc_data = (__le64 *)(&desc[i].data[0]);
485 n = HCLGE_RD_FIRST_STATS_NUM;
486 } else {
487 desc_data = (__le64 *)(&desc[i]);
488 n = HCLGE_RD_OTHER_STATS_NUM;
489 }
490
491 for (k = 0; k < n; k++) {
492 *data += le64_to_cpu(*desc_data);
493 data++;
494 desc_data++;
495 }
496 }
497
498 kfree(desc);
499
500 return 0;
501}
502
503static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
504{
505 struct hclge_desc desc;
506 __le32 *desc_data;
507 u32 reg_num;
508 int ret;
509
510 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
511 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
512 if (ret)
513 return ret;
514
515 desc_data = (__le32 *)(&desc.data[0]);
516 reg_num = le32_to_cpu(*desc_data);
517
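	/* The BD count below works out to 1 + DIV_ROUND_UP(reg_num - 3, 4),
	 * i.e. the first descriptor presumably carries three 64-bit stats
	 * and each following descriptor carries four. For example, an
	 * assumed reg_num of 59 gives 1 + 14 = 15 descriptors.
	 */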
518 *desc_num = 1 + ((reg_num - 3) >> 2) +
519 (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
520
521 return 0;
522}
523
524static int hclge_mac_update_stats(struct hclge_dev *hdev)
525{
526 u32 desc_num;
527 int ret;
528
529 ret = hclge_mac_query_reg_num(hdev, &desc_num);
530
	/* if the query succeeds, the firmware supports the new statistics
	 * acquisition method; fall back to the old method on -EOPNOTSUPP
	 */
532 if (!ret)
533 ret = hclge_mac_update_stats_complete(hdev, desc_num);
534 else if (ret == -EOPNOTSUPP)
535 ret = hclge_mac_update_stats_defective(hdev);
536 else
537 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
538
539 return ret;
540}
541
542static int hclge_tqps_update_stats(struct hnae3_handle *handle)
543{
544 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
545 struct hclge_vport *vport = hclge_get_vport(handle);
546 struct hclge_dev *hdev = vport->back;
547 struct hnae3_queue *queue;
548 struct hclge_desc desc[1];
549 struct hclge_tqp *tqp;
550 int ret, i;
551
552 for (i = 0; i < kinfo->num_tqps; i++) {
553 queue = handle->kinfo.tqp[i];
554 tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_RX_STATS */
4279b4d5 556 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
557 true);
558
9a5ef4aa 559 desc[0].data[0] = cpu_to_le32(tqp->index);
46a3df9f
S
560 ret = hclge_cmd_send(&hdev->hw, desc, 1);
561 if (ret) {
562 dev_err(&hdev->pdev->dev,
563 "Query tqp stat fail, status = %d,queue = %d\n",
9b2f3477 564 ret, i);
46a3df9f
S
565 return ret;
566 }
567 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
cf72fa63 568 le32_to_cpu(desc[0].data[1]);
46a3df9f
S
569 }
570
571 for (i = 0; i < kinfo->num_tqps; i++) {
572 queue = handle->kinfo.tqp[i];
573 tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_TX_STATS */
575 hclge_cmd_setup_basic_desc(&desc[0],
4279b4d5 576 HCLGE_OPC_QUERY_TX_STATS,
577 true);
578
9a5ef4aa 579 desc[0].data[0] = cpu_to_le32(tqp->index);
46a3df9f
S
580 ret = hclge_cmd_send(&hdev->hw, desc, 1);
581 if (ret) {
582 dev_err(&hdev->pdev->dev,
583 "Query tqp stat fail, status = %d,queue = %d\n",
584 ret, i);
585 return ret;
586 }
587 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
cf72fa63 588 le32_to_cpu(desc[0].data[1]);
589 }
590
591 return 0;
592}
593
594static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
595{
596 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
597 struct hclge_tqp *tqp;
598 u64 *buff = data;
599 int i;
600
601 for (i = 0; i < kinfo->num_tqps; i++) {
602 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
a90bb9a5 603 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
46a3df9f
S
604 }
605
606 for (i = 0; i < kinfo->num_tqps; i++) {
607 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
a90bb9a5 608 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
46a3df9f
S
609 }
610
611 return buff;
612}
613
614static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
615{
616 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
617
	/* each tqp has a TX queue and an RX queue */
619 return kinfo->num_tqps * (2);
620}
621
622static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
623{
624 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
625 u8 *buff = data;
9d8d5a36 626 int i;
46a3df9f
S
627
628 for (i = 0; i < kinfo->num_tqps; i++) {
629 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
630 struct hclge_tqp, q);
0c218123 631 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
46a3df9f
S
632 tqp->index);
633 buff = buff + ETH_GSTRING_LEN;
634 }
635
636 for (i = 0; i < kinfo->num_tqps; i++) {
637 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
638 struct hclge_tqp, q);
0c218123 639 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
46a3df9f
S
640 tqp->index);
641 buff = buff + ETH_GSTRING_LEN;
642 }
643
644 return buff;
645}
646
ebaf1908 647static u64 *hclge_comm_get_stats(const void *comm_stats,
46a3df9f
S
648 const struct hclge_comm_stats_str strs[],
649 int size, u64 *data)
650{
651 u64 *buf = data;
652 u32 i;
653
654 for (i = 0; i < size; i++)
655 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
656
657 return buf + size;
658}
659
660static u8 *hclge_comm_get_strings(u32 stringset,
661 const struct hclge_comm_stats_str strs[],
662 int size, u8 *data)
663{
664 char *buff = (char *)data;
665 u32 i;
666
667 if (stringset != ETH_SS_STATS)
668 return buff;
669
670 for (i = 0; i < size; i++) {
18d219b7 671 snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
46a3df9f
S
672 buff = buff + ETH_GSTRING_LEN;
673 }
674
675 return (u8 *)buff;
676}
677
46a3df9f
S
678static void hclge_update_stats_for_all(struct hclge_dev *hdev)
679{
680 struct hnae3_handle *handle;
681 int status;
682
683 handle = &hdev->vport[0].nic;
684 if (handle->client) {
685 status = hclge_tqps_update_stats(handle);
686 if (status) {
687 dev_err(&hdev->pdev->dev,
688 "Update TQPS stats fail, status = %d.\n",
689 status);
690 }
691 }
692
693 status = hclge_mac_update_stats(hdev);
694 if (status)
695 dev_err(&hdev->pdev->dev,
696 "Update MAC stats fail, status = %d.\n", status);
46a3df9f
S
697}
698
699static void hclge_update_stats(struct hnae3_handle *handle,
700 struct net_device_stats *net_stats)
701{
702 struct hclge_vport *vport = hclge_get_vport(handle);
703 struct hclge_dev *hdev = vport->back;
46a3df9f
S
704 int status;
705
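	/* test_and_set_bit() serializes concurrent updaters: if another
	 * context is already refreshing the statistics, just return instead
	 * of running a second update in parallel.
	 */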
706 if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
707 return;
708
46a3df9f
S
709 status = hclge_mac_update_stats(hdev);
710 if (status)
711 dev_err(&hdev->pdev->dev,
712 "Update MAC stats fail, status = %d.\n",
713 status);
714
46a3df9f
S
715 status = hclge_tqps_update_stats(handle);
716 if (status)
717 dev_err(&hdev->pdev->dev,
718 "Update TQPS stats fail, status = %d.\n",
719 status);
720
c5f65480 721 clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
46a3df9f
S
722}
723
724static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
725{
4dc13b96
FL
726#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
727 HNAE3_SUPPORT_PHY_LOOPBACK |\
728 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
729 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
46a3df9f
S
730
731 struct hclge_vport *vport = hclge_get_vport(handle);
732 struct hclge_dev *hdev = vport->back;
733 int count = 0;
734
	/* Loopback test support rules:
	 * mac: only GE mode supports it
	 * serdes: all mac modes support it, including GE/XGE/LGE/CGE
	 * phy: only supported when a phy device exists on the board
	 */
740 if (stringset == ETH_SS_TEST) {
741 /* clear loopback bit flags at first */
742 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
295ba232 743 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
4dc13b96 744 hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
46a3df9f
S
745 hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
746 hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
747 count += 1;
eb66d503 748 handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
46a3df9f 749 }
5fd50ac3 750
4dc13b96
FL
751 count += 2;
752 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
753 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
c9765a89
YM
754
755 if (hdev->hw.mac.phydev) {
756 count += 1;
757 handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
758 }
759
46a3df9f
S
760 } else if (stringset == ETH_SS_STATS) {
761 count = ARRAY_SIZE(g_mac_stats_string) +
46a3df9f
S
762 hclge_tqps_get_sset_count(handle, stringset);
763 }
764
765 return count;
766}
767
9b2f3477 768static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
46a3df9f
S
769 u8 *data)
770{
771 u8 *p = (char *)data;
772 int size;
773
774 if (stringset == ETH_SS_STATS) {
775 size = ARRAY_SIZE(g_mac_stats_string);
9b2f3477
WL
776 p = hclge_comm_get_strings(stringset, g_mac_stats_string,
777 size, p);
46a3df9f
S
778 p = hclge_tqps_get_strings(handle, p);
779 } else if (stringset == ETH_SS_TEST) {
eb66d503 780 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
9b2f3477 781 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
46a3df9f
S
782 ETH_GSTRING_LEN);
783 p += ETH_GSTRING_LEN;
784 }
4dc13b96 785 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
9b2f3477 786 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
4dc13b96
FL
787 ETH_GSTRING_LEN);
788 p += ETH_GSTRING_LEN;
789 }
790 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
791 memcpy(p,
792 hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
46a3df9f
S
793 ETH_GSTRING_LEN);
794 p += ETH_GSTRING_LEN;
795 }
796 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
9b2f3477 797 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
46a3df9f
S
798 ETH_GSTRING_LEN);
799 p += ETH_GSTRING_LEN;
800 }
801 }
802}
803
804static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
805{
806 struct hclge_vport *vport = hclge_get_vport(handle);
807 struct hclge_dev *hdev = vport->back;
808 u64 *p;
809
1c6dfe6f 810 p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
9b2f3477 811 ARRAY_SIZE(g_mac_stats_string), data);
46a3df9f
S
812 p = hclge_tqps_get_stats(handle, p);
813}
814
615466ce
YM
815static void hclge_get_mac_stat(struct hnae3_handle *handle,
816 struct hns3_mac_stats *mac_stats)
e511c97d
JS
817{
818 struct hclge_vport *vport = hclge_get_vport(handle);
819 struct hclge_dev *hdev = vport->back;
820
615466ce
YM
821 hclge_update_stats(handle, NULL);
822
1c6dfe6f
YL
823 mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
824 mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
e511c97d
JS
825}
826
46a3df9f 827static int hclge_parse_func_status(struct hclge_dev *hdev,
d44f9b63 828 struct hclge_func_status_cmd *status)
46a3df9f 829{
ded45d40
YM
830#define HCLGE_MAC_ID_MASK 0xF
831
46a3df9f
S
832 if (!(status->pf_state & HCLGE_PF_STATE_DONE))
833 return -EINVAL;
834
835 /* Set the pf to main pf */
836 if (status->pf_state & HCLGE_PF_STATE_MAIN)
837 hdev->flag |= HCLGE_FLAG_MAIN;
838 else
839 hdev->flag &= ~HCLGE_FLAG_MAIN;
840
ded45d40 841 hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
46a3df9f
S
842 return 0;
843}
844
845static int hclge_query_function_status(struct hclge_dev *hdev)
846{
b37ce587
YM
847#define HCLGE_QUERY_MAX_CNT 5
848
d44f9b63 849 struct hclge_func_status_cmd *req;
46a3df9f
S
850 struct hclge_desc desc;
851 int timeout = 0;
852 int ret;
853
854 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
d44f9b63 855 req = (struct hclge_func_status_cmd *)desc.data;
46a3df9f
S
856
857 do {
858 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
859 if (ret) {
860 dev_err(&hdev->pdev->dev,
9b2f3477 861 "query function status failed %d.\n", ret);
46a3df9f
S
862 return ret;
863 }
864
865 /* Check pf reset is done */
866 if (req->pf_state)
867 break;
868 usleep_range(1000, 2000);
b37ce587 869 } while (timeout++ < HCLGE_QUERY_MAX_CNT);
46a3df9f 870
60df7e91 871 return hclge_parse_func_status(hdev, req);
46a3df9f
S
872}
873
874static int hclge_query_pf_resource(struct hclge_dev *hdev)
875{
d44f9b63 876 struct hclge_pf_res_cmd *req;
46a3df9f
S
877 struct hclge_desc desc;
878 int ret;
879
880 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
881 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
882 if (ret) {
883 dev_err(&hdev->pdev->dev,
884 "query pf resource failed %d.\n", ret);
885 return ret;
886 }
887
d44f9b63 888 req = (struct hclge_pf_res_cmd *)desc.data;
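	/* The total TQP count is the sum of the normal and the extended TQP
	 * fields reported by firmware, which is presumably how the
	 * 1280-queue support named in this patch's subject is reached.
	 */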
889 hdev->num_tqps = le16_to_cpu(req->tqp_num) +
890 le16_to_cpu(req->ext_tqp_num);
60df7e91 891 hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
46a3df9f 892
368686be
YL
893 if (req->tx_buf_size)
894 hdev->tx_buf_size =
60df7e91 895 le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
368686be
YL
896 else
897 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
898
b9a400ac
YL
899 hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
900
368686be
YL
901 if (req->dv_buf_size)
902 hdev->dv_buf_size =
60df7e91 903 le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
368686be
YL
904 else
905 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
906
b9a400ac
YL
907 hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
908
e92a0843 909 if (hnae3_dev_roce_supported(hdev)) {
375dd5e4 910 hdev->roce_base_msix_offset =
60df7e91 911 hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
375dd5e4 912 HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
887c3820 913 hdev->num_roce_msi =
60df7e91 914 hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
e4e87715 915 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
46a3df9f 916
		/* the number of nic msix vectors always equals the roce's */
918 hdev->num_nic_msi = hdev->num_roce_msi;
919
46a3df9f
S
920 /* PF should have NIC vectors and Roce vectors,
921 * NIC vectors are queued before Roce vectors.
922 */
9b2f3477 923 hdev->num_msi = hdev->num_roce_msi +
375dd5e4 924 hdev->roce_base_msix_offset;
46a3df9f
S
925 } else {
926 hdev->num_msi =
60df7e91 927 hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
e4e87715 928 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
580a05f9
YL
929
930 hdev->num_nic_msi = hdev->num_msi;
931 }
932
933 if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
934 dev_err(&hdev->pdev->dev,
935 "Just %u msi resources, not enough for pf(min:2).\n",
936 hdev->num_nic_msi);
937 return -EINVAL;
46a3df9f
S
938 }
939
940 return 0;
941}
942
943static int hclge_parse_speed(int speed_cmd, int *speed)
944{
945 switch (speed_cmd) {
946 case 6:
947 *speed = HCLGE_MAC_SPEED_10M;
948 break;
949 case 7:
950 *speed = HCLGE_MAC_SPEED_100M;
951 break;
952 case 0:
953 *speed = HCLGE_MAC_SPEED_1G;
954 break;
955 case 1:
956 *speed = HCLGE_MAC_SPEED_10G;
957 break;
958 case 2:
959 *speed = HCLGE_MAC_SPEED_25G;
960 break;
961 case 3:
962 *speed = HCLGE_MAC_SPEED_40G;
963 break;
964 case 4:
965 *speed = HCLGE_MAC_SPEED_50G;
966 break;
967 case 5:
968 *speed = HCLGE_MAC_SPEED_100G;
969 break;
ae6f010c
GH
970 case 8:
971 *speed = HCLGE_MAC_SPEED_200G;
972 break;
46a3df9f
S
973 default:
974 return -EINVAL;
975 }
976
977 return 0;
978}
979
22f48e24
JS
980static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
981{
982 struct hclge_vport *vport = hclge_get_vport(handle);
983 struct hclge_dev *hdev = vport->back;
984 u32 speed_ability = hdev->hw.mac.speed_ability;
985 u32 speed_bit = 0;
986
987 switch (speed) {
988 case HCLGE_MAC_SPEED_10M:
989 speed_bit = HCLGE_SUPPORT_10M_BIT;
990 break;
991 case HCLGE_MAC_SPEED_100M:
992 speed_bit = HCLGE_SUPPORT_100M_BIT;
993 break;
994 case HCLGE_MAC_SPEED_1G:
995 speed_bit = HCLGE_SUPPORT_1G_BIT;
996 break;
997 case HCLGE_MAC_SPEED_10G:
998 speed_bit = HCLGE_SUPPORT_10G_BIT;
999 break;
1000 case HCLGE_MAC_SPEED_25G:
1001 speed_bit = HCLGE_SUPPORT_25G_BIT;
1002 break;
1003 case HCLGE_MAC_SPEED_40G:
1004 speed_bit = HCLGE_SUPPORT_40G_BIT;
1005 break;
1006 case HCLGE_MAC_SPEED_50G:
1007 speed_bit = HCLGE_SUPPORT_50G_BIT;
1008 break;
1009 case HCLGE_MAC_SPEED_100G:
1010 speed_bit = HCLGE_SUPPORT_100G_BIT;
1011 break;
ae6f010c
GH
1012 case HCLGE_MAC_SPEED_200G:
1013 speed_bit = HCLGE_SUPPORT_200G_BIT;
1014 break;
22f48e24
JS
1015 default:
1016 return -EINVAL;
1017 }
1018
1019 if (speed_bit & speed_ability)
1020 return 0;
1021
1022 return -EINVAL;
1023}
1024
ae6f010c 1025static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)
0979aa0b 1026{
0979aa0b 1027 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
db68ca0e 1028 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
88d10bd6
JS
1029 mac->supported);
1030 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1031 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1032 mac->supported);
1033 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1034 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1035 mac->supported);
1036 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1037 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1038 mac->supported);
1039 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1040 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1041 mac->supported);
ae6f010c
GH
1042 if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1043 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
1044 mac->supported);
88d10bd6 1045}
0979aa0b 1046
ae6f010c 1047static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)
88d10bd6
JS
1048{
1049 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1050 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1051 mac->supported);
0979aa0b 1052 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
db68ca0e 1053 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
88d10bd6
JS
1054 mac->supported);
1055 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1056 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1057 mac->supported);
1058 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1059 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1060 mac->supported);
1061 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1062 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1063 mac->supported);
ae6f010c
GH
1064 if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1065 linkmode_set_bit(
1066 ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
1067 mac->supported);
88d10bd6 1068}
0979aa0b 1069
ae6f010c 1070static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)
88d10bd6
JS
1071{
1072 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1073 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1074 mac->supported);
1075 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1076 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1077 mac->supported);
1078 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1079 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1080 mac->supported);
0979aa0b 1081 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
88d10bd6
JS
1082 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1083 mac->supported);
1084 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1085 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1086 mac->supported);
ae6f010c
GH
1087 if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1088 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
1089 mac->supported);
88d10bd6 1090}
0979aa0b 1091
ae6f010c 1092static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)
88d10bd6
JS
1093{
1094 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1095 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1096 mac->supported);
1097 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1098 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1099 mac->supported);
1100 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1101 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1102 mac->supported);
1103 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1104 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1105 mac->supported);
1106 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1107 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1108 mac->supported);
0979aa0b 1109 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
88d10bd6
JS
1110 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1111 mac->supported);
ae6f010c
GH
1112 if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1113 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
1114 mac->supported);
88d10bd6 1115}
0979aa0b 1116
7e6ec914
JS
1117static void hclge_convert_setting_fec(struct hclge_mac *mac)
1118{
1119 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1120 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1121
1122 switch (mac->speed) {
1123 case HCLGE_MAC_SPEED_10G:
1124 case HCLGE_MAC_SPEED_40G:
1125 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1126 mac->supported);
1127 mac->fec_ability =
1128 BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1129 break;
1130 case HCLGE_MAC_SPEED_25G:
1131 case HCLGE_MAC_SPEED_50G:
1132 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1133 mac->supported);
1134 mac->fec_ability =
1135 BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1136 BIT(HNAE3_FEC_AUTO);
1137 break;
1138 case HCLGE_MAC_SPEED_100G:
ae6f010c 1139 case HCLGE_MAC_SPEED_200G:
7e6ec914
JS
1140 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1141 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1142 break;
1143 default:
1144 mac->fec_ability = 0;
1145 break;
1146 }
1147}
1148
88d10bd6 1149static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
ae6f010c 1150 u16 speed_ability)
88d10bd6
JS
1151{
1152 struct hclge_mac *mac = &hdev->hw.mac;
1153
1154 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1155 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1156 mac->supported);
1157
1158 hclge_convert_setting_sr(mac, speed_ability);
1159 hclge_convert_setting_lr(mac, speed_ability);
1160 hclge_convert_setting_cr(mac, speed_ability);
74ba23a1 1161 if (hnae3_dev_fec_supported(hdev))
7e6ec914 1162 hclge_convert_setting_fec(mac);
88d10bd6
JS
1163
1164 linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1165 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
7e6ec914 1166 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
88d10bd6
JS
1167}
1168
1169static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
ae6f010c 1170 u16 speed_ability)
88d10bd6
JS
1171{
1172 struct hclge_mac *mac = &hdev->hw.mac;
1173
1174 hclge_convert_setting_kr(mac, speed_ability);
74ba23a1 1175 if (hnae3_dev_fec_supported(hdev))
7e6ec914 1176 hclge_convert_setting_fec(mac);
88d10bd6
JS
1177 linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1178 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
7e6ec914 1179 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
0979aa0b
FL
1180}
1181
f18635d5 1182static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
ae6f010c 1183 u16 speed_ability)
f18635d5
JS
1184{
1185 unsigned long *supported = hdev->hw.mac.supported;
1186
1187 /* default to support all speed for GE port */
1188 if (!speed_ability)
1189 speed_ability = HCLGE_SUPPORT_GE;
1190
1191 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1192 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1193 supported);
1194
1195 if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1196 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1197 supported);
1198 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1199 supported);
1200 }
1201
1202 if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1203 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1204 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1205 }
1206
1207 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1208 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1209 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
bc3781ed 1210 linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
f18635d5
JS
1211}
1212
ae6f010c 1213static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
0979aa0b
FL
1214{
1215 u8 media_type = hdev->hw.mac.media_type;
1216
f18635d5
JS
1217 if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1218 hclge_parse_fiber_link_mode(hdev, speed_ability);
1219 else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1220 hclge_parse_copper_link_mode(hdev, speed_ability);
88d10bd6
JS
1221 else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1222 hclge_parse_backplane_link_mode(hdev, speed_ability);
0979aa0b 1223}
37417c66 1224
ae6f010c 1225static u32 hclge_get_max_speed(u16 speed_ability)
ee9e4424 1226{
ae6f010c
GH
1227 if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1228 return HCLGE_MAC_SPEED_200G;
1229
ee9e4424
YL
1230 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1231 return HCLGE_MAC_SPEED_100G;
1232
1233 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1234 return HCLGE_MAC_SPEED_50G;
1235
1236 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1237 return HCLGE_MAC_SPEED_40G;
1238
1239 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1240 return HCLGE_MAC_SPEED_25G;
1241
1242 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1243 return HCLGE_MAC_SPEED_10G;
1244
1245 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1246 return HCLGE_MAC_SPEED_1G;
1247
1248 if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1249 return HCLGE_MAC_SPEED_100M;
1250
1251 if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1252 return HCLGE_MAC_SPEED_10M;
1253
1254 return HCLGE_MAC_SPEED_1G;
1255}
1256
46a3df9f
S
1257static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1258{
ae6f010c
GH
1259#define SPEED_ABILITY_EXT_SHIFT 8
1260
d44f9b63 1261 struct hclge_cfg_param_cmd *req;
46a3df9f 1262 u64 mac_addr_tmp_high;
ae6f010c 1263 u16 speed_ability_ext;
46a3df9f 1264 u64 mac_addr_tmp;
ebaf1908 1265 unsigned int i;
46a3df9f 1266
d44f9b63 1267 req = (struct hclge_cfg_param_cmd *)desc[0].data;
46a3df9f
S
1268
1269 /* get the configuration */
e4e87715
PL
1270 cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1271 HCLGE_CFG_VMDQ_M,
1272 HCLGE_CFG_VMDQ_S);
1273 cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1274 HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1275 cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1276 HCLGE_CFG_TQP_DESC_N_M,
1277 HCLGE_CFG_TQP_DESC_N_S);
1278
1279 cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1280 HCLGE_CFG_PHY_ADDR_M,
1281 HCLGE_CFG_PHY_ADDR_S);
1282 cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1283 HCLGE_CFG_MEDIA_TP_M,
1284 HCLGE_CFG_MEDIA_TP_S);
1285 cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1286 HCLGE_CFG_RX_BUF_LEN_M,
1287 HCLGE_CFG_RX_BUF_LEN_S);
46a3df9f
S
1288 /* get mac_address */
1289 mac_addr_tmp = __le32_to_cpu(req->param[2]);
e4e87715
PL
1290 mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1291 HCLGE_CFG_MAC_ADDR_H_M,
1292 HCLGE_CFG_MAC_ADDR_H_S);
1293
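	/* Compose the 48-bit MAC address: param[2] supplies the low 32 bits
	 * and the field taken from param[3] supplies the upper bits; the
	 * two-step shift below is equivalent to "<< 32" on the u64 value.
	 */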
1294 mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1295
e4e87715
PL
1296 cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1297 HCLGE_CFG_DEFAULT_SPEED_M,
1298 HCLGE_CFG_DEFAULT_SPEED_S);
1299 cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1300 HCLGE_CFG_RSS_SIZE_M,
1301 HCLGE_CFG_RSS_SIZE_S);
0e7a40cd 1302
46a3df9f
S
1303 for (i = 0; i < ETH_ALEN; i++)
1304 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1305
d44f9b63 1306 req = (struct hclge_cfg_param_cmd *)desc[1].data;
46a3df9f 1307 cfg->numa_node_map = __le32_to_cpu(req->param[0]);
0979aa0b 1308
e4e87715
PL
1309 cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1310 HCLGE_CFG_SPEED_ABILITY_M,
1311 HCLGE_CFG_SPEED_ABILITY_S);
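	/* The base field only holds the low SPEED_ABILITY_EXT_SHIFT (8) bits,
	 * so the extended field below is shifted up to form the full
	 * speed_ability bitmap (where bits such as 200G support presumably
	 * live).
	 */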
1312 speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
1313 HCLGE_CFG_SPEED_ABILITY_EXT_M,
1314 HCLGE_CFG_SPEED_ABILITY_EXT_S);
1315 cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;
1316
39932473
JS
1317 cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1318 HCLGE_CFG_UMV_TBL_SPACE_M,
1319 HCLGE_CFG_UMV_TBL_SPACE_S);
1320 if (!cfg->umv_space)
1321 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1322}
1323
/* hclge_get_cfg: query the static parameter from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled
 */
1328static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1329{
1330 struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
d44f9b63 1331 struct hclge_cfg_param_cmd *req;
ebaf1908
WL
1332 unsigned int i;
1333 int ret;
46a3df9f
S
1334
1335 for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
a90bb9a5
YL
1336 u32 offset = 0;
1337
d44f9b63 1338 req = (struct hclge_cfg_param_cmd *)desc[i].data;
46a3df9f
S
1339 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1340 true);
e4e87715
PL
1341 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1342 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* the length should be in units of 4 bytes when sent to hardware */
e4e87715
PL
1344 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1345 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
a90bb9a5 1346 req->offset = cpu_to_le32(offset);
46a3df9f
S
1347 }
1348
1349 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1350 if (ret) {
3f639907 1351 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
46a3df9f
S
1352 return ret;
1353 }
1354
1355 hclge_parse_cfg(hcfg, desc);
3f639907 1356
46a3df9f
S
1357 return 0;
1358}
1359
af2aedc5
GH
1360static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
1361{
1362#define HCLGE_MAX_NON_TSO_BD_NUM 8U
1363
1364 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1365
1366 ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1367 ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1368 ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
d9c7d20d 1369 ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
ab16b49c 1370 ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
af2aedc5
GH
1371}
1372
1373static void hclge_parse_dev_specs(struct hclge_dev *hdev,
1374 struct hclge_desc *desc)
1375{
1376 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1377 struct hclge_dev_specs_0_cmd *req0;
ab16b49c 1378 struct hclge_dev_specs_1_cmd *req1;
af2aedc5
GH
1379
1380 req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
ab16b49c 1381 req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;
af2aedc5
GH
1382
1383 ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
1384 ae_dev->dev_specs.rss_ind_tbl_size =
1385 le16_to_cpu(req0->rss_ind_tbl_size);
91bfae25 1386 ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
af2aedc5 1387 ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
d9c7d20d 1388 ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
ab16b49c 1389 ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
af2aedc5
GH
1390}
1391
13297028
GH
1392static void hclge_check_dev_specs(struct hclge_dev *hdev)
1393{
1394 struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
1395
1396 if (!dev_specs->max_non_tso_bd_num)
1397 dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1398 if (!dev_specs->rss_ind_tbl_size)
1399 dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1400 if (!dev_specs->rss_key_size)
1401 dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
1402 if (!dev_specs->max_tm_rate)
1403 dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
ab16b49c
HT
1404 if (!dev_specs->max_int_gl)
1405 dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
13297028
GH
1406}
1407
af2aedc5
GH
1408static int hclge_query_dev_specs(struct hclge_dev *hdev)
1409{
1410 struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
1411 int ret;
1412 int i;
1413
1414 /* set default specifications as devices lower than version V3 do not
1415 * support querying specifications from firmware.
1416 */
1417 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
1418 hclge_set_default_dev_specs(hdev);
1419 return 0;
1420 }
1421
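	/* every descriptor except the last carries HCLGE_CMD_FLAG_NEXT so
	 * that firmware treats them as one multi-BD query command
	 */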
1422 for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
1423 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
1424 true);
1425 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1426 }
1427 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
1428
1429 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
1430 if (ret)
1431 return ret;
1432
1433 hclge_parse_dev_specs(hdev, desc);
13297028 1434 hclge_check_dev_specs(hdev);
af2aedc5
GH
1435
1436 return 0;
1437}
1438
46a3df9f
S
1439static int hclge_get_cap(struct hclge_dev *hdev)
1440{
1441 int ret;
1442
1443 ret = hclge_query_function_status(hdev);
1444 if (ret) {
1445 dev_err(&hdev->pdev->dev,
1446 "query function status error %d.\n", ret);
1447 return ret;
1448 }
1449
1450 /* get pf resource */
60df7e91 1451 return hclge_query_pf_resource(hdev);
46a3df9f
S
1452}
1453
962e31bd
YL
1454static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1455{
1456#define HCLGE_MIN_TX_DESC 64
1457#define HCLGE_MIN_RX_DESC 64
1458
1459 if (!is_kdump_kernel())
1460 return;
1461
1462 dev_info(&hdev->pdev->dev,
1463 "Running kdump kernel. Using minimal resources\n");
1464
	/* the minimal number of queue pairs equals the number of vports */
1466 hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1467 hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1468 hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1469}
1470
46a3df9f
S
1471static int hclge_configure(struct hclge_dev *hdev)
1472{
1473 struct hclge_cfg cfg;
ebaf1908
WL
1474 unsigned int i;
1475 int ret;
46a3df9f
S
1476
1477 ret = hclge_get_cfg(hdev, &cfg);
727f514b 1478 if (ret)
46a3df9f 1479 return ret;
46a3df9f
S
1480
1481 hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1482 hdev->base_tqp_pid = 0;
0e7a40cd 1483 hdev->rss_size_max = cfg.rss_size_max;
46a3df9f 1484 hdev->rx_buf_len = cfg.rx_buf_len;
fbbb1536 1485 ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
46a3df9f 1486 hdev->hw.mac.media_type = cfg.media_type;
2a4776e1 1487 hdev->hw.mac.phy_addr = cfg.phy_addr;
c0425944
PL
1488 hdev->num_tx_desc = cfg.tqp_desc_num;
1489 hdev->num_rx_desc = cfg.tqp_desc_num;
46a3df9f 1490 hdev->tm_info.num_pg = 1;
cacde272 1491 hdev->tc_max = cfg.tc_num;
46a3df9f 1492 hdev->tm_info.hw_pfc_map = 0;
39932473 1493 hdev->wanted_umv_size = cfg.umv_space;
46a3df9f 1494
44122887 1495 if (hnae3_dev_fd_supported(hdev)) {
9abeb7d8 1496 hdev->fd_en = true;
44122887
JS
1497 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1498 }
9abeb7d8 1499
46a3df9f
S
1500 ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1501 if (ret) {
ead38a85
HT
1502 dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
1503 cfg.default_speed, ret);
46a3df9f
S
1504 return ret;
1505 }
1506
0979aa0b
FL
1507 hclge_parse_link_mode(hdev, cfg.speed_ability);
1508
ee9e4424
YL
1509 hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1510
cacde272
YL
1511 if ((hdev->tc_max > HNAE3_MAX_TC) ||
1512 (hdev->tc_max < 1)) {
adcf738b 1513 dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
cacde272
YL
1514 hdev->tc_max);
1515 hdev->tc_max = 1;
46a3df9f
S
1516 }
1517
cacde272
YL
1518 /* Dev does not support DCB */
1519 if (!hnae3_dev_dcb_supported(hdev)) {
1520 hdev->tc_max = 1;
1521 hdev->pfc_max = 0;
1522 } else {
1523 hdev->pfc_max = hdev->tc_max;
1524 }
1525
a2987975 1526 hdev->tm_info.num_tc = 1;
cacde272 1527
	/* currently, non-contiguous tc is not supported */
cacde272 1529 for (i = 0; i < hdev->tm_info.num_tc; i++)
e4e87715 1530 hnae3_set_bit(hdev->hw_tc_map, i, 1);
46a3df9f 1531
71b83869 1532 hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
46a3df9f 1533
962e31bd
YL
1534 hclge_init_kdump_kernel_config(hdev);
1535
08125454
YL
1536 /* Set the init affinity based on pci func number */
1537 i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
1538 i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
1539 cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
1540 &hdev->affinity_mask);
1541
46a3df9f
S
1542 return ret;
1543}
1544
9f5a9816
HT
1545static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
1546 u16 tso_mss_max)
46a3df9f 1547{
d44f9b63 1548 struct hclge_cfg_tso_status_cmd *req;
46a3df9f
S
1549 struct hclge_desc desc;
1550
1551 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1552
d44f9b63 1553 req = (struct hclge_cfg_tso_status_cmd *)desc.data;
9f5a9816
HT
1554 req->tso_mss_min = cpu_to_le16(tso_mss_min);
1555 req->tso_mss_max = cpu_to_le16(tso_mss_max);
46a3df9f
S
1556
1557 return hclge_cmd_send(&hdev->hw, &desc, 1);
1558}
1559
b26a6fea
PL
1560static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1561{
1562 struct hclge_cfg_gro_status_cmd *req;
1563 struct hclge_desc desc;
1564 int ret;
1565
1566 if (!hnae3_dev_gro_supported(hdev))
1567 return 0;
1568
1569 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1570 req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1571
639d84d0 1572 req->gro_en = en ? 1 : 0;
b26a6fea
PL
1573
1574 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1575 if (ret)
1576 dev_err(&hdev->pdev->dev,
1577 "GRO hardware config cmd failed, ret = %d\n", ret);
1578
1579 return ret;
1580}
1581
46a3df9f
S
1582static int hclge_alloc_tqps(struct hclge_dev *hdev)
1583{
1584 struct hclge_tqp *tqp;
1585 int i;
1586
1587 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1588 sizeof(struct hclge_tqp), GFP_KERNEL);
1589 if (!hdev->htqp)
1590 return -ENOMEM;
1591
1592 tqp = hdev->htqp;
1593
1594 for (i = 0; i < hdev->num_tqps; i++) {
1595 tqp->dev = &hdev->pdev->dev;
1596 tqp->index = i;
1597
1598 tqp->q.ae_algo = &ae_algo;
1599 tqp->q.buf_size = hdev->rx_buf_len;
c0425944
PL
1600 tqp->q.tx_desc_num = hdev->num_tx_desc;
1601 tqp->q.rx_desc_num = hdev->num_rx_desc;
1602
1603 /* need an extended offset to configure queues >=
1604 * HCLGE_TQP_MAX_SIZE_DEV_V2
1605 */
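		/* Queues below HCLGE_TQP_MAX_SIZE_DEV_V2 keep using the
		 * original register window; higher-numbered queues (what
		 * presumably makes the 1280 queues from this patch's subject
		 * reachable) go through the extended window computed in the
		 * else branch below.
		 */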
1606 if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
1607 tqp->q.io_base = hdev->hw.io_base +
1608 HCLGE_TQP_REG_OFFSET +
1609 i * HCLGE_TQP_REG_SIZE;
1610 else
1611 tqp->q.io_base = hdev->hw.io_base +
1612 HCLGE_TQP_REG_OFFSET +
1613 HCLGE_TQP_EXT_REG_OFFSET +
1614 (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
1615 HCLGE_TQP_REG_SIZE;
1616
1617 tqp++;
1618 }
1619
1620 return 0;
1621}
1622
1623static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1624 u16 tqp_pid, u16 tqp_vid, bool is_pf)
1625{
d44f9b63 1626 struct hclge_tqp_map_cmd *req;
46a3df9f
S
1627 struct hclge_desc desc;
1628 int ret;
1629
1630 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1631
d44f9b63 1632 req = (struct hclge_tqp_map_cmd *)desc.data;
46a3df9f 1633 req->tqp_id = cpu_to_le16(tqp_pid);
a90bb9a5 1634 req->tqp_vf = func_id;
b9a8f883
YL
1635 req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1636 if (!is_pf)
1637 req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
46a3df9f
S
1638 req->tqp_vid = cpu_to_le16(tqp_vid);
1639
1640 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907
JS
1641 if (ret)
1642 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
46a3df9f 1643
3f639907 1644 return ret;
46a3df9f
S
1645}
1646
672ad0ed 1647static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
46a3df9f 1648{
128b900d 1649 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
46a3df9f 1650 struct hclge_dev *hdev = vport->back;
7df7dad6 1651 int i, alloced;
46a3df9f
S
1652
1653 for (i = 0, alloced = 0; i < hdev->num_tqps &&
672ad0ed 1654 alloced < num_tqps; i++) {
46a3df9f
S
1655 if (!hdev->htqp[i].alloced) {
1656 hdev->htqp[i].q.handle = &vport->nic;
1657 hdev->htqp[i].q.tqp_index = alloced;
c0425944
PL
1658 hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1659 hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
128b900d 1660 kinfo->tqp[alloced] = &hdev->htqp[i].q;
46a3df9f 1661 hdev->htqp[i].alloced = true;
46a3df9f
S
1662 alloced++;
1663 }
1664 }
672ad0ed
HT
1665 vport->alloc_tqps = alloced;
1666 kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1667 vport->alloc_tqps / hdev->tm_info.num_tc);
46a3df9f 1668
580a05f9
YL
1669	/* ensure a one-to-one mapping between irq and queue by default */
1670 kinfo->rss_size = min_t(u16, kinfo->rss_size,
1671 (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1672
46a3df9f
S
1673 return 0;
1674}
1675
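The two min_t() clamps above bound rss_size first by the queues available per TC and then by the interrupt vectors available per TC. A worked example with made-up counts:

/* illustrative only, not part of the driver */
#include <stdio.h>

static unsigned short min_u16(unsigned short a, unsigned short b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned short rss_size_max = 64;  /* hdev->rss_size_max */
	unsigned short alloc_tqps  = 96;   /* vport->alloc_tqps */
	unsigned short num_tc      = 4;    /* hdev->tm_info.num_tc */
	unsigned short num_nic_msi = 33;   /* hdev->num_nic_msi */
	unsigned short rss_size;

	rss_size = min_u16(rss_size_max, alloc_tqps / num_tc);     /* 24 */
	/* one vector is reserved; the rest must cover every TC's queues */
	rss_size = min_u16(rss_size, (num_nic_msi - 1) / num_tc);  /* 8 */

	printf("rss_size = %u\n", rss_size);
	return 0;
}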
c0425944
PL
1676static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1677 u16 num_tx_desc, u16 num_rx_desc)
1678
46a3df9f
S
1679{
1680 struct hnae3_handle *nic = &vport->nic;
1681 struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1682 struct hclge_dev *hdev = vport->back;
af958827 1683 int ret;
46a3df9f 1684
c0425944
PL
1685 kinfo->num_tx_desc = num_tx_desc;
1686 kinfo->num_rx_desc = num_rx_desc;
1687
46a3df9f 1688 kinfo->rx_buf_len = hdev->rx_buf_len;
46a3df9f 1689
672ad0ed 1690 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
46a3df9f
S
1691 sizeof(struct hnae3_queue *), GFP_KERNEL);
1692 if (!kinfo->tqp)
1693 return -ENOMEM;
1694
672ad0ed 1695 ret = hclge_assign_tqp(vport, num_tqps);
3f639907 1696 if (ret)
46a3df9f 1697 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
46a3df9f 1698
3f639907 1699 return ret;
46a3df9f
S
1700}
1701
7df7dad6
L
1702static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1703 struct hclge_vport *vport)
1704{
1705 struct hnae3_handle *nic = &vport->nic;
1706 struct hnae3_knic_private_info *kinfo;
1707 u16 i;
1708
1709 kinfo = &nic->kinfo;
205a24ca 1710 for (i = 0; i < vport->alloc_tqps; i++) {
7df7dad6
L
1711 struct hclge_tqp *q =
1712 container_of(kinfo->tqp[i], struct hclge_tqp, q);
1713 bool is_pf;
1714 int ret;
1715
1716 is_pf = !(vport->vport_id);
1717 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1718 i, is_pf);
1719 if (ret)
1720 return ret;
1721 }
1722
1723 return 0;
1724}
1725
1726static int hclge_map_tqp(struct hclge_dev *hdev)
1727{
1728 struct hclge_vport *vport = hdev->vport;
1729 u16 i, num_vport;
1730
1731 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1732 for (i = 0; i < num_vport; i++) {
1733 int ret;
1734
1735 ret = hclge_map_tqp_to_vport(hdev, vport);
1736 if (ret)
1737 return ret;
1738
1739 vport++;
1740 }
1741
1742 return 0;
1743}
1744
46a3df9f
S
1745static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1746{
1747 struct hnae3_handle *nic = &vport->nic;
1748 struct hclge_dev *hdev = vport->back;
1749 int ret;
1750
1751 nic->pdev = hdev->pdev;
1752 nic->ae_algo = &ae_algo;
1753 nic->numa_node_mask = hdev->numa_node_mask;
1754
b69c9737
YL
1755 ret = hclge_knic_setup(vport, num_tqps,
1756 hdev->num_tx_desc, hdev->num_rx_desc);
1757 if (ret)
1758 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
46a3df9f 1759
b69c9737 1760 return ret;
46a3df9f
S
1761}
1762
1763static int hclge_alloc_vport(struct hclge_dev *hdev)
1764{
1765 struct pci_dev *pdev = hdev->pdev;
1766 struct hclge_vport *vport;
1767 u32 tqp_main_vport;
1768 u32 tqp_per_vport;
1769 int num_vport, i;
1770 int ret;
1771
1772 /* We need to alloc a vport for main NIC of PF */
1773 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1774
38e62046 1775 if (hdev->num_tqps < num_vport) {
adcf738b 1776 dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
38e62046
HT
1777 hdev->num_tqps, num_vport);
1778 return -EINVAL;
1779 }
46a3df9f
S
1780
1781 /* Alloc the same number of TQPs for every vport */
1782 tqp_per_vport = hdev->num_tqps / num_vport;
1783 tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1784
1785 vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1786 GFP_KERNEL);
1787 if (!vport)
1788 return -ENOMEM;
1789
1790 hdev->vport = vport;
1791 hdev->num_alloc_vport = num_vport;
1792
2312e050
FL
1793 if (IS_ENABLED(CONFIG_PCI_IOV))
1794 hdev->num_alloc_vfs = hdev->num_req_vfs;
46a3df9f
S
1795
1796 for (i = 0; i < num_vport; i++) {
1797 vport->back = hdev;
1798 vport->vport_id = i;
6430f744 1799 vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
818f1675 1800 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
741fca16
JS
1801 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1802 vport->rxvlan_cfg.rx_vlan_offload_en = true;
c6075b19 1803 INIT_LIST_HEAD(&vport->vlan_list);
6dd86902 1804 INIT_LIST_HEAD(&vport->uc_mac_list);
1805 INIT_LIST_HEAD(&vport->mc_mac_list);
ee4bcd3b 1806 spin_lock_init(&vport->mac_list_lock);
46a3df9f
S
1807
1808 if (i == 0)
1809 ret = hclge_vport_setup(vport, tqp_main_vport);
1810 else
1811 ret = hclge_vport_setup(vport, tqp_per_vport);
1812 if (ret) {
1813 dev_err(&pdev->dev,
1814 "vport setup failed for vport %d, %d\n",
1815 i, ret);
1816 return ret;
1817 }
1818
1819 vport++;
1820 }
1821
1822 return 0;
1823}
1824
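Every vport receives the same integer share of TQPs, and the PF's main vport additionally absorbs the remainder. A tiny sketch of that split with example counts:

/* illustrative only, not part of the driver */
#include <stdio.h>

int main(void)
{
	unsigned int num_tqps = 1280;        /* hdev->num_tqps, example value */
	unsigned int num_vport = 1 + 30;     /* main NIC vport + requested VFs */
	unsigned int tqp_per_vport = num_tqps / num_vport;
	unsigned int tqp_main_vport = tqp_per_vport + num_tqps % num_vport;

	/* every vport gets the same share, the PF keeps the remainder */
	printf("per vport: %u, main vport: %u\n", tqp_per_vport, tqp_main_vport);
	return 0;
}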
acf61ecd
YL
1825static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1826 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f
S
1827{
1828/* TX buffer size is allocated in units of 128 bytes */
1829#define HCLGE_BUF_SIZE_UNIT_SHIFT 7
1830#define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15)
d44f9b63 1831 struct hclge_tx_buff_alloc_cmd *req;
46a3df9f
S
1832 struct hclge_desc desc;
1833 int ret;
1834 u8 i;
1835
d44f9b63 1836 req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
46a3df9f
S
1837
1838 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
f9f07091 1839 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
acf61ecd 1840 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
9ffe79a9 1841
46a3df9f
S
1842 req->tx_pkt_buff[i] =
1843 cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1844 HCLGE_BUF_SIZE_UPDATE_EN_MSK);
9ffe79a9 1845 }
46a3df9f
S
1846
1847 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 1848 if (ret)
46a3df9f
S
1849 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1850 ret);
46a3df9f 1851
3f639907 1852 return ret;
46a3df9f
S
1853}
1854
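The command field above carries the per-TC TX buffer size in 128-byte units, with bit 15 acting as the update-enable flag. A short sketch of the encoding; the 16 KB size is a made-up example:

/* illustrative only, not part of the driver */
#include <stdint.h>
#include <stdio.h>

#define EX_BUF_SIZE_UNIT_SHIFT  7          /* 128-byte granularity */
#define EX_BUF_SIZE_UPDATE_EN   (1u << 15)

int main(void)
{
	uint32_t tx_buf_size = 0x4000;  /* 16 KB, an example per-TC size */
	uint16_t field = (uint16_t)((tx_buf_size >> EX_BUF_SIZE_UNIT_SHIFT) |
				    EX_BUF_SIZE_UPDATE_EN);

	/* 0x4000 >> 7 = 0x80 (128 units of 128 bytes), plus the enable bit */
	printf("encoded field = 0x%04x\n", field);
	return 0;
}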
acf61ecd
YL
1855static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1856 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f 1857{
acf61ecd 1858 int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
46a3df9f 1859
3f639907
JS
1860 if (ret)
1861 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
46a3df9f 1862
3f639907 1863 return ret;
46a3df9f
S
1864}
1865
1a49f3c6 1866static u32 hclge_get_tc_num(struct hclge_dev *hdev)
46a3df9f 1867{
ebaf1908
WL
1868 unsigned int i;
1869 u32 cnt = 0;
46a3df9f
S
1870
1871 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1872 if (hdev->hw_tc_map & BIT(i))
1873 cnt++;
1874 return cnt;
1875}
1876
46a3df9f 1877/* Get the number of pfc enabled TCs, which have private buffer */
acf61ecd
YL
1878static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1879 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f
S
1880{
1881 struct hclge_priv_buf *priv;
ebaf1908
WL
1882 unsigned int i;
1883 int cnt = 0;
46a3df9f
S
1884
1885 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
acf61ecd 1886 priv = &buf_alloc->priv_buf[i];
46a3df9f
S
1887 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1888 priv->enable)
1889 cnt++;
1890 }
1891
1892 return cnt;
1893}
1894
1895/* Get the number of pfc disabled TCs, which have private buffer */
acf61ecd
YL
1896static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1897 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f
S
1898{
1899 struct hclge_priv_buf *priv;
ebaf1908
WL
1900 unsigned int i;
1901 int cnt = 0;
46a3df9f
S
1902
1903 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
acf61ecd 1904 priv = &buf_alloc->priv_buf[i];
46a3df9f
S
1905 if (hdev->hw_tc_map & BIT(i) &&
1906 !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1907 priv->enable)
1908 cnt++;
1909 }
1910
1911 return cnt;
1912}
1913
acf61ecd 1914static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f
S
1915{
1916 struct hclge_priv_buf *priv;
1917 u32 rx_priv = 0;
1918 int i;
1919
1920 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
acf61ecd 1921 priv = &buf_alloc->priv_buf[i];
46a3df9f
S
1922 if (priv->enable)
1923 rx_priv += priv->buf_size;
1924 }
1925 return rx_priv;
1926}
1927
acf61ecd 1928static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
9ffe79a9
YL
1929{
1930 u32 i, total_tx_size = 0;
1931
1932 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
acf61ecd 1933 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
9ffe79a9
YL
1934
1935 return total_tx_size;
1936}
1937
acf61ecd
YL
1938static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1939 struct hclge_pkt_buf_alloc *buf_alloc,
1940 u32 rx_all)
46a3df9f 1941{
1a49f3c6
YL
1942 u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1943 u32 tc_num = hclge_get_tc_num(hdev);
b9a400ac 1944 u32 shared_buf, aligned_mps;
46a3df9f
S
1945 u32 rx_priv;
1946 int i;
1947
b9a400ac 1948 aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
46a3df9f 1949
d221df4e 1950 if (hnae3_dev_dcb_supported(hdev))
b37ce587
YM
1951 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1952 hdev->dv_buf_size;
d221df4e 1953 else
b9a400ac 1954 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
368686be 1955 + hdev->dv_buf_size;
d221df4e 1956
db5936db 1957 shared_buf_tc = tc_num * aligned_mps + aligned_mps;
af854724
YL
1958 shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1959 HCLGE_BUF_SIZE_UNIT);
46a3df9f 1960
acf61ecd 1961 rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
af854724 1962 if (rx_all < rx_priv + shared_std)
46a3df9f
S
1963 return false;
1964
b9a400ac 1965 shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
acf61ecd 1966 buf_alloc->s_buf.buf_size = shared_buf;
368686be
YL
1967 if (hnae3_dev_dcb_supported(hdev)) {
1968 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1969 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
b37ce587
YM
1970 - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1971 HCLGE_BUF_SIZE_UNIT);
368686be 1972 } else {
b9a400ac 1973 buf_alloc->s_buf.self.high = aligned_mps +
368686be 1974 HCLGE_NON_DCB_ADDITIONAL_BUF;
1a49f3c6
YL
1975 buf_alloc->s_buf.self.low = aligned_mps;
1976 }
1977
1978 if (hnae3_dev_dcb_supported(hdev)) {
9e15be90
YL
1979 hi_thrd = shared_buf - hdev->dv_buf_size;
1980
1981 if (tc_num <= NEED_RESERVE_TC_NUM)
1982 hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
1983 / BUF_MAX_PERCENT;
1984
1a49f3c6 1985 if (tc_num)
9e15be90 1986 hi_thrd = hi_thrd / tc_num;
1a49f3c6 1987
b37ce587 1988 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
1a49f3c6 1989 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
b37ce587 1990 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
1a49f3c6
YL
1991 } else {
1992 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1993 lo_thrd = aligned_mps;
368686be 1994 }
46a3df9f
S
1995
1996 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1a49f3c6
YL
1997 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1998 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
46a3df9f
S
1999 }
2000
2001 return true;
2002}
2003
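The check above verifies that, after the per-TC private buffers, enough packet buffer remains for a shared pool of at least shared_std bytes, and then derives the shared-buffer watermarks. A rough worked example for a DCB-capable device, with made-up sizes:

/* illustrative only, not part of the driver */
#include <stdio.h>

#define EX_BUF_UNIT 256u

static unsigned int roundup_unit(unsigned int v)
{
	return (v + EX_BUF_UNIT - 1) / EX_BUF_UNIT * EX_BUF_UNIT;
}

int main(void)
{
	unsigned int mps = 1500, dv_buf = 8192, tc_num = 4;   /* example values */
	unsigned int rx_all = 65536, rx_priv = 32768;
	unsigned int aligned_mps = roundup_unit(mps);              /* 1536 */
	unsigned int shared_min = 2 * aligned_mps + dv_buf;        /* 11264 */
	unsigned int shared_tc  = tc_num * aligned_mps + aligned_mps;
	unsigned int shared_std = roundup_unit(shared_min > shared_tc ?
					       shared_min : shared_tc);
	unsigned int shared_buf;

	if (rx_all < rx_priv + shared_std) {
		printf("not enough room for the shared buffer\n");
		return 1;
	}
	/* whatever is left after the private buffers becomes the shared pool */
	shared_buf = (rx_all - rx_priv) / EX_BUF_UNIT * EX_BUF_UNIT;
	printf("shared_std=%u shared_buf=%u high=%u\n",
	       shared_std, shared_buf, shared_buf - dv_buf);
	return 0;
}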
acf61ecd
YL
2004static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
2005 struct hclge_pkt_buf_alloc *buf_alloc)
9ffe79a9
YL
2006{
2007 u32 i, total_size;
2008
2009 total_size = hdev->pkt_buf_size;
2010
2011 /* alloc tx buffer for all enabled tc */
2012 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
acf61ecd 2013 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
9ffe79a9 2014
b6b4f987
HT
2015 if (hdev->hw_tc_map & BIT(i)) {
2016 if (total_size < hdev->tx_buf_size)
2017 return -ENOMEM;
9ffe79a9 2018
368686be 2019 priv->tx_buf_size = hdev->tx_buf_size;
b6b4f987 2020 } else {
9ffe79a9 2021 priv->tx_buf_size = 0;
b6b4f987 2022 }
9ffe79a9
YL
2023
2024 total_size -= priv->tx_buf_size;
2025 }
2026
2027 return 0;
2028}
2029
8ca754b1
YL
2030static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
2031 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f 2032{
8ca754b1
YL
2033 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2034 u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
ebaf1908 2035 unsigned int i;
46a3df9f 2036
46a3df9f 2037 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
8ca754b1 2038 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
46a3df9f 2039
bb1fe9ea
YL
2040 priv->enable = 0;
2041 priv->wl.low = 0;
2042 priv->wl.high = 0;
2043 priv->buf_size = 0;
2044
2045 if (!(hdev->hw_tc_map & BIT(i)))
2046 continue;
2047
2048 priv->enable = 1;
46a3df9f
S
2049
2050 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
b37ce587 2051 priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
8ca754b1
YL
2052 priv->wl.high = roundup(priv->wl.low + aligned_mps,
2053 HCLGE_BUF_SIZE_UNIT);
46a3df9f
S
2054 } else {
2055 priv->wl.low = 0;
b37ce587
YM
2056 priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
2057 aligned_mps;
46a3df9f 2058 }
8ca754b1
YL
2059
2060 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
46a3df9f
S
2061 }
2062
8ca754b1
YL
2063 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2064}
46a3df9f 2065
8ca754b1
YL
2066static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
2067 struct hclge_pkt_buf_alloc *buf_alloc)
2068{
2069 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2070 int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
2071 int i;
46a3df9f
S
2072
2073 /* let the last to be cleared first */
2074 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
8ca754b1 2075 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
ebaf1908 2076 unsigned int mask = BIT((unsigned int)i);
46a3df9f 2077
ebaf1908
WL
2078 if (hdev->hw_tc_map & mask &&
2079 !(hdev->tm_info.hw_pfc_map & mask)) {
46a3df9f
S
2080 /* Clear the no pfc TC private buffer */
2081 priv->wl.low = 0;
2082 priv->wl.high = 0;
2083 priv->buf_size = 0;
2084 priv->enable = 0;
2085 no_pfc_priv_num--;
2086 }
2087
acf61ecd 2088 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
46a3df9f
S
2089 no_pfc_priv_num == 0)
2090 break;
2091 }
2092
8ca754b1
YL
2093 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2094}
46a3df9f 2095
8ca754b1
YL
2096static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2097 struct hclge_pkt_buf_alloc *buf_alloc)
2098{
2099 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2100 int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2101 int i;
46a3df9f
S
2102
2103 /* let the last to be cleared first */
2104 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
8ca754b1 2105 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
ebaf1908 2106 unsigned int mask = BIT((unsigned int)i);
46a3df9f 2107
ebaf1908
WL
2108 if (hdev->hw_tc_map & mask &&
2109 hdev->tm_info.hw_pfc_map & mask) {
46a3df9f
S
2110 /* Reduce the number of pfc TC with private buffer */
2111 priv->wl.low = 0;
2112 priv->enable = 0;
2113 priv->wl.high = 0;
2114 priv->buf_size = 0;
2115 pfc_priv_num--;
2116 }
2117
acf61ecd 2118 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
46a3df9f
S
2119 pfc_priv_num == 0)
2120 break;
2121 }
8ca754b1
YL
2122
2123 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2124}
2125
9e15be90
YL
2126static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2127 struct hclge_pkt_buf_alloc *buf_alloc)
2128{
2129#define COMPENSATE_BUFFER 0x3C00
2130#define COMPENSATE_HALF_MPS_NUM 5
2131#define PRIV_WL_GAP 0x1800
2132
2133 u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2134 u32 tc_num = hclge_get_tc_num(hdev);
2135 u32 half_mps = hdev->mps >> 1;
2136 u32 min_rx_priv;
2137 unsigned int i;
2138
2139 if (tc_num)
2140 rx_priv = rx_priv / tc_num;
2141
2142 if (tc_num <= NEED_RESERVE_TC_NUM)
2143 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2144
2145 min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2146 COMPENSATE_HALF_MPS_NUM * half_mps;
2147 min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2148 rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2149
2150 if (rx_priv < min_rx_priv)
2151 return false;
2152
2153 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2154 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2155
2156 priv->enable = 0;
2157 priv->wl.low = 0;
2158 priv->wl.high = 0;
2159 priv->buf_size = 0;
2160
2161 if (!(hdev->hw_tc_map & BIT(i)))
2162 continue;
2163
2164 priv->enable = 1;
2165 priv->buf_size = rx_priv;
2166 priv->wl.high = rx_priv - hdev->dv_buf_size;
2167 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2168 }
2169
2170 buf_alloc->s_buf.buf_size = 0;
2171
2172 return true;
2173}
2174
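This strategy gives each enabled TC an equal slice of the remaining packet buffer, holds back a reserve when only one or two TCs are active, and falls back to the shared-buffer strategies if the slice drops below the compensated minimum. A compact sketch with example numbers; the compensation constants mirror, but are not, the defines above:

/* illustrative only, not part of the driver */
#include <stdio.h>

int main(void)
{
	unsigned int rx_left = 98304, tc_num = 2, dv_buf = 8192, mps = 1500;
	unsigned int rx_priv = rx_left / tc_num;              /* 49152 */
	unsigned int min_rx_priv;

	if (tc_num <= 2)                                      /* NEED_RESERVE_TC_NUM */
		rx_priv = rx_priv * 90 / 100;                 /* keep 10% back */

	min_rx_priv = dv_buf + 0x3C00 + 5 * (mps >> 1);       /* compensation */
	rx_priv = rx_priv / 256 * 256;                        /* align down */

	if (rx_priv < ((min_rx_priv + 255) / 256 * 256))
		printf("fall back to the shared-buffer strategies\n");
	else
		printf("per-TC private buffer = %u bytes\n", rx_priv);
	return 0;
}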
8ca754b1
YL
2175/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2176 * @hdev: pointer to struct hclge_dev
2177 * @buf_alloc: pointer to buffer calculation data
2178 * @return: 0: calculation successful, negative: fail
2179 */
2180static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2181 struct hclge_pkt_buf_alloc *buf_alloc)
2182{
2183 /* When DCB is not supported, rx private buffer is not allocated. */
2184 if (!hnae3_dev_dcb_supported(hdev)) {
2185 u32 rx_all = hdev->pkt_buf_size;
2186
2187 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2188 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2189 return -ENOMEM;
2190
2191 return 0;
2192 }
2193
9e15be90
YL
2194 if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2195 return 0;
2196
8ca754b1
YL
2197 if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2198 return 0;
2199
2200 /* try to decrease the buffer size */
2201 if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2202 return 0;
2203
2204 if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2205 return 0;
2206
2207 if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
46a3df9f
S
2208 return 0;
2209
2210 return -ENOMEM;
2211}
2212
acf61ecd
YL
2213static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2214 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f 2215{
d44f9b63 2216 struct hclge_rx_priv_buff_cmd *req;
46a3df9f
S
2217 struct hclge_desc desc;
2218 int ret;
2219 int i;
2220
2221 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
d44f9b63 2222 req = (struct hclge_rx_priv_buff_cmd *)desc.data;
46a3df9f
S
2223
2224 /* Alloc private buffer TCs */
2225 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
acf61ecd 2226 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
46a3df9f
S
2227
2228 req->buf_num[i] =
2229 cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2230 req->buf_num[i] |=
5bca3b94 2231 cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
46a3df9f
S
2232 }
2233
b8c8bf47 2234 req->shared_buf =
acf61ecd 2235 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
b8c8bf47
YL
2236 (1 << HCLGE_TC0_PRI_BUF_EN_B));
2237
46a3df9f 2238 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 2239 if (ret)
46a3df9f
S
2240 dev_err(&hdev->pdev->dev,
2241 "rx private buffer alloc cmd failed %d\n", ret);
46a3df9f 2242
3f639907 2243 return ret;
46a3df9f
S
2244}
2245
acf61ecd
YL
2246static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2247 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f
S
2248{
2249 struct hclge_rx_priv_wl_buf *req;
2250 struct hclge_priv_buf *priv;
2251 struct hclge_desc desc[2];
2252 int i, j;
2253 int ret;
2254
2255 for (i = 0; i < 2; i++) {
2256 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2257 false);
2258 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2259
2260 /* The first descriptor set the NEXT bit to 1 */
2261 if (i == 0)
2262 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2263 else
2264 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2265
2266 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
acf61ecd
YL
2267 u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2268
2269 priv = &buf_alloc->priv_buf[idx];
46a3df9f
S
2270 req->tc_wl[j].high =
2271 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2272 req->tc_wl[j].high |=
3738287c 2273 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
46a3df9f
S
2274 req->tc_wl[j].low =
2275 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2276 req->tc_wl[j].low |=
3738287c 2277 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
46a3df9f
S
2278 }
2279 }
2280
2281	/* Send 2 descriptors at one time */
2282 ret = hclge_cmd_send(&hdev->hw, desc, 2);
3f639907 2283 if (ret)
46a3df9f
S
2284 dev_err(&hdev->pdev->dev,
2285 "rx private waterline config cmd failed %d\n",
2286 ret);
3f639907 2287 return ret;
46a3df9f
S
2288}
2289
acf61ecd
YL
2290static int hclge_common_thrd_config(struct hclge_dev *hdev,
2291 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f 2292{
acf61ecd 2293 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
46a3df9f
S
2294 struct hclge_rx_com_thrd *req;
2295 struct hclge_desc desc[2];
2296 struct hclge_tc_thrd *tc;
2297 int i, j;
2298 int ret;
2299
2300 for (i = 0; i < 2; i++) {
2301 hclge_cmd_setup_basic_desc(&desc[i],
2302 HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2303 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2304
2305 /* The first descriptor set the NEXT bit to 1 */
2306 if (i == 0)
2307 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2308 else
2309 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2310
2311 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2312 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2313
2314 req->com_thrd[j].high =
2315 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2316 req->com_thrd[j].high |=
3738287c 2317 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
46a3df9f
S
2318 req->com_thrd[j].low =
2319 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2320 req->com_thrd[j].low |=
3738287c 2321 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
46a3df9f
S
2322 }
2323 }
2324
2325 /* Send 2 descriptors at one time */
2326 ret = hclge_cmd_send(&hdev->hw, desc, 2);
3f639907 2327 if (ret)
46a3df9f
S
2328 dev_err(&hdev->pdev->dev,
2329 "common threshold config cmd failed %d\n", ret);
3f639907 2330 return ret;
46a3df9f
S
2331}
2332
acf61ecd
YL
2333static int hclge_common_wl_config(struct hclge_dev *hdev,
2334 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f 2335{
acf61ecd 2336 struct hclge_shared_buf *buf = &buf_alloc->s_buf;
46a3df9f
S
2337 struct hclge_rx_com_wl *req;
2338 struct hclge_desc desc;
2339 int ret;
2340
2341 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2342
2343 req = (struct hclge_rx_com_wl *)desc.data;
2344 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
3738287c 2345 req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
46a3df9f
S
2346
2347 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
3738287c 2348 req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
46a3df9f
S
2349
2350 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 2351 if (ret)
46a3df9f
S
2352 dev_err(&hdev->pdev->dev,
2353 "common waterline config cmd failed %d\n", ret);
46a3df9f 2354
3f639907 2355 return ret;
46a3df9f
S
2356}
2357
2358int hclge_buffer_alloc(struct hclge_dev *hdev)
2359{
acf61ecd 2360 struct hclge_pkt_buf_alloc *pkt_buf;
46a3df9f
S
2361 int ret;
2362
acf61ecd
YL
2363 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2364 if (!pkt_buf)
46a3df9f
S
2365 return -ENOMEM;
2366
acf61ecd 2367 ret = hclge_tx_buffer_calc(hdev, pkt_buf);
9ffe79a9
YL
2368 if (ret) {
2369 dev_err(&hdev->pdev->dev,
2370 "could not calc tx buffer size for all TCs %d\n", ret);
acf61ecd 2371 goto out;
9ffe79a9
YL
2372 }
2373
acf61ecd 2374 ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
46a3df9f
S
2375 if (ret) {
2376 dev_err(&hdev->pdev->dev,
2377 "could not alloc tx buffers %d\n", ret);
acf61ecd 2378 goto out;
46a3df9f
S
2379 }
2380
acf61ecd 2381 ret = hclge_rx_buffer_calc(hdev, pkt_buf);
46a3df9f
S
2382 if (ret) {
2383 dev_err(&hdev->pdev->dev,
2384 "could not calc rx priv buffer size for all TCs %d\n",
2385 ret);
acf61ecd 2386 goto out;
46a3df9f
S
2387 }
2388
acf61ecd 2389 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
46a3df9f
S
2390 if (ret) {
2391 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2392 ret);
acf61ecd 2393 goto out;
46a3df9f
S
2394 }
2395
2daf4a65 2396 if (hnae3_dev_dcb_supported(hdev)) {
acf61ecd 2397 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2daf4a65
YL
2398 if (ret) {
2399 dev_err(&hdev->pdev->dev,
2400 "could not configure rx private waterline %d\n",
2401 ret);
acf61ecd 2402 goto out;
2daf4a65 2403 }
46a3df9f 2404
acf61ecd 2405 ret = hclge_common_thrd_config(hdev, pkt_buf);
2daf4a65
YL
2406 if (ret) {
2407 dev_err(&hdev->pdev->dev,
2408 "could not configure common threshold %d\n",
2409 ret);
acf61ecd 2410 goto out;
2daf4a65 2411 }
46a3df9f
S
2412 }
2413
acf61ecd
YL
2414 ret = hclge_common_wl_config(hdev, pkt_buf);
2415 if (ret)
46a3df9f
S
2416 dev_err(&hdev->pdev->dev,
2417 "could not configure common waterline %d\n", ret);
46a3df9f 2418
acf61ecd
YL
2419out:
2420 kfree(pkt_buf);
2421 return ret;
46a3df9f
S
2422}
2423
2424static int hclge_init_roce_base_info(struct hclge_vport *vport)
2425{
2426 struct hnae3_handle *roce = &vport->roce;
2427 struct hnae3_handle *nic = &vport->nic;
2428
887c3820 2429 roce->rinfo.num_vectors = vport->back->num_roce_msi;
46a3df9f
S
2430
2431 if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2432 vport->back->num_msi_left == 0)
2433 return -EINVAL;
2434
2435 roce->rinfo.base_vector = vport->back->roce_base_vector;
2436
2437 roce->rinfo.netdev = nic->kinfo.netdev;
2438 roce->rinfo.roce_io_base = vport->back->hw.io_base;
2439
2440 roce->pdev = nic->pdev;
2441 roce->ae_algo = nic->ae_algo;
2442 roce->numa_node_mask = nic->numa_node_mask;
2443
2444 return 0;
2445}
2446
887c3820 2447static int hclge_init_msi(struct hclge_dev *hdev)
46a3df9f
S
2448{
2449 struct pci_dev *pdev = hdev->pdev;
887c3820
SM
2450 int vectors;
2451 int i;
46a3df9f 2452
580a05f9
YL
2453 vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2454 hdev->num_msi,
887c3820
SM
2455 PCI_IRQ_MSI | PCI_IRQ_MSIX);
2456 if (vectors < 0) {
2457 dev_err(&pdev->dev,
2458 "failed(%d) to allocate MSI/MSI-X vectors\n",
2459 vectors);
2460 return vectors;
46a3df9f 2461 }
887c3820
SM
2462 if (vectors < hdev->num_msi)
2463 dev_warn(&hdev->pdev->dev,
adcf738b 2464 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
887c3820 2465 hdev->num_msi, vectors);
46a3df9f 2466
887c3820
SM
2467 hdev->num_msi = vectors;
2468 hdev->num_msi_left = vectors;
580a05f9 2469
887c3820 2470 hdev->base_msi_vector = pdev->irq;
46a3df9f 2471 hdev->roce_base_vector = hdev->base_msi_vector +
375dd5e4 2472 hdev->roce_base_msix_offset;
46a3df9f 2473
46a3df9f
S
2474 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2475 sizeof(u16), GFP_KERNEL);
887c3820
SM
2476 if (!hdev->vector_status) {
2477 pci_free_irq_vectors(pdev);
46a3df9f 2478 return -ENOMEM;
887c3820 2479 }
46a3df9f
S
2480
2481 for (i = 0; i < hdev->num_msi; i++)
2482 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2483
887c3820
SM
2484 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2485 sizeof(int), GFP_KERNEL);
2486 if (!hdev->vector_irq) {
2487 pci_free_irq_vectors(pdev);
2488 return -ENOMEM;
46a3df9f 2489 }
46a3df9f
S
2490
2491 return 0;
2492}
2493
2d03eacc 2494static u8 hclge_check_speed_dup(u8 duplex, int speed)
46a3df9f 2495{
2d03eacc
YL
2496 if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2497 duplex = HCLGE_MAC_FULL;
46a3df9f 2498
2d03eacc 2499 return duplex;
46a3df9f
S
2500}
2501
2d03eacc
YL
2502static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2503 u8 duplex)
46a3df9f 2504{
d44f9b63 2505 struct hclge_config_mac_speed_dup_cmd *req;
46a3df9f
S
2506 struct hclge_desc desc;
2507 int ret;
2508
d44f9b63 2509 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
46a3df9f
S
2510
2511 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2512
63cbf7a9
YM
2513 if (duplex)
2514 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
46a3df9f
S
2515
2516 switch (speed) {
2517 case HCLGE_MAC_SPEED_10M:
e4e87715
PL
2518 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2519 HCLGE_CFG_SPEED_S, 6);
46a3df9f
S
2520 break;
2521 case HCLGE_MAC_SPEED_100M:
e4e87715
PL
2522 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2523 HCLGE_CFG_SPEED_S, 7);
46a3df9f
S
2524 break;
2525 case HCLGE_MAC_SPEED_1G:
e4e87715
PL
2526 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2527 HCLGE_CFG_SPEED_S, 0);
46a3df9f
S
2528 break;
2529 case HCLGE_MAC_SPEED_10G:
e4e87715
PL
2530 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2531 HCLGE_CFG_SPEED_S, 1);
46a3df9f
S
2532 break;
2533 case HCLGE_MAC_SPEED_25G:
e4e87715
PL
2534 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2535 HCLGE_CFG_SPEED_S, 2);
46a3df9f
S
2536 break;
2537 case HCLGE_MAC_SPEED_40G:
e4e87715
PL
2538 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2539 HCLGE_CFG_SPEED_S, 3);
46a3df9f
S
2540 break;
2541 case HCLGE_MAC_SPEED_50G:
e4e87715
PL
2542 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2543 HCLGE_CFG_SPEED_S, 4);
46a3df9f
S
2544 break;
2545 case HCLGE_MAC_SPEED_100G:
e4e87715
PL
2546 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2547 HCLGE_CFG_SPEED_S, 5);
46a3df9f 2548 break;
ae6f010c
GH
2549 case HCLGE_MAC_SPEED_200G:
2550 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2551 HCLGE_CFG_SPEED_S, 8);
2552 break;
46a3df9f 2553 default:
d7629e74 2554 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
46a3df9f
S
2555 return -EINVAL;
2556 }
2557
e4e87715
PL
2558 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2559 1);
46a3df9f
S
2560
2561 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2562 if (ret) {
2563 dev_err(&hdev->pdev->dev,
2564 "mac speed/duplex config cmd failed %d.\n", ret);
2565 return ret;
2566 }
2567
2d03eacc
YL
2568 return 0;
2569}
2570
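The switch above maps each MAC speed to the value written into the HCLGE_CFG_SPEED field of the command descriptor. The same mapping expressed as a lookup table, purely as an illustration; the struct and names are stand-ins, not driver definitions:

/* illustrative only, not part of the driver */
#include <stdio.h>

struct speed_map {
	int speed_mbps;
	unsigned int fw_code;   /* value written to the HCLGE_CFG_SPEED field */
};

static const struct speed_map speed_tbl[] = {
	{ 10,      6 }, { 100,    7 }, { 1000,   0 },
	{ 10000,   1 }, { 25000,  2 }, { 40000,  3 },
	{ 50000,   4 }, { 100000, 5 }, { 200000, 8 },
};

int main(void)
{
	for (unsigned int i = 0; i < sizeof(speed_tbl) / sizeof(speed_tbl[0]); i++)
		printf("%6d Mbps -> code %u\n",
		       speed_tbl[i].speed_mbps, speed_tbl[i].fw_code);
	return 0;
}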
2571int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2572{
68e1006f 2573 struct hclge_mac *mac = &hdev->hw.mac;
2d03eacc
YL
2574 int ret;
2575
2576 duplex = hclge_check_speed_dup(duplex, speed);
68e1006f
JS
2577 if (!mac->support_autoneg && mac->speed == speed &&
2578 mac->duplex == duplex)
2d03eacc
YL
2579 return 0;
2580
2581 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2582 if (ret)
2583 return ret;
2584
2585 hdev->hw.mac.speed = speed;
2586 hdev->hw.mac.duplex = duplex;
46a3df9f
S
2587
2588 return 0;
2589}
2590
2591static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2592 u8 duplex)
2593{
2594 struct hclge_vport *vport = hclge_get_vport(handle);
2595 struct hclge_dev *hdev = vport->back;
2596
2597 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2598}
2599
46a3df9f
S
2600static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2601{
d44f9b63 2602 struct hclge_config_auto_neg_cmd *req;
46a3df9f 2603 struct hclge_desc desc;
a90bb9a5 2604 u32 flag = 0;
46a3df9f
S
2605 int ret;
2606
2607 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2608
d44f9b63 2609 req = (struct hclge_config_auto_neg_cmd *)desc.data;
b9a8f883
YL
2610 if (enable)
2611 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
a90bb9a5 2612 req->cfg_an_cmd_flag = cpu_to_le32(flag);
46a3df9f
S
2613
2614 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 2615 if (ret)
46a3df9f
S
2616 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2617 ret);
46a3df9f 2618
3f639907 2619 return ret;
46a3df9f
S
2620}
2621
2622static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2623{
2624 struct hclge_vport *vport = hclge_get_vport(handle);
2625 struct hclge_dev *hdev = vport->back;
2626
22f48e24
JS
2627 if (!hdev->hw.mac.support_autoneg) {
2628 if (enable) {
2629 dev_err(&hdev->pdev->dev,
2630 "autoneg is not supported by current port\n");
2631 return -EOPNOTSUPP;
2632 } else {
2633 return 0;
2634 }
2635 }
2636
46a3df9f
S
2637 return hclge_set_autoneg_en(hdev, enable);
2638}
2639
2640static int hclge_get_autoneg(struct hnae3_handle *handle)
2641{
2642 struct hclge_vport *vport = hclge_get_vport(handle);
2643 struct hclge_dev *hdev = vport->back;
27b5bf49
FL
2644 struct phy_device *phydev = hdev->hw.mac.phydev;
2645
2646 if (phydev)
2647 return phydev->autoneg;
46a3df9f
S
2648
2649 return hdev->hw.mac.autoneg;
2650}
2651
22f48e24
JS
2652static int hclge_restart_autoneg(struct hnae3_handle *handle)
2653{
2654 struct hclge_vport *vport = hclge_get_vport(handle);
2655 struct hclge_dev *hdev = vport->back;
2656 int ret;
2657
2658 dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2659
2660 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2661 if (ret)
2662 return ret;
2663 return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2664}
2665
7786a996
JS
2666static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2667{
2668 struct hclge_vport *vport = hclge_get_vport(handle);
2669 struct hclge_dev *hdev = vport->back;
2670
2671 if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2672 return hclge_set_autoneg_en(hdev, !halt);
2673
2674 return 0;
2675}
2676
7e6ec914
JS
2677static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2678{
2679 struct hclge_config_fec_cmd *req;
2680 struct hclge_desc desc;
2681 int ret;
2682
2683 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2684
2685 req = (struct hclge_config_fec_cmd *)desc.data;
2686 if (fec_mode & BIT(HNAE3_FEC_AUTO))
2687 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2688 if (fec_mode & BIT(HNAE3_FEC_RS))
2689 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2690 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2691 if (fec_mode & BIT(HNAE3_FEC_BASER))
2692 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2693 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2694
2695 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2696 if (ret)
2697 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2698
2699 return ret;
2700}
2701
2702static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2703{
2704 struct hclge_vport *vport = hclge_get_vport(handle);
2705 struct hclge_dev *hdev = vport->back;
2706 struct hclge_mac *mac = &hdev->hw.mac;
2707 int ret;
2708
2709 if (fec_mode && !(mac->fec_ability & fec_mode)) {
2710 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2711 return -EINVAL;
2712 }
2713
2714 ret = hclge_set_fec_hw(hdev, fec_mode);
2715 if (ret)
2716 return ret;
2717
2718 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2719 return 0;
2720}
2721
2722static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2723 u8 *fec_mode)
2724{
2725 struct hclge_vport *vport = hclge_get_vport(handle);
2726 struct hclge_dev *hdev = vport->back;
2727 struct hclge_mac *mac = &hdev->hw.mac;
2728
2729 if (fec_ability)
2730 *fec_ability = mac->fec_ability;
2731 if (fec_mode)
2732 *fec_mode = mac->fec_mode;
2733}
2734
46a3df9f
S
2735static int hclge_mac_init(struct hclge_dev *hdev)
2736{
2737 struct hclge_mac *mac = &hdev->hw.mac;
2738 int ret;
2739
5d497936 2740 hdev->support_sfp_query = true;
2d03eacc
YL
2741 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2742 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2743 hdev->hw.mac.duplex);
60df7e91 2744 if (ret)
46a3df9f 2745 return ret;
46a3df9f 2746
d736fc6c
JS
2747 if (hdev->hw.mac.support_autoneg) {
2748 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
60df7e91 2749 if (ret)
d736fc6c 2750 return ret;
d736fc6c
JS
2751 }
2752
46a3df9f
S
2753 mac->link = 0;
2754
7e6ec914
JS
2755 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2756 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
60df7e91 2757 if (ret)
7e6ec914 2758 return ret;
7e6ec914
JS
2759 }
2760
e6d7d79d
YL
2761 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2762 if (ret) {
2763 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2764 return ret;
2765 }
f9fd82a9 2766
1cbc662d
YM
2767 ret = hclge_set_default_loopback(hdev);
2768 if (ret)
2769 return ret;
2770
e6d7d79d 2771 ret = hclge_buffer_alloc(hdev);
3f639907 2772 if (ret)
f9fd82a9 2773 dev_err(&hdev->pdev->dev,
e6d7d79d 2774 "allocate buffer fail, ret=%d\n", ret);
f9fd82a9 2775
3f639907 2776 return ret;
46a3df9f
S
2777}
2778
c1a81619
SM
2779static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2780{
1c6dfe6f 2781 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
18e24888 2782 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
1c6dfe6f 2783 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
0ea68902 2784 hclge_wq, &hdev->service_task, 0);
c1a81619
SM
2785}
2786
cb1b9f77
SM
2787static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2788{
acfc3d55
HT
2789 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2790 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
1c6dfe6f 2791 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
0ea68902 2792 hclge_wq, &hdev->service_task, 0);
cb1b9f77
SM
2793}
2794
ed8fb4b2 2795void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
46a3df9f 2796{
d5432455
GL
2797 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2798 !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
08125454 2799 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
0ea68902 2800 hclge_wq, &hdev->service_task,
ed8fb4b2 2801 delay_time);
46a3df9f
S
2802}
2803
fac24df7 2804static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
46a3df9f 2805{
d44f9b63 2806 struct hclge_link_status_cmd *req;
46a3df9f 2807 struct hclge_desc desc;
46a3df9f
S
2808 int ret;
2809
2810 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2811 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2812 if (ret) {
2813 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2814 ret);
2815 return ret;
2816 }
2817
d44f9b63 2818 req = (struct hclge_link_status_cmd *)desc.data;
fac24df7
JS
2819 *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2820 HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
46a3df9f 2821
fac24df7 2822 return 0;
46a3df9f
S
2823}
2824
fac24df7 2825static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
46a3df9f 2826{
fac24df7
JS
2827 struct phy_device *phydev = hdev->hw.mac.phydev;
2828
2829 *link_status = HCLGE_LINK_STATUS_DOWN;
46a3df9f 2830
582d37bb
PL
2831 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2832 return 0;
2833
fac24df7
JS
2834 if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
2835 return 0;
46a3df9f 2836
fac24df7 2837 return hclge_get_mac_link_status(hdev, link_status);
46a3df9f
S
2838}
2839
2840static void hclge_update_link_status(struct hclge_dev *hdev)
2841{
45e92b7e 2842 struct hnae3_client *rclient = hdev->roce_client;
46a3df9f 2843 struct hnae3_client *client = hdev->nic_client;
45e92b7e 2844 struct hnae3_handle *rhandle;
46a3df9f
S
2845 struct hnae3_handle *handle;
2846 int state;
fac24df7 2847 int ret;
46a3df9f
S
2848 int i;
2849
2850 if (!client)
2851 return;
1c6dfe6f
YL
2852
2853 if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2854 return;
2855
fac24df7
JS
2856 ret = hclge_get_mac_phy_link(hdev, &state);
2857 if (ret) {
2858 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2859 return;
2860 }
2861
46a3df9f
S
2862 if (state != hdev->hw.mac.link) {
2863 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2864 handle = &hdev->vport[i].nic;
2865 client->ops->link_status_change(handle, state);
a6345787 2866 hclge_config_mac_tnl_int(hdev, state);
45e92b7e
PL
2867 rhandle = &hdev->vport[i].roce;
2868 if (rclient && rclient->ops->link_status_change)
2869 rclient->ops->link_status_change(rhandle,
2870 state);
46a3df9f
S
2871 }
2872 hdev->hw.mac.link = state;
2873 }
1c6dfe6f
YL
2874
2875 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
46a3df9f
S
2876}
2877
88d10bd6
JS
2878static void hclge_update_port_capability(struct hclge_mac *mac)
2879{
f438bfe9
JS
2880 /* update fec ability by speed */
2881 hclge_convert_setting_fec(mac);
2882
88d10bd6
JS
2883	/* firmware cannot identify the backplane type, the media type
2884	 * read from configuration can help deal with it
2885 */
2886 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2887 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2888 mac->module_type = HNAE3_MODULE_TYPE_KR;
2889 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2890 mac->module_type = HNAE3_MODULE_TYPE_TP;
2891
db4d3d55 2892 if (mac->support_autoneg) {
88d10bd6
JS
2893 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2894 linkmode_copy(mac->advertising, mac->supported);
2895 } else {
2896 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2897 mac->supported);
2898 linkmode_zero(mac->advertising);
2899 }
2900}
2901
5d497936
PL
2902static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2903{
63cbf7a9 2904 struct hclge_sfp_info_cmd *resp;
5d497936
PL
2905 struct hclge_desc desc;
2906 int ret;
2907
88d10bd6
JS
2908 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2909 resp = (struct hclge_sfp_info_cmd *)desc.data;
5d497936
PL
2910 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2911 if (ret == -EOPNOTSUPP) {
2912 dev_warn(&hdev->pdev->dev,
2913 "IMP do not support get SFP speed %d\n", ret);
2914 return ret;
2915 } else if (ret) {
2916 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2917 return ret;
2918 }
2919
88d10bd6 2920 *speed = le32_to_cpu(resp->speed);
5d497936
PL
2921
2922 return 0;
2923}
2924
88d10bd6 2925static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
46a3df9f 2926{
88d10bd6
JS
2927 struct hclge_sfp_info_cmd *resp;
2928 struct hclge_desc desc;
46a3df9f
S
2929 int ret;
2930
88d10bd6
JS
2931 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2932 resp = (struct hclge_sfp_info_cmd *)desc.data;
2933
2934 resp->query_type = QUERY_ACTIVE_SPEED;
2935
2936 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2937 if (ret == -EOPNOTSUPP) {
2938 dev_warn(&hdev->pdev->dev,
2939 "IMP does not support get SFP info %d\n", ret);
2940 return ret;
2941 } else if (ret) {
2942 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2943 return ret;
2944 }
2945
2af8cb61
GL
2946	/* In some cases, the mac speed got from IMP may be 0, and it should
2947	 * not be set to mac->speed.
2948 */
2949 if (!le32_to_cpu(resp->speed))
2950 return 0;
2951
88d10bd6
JS
2952 mac->speed = le32_to_cpu(resp->speed);
2953 /* if resp->speed_ability is 0, it means it's an old version
2954 * firmware, do not update these params
46a3df9f 2955 */
88d10bd6
JS
2956 if (resp->speed_ability) {
2957 mac->module_type = le32_to_cpu(resp->module_type);
2958 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2959 mac->autoneg = resp->autoneg;
2960 mac->support_autoneg = resp->autoneg_ability;
49b12556 2961 mac->speed_type = QUERY_ACTIVE_SPEED;
f438bfe9
JS
2962 if (!resp->active_fec)
2963 mac->fec_mode = 0;
2964 else
2965 mac->fec_mode = BIT(resp->active_fec);
88d10bd6
JS
2966 } else {
2967 mac->speed_type = QUERY_SFP_SPEED;
2968 }
2969
2970 return 0;
2971}
2972
2973static int hclge_update_port_info(struct hclge_dev *hdev)
2974{
2975 struct hclge_mac *mac = &hdev->hw.mac;
2976 int speed = HCLGE_MAC_SPEED_UNKNOWN;
2977 int ret;
2978
2979 /* get the port info from SFP cmd if not copper port */
2980 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
46a3df9f
S
2981 return 0;
2982
88d10bd6 2983 /* if IMP does not support get SFP/qSFP info, return directly */
5d497936
PL
2984 if (!hdev->support_sfp_query)
2985 return 0;
46a3df9f 2986
295ba232 2987 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
88d10bd6
JS
2988 ret = hclge_get_sfp_info(hdev, mac);
2989 else
2990 ret = hclge_get_sfp_speed(hdev, &speed);
2991
5d497936
PL
2992 if (ret == -EOPNOTSUPP) {
2993 hdev->support_sfp_query = false;
2994 return ret;
2995 } else if (ret) {
2d03eacc 2996 return ret;
46a3df9f
S
2997 }
2998
295ba232 2999 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
88d10bd6
JS
3000 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
3001 hclge_update_port_capability(mac);
3002 return 0;
3003 }
3004 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
3005 HCLGE_MAC_FULL);
3006 } else {
3007 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
3008 return 0; /* do nothing if no SFP */
46a3df9f 3009
88d10bd6
JS
3010 /* must config full duplex for SFP */
3011 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
3012 }
46a3df9f
S
3013}
3014
3015static int hclge_get_status(struct hnae3_handle *handle)
3016{
3017 struct hclge_vport *vport = hclge_get_vport(handle);
3018 struct hclge_dev *hdev = vport->back;
3019
3020 hclge_update_link_status(hdev);
3021
3022 return hdev->hw.mac.link;
3023}
3024
6430f744
YM
3025static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
3026{
60df7e91 3027 if (!pci_num_vf(hdev->pdev)) {
6430f744
YM
3028 dev_err(&hdev->pdev->dev,
3029 "SRIOV is disabled, can not get vport(%d) info.\n", vf);
3030 return NULL;
3031 }
3032
3033 if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3034 dev_err(&hdev->pdev->dev,
3035 "vf id(%d) is out of range(0 <= vfid < %d)\n",
3036 vf, pci_num_vf(hdev->pdev));
3037 return NULL;
3038 }
3039
3040 /* VF start from 1 in vport */
3041 vf += HCLGE_VF_VPORT_START_NUM;
3042 return &hdev->vport[vf];
3043}
3044
3045static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3046 struct ifla_vf_info *ivf)
3047{
3048 struct hclge_vport *vport = hclge_get_vport(handle);
3049 struct hclge_dev *hdev = vport->back;
3050
3051 vport = hclge_get_vf_vport(hdev, vf);
3052 if (!vport)
3053 return -EINVAL;
3054
3055 ivf->vf = vf;
3056 ivf->linkstate = vport->vf_info.link_state;
22044f95 3057 ivf->spoofchk = vport->vf_info.spoofchk;
e196ec75 3058 ivf->trusted = vport->vf_info.trusted;
ee9e4424
YL
3059 ivf->min_tx_rate = 0;
3060 ivf->max_tx_rate = vport->vf_info.max_tx_rate;
89b40c7f
HT
3061 ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3062 ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3063 ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
6430f744
YM
3064 ether_addr_copy(ivf->mac, vport->vf_info.mac);
3065
3066 return 0;
3067}
3068
3069static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3070 int link_state)
3071{
3072 struct hclge_vport *vport = hclge_get_vport(handle);
3073 struct hclge_dev *hdev = vport->back;
3074
3075 vport = hclge_get_vf_vport(hdev, vf);
3076 if (!vport)
3077 return -EINVAL;
3078
3079 vport->vf_info.link_state = link_state;
3080
3081 return 0;
3082}
3083
ca1d7669
SM
3084static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
3085{
5705b451 3086 u32 cmdq_src_reg, msix_src_reg;
ca1d7669
SM
3087
3088 /* fetch the events from their corresponding regs */
c1a81619 3089 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
5705b451 3090 msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
c1a81619
SM
3091
3092 /* Assumption: If by any chance reset and mailbox events are reported
3093 * together then we will only process reset event in this go and will
3094	 * defer the processing of the mailbox events. Since we would not have
3095	 * cleared the RX CMDQ event this time, we would receive another
3096 * interrupt from H/W just for the mailbox.
46ee7350
GL
3097 *
3098 * check for vector0 reset event sources
c1a81619 3099 */
5705b451 3100 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
6dd22bbc
HT
3101 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3102 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3103 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3104 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
f02eb82d 3105 hdev->rst_stats.imp_rst_cnt++;
6dd22bbc
HT
3106 return HCLGE_VECTOR0_EVENT_RST;
3107 }
3108
5705b451 3109 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
65e41e7e 3110 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
8d40854f 3111 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
ca1d7669
SM
3112 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3113 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
f02eb82d 3114 hdev->rst_stats.global_rst_cnt++;
ca1d7669
SM
3115 return HCLGE_VECTOR0_EVENT_RST;
3116 }
3117
f6162d44 3118 /* check for vector0 msix event source */
147175c9 3119 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
9bc6ac91 3120 *clearval = msix_src_reg;
f6162d44 3121 return HCLGE_VECTOR0_EVENT_ERR;
147175c9 3122 }
f6162d44 3123
c1a81619
SM
3124 /* check for vector0 mailbox(=CMDQ RX) event source */
3125 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3126 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3127 *clearval = cmdq_src_reg;
3128 return HCLGE_VECTOR0_EVENT_MBX;
3129 }
ca1d7669 3130
147175c9 3131 /* print other vector0 event source */
9bc6ac91
HT
3132 dev_info(&hdev->pdev->dev,
3133 "CMDQ INT status:0x%x, other INT status:0x%x\n",
3134 cmdq_src_reg, msix_src_reg);
3135 *clearval = msix_src_reg;
3136
ca1d7669
SM
3137 return HCLGE_VECTOR0_EVENT_OTHER;
3138}
3139
3140static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3141 u32 regclr)
3142{
c1a81619
SM
3143 switch (event_type) {
3144 case HCLGE_VECTOR0_EVENT_RST:
ca1d7669 3145 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
c1a81619
SM
3146 break;
3147 case HCLGE_VECTOR0_EVENT_MBX:
3148 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3149 break;
fa7a4bd5
JS
3150 default:
3151 break;
c1a81619 3152 }
ca1d7669
SM
3153}
3154
8e52a602
XW
3155static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3156{
3157 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3158 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3159 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3160 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3161 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3162}
3163
466b0c00
L
3164static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3165{
3166 writel(enable ? 1 : 0, vector->addr);
3167}
3168
3169static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3170{
3171 struct hclge_dev *hdev = data;
ebaf1908 3172 u32 clearval = 0;
ca1d7669 3173 u32 event_cause;
466b0c00
L
3174
3175 hclge_enable_vector(&hdev->misc_vector, false);
ca1d7669
SM
3176 event_cause = hclge_check_event_cause(hdev, &clearval);
3177
c1a81619 3178 /* vector 0 interrupt is shared with reset and mailbox source events.*/
ca1d7669 3179 switch (event_cause) {
f6162d44
SM
3180 case HCLGE_VECTOR0_EVENT_ERR:
3181 /* we do not know what type of reset is required now. This could
3182 * only be decided after we fetch the type of errors which
3183 * caused this event. Therefore, we will do below for now:
3184 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
3185		 * have deferred the type of reset to be used.
3186		 * 2. Schedule the reset service task.
3187 * 3. When service task receives HNAE3_UNKNOWN_RESET type it
3188 * will fetch the correct type of reset. This would be done
3189 * by first decoding the types of errors.
3190 */
3191 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
df561f66 3192 fallthrough;
ca1d7669 3193 case HCLGE_VECTOR0_EVENT_RST:
cb1b9f77 3194 hclge_reset_task_schedule(hdev);
ca1d7669 3195 break;
c1a81619
SM
3196 case HCLGE_VECTOR0_EVENT_MBX:
3197 /* If we are here then,
3198 * 1. Either we are not handling any mbx task and we are not
3199 * scheduled as well
3200 * OR
3201 * 2. We could be handling a mbx task but nothing more is
3202 * scheduled.
3203 * In both cases, we should schedule mbx task as there are more
3204 * mbx messages reported by this interrupt.
3205 */
3206 hclge_mbx_task_schedule(hdev);
f0ad97ac 3207 break;
ca1d7669 3208 default:
f0ad97ac
YL
3209 dev_warn(&hdev->pdev->dev,
3210 "received unknown or unhandled event of vector0\n");
ca1d7669
SM
3211 break;
3212 }
3213
72e2fb07
HT
3214 hclge_clear_event_cause(hdev, event_cause, clearval);
3215
3216	/* Enable the interrupt if it is not caused by reset. When clearval
3217	 * is equal to 0, the interrupt status may have been cleared by
3218	 * hardware before the driver reads the status register. In this
3219	 * case, the vector0 interrupt should also be enabled.
3220 */
9bc6ac91
HT
3221 if (!clearval ||
3222 event_cause == HCLGE_VECTOR0_EVENT_MBX) {
cd8c5c26
YL
3223 hclge_enable_vector(&hdev->misc_vector, true);
3224 }
466b0c00
L
3225
3226 return IRQ_HANDLED;
3227}
3228
3229static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3230{
36cbbdf6
PL
3231 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3232 dev_warn(&hdev->pdev->dev,
3233 "vector(vector_id %d) has been freed.\n", vector_id);
3234 return;
3235 }
3236
466b0c00
L
3237 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3238 hdev->num_msi_left += 1;
3239 hdev->num_msi_used -= 1;
3240}
3241
3242static void hclge_get_misc_vector(struct hclge_dev *hdev)
3243{
3244 struct hclge_misc_vector *vector = &hdev->misc_vector;
3245
3246 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3247
3248 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3249 hdev->vector_status[0] = 0;
3250
3251 hdev->num_msi_left -= 1;
3252 hdev->num_msi_used += 1;
3253}
3254
08125454
YL
3255static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3256 const cpumask_t *mask)
3257{
3258 struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3259 affinity_notify);
3260
3261 cpumask_copy(&hdev->affinity_mask, mask);
3262}
3263
3264static void hclge_irq_affinity_release(struct kref *ref)
3265{
3266}
3267
3268static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3269{
3270 irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3271 &hdev->affinity_mask);
3272
3273 hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3274 hdev->affinity_notify.release = hclge_irq_affinity_release;
3275 irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3276 &hdev->affinity_notify);
3277}
3278
3279static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3280{
3281 irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3282 irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3283}
3284
466b0c00
L
3285static int hclge_misc_irq_init(struct hclge_dev *hdev)
3286{
3287 int ret;
3288
3289 hclge_get_misc_vector(hdev);
3290
ca1d7669 3291 /* this would be explicitly freed in the end */
f97c4d82
YL
3292 snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3293 HCLGE_NAME, pci_name(hdev->pdev));
ca1d7669 3294 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
f97c4d82 3295 0, hdev->misc_vector.name, hdev);
466b0c00
L
3296 if (ret) {
3297 hclge_free_vector(hdev, 0);
3298 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3299 hdev->misc_vector.vector_irq);
3300 }
3301
3302 return ret;
3303}
3304
ca1d7669
SM
3305static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3306{
3307 free_irq(hdev->misc_vector.vector_irq, hdev);
3308 hclge_free_vector(hdev, 0);
3309}
3310
af013903
HT
3311int hclge_notify_client(struct hclge_dev *hdev,
3312 enum hnae3_reset_notify_type type)
4ed340ab
L
3313{
3314 struct hnae3_client *client = hdev->nic_client;
3315 u16 i;
3316
9b2f3477 3317 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
bd9109c9
HT
3318 return 0;
3319
4ed340ab
L
3320 if (!client->ops->reset_notify)
3321 return -EOPNOTSUPP;
3322
3323 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3324 struct hnae3_handle *handle = &hdev->vport[i].nic;
3325 int ret;
3326
3327 ret = client->ops->reset_notify(handle, type);
65e41e7e
HT
3328 if (ret) {
3329 dev_err(&hdev->pdev->dev,
3330 "notify nic client failed %d(%d)\n", type, ret);
4ed340ab 3331 return ret;
65e41e7e 3332 }
4ed340ab
L
3333 }
3334
3335 return 0;
3336}
3337
f403a84f
HT
3338static int hclge_notify_roce_client(struct hclge_dev *hdev,
3339 enum hnae3_reset_notify_type type)
3340{
3341 struct hnae3_client *client = hdev->roce_client;
9d8d5a36 3342 int ret;
f403a84f
HT
3343 u16 i;
3344
9b2f3477 3345 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
f403a84f
HT
3346 return 0;
3347
3348 if (!client->ops->reset_notify)
3349 return -EOPNOTSUPP;
3350
3351 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3352 struct hnae3_handle *handle = &hdev->vport[i].roce;
3353
3354 ret = client->ops->reset_notify(handle, type);
3355 if (ret) {
3356 dev_err(&hdev->pdev->dev,
3357 "notify roce client failed %d(%d)",
3358 type, ret);
3359 return ret;
3360 }
3361 }
3362
3363 return ret;
3364}
3365
4ed340ab
L
3366static int hclge_reset_wait(struct hclge_dev *hdev)
3367{
3368#define HCLGE_RESET_WATI_MS 100
5bb784e9
HT
3369#define HCLGE_RESET_WAIT_CNT 350
3370
4ed340ab
L
3371 u32 val, reg, reg_bit;
3372 u32 cnt = 0;
3373
3374 switch (hdev->reset_type) {
6dd22bbc
HT
3375 case HNAE3_IMP_RESET:
3376 reg = HCLGE_GLOBAL_RESET_REG;
3377 reg_bit = HCLGE_IMP_RESET_BIT;
3378 break;
4ed340ab
L
3379 case HNAE3_GLOBAL_RESET:
3380 reg = HCLGE_GLOBAL_RESET_REG;
3381 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3382 break;
4ed340ab
L
3383 case HNAE3_FUNC_RESET:
3384 reg = HCLGE_FUN_RST_ING;
3385 reg_bit = HCLGE_FUN_RST_ING_B;
3386 break;
3387 default:
3388 dev_err(&hdev->pdev->dev,
3389 "Wait for unsupported reset type: %d\n",
3390 hdev->reset_type);
3391 return -EINVAL;
3392 }
3393
3394 val = hclge_read_dev(&hdev->hw, reg);
e4e87715 3395 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
4ed340ab
L
3396 msleep(HCLGE_RESET_WATI_MS);
3397 val = hclge_read_dev(&hdev->hw, reg);
3398 cnt++;
3399 }
3400
4ed340ab
L
3401 if (cnt >= HCLGE_RESET_WAIT_CNT) {
3402 dev_warn(&hdev->pdev->dev,
3403 "Wait for reset timeout: %d\n", hdev->reset_type);
3404 return -EBUSY;
3405 }
3406
3407 return 0;
3408}
3409
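/* Notify the firmware that the VF identified by func_id is entering
 * (reset = true) or leaving (reset = false) function reset.
 */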
aa5c4f17
HT
3410static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3411{
3412 struct hclge_vf_rst_cmd *req;
3413 struct hclge_desc desc;
3414
3415 req = (struct hclge_vf_rst_cmd *)desc.data;
3416 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3417 req->dest_vfid = func_id;
3418
3419 if (reset)
3420 req->vf_rst = 0x1;
3421
3422 return hclge_cmd_send(&hdev->hw, &desc, 1);
3423}
3424
e511f17b 3425static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
aa5c4f17
HT
3426{
3427 int i;
3428
3429 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3430 struct hclge_vport *vport = &hdev->vport[i];
3431 int ret;
3432
3433 /* Send cmd to set/clear VF's FUNC_RST_ING */
3434 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3435 if (ret) {
3436 dev_err(&hdev->pdev->dev,
adcf738b 3437 "set vf(%u) rst failed %d!\n",
aa5c4f17
HT
3438 vport->vport_id, ret);
3439 return ret;
3440 }
3441
cc645dfa 3442 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
aa5c4f17
HT
3443 continue;
3444
3445 /* Inform VF to process the reset.
3446 * hclge_inform_reset_assert_to_vf may fail if VF
3447 * driver is not loaded.
3448 */
3449 ret = hclge_inform_reset_assert_to_vf(vport);
3450 if (ret)
3451 dev_warn(&hdev->pdev->dev,
adcf738b 3452 "inform reset to vf(%u) failed %d!\n",
aa5c4f17
HT
3453 vport->vport_id, ret);
3454 }
3455
3456 return 0;
3457}
3458
1c6dfe6f
YL
3459static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3460{
3461 if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3462 test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3463 test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3464 return;
3465
3466 hclge_mbx_handler(hdev);
3467
3468 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3469}
3470
c3106cac 3471static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
427a7bff
HT
3472{
3473 struct hclge_pf_rst_sync_cmd *req;
3474 struct hclge_desc desc;
3475 int cnt = 0;
3476 int ret;
3477
3478 req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3479 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3480
3481 do {
1c6dfe6f
YL
3482	/* the VF needs to bring its netdev down via mbx during PF or FLR reset */
3483 hclge_mailbox_service_task(hdev);
3484
427a7bff
HT
3485 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3486		/* to be compatible with old firmware, wait
3487		 * 100 ms for the VF to stop IO
3488 */
3489 if (ret == -EOPNOTSUPP) {
3490 msleep(HCLGE_RESET_SYNC_TIME);
c3106cac 3491 return;
427a7bff 3492 } else if (ret) {
c3106cac
HT
3493 dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3494 ret);
3495 return;
427a7bff 3496 } else if (req->all_vf_ready) {
c3106cac 3497 return;
427a7bff
HT
3498 }
3499 msleep(HCLGE_PF_RESET_SYNC_TIME);
3500 hclge_cmd_reuse_desc(&desc, true);
3501 } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3502
c3106cac 3503 dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
427a7bff
HT
3504}
3505
a83d2961
WL
3506void hclge_report_hw_error(struct hclge_dev *hdev,
3507 enum hnae3_hw_error_type type)
3508{
3509 struct hnae3_client *client = hdev->nic_client;
3510 u16 i;
3511
3512 if (!client || !client->ops->process_hw_error ||
3513 !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3514 return;
3515
3516 for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3517 client->ops->process_hw_error(&hdev->vport[i].nic, type);
3518}
3519
3520static void hclge_handle_imp_error(struct hclge_dev *hdev)
3521{
3522 u32 reg_val;
3523
3524 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3525 if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3526 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3527 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3528 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3529 }
3530
3531 if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3532 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3533 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3534 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3535 }
3536}
3537
2bfbd35d 3538int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
4ed340ab
L
3539{
3540 struct hclge_desc desc;
3541 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3542 int ret;
3543
3544 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
e4e87715 3545 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
4ed340ab
L
3546 req->fun_reset_vfid = func_id;
3547
3548 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3549 if (ret)
3550 dev_err(&hdev->pdev->dev,
3551 "send function reset cmd fail, status =%d\n", ret);
3552
3553 return ret;
3554}
3555
f2f432f2 3556static void hclge_do_reset(struct hclge_dev *hdev)
4ed340ab 3557{
4f765d3e 3558 struct hnae3_handle *handle = &hdev->vport[0].nic;
4ed340ab
L
3559 struct pci_dev *pdev = hdev->pdev;
3560 u32 val;
3561
4f765d3e 3562 if (hclge_get_hw_reset_stat(handle)) {
8de91e92 3563 dev_info(&pdev->dev, "hardware reset not finish\n");
4f765d3e
HT
3564 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3565 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3566 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3567 return;
3568 }
3569
f2f432f2 3570 switch (hdev->reset_type) {
4ed340ab 3571 case HNAE3_GLOBAL_RESET:
8de91e92 3572 dev_info(&pdev->dev, "global reset requested\n");
4ed340ab 3573 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
e4e87715 3574 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
4ed340ab 3575 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
4ed340ab 3576 break;
4ed340ab 3577 case HNAE3_FUNC_RESET:
8de91e92 3578 dev_info(&pdev->dev, "PF reset requested\n");
cb1b9f77
SM
3579 /* schedule again to check later */
3580 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3581 hclge_reset_task_schedule(hdev);
4ed340ab
L
3582 break;
3583 default:
3584 dev_warn(&pdev->dev,
8de91e92 3585 "unsupported reset type: %d\n", hdev->reset_type);
4ed340ab
L
3586 break;
3587 }
3588}
3589
123297b7 3590static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
f2f432f2
SM
3591 unsigned long *addr)
3592{
3593 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
123297b7 3594 struct hclge_dev *hdev = ae_dev->priv;
f2f432f2 3595
f6162d44
SM
3596 /* first, resolve any unknown reset type to the known type(s) */
3597 if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
d9b81c96 3598 u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
5705b451 3599 HCLGE_MISC_VECTOR_INT_STS);
f6162d44
SM
3600 /* we will intentionally ignore any errors from this function
3601 * as we will end up in *some* reset request in any case
3602 */
d9b81c96
HT
3603 if (hclge_handle_hw_msix_error(hdev, addr))
3604 dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3605 msix_sts_reg);
3606
f6162d44
SM
3607 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3608		/* We deferred the clearing of the error event which caused the
3609		 * interrupt, since it was not possible to do that in interrupt
3610		 * context (and this is the reason we introduced the new UNKNOWN
3611		 * reset type). Now that the errors have been handled and cleared
3612		 * in hardware, we can safely enable interrupts. This is an
3613		 * exception to the norm.
3614 */
3615 hclge_enable_vector(&hdev->misc_vector, true);
3616 }
3617
f2f432f2 3618 /* return the highest priority reset level amongst all */
7cea834d
HT
3619 if (test_bit(HNAE3_IMP_RESET, addr)) {
3620 rst_level = HNAE3_IMP_RESET;
3621 clear_bit(HNAE3_IMP_RESET, addr);
3622 clear_bit(HNAE3_GLOBAL_RESET, addr);
7cea834d
HT
3623 clear_bit(HNAE3_FUNC_RESET, addr);
3624 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
f2f432f2 3625 rst_level = HNAE3_GLOBAL_RESET;
7cea834d 3626 clear_bit(HNAE3_GLOBAL_RESET, addr);
7cea834d
HT
3627 clear_bit(HNAE3_FUNC_RESET, addr);
3628 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
f2f432f2 3629 rst_level = HNAE3_FUNC_RESET;
7cea834d 3630 clear_bit(HNAE3_FUNC_RESET, addr);
6b9a97ee
HT
3631 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3632 rst_level = HNAE3_FLR_RESET;
3633 clear_bit(HNAE3_FLR_RESET, addr);
7cea834d 3634 }
f2f432f2 3635
0fdf4d30
HT
3636 if (hdev->reset_type != HNAE3_NONE_RESET &&
3637 rst_level < hdev->reset_type)
3638 return HNAE3_NONE_RESET;
3639
f2f432f2
SM
3640 return rst_level;
3641}
3642
cd8c5c26
YL
3643static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3644{
3645 u32 clearval = 0;
3646
3647 switch (hdev->reset_type) {
3648 case HNAE3_IMP_RESET:
3649 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3650 break;
3651 case HNAE3_GLOBAL_RESET:
3652 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3653 break;
cd8c5c26 3654 default:
cd8c5c26
YL
3655 break;
3656 }
3657
3658 if (!clearval)
3659 return;
3660
72e2fb07
HT
3661 /* For revision 0x20, the reset interrupt source
3662	 * can only be cleared after the hardware reset is done
3663 */
295ba232 3664 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
72e2fb07
HT
3665 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3666 clearval);
3667
cd8c5c26
YL
3668 hclge_enable_vector(&hdev->misc_vector, true);
3669}
3670
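/* Set or clear the software reset-ready bit in the CSQ depth register,
 * telling the hardware whether the driver has finished its preparatory
 * work for the reset.
 */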
6b428b4f
HT
3671static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3672{
3673 u32 reg_val;
3674
3675 reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3676 if (enable)
3677 reg_val |= HCLGE_NIC_SW_RST_RDY;
3678 else
3679 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3680
3681 hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3682}
3683
c7554dcd
HT
3684static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3685{
3686 int ret;
3687
3688 ret = hclge_set_all_vf_rst(hdev, true);
3689 if (ret)
3690 return ret;
3691
3692 hclge_func_reset_sync_vf(hdev);
3693
3694 return 0;
3695}
3696
35d93a30
HT
3697static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3698{
6dd22bbc 3699 u32 reg_val;
35d93a30
HT
3700 int ret = 0;
3701
3702 switch (hdev->reset_type) {
3703 case HNAE3_FUNC_RESET:
c7554dcd
HT
3704 ret = hclge_func_reset_notify_vf(hdev);
3705 if (ret)
3706 return ret;
427a7bff 3707
35d93a30
HT
3708 ret = hclge_func_reset_cmd(hdev, 0);
3709 if (ret) {
3710 dev_err(&hdev->pdev->dev,
141b95d5 3711 "asserting function reset fail %d!\n", ret);
35d93a30
HT
3712 return ret;
3713 }
3714
3715		/* After performing PF reset, it is not necessary to do the
3716 * mailbox handling or send any command to firmware, because
3717 * any mailbox handling or command to firmware is only valid
3718 * after hclge_cmd_init is called.
3719 */
3720 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
f02eb82d 3721 hdev->rst_stats.pf_rst_cnt++;
35d93a30 3722 break;
6b9a97ee 3723 case HNAE3_FLR_RESET:
c7554dcd
HT
3724 ret = hclge_func_reset_notify_vf(hdev);
3725 if (ret)
3726 return ret;
6b9a97ee 3727 break;
6dd22bbc 3728 case HNAE3_IMP_RESET:
a83d2961 3729 hclge_handle_imp_error(hdev);
6dd22bbc
HT
3730 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3731 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3732 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3733 break;
35d93a30
HT
3734 default:
3735 break;
3736 }
3737
ada13ee3
HT
3738 /* inform hardware that preparatory work is done */
3739 msleep(HCLGE_RESET_SYNC_TIME);
6b428b4f 3740 hclge_reset_handshake(hdev, true);
35d93a30
HT
3741 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3742
3743 return ret;
3744}
3745
8e9eee78 3746static bool hclge_reset_err_handle(struct hclge_dev *hdev)
65e41e7e
HT
3747{
3748#define MAX_RESET_FAIL_CNT 5
65e41e7e
HT
3749
3750 if (hdev->reset_pending) {
3751 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3752 hdev->reset_pending);
3753 return true;
2336f19d
HT
3754 } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3755 HCLGE_RESET_INT_M) {
65e41e7e 3756 dev_info(&hdev->pdev->dev,
2336f19d 3757 "reset failed because new reset interrupt\n");
65e41e7e
HT
3758 hclge_clear_reset_cause(hdev);
3759 return false;
0ecf1f7b
HT
3760 } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3761 hdev->rst_stats.reset_fail_cnt++;
8e9eee78
HT
3762 set_bit(hdev->reset_type, &hdev->reset_pending);
3763 dev_info(&hdev->pdev->dev,
adcf738b 3764 "re-schedule reset task(%u)\n",
0ecf1f7b 3765 hdev->rst_stats.reset_fail_cnt);
8e9eee78 3766 return true;
65e41e7e
HT
3767 }
3768
3769 hclge_clear_reset_cause(hdev);
6b428b4f
HT
3770
3771	/* recover the handshake status when reset fails */
3772 hclge_reset_handshake(hdev, true);
3773
65e41e7e 3774 dev_err(&hdev->pdev->dev, "Reset fail!\n");
3d77d0cb
HT
3775
3776 hclge_dbg_dump_rst_info(hdev);
3777
d5432455
GL
3778 set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3779
65e41e7e
HT
3780 return false;
3781}
3782
72e2fb07
HT
3783static int hclge_set_rst_done(struct hclge_dev *hdev)
3784{
3785 struct hclge_pf_rst_done_cmd *req;
3786 struct hclge_desc desc;
648db051 3787 int ret;
72e2fb07
HT
3788
3789 req = (struct hclge_pf_rst_done_cmd *)desc.data;
3790 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3791 req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3792
648db051
HT
3793 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3794 /* To be compatible with the old firmware, which does not support
3795 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
3796 * return success
3797 */
3798 if (ret == -EOPNOTSUPP) {
3799 dev_warn(&hdev->pdev->dev,
3800 "current firmware does not support command(0x%x)!\n",
3801 HCLGE_OPC_PF_RST_DONE);
3802 return 0;
3803 } else if (ret) {
3804 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3805 ret);
3806 }
3807
3808 return ret;
72e2fb07
HT
3809}
3810
aa5c4f17
HT
3811static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3812{
3813 int ret = 0;
3814
3815 switch (hdev->reset_type) {
3816 case HNAE3_FUNC_RESET:
6b9a97ee 3817 case HNAE3_FLR_RESET:
aa5c4f17
HT
3818 ret = hclge_set_all_vf_rst(hdev, false);
3819 break;
72e2fb07 3820 case HNAE3_GLOBAL_RESET:
72e2fb07
HT
3821 case HNAE3_IMP_RESET:
3822 ret = hclge_set_rst_done(hdev);
3823 break;
aa5c4f17
HT
3824 default:
3825 break;
3826 }
3827
6b428b4f
HT
3828	/* clear the handshake status after re-initialization is done */
3829 hclge_reset_handshake(hdev, false);
3830
aa5c4f17
HT
3831 return ret;
3832}
3833
63cbf7a9
YM
3834static int hclge_reset_stack(struct hclge_dev *hdev)
3835{
3836 int ret;
3837
3838 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3839 if (ret)
3840 return ret;
3841
3842 ret = hclge_reset_ae_dev(hdev->ae_dev);
3843 if (ret)
3844 return ret;
3845
039ba863 3846 return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
63cbf7a9
YM
3847}
3848
d4fa0656 3849static int hclge_reset_prepare(struct hclge_dev *hdev)
f2f432f2 3850{
65e41e7e 3851 int ret;
9de0b86f 3852
f02eb82d 3853 hdev->rst_stats.reset_cnt++;
f2f432f2 3854 /* perform reset of the stack & ae device for a client */
65e41e7e
HT
3855 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3856 if (ret)
d4fa0656 3857 return ret;
65e41e7e 3858
6d4fab39 3859 rtnl_lock();
65e41e7e 3860 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
65e41e7e 3861 rtnl_unlock();
65e41e7e 3862 if (ret)
d4fa0656 3863 return ret;
cd8c5c26 3864
d4fa0656
HT
3865 return hclge_reset_prepare_wait(hdev);
3866}
3867
3868static int hclge_reset_rebuild(struct hclge_dev *hdev)
3869{
3870 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3871 enum hnae3_reset_type reset_level;
3872 int ret;
f2f432f2 3873
f02eb82d
HT
3874 hdev->rst_stats.hw_reset_done_cnt++;
3875
65e41e7e
HT
3876 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3877 if (ret)
d4fa0656 3878 return ret;
65e41e7e
HT
3879
3880 rtnl_lock();
63cbf7a9 3881 ret = hclge_reset_stack(hdev);
d4fa0656 3882 rtnl_unlock();
1f609492 3883 if (ret)
d4fa0656 3884 return ret;
1f609492 3885
65e41e7e
HT
3886 hclge_clear_reset_cause(hdev);
3887
63cbf7a9
YM
3888 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3889	/* ignore the RoCE notify error once the reset has already failed
3890	 * HCLGE_RESET_MAX_FAIL_CNT - 1 times
3891 */
0ecf1f7b
HT
3892 if (ret &&
3893 hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
d4fa0656 3894 return ret;
63cbf7a9 3895
60c800c6
YM
3896 ret = hclge_reset_prepare_up(hdev);
3897 if (ret)
3898 return ret;
3899
63cbf7a9 3900 rtnl_lock();
65e41e7e 3901 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
6d4fab39 3902 rtnl_unlock();
d4fa0656
HT
3903 if (ret)
3904 return ret;
f403a84f 3905
65e41e7e
HT
3906 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3907 if (ret)
d4fa0656 3908 return ret;
65e41e7e 3909
b644a8d4 3910 hdev->last_reset_time = jiffies;
0ecf1f7b 3911 hdev->rst_stats.reset_fail_cnt = 0;
f02eb82d 3912 hdev->rst_stats.reset_done_cnt++;
d5432455 3913 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
012fcb52
HT
3914
3915 /* if default_reset_request has a higher level reset request,
3916	 * it should be handled as soon as possible, since some errors
3917	 * need this kind of reset to be fixed.
3918 */
525a294e
HT
3919 reset_level = hclge_get_reset_level(ae_dev,
3920 &hdev->default_reset_request);
3921 if (reset_level != HNAE3_NONE_RESET)
3922 set_bit(reset_level, &hdev->reset_request);
b644a8d4 3923
d4fa0656
HT
3924 return 0;
3925}
3926
3927static void hclge_reset(struct hclge_dev *hdev)
3928{
3929 if (hclge_reset_prepare(hdev))
3930 goto err_reset;
3931
3932 if (hclge_reset_wait(hdev))
3933 goto err_reset;
3934
3935 if (hclge_reset_rebuild(hdev))
3936 goto err_reset;
3937
65e41e7e
HT
3938 return;
3939
65e41e7e 3940err_reset:
8e9eee78 3941 if (hclge_reset_err_handle(hdev))
65e41e7e 3942 hclge_reset_task_schedule(hdev);
f2f432f2
SM
3943}
3944
6ae4e733
SJ
3945static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3946{
3947 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3948 struct hclge_dev *hdev = ae_dev->priv;
3949
3950	/* We might end up getting called broadly because of the two cases below:
3951	 * 1. A recoverable error was conveyed through APEI and the only way to
3952	 *    restore normalcy is to reset.
3953 * 2. A new reset request from the stack due to timeout
3954 *
3955	 * For the first case, the error event might not have an ae handle available.
3956	 * Check if this is a new reset request and we are not here just because
6d4c3981
SM
3957	 * the last reset attempt did not succeed and the watchdog hit us again. We
3958	 * will know this if the last reset request did not occur very recently
3959	 * (watchdog timer = 5*HZ, let us check after a sufficiently large time,
3960	 * say 4*5*HZ). In case of a new request we reset the "reset level" to PF reset.
9de0b86f
HT
3961	 * And if it is a repeat of the most recent reset request then we
3962	 * want to make sure we throttle the reset request. Therefore, we will
3963	 * not allow it again until 3*HZ has elapsed.
6d4c3981 3964 */
6ae4e733
SJ
3965 if (!handle)
3966 handle = &hdev->vport[0].nic;
3967
b37ce587 3968 if (time_before(jiffies, (hdev->last_reset_time +
012fcb52
HT
3969 HCLGE_RESET_INTERVAL))) {
3970 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
9de0b86f 3971 return;
db4d3d55 3972 } else if (hdev->default_reset_request) {
0742ed7c 3973 hdev->reset_level =
123297b7 3974 hclge_get_reset_level(ae_dev,
720bd583 3975 &hdev->default_reset_request);
db4d3d55 3976 } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
0742ed7c 3977 hdev->reset_level = HNAE3_FUNC_RESET;
db4d3d55 3978 }
4ed340ab 3979
96e65abb 3980 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
0742ed7c 3981 hdev->reset_level);
6d4c3981
SM
3982
3983 /* request reset & schedule reset task */
0742ed7c 3984 set_bit(hdev->reset_level, &hdev->reset_request);
6d4c3981
SM
3985 hclge_reset_task_schedule(hdev);
3986
0742ed7c
HT
3987 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3988 hdev->reset_level++;
4ed340ab
L
3989}
3990
720bd583
HT
3991static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3992 enum hnae3_reset_type rst_type)
3993{
3994 struct hclge_dev *hdev = ae_dev->priv;
3995
3996 set_bit(rst_type, &hdev->default_reset_request);
3997}
3998
65e41e7e
HT
3999static void hclge_reset_timer(struct timer_list *t)
4000{
4001 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
4002
012fcb52
HT
4003 /* if default_reset_request has no value, it means that this reset
4004	 * request has already been handled, so just return here
4005 */
4006 if (!hdev->default_reset_request)
4007 return;
4008
65e41e7e 4009 dev_info(&hdev->pdev->dev,
e3b84ed2 4010 "triggering reset in reset timer\n");
65e41e7e
HT
4011 hclge_reset_event(hdev->pdev, NULL);
4012}
4013
4ed340ab
L
4014static void hclge_reset_subtask(struct hclge_dev *hdev)
4015{
123297b7
SJ
4016 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4017
f2f432f2
SM
4018	/* Check if there is any ongoing reset in the hardware. This status can
4019	 * be checked from reset_pending. If there is, then we need to wait for
4020	 * the hardware to complete the reset.
4021	 * a. If we are able to figure out in a reasonable time that the hardware
4022	 *    has fully reset, then we can proceed with the driver and client
4023	 *    reset.
4024	 * b. Else, we can come back later to check this status, so re-schedule
4025	 *    now.
4026 */
0742ed7c 4027 hdev->last_reset_time = jiffies;
123297b7 4028 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
f2f432f2
SM
4029 if (hdev->reset_type != HNAE3_NONE_RESET)
4030 hclge_reset(hdev);
4ed340ab 4031
f2f432f2 4032 /* check if we got any *new* reset requests to be honored */
123297b7 4033 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
f2f432f2
SM
4034 if (hdev->reset_type != HNAE3_NONE_RESET)
4035 hclge_do_reset(hdev);
4ed340ab 4036
4ed340ab
L
4037 hdev->reset_type = HNAE3_NONE_RESET;
4038}
4039
1c6dfe6f 4040static void hclge_reset_service_task(struct hclge_dev *hdev)
466b0c00 4041{
1c6dfe6f
YL
4042 if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
4043 return;
cb1b9f77 4044
8627bded
HT
4045 down(&hdev->reset_sem);
4046 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
cb1b9f77 4047
4ed340ab 4048 hclge_reset_subtask(hdev);
cb1b9f77
SM
4049
4050 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
8627bded 4051 up(&hdev->reset_sem);
466b0c00
L
4052}
4053
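/* Clear the ALIVE flag of any VF vport that has not been active for more
 * than 8 seconds, and reset the MPS of non-alive vports to the default
 * frame size.
 */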
a6d818e3
YL
4054static void hclge_update_vport_alive(struct hclge_dev *hdev)
4055{
4056 int i;
4057
4058	/* start from vport 1 since the PF is always alive */
4059 for (i = 1; i < hdev->num_alloc_vport; i++) {
4060 struct hclge_vport *vport = &hdev->vport[i];
4061
4062 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
4063 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
818f1675
YL
4064
4065 /* If vf is not alive, set to default value */
4066 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4067 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
a6d818e3
YL
4068 }
4069}
4070
1c6dfe6f 4071static void hclge_periodic_service_task(struct hclge_dev *hdev)
46a3df9f 4072{
1c6dfe6f 4073 unsigned long delta = round_jiffies_relative(HZ);
7be1b9f3 4074
e6394363
GH
4075 if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
4076 return;
4077
1c6dfe6f
YL
4078 /* Always handle the link updating to make sure link state is
4079 * updated when it is triggered by mbx.
4080 */
4081 hclge_update_link_status(hdev);
ee4bcd3b 4082 hclge_sync_mac_table(hdev);
c631c696 4083 hclge_sync_promisc_mode(hdev);
46a3df9f 4084
1c6dfe6f
YL
4085 if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4086 delta = jiffies - hdev->last_serv_processed;
4087
4088 if (delta < round_jiffies_relative(HZ)) {
4089 delta = round_jiffies_relative(HZ) - delta;
4090 goto out;
4091 }
c5f65480
JS
4092 }
4093
1c6dfe6f 4094 hdev->serv_processed_cnt++;
a6d818e3 4095 hclge_update_vport_alive(hdev);
1c6dfe6f
YL
4096
4097 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4098 hdev->last_serv_processed = jiffies;
4099 goto out;
4100 }
4101
4102 if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4103 hclge_update_stats_for_all(hdev);
4104
4105 hclge_update_port_info(hdev);
fe4144d4 4106 hclge_sync_vlan_filter(hdev);
db4d3d55 4107
1c6dfe6f 4108 if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
d93ed94f 4109 hclge_rfs_filter_expire(hdev);
7be1b9f3 4110
1c6dfe6f
YL
4111 hdev->last_serv_processed = jiffies;
4112
4113out:
4114 hclge_task_schedule(hdev, delta);
4115}
4116
4117static void hclge_service_task(struct work_struct *work)
4118{
4119 struct hclge_dev *hdev =
4120 container_of(work, struct hclge_dev, service_task.work);
4121
4122 hclge_reset_service_task(hdev);
4123 hclge_mailbox_service_task(hdev);
4124 hclge_periodic_service_task(hdev);
4125
4126	/* Handle reset and mbx again in case the periodic task delays the
4127 * handling by calling hclge_task_schedule() in
4128 * hclge_periodic_service_task().
4129 */
4130 hclge_reset_service_task(hdev);
4131 hclge_mailbox_service_task(hdev);
46a3df9f
S
4132}
4133
46a3df9f
S
4134struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4135{
4136 /* VF handle has no client */
4137 if (!handle->client)
4138 return container_of(handle, struct hclge_vport, nic);
4139 else if (handle->client->type == HNAE3_CLIENT_ROCE)
4140 return container_of(handle, struct hclge_vport, roce);
4141 else
4142 return container_of(handle, struct hclge_vport, nic);
4143}
4144
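/* Hand out up to vector_num unused MSI vectors to this vport, filling in
 * the irq number and per-vector register address for each one; returns
 * the number of vectors actually allocated.
 */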
4145static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4146 struct hnae3_vector_info *vector_info)
4147{
4148 struct hclge_vport *vport = hclge_get_vport(handle);
4149 struct hnae3_vector_info *vector = vector_info;
4150 struct hclge_dev *hdev = vport->back;
4151 int alloc = 0;
4152 int i, j;
4153
580a05f9 4154 vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
46a3df9f
S
4155 vector_num = min(hdev->num_msi_left, vector_num);
4156
4157 for (j = 0; j < vector_num; j++) {
4158 for (i = 1; i < hdev->num_msi; i++) {
4159 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4160 vector->vector = pci_irq_vector(hdev->pdev, i);
4161 vector->io_addr = hdev->hw.io_base +
4162 HCLGE_VECTOR_REG_BASE +
4163 (i - 1) * HCLGE_VECTOR_REG_OFFSET +
4164 vport->vport_id *
4165 HCLGE_VECTOR_VF_OFFSET;
4166 hdev->vector_status[i] = vport->vport_id;
887c3820 4167 hdev->vector_irq[i] = vector->vector;
46a3df9f
S
4168
4169 vector++;
4170 alloc++;
4171
4172 break;
4173 }
4174 }
4175 }
4176 hdev->num_msi_left -= alloc;
4177 hdev->num_msi_used += alloc;
4178
4179 return alloc;
4180}
4181
4182static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4183{
4184 int i;
4185
887c3820
SM
4186 for (i = 0; i < hdev->num_msi; i++)
4187 if (vector == hdev->vector_irq[i])
4188 return i;
4189
46a3df9f
S
4190 return -EINVAL;
4191}
4192
0d3e6631
YL
4193static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4194{
4195 struct hclge_vport *vport = hclge_get_vport(handle);
4196 struct hclge_dev *hdev = vport->back;
4197 int vector_id;
4198
4199 vector_id = hclge_get_vector_index(hdev, vector);
4200 if (vector_id < 0) {
4201 dev_err(&hdev->pdev->dev,
6f8e330d 4202 "Get vector index fail. vector = %d\n", vector);
0d3e6631
YL
4203 return vector_id;
4204 }
4205
4206 hclge_free_vector(hdev, vector_id);
4207
4208 return 0;
4209}
4210
46a3df9f
S
4211static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4212{
4213 return HCLGE_RSS_KEY_SIZE;
4214}
4215
4216static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
4217{
4218 return HCLGE_RSS_IND_TBL_SIZE;
4219}
4220
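/* Program the RSS hash algorithm and hash key into hardware; the key is
 * written HCLGE_RSS_HASH_KEY_NUM bytes at a time, one command per chunk.
 */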
46a3df9f
S
4221static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4222 const u8 hfunc, const u8 *key)
4223{
d44f9b63 4224 struct hclge_rss_config_cmd *req;
ebaf1908 4225 unsigned int key_offset = 0;
46a3df9f 4226 struct hclge_desc desc;
3caf772b 4227 int key_counts;
46a3df9f
S
4228 int key_size;
4229 int ret;
4230
3caf772b 4231 key_counts = HCLGE_RSS_KEY_SIZE;
d44f9b63 4232 req = (struct hclge_rss_config_cmd *)desc.data;
46a3df9f 4233
3caf772b 4234 while (key_counts) {
46a3df9f
S
4235 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4236 false);
4237
4238 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4239 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4240
3caf772b 4241 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
46a3df9f
S
4242 memcpy(req->hash_key,
4243 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4244
3caf772b
YM
4245 key_counts -= key_size;
4246 key_offset++;
46a3df9f
S
4247 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4248 if (ret) {
4249 dev_err(&hdev->pdev->dev,
4250 "Configure RSS config fail, status = %d\n",
4251 ret);
4252 return ret;
4253 }
4254 }
4255 return 0;
4256}
4257
89523cfa 4258static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
46a3df9f 4259{
d44f9b63 4260 struct hclge_rss_indirection_table_cmd *req;
46a3df9f
S
4261 struct hclge_desc desc;
4262 int i, j;
4263 int ret;
4264
d44f9b63 4265 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
46a3df9f
S
4266
4267 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
4268 hclge_cmd_setup_basic_desc
4269 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4270
a90bb9a5
YL
4271 req->start_table_index =
4272 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4273 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
46a3df9f
S
4274
4275 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
4276 req->rss_result[j] =
4277 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4278
4279 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4280 if (ret) {
4281 dev_err(&hdev->pdev->dev,
4282 "Configure rss indir table fail,status = %d\n",
4283 ret);
4284 return ret;
4285 }
4286 }
4287 return 0;
4288}
4289
4290static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4291 u16 *tc_size, u16 *tc_offset)
4292{
d44f9b63 4293 struct hclge_rss_tc_mode_cmd *req;
46a3df9f
S
4294 struct hclge_desc desc;
4295 int ret;
4296 int i;
4297
4298 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
d44f9b63 4299 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
46a3df9f
S
4300
4301 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
a90bb9a5
YL
4302 u16 mode = 0;
4303
e4e87715
PL
4304 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4305 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4306 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4307 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4308 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
a90bb9a5
YL
4309
4310 req->rss_tc_mode[i] = cpu_to_le16(mode);
46a3df9f
S
4311 }
4312
4313 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 4314 if (ret)
46a3df9f
S
4315 dev_err(&hdev->pdev->dev,
4316 "Configure rss tc mode fail, status = %d\n", ret);
46a3df9f 4317
3f639907 4318 return ret;
46a3df9f
S
4319}
4320
232fc64b
PL
4321static void hclge_get_rss_type(struct hclge_vport *vport)
4322{
4323 if (vport->rss_tuple_sets.ipv4_tcp_en ||
4324 vport->rss_tuple_sets.ipv4_udp_en ||
4325 vport->rss_tuple_sets.ipv4_sctp_en ||
4326 vport->rss_tuple_sets.ipv6_tcp_en ||
4327 vport->rss_tuple_sets.ipv6_udp_en ||
4328 vport->rss_tuple_sets.ipv6_sctp_en)
4329 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4330 else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4331 vport->rss_tuple_sets.ipv6_fragment_en)
4332 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4333 else
4334 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4335}
4336
46a3df9f
S
4337static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4338{
d44f9b63 4339 struct hclge_rss_input_tuple_cmd *req;
46a3df9f
S
4340 struct hclge_desc desc;
4341 int ret;
4342
4343 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4344
d44f9b63 4345 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
6f2af429
YL
4346
4347 /* Get the tuple cfg from pf */
4348 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4349 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4350 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4351 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4352 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4353 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4354 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4355 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
232fc64b 4356 hclge_get_rss_type(&hdev->vport[0]);
46a3df9f 4357 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 4358 if (ret)
46a3df9f
S
4359 dev_err(&hdev->pdev->dev,
4360 "Configure rss input fail, status = %d\n", ret);
3f639907 4361 return ret;
46a3df9f
S
4362}
4363
4364static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4365 u8 *key, u8 *hfunc)
4366{
4367 struct hclge_vport *vport = hclge_get_vport(handle);
46a3df9f
S
4368 int i;
4369
4370 /* Get hash algorithm */
775501a1
JS
4371 if (hfunc) {
4372 switch (vport->rss_algo) {
4373 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4374 *hfunc = ETH_RSS_HASH_TOP;
4375 break;
4376 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4377 *hfunc = ETH_RSS_HASH_XOR;
4378 break;
4379 default:
4380 *hfunc = ETH_RSS_HASH_UNKNOWN;
4381 break;
4382 }
4383 }
46a3df9f
S
4384
4385 /* Get the RSS Key required by the user */
4386 if (key)
4387 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4388
4389 /* Get indirect table */
4390 if (indir)
4391 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4392 indir[i] = vport->rss_indirection_tbl[i];
4393
4394 return 0;
4395}
4396
4397static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4398 const u8 *key, const u8 hfunc)
4399{
4400 struct hclge_vport *vport = hclge_get_vport(handle);
4401 struct hclge_dev *hdev = vport->back;
4402 u8 hash_algo;
4403 int ret, i;
4404
4405	/* Set the RSS Hash Key if specified by the user */
4406 if (key) {
775501a1
JS
4407 switch (hfunc) {
4408 case ETH_RSS_HASH_TOP:
46a3df9f 4409 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
775501a1
JS
4410 break;
4411 case ETH_RSS_HASH_XOR:
4412 hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4413 break;
4414 case ETH_RSS_HASH_NO_CHANGE:
4415 hash_algo = vport->rss_algo;
4416 break;
4417 default:
46a3df9f 4418 return -EINVAL;
775501a1
JS
4419 }
4420
46a3df9f
S
4421 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4422 if (ret)
4423 return ret;
89523cfa
YL
4424
4425		/* Update the shadow RSS key with the user specified key */
4426 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4427 vport->rss_algo = hash_algo;
46a3df9f
S
4428 }
4429
4430 /* Update the shadow RSS table with user specified qids */
4431 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4432 vport->rss_indirection_tbl[i] = indir[i];
4433
4434 /* Update the hardware */
89523cfa 4435 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
46a3df9f
S
4436}
4437
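/* Translate the ethtool RXH_* flags in nfc->data into the driver's
 * per-tuple hash bits (source/destination IP and L4 port bits, plus the
 * vlan tag bit for SCTP flows).
 */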
f7db940a
L
4438static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4439{
4440 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4441
4442 if (nfc->data & RXH_L4_B_2_3)
4443 hash_sets |= HCLGE_D_PORT_BIT;
4444 else
4445 hash_sets &= ~HCLGE_D_PORT_BIT;
4446
4447 if (nfc->data & RXH_IP_SRC)
4448 hash_sets |= HCLGE_S_IP_BIT;
4449 else
4450 hash_sets &= ~HCLGE_S_IP_BIT;
4451
4452 if (nfc->data & RXH_IP_DST)
4453 hash_sets |= HCLGE_D_IP_BIT;
4454 else
4455 hash_sets &= ~HCLGE_D_IP_BIT;
4456
4457 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4458 hash_sets |= HCLGE_V_TAG_BIT;
4459
4460 return hash_sets;
4461}
4462
4463static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4464 struct ethtool_rxnfc *nfc)
4465{
4466 struct hclge_vport *vport = hclge_get_vport(handle);
4467 struct hclge_dev *hdev = vport->back;
4468 struct hclge_rss_input_tuple_cmd *req;
4469 struct hclge_desc desc;
4470 u8 tuple_sets;
4471 int ret;
4472
4473 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4474 RXH_L4_B_0_1 | RXH_L4_B_2_3))
4475 return -EINVAL;
4476
4477 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
6f2af429 4478 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
f7db940a 4479
6f2af429
YL
4480 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4481 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4482 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4483 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4484 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4485 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4486 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4487 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
f7db940a
L
4488
4489 tuple_sets = hclge_get_rss_hash_bits(nfc);
4490 switch (nfc->flow_type) {
4491 case TCP_V4_FLOW:
4492 req->ipv4_tcp_en = tuple_sets;
4493 break;
4494 case TCP_V6_FLOW:
4495 req->ipv6_tcp_en = tuple_sets;
4496 break;
4497 case UDP_V4_FLOW:
4498 req->ipv4_udp_en = tuple_sets;
4499 break;
4500 case UDP_V6_FLOW:
4501 req->ipv6_udp_en = tuple_sets;
4502 break;
4503 case SCTP_V4_FLOW:
4504 req->ipv4_sctp_en = tuple_sets;
4505 break;
4506 case SCTP_V6_FLOW:
4507 if ((nfc->data & RXH_L4_B_0_1) ||
4508 (nfc->data & RXH_L4_B_2_3))
4509 return -EINVAL;
4510
4511 req->ipv6_sctp_en = tuple_sets;
4512 break;
4513 case IPV4_FLOW:
4514 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4515 break;
4516 case IPV6_FLOW:
4517 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4518 break;
4519 default:
4520 return -EINVAL;
4521 }
4522
4523 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6f2af429 4524 if (ret) {
f7db940a
L
4525 dev_err(&hdev->pdev->dev,
4526 "Set rss tuple fail, status = %d\n", ret);
6f2af429
YL
4527 return ret;
4528 }
f7db940a 4529
6f2af429
YL
4530 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4531 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4532 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4533 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4534 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4535 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4536 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4537 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
232fc64b 4538 hclge_get_rss_type(vport);
6f2af429 4539 return 0;
f7db940a
L
4540}
4541
07d29954
L
4542static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4543 struct ethtool_rxnfc *nfc)
4544{
4545 struct hclge_vport *vport = hclge_get_vport(handle);
07d29954 4546 u8 tuple_sets;
07d29954
L
4547
4548 nfc->data = 0;
4549
07d29954
L
4550 switch (nfc->flow_type) {
4551 case TCP_V4_FLOW:
6f2af429 4552 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
07d29954
L
4553 break;
4554 case UDP_V4_FLOW:
6f2af429 4555 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
07d29954
L
4556 break;
4557 case TCP_V6_FLOW:
6f2af429 4558 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
07d29954
L
4559 break;
4560 case UDP_V6_FLOW:
6f2af429 4561 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
07d29954
L
4562 break;
4563 case SCTP_V4_FLOW:
6f2af429 4564 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
07d29954
L
4565 break;
4566 case SCTP_V6_FLOW:
6f2af429 4567 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
07d29954
L
4568 break;
4569 case IPV4_FLOW:
4570 case IPV6_FLOW:
4571 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4572 break;
4573 default:
4574 return -EINVAL;
4575 }
4576
4577 if (!tuple_sets)
4578 return 0;
4579
4580 if (tuple_sets & HCLGE_D_PORT_BIT)
4581 nfc->data |= RXH_L4_B_2_3;
4582 if (tuple_sets & HCLGE_S_PORT_BIT)
4583 nfc->data |= RXH_L4_B_0_1;
4584 if (tuple_sets & HCLGE_D_IP_BIT)
4585 nfc->data |= RXH_IP_DST;
4586 if (tuple_sets & HCLGE_S_IP_BIT)
4587 nfc->data |= RXH_IP_SRC;
4588
4589 return 0;
4590}
4591
46a3df9f
S
4592static int hclge_get_tc_size(struct hnae3_handle *handle)
4593{
4594 struct hclge_vport *vport = hclge_get_vport(handle);
4595 struct hclge_dev *hdev = vport->back;
4596
4597 return hdev->rss_size_max;
4598}
4599
77f255c1 4600int hclge_rss_init_hw(struct hclge_dev *hdev)
46a3df9f 4601{
46a3df9f 4602 struct hclge_vport *vport = hdev->vport;
268f5dfa
YL
4603 u8 *rss_indir = vport[0].rss_indirection_tbl;
4604 u16 rss_size = vport[0].alloc_rss_size;
354d0fab
PL
4605 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4606 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
268f5dfa
YL
4607 u8 *key = vport[0].rss_hash_key;
4608 u8 hfunc = vport[0].rss_algo;
46a3df9f 4609 u16 tc_valid[HCLGE_MAX_TC_NUM];
268f5dfa 4610 u16 roundup_size;
ebaf1908
WL
4611 unsigned int i;
4612 int ret;
68ece54e 4613
46a3df9f
S
4614 ret = hclge_set_rss_indir_table(hdev, rss_indir);
4615 if (ret)
268f5dfa 4616 return ret;
46a3df9f 4617
46a3df9f
S
4618 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4619 if (ret)
268f5dfa 4620 return ret;
46a3df9f
S
4621
4622 ret = hclge_set_rss_input_tuple(hdev);
4623 if (ret)
268f5dfa 4624 return ret;
46a3df9f 4625
68ece54e
YL
4626	/* Each TC has the same queue size, and the tc_size set to hardware is
4627	 * the log2 of rss_size rounded up to a power of two; the actual queue
4628	 * size is limited by the indirection table.
4629 */
4630 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4631 dev_err(&hdev->pdev->dev,
adcf738b 4632 "Configure rss tc size failed, invalid TC_SIZE = %u\n",
68ece54e 4633 rss_size);
268f5dfa 4634 return -EINVAL;
68ece54e
YL
4635 }
4636
4637 roundup_size = roundup_pow_of_two(rss_size);
4638 roundup_size = ilog2(roundup_size);
4639
46a3df9f 4640 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
68ece54e 4641 tc_valid[i] = 0;
46a3df9f 4642
68ece54e
YL
4643 if (!(hdev->hw_tc_map & BIT(i)))
4644 continue;
4645
4646 tc_valid[i] = 1;
4647 tc_size[i] = roundup_size;
4648 tc_offset[i] = rss_size * i;
46a3df9f 4649 }
68ece54e 4650
268f5dfa
YL
4651 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4652}
46a3df9f 4653
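/* Fill each vport's shadow RSS indirection table with a simple
 * round-robin mapping over its allocated RSS queue range.
 */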
268f5dfa
YL
4654void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4655{
4656 struct hclge_vport *vport = hdev->vport;
4657 int i, j;
46a3df9f 4658
268f5dfa
YL
4659 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4660 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4661 vport[j].rss_indirection_tbl[i] =
4662 i % vport[j].alloc_rss_size;
4663 }
4664}
4665
4666static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4667{
472d7ece 4668 int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
268f5dfa 4669 struct hclge_vport *vport = hdev->vport;
472d7ece 4670
295ba232 4671 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
472d7ece 4672 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
268f5dfa 4673
268f5dfa
YL
4674 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4675 vport[i].rss_tuple_sets.ipv4_tcp_en =
4676 HCLGE_RSS_INPUT_TUPLE_OTHER;
4677 vport[i].rss_tuple_sets.ipv4_udp_en =
4678 HCLGE_RSS_INPUT_TUPLE_OTHER;
4679 vport[i].rss_tuple_sets.ipv4_sctp_en =
4680 HCLGE_RSS_INPUT_TUPLE_SCTP;
4681 vport[i].rss_tuple_sets.ipv4_fragment_en =
4682 HCLGE_RSS_INPUT_TUPLE_OTHER;
4683 vport[i].rss_tuple_sets.ipv6_tcp_en =
4684 HCLGE_RSS_INPUT_TUPLE_OTHER;
4685 vport[i].rss_tuple_sets.ipv6_udp_en =
4686 HCLGE_RSS_INPUT_TUPLE_OTHER;
4687 vport[i].rss_tuple_sets.ipv6_sctp_en =
4688 HCLGE_RSS_INPUT_TUPLE_SCTP;
4689 vport[i].rss_tuple_sets.ipv6_fragment_en =
4690 HCLGE_RSS_INPUT_TUPLE_OTHER;
4691
472d7ece 4692 vport[i].rss_algo = rss_algo;
ea739c90 4693
472d7ece
JS
4694 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4695 HCLGE_RSS_KEY_SIZE);
268f5dfa
YL
4696 }
4697
4698 hclge_rss_indir_init_cfg(hdev);
46a3df9f
S
4699}
4700
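/* Map (en == true) or unmap (en == false) every ring in ring_chain to the
 * given vector, packing up to HCLGE_VECTOR_ELEMENTS_PER_CMD rings into
 * each command sent to the firmware.
 */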
84e095d6
SM
4701int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4702 int vector_id, bool en,
4703 struct hnae3_ring_chain_node *ring_chain)
46a3df9f
S
4704{
4705 struct hclge_dev *hdev = vport->back;
46a3df9f
S
4706 struct hnae3_ring_chain_node *node;
4707 struct hclge_desc desc;
37417c66
GL
4708 struct hclge_ctrl_vector_chain_cmd *req =
4709 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
84e095d6
SM
4710 enum hclge_cmd_status status;
4711 enum hclge_opcode_type op;
4712 u16 tqp_type_and_id;
46a3df9f
S
4713 int i;
4714
84e095d6
SM
4715 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4716 hclge_cmd_setup_basic_desc(&desc, op, false);
46a3df9f
S
4717 req->int_vector_id = vector_id;
4718
4719 i = 0;
4720 for (node = ring_chain; node; node = node->next) {
84e095d6 4721 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
e4e87715
PL
4722 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
4723 HCLGE_INT_TYPE_S,
4724 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4725 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4726 HCLGE_TQP_ID_S, node->tqp_index);
4727 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4728 HCLGE_INT_GL_IDX_S,
4729 hnae3_get_field(node->int_gl_idx,
4730 HNAE3_RING_GL_IDX_M,
4731 HNAE3_RING_GL_IDX_S));
84e095d6 4732 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
46a3df9f
S
4733 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4734 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
84e095d6 4735 req->vfid = vport->vport_id;
46a3df9f 4736
84e095d6
SM
4737 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4738 if (status) {
46a3df9f
S
4739 dev_err(&hdev->pdev->dev,
4740 "Map TQP fail, status is %d.\n",
84e095d6
SM
4741 status);
4742 return -EIO;
46a3df9f
S
4743 }
4744 i = 0;
4745
4746 hclge_cmd_setup_basic_desc(&desc,
84e095d6 4747 op,
46a3df9f
S
4748 false);
4749 req->int_vector_id = vector_id;
4750 }
4751 }
4752
4753 if (i > 0) {
4754 req->int_cause_num = i;
84e095d6
SM
4755 req->vfid = vport->vport_id;
4756 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4757 if (status) {
46a3df9f 4758 dev_err(&hdev->pdev->dev,
84e095d6
SM
4759 "Map TQP fail, status is %d.\n", status);
4760 return -EIO;
46a3df9f
S
4761 }
4762 }
4763
4764 return 0;
4765}
4766
9b2f3477 4767static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
84e095d6 4768 struct hnae3_ring_chain_node *ring_chain)
46a3df9f
S
4769{
4770 struct hclge_vport *vport = hclge_get_vport(handle);
4771 struct hclge_dev *hdev = vport->back;
4772 int vector_id;
4773
4774 vector_id = hclge_get_vector_index(hdev, vector);
4775 if (vector_id < 0) {
4776 dev_err(&hdev->pdev->dev,
7ab2b53e 4777 "failed to get vector index. vector=%d\n", vector);
46a3df9f
S
4778 return vector_id;
4779 }
4780
84e095d6 4781 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
46a3df9f
S
4782}
4783
9b2f3477 4784static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
84e095d6 4785 struct hnae3_ring_chain_node *ring_chain)
46a3df9f
S
4786{
4787 struct hclge_vport *vport = hclge_get_vport(handle);
4788 struct hclge_dev *hdev = vport->back;
84e095d6 4789 int vector_id, ret;
46a3df9f 4790
b50ae26c
PL
4791 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4792 return 0;
4793
46a3df9f
S
4794 vector_id = hclge_get_vector_index(hdev, vector);
4795 if (vector_id < 0) {
4796 dev_err(&handle->pdev->dev,
4797 "Get vector index fail. ret =%d\n", vector_id);
4798 return vector_id;
4799 }
4800
84e095d6 4801 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
0d3e6631 4802 if (ret)
84e095d6
SM
4803 dev_err(&handle->pdev->dev,
4804 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
9b2f3477 4805 vector_id, ret);
46a3df9f 4806
0d3e6631 4807 return ret;
46a3df9f
S
4808}
4809
e196ec75
JS
4810static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4811 struct hclge_promisc_param *param)
46a3df9f 4812{
d44f9b63 4813 struct hclge_promisc_cfg_cmd *req;
46a3df9f
S
4814 struct hclge_desc desc;
4815 int ret;
4816
4817 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4818
d44f9b63 4819 req = (struct hclge_promisc_cfg_cmd *)desc.data;
46a3df9f 4820 req->vf_id = param->vf_id;
96c0e861
PL
4821
4822	/* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4823	 * pdev revision(0x20); newer revisions support them. Setting
4824	 * these two fields does not return an error when the driver
4825	 * sends the command to firmware on revision(0x20).
4826 */
4827 req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4828 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
46a3df9f
S
4829
4830 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 4831 if (ret)
46a3df9f 4832 dev_err(&hdev->pdev->dev,
c631c696
JS
4833 "failed to set vport %d promisc mode, ret = %d.\n",
4834 param->vf_id, ret);
3f639907
JS
4835
4836 return ret;
46a3df9f
S
4837}
4838
e196ec75
JS
4839static void hclge_promisc_param_init(struct hclge_promisc_param *param,
4840 bool en_uc, bool en_mc, bool en_bc,
4841 int vport_id)
46a3df9f
S
4842{
4843 if (!param)
4844 return;
4845
4846 memset(param, 0, sizeof(struct hclge_promisc_param));
4847 if (en_uc)
4848 param->enable = HCLGE_PROMISC_EN_UC;
4849 if (en_mc)
4850 param->enable |= HCLGE_PROMISC_EN_MC;
4851 if (en_bc)
4852 param->enable |= HCLGE_PROMISC_EN_BC;
4853 param->vf_id = vport_id;
4854}
4855
e196ec75
JS
4856int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
4857 bool en_mc_pmc, bool en_bc_pmc)
4858{
4859 struct hclge_dev *hdev = vport->back;
4860 struct hclge_promisc_param param;
4861
4862 hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4863 vport->vport_id);
4864 return hclge_cmd_set_promisc_mode(hdev, &param);
4865}
4866
7fa6be4f
HT
4867static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4868 bool en_mc_pmc)
46a3df9f
S
4869{
4870 struct hclge_vport *vport = hclge_get_vport(handle);
295ba232 4871 struct hclge_dev *hdev = vport->back;
28673b33 4872 bool en_bc_pmc = true;
46a3df9f 4873
295ba232
GH
4874	/* For devices whose version is below V2, if broadcast promisc is enabled,
4875	 * the vlan filter is always bypassed. So broadcast promisc should be
4876	 * disabled until the user enables promisc mode
28673b33 4877 */
295ba232 4878 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
28673b33
JS
4879 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4880
e196ec75
JS
4881 return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
4882 en_bc_pmc);
46a3df9f
S
4883}
4884
c631c696
JS
4885static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
4886{
4887 struct hclge_vport *vport = hclge_get_vport(handle);
4888 struct hclge_dev *hdev = vport->back;
4889
4890 set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
4891}
4892
d695964d
JS
4893static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4894{
4895 struct hclge_get_fd_mode_cmd *req;
4896 struct hclge_desc desc;
4897 int ret;
4898
4899 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4900
4901 req = (struct hclge_get_fd_mode_cmd *)desc.data;
4902
4903 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4904 if (ret) {
4905 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4906 return ret;
4907 }
4908
4909 *fd_mode = req->mode;
4910
4911 return ret;
4912}
4913
4914static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4915 u32 *stage1_entry_num,
4916 u32 *stage2_entry_num,
4917 u16 *stage1_counter_num,
4918 u16 *stage2_counter_num)
4919{
4920 struct hclge_get_fd_allocation_cmd *req;
4921 struct hclge_desc desc;
4922 int ret;
4923
4924 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4925
4926 req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4927
4928 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4929 if (ret) {
4930 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4931 ret);
4932 return ret;
4933 }
4934
4935 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4936 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4937 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4938 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4939
4940 return ret;
4941}
4942
84944d5c
GL
4943static int hclge_set_fd_key_config(struct hclge_dev *hdev,
4944 enum HCLGE_FD_STAGE stage_num)
d695964d
JS
4945{
4946 struct hclge_set_fd_key_config_cmd *req;
4947 struct hclge_fd_key_cfg *stage;
4948 struct hclge_desc desc;
4949 int ret;
4950
4951 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4952
4953 req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4954 stage = &hdev->fd_cfg.key_cfg[stage_num];
4955 req->stage = stage_num;
4956 req->key_select = stage->key_sel;
4957 req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4958 req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4959 req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4960 req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4961 req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4962 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4963
4964 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4965 if (ret)
4966 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4967
4968 return ret;
4969}
4970
4971static int hclge_init_fd_config(struct hclge_dev *hdev)
4972{
4973#define LOW_2_WORDS 0x03
4974 struct hclge_fd_key_cfg *key_cfg;
4975 int ret;
4976
4977 if (!hnae3_dev_fd_supported(hdev))
4978 return 0;
4979
4980 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4981 if (ret)
4982 return ret;
4983
4984 switch (hdev->fd_cfg.fd_mode) {
4985 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4986 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4987 break;
4988 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4989 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4990 break;
4991 default:
4992 dev_err(&hdev->pdev->dev,
adcf738b 4993 "Unsupported flow director mode %u\n",
d695964d
JS
4994 hdev->fd_cfg.fd_mode);
4995 return -EOPNOTSUPP;
4996 }
4997
d695964d
JS
4998 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4999	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
5000 key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
5001 key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
5002 key_cfg->outer_sipv6_word_en = 0;
5003 key_cfg->outer_dipv6_word_en = 0;
5004
5005 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
5006 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
5007 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5008 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5009
5010	/* If the max 400-bit key is used, we can also match on MAC address tuples */
16505f87 5011 if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1)
d695964d
JS
5012 key_cfg->tuple_active |=
5013 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
d695964d
JS
5014
5015 /* roce_type is used to filter roce frames
5016 * dst_vport is used to specify the rule
5017 */
5018 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
5019
5020 ret = hclge_get_fd_allocation(hdev,
5021 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
5022 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
5023 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
5024 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
5025 if (ret)
5026 return ret;
5027
5028 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
5029}
5030
11732868
JS
5031static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
5032 int loc, u8 *key, bool is_add)
5033{
5034 struct hclge_fd_tcam_config_1_cmd *req1;
5035 struct hclge_fd_tcam_config_2_cmd *req2;
5036 struct hclge_fd_tcam_config_3_cmd *req3;
5037 struct hclge_desc desc[3];
5038 int ret;
5039
5040 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
5041 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5042 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
5043 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5044 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
5045
5046 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
5047 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
5048 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
5049
5050 req1->stage = stage;
5051 req1->xy_sel = sel_x ? 1 : 0;
5052 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
5053 req1->index = cpu_to_le32(loc);
5054 req1->entry_vld = sel_x ? is_add : 0;
5055
5056 if (key) {
5057 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
5058 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
5059 sizeof(req2->tcam_data));
5060 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
5061 sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
5062 }
5063
5064 ret = hclge_cmd_send(&hdev->hw, desc, 3);
5065 if (ret)
5066 dev_err(&hdev->pdev->dev,
5067 "config tcam key fail, ret=%d\n",
5068 ret);
5069
5070 return ret;
5071}
5072
5073static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5074 struct hclge_fd_ad_data *action)
5075{
5076 struct hclge_fd_ad_config_cmd *req;
5077 struct hclge_desc desc;
5078 u64 ad_data = 0;
5079 int ret;
5080
5081 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5082
5083 req = (struct hclge_fd_ad_config_cmd *)desc.data;
5084 req->index = cpu_to_le32(loc);
5085 req->stage = stage;
5086
5087 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5088 action->write_rule_id_to_bd);
5089 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5090 action->rule_id);
5091 ad_data <<= 32;
5092 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5093 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5094 action->forward_to_direct_queue);
5095 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5096 action->queue_id);
5097 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5098 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5099 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5100 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5101 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5102 action->next_input_key);
5103
5104 req->ad_data = cpu_to_le64(ad_data);
5105 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5106 if (ret)
5107 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5108
5109 return ret;
5110}
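/* The 64-bit action data is assembled in two halves: the rule-id fields are
 * written while the word is still "low", the whole value is shifted left by
 * 32 so they land in bits 63..32, and then the queue/counter/next-stage
 * fields are written into the freshly cleared low 32 bits.  A minimal
 * sketch of the same pattern (field positions are made up, only the
 * shift-then-fill idea is real):
 */
#if 0	/* illustrative only */
static u64 example_build_ad_data(u16 rule_id, u16 queue_id)
{
	u64 ad = 0;

	ad |= rule_id;	/* destined for the high half */
	ad <<= 32;	/* now occupies bits 63..32 */
	ad |= queue_id;	/* low-half field, bits 31..0 */

	return ad;
}
#endif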
5111
5112static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5113 struct hclge_fd_rule *rule)
5114{
5115 u16 tmp_x_s, tmp_y_s;
5116 u32 tmp_x_l, tmp_y_l;
5117 int i;
5118
5119 if (rule->unused_tuple & tuple_bit)
5120 return true;
5121
5122 switch (tuple_bit) {
11732868 5123 case BIT(INNER_DST_MAC):
e91e388c
JS
5124 for (i = 0; i < ETH_ALEN; i++) {
5125 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
11732868 5126 rule->tuples_mask.dst_mac[i]);
e91e388c 5127 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
11732868
JS
5128 rule->tuples_mask.dst_mac[i]);
5129 }
5130
5131 return true;
5132 case BIT(INNER_SRC_MAC):
e91e388c
JS
5133 for (i = 0; i < ETH_ALEN; i++) {
5134 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
11732868 5135 rule->tuples_mask.src_mac[i]);
e91e388c 5136 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
11732868
JS
5137 rule->tuples_mask.src_mac[i]);
5138 }
5139
5140 return true;
5141 case BIT(INNER_VLAN_TAG_FST):
5142 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
5143 rule->tuples_mask.vlan_tag1);
5144 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
5145 rule->tuples_mask.vlan_tag1);
5146 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5147 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5148
5149 return true;
5150 case BIT(INNER_ETH_TYPE):
5151 calc_x(tmp_x_s, rule->tuples.ether_proto,
5152 rule->tuples_mask.ether_proto);
5153 calc_y(tmp_y_s, rule->tuples.ether_proto,
5154 rule->tuples_mask.ether_proto);
5155 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5156 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5157
5158 return true;
5159 case BIT(INNER_IP_TOS):
5160 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5161 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5162
5163 return true;
5164 case BIT(INNER_IP_PROTO):
5165 calc_x(*key_x, rule->tuples.ip_proto,
5166 rule->tuples_mask.ip_proto);
5167 calc_y(*key_y, rule->tuples.ip_proto,
5168 rule->tuples_mask.ip_proto);
5169
5170 return true;
5171 case BIT(INNER_SRC_IP):
e91e388c
JS
5172 calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
5173 rule->tuples_mask.src_ip[IPV4_INDEX]);
5174 calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
5175 rule->tuples_mask.src_ip[IPV4_INDEX]);
11732868
JS
5176 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5177 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5178
5179 return true;
5180 case BIT(INNER_DST_IP):
e91e388c
JS
5181 calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
5182 rule->tuples_mask.dst_ip[IPV4_INDEX]);
5183 calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
5184 rule->tuples_mask.dst_ip[IPV4_INDEX]);
11732868
JS
5185 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5186 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5187
5188 return true;
5189 case BIT(INNER_SRC_PORT):
5190 calc_x(tmp_x_s, rule->tuples.src_port,
5191 rule->tuples_mask.src_port);
5192 calc_y(tmp_y_s, rule->tuples.src_port,
5193 rule->tuples_mask.src_port);
5194 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5195 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5196
5197 return true;
5198 case BIT(INNER_DST_PORT):
5199 calc_x(tmp_x_s, rule->tuples.dst_port,
5200 rule->tuples_mask.dst_port);
5201 calc_y(tmp_y_s, rule->tuples.dst_port,
5202 rule->tuples_mask.dst_port);
5203 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5204 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5205
5206 return true;
5207 default:
5208 return false;
5209 }
5210}
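/* calc_x()/calc_y() (macros defined earlier in this file) turn each
 * (value, mask) pair into the x/y bit patterns stored in the TCAM, so that
 * masked-out bits become don't-care; tuples flagged in unused_tuple are
 * skipped entirely and keep their key bytes at zero.  Note that MAC
 * addresses are written byte-reversed (key_x[ETH_ALEN - 1 - i]) and IPv4
 * addresses use only the IPV4_INDEX slot of the src_ip/dst_ip arrays.
 * Below is one common x/y convention, shown purely as an illustration; the
 * authoritative encoding is whatever the macros implement:
 */
#if 0	/* illustrative only, not the driver's actual encoding */
static void example_xy(u8 val, u8 mask, u8 *x, u8 *y)
{
	*x = val & mask;	/* bits that must be 1 */
	*y = ~val & mask;	/* bits that must be 0 */
	/* mask bit 0 -> both x and y are 0 -> don't care */
}
#endif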
5211
5212static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5213 u8 vf_id, u8 network_port_id)
5214{
5215 u32 port_number = 0;
5216
5217 if (port_type == HOST_PORT) {
5218 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5219 pf_id);
5220 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5221 vf_id);
5222 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5223 } else {
5224 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5225 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5226 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5227 }
5228
5229 return port_number;
5230}
5231
5232static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5233 __le32 *key_x, __le32 *key_y,
5234 struct hclge_fd_rule *rule)
5235{
5236 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5237 u8 cur_pos = 0, tuple_size, shift_bits;
ebaf1908 5238 unsigned int i;
11732868
JS
5239
5240 for (i = 0; i < MAX_META_DATA; i++) {
5241 tuple_size = meta_data_key_info[i].key_length;
5242 tuple_bit = key_cfg->meta_data_active & BIT(i);
5243
5244 switch (tuple_bit) {
5245 case BIT(ROCE_TYPE):
5246 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5247 cur_pos += tuple_size;
5248 break;
5249 case BIT(DST_VPORT):
5250 port_number = hclge_get_port_number(HOST_PORT, 0,
5251 rule->vf_id, 0);
5252 hnae3_set_field(meta_data,
5253 GENMASK(cur_pos + tuple_size, cur_pos),
5254 cur_pos, port_number);
5255 cur_pos += tuple_size;
5256 break;
5257 default:
5258 break;
5259 }
5260 }
5261
5262 calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5263 calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5264 shift_bits = sizeof(meta_data) * 8 - cur_pos;
5265
5266 *key_x = cpu_to_le32(tmp_x << shift_bits);
5267 *key_y = cpu_to_le32(tmp_y << shift_bits);
5268}
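/* Meta data bits are packed from bit 0 upwards (cur_pos) and the final left
 * shift by "32 - cur_pos" makes them MSB-aligned within the 32-bit meta
 * data word, i.e. the used bits end up at the top of *key_x / *key_y.  The
 * only meta data matched here is the NIC/RoCE packet type and the
 * destination vport derived from rule->vf_id.  Alignment sketch:
 */
#if 0	/* illustrative only */
	u32 meta = 0x15;		/* 5 bits accumulated at positions 0..4 */
	u8 shift = 32 - 5;		/* shift_bits for cur_pos == 5 */
	u32 aligned = meta << shift;	/* bits now occupy positions 27..31 */
#endif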
5269
5270 /* A complete key is made up of the meta data key and the tuple key.
5271 * The meta data key is stored in the MSB region, the tuple key is stored
5272 * in the LSB region, and unused bits are filled with 0.
5273 */
5274static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5275 struct hclge_fd_rule *rule)
5276{
5277 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5278 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5279 u8 *cur_key_x, *cur_key_y;
11732868 5280 u8 meta_data_region;
84944d5c
GL
5281 u8 tuple_size;
5282 int ret;
5283 u32 i;
11732868
JS
5284
5285 memset(key_x, 0, sizeof(key_x));
5286 memset(key_y, 0, sizeof(key_y));
5287 cur_key_x = key_x;
5288 cur_key_y = key_y;
5289
5290 for (i = 0; i < MAX_TUPLE; i++) {
5291 bool tuple_valid;
5292 u32 check_tuple;
5293
5294 tuple_size = tuple_key_info[i].key_length / 8;
5295 check_tuple = key_cfg->tuple_active & BIT(i);
5296
5297 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5298 cur_key_y, rule);
5299 if (tuple_valid) {
5300 cur_key_x += tuple_size;
5301 cur_key_y += tuple_size;
5302 }
5303 }
5304
5305 meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5306 MAX_META_DATA_LENGTH / 8;
5307
5308 hclge_fd_convert_meta_data(key_cfg,
5309 (__le32 *)(key_x + meta_data_region),
5310 (__le32 *)(key_y + meta_data_region),
5311 rule);
5312
5313 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5314 true);
5315 if (ret) {
5316 dev_err(&hdev->pdev->dev,
adcf738b 5317 "fd key_y config fail, loc=%u, ret=%d\n",
11732868
JS
5318 rule->location, ret);
5319 return ret;
5320 }
5321
5322 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5323 true);
5324 if (ret)
5325 dev_err(&hdev->pdev->dev,
adcf738b 5326 "fd key_x config fail, loc=%u, ret=%d\n",
11732868
JS
5327 rule->location, ret);
5328 return ret;
5329}
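/* The entry is programmed in two passes: first the "y" half of the key with
 * sel_x set to false, then the "x" half with sel_x set to true, which is
 * also the pass that marks the entry valid in hclge_fd_tcam_config().
 * Within the key buffers the tuple bytes grow from offset 0 (LSB region)
 * while the meta data occupies the last MAX_META_DATA_LENGTH / 8 bytes of
 * the max_key_length / 8 byte key (MSB region), matching the layout comment
 * above.
 */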
5330
5331static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5332 struct hclge_fd_rule *rule)
5333{
5334 struct hclge_fd_ad_data ad_data;
5335
5336 ad_data.ad_id = rule->location;
5337
5338 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5339 ad_data.drop_packet = true;
5340 ad_data.forward_to_direct_queue = false;
5341 ad_data.queue_id = 0;
5342 } else {
5343 ad_data.drop_packet = false;
5344 ad_data.forward_to_direct_queue = true;
5345 ad_data.queue_id = rule->queue_id;
5346 }
5347
5348 ad_data.use_counter = false;
5349 ad_data.counter_id = 0;
5350
5351 ad_data.use_next_stage = false;
5352 ad_data.next_input_key = 0;
5353
5354 ad_data.write_rule_id_to_bd = true;
5355 ad_data.rule_id = rule->location;
5356
5357 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5358}
5359
736fc0e1
JS
5360static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
5361 u32 *unused_tuple)
dd74f815 5362{
736fc0e1 5363 if (!spec || !unused_tuple)
dd74f815
JS
5364 return -EINVAL;
5365
736fc0e1 5366 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
dd74f815 5367
736fc0e1
JS
5368 if (!spec->ip4src)
5369 *unused_tuple |= BIT(INNER_SRC_IP);
dd74f815 5370
736fc0e1
JS
5371 if (!spec->ip4dst)
5372 *unused_tuple |= BIT(INNER_DST_IP);
dd74f815 5373
736fc0e1
JS
5374 if (!spec->psrc)
5375 *unused_tuple |= BIT(INNER_SRC_PORT);
dd74f815 5376
736fc0e1
JS
5377 if (!spec->pdst)
5378 *unused_tuple |= BIT(INNER_DST_PORT);
dd74f815 5379
736fc0e1
JS
5380 if (!spec->tos)
5381 *unused_tuple |= BIT(INNER_IP_TOS);
dd74f815 5382
736fc0e1
JS
5383 return 0;
5384}
dd74f815 5385
736fc0e1
JS
5386static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
5387 u32 *unused_tuple)
5388{
5389 if (!spec || !unused_tuple)
5390 return -EINVAL;
dd74f815 5391
736fc0e1
JS
5392 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5393 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
dd74f815 5394
736fc0e1
JS
5395 if (!spec->ip4src)
5396 *unused_tuple |= BIT(INNER_SRC_IP);
dd74f815 5397
736fc0e1
JS
5398 if (!spec->ip4dst)
5399 *unused_tuple |= BIT(INNER_DST_IP);
dd74f815 5400
736fc0e1
JS
5401 if (!spec->tos)
5402 *unused_tuple |= BIT(INNER_IP_TOS);
dd74f815 5403
736fc0e1
JS
5404 if (!spec->proto)
5405 *unused_tuple |= BIT(INNER_IP_PROTO);
dd74f815 5406
736fc0e1
JS
5407 if (spec->l4_4_bytes)
5408 return -EOPNOTSUPP;
dd74f815 5409
736fc0e1
JS
5410 if (spec->ip_ver != ETH_RX_NFC_IP4)
5411 return -EOPNOTSUPP;
dd74f815 5412
736fc0e1
JS
5413 return 0;
5414}
dd74f815 5415
736fc0e1
JS
5416static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
5417 u32 *unused_tuple)
5418{
5419 if (!spec || !unused_tuple)
5420 return -EINVAL;
dd74f815 5421
736fc0e1
JS
5422 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5423 BIT(INNER_IP_TOS);
dd74f815 5424
736fc0e1
JS
5425 /* check whether src/dst ip address is used */
5426 if (!spec->ip6src[0] && !spec->ip6src[1] &&
5427 !spec->ip6src[2] && !spec->ip6src[3])
5428 *unused_tuple |= BIT(INNER_SRC_IP);
dd74f815 5429
736fc0e1
JS
5430 if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
5431 !spec->ip6dst[2] && !spec->ip6dst[3])
5432 *unused_tuple |= BIT(INNER_DST_IP);
dd74f815 5433
736fc0e1
JS
5434 if (!spec->psrc)
5435 *unused_tuple |= BIT(INNER_SRC_PORT);
dd74f815 5436
736fc0e1
JS
5437 if (!spec->pdst)
5438 *unused_tuple |= BIT(INNER_DST_PORT);
dd74f815 5439
736fc0e1
JS
5440 if (spec->tclass)
5441 return -EOPNOTSUPP;
dd74f815 5442
736fc0e1
JS
5443 return 0;
5444}
dd74f815 5445
736fc0e1
JS
5446static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
5447 u32 *unused_tuple)
5448{
5449 if (!spec || !unused_tuple)
5450 return -EINVAL;
dd74f815 5451
736fc0e1
JS
5452 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5453 BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
dd74f815 5454
736fc0e1
JS
5455 /* check whether src/dst ip address is used */
5456 if (!spec->ip6src[0] && !spec->ip6src[1] &&
5457 !spec->ip6src[2] && !spec->ip6src[3])
5458 *unused_tuple |= BIT(INNER_SRC_IP);
dd74f815 5459
736fc0e1
JS
5460 if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
5461 !spec->ip6dst[2] && !spec->ip6dst[3])
5462 *unused_tuple |= BIT(INNER_DST_IP);
dd74f815 5463
736fc0e1
JS
5464 if (!spec->l4_proto)
5465 *unused_tuple |= BIT(INNER_IP_PROTO);
dd74f815 5466
736fc0e1
JS
5467 if (spec->tclass)
5468 return -EOPNOTSUPP;
dd74f815 5469
736fc0e1 5470 if (spec->l4_4_bytes)
dd74f815 5471 return -EOPNOTSUPP;
dd74f815 5472
736fc0e1
JS
5473 return 0;
5474}
5475
5476static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
5477{
5478 if (!spec || !unused_tuple)
5479 return -EINVAL;
5480
5481 *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5482 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5483 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5484
5485 if (is_zero_ether_addr(spec->h_source))
5486 *unused_tuple |= BIT(INNER_SRC_MAC);
5487
5488 if (is_zero_ether_addr(spec->h_dest))
5489 *unused_tuple |= BIT(INNER_DST_MAC);
5490
5491 if (!spec->h_proto)
5492 *unused_tuple |= BIT(INNER_ETH_TYPE);
5493
5494 return 0;
5495}
5496
5497static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
5498 struct ethtool_rx_flow_spec *fs,
5499 u32 *unused_tuple)
5500{
0b4bdc55 5501 if (fs->flow_type & FLOW_EXT) {
a3ca5e90
GL
5502 if (fs->h_ext.vlan_etype) {
5503 dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
dd74f815 5504 return -EOPNOTSUPP;
a3ca5e90
GL
5505 }
5506
dd74f815 5507 if (!fs->h_ext.vlan_tci)
736fc0e1 5508 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
dd74f815 5509
736fc0e1 5510 if (fs->m_ext.vlan_tci &&
a3ca5e90
GL
5511 be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
5512 dev_err(&hdev->pdev->dev,
5513 "failed to config vlan_tci, invalid vlan_tci: %u, max is %u.\n",
5514 ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
736fc0e1 5515 return -EINVAL;
a3ca5e90 5516 }
dd74f815 5517 } else {
736fc0e1 5518 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
dd74f815
JS
5519 }
5520
5521 if (fs->flow_type & FLOW_MAC_EXT) {
16505f87 5522 if (hdev->fd_cfg.fd_mode !=
a3ca5e90
GL
5523 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5524 dev_err(&hdev->pdev->dev,
5525 "FLOW_MAC_EXT is not supported in current fd mode!\n");
dd74f815 5526 return -EOPNOTSUPP;
a3ca5e90 5527 }
dd74f815
JS
5528
5529 if (is_zero_ether_addr(fs->h_ext.h_dest))
736fc0e1 5530 *unused_tuple |= BIT(INNER_DST_MAC);
dd74f815 5531 else
0b4bdc55 5532 *unused_tuple &= ~BIT(INNER_DST_MAC);
dd74f815
JS
5533 }
5534
5535 return 0;
5536}
5537
736fc0e1
JS
5538static int hclge_fd_check_spec(struct hclge_dev *hdev,
5539 struct ethtool_rx_flow_spec *fs,
5540 u32 *unused_tuple)
5541{
16505f87 5542 u32 flow_type;
736fc0e1
JS
5543 int ret;
5544
a3ca5e90
GL
5545 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5546 dev_err(&hdev->pdev->dev,
5547 "failed to config fd rules, invalid rule location: %u, max is %u\n.",
5548 fs->location,
5549 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
736fc0e1 5550 return -EINVAL;
a3ca5e90 5551 }
736fc0e1 5552
736fc0e1
JS
5553 if ((fs->flow_type & FLOW_EXT) &&
5554 (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5555 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5556 return -EOPNOTSUPP;
5557 }
5558
16505f87
GL
5559 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5560 switch (flow_type) {
736fc0e1
JS
5561 case SCTP_V4_FLOW:
5562 case TCP_V4_FLOW:
5563 case UDP_V4_FLOW:
5564 ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
5565 unused_tuple);
5566 break;
5567 case IP_USER_FLOW:
5568 ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
5569 unused_tuple);
5570 break;
5571 case SCTP_V6_FLOW:
5572 case TCP_V6_FLOW:
5573 case UDP_V6_FLOW:
5574 ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
5575 unused_tuple);
5576 break;
5577 case IPV6_USER_FLOW:
5578 ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
5579 unused_tuple);
5580 break;
5581 case ETHER_FLOW:
5582 if (hdev->fd_cfg.fd_mode !=
5583 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5584 dev_err(&hdev->pdev->dev,
5585 "ETHER_FLOW is not supported in current fd mode!\n");
5586 return -EOPNOTSUPP;
5587 }
5588
5589 ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
5590 unused_tuple);
5591 break;
5592 default:
a3ca5e90
GL
5593 dev_err(&hdev->pdev->dev,
5594 "unsupported protocol type, protocol type = %#x\n",
5595 flow_type);
736fc0e1
JS
5596 return -EOPNOTSUPP;
5597 }
5598
a3ca5e90
GL
5599 if (ret) {
5600 dev_err(&hdev->pdev->dev,
5601 "failed to check flow union tuple, ret = %d\n",
5602 ret);
736fc0e1 5603 return ret;
a3ca5e90 5604 }
736fc0e1
JS
5605
5606 return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
5607}
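/* A bit set in *unused_tuple means the rule does not match on that tuple;
 * hclge_fd_convert_tuple() later treats such tuples as pure don't-care.
 * A minimal sketch of the contract of the check helpers above, using
 * hypothetical values:
 */
#if 0	/* illustrative only */
static void example_unused_tuple(void)
{
	/* TCP/IPv4 rule matching only on destination IP and port */
	struct ethtool_tcpip4_spec spec = {
		.ip4dst = cpu_to_be32(0xc0a80001),	/* 192.168.0.1 */
		.pdst = cpu_to_be16(443),
	};
	u32 unused = 0;

	if (hclge_fd_check_tcpip4_tuple(&spec, &unused))
		return;

	/* unused now has INNER_SRC_MAC, INNER_DST_MAC, INNER_SRC_IP,
	 * INNER_SRC_PORT and INNER_IP_TOS set; DST_IP and DST_PORT are used.
	 */
}
#endif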
5608
dd74f815
JS
5609static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5610{
5611 struct hclge_fd_rule *rule = NULL;
5612 struct hlist_node *node2;
5613
44122887 5614 spin_lock_bh(&hdev->fd_rule_lock);
dd74f815
JS
5615 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5616 if (rule->location >= location)
5617 break;
5618 }
5619
44122887
JS
5620 spin_unlock_bh(&hdev->fd_rule_lock);
5621
dd74f815
JS
5622 return rule && rule->location == location;
5623}
5624
44122887 5625/* make sure being called after lock up with fd_rule_lock */
dd74f815
JS
5626static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5627 struct hclge_fd_rule *new_rule,
5628 u16 location,
5629 bool is_add)
5630{
5631 struct hclge_fd_rule *rule = NULL, *parent = NULL;
5632 struct hlist_node *node2;
5633
5634 if (is_add && !new_rule)
5635 return -EINVAL;
5636
5637 hlist_for_each_entry_safe(rule, node2,
5638 &hdev->fd_rule_list, rule_node) {
5639 if (rule->location >= location)
5640 break;
5641 parent = rule;
5642 }
5643
5644 if (rule && rule->location == location) {
5645 hlist_del(&rule->rule_node);
5646 kfree(rule);
5647 hdev->hclge_fd_rule_num--;
5648
44122887
JS
5649 if (!is_add) {
5650 if (!hdev->hclge_fd_rule_num)
5651 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5652 clear_bit(location, hdev->fd_bmap);
dd74f815 5653
44122887
JS
5654 return 0;
5655 }
dd74f815
JS
5656 } else if (!is_add) {
5657 dev_err(&hdev->pdev->dev,
adcf738b 5658 "delete fail, rule %u is inexistent\n",
dd74f815
JS
5659 location);
5660 return -EINVAL;
5661 }
5662
5663 INIT_HLIST_NODE(&new_rule->rule_node);
5664
5665 if (parent)
5666 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5667 else
5668 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5669
44122887 5670 set_bit(location, hdev->fd_bmap);
dd74f815 5671 hdev->hclge_fd_rule_num++;
44122887 5672 hdev->fd_active_type = new_rule->rule_type;
dd74f815
JS
5673
5674 return 0;
5675}
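/* fd_rule_list is kept sorted by rule location: lookup and insertion both
 * walk the list until the first node whose location is >= the target, so
 * "parent" above is the node the new rule is linked behind.  The fd_bmap
 * bitmap tracks which locations are occupied, and fd_active_type records
 * whether the active rules were added via ethtool (EP) or via aRFS.
 */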
5676
5677static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5678 struct ethtool_rx_flow_spec *fs,
5679 struct hclge_fd_rule *rule)
5680{
5681 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5682
5683 switch (flow_type) {
5684 case SCTP_V4_FLOW:
5685 case TCP_V4_FLOW:
5686 case UDP_V4_FLOW:
e91e388c 5687 rule->tuples.src_ip[IPV4_INDEX] =
dd74f815 5688 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
e91e388c 5689 rule->tuples_mask.src_ip[IPV4_INDEX] =
dd74f815
JS
5690 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5691
e91e388c 5692 rule->tuples.dst_ip[IPV4_INDEX] =
dd74f815 5693 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
e91e388c 5694 rule->tuples_mask.dst_ip[IPV4_INDEX] =
dd74f815
JS
5695 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5696
5697 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5698 rule->tuples_mask.src_port =
5699 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5700
5701 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5702 rule->tuples_mask.dst_port =
5703 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5704
5705 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5706 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5707
5708 rule->tuples.ether_proto = ETH_P_IP;
5709 rule->tuples_mask.ether_proto = 0xFFFF;
5710
5711 break;
5712 case IP_USER_FLOW:
e91e388c 5713 rule->tuples.src_ip[IPV4_INDEX] =
dd74f815 5714 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
e91e388c 5715 rule->tuples_mask.src_ip[IPV4_INDEX] =
dd74f815
JS
5716 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5717
e91e388c 5718 rule->tuples.dst_ip[IPV4_INDEX] =
dd74f815 5719 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
e91e388c 5720 rule->tuples_mask.dst_ip[IPV4_INDEX] =
dd74f815
JS
5721 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5722
5723 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5724 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5725
5726 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5727 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5728
5729 rule->tuples.ether_proto = ETH_P_IP;
5730 rule->tuples_mask.ether_proto = 0xFFFF;
5731
5732 break;
5733 case SCTP_V6_FLOW:
5734 case TCP_V6_FLOW:
5735 case UDP_V6_FLOW:
5736 be32_to_cpu_array(rule->tuples.src_ip,
e91e388c 5737 fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
dd74f815 5738 be32_to_cpu_array(rule->tuples_mask.src_ip,
e91e388c 5739 fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
dd74f815
JS
5740
5741 be32_to_cpu_array(rule->tuples.dst_ip,
e91e388c 5742 fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
dd74f815 5743 be32_to_cpu_array(rule->tuples_mask.dst_ip,
e91e388c 5744 fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
dd74f815
JS
5745
5746 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5747 rule->tuples_mask.src_port =
5748 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5749
5750 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5751 rule->tuples_mask.dst_port =
5752 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5753
5754 rule->tuples.ether_proto = ETH_P_IPV6;
5755 rule->tuples_mask.ether_proto = 0xFFFF;
5756
5757 break;
5758 case IPV6_USER_FLOW:
5759 be32_to_cpu_array(rule->tuples.src_ip,
e91e388c 5760 fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
dd74f815 5761 be32_to_cpu_array(rule->tuples_mask.src_ip,
e91e388c 5762 fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
dd74f815
JS
5763
5764 be32_to_cpu_array(rule->tuples.dst_ip,
e91e388c 5765 fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
dd74f815 5766 be32_to_cpu_array(rule->tuples_mask.dst_ip,
e91e388c 5767 fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
dd74f815
JS
5768
5769 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5770 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5771
5772 rule->tuples.ether_proto = ETH_P_IPV6;
5773 rule->tuples_mask.ether_proto = 0xFFFF;
5774
5775 break;
5776 case ETHER_FLOW:
5777 ether_addr_copy(rule->tuples.src_mac,
5778 fs->h_u.ether_spec.h_source);
5779 ether_addr_copy(rule->tuples_mask.src_mac,
5780 fs->m_u.ether_spec.h_source);
5781
5782 ether_addr_copy(rule->tuples.dst_mac,
5783 fs->h_u.ether_spec.h_dest);
5784 ether_addr_copy(rule->tuples_mask.dst_mac,
5785 fs->m_u.ether_spec.h_dest);
5786
5787 rule->tuples.ether_proto =
5788 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5789 rule->tuples_mask.ether_proto =
5790 be16_to_cpu(fs->m_u.ether_spec.h_proto);
5791
5792 break;
5793 default:
5794 return -EOPNOTSUPP;
5795 }
5796
5797 switch (flow_type) {
5798 case SCTP_V4_FLOW:
5799 case SCTP_V6_FLOW:
5800 rule->tuples.ip_proto = IPPROTO_SCTP;
5801 rule->tuples_mask.ip_proto = 0xFF;
5802 break;
5803 case TCP_V4_FLOW:
5804 case TCP_V6_FLOW:
5805 rule->tuples.ip_proto = IPPROTO_TCP;
5806 rule->tuples_mask.ip_proto = 0xFF;
5807 break;
5808 case UDP_V4_FLOW:
5809 case UDP_V6_FLOW:
5810 rule->tuples.ip_proto = IPPROTO_UDP;
5811 rule->tuples_mask.ip_proto = 0xFF;
5812 break;
5813 default:
5814 break;
5815 }
5816
0b4bdc55 5817 if (fs->flow_type & FLOW_EXT) {
dd74f815
JS
5818 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5819 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5820 }
5821
5822 if (fs->flow_type & FLOW_MAC_EXT) {
5823 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5824 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5825 }
5826
5827 return 0;
5828}
5829
44122887
JS
5830/* make sure being called after lock up with fd_rule_lock */
5831static int hclge_fd_config_rule(struct hclge_dev *hdev,
5832 struct hclge_fd_rule *rule)
5833{
5834 int ret;
5835
5836 if (!rule) {
5837 dev_err(&hdev->pdev->dev,
5838 "The flow director rule is NULL\n");
5839 return -EINVAL;
5840 }
5841
5842 /* it will never fail here, so there is no need to check the return value */
5843 hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5844
5845 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5846 if (ret)
5847 goto clear_rule;
5848
5849 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5850 if (ret)
5851 goto clear_rule;
5852
5853 return 0;
5854
5855clear_rule:
5856 hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5857 return ret;
5858}
5859
dd74f815
JS
5860static int hclge_add_fd_entry(struct hnae3_handle *handle,
5861 struct ethtool_rxnfc *cmd)
5862{
5863 struct hclge_vport *vport = hclge_get_vport(handle);
5864 struct hclge_dev *hdev = vport->back;
5865 u16 dst_vport_id = 0, q_index = 0;
5866 struct ethtool_rx_flow_spec *fs;
5867 struct hclge_fd_rule *rule;
5868 u32 unused = 0;
5869 u8 action;
5870 int ret;
5871
a3ca5e90
GL
5872 if (!hnae3_dev_fd_supported(hdev)) {
5873 dev_err(&hdev->pdev->dev,
5874 "flow table director is not supported\n");
dd74f815 5875 return -EOPNOTSUPP;
a3ca5e90 5876 }
dd74f815 5877
9abeb7d8 5878 if (!hdev->fd_en) {
a3ca5e90
GL
5879 dev_err(&hdev->pdev->dev,
5880 "please enable flow director first\n");
dd74f815
JS
5881 return -EOPNOTSUPP;
5882 }
5883
5884 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5885
5886 ret = hclge_fd_check_spec(hdev, fs, &unused);
a3ca5e90 5887 if (ret)
dd74f815 5888 return ret;
dd74f815
JS
5889
5890 if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5891 action = HCLGE_FD_ACTION_DROP_PACKET;
5892 } else {
5893 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5894 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5895 u16 tqps;
5896
0285dbae
JS
5897 if (vf > hdev->num_req_vfs) {
5898 dev_err(&hdev->pdev->dev,
adcf738b 5899 "Error: vf id (%u) > max vf num (%u)\n",
0285dbae
JS
5900 vf, hdev->num_req_vfs);
5901 return -EINVAL;
5902 }
5903
dd74f815
JS
5904 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5905 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5906
5907 if (ring >= tqps) {
5908 dev_err(&hdev->pdev->dev,
adcf738b 5909 "Error: queue id (%u) > max tqp num (%u)\n",
dd74f815
JS
5910 ring, tqps - 1);
5911 return -EINVAL;
5912 }
5913
dd74f815
JS
5914 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5915 q_index = ring;
5916 }
5917
5918 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5919 if (!rule)
5920 return -ENOMEM;
5921
5922 ret = hclge_fd_get_tuple(hdev, fs, rule);
44122887
JS
5923 if (ret) {
5924 kfree(rule);
5925 return ret;
5926 }
dd74f815
JS
5927
5928 rule->flow_type = fs->flow_type;
dd74f815
JS
5929 rule->location = fs->location;
5930 rule->unused_tuple = unused;
5931 rule->vf_id = dst_vport_id;
5932 rule->queue_id = q_index;
5933 rule->action = action;
44122887 5934 rule->rule_type = HCLGE_FD_EP_ACTIVE;
dd74f815 5935
d93ed94f
JS
5936 /* to avoid rule conflicts, when the user configures a rule via ethtool,
5937 * we need to clear all arfs rules
5938 */
efe3fa45 5939 spin_lock_bh(&hdev->fd_rule_lock);
d93ed94f
JS
5940 hclge_clear_arfs_rules(handle);
5941
44122887 5942 ret = hclge_fd_config_rule(hdev, rule);
dd74f815 5943
44122887 5944 spin_unlock_bh(&hdev->fd_rule_lock);
dd74f815 5945
dd74f815
JS
5946 return ret;
5947}
5948
5949static int hclge_del_fd_entry(struct hnae3_handle *handle,
5950 struct ethtool_rxnfc *cmd)
5951{
5952 struct hclge_vport *vport = hclge_get_vport(handle);
5953 struct hclge_dev *hdev = vport->back;
5954 struct ethtool_rx_flow_spec *fs;
5955 int ret;
5956
5957 if (!hnae3_dev_fd_supported(hdev))
5958 return -EOPNOTSUPP;
5959
5960 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5961
5962 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5963 return -EINVAL;
5964
5965 if (!hclge_fd_rule_exist(hdev, fs->location)) {
5966 dev_err(&hdev->pdev->dev,
39edaf24 5967 "Delete fail, rule %u is inexistent\n", fs->location);
dd74f815
JS
5968 return -ENOENT;
5969 }
5970
9b2f3477
WL
5971 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
5972 NULL, false);
dd74f815
JS
5973 if (ret)
5974 return ret;
5975
44122887
JS
5976 spin_lock_bh(&hdev->fd_rule_lock);
5977 ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5978
5979 spin_unlock_bh(&hdev->fd_rule_lock);
5980
5981 return ret;
dd74f815
JS
5982}
5983
efe3fa45 5984/* make sure being called after lock up with fd_rule_lock */
6871af29
JS
5985static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5986 bool clear_list)
5987{
5988 struct hclge_vport *vport = hclge_get_vport(handle);
5989 struct hclge_dev *hdev = vport->back;
5990 struct hclge_fd_rule *rule;
5991 struct hlist_node *node;
44122887 5992 u16 location;
6871af29
JS
5993
5994 if (!hnae3_dev_fd_supported(hdev))
5995 return;
5996
44122887
JS
5997 for_each_set_bit(location, hdev->fd_bmap,
5998 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5999 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
6000 NULL, false);
6001
6871af29
JS
6002 if (clear_list) {
6003 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
6004 rule_node) {
6871af29
JS
6005 hlist_del(&rule->rule_node);
6006 kfree(rule);
6871af29 6007 }
44122887
JS
6008 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
6009 hdev->hclge_fd_rule_num = 0;
6010 bitmap_zero(hdev->fd_bmap,
6011 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6871af29
JS
6012 }
6013}
6014
6015static int hclge_restore_fd_entries(struct hnae3_handle *handle)
6016{
6017 struct hclge_vport *vport = hclge_get_vport(handle);
6018 struct hclge_dev *hdev = vport->back;
6019 struct hclge_fd_rule *rule;
6020 struct hlist_node *node;
6021 int ret;
6022
65e41e7e
HT
6023 /* Return ok here, because reset error handling will check this
6024 * return value. If error is returned here, the reset process will
6025 * fail.
6026 */
6871af29 6027 if (!hnae3_dev_fd_supported(hdev))
65e41e7e 6028 return 0;
6871af29 6029
8edc2285 6030 /* if fd is disabled, the rules should not be restored during reset */
9abeb7d8 6031 if (!hdev->fd_en)
8edc2285
JS
6032 return 0;
6033
44122887 6034 spin_lock_bh(&hdev->fd_rule_lock);
6871af29
JS
6035 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6036 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6037 if (!ret)
6038 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6039
6040 if (ret) {
6041 dev_warn(&hdev->pdev->dev,
adcf738b 6042 "Restore rule %u failed, remove it\n",
6871af29 6043 rule->location);
44122887 6044 clear_bit(rule->location, hdev->fd_bmap);
6871af29
JS
6045 hlist_del(&rule->rule_node);
6046 kfree(rule);
6047 hdev->hclge_fd_rule_num--;
6048 }
6049 }
44122887
JS
6050
6051 if (hdev->hclge_fd_rule_num)
6052 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
6053
6054 spin_unlock_bh(&hdev->fd_rule_lock);
6055
6871af29
JS
6056 return 0;
6057}
6058
05c2314f
JS
6059static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
6060 struct ethtool_rxnfc *cmd)
6061{
6062 struct hclge_vport *vport = hclge_get_vport(handle);
6063 struct hclge_dev *hdev = vport->back;
6064
6065 if (!hnae3_dev_fd_supported(hdev))
6066 return -EOPNOTSUPP;
6067
6068 cmd->rule_cnt = hdev->hclge_fd_rule_num;
6069 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6070
6071 return 0;
6072}
6073
fa663c09
JS
6074static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
6075 struct ethtool_tcpip4_spec *spec,
6076 struct ethtool_tcpip4_spec *spec_mask)
6077{
6078 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6079 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6080 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6081
6082 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6083 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6084 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6085
6086 spec->psrc = cpu_to_be16(rule->tuples.src_port);
6087 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6088 0 : cpu_to_be16(rule->tuples_mask.src_port);
6089
6090 spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6091 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6092 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6093
6094 spec->tos = rule->tuples.ip_tos;
6095 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6096 0 : rule->tuples_mask.ip_tos;
6097}
6098
6099static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
6100 struct ethtool_usrip4_spec *spec,
6101 struct ethtool_usrip4_spec *spec_mask)
6102{
6103 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6104 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6105 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6106
6107 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6108 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6109 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6110
6111 spec->tos = rule->tuples.ip_tos;
6112 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6113 0 : rule->tuples_mask.ip_tos;
6114
6115 spec->proto = rule->tuples.ip_proto;
6116 spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6117 0 : rule->tuples_mask.ip_proto;
6118
6119 spec->ip_ver = ETH_RX_NFC_IP4;
6120}
6121
6122static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6123 struct ethtool_tcpip6_spec *spec,
6124 struct ethtool_tcpip6_spec *spec_mask)
6125{
6126 cpu_to_be32_array(spec->ip6src,
6127 rule->tuples.src_ip, IPV6_SIZE);
6128 cpu_to_be32_array(spec->ip6dst,
6129 rule->tuples.dst_ip, IPV6_SIZE);
6130 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6131 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6132 else
6133 cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6134 IPV6_SIZE);
6135
6136 if (rule->unused_tuple & BIT(INNER_DST_IP))
6137 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6138 else
6139 cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6140 IPV6_SIZE);
6141
6142 spec->psrc = cpu_to_be16(rule->tuples.src_port);
6143 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6144 0 : cpu_to_be16(rule->tuples_mask.src_port);
6145
6146 spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6147 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6148 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6149}
6150
6151static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6152 struct ethtool_usrip6_spec *spec,
6153 struct ethtool_usrip6_spec *spec_mask)
6154{
6155 cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6156 cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6157 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6158 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6159 else
6160 cpu_to_be32_array(spec_mask->ip6src,
6161 rule->tuples_mask.src_ip, IPV6_SIZE);
6162
6163 if (rule->unused_tuple & BIT(INNER_DST_IP))
6164 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6165 else
6166 cpu_to_be32_array(spec_mask->ip6dst,
6167 rule->tuples_mask.dst_ip, IPV6_SIZE);
6168
6169 spec->l4_proto = rule->tuples.ip_proto;
6170 spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6171 0 : rule->tuples_mask.ip_proto;
6172}
6173
6174static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6175 struct ethhdr *spec,
6176 struct ethhdr *spec_mask)
6177{
6178 ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6179 ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6180
6181 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6182 eth_zero_addr(spec_mask->h_source);
6183 else
6184 ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6185
6186 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6187 eth_zero_addr(spec_mask->h_dest);
6188 else
6189 ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6190
6191 spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6192 spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6193 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6194}
6195
6196static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
6197 struct hclge_fd_rule *rule)
6198{
6199 if (fs->flow_type & FLOW_EXT) {
6200 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6201 fs->m_ext.vlan_tci =
6202 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6203 cpu_to_be16(VLAN_VID_MASK) :
6204 cpu_to_be16(rule->tuples_mask.vlan_tag1);
6205 }
6206
6207 if (fs->flow_type & FLOW_MAC_EXT) {
6208 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6209 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6210 eth_zero_addr(fs->m_u.ether_spec.h_dest);
6211 else
6212 ether_addr_copy(fs->m_u.ether_spec.h_dest,
6213 rule->tuples_mask.dst_mac);
6214 }
6215}
6216
05c2314f
JS
6217static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
6218 struct ethtool_rxnfc *cmd)
6219{
6220 struct hclge_vport *vport = hclge_get_vport(handle);
6221 struct hclge_fd_rule *rule = NULL;
6222 struct hclge_dev *hdev = vport->back;
6223 struct ethtool_rx_flow_spec *fs;
6224 struct hlist_node *node2;
6225
6226 if (!hnae3_dev_fd_supported(hdev))
6227 return -EOPNOTSUPP;
6228
6229 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6230
44122887
JS
6231 spin_lock_bh(&hdev->fd_rule_lock);
6232
05c2314f
JS
6233 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
6234 if (rule->location >= fs->location)
6235 break;
6236 }
6237
44122887
JS
6238 if (!rule || fs->location != rule->location) {
6239 spin_unlock_bh(&hdev->fd_rule_lock);
6240
05c2314f 6241 return -ENOENT;
44122887 6242 }
05c2314f
JS
6243
6244 fs->flow_type = rule->flow_type;
6245 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
6246 case SCTP_V4_FLOW:
6247 case TCP_V4_FLOW:
6248 case UDP_V4_FLOW:
fa663c09
JS
6249 hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
6250 &fs->m_u.tcp_ip4_spec);
05c2314f
JS
6251 break;
6252 case IP_USER_FLOW:
fa663c09
JS
6253 hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
6254 &fs->m_u.usr_ip4_spec);
05c2314f
JS
6255 break;
6256 case SCTP_V6_FLOW:
6257 case TCP_V6_FLOW:
6258 case UDP_V6_FLOW:
fa663c09
JS
6259 hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
6260 &fs->m_u.tcp_ip6_spec);
05c2314f
JS
6261 break;
6262 case IPV6_USER_FLOW:
fa663c09
JS
6263 hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
6264 &fs->m_u.usr_ip6_spec);
05c2314f 6265 break;
fa663c09
JS
6266 /* The flow type of fd rule has been checked before adding in to rule
6267 * list. As other flow types have been handled, it must be ETHER_FLOW
6268 * for the default case
6269 */
05c2314f 6270 default:
fa663c09
JS
6271 hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
6272 &fs->m_u.ether_spec);
6273 break;
05c2314f
JS
6274 }
6275
fa663c09 6276 hclge_fd_get_ext_info(fs, rule);
05c2314f
JS
6277
6278 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6279 fs->ring_cookie = RX_CLS_FLOW_DISC;
6280 } else {
6281 u64 vf_id;
6282
6283 fs->ring_cookie = rule->queue_id;
6284 vf_id = rule->vf_id;
6285 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6286 fs->ring_cookie |= vf_id;
6287 }
6288
44122887
JS
6289 spin_unlock_bh(&hdev->fd_rule_lock);
6290
05c2314f
JS
6291 return 0;
6292}
6293
6294static int hclge_get_all_rules(struct hnae3_handle *handle,
6295 struct ethtool_rxnfc *cmd, u32 *rule_locs)
6296{
6297 struct hclge_vport *vport = hclge_get_vport(handle);
6298 struct hclge_dev *hdev = vport->back;
6299 struct hclge_fd_rule *rule;
6300 struct hlist_node *node2;
6301 int cnt = 0;
6302
6303 if (!hnae3_dev_fd_supported(hdev))
6304 return -EOPNOTSUPP;
6305
6306 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6307
44122887 6308 spin_lock_bh(&hdev->fd_rule_lock);
05c2314f
JS
6309 hlist_for_each_entry_safe(rule, node2,
6310 &hdev->fd_rule_list, rule_node) {
44122887
JS
6311 if (cnt == cmd->rule_cnt) {
6312 spin_unlock_bh(&hdev->fd_rule_lock);
05c2314f 6313 return -EMSGSIZE;
44122887 6314 }
05c2314f
JS
6315
6316 rule_locs[cnt] = rule->location;
6317 cnt++;
6318 }
6319
44122887
JS
6320 spin_unlock_bh(&hdev->fd_rule_lock);
6321
05c2314f
JS
6322 cmd->rule_cnt = cnt;
6323
6324 return 0;
6325}
6326
d93ed94f
JS
6327static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6328 struct hclge_fd_rule_tuples *tuples)
6329{
47327c93
GH
6330#define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
6331#define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
6332
d93ed94f
JS
6333 tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6334 tuples->ip_proto = fkeys->basic.ip_proto;
6335 tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6336
6337 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6338 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6339 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6340 } else {
47327c93
GH
6341 int i;
6342
6343 for (i = 0; i < IPV6_SIZE; i++) {
6344 tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
6345 tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
6346 }
d93ed94f
JS
6347 }
6348}
6349
6350 /* traverse all rules, check whether an existing rule has the same tuples */
6351static struct hclge_fd_rule *
6352hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6353 const struct hclge_fd_rule_tuples *tuples)
6354{
6355 struct hclge_fd_rule *rule = NULL;
6356 struct hlist_node *node;
6357
6358 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6359 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6360 return rule;
6361 }
6362
6363 return NULL;
6364}
6365
6366static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6367 struct hclge_fd_rule *rule)
6368{
6369 rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6370 BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6371 BIT(INNER_SRC_PORT);
6372 rule->action = 0;
6373 rule->vf_id = 0;
6374 rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6375 if (tuples->ether_proto == ETH_P_IP) {
6376 if (tuples->ip_proto == IPPROTO_TCP)
6377 rule->flow_type = TCP_V4_FLOW;
6378 else
6379 rule->flow_type = UDP_V4_FLOW;
6380 } else {
6381 if (tuples->ip_proto == IPPROTO_TCP)
6382 rule->flow_type = TCP_V6_FLOW;
6383 else
6384 rule->flow_type = UDP_V6_FLOW;
6385 }
6386 memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6387 memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
6388}
6389
6390static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6391 u16 flow_id, struct flow_keys *fkeys)
6392{
d93ed94f 6393 struct hclge_vport *vport = hclge_get_vport(handle);
efe3fa45 6394 struct hclge_fd_rule_tuples new_tuples = {};
d93ed94f
JS
6395 struct hclge_dev *hdev = vport->back;
6396 struct hclge_fd_rule *rule;
6397 u16 tmp_queue_id;
6398 u16 bit_id;
6399 int ret;
6400
6401 if (!hnae3_dev_fd_supported(hdev))
6402 return -EOPNOTSUPP;
6403
d93ed94f
JS
6404 /* when there is already an fd rule added by the user,
6405 * arfs should not work
6406 */
efe3fa45 6407 spin_lock_bh(&hdev->fd_rule_lock);
d93ed94f
JS
6408 if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6409 spin_unlock_bh(&hdev->fd_rule_lock);
d93ed94f
JS
6410 return -EOPNOTSUPP;
6411 }
6412
efe3fa45
GL
6413 hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6414
d93ed94f
JS
6415 /* check whether a flow director filter already exists for this flow;
6416 * if not, create a new filter for it;
6417 * if a filter exists with a different queue id, modify the filter;
6418 * if a filter exists with the same queue id, do nothing
6419 */
6420 rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6421 if (!rule) {
6422 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6423 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6424 spin_unlock_bh(&hdev->fd_rule_lock);
d93ed94f
JS
6425 return -ENOSPC;
6426 }
6427
d659f9f6 6428 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
d93ed94f
JS
6429 if (!rule) {
6430 spin_unlock_bh(&hdev->fd_rule_lock);
d93ed94f
JS
6431 return -ENOMEM;
6432 }
6433
6434 set_bit(bit_id, hdev->fd_bmap);
6435 rule->location = bit_id;
6436 rule->flow_id = flow_id;
6437 rule->queue_id = queue_id;
6438 hclge_fd_build_arfs_rule(&new_tuples, rule);
6439 ret = hclge_fd_config_rule(hdev, rule);
6440
6441 spin_unlock_bh(&hdev->fd_rule_lock);
6442
6443 if (ret)
6444 return ret;
6445
6446 return rule->location;
6447 }
6448
6449 spin_unlock_bh(&hdev->fd_rule_lock);
6450
6451 if (rule->queue_id == queue_id)
6452 return rule->location;
6453
6454 tmp_queue_id = rule->queue_id;
6455 rule->queue_id = queue_id;
6456 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6457 if (ret) {
6458 rule->queue_id = tmp_queue_id;
6459 return ret;
6460 }
6461
6462 return rule->location;
d93ed94f
JS
6463}
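/* Summary of the aRFS path above: if ethtool (EP) rules are active, aRFS is
 * refused; otherwise the flow's tuples are looked up in fd_rule_list.  A
 * missing flow gets a free location from fd_bmap and a full rule install
 * (action + key), while an existing flow only has its action rewritten when
 * the target queue changed.  The returned location is the filter id that
 * the stack later passes back through rps_may_expire_flow() in
 * hclge_rfs_filter_expire().
 */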
6464
6465static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6466{
6467#ifdef CONFIG_RFS_ACCEL
6468 struct hnae3_handle *handle = &hdev->vport[0].nic;
6469 struct hclge_fd_rule *rule;
6470 struct hlist_node *node;
6471 HLIST_HEAD(del_list);
6472
6473 spin_lock_bh(&hdev->fd_rule_lock);
6474 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6475 spin_unlock_bh(&hdev->fd_rule_lock);
6476 return;
6477 }
6478 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6479 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6480 rule->flow_id, rule->location)) {
6481 hlist_del_init(&rule->rule_node);
6482 hlist_add_head(&rule->rule_node, &del_list);
6483 hdev->hclge_fd_rule_num--;
6484 clear_bit(rule->location, hdev->fd_bmap);
6485 }
6486 }
6487 spin_unlock_bh(&hdev->fd_rule_lock);
6488
6489 hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6490 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6491 rule->location, NULL, false);
6492 kfree(rule);
6493 }
6494#endif
6495}
6496
efe3fa45 6497/* make sure being called after lock up with fd_rule_lock */
d93ed94f
JS
6498static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6499{
6500#ifdef CONFIG_RFS_ACCEL
6501 struct hclge_vport *vport = hclge_get_vport(handle);
6502 struct hclge_dev *hdev = vport->back;
6503
6504 if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6505 hclge_del_all_fd_entries(handle, true);
6506#endif
6507}
6508
4d60291b
HT
6509static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6510{
6511 struct hclge_vport *vport = hclge_get_vport(handle);
6512 struct hclge_dev *hdev = vport->back;
6513
6514 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6515 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6516}
6517
a4de0228
HT
6518static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
6519{
6520 struct hclge_vport *vport = hclge_get_vport(handle);
6521 struct hclge_dev *hdev = vport->back;
6522
6523 return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
6524}
6525
4d60291b
HT
6526static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6527{
6528 struct hclge_vport *vport = hclge_get_vport(handle);
6529 struct hclge_dev *hdev = vport->back;
6530
6531 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6532}
6533
6534static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6535{
6536 struct hclge_vport *vport = hclge_get_vport(handle);
6537 struct hclge_dev *hdev = vport->back;
6538
f02eb82d 6539 return hdev->rst_stats.hw_reset_done_cnt;
4d60291b
HT
6540}
6541
c17852a8
JS
6542static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
6543{
6544 struct hclge_vport *vport = hclge_get_vport(handle);
6545 struct hclge_dev *hdev = vport->back;
44122887 6546 bool clear;
c17852a8 6547
9abeb7d8 6548 hdev->fd_en = enable;
1483fa49 6549 clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
efe3fa45
GL
6550
6551 if (!enable) {
6552 spin_lock_bh(&hdev->fd_rule_lock);
44122887 6553 hclge_del_all_fd_entries(handle, clear);
efe3fa45
GL
6554 spin_unlock_bh(&hdev->fd_rule_lock);
6555 } else {
c17852a8 6556 hclge_restore_fd_entries(handle);
efe3fa45 6557 }
c17852a8
JS
6558}
6559
46a3df9f
S
6560static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
6561{
6562 struct hclge_desc desc;
d44f9b63
YL
6563 struct hclge_config_mac_mode_cmd *req =
6564 (struct hclge_config_mac_mode_cmd *)desc.data;
a90bb9a5 6565 u32 loop_en = 0;
46a3df9f
S
6566 int ret;
6567
6568 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
b9a8f883
YL
6569
6570 if (enable) {
6571 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
6572 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
6573 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
6574 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
6575 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
6576 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
6577 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
6578 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
6579 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
6580 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
6581 }
6582
a90bb9a5 6583 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
46a3df9f
S
6584
6585 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6586 if (ret)
6587 dev_err(&hdev->pdev->dev,
6588 "mac enable fail, ret =%d.\n", ret);
6589}
6590
dd2956ea
YM
6591static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
6592 u8 switch_param, u8 param_mask)
6593{
6594 struct hclge_mac_vlan_switch_cmd *req;
6595 struct hclge_desc desc;
6596 u32 func_id;
6597 int ret;
6598
6599 func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
6600 req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
71c5e83b
GH
6601
6602 /* read current config parameter */
dd2956ea 6603 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
71c5e83b 6604 true);
dd2956ea
YM
6605 req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
6606 req->func_id = cpu_to_le32(func_id);
71c5e83b
GH
6607
6608 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6609 if (ret) {
6610 dev_err(&hdev->pdev->dev,
6611 "read mac vlan switch parameter fail, ret = %d\n", ret);
6612 return ret;
6613 }
6614
6615 /* modify and write new config parameter */
6616 hclge_cmd_reuse_desc(&desc, false);
6617 req->switch_param = (req->switch_param & param_mask) | switch_param;
dd2956ea
YM
6618 req->param_mask = param_mask;
6619
6620 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6621 if (ret)
6622 dev_err(&hdev->pdev->dev,
6623 "set mac vlan switch parameter fail, ret = %d\n", ret);
6624 return ret;
6625}
6626
c9765a89
YM
6627static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
6628 int link_ret)
6629{
6630#define HCLGE_PHY_LINK_STATUS_NUM 200
6631
6632 struct phy_device *phydev = hdev->hw.mac.phydev;
6633 int i = 0;
6634 int ret;
6635
6636 do {
6637 ret = phy_read_status(phydev);
6638 if (ret) {
6639 dev_err(&hdev->pdev->dev,
6640 "phy update link status fail, ret = %d\n", ret);
6641 return;
6642 }
6643
6644 if (phydev->link == link_ret)
6645 break;
6646
6647 msleep(HCLGE_LINK_STATUS_MS);
6648 } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
6649}
6650
6651static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
6652{
6653#define HCLGE_MAC_LINK_STATUS_NUM 100
6654
fac24df7 6655 int link_status;
c9765a89
YM
6656 int i = 0;
6657 int ret;
6658
6659 do {
fac24df7
JS
6660 ret = hclge_get_mac_link_status(hdev, &link_status);
6661 if (ret)
c9765a89 6662 return ret;
fac24df7 6663 if (link_status == link_ret)
c9765a89
YM
6664 return 0;
6665
6666 msleep(HCLGE_LINK_STATUS_MS);
6667 } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
6668 return -EBUSY;
6669}
6670
6671static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
6672 bool is_phy)
6673{
c9765a89
YM
6674 int link_ret;
6675
6676 link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
6677
6678 if (is_phy)
6679 hclge_phy_link_status_wait(hdev, link_ret);
6680
6681 return hclge_mac_link_status_wait(hdev, link_ret);
6682}
6683
eb66d503 6684static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
c39c4d98 6685{
c39c4d98 6686 struct hclge_config_mac_mode_cmd *req;
c39c4d98
YL
6687 struct hclge_desc desc;
6688 u32 loop_en;
6689 int ret;
6690
e4d68dae
YL
6691 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
6692 /* 1 Read out the MAC mode config first */
6693 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
6694 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6695 if (ret) {
6696 dev_err(&hdev->pdev->dev,
6697 "mac loopback get fail, ret =%d.\n", ret);
6698 return ret;
6699 }
c39c4d98 6700
e4d68dae
YL
6701 /* 2 Then setup the loopback flag */
6702 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
e4e87715 6703 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
e4d68dae
YL
6704
6705 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
c39c4d98 6706
e4d68dae
YL
6707 /* 3 Config mac work mode with loopback flag
6708 * and its original configuration parameters
6709 */
6710 hclge_cmd_reuse_desc(&desc, false);
6711 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6712 if (ret)
6713 dev_err(&hdev->pdev->dev,
6714 "mac loopback set fail, ret =%d.\n", ret);
6715 return ret;
6716}
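/* hclge_set_app_loopback() is a read-modify-write of the MAC mode config:
 * the current value is fetched with a read descriptor, only the
 * HCLGE_MAC_APP_LP_B bit is changed, and hclge_cmd_reuse_desc() resends the
 * same descriptor as a write so every other MAC mode bit keeps its original
 * value.
 */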
c39c4d98 6717
1cbc662d 6718static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
4dc13b96 6719 enum hnae3_loop loop_mode)
5fd50ac3
PL
6720{
6721#define HCLGE_SERDES_RETRY_MS 10
6722#define HCLGE_SERDES_RETRY_NUM 100
350fda0a 6723
5fd50ac3
PL
6724 struct hclge_serdes_lb_cmd *req;
6725 struct hclge_desc desc;
6726 int ret, i = 0;
4dc13b96 6727 u8 loop_mode_b;
5fd50ac3 6728
d0d72bac 6729 req = (struct hclge_serdes_lb_cmd *)desc.data;
5fd50ac3
PL
6730 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
6731
4dc13b96
FL
6732 switch (loop_mode) {
6733 case HNAE3_LOOP_SERIAL_SERDES:
6734 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6735 break;
6736 case HNAE3_LOOP_PARALLEL_SERDES:
6737 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
6738 break;
6739 default:
6740 dev_err(&hdev->pdev->dev,
6741 "unsupported serdes loopback mode %d\n", loop_mode);
6742 return -ENOTSUPP;
6743 }
6744
5fd50ac3 6745 if (en) {
4dc13b96
FL
6746 req->enable = loop_mode_b;
6747 req->mask = loop_mode_b;
5fd50ac3 6748 } else {
4dc13b96 6749 req->mask = loop_mode_b;
5fd50ac3
PL
6750 }
6751
6752 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6753 if (ret) {
6754 dev_err(&hdev->pdev->dev,
6755 "serdes loopback set fail, ret = %d\n", ret);
6756 return ret;
6757 }
6758
6759 do {
6760 msleep(HCLGE_SERDES_RETRY_MS);
6761 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
6762 true);
6763 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6764 if (ret) {
6765 dev_err(&hdev->pdev->dev,
6766 "serdes loopback get, ret = %d\n", ret);
6767 return ret;
6768 }
6769 } while (++i < HCLGE_SERDES_RETRY_NUM &&
6770 !(req->result & HCLGE_CMD_SERDES_DONE_B));
6771
6772 if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
6773 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
6774 return -EBUSY;
6775 } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
6776 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
6777 return -EIO;
6778 }
1cbc662d
YM
6779 return ret;
6780}
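/* After issuing the serdes loopback command the driver polls the same
 * descriptor until firmware reports completion: up to HCLGE_SERDES_RETRY_NUM
 * reads spaced HCLGE_SERDES_RETRY_MS apart (roughly one second in total),
 * returning -EBUSY if the DONE bit never appears and -EIO if DONE is set
 * without SUCCESS.
 */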
6781
6782static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
6783 enum hnae3_loop loop_mode)
6784{
6785 int ret;
6786
6787 ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
6788 if (ret)
6789 return ret;
5fd50ac3 6790
0f29fc23 6791 hclge_cfg_mac_mode(hdev, en);
350fda0a 6792
60df7e91 6793 ret = hclge_mac_phy_link_status_wait(hdev, en, false);
c9765a89
YM
6794 if (ret)
6795 dev_err(&hdev->pdev->dev,
6796 "serdes loopback config mac mode timeout\n");
6797
6798 return ret;
6799}
350fda0a 6800
c9765a89
YM
6801static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
6802 struct phy_device *phydev)
6803{
6804 int ret;
350fda0a 6805
c9765a89
YM
6806 if (!phydev->suspended) {
6807 ret = phy_suspend(phydev);
6808 if (ret)
6809 return ret;
6810 }
6811
6812 ret = phy_resume(phydev);
6813 if (ret)
6814 return ret;
6815
6816 return phy_loopback(phydev, true);
6817}
6818
6819static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
6820 struct phy_device *phydev)
6821{
6822 int ret;
6823
6824 ret = phy_loopback(phydev, false);
6825 if (ret)
6826 return ret;
6827
6828 return phy_suspend(phydev);
6829}
6830
6831static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
6832{
6833 struct phy_device *phydev = hdev->hw.mac.phydev;
6834 int ret;
6835
6836 if (!phydev)
6837 return -ENOTSUPP;
6838
6839 if (en)
6840 ret = hclge_enable_phy_loopback(hdev, phydev);
6841 else
6842 ret = hclge_disable_phy_loopback(hdev, phydev);
6843 if (ret) {
6844 dev_err(&hdev->pdev->dev,
6845 "set phy loopback fail, ret = %d\n", ret);
6846 return ret;
6847 }
6848
6849 hclge_cfg_mac_mode(hdev, en);
6850
60df7e91 6851 ret = hclge_mac_phy_link_status_wait(hdev, en, true);
c9765a89
YM
6852 if (ret)
6853 dev_err(&hdev->pdev->dev,
6854 "phy loopback config mac mode timeout\n");
6855
6856 return ret;
5fd50ac3
PL
6857}
6858
ebaf1908 6859static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
0f29fc23
YL
6860 int stream_id, bool enable)
6861{
6862 struct hclge_desc desc;
6863 struct hclge_cfg_com_tqp_queue_cmd *req =
6864 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6865 int ret;
6866
6867 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
9a5ef4aa 6868 req->tqp_id = cpu_to_le16(tqp_id);
0f29fc23 6869 req->stream_id = cpu_to_le16(stream_id);
ebaf1908
WL
6870 if (enable)
6871 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
0f29fc23
YL
6872
6873 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6874 if (ret)
6875 dev_err(&hdev->pdev->dev,
6876 "Tqp enable fail, status =%d.\n", ret);
6877 return ret;
6878}
6879
e4d68dae
YL
6880static int hclge_set_loopback(struct hnae3_handle *handle,
6881 enum hnae3_loop loop_mode, bool en)
6882{
6883 struct hclge_vport *vport = hclge_get_vport(handle);
205a24ca 6884 struct hnae3_knic_private_info *kinfo;
e4d68dae 6885 struct hclge_dev *hdev = vport->back;
0f29fc23 6886 int i, ret;
e4d68dae 6887
dd2956ea
YM
6888 /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
6889 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
6890 * the same, the packets are looped back in the SSU. If SSU loopback
6891 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
6892 */
295ba232 6893 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
dd2956ea
YM
6894 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
6895
6896 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
6897 HCLGE_SWITCH_ALW_LPBK_MASK);
6898 if (ret)
6899 return ret;
6900 }
6901
e4d68dae 6902 switch (loop_mode) {
eb66d503
FL
6903 case HNAE3_LOOP_APP:
6904 ret = hclge_set_app_loopback(hdev, en);
c39c4d98 6905 break;
4dc13b96
FL
6906 case HNAE3_LOOP_SERIAL_SERDES:
6907 case HNAE3_LOOP_PARALLEL_SERDES:
6908 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
5fd50ac3 6909 break;
c9765a89
YM
6910 case HNAE3_LOOP_PHY:
6911 ret = hclge_set_phy_loopback(hdev, en);
6912 break;
c39c4d98
YL
6913 default:
6914 ret = -ENOTSUPP;
6915 dev_err(&hdev->pdev->dev,
6916 "loop_mode %d is not supported\n", loop_mode);
6917 break;
6918 }
6919
47ef6dec
JS
6920 if (ret)
6921 return ret;
6922
205a24ca
HT
6923 kinfo = &vport->nic.kinfo;
6924 for (i = 0; i < kinfo->num_tqps; i++) {
0f29fc23
YL
6925 ret = hclge_tqp_enable(hdev, i, 0, en);
6926 if (ret)
6927 return ret;
6928 }
46a3df9f 6929
0f29fc23 6930 return 0;
46a3df9f
S
6931}
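/* Summary of the enable path above (a reading aid, not additional driver
 * logic): enabling a loopback test is a three-step sequence:
 *   1. on V2+ devices, clear the SSU "allow loopback" switch param so that
 *      self-test frames (SMAC == DMAC) are not looped back inside the SSU
 *      before they can reach the MAC/serdes/PHY;
 *   2. enable the requested loopback point (APP/MAC, serial/parallel serdes,
 *      or PHY);
 *   3. enable every TQP of the vport so the self-test traffic can flow.
 * Disabling reverses the loopback setting and restores the switch param.
 */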
6932
1cbc662d
YM
6933static int hclge_set_default_loopback(struct hclge_dev *hdev)
6934{
6935 int ret;
6936
6937 ret = hclge_set_app_loopback(hdev, false);
6938 if (ret)
6939 return ret;
6940
6941 ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
6942 if (ret)
6943 return ret;
6944
6945 return hclge_cfg_serdes_loopback(hdev, false,
6946 HNAE3_LOOP_PARALLEL_SERDES);
6947}
6948
46a3df9f
S
6949static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6950{
6951 struct hclge_vport *vport = hclge_get_vport(handle);
205a24ca 6952 struct hnae3_knic_private_info *kinfo;
46a3df9f
S
6953 struct hnae3_queue *queue;
6954 struct hclge_tqp *tqp;
6955 int i;
6956
205a24ca
HT
6957 kinfo = &vport->nic.kinfo;
6958 for (i = 0; i < kinfo->num_tqps; i++) {
46a3df9f
S
6959 queue = handle->kinfo.tqp[i];
6960 tqp = container_of(queue, struct hclge_tqp, q);
6961 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6962 }
6963}
6964
1c6dfe6f
YL
6965static void hclge_flush_link_update(struct hclge_dev *hdev)
6966{
6967#define HCLGE_FLUSH_LINK_TIMEOUT 100000
6968
6969 unsigned long last = hdev->serv_processed_cnt;
6970 int i = 0;
6971
6972 while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
6973 i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
6974 last == hdev->serv_processed_cnt)
6975 usleep_range(1, 1);
6976}
6977
8cdb992f
JS
6978static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6979{
6980 struct hclge_vport *vport = hclge_get_vport(handle);
6981 struct hclge_dev *hdev = vport->back;
6982
6983 if (enable) {
a9775bb6 6984 hclge_task_schedule(hdev, 0);
8cdb992f 6985 } else {
1c6dfe6f 6986 /* Set the DOWN flag here to disable link updating */
7be1b9f3 6987 set_bit(HCLGE_STATE_DOWN, &hdev->state);
1c6dfe6f
YL
6988
6989 /* flush memory to make sure DOWN is seen by service task */
6990 smp_mb__before_atomic();
6991 hclge_flush_link_update(hdev);
8cdb992f
JS
6992 }
6993}
6994
46a3df9f
S
6995static int hclge_ae_start(struct hnae3_handle *handle)
6996{
6997 struct hclge_vport *vport = hclge_get_vport(handle);
6998 struct hclge_dev *hdev = vport->back;
46a3df9f 6999
46a3df9f
S
7000 /* mac enable */
7001 hclge_cfg_mac_mode(hdev, true);
7002 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
be8d8cdb 7003 hdev->hw.mac.link = 0;
46a3df9f 7004
b50ae26c
PL
7005 /* reset tqp stats */
7006 hclge_reset_tqp_stats(handle);
7007
b01b7cf1 7008 hclge_mac_start_phy(hdev);
46a3df9f 7009
46a3df9f
S
7010 return 0;
7011}
7012
7013static void hclge_ae_stop(struct hnae3_handle *handle)
7014{
7015 struct hclge_vport *vport = hclge_get_vport(handle);
7016 struct hclge_dev *hdev = vport->back;
39cfbc9c 7017 int i;
46a3df9f 7018
2f7e4896 7019 set_bit(HCLGE_STATE_DOWN, &hdev->state);
efe3fa45 7020 spin_lock_bh(&hdev->fd_rule_lock);
d93ed94f 7021 hclge_clear_arfs_rules(handle);
efe3fa45 7022 spin_unlock_bh(&hdev->fd_rule_lock);
d93ed94f 7023
35d93a30
HT
7024 /* If it is not PF reset, the firmware will disable the MAC,
 7025 	 * so it only needs to stop the phy here.
7026 */
7027 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
7028 hdev->reset_type != HNAE3_FUNC_RESET) {
9617f668 7029 hclge_mac_stop_phy(hdev);
ed8fb4b2 7030 hclge_update_link_status(hdev);
b50ae26c 7031 return;
9617f668 7032 }
b50ae26c 7033
39cfbc9c
HT
7034 for (i = 0; i < handle->kinfo.num_tqps; i++)
7035 hclge_reset_tqp(handle, i);
7036
20981a1e
HT
7037 hclge_config_mac_tnl_int(hdev, false);
7038
46a3df9f
S
7039 /* Mac disable */
7040 hclge_cfg_mac_mode(hdev, false);
7041
7042 hclge_mac_stop_phy(hdev);
7043
7044 /* reset tqp stats */
7045 hclge_reset_tqp_stats(handle);
f30dfddc 7046 hclge_update_link_status(hdev);
46a3df9f
S
7047}
7048
a6d818e3
YL
7049int hclge_vport_start(struct hclge_vport *vport)
7050{
ee4bcd3b
JS
7051 struct hclge_dev *hdev = vport->back;
7052
a6d818e3
YL
7053 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
7054 vport->last_active_jiffies = jiffies;
ee4bcd3b 7055
039ba863
JS
7056 if (test_bit(vport->vport_id, hdev->vport_config_block)) {
7057 if (vport->vport_id) {
7058 hclge_restore_mac_table_common(vport);
7059 hclge_restore_vport_vlan_table(vport);
7060 } else {
7061 hclge_restore_hw_table(hdev);
7062 }
7063 }
ee4bcd3b
JS
7064
7065 clear_bit(vport->vport_id, hdev->vport_config_block);
7066
a6d818e3
YL
7067 return 0;
7068}
7069
7070void hclge_vport_stop(struct hclge_vport *vport)
7071{
7072 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
7073}
7074
7075static int hclge_client_start(struct hnae3_handle *handle)
7076{
7077 struct hclge_vport *vport = hclge_get_vport(handle);
7078
7079 return hclge_vport_start(vport);
7080}
7081
7082static void hclge_client_stop(struct hnae3_handle *handle)
7083{
7084 struct hclge_vport *vport = hclge_get_vport(handle);
7085
7086 hclge_vport_stop(vport);
7087}
7088
46a3df9f
S
7089static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
7090 u16 cmdq_resp, u8 resp_code,
7091 enum hclge_mac_vlan_tbl_opcode op)
7092{
7093 struct hclge_dev *hdev = vport->back;
46a3df9f
S
7094
7095 if (cmdq_resp) {
7096 dev_err(&hdev->pdev->dev,
adcf738b 7097 "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
46a3df9f
S
7098 cmdq_resp);
7099 return -EIO;
7100 }
7101
7102 if (op == HCLGE_MAC_VLAN_ADD) {
c631c696 7103 if (!resp_code || resp_code == 1)
6e4139f6 7104 return 0;
c631c696
JS
7105 else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
7106 resp_code == HCLGE_ADD_MC_OVERFLOW)
6e4139f6 7107 return -ENOSPC;
6e4139f6
JS
7108
7109 dev_err(&hdev->pdev->dev,
7110 "add mac addr failed for undefined, code=%u.\n",
7111 resp_code);
7112 return -EIO;
46a3df9f
S
7113 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
7114 if (!resp_code) {
6e4139f6 7115 return 0;
46a3df9f 7116 } else if (resp_code == 1) {
46a3df9f
S
7117 dev_dbg(&hdev->pdev->dev,
7118 "remove mac addr failed for miss.\n");
6e4139f6 7119 return -ENOENT;
46a3df9f 7120 }
6e4139f6
JS
7121
7122 dev_err(&hdev->pdev->dev,
7123 "remove mac addr failed for undefined, code=%u.\n",
7124 resp_code);
7125 return -EIO;
46a3df9f
S
7126 } else if (op == HCLGE_MAC_VLAN_LKUP) {
7127 if (!resp_code) {
6e4139f6 7128 return 0;
46a3df9f 7129 } else if (resp_code == 1) {
46a3df9f
S
7130 dev_dbg(&hdev->pdev->dev,
7131 "lookup mac addr failed for miss.\n");
6e4139f6 7132 return -ENOENT;
46a3df9f 7133 }
6e4139f6 7134
46a3df9f 7135 dev_err(&hdev->pdev->dev,
6e4139f6
JS
7136 "lookup mac addr failed for undefined, code=%u.\n",
7137 resp_code);
7138 return -EIO;
46a3df9f
S
7139 }
7140
6e4139f6
JS
7141 dev_err(&hdev->pdev->dev,
7142 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
7143
7144 return -EINVAL;
46a3df9f
S
7145}
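/* Decoding done by the function above, shown as a table for reference
 * (derived from the branches above, nothing new added):
 *   ADD:    resp_code 0 or 1          -> 0 (success)
 *           UC/MC overflow codes      -> -ENOSPC
 *           anything else             -> -EIO
 *   REMOVE: 0 -> 0, 1 (entry miss) -> -ENOENT, anything else -> -EIO
 *   LKUP:   0 -> 0, 1 (entry miss) -> -ENOENT, anything else -> -EIO
 * A non-zero cmdq_resp always maps to -EIO, and an unknown opcode to -EINVAL.
 */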
7146
7147static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
7148{
b37ce587
YM
7149#define HCLGE_VF_NUM_IN_FIRST_DESC 192
7150
b9a8f883
YL
7151 unsigned int word_num;
7152 unsigned int bit_num;
46a3df9f
S
7153
7154 if (vfid > 255 || vfid < 0)
7155 return -EIO;
7156
b37ce587 7157 if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
46a3df9f
S
7158 word_num = vfid / 32;
7159 bit_num = vfid % 32;
7160 if (clr)
a90bb9a5 7161 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
46a3df9f 7162 else
a90bb9a5 7163 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
46a3df9f 7164 } else {
b37ce587 7165 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
46a3df9f
S
7166 bit_num = vfid % 32;
7167 if (clr)
a90bb9a5 7168 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
46a3df9f 7169 else
a90bb9a5 7170 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
46a3df9f
S
7171 }
7172
7173 return 0;
7174}
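/* Worked example of the vfid -> descriptor bitmap mapping above, with
 * illustrative vfid values (HCLGE_VF_NUM_IN_FIRST_DESC == 192, 32 bits per
 * data word):
 *   vfid 5   -> desc[1], word 0 (5 / 32),           bit 5 (5 % 32)
 *   vfid 100 -> desc[1], word 3 (100 / 32),         bit 4 (100 % 32)
 *   vfid 200 -> desc[2], word 0 ((200 - 192) / 32), bit 8 (200 % 32)
 * so function ids 0..191 live in desc[1] and 192..255 in desc[2].
 */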
7175
7176static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
7177{
7178#define HCLGE_DESC_NUMBER 3
7179#define HCLGE_FUNC_NUMBER_PER_DESC 6
7180 int i, j;
7181
6c39d527 7182 for (i = 1; i < HCLGE_DESC_NUMBER; i++)
46a3df9f
S
7183 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
7184 if (desc[i].data[j])
7185 return false;
7186
7187 return true;
7188}
7189
d44f9b63 7190static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
3a586422 7191 const u8 *addr, bool is_mc)
46a3df9f
S
7192{
7193 const unsigned char *mac_addr = addr;
7194 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
7195 (mac_addr[0]) | (mac_addr[1] << 8);
7196 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
7197
3a586422
WL
7198 hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7199 if (is_mc) {
7200 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
7201 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7202 }
7203
46a3df9f
S
7204 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
7205 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
7206}
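/* Byte layout produced by the function above, shown for the illustrative
 * address 00:11:22:33:44:55:
 *   high_val = 0x22 << 16 | 0x33 << 24 | 0x00 | 0x11 << 8 = 0x33221100
 *   low_val  = 0x44 | 0x55 << 8                           = 0x5544
 * i.e. mac_addr_hi32 packs bytes 0..3 (byte 0 in the least significant
 * byte) and mac_addr_lo16 packs bytes 4..5, both written little-endian
 * via cpu_to_le32()/cpu_to_le16().
 */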
7207
46a3df9f 7208static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
d44f9b63 7209 struct hclge_mac_vlan_tbl_entry_cmd *req)
46a3df9f
S
7210{
7211 struct hclge_dev *hdev = vport->back;
7212 struct hclge_desc desc;
7213 u8 resp_code;
a90bb9a5 7214 u16 retval;
46a3df9f
S
7215 int ret;
7216
7217 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
7218
d44f9b63 7219 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
46a3df9f
S
7220
7221 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7222 if (ret) {
7223 dev_err(&hdev->pdev->dev,
7224 "del mac addr failed for cmd_send, ret =%d.\n",
7225 ret);
7226 return ret;
7227 }
a90bb9a5
YL
7228 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7229 retval = le16_to_cpu(desc.retval);
46a3df9f 7230
a90bb9a5 7231 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
46a3df9f
S
7232 HCLGE_MAC_VLAN_REMOVE);
7233}
7234
7235static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
d44f9b63 7236 struct hclge_mac_vlan_tbl_entry_cmd *req,
46a3df9f
S
7237 struct hclge_desc *desc,
7238 bool is_mc)
7239{
7240 struct hclge_dev *hdev = vport->back;
7241 u8 resp_code;
a90bb9a5 7242 u16 retval;
46a3df9f
S
7243 int ret;
7244
7245 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
7246 if (is_mc) {
7247 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7248 memcpy(desc[0].data,
7249 req,
d44f9b63 7250 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
46a3df9f
S
7251 hclge_cmd_setup_basic_desc(&desc[1],
7252 HCLGE_OPC_MAC_VLAN_ADD,
7253 true);
7254 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7255 hclge_cmd_setup_basic_desc(&desc[2],
7256 HCLGE_OPC_MAC_VLAN_ADD,
7257 true);
7258 ret = hclge_cmd_send(&hdev->hw, desc, 3);
7259 } else {
7260 memcpy(desc[0].data,
7261 req,
d44f9b63 7262 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
46a3df9f
S
7263 ret = hclge_cmd_send(&hdev->hw, desc, 1);
7264 }
7265 if (ret) {
7266 dev_err(&hdev->pdev->dev,
7267 "lookup mac addr failed for cmd_send, ret =%d.\n",
7268 ret);
7269 return ret;
7270 }
a90bb9a5
YL
7271 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
7272 retval = le16_to_cpu(desc[0].retval);
46a3df9f 7273
a90bb9a5 7274 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
46a3df9f
S
7275 HCLGE_MAC_VLAN_LKUP);
7276}
7277
7278static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
d44f9b63 7279 struct hclge_mac_vlan_tbl_entry_cmd *req,
46a3df9f
S
7280 struct hclge_desc *mc_desc)
7281{
7282 struct hclge_dev *hdev = vport->back;
7283 int cfg_status;
7284 u8 resp_code;
a90bb9a5 7285 u16 retval;
46a3df9f
S
7286 int ret;
7287
7288 if (!mc_desc) {
7289 struct hclge_desc desc;
7290
7291 hclge_cmd_setup_basic_desc(&desc,
7292 HCLGE_OPC_MAC_VLAN_ADD,
7293 false);
d44f9b63
YL
7294 memcpy(desc.data, req,
7295 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
46a3df9f 7296 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
a90bb9a5
YL
7297 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7298 retval = le16_to_cpu(desc.retval);
7299
7300 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
46a3df9f
S
7301 resp_code,
7302 HCLGE_MAC_VLAN_ADD);
7303 } else {
c3b6f755 7304 hclge_cmd_reuse_desc(&mc_desc[0], false);
46a3df9f 7305 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
c3b6f755 7306 hclge_cmd_reuse_desc(&mc_desc[1], false);
46a3df9f 7307 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
c3b6f755 7308 hclge_cmd_reuse_desc(&mc_desc[2], false);
46a3df9f
S
7309 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
7310 memcpy(mc_desc[0].data, req,
d44f9b63 7311 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
46a3df9f 7312 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
a90bb9a5
YL
7313 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
7314 retval = le16_to_cpu(mc_desc[0].retval);
7315
7316 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
46a3df9f
S
7317 resp_code,
7318 HCLGE_MAC_VLAN_ADD);
7319 }
7320
7321 if (ret) {
7322 dev_err(&hdev->pdev->dev,
7323 "add mac addr failed for cmd_send, ret =%d.\n",
7324 ret);
7325 return ret;
7326 }
7327
7328 return cfg_status;
7329}
7330
39932473 7331static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
c1c5f66e 7332 u16 *allocated_size)
39932473
JS
7333{
7334 struct hclge_umv_spc_alc_cmd *req;
7335 struct hclge_desc desc;
7336 int ret;
7337
7338 req = (struct hclge_umv_spc_alc_cmd *)desc.data;
7339 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
63cbf7a9 7340
39932473
JS
7341 req->space_size = cpu_to_le32(space_size);
7342
7343 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7344 if (ret) {
c1c5f66e
JS
7345 dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
7346 ret);
39932473
JS
7347 return ret;
7348 }
7349
3fd8dc26 7350 *allocated_size = le32_to_cpu(desc.data[1]);
39932473
JS
7351
7352 return 0;
7353}
7354
1ac0e6c2
JS
7355static int hclge_init_umv_space(struct hclge_dev *hdev)
7356{
7357 u16 allocated_size = 0;
7358 int ret;
7359
c1c5f66e 7360 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
1ac0e6c2
JS
7361 if (ret)
7362 return ret;
7363
7364 if (allocated_size < hdev->wanted_umv_size)
7365 dev_warn(&hdev->pdev->dev,
7366 "failed to alloc umv space, want %u, get %u\n",
7367 hdev->wanted_umv_size, allocated_size);
7368
1ac0e6c2
JS
7369 hdev->max_umv_size = allocated_size;
7370 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
7371 hdev->share_umv_size = hdev->priv_umv_size +
7372 hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7373
7374 return 0;
7375}
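/* Worked example of the split above, using hypothetical numbers (the real
 * sizes come from firmware): with allocated_size = 3072 entries and
 * num_alloc_vport = 8, the space is divided by (8 + 1) = 9, so each vport's
 * private quota is 3072 / 9 = 341 entries and the shared pool is
 * 341 + 3072 % 9 = 344 entries.
 */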
7376
39932473
JS
7377static void hclge_reset_umv_space(struct hclge_dev *hdev)
7378{
7379 struct hclge_vport *vport;
7380 int i;
7381
7382 for (i = 0; i < hdev->num_alloc_vport; i++) {
7383 vport = &hdev->vport[i];
7384 vport->used_umv_num = 0;
7385 }
7386
7d0b3451 7387 mutex_lock(&hdev->vport_lock);
39932473 7388 hdev->share_umv_size = hdev->priv_umv_size +
4c58f592 7389 hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7d0b3451 7390 mutex_unlock(&hdev->vport_lock);
39932473
JS
7391}
7392
7d0b3451 7393static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
39932473
JS
7394{
7395 struct hclge_dev *hdev = vport->back;
7396 bool is_full;
7397
7d0b3451
JS
7398 if (need_lock)
7399 mutex_lock(&hdev->vport_lock);
7400
39932473
JS
7401 is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
7402 hdev->share_umv_size == 0);
7d0b3451
JS
7403
7404 if (need_lock)
7405 mutex_unlock(&hdev->vport_lock);
39932473
JS
7406
7407 return is_full;
7408}
7409
7410static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
7411{
7412 struct hclge_dev *hdev = vport->back;
7413
39932473
JS
7414 if (is_free) {
7415 if (vport->used_umv_num > hdev->priv_umv_size)
7416 hdev->share_umv_size++;
54a395b6 7417
7418 if (vport->used_umv_num > 0)
7419 vport->used_umv_num--;
39932473 7420 } else {
54a395b6 7421 if (vport->used_umv_num >= hdev->priv_umv_size &&
7422 hdev->share_umv_size > 0)
39932473
JS
7423 hdev->share_umv_size--;
7424 vport->used_umv_num++;
7425 }
39932473
JS
7426}
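/* Accounting walk-through for the function above, assuming an illustrative
 * private quota of 2 entries: adding three addresses, the first two only
 * bump used_umv_num (to 1, then 2); the third sees
 * used_umv_num >= priv_umv_size, so it also consumes one entry from
 * share_umv_size. Freeing in reverse order, only the first free returns a
 * shared entry (used_umv_num 3 > 2); the remaining frees just decrement
 * used_umv_num.
 */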
7427
ee4bcd3b
JS
7428static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
7429 const u8 *mac_addr)
7430{
7431 struct hclge_mac_node *mac_node, *tmp;
7432
7433 list_for_each_entry_safe(mac_node, tmp, list, node)
7434 if (ether_addr_equal(mac_addr, mac_node->mac_addr))
7435 return mac_node;
7436
7437 return NULL;
7438}
7439
7440static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
7441 enum HCLGE_MAC_NODE_STATE state)
7442{
7443 switch (state) {
7444 /* from set_rx_mode or tmp_add_list */
7445 case HCLGE_MAC_TO_ADD:
7446 if (mac_node->state == HCLGE_MAC_TO_DEL)
7447 mac_node->state = HCLGE_MAC_ACTIVE;
7448 break;
7449 /* only from set_rx_mode */
7450 case HCLGE_MAC_TO_DEL:
7451 if (mac_node->state == HCLGE_MAC_TO_ADD) {
7452 list_del(&mac_node->node);
7453 kfree(mac_node);
7454 } else {
7455 mac_node->state = HCLGE_MAC_TO_DEL;
7456 }
7457 break;
7458 /* only from tmp_add_list, the mac_node->state won't be
7459 * ACTIVE.
7460 */
7461 case HCLGE_MAC_ACTIVE:
7462 if (mac_node->state == HCLGE_MAC_TO_ADD)
7463 mac_node->state = HCLGE_MAC_ACTIVE;
7464
7465 break;
7466 }
7467}
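/* The state transitions implemented above, as a table (row = current
 * mac_node->state, column = requested state; derived from the switch above):
 *
 *   current \ request | TO_ADD     | TO_DEL     | ACTIVE
 *   ------------------+------------+------------+-----------
 *   TO_ADD            | unchanged  | node freed | ACTIVE
 *   TO_DEL            | ACTIVE     | unchanged  | unchanged
 *   ACTIVE            | unchanged  | TO_DEL     | unchanged
 *
 * ACTIVE requests only come from tmp_add_list, so the ACTIVE/ACTIVE cell
 * is never exercised in practice.
 */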
7468
7469int hclge_update_mac_list(struct hclge_vport *vport,
7470 enum HCLGE_MAC_NODE_STATE state,
7471 enum HCLGE_MAC_ADDR_TYPE mac_type,
7472 const unsigned char *addr)
7473{
7474 struct hclge_dev *hdev = vport->back;
7475 struct hclge_mac_node *mac_node;
7476 struct list_head *list;
7477
7478 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7479 &vport->uc_mac_list : &vport->mc_mac_list;
7480
7481 spin_lock_bh(&vport->mac_list_lock);
7482
7483 /* if the mac addr is already in the mac list, no need to add a new
 7484 	 * one into it, just check the mac addr state, convert it to a new
 7485 	 * state, or just remove it, or do nothing.
7486 */
7487 mac_node = hclge_find_mac_node(list, addr);
7488 if (mac_node) {
7489 hclge_update_mac_node(mac_node, state);
7490 spin_unlock_bh(&vport->mac_list_lock);
7491 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
7492 return 0;
7493 }
7494
 7495 	/* if this address has never been added, there is no need to delete it */
7496 if (state == HCLGE_MAC_TO_DEL) {
7497 spin_unlock_bh(&vport->mac_list_lock);
7498 dev_err(&hdev->pdev->dev,
7499 "failed to delete address %pM from mac list\n",
7500 addr);
7501 return -ENOENT;
7502 }
7503
7504 mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
7505 if (!mac_node) {
7506 spin_unlock_bh(&vport->mac_list_lock);
7507 return -ENOMEM;
7508 }
7509
7510 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
7511
7512 mac_node->state = state;
7513 ether_addr_copy(mac_node->mac_addr, addr);
7514 list_add_tail(&mac_node->node, list);
7515
7516 spin_unlock_bh(&vport->mac_list_lock);
7517
7518 return 0;
7519}
7520
46a3df9f
S
7521static int hclge_add_uc_addr(struct hnae3_handle *handle,
7522 const unsigned char *addr)
7523{
7524 struct hclge_vport *vport = hclge_get_vport(handle);
7525
ee4bcd3b
JS
7526 return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
7527 addr);
46a3df9f
S
7528}
7529
7530int hclge_add_uc_addr_common(struct hclge_vport *vport,
7531 const unsigned char *addr)
7532{
7533 struct hclge_dev *hdev = vport->back;
d44f9b63 7534 struct hclge_mac_vlan_tbl_entry_cmd req;
d07b6bb4 7535 struct hclge_desc desc;
a90bb9a5 7536 u16 egress_port = 0;
aa7a795e 7537 int ret;
46a3df9f
S
7538
7539 /* mac addr check */
7540 if (is_zero_ether_addr(addr) ||
7541 is_broadcast_ether_addr(addr) ||
7542 is_multicast_ether_addr(addr)) {
7543 dev_err(&hdev->pdev->dev,
7544 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
9b2f3477 7545 addr, is_zero_ether_addr(addr),
46a3df9f
S
7546 is_broadcast_ether_addr(addr),
7547 is_multicast_ether_addr(addr));
7548 return -EINVAL;
7549 }
7550
7551 memset(&req, 0, sizeof(req));
a90bb9a5 7552
e4e87715
PL
7553 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7554 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
a90bb9a5
YL
7555
7556 req.egress_port = cpu_to_le16(egress_port);
46a3df9f 7557
3a586422 7558 hclge_prepare_mac_addr(&req, addr, false);
46a3df9f 7559
d07b6bb4
JS
 7560 	/* Lookup the mac address in the mac_vlan table, and add
 7561 	 * it if the entry does not exist. Duplicate unicast entries
 7562 	 * are not allowed in the mac vlan table.
7563 */
7564 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
39932473 7565 if (ret == -ENOENT) {
7d0b3451
JS
7566 mutex_lock(&hdev->vport_lock);
7567 if (!hclge_is_umv_space_full(vport, false)) {
39932473
JS
7568 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
7569 if (!ret)
7570 hclge_update_umv_space(vport, false);
7d0b3451 7571 mutex_unlock(&hdev->vport_lock);
39932473
JS
7572 return ret;
7573 }
7d0b3451 7574 mutex_unlock(&hdev->vport_lock);
39932473 7575
c631c696
JS
7576 if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
7577 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
7578 hdev->priv_umv_size);
39932473
JS
7579
7580 return -ENOSPC;
7581 }
d07b6bb4
JS
7582
7583 /* check if we just hit the duplicate */
72110b56 7584 if (!ret) {
adcf738b 7585 dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
72110b56
PL
7586 vport->vport_id, addr);
7587 return 0;
7588 }
d07b6bb4
JS
7589
7590 dev_err(&hdev->pdev->dev,
7591 "PF failed to add unicast entry(%pM) in the MAC table\n",
7592 addr);
46a3df9f 7593
aa7a795e 7594 return ret;
46a3df9f
S
7595}
7596
7597static int hclge_rm_uc_addr(struct hnae3_handle *handle,
7598 const unsigned char *addr)
7599{
7600 struct hclge_vport *vport = hclge_get_vport(handle);
7601
ee4bcd3b
JS
7602 return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
7603 addr);
46a3df9f
S
7604}
7605
7606int hclge_rm_uc_addr_common(struct hclge_vport *vport,
7607 const unsigned char *addr)
7608{
7609 struct hclge_dev *hdev = vport->back;
d44f9b63 7610 struct hclge_mac_vlan_tbl_entry_cmd req;
aa7a795e 7611 int ret;
46a3df9f
S
7612
7613 /* mac addr check */
7614 if (is_zero_ether_addr(addr) ||
7615 is_broadcast_ether_addr(addr) ||
7616 is_multicast_ether_addr(addr)) {
9b2f3477
WL
7617 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
7618 addr);
46a3df9f
S
7619 return -EINVAL;
7620 }
7621
7622 memset(&req, 0, sizeof(req));
e4e87715 7623 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
3a586422 7624 hclge_prepare_mac_addr(&req, addr, false);
aa7a795e 7625 ret = hclge_remove_mac_vlan_tbl(vport, &req);
7d0b3451
JS
7626 if (!ret) {
7627 mutex_lock(&hdev->vport_lock);
39932473 7628 hclge_update_umv_space(vport, true);
7d0b3451
JS
7629 mutex_unlock(&hdev->vport_lock);
7630 } else if (ret == -ENOENT) {
ee4bcd3b 7631 ret = 0;
7d0b3451 7632 }
46a3df9f 7633
aa7a795e 7634 return ret;
46a3df9f
S
7635}
7636
7637static int hclge_add_mc_addr(struct hnae3_handle *handle,
7638 const unsigned char *addr)
7639{
7640 struct hclge_vport *vport = hclge_get_vport(handle);
7641
ee4bcd3b
JS
7642 return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
7643 addr);
46a3df9f
S
7644}
7645
7646int hclge_add_mc_addr_common(struct hclge_vport *vport,
7647 const unsigned char *addr)
7648{
7649 struct hclge_dev *hdev = vport->back;
d44f9b63 7650 struct hclge_mac_vlan_tbl_entry_cmd req;
46a3df9f 7651 struct hclge_desc desc[3];
46a3df9f
S
7652 int status;
7653
7654 /* mac addr check */
7655 if (!is_multicast_ether_addr(addr)) {
7656 dev_err(&hdev->pdev->dev,
7657 "Add mc mac err! invalid mac:%pM.\n",
7658 addr);
7659 return -EINVAL;
7660 }
7661 memset(&req, 0, sizeof(req));
3a586422 7662 hclge_prepare_mac_addr(&req, addr, true);
46a3df9f 7663 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
63cbf7a9 7664 if (status) {
46a3df9f
S
 7665 		/* This mac addr does not exist, add a new entry for it */
7666 memset(desc[0].data, 0, sizeof(desc[0].data));
7667 memset(desc[1].data, 0, sizeof(desc[0].data));
7668 memset(desc[2].data, 0, sizeof(desc[0].data));
46a3df9f 7669 }
63cbf7a9
YM
7670 status = hclge_update_desc_vfid(desc, vport->vport_id, false);
7671 if (status)
7672 return status;
7673 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
46a3df9f 7674
c631c696
JS
 7675 	/* if already overflowed, do not print each time */
7676 if (status == -ENOSPC &&
7677 !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
1f6db589 7678 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
46a3df9f
S
7679
7680 return status;
7681}
7682
7683static int hclge_rm_mc_addr(struct hnae3_handle *handle,
7684 const unsigned char *addr)
7685{
7686 struct hclge_vport *vport = hclge_get_vport(handle);
7687
ee4bcd3b
JS
7688 return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
7689 addr);
46a3df9f
S
7690}
7691
7692int hclge_rm_mc_addr_common(struct hclge_vport *vport,
7693 const unsigned char *addr)
7694{
7695 struct hclge_dev *hdev = vport->back;
d44f9b63 7696 struct hclge_mac_vlan_tbl_entry_cmd req;
46a3df9f
S
7697 enum hclge_cmd_status status;
7698 struct hclge_desc desc[3];
46a3df9f
S
7699
7700 /* mac addr check */
7701 if (!is_multicast_ether_addr(addr)) {
7702 dev_dbg(&hdev->pdev->dev,
7703 "Remove mc mac err! invalid mac:%pM.\n",
7704 addr);
7705 return -EINVAL;
7706 }
7707
7708 memset(&req, 0, sizeof(req));
3a586422 7709 hclge_prepare_mac_addr(&req, addr, true);
46a3df9f
S
7710 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7711 if (!status) {
7712 /* This mac addr exist, remove this handle's VFID for it */
63cbf7a9
YM
7713 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
7714 if (status)
7715 return status;
46a3df9f
S
7716
7717 if (hclge_is_all_function_id_zero(desc))
 7718 			/* All the vfids are zero, so we need to delete this entry */
7719 status = hclge_remove_mac_vlan_tbl(vport, &req);
7720 else
 7721 			/* Not all the vfids are zero, update the vfid */
7722 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7723
ee4bcd3b 7724 } else if (status == -ENOENT) {
40cca1c5 7725 status = 0;
46a3df9f
S
7726 }
7727
46a3df9f
S
7728 return status;
7729}
7730
ee4bcd3b
JS
7731static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
7732 struct list_head *list,
7733 int (*sync)(struct hclge_vport *,
7734 const unsigned char *))
6dd86902 7735{
ee4bcd3b
JS
7736 struct hclge_mac_node *mac_node, *tmp;
7737 int ret;
6dd86902 7738
ee4bcd3b
JS
7739 list_for_each_entry_safe(mac_node, tmp, list, node) {
7740 ret = sync(vport, mac_node->mac_addr);
7741 if (!ret) {
7742 mac_node->state = HCLGE_MAC_ACTIVE;
7743 } else {
7744 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
7745 &vport->state);
7746 break;
7747 }
7748 }
7749}
6dd86902 7750
ee4bcd3b
JS
7751static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
7752 struct list_head *list,
7753 int (*unsync)(struct hclge_vport *,
7754 const unsigned char *))
7755{
7756 struct hclge_mac_node *mac_node, *tmp;
7757 int ret;
6dd86902 7758
ee4bcd3b
JS
7759 list_for_each_entry_safe(mac_node, tmp, list, node) {
7760 ret = unsync(vport, mac_node->mac_addr);
7761 if (!ret || ret == -ENOENT) {
7762 list_del(&mac_node->node);
7763 kfree(mac_node);
7764 } else {
7765 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
7766 &vport->state);
7767 break;
7768 }
7769 }
7770}
6dd86902 7771
c631c696 7772static bool hclge_sync_from_add_list(struct list_head *add_list,
ee4bcd3b
JS
7773 struct list_head *mac_list)
7774{
7775 struct hclge_mac_node *mac_node, *tmp, *new_node;
c631c696 7776 bool all_added = true;
6dd86902 7777
ee4bcd3b 7778 list_for_each_entry_safe(mac_node, tmp, add_list, node) {
c631c696
JS
7779 if (mac_node->state == HCLGE_MAC_TO_ADD)
7780 all_added = false;
7781
ee4bcd3b
JS
 7782 		/* if the mac address from tmp_add_list is not in the
 7783 		 * uc/mc_mac_list, it means a TO_DEL request was received
 7784 		 * during the time window of adding the mac address into the
 7785 		 * mac table. If the mac_node state is ACTIVE, change it to
 7786 		 * TO_DEL so it will be removed next time; otherwise it must be
 7787 		 * TO_ADD, meaning this address has not been added into the mac
 7788 		 * table yet, so just remove the mac node.
 7789 		 */
7790 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
7791 if (new_node) {
7792 hclge_update_mac_node(new_node, mac_node->state);
7793 list_del(&mac_node->node);
7794 kfree(mac_node);
7795 } else if (mac_node->state == HCLGE_MAC_ACTIVE) {
7796 mac_node->state = HCLGE_MAC_TO_DEL;
7797 list_del(&mac_node->node);
7798 list_add_tail(&mac_node->node, mac_list);
7799 } else {
7800 list_del(&mac_node->node);
7801 kfree(mac_node);
7802 }
7803 }
c631c696
JS
7804
7805 return all_added;
6dd86902 7806}
7807
ee4bcd3b
JS
7808static void hclge_sync_from_del_list(struct list_head *del_list,
7809 struct list_head *mac_list)
6dd86902 7810{
ee4bcd3b 7811 struct hclge_mac_node *mac_node, *tmp, *new_node;
6dd86902 7812
ee4bcd3b
JS
7813 list_for_each_entry_safe(mac_node, tmp, del_list, node) {
7814 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
7815 if (new_node) {
 7816 			/* If the mac addr exists in the mac list, it means
 7817 			 * a new TO_ADD request was received during the time window
 7818 			 * of configuring the mac address. The mac node state is
 7819 			 * TO_ADD and the address is already in the hardware
 7820 			 * (because the delete failed), so we just need to change
 7821 			 * the mac node state to ACTIVE.
7822 */
7823 new_node->state = HCLGE_MAC_ACTIVE;
7824 list_del(&mac_node->node);
7825 kfree(mac_node);
7826 } else {
7827 list_del(&mac_node->node);
7828 list_add_tail(&mac_node->node, mac_list);
7829 }
7830 }
7831}
6dd86902 7832
c631c696
JS
7833static void hclge_update_overflow_flags(struct hclge_vport *vport,
7834 enum HCLGE_MAC_ADDR_TYPE mac_type,
7835 bool is_all_added)
7836{
7837 if (mac_type == HCLGE_MAC_ADDR_UC) {
7838 if (is_all_added)
7839 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
7840 else
7841 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
7842 } else {
7843 if (is_all_added)
7844 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
7845 else
7846 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
7847 }
7848}
7849
ee4bcd3b
JS
7850static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
7851 enum HCLGE_MAC_ADDR_TYPE mac_type)
7852{
7853 struct hclge_mac_node *mac_node, *tmp, *new_node;
7854 struct list_head tmp_add_list, tmp_del_list;
7855 struct list_head *list;
c631c696 7856 bool all_added;
6dd86902 7857
ee4bcd3b
JS
7858 INIT_LIST_HEAD(&tmp_add_list);
7859 INIT_LIST_HEAD(&tmp_del_list);
6dd86902 7860
ee4bcd3b
JS
7861 /* move the mac addr to the tmp_add_list and tmp_del_list, then
7862 * we can add/delete these mac addr outside the spin lock
7863 */
7864 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7865 &vport->uc_mac_list : &vport->mc_mac_list;
6dd86902 7866
ee4bcd3b
JS
7867 spin_lock_bh(&vport->mac_list_lock);
7868
7869 list_for_each_entry_safe(mac_node, tmp, list, node) {
7870 switch (mac_node->state) {
7871 case HCLGE_MAC_TO_DEL:
7872 list_del(&mac_node->node);
7873 list_add_tail(&mac_node->node, &tmp_del_list);
7874 break;
7875 case HCLGE_MAC_TO_ADD:
7876 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
7877 if (!new_node)
7878 goto stop_traverse;
7879 ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
7880 new_node->state = mac_node->state;
7881 list_add_tail(&new_node->node, &tmp_add_list);
7882 break;
7883 default:
6dd86902 7884 break;
7885 }
7886 }
ee4bcd3b
JS
7887
7888stop_traverse:
7889 spin_unlock_bh(&vport->mac_list_lock);
7890
7891 /* delete first, in order to get max mac table space for adding */
7892 if (mac_type == HCLGE_MAC_ADDR_UC) {
7893 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
7894 hclge_rm_uc_addr_common);
7895 hclge_sync_vport_mac_list(vport, &tmp_add_list,
7896 hclge_add_uc_addr_common);
7897 } else {
7898 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
7899 hclge_rm_mc_addr_common);
7900 hclge_sync_vport_mac_list(vport, &tmp_add_list,
7901 hclge_add_mc_addr_common);
7902 }
7903
 7904 	/* if adding/deleting some mac addresses failed, move them back to
 7905 	 * the mac_list and retry next time.
7906 */
7907 spin_lock_bh(&vport->mac_list_lock);
7908
7909 hclge_sync_from_del_list(&tmp_del_list, list);
c631c696 7910 all_added = hclge_sync_from_add_list(&tmp_add_list, list);
ee4bcd3b
JS
7911
7912 spin_unlock_bh(&vport->mac_list_lock);
c631c696
JS
7913
7914 hclge_update_overflow_flags(vport, mac_type, all_added);
ee4bcd3b
JS
7915}
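/* The sync above is a three-phase protocol, so that the (possibly slow)
 * firmware commands never run under mac_list_lock:
 *   1. under the lock, move TO_DEL nodes to tmp_del_list and copy TO_ADD
 *      nodes to tmp_add_list;
 *   2. without the lock, delete first (to free table space), then add;
 *   3. under the lock again, merge the temporary lists back, resolving any
 *      TO_ADD/TO_DEL requests that arrived in between, and update the
 *      per-vport overflow promiscuous flags.
 */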
7916
7917static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
7918{
7919 struct hclge_dev *hdev = vport->back;
7920
7921 if (test_bit(vport->vport_id, hdev->vport_config_block))
7922 return false;
7923
7924 if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
7925 return true;
7926
7927 return false;
7928}
7929
7930static void hclge_sync_mac_table(struct hclge_dev *hdev)
7931{
7932 int i;
7933
7934 for (i = 0; i < hdev->num_alloc_vport; i++) {
7935 struct hclge_vport *vport = &hdev->vport[i];
7936
7937 if (!hclge_need_sync_mac_table(vport))
7938 continue;
7939
7940 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
7941 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
7942 }
6dd86902 7943}
7944
7945void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
7946 enum HCLGE_MAC_ADDR_TYPE mac_type)
7947{
ee4bcd3b
JS
7948 int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
7949 struct hclge_mac_node *mac_cfg, *tmp;
7950 struct hclge_dev *hdev = vport->back;
7951 struct list_head tmp_del_list, *list;
7952 int ret;
6dd86902 7953
ee4bcd3b
JS
7954 if (mac_type == HCLGE_MAC_ADDR_UC) {
7955 list = &vport->uc_mac_list;
7956 unsync = hclge_rm_uc_addr_common;
7957 } else {
7958 list = &vport->mc_mac_list;
7959 unsync = hclge_rm_mc_addr_common;
7960 }
6dd86902 7961
ee4bcd3b 7962 INIT_LIST_HEAD(&tmp_del_list);
6dd86902 7963
ee4bcd3b
JS
7964 if (!is_del_list)
7965 set_bit(vport->vport_id, hdev->vport_config_block);
6dd86902 7966
ee4bcd3b
JS
7967 spin_lock_bh(&vport->mac_list_lock);
7968
7969 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7970 switch (mac_cfg->state) {
7971 case HCLGE_MAC_TO_DEL:
7972 case HCLGE_MAC_ACTIVE:
6dd86902 7973 list_del(&mac_cfg->node);
ee4bcd3b
JS
7974 list_add_tail(&mac_cfg->node, &tmp_del_list);
7975 break;
7976 case HCLGE_MAC_TO_ADD:
7977 if (is_del_list) {
7978 list_del(&mac_cfg->node);
7979 kfree(mac_cfg);
7980 }
7981 break;
6dd86902 7982 }
7983 }
ee4bcd3b
JS
7984
7985 spin_unlock_bh(&vport->mac_list_lock);
7986
7987 list_for_each_entry_safe(mac_cfg, tmp, &tmp_del_list, node) {
7988 ret = unsync(vport, mac_cfg->mac_addr);
7989 if (!ret || ret == -ENOENT) {
7990 /* clear all mac addr from hardware, but remain these
7991 * mac addr in the mac list, and restore them after
7992 * vf reset finished.
7993 */
7994 if (!is_del_list &&
7995 mac_cfg->state == HCLGE_MAC_ACTIVE) {
7996 mac_cfg->state = HCLGE_MAC_TO_ADD;
7997 } else {
7998 list_del(&mac_cfg->node);
7999 kfree(mac_cfg);
8000 }
8001 } else if (is_del_list) {
8002 mac_cfg->state = HCLGE_MAC_TO_DEL;
8003 }
8004 }
8005
8006 spin_lock_bh(&vport->mac_list_lock);
8007
8008 hclge_sync_from_del_list(&tmp_del_list, list);
8009
8010 spin_unlock_bh(&vport->mac_list_lock);
8011}
8012
 8013/* remove all mac addresses when uninitializing */
8014static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
8015 enum HCLGE_MAC_ADDR_TYPE mac_type)
8016{
8017 struct hclge_mac_node *mac_node, *tmp;
8018 struct hclge_dev *hdev = vport->back;
8019 struct list_head tmp_del_list, *list;
8020
8021 INIT_LIST_HEAD(&tmp_del_list);
8022
8023 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8024 &vport->uc_mac_list : &vport->mc_mac_list;
8025
8026 spin_lock_bh(&vport->mac_list_lock);
8027
8028 list_for_each_entry_safe(mac_node, tmp, list, node) {
8029 switch (mac_node->state) {
8030 case HCLGE_MAC_TO_DEL:
8031 case HCLGE_MAC_ACTIVE:
8032 list_del(&mac_node->node);
8033 list_add_tail(&mac_node->node, &tmp_del_list);
8034 break;
8035 case HCLGE_MAC_TO_ADD:
8036 list_del(&mac_node->node);
8037 kfree(mac_node);
8038 break;
8039 }
8040 }
8041
8042 spin_unlock_bh(&vport->mac_list_lock);
8043
8044 if (mac_type == HCLGE_MAC_ADDR_UC)
8045 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8046 hclge_rm_uc_addr_common);
8047 else
8048 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8049 hclge_rm_mc_addr_common);
8050
8051 if (!list_empty(&tmp_del_list))
8052 dev_warn(&hdev->pdev->dev,
 8053 			 "failed to completely uninit %s mac list for vport %u\n",
8054 mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
8055 vport->vport_id);
8056
8057 list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
8058 list_del(&mac_node->node);
8059 kfree(mac_node);
8060 }
6dd86902 8061}
8062
ee4bcd3b 8063static void hclge_uninit_mac_table(struct hclge_dev *hdev)
6dd86902 8064{
6dd86902 8065 struct hclge_vport *vport;
8066 int i;
8067
6dd86902 8068 for (i = 0; i < hdev->num_alloc_vport; i++) {
8069 vport = &hdev->vport[i];
ee4bcd3b
JS
8070 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
8071 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
6dd86902 8072 }
6dd86902 8073}
8074
f5aac71c
FL
8075static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
8076 u16 cmdq_resp, u8 resp_code)
8077{
8078#define HCLGE_ETHERTYPE_SUCCESS_ADD 0
8079#define HCLGE_ETHERTYPE_ALREADY_ADD 1
8080#define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
8081#define HCLGE_ETHERTYPE_KEY_CONFLICT 3
8082
8083 int return_status;
8084
8085 if (cmdq_resp) {
8086 dev_err(&hdev->pdev->dev,
adcf738b 8087 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
f5aac71c
FL
8088 cmdq_resp);
8089 return -EIO;
8090 }
8091
8092 switch (resp_code) {
8093 case HCLGE_ETHERTYPE_SUCCESS_ADD:
8094 case HCLGE_ETHERTYPE_ALREADY_ADD:
8095 return_status = 0;
8096 break;
8097 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
8098 dev_err(&hdev->pdev->dev,
8099 "add mac ethertype failed for manager table overflow.\n");
8100 return_status = -EIO;
8101 break;
8102 case HCLGE_ETHERTYPE_KEY_CONFLICT:
8103 dev_err(&hdev->pdev->dev,
8104 "add mac ethertype failed for key conflict.\n");
8105 return_status = -EIO;
8106 break;
8107 default:
8108 dev_err(&hdev->pdev->dev,
adcf738b 8109 "add mac ethertype failed for undefined, code=%u.\n",
f5aac71c
FL
8110 resp_code);
8111 return_status = -EIO;
8112 }
8113
8114 return return_status;
8115}
8116
8e6de441
HT
8117static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
8118 u8 *mac_addr)
8119{
8120 struct hclge_mac_vlan_tbl_entry_cmd req;
8121 struct hclge_dev *hdev = vport->back;
8122 struct hclge_desc desc;
8123 u16 egress_port = 0;
8124 int i;
8125
8126 if (is_zero_ether_addr(mac_addr))
8127 return false;
8128
8129 memset(&req, 0, sizeof(req));
8130 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8131 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8132 req.egress_port = cpu_to_le16(egress_port);
8133 hclge_prepare_mac_addr(&req, mac_addr, false);
8134
8135 if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
8136 return true;
8137
8138 vf_idx += HCLGE_VF_VPORT_START_NUM;
8139 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
8140 if (i != vf_idx &&
8141 ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
8142 return true;
8143
8144 return false;
8145}
8146
8147static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
8148 u8 *mac_addr)
8149{
8150 struct hclge_vport *vport = hclge_get_vport(handle);
8151 struct hclge_dev *hdev = vport->back;
8152
8153 vport = hclge_get_vf_vport(hdev, vf);
8154 if (!vport)
8155 return -EINVAL;
8156
8157 if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
8158 dev_info(&hdev->pdev->dev,
8159 "Specified MAC(=%pM) is same as before, no change committed!\n",
8160 mac_addr);
8161 return 0;
8162 }
8163
8164 if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
8165 dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
8166 mac_addr);
8167 return -EEXIST;
8168 }
8169
8170 ether_addr_copy(vport->vf_info.mac, mac_addr);
8e6de441 8171
90913670
YL
8172 if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
8173 dev_info(&hdev->pdev->dev,
8174 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
8175 vf, mac_addr);
8176 return hclge_inform_reset_assert_to_vf(vport);
8177 }
8178
8179 dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
8180 vf, mac_addr);
8181 return 0;
8e6de441
HT
8182}
8183
f5aac71c
FL
8184static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
8185 const struct hclge_mac_mgr_tbl_entry_cmd *req)
8186{
8187 struct hclge_desc desc;
8188 u8 resp_code;
8189 u16 retval;
8190 int ret;
8191
8192 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
8193 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
8194
8195 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8196 if (ret) {
8197 dev_err(&hdev->pdev->dev,
8198 "add mac ethertype failed for cmd_send, ret =%d.\n",
8199 ret);
8200 return ret;
8201 }
8202
8203 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8204 retval = le16_to_cpu(desc.retval);
8205
8206 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
8207}
8208
8209static int init_mgr_tbl(struct hclge_dev *hdev)
8210{
8211 int ret;
8212 int i;
8213
8214 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
8215 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
8216 if (ret) {
8217 dev_err(&hdev->pdev->dev,
8218 "add mac ethertype failed, ret =%d.\n",
8219 ret);
8220 return ret;
8221 }
8222 }
8223
8224 return 0;
8225}
8226
46a3df9f
S
8227static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
8228{
8229 struct hclge_vport *vport = hclge_get_vport(handle);
8230 struct hclge_dev *hdev = vport->back;
8231
8232 ether_addr_copy(p, hdev->hw.mac.mac_addr);
8233}
8234
ee4bcd3b
JS
8235int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
8236 const u8 *old_addr, const u8 *new_addr)
8237{
8238 struct list_head *list = &vport->uc_mac_list;
8239 struct hclge_mac_node *old_node, *new_node;
8240
8241 new_node = hclge_find_mac_node(list, new_addr);
8242 if (!new_node) {
8243 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8244 if (!new_node)
8245 return -ENOMEM;
8246
8247 new_node->state = HCLGE_MAC_TO_ADD;
8248 ether_addr_copy(new_node->mac_addr, new_addr);
8249 list_add(&new_node->node, list);
8250 } else {
8251 if (new_node->state == HCLGE_MAC_TO_DEL)
8252 new_node->state = HCLGE_MAC_ACTIVE;
8253
 8254 		/* make sure the new addr is at the list head, to avoid the dev
 8255 		 * addr not being re-added into the mac table due to the umv
 8256 		 * space limitation after a global/imp reset, which clears the
 8257 		 * mac table in hardware.
8258 */
8259 list_move(&new_node->node, list);
8260 }
8261
8262 if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
8263 old_node = hclge_find_mac_node(list, old_addr);
8264 if (old_node) {
8265 if (old_node->state == HCLGE_MAC_TO_ADD) {
8266 list_del(&old_node->node);
8267 kfree(old_node);
8268 } else {
8269 old_node->state = HCLGE_MAC_TO_DEL;
8270 }
8271 }
8272 }
8273
8274 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8275
8276 return 0;
8277}
8278
59098055
FL
8279static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
8280 bool is_first)
46a3df9f
S
8281{
8282 const unsigned char *new_addr = (const unsigned char *)p;
8283 struct hclge_vport *vport = hclge_get_vport(handle);
8284 struct hclge_dev *hdev = vport->back;
ee4bcd3b 8285 unsigned char *old_addr = NULL;
18838d0c 8286 int ret;
46a3df9f
S
8287
8288 /* mac addr check */
8289 if (is_zero_ether_addr(new_addr) ||
8290 is_broadcast_ether_addr(new_addr) ||
8291 is_multicast_ether_addr(new_addr)) {
8292 dev_err(&hdev->pdev->dev,
ee4bcd3b 8293 "change uc mac err! invalid mac: %pM.\n",
46a3df9f
S
8294 new_addr);
8295 return -EINVAL;
8296 }
8297
ee4bcd3b 8298 ret = hclge_pause_addr_cfg(hdev, new_addr);
18838d0c
FL
8299 if (ret) {
8300 dev_err(&hdev->pdev->dev,
ee4bcd3b 8301 "failed to configure mac pause address, ret = %d\n",
18838d0c 8302 ret);
ee4bcd3b 8303 return ret;
46a3df9f
S
8304 }
8305
ee4bcd3b
JS
8306 if (!is_first)
8307 old_addr = hdev->hw.mac.mac_addr;
8308
8309 spin_lock_bh(&vport->mac_list_lock);
8310 ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
18838d0c
FL
8311 if (ret) {
8312 dev_err(&hdev->pdev->dev,
ee4bcd3b
JS
8313 "failed to change the mac addr:%pM, ret = %d\n",
8314 new_addr, ret);
8315 spin_unlock_bh(&vport->mac_list_lock);
8316
8317 if (!is_first)
8318 hclge_pause_addr_cfg(hdev, old_addr);
18838d0c 8319
ee4bcd3b
JS
8320 return ret;
8321 }
 8322 	/* we must update the dev addr under the spin lock's protection, to
 8323 	 * prevent the dev addr from being removed by the set_rx_mode path.
8324 */
18838d0c 8325 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
ee4bcd3b
JS
8326 spin_unlock_bh(&vport->mac_list_lock);
8327
8328 hclge_task_schedule(hdev, 0);
18838d0c
FL
8329
8330 return 0;
46a3df9f
S
8331}
8332
26483246
XW
8333static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
8334 int cmd)
8335{
8336 struct hclge_vport *vport = hclge_get_vport(handle);
8337 struct hclge_dev *hdev = vport->back;
8338
8339 if (!hdev->hw.mac.phydev)
8340 return -EOPNOTSUPP;
8341
8342 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
8343}
8344
46a3df9f 8345static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
30ebc576 8346 u8 fe_type, bool filter_en, u8 vf_id)
46a3df9f 8347{
d44f9b63 8348 struct hclge_vlan_filter_ctrl_cmd *req;
46a3df9f
S
8349 struct hclge_desc desc;
8350 int ret;
8351
903b85d3
JS
8352 /* read current vlan filter parameter */
8353 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
d44f9b63 8354 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
46a3df9f 8355 req->vlan_type = vlan_type;
30ebc576 8356 req->vf_id = vf_id;
46a3df9f 8357
903b85d3
JS
8358 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8359 if (ret) {
8360 dev_err(&hdev->pdev->dev,
8361 "failed to get vlan filter config, ret = %d.\n", ret);
8362 return ret;
8363 }
8364
8365 /* modify and write new config parameter */
8366 hclge_cmd_reuse_desc(&desc, false);
8367 req->vlan_fe = filter_en ?
8368 (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
8369
46a3df9f 8370 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 8371 if (ret)
903b85d3 8372 dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
46a3df9f 8373 ret);
46a3df9f 8374
3f639907 8375 return ret;
46a3df9f
S
8376}
8377
391b5e93
JS
8378#define HCLGE_FILTER_TYPE_VF 0
8379#define HCLGE_FILTER_TYPE_PORT 1
64d114f0
ZL
8380#define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0)
8381#define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0)
8382#define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1)
8383#define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
8384#define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3)
8385#define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \
8386 | HCLGE_FILTER_FE_ROCE_EGRESS_B)
8387#define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \
8388 | HCLGE_FILTER_FE_ROCE_INGRESS_B)
391b5e93
JS
8389
8390static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
8391{
8392 struct hclge_vport *vport = hclge_get_vport(handle);
8393 struct hclge_dev *hdev = vport->back;
8394
295ba232 8395 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
64d114f0 8396 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
30ebc576 8397 HCLGE_FILTER_FE_EGRESS, enable, 0);
64d114f0 8398 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
30ebc576 8399 HCLGE_FILTER_FE_INGRESS, enable, 0);
64d114f0
ZL
8400 } else {
8401 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
30ebc576
JS
8402 HCLGE_FILTER_FE_EGRESS_V1_B, enable,
8403 0);
64d114f0 8404 }
c60edc17
JS
8405 if (enable)
8406 handle->netdev_flags |= HNAE3_VLAN_FLTR;
8407 else
8408 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
391b5e93
JS
8409}
8410
ebaf1908 8411static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
70a21490 8412 bool is_kill, u16 vlan,
dc8131d8 8413 __be16 proto)
46a3df9f 8414{
22044f95 8415 struct hclge_vport *vport = &hdev->vport[vfid];
d44f9b63
YL
8416 struct hclge_vlan_filter_vf_cfg_cmd *req0;
8417 struct hclge_vlan_filter_vf_cfg_cmd *req1;
46a3df9f
S
8418 struct hclge_desc desc[2];
8419 u8 vf_byte_val;
8420 u8 vf_byte_off;
8421 int ret;
8422
81a9255e 8423 /* if vf vlan table is full, firmware will close vf vlan filter, it
22044f95
JS
 8424 	 * is impossible and unnecessary to add a new vlan id to the vf vlan filter.
 8425 	 * If spoof check is enabled and the vf vlan table is full, it shouldn't add
 8426 	 * a new vlan, because tx packets with these vlan ids will be dropped.
81a9255e 8427 */
22044f95
JS
8428 if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
8429 if (vport->vf_info.spoofchk && vlan) {
8430 dev_err(&hdev->pdev->dev,
8431 "Can't add vlan due to spoof check is on and vf vlan table is full\n");
8432 return -EPERM;
8433 }
81a9255e 8434 return 0;
22044f95 8435 }
81a9255e 8436
46a3df9f
S
8437 hclge_cmd_setup_basic_desc(&desc[0],
8438 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
8439 hclge_cmd_setup_basic_desc(&desc[1],
8440 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
8441
8442 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8443
8444 vf_byte_off = vfid / 8;
8445 vf_byte_val = 1 << (vfid % 8);
8446
d44f9b63
YL
8447 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
8448 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
46a3df9f 8449
a90bb9a5 8450 req0->vlan_id = cpu_to_le16(vlan);
46a3df9f
S
8451 req0->vlan_cfg = is_kill;
8452
8453 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
8454 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
8455 else
8456 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
8457
8458 ret = hclge_cmd_send(&hdev->hw, desc, 2);
8459 if (ret) {
8460 dev_err(&hdev->pdev->dev,
8461 "Send vf vlan command fail, ret =%d.\n",
8462 ret);
8463 return ret;
8464 }
8465
8466 if (!is_kill) {
6c251711 8467#define HCLGE_VF_VLAN_NO_ENTRY 2
46a3df9f
S
8468 if (!req0->resp_code || req0->resp_code == 1)
8469 return 0;
8470
6c251711 8471 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
81a9255e 8472 set_bit(vfid, hdev->vf_vlan_full);
6c251711
YL
8473 dev_warn(&hdev->pdev->dev,
8474 "vf vlan table is full, vf vlan filter is disabled\n");
8475 return 0;
8476 }
8477
46a3df9f 8478 dev_err(&hdev->pdev->dev,
adcf738b 8479 "Add vf vlan filter fail, ret =%u.\n",
46a3df9f
S
8480 req0->resp_code);
8481 } else {
41dafea2 8482#define HCLGE_VF_VLAN_DEL_NO_FOUND 1
46a3df9f
S
8483 if (!req0->resp_code)
8484 return 0;
8485
d0c31df2
JS
8486 /* vf vlan filter is disabled when vf vlan table is full,
 8487 		 * so new vlan ids will not have been added into the vf vlan table.
 8488 		 * Just return 0 without warning, to avoid massive verbose
 8489 		 * print logs on unload.
8490 */
8491 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
41dafea2 8492 return 0;
41dafea2 8493
46a3df9f 8494 dev_err(&hdev->pdev->dev,
adcf738b 8495 "Kill vf vlan filter fail, ret =%u.\n",
46a3df9f
S
8496 req0->resp_code);
8497 }
8498
8499 return -EIO;
8500}
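/* Worked example of the vf bitmap addressing above, with an illustrative
 * vfid:
 *   vfid 10 -> vf_byte_off = 10 / 8 = 1, vf_byte_val = 1 << (10 % 8) = 0x04
 * The bit lands in req0->vf_bitmap[] while vf_byte_off is below
 * HCLGE_MAX_VF_BYTES, otherwise in req1->vf_bitmap[] of the chained
 * descriptor.
 */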
8501
dc8131d8
YL
8502static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
8503 u16 vlan_id, bool is_kill)
46a3df9f 8504{
d44f9b63 8505 struct hclge_vlan_filter_pf_cfg_cmd *req;
46a3df9f
S
8506 struct hclge_desc desc;
8507 u8 vlan_offset_byte_val;
8508 u8 vlan_offset_byte;
8509 u8 vlan_offset_160;
8510 int ret;
8511
8512 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
8513
d6ad7c53
GL
8514 vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
8515 vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
8516 HCLGE_VLAN_BYTE_SIZE;
8517 vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
46a3df9f 8518
d44f9b63 8519 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
46a3df9f
S
8520 req->vlan_offset = vlan_offset_160;
8521 req->vlan_cfg = is_kill;
8522 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
8523
8524 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
dc8131d8
YL
8525 if (ret)
8526 dev_err(&hdev->pdev->dev,
8527 "port vlan command, send fail, ret =%d.\n", ret);
8528 return ret;
8529}
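/* Worked example of the port VLAN bitmap addressing above, assuming
 * HCLGE_VLAN_ID_OFFSET_STEP == 160 and HCLGE_VLAN_BYTE_SIZE == 8 (as the
 * field names suggest): for vlan_id 1000,
 *   vlan_offset_160      = 1000 / 160       = 6
 *   vlan_offset_byte     = (1000 % 160) / 8 = 5
 *   vlan_offset_byte_val = 1 << (1000 % 8)  = 0x01
 * i.e. the command addresses one 160-VLAN window and sets a single bit in
 * that window's bitmap.
 */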
8530
8531static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
70a21490 8532 u16 vport_id, u16 vlan_id,
dc8131d8
YL
8533 bool is_kill)
8534{
8535 u16 vport_idx, vport_num = 0;
8536 int ret;
8537
daaa8521
YL
8538 if (is_kill && !vlan_id)
8539 return 0;
8540
dc8131d8 8541 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
70a21490 8542 proto);
46a3df9f
S
8543 if (ret) {
8544 dev_err(&hdev->pdev->dev,
adcf738b 8545 "Set %u vport vlan filter config fail, ret =%d.\n",
dc8131d8 8546 vport_id, ret);
46a3df9f
S
8547 return ret;
8548 }
8549
dc8131d8
YL
8550 /* vlan 0 may be added twice when 8021q module is enabled */
8551 if (!is_kill && !vlan_id &&
8552 test_bit(vport_id, hdev->vlan_table[vlan_id]))
8553 return 0;
8554
8555 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
46a3df9f 8556 dev_err(&hdev->pdev->dev,
adcf738b 8557 "Add port vlan failed, vport %u is already in vlan %u\n",
dc8131d8
YL
8558 vport_id, vlan_id);
8559 return -EINVAL;
46a3df9f
S
8560 }
8561
dc8131d8
YL
8562 if (is_kill &&
8563 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
8564 dev_err(&hdev->pdev->dev,
adcf738b 8565 "Delete port vlan failed, vport %u is not in vlan %u\n",
dc8131d8
YL
8566 vport_id, vlan_id);
8567 return -EINVAL;
8568 }
8569
54e97d11 8570 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
dc8131d8
YL
8571 vport_num++;
8572
8573 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
8574 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
8575 is_kill);
8576
8577 return ret;
8578}
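/* Note on the function above: the per-VLAN vport bitmap
 * (hdev->vlan_table[vlan_id]) acts as a reference count for the port-level
 * filter. The port VLAN entry is only programmed when the first vport joins
 * a VLAN (vport_num == 1 on add) and only removed when the last vport leaves
 * it (vport_num == 0 after kill). VLAN 0 is special-cased because the 8021q
 * module may add it twice.
 */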
8579
5f6ea83f
PL
8580static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
8581{
8582 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
8583 struct hclge_vport_vtag_tx_cfg_cmd *req;
8584 struct hclge_dev *hdev = vport->back;
8585 struct hclge_desc desc;
d9c0f275 8586 u16 bmap_index;
5f6ea83f
PL
8587 int status;
8588
8589 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
8590
8591 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
8592 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
8593 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
e4e87715
PL
8594 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
8595 vcfg->accept_tag1 ? 1 : 0);
8596 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
8597 vcfg->accept_untag1 ? 1 : 0);
8598 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
8599 vcfg->accept_tag2 ? 1 : 0);
8600 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
8601 vcfg->accept_untag2 ? 1 : 0);
8602 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
8603 vcfg->insert_tag1_en ? 1 : 0);
8604 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
8605 vcfg->insert_tag2_en ? 1 : 0);
8606 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
5f6ea83f
PL
8607
8608 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
d9c0f275
JS
8609 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8610 HCLGE_VF_NUM_PER_BYTE;
8611 req->vf_bitmap[bmap_index] =
8612 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
5f6ea83f
PL
8613
8614 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8615 if (status)
8616 dev_err(&hdev->pdev->dev,
8617 "Send port txvlan cfg command fail, ret =%d\n",
8618 status);
8619
8620 return status;
8621}
8622
8623static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
8624{
8625 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
8626 struct hclge_vport_vtag_rx_cfg_cmd *req;
8627 struct hclge_dev *hdev = vport->back;
8628 struct hclge_desc desc;
d9c0f275 8629 u16 bmap_index;
5f6ea83f
PL
8630 int status;
8631
8632 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
8633
8634 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
e4e87715
PL
8635 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
8636 vcfg->strip_tag1_en ? 1 : 0);
8637 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
8638 vcfg->strip_tag2_en ? 1 : 0);
8639 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
8640 vcfg->vlan1_vlan_prionly ? 1 : 0);
8641 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
8642 vcfg->vlan2_vlan_prionly ? 1 : 0);
5f6ea83f
PL
8643
8644 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
d9c0f275
JS
8645 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8646 HCLGE_VF_NUM_PER_BYTE;
8647 req->vf_bitmap[bmap_index] =
8648 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
5f6ea83f
PL
8649
8650 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8651 if (status)
8652 dev_err(&hdev->pdev->dev,
8653 "Send port rxvlan cfg command fail, ret =%d\n",
8654 status);
8655
8656 return status;
8657}
8658
741fca16
JS
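/* Set up both TX and RX VLAN offload for a vport according to its port
 * based VLAN state: when it is enabled, the port VLAN tag is inserted as
 * tag1 on TX and the RX tag stripping is adjusted accordingly.
 */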
8659static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
8660 u16 port_base_vlan_state,
8661 u16 vlan_tag)
8662{
8663 int ret;
8664
8665 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8666 vport->txvlan_cfg.accept_tag1 = true;
8667 vport->txvlan_cfg.insert_tag1_en = false;
8668 vport->txvlan_cfg.default_tag1 = 0;
8669 } else {
8670 vport->txvlan_cfg.accept_tag1 = false;
8671 vport->txvlan_cfg.insert_tag1_en = true;
8672 vport->txvlan_cfg.default_tag1 = vlan_tag;
8673 }
8674
8675 vport->txvlan_cfg.accept_untag1 = true;
8676
 8677	/* accept_tag2 and accept_untag2 are not supported on
 8678	 * pdev revision(0x20); newer revisions support them, but
 8679	 * these two fields cannot be configured by the user.
 8680	 */
8681 vport->txvlan_cfg.accept_tag2 = true;
8682 vport->txvlan_cfg.accept_untag2 = true;
8683 vport->txvlan_cfg.insert_tag2_en = false;
8684 vport->txvlan_cfg.default_tag2 = 0;
8685
8686 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8687 vport->rxvlan_cfg.strip_tag1_en = false;
8688 vport->rxvlan_cfg.strip_tag2_en =
8689 vport->rxvlan_cfg.rx_vlan_offload_en;
8690 } else {
8691 vport->rxvlan_cfg.strip_tag1_en =
8692 vport->rxvlan_cfg.rx_vlan_offload_en;
8693 vport->rxvlan_cfg.strip_tag2_en = true;
8694 }
8695 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8696 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8697
8698 ret = hclge_set_vlan_tx_offload_cfg(vport);
8699 if (ret)
8700 return ret;
8701
8702 return hclge_set_vlan_rx_offload_cfg(vport);
8703}
8704
5f6ea83f
PL
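/* Program the VLAN protocol types (TPIDs) used by hardware for RX VLAN
 * parsing and TX VLAN insertion from hdev->vlan_type_cfg.
 */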
8705static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
8706{
8707 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
8708 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
8709 struct hclge_desc desc;
8710 int status;
8711
8712 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
8713 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
8714 rx_req->ot_fst_vlan_type =
8715 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
8716 rx_req->ot_sec_vlan_type =
8717 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
8718 rx_req->in_fst_vlan_type =
8719 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
8720 rx_req->in_sec_vlan_type =
8721 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
8722
8723 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8724 if (status) {
8725 dev_err(&hdev->pdev->dev,
8726 "Send rxvlan protocol type command fail, ret =%d\n",
8727 status);
8728 return status;
8729 }
8730
8731 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
8732
d0d72bac 8733 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
5f6ea83f
PL
8734 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
8735 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
8736
8737 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8738 if (status)
8739 dev_err(&hdev->pdev->dev,
8740 "Send txvlan protocol type command fail, ret =%d\n",
8741 status);
8742
8743 return status;
8744}
8745
46a3df9f
S
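/* Initialize VLAN handling for the PF: enable the hardware VLAN filters
 * (per function on device version V2 and later), program the default 802.1Q
 * protocol types, apply each vport's VLAN offload settings and add VLAN 0
 * to the filter.
 */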
8746static int hclge_init_vlan_config(struct hclge_dev *hdev)
8747{
5f6ea83f
PL
8748#define HCLGE_DEF_VLAN_TYPE 0x8100
8749
c60edc17 8750 struct hnae3_handle *handle = &hdev->vport[0].nic;
5f6ea83f 8751 struct hclge_vport *vport;
46a3df9f 8752 int ret;
5f6ea83f
PL
8753 int i;
8754
295ba232 8755 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
30ebc576
JS
8756 /* for revision 0x21, vf vlan filter is per function */
8757 for (i = 0; i < hdev->num_alloc_vport; i++) {
8758 vport = &hdev->vport[i];
8759 ret = hclge_set_vlan_filter_ctrl(hdev,
8760 HCLGE_FILTER_TYPE_VF,
8761 HCLGE_FILTER_FE_EGRESS,
8762 true,
8763 vport->vport_id);
8764 if (ret)
8765 return ret;
8766 }
46a3df9f 8767
64d114f0 8768 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
30ebc576
JS
8769 HCLGE_FILTER_FE_INGRESS, true,
8770 0);
64d114f0
ZL
8771 if (ret)
8772 return ret;
8773 } else {
8774 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8775 HCLGE_FILTER_FE_EGRESS_V1_B,
30ebc576 8776 true, 0);
64d114f0
ZL
8777 if (ret)
8778 return ret;
8779 }
46a3df9f 8780
c60edc17
JS
8781 handle->netdev_flags |= HNAE3_VLAN_FLTR;
8782
5f6ea83f
PL
8783 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8784 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8785 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8786 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8787 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
8788 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
8789
8790 ret = hclge_set_vlan_protocol_type(hdev);
5e43aef8
L
8791 if (ret)
8792 return ret;
46a3df9f 8793
5f6ea83f 8794 for (i = 0; i < hdev->num_alloc_vport; i++) {
741fca16 8795 u16 vlan_tag;
dcb35cce 8796
741fca16
JS
8797 vport = &hdev->vport[i];
8798 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
5f6ea83f 8799
741fca16
JS
8800 ret = hclge_vlan_offload_cfg(vport,
8801 vport->port_base_vlan_cfg.state,
8802 vlan_tag);
5f6ea83f
PL
8803 if (ret)
8804 return ret;
8805 }
8806
dc8131d8 8807 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
46a3df9f
S
8808}
8809
21e043cd
JS
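/* Record a VLAN id in the vport's software VLAN list; writen_to_tbl marks
 * whether it has already been written to the hardware VLAN filter table.
 */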
8810static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8811 bool writen_to_tbl)
c6075b19 8812{
8813 struct hclge_vport_vlan_cfg *vlan;
8814
c6075b19 8815 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
8816 if (!vlan)
8817 return;
8818
21e043cd 8819 vlan->hd_tbl_status = writen_to_tbl;
c6075b19 8820 vlan->vlan_id = vlan_id;
8821
8822 list_add_tail(&vlan->node, &vport->vlan_list);
8823}
8824
21e043cd
JS
8825static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
8826{
8827 struct hclge_vport_vlan_cfg *vlan, *tmp;
8828 struct hclge_dev *hdev = vport->back;
8829 int ret;
8830
8831 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8832 if (!vlan->hd_tbl_status) {
8833 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8834 vport->vport_id,
70a21490 8835 vlan->vlan_id, false);
21e043cd
JS
8836 if (ret) {
8837 dev_err(&hdev->pdev->dev,
8838 "restore vport vlan list failed, ret=%d\n",
8839 ret);
8840 return ret;
8841 }
8842 }
8843 vlan->hd_tbl_status = true;
8844 }
8845
8846 return 0;
8847}
8848
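/* Remove a VLAN id from the vport's software VLAN list and, if requested,
 * delete it from the hardware VLAN filter table as well.
 */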
8849static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8850 bool is_write_tbl)
c6075b19 8851{
8852 struct hclge_vport_vlan_cfg *vlan, *tmp;
8853 struct hclge_dev *hdev = vport->back;
8854
8855 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8856 if (vlan->vlan_id == vlan_id) {
8857 if (is_write_tbl && vlan->hd_tbl_status)
8858 hclge_set_vlan_filter_hw(hdev,
8859 htons(ETH_P_8021Q),
8860 vport->vport_id,
70a21490 8861 vlan_id,
c6075b19 8862 true);
8863
8864 list_del(&vlan->node);
8865 kfree(vlan);
8866 break;
8867 }
8868 }
8869}
8870
8871void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
8872{
8873 struct hclge_vport_vlan_cfg *vlan, *tmp;
8874 struct hclge_dev *hdev = vport->back;
8875
8876 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8877 if (vlan->hd_tbl_status)
8878 hclge_set_vlan_filter_hw(hdev,
8879 htons(ETH_P_8021Q),
8880 vport->vport_id,
70a21490 8881 vlan->vlan_id,
c6075b19 8882 true);
8883
8884 vlan->hd_tbl_status = false;
8885 if (is_del_list) {
8886 list_del(&vlan->node);
8887 kfree(vlan);
8888 }
8889 }
23b4201d 8890 clear_bit(vport->vport_id, hdev->vf_vlan_full);
c6075b19 8891}
8892
8893void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
8894{
8895 struct hclge_vport_vlan_cfg *vlan, *tmp;
8896 struct hclge_vport *vport;
8897 int i;
8898
c6075b19 8899 for (i = 0; i < hdev->num_alloc_vport; i++) {
8900 vport = &hdev->vport[i];
8901 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8902 list_del(&vlan->node);
8903 kfree(vlan);
8904 }
8905 }
c6075b19 8906}
8907
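/* Rewrite the vport's VLAN filter entries into hardware after a reset:
 * either the single port based VLAN, or every VLAN recorded in the vport's
 * software VLAN list.
 */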
039ba863 8908void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
b524b38f 8909{
b524b38f
JS
8910 struct hclge_vport_vlan_cfg *vlan, *tmp;
8911 struct hclge_dev *hdev = vport->back;
b943e033 8912 u16 vlan_proto;
039ba863
JS
8913 u16 vlan_id;
8914 u16 state;
8915 int ret;
b524b38f 8916
039ba863
JS
8917 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
8918 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8919 state = vport->port_base_vlan_cfg.state;
b524b38f 8920
039ba863
JS
8921 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
8922 clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
8923 hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
8924 vport->vport_id, vlan_id,
8925 false);
8926 return;
8927 }
22044f95 8928
039ba863
JS
8929 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8930 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8931 vport->vport_id,
8932 vlan->vlan_id, false);
8933 if (ret)
8934 break;
8935 vlan->hd_tbl_status = true;
b524b38f 8936 }
b524b38f
JS
8937}
8938
ee4bcd3b
JS
 8939/* For global reset and IMP reset, hardware will clear the MAC table, so
 8940 * we change the MAC address state from ACTIVE to TO_ADD, then they can be
 8941 * restored in the service task after the reset completes. Furthermore, the
 8942 * MAC addresses with state TO_DEL or DEL_FAIL do not need to be restored
 8943 * after reset, so just remove these MAC nodes from mac_list.
 8944 */
8945static void hclge_mac_node_convert_for_reset(struct list_head *list)
8946{
8947 struct hclge_mac_node *mac_node, *tmp;
8948
8949 list_for_each_entry_safe(mac_node, tmp, list, node) {
8950 if (mac_node->state == HCLGE_MAC_ACTIVE) {
8951 mac_node->state = HCLGE_MAC_TO_ADD;
8952 } else if (mac_node->state == HCLGE_MAC_TO_DEL) {
8953 list_del(&mac_node->node);
8954 kfree(mac_node);
8955 }
8956 }
8957}
8958
8959void hclge_restore_mac_table_common(struct hclge_vport *vport)
8960{
8961 spin_lock_bh(&vport->mac_list_lock);
8962
8963 hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
8964 hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
8965 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8966
8967 spin_unlock_bh(&vport->mac_list_lock);
8968}
8969
039ba863
JS
8970static void hclge_restore_hw_table(struct hclge_dev *hdev)
8971{
8972 struct hclge_vport *vport = &hdev->vport[0];
8973 struct hnae3_handle *handle = &vport->nic;
8974
8975 hclge_restore_mac_table_common(vport);
8976 hclge_restore_vport_vlan_table(vport);
8977 set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
8978
8979 hclge_restore_fd_entries(handle);
8980}
8981
b2641e2a 8982int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
052ece6d
PL
8983{
8984 struct hclge_vport *vport = hclge_get_vport(handle);
8985
44e626f7
JS
8986 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8987 vport->rxvlan_cfg.strip_tag1_en = false;
8988 vport->rxvlan_cfg.strip_tag2_en = enable;
8989 } else {
8990 vport->rxvlan_cfg.strip_tag1_en = enable;
8991 vport->rxvlan_cfg.strip_tag2_en = true;
8992 }
052ece6d
PL
8993 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8994 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
44e626f7 8995 vport->rxvlan_cfg.rx_vlan_offload_en = enable;
052ece6d
PL
8996
8997 return hclge_set_vlan_rx_offload_cfg(vport);
8998}
8999
21e043cd
JS
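/* Switch the hardware VLAN filter entries when the port based VLAN state
 * changes: on enable, drop the per-VLAN entries and install the new port
 * VLAN; on disable, remove the old port VLAN and restore the vport's
 * VLAN list.
 */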
9000static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
9001 u16 port_base_vlan_state,
9002 struct hclge_vlan_info *new_info,
9003 struct hclge_vlan_info *old_info)
9004{
9005 struct hclge_dev *hdev = vport->back;
9006 int ret;
9007
9008 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
9009 hclge_rm_vport_all_vlan_table(vport, false);
9010 return hclge_set_vlan_filter_hw(hdev,
9011 htons(new_info->vlan_proto),
9012 vport->vport_id,
9013 new_info->vlan_tag,
70a21490 9014 false);
21e043cd
JS
9015 }
9016
9017 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
9018 vport->vport_id, old_info->vlan_tag,
70a21490 9019 true);
21e043cd
JS
9020 if (ret)
9021 return ret;
9022
9023 return hclge_add_vport_all_vlan_table(vport);
9024}
9025
9026int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
9027 struct hclge_vlan_info *vlan_info)
9028{
9029 struct hnae3_handle *nic = &vport->nic;
9030 struct hclge_vlan_info *old_vlan_info;
9031 struct hclge_dev *hdev = vport->back;
9032 int ret;
9033
9034 old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
9035
9036 ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
9037 if (ret)
9038 return ret;
9039
9040 if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
9041 /* add new VLAN tag */
8a9a654b
JS
9042 ret = hclge_set_vlan_filter_hw(hdev,
9043 htons(vlan_info->vlan_proto),
21e043cd
JS
9044 vport->vport_id,
9045 vlan_info->vlan_tag,
70a21490 9046 false);
21e043cd
JS
9047 if (ret)
9048 return ret;
9049
9050 /* remove old VLAN tag */
8a9a654b
JS
9051 ret = hclge_set_vlan_filter_hw(hdev,
9052 htons(old_vlan_info->vlan_proto),
21e043cd
JS
9053 vport->vport_id,
9054 old_vlan_info->vlan_tag,
70a21490 9055 true);
21e043cd
JS
9056 if (ret)
9057 return ret;
9058
9059 goto update;
9060 }
9061
9062 ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
9063 old_vlan_info);
9064 if (ret)
9065 return ret;
9066
 9067	/* update state only when disabling/enabling port based VLAN */
9068 vport->port_base_vlan_cfg.state = state;
9069 if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
9070 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
9071 else
9072 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
9073
9074update:
9075 vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
9076 vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
9077 vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
9078
9079 return 0;
9080}
9081
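/* Work out how a requested VLAN changes the port based VLAN state:
 * no change, enable, disable or modify.
 */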
9082static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
9083 enum hnae3_port_base_vlan_state state,
9084 u16 vlan)
9085{
9086 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9087 if (!vlan)
9088 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
9089 else
9090 return HNAE3_PORT_BASE_VLAN_ENABLE;
9091 } else {
9092 if (!vlan)
9093 return HNAE3_PORT_BASE_VLAN_DISABLE;
9094 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
9095 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
9096 else
9097 return HNAE3_PORT_BASE_VLAN_MODIFY;
9098 }
9099}
9100
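/* Handle a set-VF-VLAN request from the stack: validate the parameters,
 * work out the port based VLAN state change, and either apply it directly
 * (VF not alive) or push the new setting to the VF.
 */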
9101static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
9102 u16 vlan, u8 qos, __be16 proto)
9103{
9104 struct hclge_vport *vport = hclge_get_vport(handle);
9105 struct hclge_dev *hdev = vport->back;
9106 struct hclge_vlan_info vlan_info;
9107 u16 state;
9108 int ret;
9109
295ba232 9110 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
21e043cd
JS
9111 return -EOPNOTSUPP;
9112
1c985508
JS
9113 vport = hclge_get_vf_vport(hdev, vfid);
9114 if (!vport)
9115 return -EINVAL;
9116
21e043cd 9117	/* qos is a 3-bit value, so it cannot be bigger than 7 */
1c985508 9118 if (vlan > VLAN_N_VID - 1 || qos > 7)
21e043cd
JS
9119 return -EINVAL;
9120 if (proto != htons(ETH_P_8021Q))
9121 return -EPROTONOSUPPORT;
9122
21e043cd
JS
9123 state = hclge_get_port_base_vlan_state(vport,
9124 vport->port_base_vlan_cfg.state,
9125 vlan);
9126 if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
9127 return 0;
9128
9129 vlan_info.vlan_tag = vlan;
9130 vlan_info.qos = qos;
9131 vlan_info.vlan_proto = ntohs(proto);
9132
92f11ea1
JS
9133 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
9134 return hclge_update_port_base_vlan_cfg(vport, state,
9135 &vlan_info);
9136 } else {
9137 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
1c985508 9138 vport->vport_id, state,
92f11ea1
JS
9139 vlan, qos,
9140 ntohs(proto));
9141 return ret;
9142 }
21e043cd
JS
9143}
9144
59359fc8
JS
9145static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
9146{
9147 struct hclge_vlan_info *vlan_info;
9148 struct hclge_vport *vport;
9149 int ret;
9150 int vf;
9151
9152 /* clear port base vlan for all vf */
9153 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
9154 vport = &hdev->vport[vf];
9155 vlan_info = &vport->port_base_vlan_cfg.vlan_info;
9156
9157 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9158 vport->vport_id,
9159 vlan_info->vlan_tag, true);
9160 if (ret)
9161 dev_err(&hdev->pdev->dev,
9162 "failed to clear vf vlan for vf%d, ret = %d\n",
9163 vf - HCLGE_VF_VPORT_START_NUM, ret);
9164 }
9165}
9166
21e043cd
JS
9167int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
9168 u16 vlan_id, bool is_kill)
9169{
9170 struct hclge_vport *vport = hclge_get_vport(handle);
9171 struct hclge_dev *hdev = vport->back;
9172 bool writen_to_tbl = false;
9173 int ret = 0;
9174
b7b5d25b
GL
 9175	/* When the device is resetting or the reset has failed, firmware is
 9176	 * unable to handle the mailbox. Just record the vlan id, and remove it
fe4144d4
JS
 9177	 * after the reset is finished.
 9178	 */
b7b5d25b
GL
9179 if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9180 test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
fe4144d4
JS
9181 set_bit(vlan_id, vport->vlan_del_fail_bmap);
9182 return -EBUSY;
9183 }
9184
46ee7350 9185	/* when port based vlan is enabled, we use the port based vlan as the
fe4144d4
JS
 9186	 * vlan filter entry. In this case, we don't update the vlan filter table
 9187	 * when the user adds a new vlan or removes an existing vlan, just update
 9188	 * the vport vlan list. The vlan ids in the vlan list will not be written
 9189	 * to the vlan filter table until port based vlan is disabled
21e043cd
JS
 9190	 */
9191 if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9192 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
70a21490 9193 vlan_id, is_kill);
21e043cd
JS
9194 writen_to_tbl = true;
9195 }
9196
fe4144d4
JS
9197 if (!ret) {
9198 if (is_kill)
9199 hclge_rm_vport_vlan_table(vport, vlan_id, false);
9200 else
9201 hclge_add_vport_vlan_table(vport, vlan_id,
9202 writen_to_tbl);
9203 } else if (is_kill) {
46ee7350 9204		/* when removing the hw vlan filter failed, record the vlan id,
fe4144d4
JS
 9205		 * and try to remove it from hw later, to be consistent
 9206		 * with the stack
 9207		 */
9208 set_bit(vlan_id, vport->vlan_del_fail_bmap);
9209 }
9210 return ret;
9211}
21e043cd 9212
fe4144d4
JS
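/* Retry the VLAN deletions that previously failed (recorded in each vport's
 * vlan_del_fail_bmap), bounded by HCLGE_MAX_SYNC_COUNT per run.
 */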
9213static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
9214{
9215#define HCLGE_MAX_SYNC_COUNT 60
21e043cd 9216
fe4144d4
JS
9217 int i, ret, sync_cnt = 0;
9218 u16 vlan_id;
9219
9220 /* start from vport 1 for PF is always alive */
9221 for (i = 0; i < hdev->num_alloc_vport; i++) {
9222 struct hclge_vport *vport = &hdev->vport[i];
9223
9224 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9225 VLAN_N_VID);
9226 while (vlan_id != VLAN_N_VID) {
9227 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9228 vport->vport_id, vlan_id,
70a21490 9229 true);
fe4144d4
JS
9230 if (ret && ret != -EINVAL)
9231 return;
9232
9233 clear_bit(vlan_id, vport->vlan_del_fail_bmap);
9234 hclge_rm_vport_vlan_table(vport, vlan_id, false);
9235
9236 sync_cnt++;
9237 if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
9238 return;
9239
9240 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9241 VLAN_N_VID);
9242 }
9243 }
21e043cd
JS
9244}
9245
e6d7d79d 9246static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
46a3df9f 9247{
d44f9b63 9248 struct hclge_config_max_frm_size_cmd *req;
46a3df9f 9249 struct hclge_desc desc;
46a3df9f 9250
46a3df9f
S
9251 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
9252
d44f9b63 9253 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
e6d7d79d 9254 req->max_frm_size = cpu_to_le16(new_mps);
8fc7346c 9255 req->min_frm_size = HCLGE_MAC_MIN_FRAME;
46a3df9f 9256
e6d7d79d 9257 return hclge_cmd_send(&hdev->hw, &desc, 1);
46a3df9f
S
9258}
9259
dd72140c
FL
9260static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
9261{
9262 struct hclge_vport *vport = hclge_get_vport(handle);
818f1675
YL
9263
9264 return hclge_set_vport_mtu(vport, new_mtu);
9265}
9266
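/* Apply a new MTU for a vport: a VF's max frame size must not exceed the
 * PF's, and the PF's must not be smaller than any VF's. For the PF, the MAC
 * frame size and packet buffers are reconfigured with the NIC client
 * stopped.
 */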
9267int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
9268{
dd72140c 9269 struct hclge_dev *hdev = vport->back;
63cbf7a9 9270 int i, max_frm_size, ret;
dd72140c 9271
9e690456 9272	/* HW supports 2-layer VLAN */
e6d7d79d
YL
9273 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
9274 if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
9275 max_frm_size > HCLGE_MAC_MAX_FRAME)
9276 return -EINVAL;
9277
818f1675
YL
9278 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
9279 mutex_lock(&hdev->vport_lock);
9280 /* VF's mps must fit within hdev->mps */
9281 if (vport->vport_id && max_frm_size > hdev->mps) {
9282 mutex_unlock(&hdev->vport_lock);
9283 return -EINVAL;
9284 } else if (vport->vport_id) {
9285 vport->mps = max_frm_size;
9286 mutex_unlock(&hdev->vport_lock);
9287 return 0;
9288 }
9289
 9290	/* PF's mps must be no less than any VF's mps */
9291 for (i = 1; i < hdev->num_alloc_vport; i++)
9292 if (max_frm_size < hdev->vport[i].mps) {
9293 mutex_unlock(&hdev->vport_lock);
9294 return -EINVAL;
9295 }
9296
cdca4c48
YL
9297 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
9298
e6d7d79d 9299 ret = hclge_set_mac_mtu(hdev, max_frm_size);
dd72140c
FL
9300 if (ret) {
9301 dev_err(&hdev->pdev->dev,
9302 "Change mtu fail, ret =%d\n", ret);
818f1675 9303 goto out;
dd72140c
FL
9304 }
9305
e6d7d79d 9306 hdev->mps = max_frm_size;
818f1675 9307 vport->mps = max_frm_size;
e6d7d79d 9308
dd72140c
FL
9309 ret = hclge_buffer_alloc(hdev);
9310 if (ret)
9311 dev_err(&hdev->pdev->dev,
9312 "Allocate buffer fail, ret =%d\n", ret);
9313
818f1675 9314out:
cdca4c48 9315 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
818f1675 9316 mutex_unlock(&hdev->vport_lock);
dd72140c
FL
9317 return ret;
9318}
9319
46a3df9f
S
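/* Assert or deassert the hardware reset of a single TQP via the
 * RESET_TQP_QUEUE command.
 */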
9320static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
9321 bool enable)
9322{
d44f9b63 9323 struct hclge_reset_tqp_queue_cmd *req;
46a3df9f
S
9324 struct hclge_desc desc;
9325 int ret;
9326
9327 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
9328
d44f9b63 9329 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
9a5ef4aa 9330 req->tqp_id = cpu_to_le16(queue_id);
b9a8f883
YL
9331 if (enable)
9332 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
46a3df9f
S
9333
9334 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9335 if (ret) {
9336 dev_err(&hdev->pdev->dev,
9337 "Send tqp reset cmd error, status =%d\n", ret);
9338 return ret;
9339 }
9340
9341 return 0;
9342}
9343
9344static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
9345{
d44f9b63 9346 struct hclge_reset_tqp_queue_cmd *req;
46a3df9f
S
9347 struct hclge_desc desc;
9348 int ret;
9349
9350 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
9351
d44f9b63 9352 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
9a5ef4aa 9353 req->tqp_id = cpu_to_le16(queue_id);
46a3df9f
S
9354
9355 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9356 if (ret) {
9357 dev_err(&hdev->pdev->dev,
9358 "Get reset status error, status =%d\n", ret);
9359 return ret;
9360 }
9361
e4e87715 9362 return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
46a3df9f
S
9363}
9364
0c29d191 9365u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
814e0274
PL
9366{
9367 struct hnae3_queue *queue;
9368 struct hclge_tqp *tqp;
9369
9370 queue = handle->kinfo.tqp[queue_id];
9371 tqp = container_of(queue, struct hclge_tqp, q);
9372
9373 return tqp->index;
9374}
9375
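/* Reset one TQP for the PF: disable the queue, assert the TQP reset, poll
 * until hardware reports the reset is done, then deassert it.
 */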
7fa6be4f 9376int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
46a3df9f
S
9377{
9378 struct hclge_vport *vport = hclge_get_vport(handle);
9379 struct hclge_dev *hdev = vport->back;
9380 int reset_try_times = 0;
9381 int reset_status;
814e0274 9382 u16 queue_gid;
63cbf7a9 9383 int ret;
46a3df9f 9384
814e0274
PL
9385 queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
9386
46a3df9f
S
9387 ret = hclge_tqp_enable(hdev, queue_id, 0, false);
9388 if (ret) {
7fa6be4f
HT
9389 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
9390 return ret;
46a3df9f
S
9391 }
9392
814e0274 9393 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
46a3df9f 9394 if (ret) {
7fa6be4f
HT
9395 dev_err(&hdev->pdev->dev,
9396 "Send reset tqp cmd fail, ret = %d\n", ret);
9397 return ret;
46a3df9f
S
9398 }
9399
46a3df9f 9400 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
814e0274 9401 reset_status = hclge_get_reset_status(hdev, queue_gid);
46a3df9f
S
9402 if (reset_status)
9403 break;
e8df45c2
ZL
9404
9405 /* Wait for tqp hw reset */
9406 usleep_range(1000, 1200);
46a3df9f
S
9407 }
9408
9409 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
7fa6be4f
HT
9410 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
9411 return ret;
46a3df9f
S
9412 }
9413
814e0274 9414 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7fa6be4f
HT
9415 if (ret)
9416 dev_err(&hdev->pdev->dev,
9417 "Deassert the soft reset fail, ret = %d\n", ret);
9418
9419 return ret;
46a3df9f
S
9420}
9421
1a426f8b
PL
9422void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
9423{
9424 struct hclge_dev *hdev = vport->back;
9425 int reset_try_times = 0;
9426 int reset_status;
9427 u16 queue_gid;
9428 int ret;
9429
9430 queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
9431
9432 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
9433 if (ret) {
9434 dev_warn(&hdev->pdev->dev,
9435 "Send reset tqp cmd fail, ret = %d\n", ret);
9436 return;
9437 }
9438
1a426f8b 9439 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
1a426f8b
PL
9440 reset_status = hclge_get_reset_status(hdev, queue_gid);
9441 if (reset_status)
9442 break;
e8df45c2
ZL
9443
9444 /* Wait for tqp hw reset */
9445 usleep_range(1000, 1200);
1a426f8b
PL
9446 }
9447
9448 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
9449 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
9450 return;
9451 }
9452
9453 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
9454 if (ret)
9455 dev_warn(&hdev->pdev->dev,
9456 "Deassert the soft reset fail, ret = %d\n", ret);
9457}
9458
46a3df9f
S
9459static u32 hclge_get_fw_version(struct hnae3_handle *handle)
9460{
9461 struct hclge_vport *vport = hclge_get_vport(handle);
9462 struct hclge_dev *hdev = vport->back;
9463
9464 return hdev->fw_version;
9465}
9466
61387774
PL
9467static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
9468{
9469 struct phy_device *phydev = hdev->hw.mac.phydev;
9470
9471 if (!phydev)
9472 return;
9473
70814e81 9474 phy_set_asym_pause(phydev, rx_en, tx_en);
61387774
PL
9475}
9476
9477static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
9478{
61387774
PL
9479 int ret;
9480
40173a2e 9481 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
61387774 9482 return 0;
61387774
PL
9483
9484 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
aacbe27e
YL
9485 if (ret)
9486 dev_err(&hdev->pdev->dev,
9487 "configure pauseparam error, ret = %d.\n", ret);
61387774 9488
aacbe27e 9489 return ret;
61387774
PL
9490}
9491
1770a7a3
PL
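/* Resolve flow control from the local and link partner pause advertisements
 * after autoneg and apply it to the MAC (pause is disabled for half duplex).
 */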
9492int hclge_cfg_flowctrl(struct hclge_dev *hdev)
9493{
9494 struct phy_device *phydev = hdev->hw.mac.phydev;
9495 u16 remote_advertising = 0;
63cbf7a9 9496 u16 local_advertising;
1770a7a3
PL
9497 u32 rx_pause, tx_pause;
9498 u8 flowctl;
9499
9500 if (!phydev->link || !phydev->autoneg)
9501 return 0;
9502
3c1bcc86 9503 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
1770a7a3
PL
9504
9505 if (phydev->pause)
9506 remote_advertising = LPA_PAUSE_CAP;
9507
9508 if (phydev->asym_pause)
9509 remote_advertising |= LPA_PAUSE_ASYM;
9510
9511 flowctl = mii_resolve_flowctrl_fdx(local_advertising,
9512 remote_advertising);
9513 tx_pause = flowctl & FLOW_CTRL_TX;
9514 rx_pause = flowctl & FLOW_CTRL_RX;
9515
9516 if (phydev->duplex == HCLGE_MAC_HALF) {
9517 tx_pause = 0;
9518 rx_pause = 0;
9519 }
9520
9521 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
9522}
9523
46a3df9f
S
9524static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
9525 u32 *rx_en, u32 *tx_en)
9526{
9527 struct hclge_vport *vport = hclge_get_vport(handle);
9528 struct hclge_dev *hdev = vport->back;
fb89629f 9529 struct phy_device *phydev = hdev->hw.mac.phydev;
46a3df9f 9530
fb89629f 9531 *auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
46a3df9f
S
9532
9533 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
9534 *rx_en = 0;
9535 *tx_en = 0;
9536 return;
9537 }
9538
9539 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
9540 *rx_en = 1;
9541 *tx_en = 0;
9542 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
9543 *tx_en = 1;
9544 *rx_en = 0;
9545 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
9546 *rx_en = 1;
9547 *tx_en = 1;
9548 } else {
9549 *rx_en = 0;
9550 *tx_en = 0;
9551 }
9552}
9553
aacbe27e
YL
9554static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
9555 u32 rx_en, u32 tx_en)
9556{
9557 if (rx_en && tx_en)
9558 hdev->fc_mode_last_time = HCLGE_FC_FULL;
9559 else if (rx_en && !tx_en)
9560 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
9561 else if (!rx_en && tx_en)
9562 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
9563 else
9564 hdev->fc_mode_last_time = HCLGE_FC_NONE;
9565
9566 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
9567}
9568
61387774
PL
9569static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
9570 u32 rx_en, u32 tx_en)
9571{
9572 struct hclge_vport *vport = hclge_get_vport(handle);
9573 struct hclge_dev *hdev = vport->back;
9574 struct phy_device *phydev = hdev->hw.mac.phydev;
9575 u32 fc_autoneg;
9576
fb89629f
JS
9577 if (phydev) {
9578 fc_autoneg = hclge_get_autoneg(handle);
9579 if (auto_neg != fc_autoneg) {
9580 dev_info(&hdev->pdev->dev,
9581 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
9582 return -EOPNOTSUPP;
9583 }
61387774
PL
9584 }
9585
9586 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
9587 dev_info(&hdev->pdev->dev,
9588 "Priority flow control enabled. Cannot set link flow control.\n");
9589 return -EOPNOTSUPP;
9590 }
9591
9592 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
9593
aacbe27e
YL
9594 hclge_record_user_pauseparam(hdev, rx_en, tx_en);
9595
fb89629f 9596 if (!auto_neg)
61387774
PL
9597 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
9598
22f48e24
JS
9599 if (phydev)
9600 return phy_start_aneg(phydev);
9601
fb89629f 9602 return -EOPNOTSUPP;
61387774
PL
9603}
9604
46a3df9f
S
9605static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
9606 u8 *auto_neg, u32 *speed, u8 *duplex)
9607{
9608 struct hclge_vport *vport = hclge_get_vport(handle);
9609 struct hclge_dev *hdev = vport->back;
9610
9611 if (speed)
9612 *speed = hdev->hw.mac.speed;
9613 if (duplex)
9614 *duplex = hdev->hw.mac.duplex;
9615 if (auto_neg)
9616 *auto_neg = hdev->hw.mac.autoneg;
9617}
9618
88d10bd6
JS
9619static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
9620 u8 *module_type)
46a3df9f
S
9621{
9622 struct hclge_vport *vport = hclge_get_vport(handle);
9623 struct hclge_dev *hdev = vport->back;
9624
a9775bb6
GH
 9625	/* When the nic is down, the service task is not running and does not
 9626	 * update the port information every second. Query the port information
 9627	 * before returning the media type to ensure the media information is correct.
 9628	 */
9629 hclge_update_port_info(hdev);
9630
46a3df9f
S
9631 if (media_type)
9632 *media_type = hdev->hw.mac.media_type;
88d10bd6
JS
9633
9634 if (module_type)
9635 *module_type = hdev->hw.mac.module_type;
46a3df9f
S
9636}
9637
9638static void hclge_get_mdix_mode(struct hnae3_handle *handle,
9639 u8 *tp_mdix_ctrl, u8 *tp_mdix)
9640{
9641 struct hclge_vport *vport = hclge_get_vport(handle);
9642 struct hclge_dev *hdev = vport->back;
9643 struct phy_device *phydev = hdev->hw.mac.phydev;
ebaf1908
WL
9644 int mdix_ctrl, mdix, is_resolved;
9645 unsigned int retval;
46a3df9f
S
9646
9647 if (!phydev) {
9648 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
9649 *tp_mdix = ETH_TP_MDI_INVALID;
9650 return;
9651 }
9652
9653 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
9654
9655 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
e4e87715
PL
9656 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
9657 HCLGE_PHY_MDIX_CTRL_S);
46a3df9f
S
9658
9659 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
e4e87715
PL
9660 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
9661 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
46a3df9f
S
9662
9663 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
9664
9665 switch (mdix_ctrl) {
9666 case 0x0:
9667 *tp_mdix_ctrl = ETH_TP_MDI;
9668 break;
9669 case 0x1:
9670 *tp_mdix_ctrl = ETH_TP_MDI_X;
9671 break;
9672 case 0x3:
9673 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
9674 break;
9675 default:
9676 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
9677 break;
9678 }
9679
9680 if (!is_resolved)
9681 *tp_mdix = ETH_TP_MDI_INVALID;
9682 else if (mdix)
9683 *tp_mdix = ETH_TP_MDI_X;
9684 else
9685 *tp_mdix = ETH_TP_MDI;
9686}
9687
bb87be87
YL
9688static void hclge_info_show(struct hclge_dev *hdev)
9689{
9690 struct device *dev = &hdev->pdev->dev;
9691
9692 dev_info(dev, "PF info begin:\n");
9693
adcf738b
GL
9694 dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
9695 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
9696 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
9697 dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
9698 dev_info(dev, "Numbers of vmdp vports: %u\n", hdev->num_vmdq_vport);
9699 dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
9700 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
9701 dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
9702 dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
9703 dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
bb87be87
YL
9704 dev_info(dev, "This is %s PF\n",
9705 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
9706 dev_info(dev, "DCB %s\n",
9707 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
9708 dev_info(dev, "MQPRIO %s\n",
9709 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
9710
9711 dev_info(dev, "PF info end.\n");
9712}
9713
994e04f1
HT
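/* Initialize the NIC client on a vport and enable the NIC hardware error
 * interrupts; if a reset happens while the client is being set up, roll
 * back and return -EBUSY.
 */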
9714static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
9715 struct hclge_vport *vport)
9716{
9717 struct hnae3_client *client = vport->nic.client;
9718 struct hclge_dev *hdev = ae_dev->priv;
0bfdf286 9719 int rst_cnt = hdev->rst_stats.reset_cnt;
994e04f1
HT
9720 int ret;
9721
9722 ret = client->ops->init_instance(&vport->nic);
9723 if (ret)
9724 return ret;
9725
9726 set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
7cf9c069
HT
9727 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9728 rst_cnt != hdev->rst_stats.reset_cnt) {
9729 ret = -EBUSY;
9730 goto init_nic_err;
9731 }
9732
00ea6e5f
WL
9733 /* Enable nic hw error interrupts */
9734 ret = hclge_config_nic_hw_error(hdev, true);
bcf643c5 9735 if (ret) {
00ea6e5f
WL
9736 dev_err(&ae_dev->pdev->dev,
9737 "fail(%d) to enable hw error interrupts\n", ret);
bcf643c5
WL
9738 goto init_nic_err;
9739 }
9740
9741 hnae3_set_client_init_flag(client, ae_dev, 1);
00ea6e5f 9742
994e04f1
HT
9743 if (netif_msg_drv(&hdev->vport->nic))
9744 hclge_info_show(hdev);
9745
00ea6e5f 9746 return ret;
7cf9c069
HT
9747
9748init_nic_err:
9749 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9750 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9751 msleep(HCLGE_WAIT_RESET_DONE);
9752
9753 client->ops->uninit_instance(&vport->nic, 0);
9754
9755 return ret;
994e04f1
HT
9756}
9757
9758static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
9759 struct hclge_vport *vport)
9760{
994e04f1 9761 struct hclge_dev *hdev = ae_dev->priv;
31a57fde 9762 struct hnae3_client *client;
7cf9c069 9763 int rst_cnt;
994e04f1
HT
9764 int ret;
9765
9766 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
9767 !hdev->nic_client)
9768 return 0;
9769
9770 client = hdev->roce_client;
9771 ret = hclge_init_roce_base_info(vport);
9772 if (ret)
9773 return ret;
9774
7cf9c069 9775 rst_cnt = hdev->rst_stats.reset_cnt;
994e04f1
HT
9776 ret = client->ops->init_instance(&vport->roce);
9777 if (ret)
9778 return ret;
9779
9780 set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
7cf9c069
HT
9781 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9782 rst_cnt != hdev->rst_stats.reset_cnt) {
9783 ret = -EBUSY;
9784 goto init_roce_err;
9785 }
9786
72fcd2be
HT
9787 /* Enable roce ras interrupts */
9788 ret = hclge_config_rocee_ras_interrupt(hdev, true);
9789 if (ret) {
9790 dev_err(&ae_dev->pdev->dev,
9791 "fail(%d) to enable roce ras interrupts\n", ret);
9792 goto init_roce_err;
9793 }
9794
994e04f1
HT
9795 hnae3_set_client_init_flag(client, ae_dev, 1);
9796
9797 return 0;
7cf9c069
HT
9798
9799init_roce_err:
9800 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9801 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9802 msleep(HCLGE_WAIT_RESET_DONE);
9803
9804 hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
9805
9806 return ret;
994e04f1
HT
9807}
9808
46a3df9f
S
9809static int hclge_init_client_instance(struct hnae3_client *client,
9810 struct hnae3_ae_dev *ae_dev)
9811{
9812 struct hclge_dev *hdev = ae_dev->priv;
9813 struct hclge_vport *vport;
9814 int i, ret;
9815
9816 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9817 vport = &hdev->vport[i];
9818
9819 switch (client->type) {
9820 case HNAE3_CLIENT_KNIC:
46a3df9f
S
9821 hdev->nic_client = client;
9822 vport->nic.client = client;
994e04f1 9823 ret = hclge_init_nic_client_instance(ae_dev, vport);
46a3df9f 9824 if (ret)
49dd8054 9825 goto clear_nic;
46a3df9f 9826
994e04f1
HT
9827 ret = hclge_init_roce_client_instance(ae_dev, vport);
9828 if (ret)
9829 goto clear_roce;
46a3df9f 9830
46a3df9f
S
9831 break;
9832 case HNAE3_CLIENT_ROCE:
e92a0843 9833 if (hnae3_dev_roce_supported(hdev)) {
46a3df9f
S
9834 hdev->roce_client = client;
9835 vport->roce.client = client;
9836 }
9837
994e04f1
HT
9838 ret = hclge_init_roce_client_instance(ae_dev, vport);
9839 if (ret)
9840 goto clear_roce;
fa7a4bd5
JS
9841
9842 break;
9843 default:
9844 return -EINVAL;
46a3df9f
S
9845 }
9846 }
9847
37417c66 9848 return 0;
49dd8054
JS
9849
9850clear_nic:
9851 hdev->nic_client = NULL;
9852 vport->nic.client = NULL;
9853 return ret;
9854clear_roce:
9855 hdev->roce_client = NULL;
9856 vport->roce.client = NULL;
9857 return ret;
46a3df9f
S
9858}
9859
9860static void hclge_uninit_client_instance(struct hnae3_client *client,
9861 struct hnae3_ae_dev *ae_dev)
9862{
9863 struct hclge_dev *hdev = ae_dev->priv;
9864 struct hclge_vport *vport;
9865 int i;
9866
9867 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9868 vport = &hdev->vport[i];
a17dcf3f 9869 if (hdev->roce_client) {
2a0bfc36 9870 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
7cf9c069
HT
9871 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9872 msleep(HCLGE_WAIT_RESET_DONE);
9873
46a3df9f
S
9874 hdev->roce_client->ops->uninit_instance(&vport->roce,
9875 0);
a17dcf3f
L
9876 hdev->roce_client = NULL;
9877 vport->roce.client = NULL;
9878 }
46a3df9f
S
9879 if (client->type == HNAE3_CLIENT_ROCE)
9880 return;
49dd8054 9881 if (hdev->nic_client && client->ops->uninit_instance) {
bd9109c9 9882 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
7cf9c069
HT
9883 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9884 msleep(HCLGE_WAIT_RESET_DONE);
9885
46a3df9f 9886 client->ops->uninit_instance(&vport->nic, 0);
a17dcf3f
L
9887 hdev->nic_client = NULL;
9888 vport->nic.client = NULL;
9889 }
46a3df9f
S
9890 }
9891}
9892
9893static int hclge_pci_init(struct hclge_dev *hdev)
9894{
9895 struct pci_dev *pdev = hdev->pdev;
9896 struct hclge_hw *hw;
9897 int ret;
9898
9899 ret = pci_enable_device(pdev);
9900 if (ret) {
9901 dev_err(&pdev->dev, "failed to enable PCI device\n");
3e249d3b 9902 return ret;
46a3df9f
S
9903 }
9904
9905 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9906 if (ret) {
9907 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9908 if (ret) {
9909 dev_err(&pdev->dev,
9910 "can't set consistent PCI DMA");
9911 goto err_disable_device;
9912 }
9913 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
9914 }
9915
9916 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
9917 if (ret) {
9918 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
9919 goto err_disable_device;
9920 }
9921
9922 pci_set_master(pdev);
9923 hw = &hdev->hw;
46a3df9f
S
9924 hw->io_base = pcim_iomap(pdev, 2, 0);
9925 if (!hw->io_base) {
9926 dev_err(&pdev->dev, "Can't map configuration register space\n");
9927 ret = -ENOMEM;
9928 goto err_clr_master;
9929 }
9930
709eb41a
L
9931 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
9932
46a3df9f
S
9933 return 0;
9934err_clr_master:
9935 pci_clear_master(pdev);
9936 pci_release_regions(pdev);
9937err_disable_device:
9938 pci_disable_device(pdev);
46a3df9f
S
9939
9940 return ret;
9941}
9942
9943static void hclge_pci_uninit(struct hclge_dev *hdev)
9944{
9945 struct pci_dev *pdev = hdev->pdev;
9946
6a814413 9947 pcim_iounmap(pdev, hdev->hw.io_base);
887c3820 9948 pci_free_irq_vectors(pdev);
46a3df9f
S
9949 pci_clear_master(pdev);
9950 pci_release_mem_regions(pdev);
9951 pci_disable_device(pdev);
9952}
9953
48569cda
PL
9954static void hclge_state_init(struct hclge_dev *hdev)
9955{
9956 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
9957 set_bit(HCLGE_STATE_DOWN, &hdev->state);
9958 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
9959 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
d5432455 9960 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
48569cda
PL
9961 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
9962 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
9963}
9964
9965static void hclge_state_uninit(struct hclge_dev *hdev)
9966{
9967 set_bit(HCLGE_STATE_DOWN, &hdev->state);
acfc3d55 9968 set_bit(HCLGE_STATE_REMOVING, &hdev->state);
48569cda 9969
65e41e7e
HT
9970 if (hdev->reset_timer.function)
9971 del_timer_sync(&hdev->reset_timer);
7be1b9f3
YL
9972 if (hdev->service_task.work.func)
9973 cancel_delayed_work_sync(&hdev->service_task);
48569cda
PL
9974}
9975
6b9a97ee
HT
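/* Prepare the device for an FLR: take the reset semaphore, run the reset
 * preparation (retrying a limited number of times on failure), then mask
 * the misc vector and disable the command queue until the FLR completes.
 */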
9976static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
9977{
8627bded
HT
9978#define HCLGE_FLR_RETRY_WAIT_MS 500
9979#define HCLGE_FLR_RETRY_CNT 5
6b9a97ee 9980
8627bded
HT
9981 struct hclge_dev *hdev = ae_dev->priv;
9982 int retry_cnt = 0;
9983 int ret;
6b9a97ee 9984
8627bded
HT
9985retry:
9986 down(&hdev->reset_sem);
9987 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9988 hdev->reset_type = HNAE3_FLR_RESET;
9989 ret = hclge_reset_prepare(hdev);
bb3d8668 9990 if (ret || hdev->reset_pending) {
8627bded
HT
9991 dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
9992 ret);
9993 if (hdev->reset_pending ||
9994 retry_cnt++ < HCLGE_FLR_RETRY_CNT) {
9995 dev_err(&hdev->pdev->dev,
9996 "reset_pending:0x%lx, retry_cnt:%d\n",
9997 hdev->reset_pending, retry_cnt);
9998 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9999 up(&hdev->reset_sem);
10000 msleep(HCLGE_FLR_RETRY_WAIT_MS);
10001 goto retry;
10002 }
10003 }
6b9a97ee 10004
8627bded
HT
10005 /* disable misc vector before FLR done */
10006 hclge_enable_vector(&hdev->misc_vector, false);
10007 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
10008 hdev->rst_stats.flr_rst_cnt++;
6b9a97ee
HT
10009}
10010
10011static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
10012{
10013 struct hclge_dev *hdev = ae_dev->priv;
8627bded
HT
10014 int ret;
10015
10016 hclge_enable_vector(&hdev->misc_vector, true);
6b9a97ee 10017
8627bded
HT
10018 ret = hclge_reset_rebuild(hdev);
10019 if (ret)
10020 dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
10021
10022 hdev->reset_type = HNAE3_NONE_RESET;
10023 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10024 up(&hdev->reset_sem);
6b9a97ee
HT
10025}
10026
31bb229d
PL
10027static void hclge_clear_resetting_state(struct hclge_dev *hdev)
10028{
10029 u16 i;
10030
10031 for (i = 0; i < hdev->num_alloc_vport; i++) {
10032 struct hclge_vport *vport = &hdev->vport[i];
10033 int ret;
10034
10035 /* Send cmd to clear VF's FUNC_RST_ING */
10036 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
10037 if (ret)
10038 dev_warn(&hdev->pdev->dev,
adcf738b 10039 "clear vf(%u) rst failed %d!\n",
31bb229d
PL
10040 vport->vport_id, ret);
10041 }
10042}
10043
46a3df9f
S
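/* Probe-time initialization of the PF: bring up PCI, the command queue and
 * interrupts, then configure TQPs, vports, MAC, VLAN, scheduling, RSS and
 * the flow director before enabling the misc vector and the service task.
 */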
10044static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
10045{
10046 struct pci_dev *pdev = ae_dev->pdev;
46a3df9f
S
10047 struct hclge_dev *hdev;
10048 int ret;
10049
10050 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
2421ee24
HT
10051 if (!hdev)
10052 return -ENOMEM;
46a3df9f 10053
46a3df9f
S
10054 hdev->pdev = pdev;
10055 hdev->ae_dev = ae_dev;
4ed340ab 10056 hdev->reset_type = HNAE3_NONE_RESET;
0742ed7c 10057 hdev->reset_level = HNAE3_FUNC_RESET;
46a3df9f 10058 ae_dev->priv = hdev;
9e690456
GH
10059
10060 /* HW supprt 2 layer vlan */
e6d7d79d 10061 hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
46a3df9f 10062
818f1675 10063 mutex_init(&hdev->vport_lock);
44122887 10064 spin_lock_init(&hdev->fd_rule_lock);
8627bded 10065 sema_init(&hdev->reset_sem, 1);
818f1675 10066
46a3df9f 10067 ret = hclge_pci_init(hdev);
60df7e91 10068 if (ret)
ffd5656e 10069 goto out;
46a3df9f 10070
3efb960f
L
10071 /* Firmware command queue initialize */
10072 ret = hclge_cmd_queue_init(hdev);
60df7e91 10073 if (ret)
ffd5656e 10074 goto err_pci_uninit;
3efb960f
L
10075
10076 /* Firmware command initialize */
46a3df9f
S
10077 ret = hclge_cmd_init(hdev);
10078 if (ret)
ffd5656e 10079 goto err_cmd_uninit;
46a3df9f
S
10080
10081 ret = hclge_get_cap(hdev);
60df7e91 10082 if (ret)
ffd5656e 10083 goto err_cmd_uninit;
46a3df9f 10084
af2aedc5
GH
10085 ret = hclge_query_dev_specs(hdev);
10086 if (ret) {
10087 dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
10088 ret);
10089 goto err_cmd_uninit;
10090 }
10091
46a3df9f
S
10092 ret = hclge_configure(hdev);
10093 if (ret) {
10094 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
ffd5656e 10095 goto err_cmd_uninit;
46a3df9f
S
10096 }
10097
887c3820 10098 ret = hclge_init_msi(hdev);
46a3df9f 10099 if (ret) {
887c3820 10100 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
ffd5656e 10101 goto err_cmd_uninit;
46a3df9f
S
10102 }
10103
466b0c00 10104 ret = hclge_misc_irq_init(hdev);
60df7e91 10105 if (ret)
ffd5656e 10106 goto err_msi_uninit;
466b0c00 10107
46a3df9f
S
10108 ret = hclge_alloc_tqps(hdev);
10109 if (ret) {
10110 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
ffd5656e 10111 goto err_msi_irq_uninit;
46a3df9f
S
10112 }
10113
10114 ret = hclge_alloc_vport(hdev);
60df7e91 10115 if (ret)
ffd5656e 10116 goto err_msi_irq_uninit;
46a3df9f 10117
7df7dad6 10118 ret = hclge_map_tqp(hdev);
60df7e91 10119 if (ret)
2312e050 10120 goto err_msi_irq_uninit;
7df7dad6 10121
c5ef83cb
HT
10122 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
10123 ret = hclge_mac_mdio_config(hdev);
60df7e91 10124 if (ret)
2312e050 10125 goto err_msi_irq_uninit;
cf9cca2d 10126 }
10127
39932473 10128 ret = hclge_init_umv_space(hdev);
60df7e91 10129 if (ret)
9fc55413 10130 goto err_mdiobus_unreg;
39932473 10131
46a3df9f
S
10132 ret = hclge_mac_init(hdev);
10133 if (ret) {
10134 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
ffd5656e 10135 goto err_mdiobus_unreg;
46a3df9f 10136 }
46a3df9f
S
10137
10138 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
10139 if (ret) {
10140 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
ffd5656e 10141 goto err_mdiobus_unreg;
46a3df9f
S
10142 }
10143
b26a6fea
PL
10144 ret = hclge_config_gro(hdev, true);
10145 if (ret)
10146 goto err_mdiobus_unreg;
10147
46a3df9f
S
10148 ret = hclge_init_vlan_config(hdev);
10149 if (ret) {
10150 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
ffd5656e 10151 goto err_mdiobus_unreg;
46a3df9f
S
10152 }
10153
10154 ret = hclge_tm_schd_init(hdev);
10155 if (ret) {
10156 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
ffd5656e 10157 goto err_mdiobus_unreg;
68ece54e
YL
10158 }
10159
268f5dfa 10160 hclge_rss_init_cfg(hdev);
68ece54e
YL
10161 ret = hclge_rss_init_hw(hdev);
10162 if (ret) {
10163 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
ffd5656e 10164 goto err_mdiobus_unreg;
46a3df9f
S
10165 }
10166
f5aac71c
FL
10167 ret = init_mgr_tbl(hdev);
10168 if (ret) {
10169 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
ffd5656e 10170 goto err_mdiobus_unreg;
f5aac71c
FL
10171 }
10172
d695964d
JS
10173 ret = hclge_init_fd_config(hdev);
10174 if (ret) {
10175 dev_err(&pdev->dev,
10176 "fd table init fail, ret=%d\n", ret);
10177 goto err_mdiobus_unreg;
10178 }
10179
a6345787
WL
10180 INIT_KFIFO(hdev->mac_tnl_log);
10181
cacde272
YL
10182 hclge_dcb_ops_set(hdev);
10183
65e41e7e 10184 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
7be1b9f3 10185 INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
46a3df9f 10186
08125454
YL
10187	/* Set up affinity after the service timer setup because add_timer_on
10188	 * is called in the affinity notify.
10189	 */
10190 hclge_misc_affinity_setup(hdev);
10191
8e52a602 10192 hclge_clear_all_event_cause(hdev);
31bb229d 10193 hclge_clear_resetting_state(hdev);
8e52a602 10194
e4193e24
SJ
10195 /* Log and clear the hw errors those already occurred */
10196 hclge_handle_all_hns_hw_errors(ae_dev);
10197
e3b84ed2
SJ
10198	/* request a delayed reset for the error recovery, because an immediate
10199	 * global reset on a PF would affect the pending initialization of other PFs
10200	 */
10201 if (ae_dev->hw_err_reset_req) {
10202 enum hnae3_reset_type reset_level;
10203
10204 reset_level = hclge_get_reset_level(ae_dev,
10205 &ae_dev->hw_err_reset_req);
10206 hclge_set_def_reset_request(ae_dev, reset_level);
10207 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
10208 }
10209
466b0c00
L
10210 /* Enable MISC vector(vector0) */
10211 hclge_enable_vector(&hdev->misc_vector, true);
10212
48569cda 10213 hclge_state_init(hdev);
0742ed7c 10214 hdev->last_reset_time = jiffies;
46a3df9f 10215
08d80a4c
HT
10216 dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
10217 HCLGE_DRIVER_NAME);
10218
1c6dfe6f
YL
10219 hclge_task_schedule(hdev, round_jiffies_relative(HZ));
10220
46a3df9f
S
10221 return 0;
10222
ffd5656e
HT
10223err_mdiobus_unreg:
10224 if (hdev->hw.mac.phydev)
10225 mdiobus_unregister(hdev->hw.mac.mdio_bus);
ffd5656e
HT
10226err_msi_irq_uninit:
10227 hclge_misc_irq_uninit(hdev);
10228err_msi_uninit:
10229 pci_free_irq_vectors(pdev);
10230err_cmd_uninit:
232d0d55 10231 hclge_cmd_uninit(hdev);
ffd5656e 10232err_pci_uninit:
6a814413 10233 pcim_iounmap(pdev, hdev->hw.io_base);
ffd5656e 10234 pci_clear_master(pdev);
46a3df9f 10235 pci_release_regions(pdev);
ffd5656e 10236 pci_disable_device(pdev);
ffd5656e 10237out:
95163521 10238 mutex_destroy(&hdev->vport_lock);
46a3df9f
S
10239 return ret;
10240}
10241
c6dc5213 10242static void hclge_stats_clear(struct hclge_dev *hdev)
10243{
1c6dfe6f 10244 memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
c6dc5213 10245}
10246
22044f95
JS
10247static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10248{
10249 return hclge_config_switch_param(hdev, vf, enable,
10250 HCLGE_SWITCH_ANTI_SPOOF_MASK);
10251}
10252
10253static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10254{
10255 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
10256 HCLGE_FILTER_FE_NIC_INGRESS_B,
10257 enable, vf);
10258}
10259
10260static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
10261{
10262 int ret;
10263
10264 ret = hclge_set_mac_spoofchk(hdev, vf, enable);
10265 if (ret) {
10266 dev_err(&hdev->pdev->dev,
10267 "Set vf %d mac spoof check %s failed, ret=%d\n",
10268 vf, enable ? "on" : "off", ret);
10269 return ret;
10270 }
10271
10272 ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
10273 if (ret)
10274 dev_err(&hdev->pdev->dev,
10275 "Set vf %d vlan spoof check %s failed, ret=%d\n",
10276 vf, enable ? "on" : "off", ret);
10277
10278 return ret;
10279}
10280
10281static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
10282 bool enable)
10283{
10284 struct hclge_vport *vport = hclge_get_vport(handle);
10285 struct hclge_dev *hdev = vport->back;
10286 u32 new_spoofchk = enable ? 1 : 0;
10287 int ret;
10288
295ba232 10289 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
22044f95
JS
10290 return -EOPNOTSUPP;
10291
10292 vport = hclge_get_vf_vport(hdev, vf);
10293 if (!vport)
10294 return -EINVAL;
10295
10296 if (vport->vf_info.spoofchk == new_spoofchk)
10297 return 0;
10298
10299 if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
10300 dev_warn(&hdev->pdev->dev,
10301 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
10302 vf);
7d0b3451 10303 else if (enable && hclge_is_umv_space_full(vport, true))
22044f95
JS
10304 dev_warn(&hdev->pdev->dev,
10305 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
10306 vf);
10307
10308 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
10309 if (ret)
10310 return ret;
10311
10312 vport->vf_info.spoofchk = new_spoofchk;
10313 return 0;
10314}
10315
10316static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
10317{
10318 struct hclge_vport *vport = hdev->vport;
10319 int ret;
10320 int i;
10321
295ba232 10322 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
22044f95
JS
10323 return 0;
10324
10325 /* resume the vf spoof check state after reset */
10326 for (i = 0; i < hdev->num_alloc_vport; i++) {
10327 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
10328 vport->vf_info.spoofchk);
10329 if (ret)
10330 return ret;
10331
10332 vport++;
10333 }
10334
10335 return 0;
10336}
10337
e196ec75
JS
10338static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
10339{
10340 struct hclge_vport *vport = hclge_get_vport(handle);
10341 struct hclge_dev *hdev = vport->back;
295ba232 10342 struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
e196ec75
JS
10343 u32 new_trusted = enable ? 1 : 0;
10344 bool en_bc_pmc;
10345 int ret;
10346
10347 vport = hclge_get_vf_vport(hdev, vf);
10348 if (!vport)
10349 return -EINVAL;
10350
10351 if (vport->vf_info.trusted == new_trusted)
10352 return 0;
10353
10354 /* Disable promisc mode for VF if it is not trusted any more. */
10355 if (!enable && vport->vf_info.promisc_enable) {
295ba232 10356 en_bc_pmc = ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2;
e196ec75
JS
10357 ret = hclge_set_vport_promisc_mode(vport, false, false,
10358 en_bc_pmc);
10359 if (ret)
10360 return ret;
10361 vport->vf_info.promisc_enable = 0;
10362 hclge_inform_vf_promisc_info(vport);
10363 }
10364
10365 vport->vf_info.trusted = new_trusted;
10366
10367 return 0;
10368}
10369
ee9e4424
YL
10370static void hclge_reset_vf_rate(struct hclge_dev *hdev)
10371{
10372 int ret;
10373 int vf;
10374
10375 /* reset vf rate to default value */
10376 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10377 struct hclge_vport *vport = &hdev->vport[vf];
10378
10379 vport->vf_info.max_tx_rate = 0;
10380 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
10381 if (ret)
10382 dev_err(&hdev->pdev->dev,
10383 "vf%d failed to reset to default, ret=%d\n",
10384 vf - HCLGE_VF_VPORT_START_NUM, ret);
10385 }
10386}
10387
10388static int hclge_vf_rate_param_check(struct hclge_dev *hdev, int vf,
10389 int min_tx_rate, int max_tx_rate)
10390{
10391 if (min_tx_rate != 0 ||
10392 max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
10393 dev_err(&hdev->pdev->dev,
10394 "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
10395 min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
10396 return -EINVAL;
10397 }
10398
10399 return 0;
10400}
10401
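/* Handle a set-VF-rate request: only a maximum TX rate is supported, which
 * is programmed through the qset shaper of the VF's vport.
 */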
10402static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
10403 int min_tx_rate, int max_tx_rate, bool force)
10404{
10405 struct hclge_vport *vport = hclge_get_vport(handle);
10406 struct hclge_dev *hdev = vport->back;
10407 int ret;
10408
10409 ret = hclge_vf_rate_param_check(hdev, vf, min_tx_rate, max_tx_rate);
10410 if (ret)
10411 return ret;
10412
10413 vport = hclge_get_vf_vport(hdev, vf);
10414 if (!vport)
10415 return -EINVAL;
10416
10417 if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
10418 return 0;
10419
10420 ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
10421 if (ret)
10422 return ret;
10423
10424 vport->vf_info.max_tx_rate = max_tx_rate;
10425
10426 return 0;
10427}
10428
10429static int hclge_resume_vf_rate(struct hclge_dev *hdev)
10430{
10431 struct hnae3_handle *handle = &hdev->vport->nic;
10432 struct hclge_vport *vport;
10433 int ret;
10434 int vf;
10435
10436 /* resume the vf max_tx_rate after reset */
10437 for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
10438 vport = hclge_get_vf_vport(hdev, vf);
10439 if (!vport)
10440 return -EINVAL;
10441
 10442		/* zero means max rate; after reset, the firmware has already
 10443		 * set it to max rate, so just continue.
 10444		 */
10445 if (!vport->vf_info.max_tx_rate)
10446 continue;
10447
10448 ret = hclge_set_vf_rate(handle, vf, 0,
10449 vport->vf_info.max_tx_rate, true);
10450 if (ret) {
10451 dev_err(&hdev->pdev->dev,
10452 "vf%d failed to resume tx_rate:%u, ret=%d\n",
10453 vf, vport->vf_info.max_tx_rate, ret);
10454 return ret;
10455 }
10456 }
10457
10458 return 0;
10459}
10460
a6d818e3
YL
10461static void hclge_reset_vport_state(struct hclge_dev *hdev)
10462{
10463 struct hclge_vport *vport = hdev->vport;
10464 int i;
10465
10466 for (i = 0; i < hdev->num_alloc_vport; i++) {
0f14c5b1 10467 hclge_vport_stop(vport);
a6d818e3
YL
10468 vport++;
10469 }
10470}
10471
4ed340ab
L
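/* Rebuild the hardware state after a reset: command queue, TQP mapping,
 * MAC, TSO/GRO, VLAN, TM, RSS, manager table and flow director are
 * reconfigured; the in-memory VLAN/MAC bookkeeping is only cleared for
 * IMP and global resets.
 */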
10472static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
10473{
10474 struct hclge_dev *hdev = ae_dev->priv;
10475 struct pci_dev *pdev = ae_dev->pdev;
10476 int ret;
10477
10478 set_bit(HCLGE_STATE_DOWN, &hdev->state);
10479
c6dc5213 10480 hclge_stats_clear(hdev);
ee4bcd3b
JS
 10481	/* NOTE: pf reset doesn't need to clear or restore pf and vf table
 10482	 * entries, so the tables in memory should not be cleaned here.
 10483	 */
10484 if (hdev->reset_type == HNAE3_IMP_RESET ||
10485 hdev->reset_type == HNAE3_GLOBAL_RESET) {
039ba863
JS
10486 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
10487 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
ee4bcd3b
JS
10488 bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
10489 hclge_reset_umv_space(hdev);
10490 }
10491
4ed340ab
L
10492 ret = hclge_cmd_init(hdev);
10493 if (ret) {
10494 dev_err(&pdev->dev, "Cmd queue init failed\n");
10495 return ret;
10496 }
10497
4ed340ab
L
10498 ret = hclge_map_tqp(hdev);
10499 if (ret) {
10500 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
10501 return ret;
10502 }
10503
10504 ret = hclge_mac_init(hdev);
10505 if (ret) {
10506 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
10507 return ret;
10508 }
10509
4ed340ab
L
10510 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
10511 if (ret) {
10512 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
10513 return ret;
10514 }
10515
b26a6fea
PL
10516 ret = hclge_config_gro(hdev, true);
10517 if (ret)
10518 return ret;
10519
4ed340ab
L
10520 ret = hclge_init_vlan_config(hdev);
10521 if (ret) {
10522 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
10523 return ret;
10524 }
10525
44e59e37 10526 ret = hclge_tm_init_hw(hdev, true);
4ed340ab 10527 if (ret) {
f31c1ba6 10528 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
4ed340ab
L
10529 return ret;
10530 }
10531
10532 ret = hclge_rss_init_hw(hdev);
10533 if (ret) {
10534 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
10535 return ret;
10536 }
10537
d0db7ed3
YM
10538 ret = init_mgr_tbl(hdev);
10539 if (ret) {
10540 dev_err(&pdev->dev,
10541 "failed to reinit manager table, ret = %d\n", ret);
10542 return ret;
10543 }
10544
d695964d
JS
10545 ret = hclge_init_fd_config(hdev);
10546 if (ret) {
9b2f3477 10547 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
d695964d
JS
10548 return ret;
10549 }
10550
4fdd0bca
JS
 10551	/* Log and clear the hw errors that have already occurred */
10552 hclge_handle_all_hns_hw_errors(ae_dev);
10553
f3fa4a94 10554 /* Re-enable the hw error interrupts because
00ea6e5f 10555 * the interrupts get disabled on global reset.
01865a50 10556 */
00ea6e5f 10557 ret = hclge_config_nic_hw_error(hdev, true);
f3fa4a94
SJ
10558 if (ret) {
10559 dev_err(&pdev->dev,
00ea6e5f
WL
10560 "fail(%d) to re-enable NIC hw error interrupts\n",
10561 ret);
f3fa4a94
SJ
10562 return ret;
10563 }
01865a50 10564
00ea6e5f
WL
10565 if (hdev->roce_client) {
10566 ret = hclge_config_rocee_ras_interrupt(hdev, true);
10567 if (ret) {
10568 dev_err(&pdev->dev,
10569 "fail(%d) to re-enable roce ras interrupts\n",
10570 ret);
10571 return ret;
10572 }
10573 }
10574
a6d818e3 10575 hclge_reset_vport_state(hdev);
22044f95
JS
10576 ret = hclge_reset_vport_spoofchk(hdev);
10577 if (ret)
10578 return ret;
a6d818e3 10579
ee9e4424
YL
10580 ret = hclge_resume_vf_rate(hdev);
10581 if (ret)
10582 return ret;
10583
4ed340ab
L
10584 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
10585 HCLGE_DRIVER_NAME);
10586
10587 return 0;
10588}
10589
46a3df9f
S
10590static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
10591{
10592 struct hclge_dev *hdev = ae_dev->priv;
10593 struct hclge_mac *mac = &hdev->hw.mac;
10594
ee9e4424 10595 hclge_reset_vf_rate(hdev);
59359fc8 10596 hclge_clear_vf_vlan(hdev);
08125454 10597 hclge_misc_affinity_teardown(hdev);
48569cda 10598 hclge_state_uninit(hdev);
ee4bcd3b 10599 hclge_uninit_mac_table(hdev);
46a3df9f
S
10600
10601 if (mac->phydev)
10602 mdiobus_unregister(mac->mdio_bus);
10603
466b0c00
L
10604 /* Disable MISC vector(vector0) */
10605 hclge_enable_vector(&hdev->misc_vector, false);
8e52a602
XW
10606 synchronize_irq(hdev->misc_vector.vector_irq);
10607
00ea6e5f 10608 /* Disable all hw interrupts */
a6345787 10609 hclge_config_mac_tnl_int(hdev, false);
00ea6e5f
WL
10610 hclge_config_nic_hw_error(hdev, false);
10611 hclge_config_rocee_ras_interrupt(hdev, false);
10612
232d0d55 10613 hclge_cmd_uninit(hdev);
ca1d7669 10614 hclge_misc_irq_uninit(hdev);
46a3df9f 10615 hclge_pci_uninit(hdev);
818f1675 10616 mutex_destroy(&hdev->vport_lock);
c6075b19 10617 hclge_uninit_vport_vlan_table(hdev);
46a3df9f
S
10618 ae_dev->priv = NULL;
10619}
10620
482d2e9c
PL
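/* The maximum number of combined channels is bounded both by the RSS
 * capability of the device and by the TQPs allocated to this vport,
 * evenly divided among its TCs.
 */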
10621static u32 hclge_get_max_channels(struct hnae3_handle *handle)
10622{
10623 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10624 struct hclge_vport *vport = hclge_get_vport(handle);
10625 struct hclge_dev *hdev = vport->back;
10626
c3b9c50d
HT
10627 return min_t(u32, hdev->rss_size_max,
10628 vport->alloc_tqps / kinfo->num_tc);
482d2e9c
PL
10629}
10630
10631static void hclge_get_channels(struct hnae3_handle *handle,
10632 struct ethtool_channels *ch)
10633{
482d2e9c
PL
10634 ch->max_combined = hclge_get_max_channels(handle);
10635 ch->other_count = 1;
10636 ch->max_other = 1;
c3b9c50d 10637 ch->combined_count = handle->kinfo.rss_size;
482d2e9c
PL
10638}
10639
09f2af64 10640static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
0d43bf45 10641 u16 *alloc_tqps, u16 *max_rss_size)
09f2af64
PL
10642{
10643 struct hclge_vport *vport = hclge_get_vport(handle);
10644 struct hclge_dev *hdev = vport->back;
09f2af64 10645
0d43bf45 10646 *alloc_tqps = vport->alloc_tqps;
09f2af64
PL
10647 *max_rss_size = hdev->rss_size_max;
10648}
10649
90c68a41
YL
10650static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
10651 bool rxfh_configured)
09f2af64
PL
10652{
10653 struct hclge_vport *vport = hclge_get_vport(handle);
10654 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
354d0fab 10655 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
09f2af64 10656 struct hclge_dev *hdev = vport->back;
354d0fab 10657 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
adcf738b
GL
10658 u16 cur_rss_size = kinfo->rss_size;
10659 u16 cur_tqps = kinfo->num_tqps;
09f2af64 10660 u16 tc_valid[HCLGE_MAX_TC_NUM];
09f2af64
PL
10661 u16 roundup_size;
10662 u32 *rss_indir;
ebaf1908
WL
10663 unsigned int i;
10664 int ret;
09f2af64 10665
672ad0ed 10666 kinfo->req_rss_size = new_tqps_num;
09f2af64 10667
672ad0ed 10668 ret = hclge_tm_vport_map_update(hdev);
09f2af64 10669 if (ret) {
672ad0ed 10670 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
09f2af64
PL
10671 return ret;
10672 }
10673
10674 roundup_size = roundup_pow_of_two(kinfo->rss_size);
10675 roundup_size = ilog2(roundup_size);
10676 /* Set the RSS TC mode according to the new RSS size */
10677 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
10678 tc_valid[i] = 0;
10679
10680 if (!(hdev->hw_tc_map & BIT(i)))
10681 continue;
10682
10683 tc_valid[i] = 1;
10684 tc_size[i] = roundup_size;
10685 tc_offset[i] = kinfo->rss_size * i;
10686 }
10687 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
10688 if (ret)
10689 return ret;
10690
90c68a41
YL
 10691	/* RSS indirection table has been configured by user */
10692 if (rxfh_configured)
10693 goto out;
10694
09f2af64
PL
 10695	/* Reinitialize the RSS indirection table according to the new RSS size */
10696 rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
10697 if (!rss_indir)
10698 return -ENOMEM;
10699
10700 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
10701 rss_indir[i] = i % kinfo->rss_size;
10702
10703 ret = hclge_set_rss(handle, rss_indir, NULL, 0);
10704 if (ret)
10705 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
10706 ret);
10707
10708 kfree(rss_indir);
10709
90c68a41 10710out:
09f2af64
PL
10711 if (!ret)
10712 dev_info(&hdev->pdev->dev,
adcf738b 10713 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
09f2af64
PL
10714 cur_rss_size, kinfo->rss_size,
10715 cur_tqps, kinfo->rss_size * kinfo->num_tc);
10716
10717 return ret;
10718}
10719
77b34110
FL
10720static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
10721 u32 *regs_num_64_bit)
10722{
10723 struct hclge_desc desc;
10724 u32 total_num;
10725 int ret;
10726
10727 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
10728 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10729 if (ret) {
10730 dev_err(&hdev->pdev->dev,
10731 "Query register number cmd failed, ret = %d.\n", ret);
10732 return ret;
10733 }
10734
10735 *regs_num_32_bit = le32_to_cpu(desc.data[0]);
10736 *regs_num_64_bit = le32_to_cpu(desc.data[1]);
10737
10738 total_num = *regs_num_32_bit + *regs_num_64_bit;
10739 if (!total_num)
10740 return -EINVAL;
10741
10742 return 0;
10743}
10744
10745static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10746 void *data)
10747{
10748#define HCLGE_32_BIT_REG_RTN_DATANUM 8
b37ce587 10749#define HCLGE_32_BIT_DESC_NODATA_LEN 2
77b34110
FL
10750
10751 struct hclge_desc *desc;
10752 u32 *reg_val = data;
10753 __le32 *desc_data;
b37ce587 10754 int nodata_num;
77b34110
FL
10755 int cmd_num;
10756 int i, k, n;
10757 int ret;
10758
10759 if (regs_num == 0)
10760 return 0;
10761
b37ce587
YM
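	/* the head of the first BD holds HCLGE_32_BIT_DESC_NODATA_LEN
	 * non-register words, so account for them when computing how many
	 * BDs are needed to return regs_num registers
	 */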
10762 nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
10763 cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
10764 HCLGE_32_BIT_REG_RTN_DATANUM);
77b34110
FL
10765 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10766 if (!desc)
10767 return -ENOMEM;
10768
10769 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
10770 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10771 if (ret) {
10772 dev_err(&hdev->pdev->dev,
10773 "Query 32 bit register cmd failed, ret = %d.\n", ret);
10774 kfree(desc);
10775 return ret;
10776 }
10777
10778 for (i = 0; i < cmd_num; i++) {
10779 if (i == 0) {
10780 desc_data = (__le32 *)(&desc[i].data[0]);
b37ce587 10781 n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
77b34110
FL
10782 } else {
10783 desc_data = (__le32 *)(&desc[i]);
10784 n = HCLGE_32_BIT_REG_RTN_DATANUM;
10785 }
10786 for (k = 0; k < n; k++) {
10787 *reg_val++ = le32_to_cpu(*desc_data++);
10788
10789 regs_num--;
10790 if (!regs_num)
10791 break;
10792 }
10793 }
10794
10795 kfree(desc);
10796 return 0;
10797}
10798
10799static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10800 void *data)
10801{
10802#define HCLGE_64_BIT_REG_RTN_DATANUM 4
b37ce587 10803#define HCLGE_64_BIT_DESC_NODATA_LEN 1
77b34110
FL
10804
10805 struct hclge_desc *desc;
10806 u64 *reg_val = data;
10807 __le64 *desc_data;
b37ce587 10808 int nodata_len;
77b34110
FL
10809 int cmd_num;
10810 int i, k, n;
10811 int ret;
10812
10813 if (regs_num == 0)
10814 return 0;
10815
b37ce587
YM
10816 nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
10817 cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
10818 HCLGE_64_BIT_REG_RTN_DATANUM);
77b34110
FL
10819 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10820 if (!desc)
10821 return -ENOMEM;
10822
10823 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
10824 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10825 if (ret) {
10826 dev_err(&hdev->pdev->dev,
10827 "Query 64 bit register cmd failed, ret = %d.\n", ret);
10828 kfree(desc);
10829 return ret;
10830 }
10831
10832 for (i = 0; i < cmd_num; i++) {
10833 if (i == 0) {
10834 desc_data = (__le64 *)(&desc[i].data[0]);
b37ce587 10835 n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
77b34110
FL
10836 } else {
10837 desc_data = (__le64 *)(&desc[i]);
10838 n = HCLGE_64_BIT_REG_RTN_DATANUM;
10839 }
10840 for (k = 0; k < n; k++) {
10841 *reg_val++ = le64_to_cpu(*desc_data++);
10842
10843 regs_num--;
10844 if (!regs_num)
10845 break;
10846 }
10847 }
10848
10849 kfree(desc);
10850 return 0;
10851}
10852
ea4750ca 10853#define MAX_SEPARATE_NUM 4
ddb54554 10854#define SEPARATOR_VALUE 0xFDFCFBFA
ea4750ca
JS
10855#define REG_NUM_PER_LINE 4
10856#define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
ddb54554
GH
10857#define REG_SEPARATOR_LINE 1
10858#define REG_NUM_REMAIN_MASK 3
10859#define BD_LIST_MAX_NUM 30
ea4750ca 10860
ddb54554 10861int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
77b34110 10862{
5caa039f
HT
10863 int i;
10864
 10865	/* initialize all command BDs except the last one */
10866 for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
10867 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
10868 true);
10869 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10870 }
10871
10872 /* initialize the last command BD */
10873 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
ddb54554 10874
5caa039f 10875 return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
ddb54554
GH
10876}
10877
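/* Read the per-type BD counts for the DFX register dump; each count is
 * picked out of the returned descriptors using hclge_dfx_bd_offset_list.
 */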
10878static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
10879 int *bd_num_list,
10880 u32 type_num)
10881{
ddb54554 10882 u32 entries_per_desc, desc_index, index, offset, i;
9027d043 10883 struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
77b34110
FL
10884 int ret;
10885
ddb54554 10886 ret = hclge_query_bd_num_cmd_send(hdev, desc);
77b34110
FL
10887 if (ret) {
10888 dev_err(&hdev->pdev->dev,
ddb54554
GH
10889 "Get dfx bd num fail, status is %d.\n", ret);
10890 return ret;
77b34110
FL
10891 }
10892
ddb54554
GH
10893 entries_per_desc = ARRAY_SIZE(desc[0].data);
10894 for (i = 0; i < type_num; i++) {
10895 offset = hclge_dfx_bd_offset_list[i];
10896 index = offset % entries_per_desc;
10897 desc_index = offset / entries_per_desc;
10898 bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
10899 }
ea4750ca 10900
ddb54554 10901 return ret;
77b34110
FL
10902}
10903
ddb54554
GH
10904static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
10905 struct hclge_desc *desc_src, int bd_num,
10906 enum hclge_opcode_type cmd)
77b34110 10907{
ddb54554
GH
10908 struct hclge_desc *desc = desc_src;
10909 int i, ret;
10910
10911 hclge_cmd_setup_basic_desc(desc, cmd, true);
10912 for (i = 0; i < bd_num - 1; i++) {
10913 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10914 desc++;
10915 hclge_cmd_setup_basic_desc(desc, cmd, true);
10916 }
10917
10918 desc = desc_src;
10919 ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
10920 if (ret)
10921 dev_err(&hdev->pdev->dev,
10922 "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
10923 cmd, ret);
10924
10925 return ret;
10926}
10927
10928static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
10929 void *data)
10930{
10931 int entries_per_desc, reg_num, separator_num, desc_index, index, i;
10932 struct hclge_desc *desc = desc_src;
ea4750ca 10933 u32 *reg = data;
ddb54554
GH
10934
10935 entries_per_desc = ARRAY_SIZE(desc->data);
10936 reg_num = entries_per_desc * bd_num;
10937 separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
10938 for (i = 0; i < reg_num; i++) {
10939 index = i % entries_per_desc;
10940 desc_index = i / entries_per_desc;
10941 *reg++ = le32_to_cpu(desc[desc_index].data[index]);
10942 }
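	/* pad the block with SEPARATOR_VALUE words so that the next
	 * register block starts on a REG_LEN_PER_LINE boundary
	 */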
10943 for (i = 0; i < separator_num; i++)
10944 *reg++ = SEPARATOR_VALUE;
10945
10946 return reg_num + separator_num;
10947}
10948
10949static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
10950{
10951 u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
72fa4904 10952 int data_len_per_desc, bd_num, i;
ddb54554 10953 int bd_num_list[BD_LIST_MAX_NUM];
72fa4904 10954 u32 data_len;
77b34110
FL
10955 int ret;
10956
ddb54554
GH
10957 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10958 if (ret) {
10959 dev_err(&hdev->pdev->dev,
10960 "Get dfx reg bd num fail, status is %d.\n", ret);
10961 return ret;
10962 }
77b34110 10963
c593642c 10964 data_len_per_desc = sizeof_field(struct hclge_desc, data);
ddb54554
GH
10965 *len = 0;
10966 for (i = 0; i < dfx_reg_type_num; i++) {
10967 bd_num = bd_num_list[i];
10968 data_len = data_len_per_desc * bd_num;
10969 *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
10970 }
10971
10972 return ret;
10973}
10974
10975static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
10976{
10977 u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10978 int bd_num, bd_num_max, buf_len, i;
10979 int bd_num_list[BD_LIST_MAX_NUM];
10980 struct hclge_desc *desc_src;
10981 u32 *reg = data;
10982 int ret;
10983
10984 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
77b34110
FL
10985 if (ret) {
10986 dev_err(&hdev->pdev->dev,
ddb54554
GH
10987 "Get dfx reg bd num fail, status is %d.\n", ret);
10988 return ret;
10989 }
10990
10991 bd_num_max = bd_num_list[0];
10992 for (i = 1; i < dfx_reg_type_num; i++)
10993 bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
10994
10995 buf_len = sizeof(*desc_src) * bd_num_max;
10996 desc_src = kzalloc(buf_len, GFP_KERNEL);
322cb97c 10997 if (!desc_src)
ddb54554 10998 return -ENOMEM;
77b34110 10999
ddb54554
GH
11000 for (i = 0; i < dfx_reg_type_num; i++) {
11001 bd_num = bd_num_list[i];
11002 ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
11003 hclge_dfx_reg_opcode_list[i]);
11004 if (ret) {
11005 dev_err(&hdev->pdev->dev,
11006 "Get dfx reg fail, status is %d.\n", ret);
11007 break;
11008 }
11009
11010 reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
11011 }
11012
11013 kfree(desc_src);
11014 return ret;
11015}
11016
11017static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
11018 struct hnae3_knic_private_info *kinfo)
11019{
11020#define HCLGE_RING_REG_OFFSET 0x200
11021#define HCLGE_RING_INT_REG_OFFSET 0x4
11022
11023 int i, j, reg_num, separator_num;
11024 int data_num_sum;
11025 u32 *reg = data;
11026
ea4750ca 11027	/* fetch per-PF register values from the PF PCIe register space */
ddb54554
GH
11028 reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
11029 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11030 for (i = 0; i < reg_num; i++)
ea4750ca
JS
11031 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
11032 for (i = 0; i < separator_num; i++)
11033 *reg++ = SEPARATOR_VALUE;
ddb54554 11034 data_num_sum = reg_num + separator_num;
ea4750ca 11035
ddb54554
GH
11036 reg_num = ARRAY_SIZE(common_reg_addr_list);
11037 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11038 for (i = 0; i < reg_num; i++)
ea4750ca
JS
11039 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
11040 for (i = 0; i < separator_num; i++)
11041 *reg++ = SEPARATOR_VALUE;
ddb54554 11042 data_num_sum += reg_num + separator_num;
ea4750ca 11043
ddb54554
GH
11044 reg_num = ARRAY_SIZE(ring_reg_addr_list);
11045 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
ea4750ca 11046 for (j = 0; j < kinfo->num_tqps; j++) {
ddb54554 11047 for (i = 0; i < reg_num; i++)
ea4750ca
JS
11048 *reg++ = hclge_read_dev(&hdev->hw,
11049 ring_reg_addr_list[i] +
ddb54554 11050 HCLGE_RING_REG_OFFSET * j);
ea4750ca
JS
11051 for (i = 0; i < separator_num; i++)
11052 *reg++ = SEPARATOR_VALUE;
11053 }
ddb54554 11054 data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
ea4750ca 11055
ddb54554
GH
11056 reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
11057 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
ea4750ca 11058 for (j = 0; j < hdev->num_msi_used - 1; j++) {
ddb54554 11059 for (i = 0; i < reg_num; i++)
ea4750ca
JS
11060 *reg++ = hclge_read_dev(&hdev->hw,
11061 tqp_intr_reg_addr_list[i] +
ddb54554 11062 HCLGE_RING_INT_REG_OFFSET * j);
ea4750ca
JS
11063 for (i = 0; i < separator_num; i++)
11064 *reg++ = SEPARATOR_VALUE;
11065 }
ddb54554
GH
11066 data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
11067
11068 return data_num_sum;
11069}
11070
11071static int hclge_get_regs_len(struct hnae3_handle *handle)
11072{
11073 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
11074 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
11075 struct hclge_vport *vport = hclge_get_vport(handle);
11076 struct hclge_dev *hdev = vport->back;
11077 int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
11078 int regs_lines_32_bit, regs_lines_64_bit;
11079 int ret;
11080
11081 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
11082 if (ret) {
11083 dev_err(&hdev->pdev->dev,
11084 "Get register number failed, ret = %d.\n", ret);
11085 return ret;
11086 }
11087
11088 ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
11089 if (ret) {
11090 dev_err(&hdev->pdev->dev,
11091 "Get dfx reg len failed, ret = %d.\n", ret);
11092 return ret;
11093 }
11094
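	/* the total length is counted in dump lines of REG_LEN_PER_LINE
	 * bytes, with one separator line reserved after each register block
	 */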
11095 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
11096 REG_SEPARATOR_LINE;
11097 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
11098 REG_SEPARATOR_LINE;
11099 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
11100 REG_SEPARATOR_LINE;
11101 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
11102 REG_SEPARATOR_LINE;
11103 regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
11104 REG_SEPARATOR_LINE;
11105 regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
11106 REG_SEPARATOR_LINE;
11107
11108 return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
11109 tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
11110 regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
11111}
11112
11113static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
11114 void *data)
11115{
11116 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
11117 struct hclge_vport *vport = hclge_get_vport(handle);
11118 struct hclge_dev *hdev = vport->back;
11119 u32 regs_num_32_bit, regs_num_64_bit;
11120 int i, reg_num, separator_num, ret;
11121 u32 *reg = data;
11122
11123 *version = hdev->fw_version;
11124
11125 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
11126 if (ret) {
11127 dev_err(&hdev->pdev->dev,
11128 "Get register number failed, ret = %d.\n", ret);
11129 return;
11130 }
11131
11132 reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
ea4750ca 11133
ea4750ca 11134 ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
77b34110
FL
11135 if (ret) {
11136 dev_err(&hdev->pdev->dev,
11137 "Get 32 bit register failed, ret = %d.\n", ret);
11138 return;
11139 }
ddb54554
GH
11140 reg_num = regs_num_32_bit;
11141 reg += reg_num;
11142 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11143 for (i = 0; i < separator_num; i++)
11144 *reg++ = SEPARATOR_VALUE;
77b34110 11145
ea4750ca 11146 ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
ddb54554 11147 if (ret) {
77b34110
FL
11148 dev_err(&hdev->pdev->dev,
11149 "Get 64 bit register failed, ret = %d.\n", ret);
ddb54554
GH
11150 return;
11151 }
11152 reg_num = regs_num_64_bit * 2;
11153 reg += reg_num;
11154 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11155 for (i = 0; i < separator_num; i++)
11156 *reg++ = SEPARATOR_VALUE;
11157
11158 ret = hclge_get_dfx_reg(hdev, reg);
11159 if (ret)
11160 dev_err(&hdev->pdev->dev,
11161 "Get dfx register failed, ret = %d.\n", ret);
77b34110
FL
11162}
11163
f6f75abc 11164static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
07f8e940
JS
11165{
11166 struct hclge_set_led_state_cmd *req;
11167 struct hclge_desc desc;
11168 int ret;
11169
11170 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
11171
11172 req = (struct hclge_set_led_state_cmd *)desc.data;
e4e87715
PL
11173 hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
11174 HCLGE_LED_LOCATE_STATE_S, locate_led_status);
07f8e940
JS
11175
11176 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11177 if (ret)
11178 dev_err(&hdev->pdev->dev,
11179 "Send set led state cmd error, ret =%d\n", ret);
11180
11181 return ret;
11182}
11183
11184enum hclge_led_status {
11185 HCLGE_LED_OFF,
11186 HCLGE_LED_ON,
11187 HCLGE_LED_NO_CHANGE = 0xFF,
11188};
11189
11190static int hclge_set_led_id(struct hnae3_handle *handle,
11191 enum ethtool_phys_id_state status)
11192{
07f8e940
JS
11193 struct hclge_vport *vport = hclge_get_vport(handle);
11194 struct hclge_dev *hdev = vport->back;
07f8e940
JS
11195
11196 switch (status) {
11197 case ETHTOOL_ID_ACTIVE:
f6f75abc 11198 return hclge_set_led_status(hdev, HCLGE_LED_ON);
07f8e940 11199 case ETHTOOL_ID_INACTIVE:
f6f75abc 11200 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
07f8e940 11201 default:
f6f75abc 11202 return -EINVAL;
07f8e940 11203 }
07f8e940
JS
11204}
11205
0979aa0b
FL
11206static void hclge_get_link_mode(struct hnae3_handle *handle,
11207 unsigned long *supported,
11208 unsigned long *advertising)
11209{
11210 unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
11211 struct hclge_vport *vport = hclge_get_vport(handle);
11212 struct hclge_dev *hdev = vport->back;
11213 unsigned int idx = 0;
11214
11215 for (; idx < size; idx++) {
11216 supported[idx] = hdev->hw.mac.supported[idx];
11217 advertising[idx] = hdev->hw.mac.advertising[idx];
11218 }
11219}
11220
1731be4c 11221static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
5c9f6b39
PL
11222{
11223 struct hclge_vport *vport = hclge_get_vport(handle);
11224 struct hclge_dev *hdev = vport->back;
11225
11226 return hclge_config_gro(hdev, enable);
11227}
11228
c631c696
JS
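/* Reapply the PF promiscuous mode when the overflow promisc flags have
 * changed, and update VLAN filtering to match the resulting flags.
 */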
11229static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
11230{
11231 struct hclge_vport *vport = &hdev->vport[0];
11232 struct hnae3_handle *handle = &vport->nic;
9d8d5a36 11233 u8 tmp_flags;
c631c696
JS
11234 int ret;
11235
11236 if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
11237 set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11238 vport->last_promisc_flags = vport->overflow_promisc_flags;
11239 }
11240
11241 if (test_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state)) {
11242 tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
11243 ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
11244 tmp_flags & HNAE3_MPE);
11245 if (!ret) {
11246 clear_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11247 hclge_enable_vlan_filter(handle,
11248 tmp_flags & HNAE3_VLAN_FLTR);
11249 }
11250 }
11251}
11252
cb10228d
YL
11253static bool hclge_module_existed(struct hclge_dev *hdev)
11254{
11255 struct hclge_desc desc;
11256 u32 existed;
11257 int ret;
11258
11259 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
11260 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11261 if (ret) {
11262 dev_err(&hdev->pdev->dev,
11263 "failed to get SFP exist state, ret = %d\n", ret);
11264 return false;
11265 }
11266
11267 existed = le32_to_cpu(desc.data[0]);
11268
11269 return existed != 0;
11270}
11271
 11272/* need 6 BDs (140 bytes in total) in one reading;
 11273 * return the number of bytes actually read, 0 means the read failed.
 11274 */
11275static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
11276 u32 len, u8 *data)
11277{
11278 struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
11279 struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
11280 u16 read_len;
11281 u16 copy_len;
11282 int ret;
11283 int i;
11284
11285 /* setup all 6 bds to read module eeprom info. */
11286 for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
11287 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
11288 true);
11289
11290 /* bd0~bd4 need next flag */
11291 if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
11292 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
11293 }
11294
11295 /* setup bd0, this bd contains offset and read length. */
11296 sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
11297 sfp_info_bd0->offset = cpu_to_le16((u16)offset);
11298 read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
11299 sfp_info_bd0->read_len = cpu_to_le16(read_len);
11300
11301 ret = hclge_cmd_send(&hdev->hw, desc, i);
11302 if (ret) {
11303 dev_err(&hdev->pdev->dev,
11304 "failed to get SFP eeprom info, ret = %d\n", ret);
11305 return 0;
11306 }
11307
11308 /* copy sfp info from bd0 to out buffer. */
11309 copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
11310 memcpy(data, sfp_info_bd0->data, copy_len);
11311 read_len = copy_len;
11312
11313 /* copy sfp info from bd1~bd5 to out buffer if needed. */
11314 for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
11315 if (read_len >= len)
11316 return read_len;
11317
11318 copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
11319 memcpy(data + read_len, desc[i].data, copy_len);
11320 read_len += copy_len;
11321 }
11322
11323 return read_len;
11324}
11325
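/* Read module EEPROM contents for fiber ports only, looping over
 * hclge_get_sfp_eeprom_info() until the requested length is filled.
 */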
11326static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
11327 u32 len, u8 *data)
11328{
11329 struct hclge_vport *vport = hclge_get_vport(handle);
11330 struct hclge_dev *hdev = vport->back;
11331 u32 read_len = 0;
11332 u16 data_len;
11333
11334 if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
11335 return -EOPNOTSUPP;
11336
11337 if (!hclge_module_existed(hdev))
11338 return -ENXIO;
11339
11340 while (read_len < len) {
11341 data_len = hclge_get_sfp_eeprom_info(hdev,
11342 offset + read_len,
11343 len - read_len,
11344 data + read_len);
11345 if (!data_len)
11346 return -EIO;
11347
11348 read_len += data_len;
11349 }
11350
11351 return 0;
11352}
11353
46a3df9f
S
11354static const struct hnae3_ae_ops hclge_ops = {
11355 .init_ae_dev = hclge_init_ae_dev,
11356 .uninit_ae_dev = hclge_uninit_ae_dev,
6b9a97ee
HT
11357 .flr_prepare = hclge_flr_prepare,
11358 .flr_done = hclge_flr_done,
46a3df9f
S
11359 .init_client_instance = hclge_init_client_instance,
11360 .uninit_client_instance = hclge_uninit_client_instance,
84e095d6
SM
11361 .map_ring_to_vector = hclge_map_ring_to_vector,
11362 .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
46a3df9f 11363 .get_vector = hclge_get_vector,
0d3e6631 11364 .put_vector = hclge_put_vector,
46a3df9f 11365 .set_promisc_mode = hclge_set_promisc_mode,
c631c696 11366 .request_update_promisc_mode = hclge_request_update_promisc_mode,
c39c4d98 11367 .set_loopback = hclge_set_loopback,
46a3df9f
S
11368 .start = hclge_ae_start,
11369 .stop = hclge_ae_stop,
a6d818e3
YL
11370 .client_start = hclge_client_start,
11371 .client_stop = hclge_client_stop,
46a3df9f
S
11372 .get_status = hclge_get_status,
11373 .get_ksettings_an_result = hclge_get_ksettings_an_result,
46a3df9f
S
11374 .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
11375 .get_media_type = hclge_get_media_type,
22f48e24 11376 .check_port_speed = hclge_check_port_speed,
7e6ec914
JS
11377 .get_fec = hclge_get_fec,
11378 .set_fec = hclge_set_fec,
46a3df9f
S
11379 .get_rss_key_size = hclge_get_rss_key_size,
11380 .get_rss_indir_size = hclge_get_rss_indir_size,
11381 .get_rss = hclge_get_rss,
11382 .set_rss = hclge_set_rss,
f7db940a 11383 .set_rss_tuple = hclge_set_rss_tuple,
07d29954 11384 .get_rss_tuple = hclge_get_rss_tuple,
46a3df9f
S
11385 .get_tc_size = hclge_get_tc_size,
11386 .get_mac_addr = hclge_get_mac_addr,
11387 .set_mac_addr = hclge_set_mac_addr,
26483246 11388 .do_ioctl = hclge_do_ioctl,
46a3df9f
S
11389 .add_uc_addr = hclge_add_uc_addr,
11390 .rm_uc_addr = hclge_rm_uc_addr,
11391 .add_mc_addr = hclge_add_mc_addr,
11392 .rm_mc_addr = hclge_rm_mc_addr,
11393 .set_autoneg = hclge_set_autoneg,
11394 .get_autoneg = hclge_get_autoneg,
22f48e24 11395 .restart_autoneg = hclge_restart_autoneg,
7786a996 11396 .halt_autoneg = hclge_halt_autoneg,
46a3df9f 11397 .get_pauseparam = hclge_get_pauseparam,
61387774 11398 .set_pauseparam = hclge_set_pauseparam,
46a3df9f
S
11399 .set_mtu = hclge_set_mtu,
11400 .reset_queue = hclge_reset_tqp,
11401 .get_stats = hclge_get_stats,
615466ce 11402 .get_mac_stats = hclge_get_mac_stat,
46a3df9f
S
11403 .update_stats = hclge_update_stats,
11404 .get_strings = hclge_get_strings,
11405 .get_sset_count = hclge_get_sset_count,
11406 .get_fw_version = hclge_get_fw_version,
11407 .get_mdix_mode = hclge_get_mdix_mode,
391b5e93 11408 .enable_vlan_filter = hclge_enable_vlan_filter,
dc8131d8 11409 .set_vlan_filter = hclge_set_vlan_filter,
46a3df9f 11410 .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
052ece6d 11411 .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
4ed340ab 11412 .reset_event = hclge_reset_event,
123297b7 11413 .get_reset_level = hclge_get_reset_level,
720bd583 11414 .set_default_reset_request = hclge_set_def_reset_request,
09f2af64
PL
11415 .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
11416 .set_channels = hclge_set_channels,
482d2e9c 11417 .get_channels = hclge_get_channels,
77b34110
FL
11418 .get_regs_len = hclge_get_regs_len,
11419 .get_regs = hclge_get_regs,
07f8e940 11420 .set_led_id = hclge_set_led_id,
0979aa0b 11421 .get_link_mode = hclge_get_link_mode,
dd74f815
JS
11422 .add_fd_entry = hclge_add_fd_entry,
11423 .del_fd_entry = hclge_del_fd_entry,
6871af29 11424 .del_all_fd_entries = hclge_del_all_fd_entries,
05c2314f
JS
11425 .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
11426 .get_fd_rule_info = hclge_get_fd_rule_info,
11427 .get_fd_all_rules = hclge_get_all_rules,
c17852a8 11428 .enable_fd = hclge_enable_fd,
d93ed94f 11429 .add_arfs_entry = hclge_add_fd_entry_by_arfs,
3c666b58 11430 .dbg_run_cmd = hclge_dbg_run_cmd,
381c356e 11431 .handle_hw_ras_error = hclge_handle_hw_ras_error,
4d60291b
HT
11432 .get_hw_reset_stat = hclge_get_hw_reset_stat,
11433 .ae_dev_resetting = hclge_ae_dev_resetting,
11434 .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
5c9f6b39 11435 .set_gro_en = hclge_gro_en,
0c29d191 11436 .get_global_queue_id = hclge_covert_handle_qid_global,
8cdb992f 11437 .set_timer_task = hclge_set_timer_task,
c8a8045b
HT
11438 .mac_connect_phy = hclge_mac_connect_phy,
11439 .mac_disconnect_phy = hclge_mac_disconnect_phy,
6430f744
YM
11440 .get_vf_config = hclge_get_vf_config,
11441 .set_vf_link_state = hclge_set_vf_link_state,
22044f95 11442 .set_vf_spoofchk = hclge_set_vf_spoofchk,
e196ec75 11443 .set_vf_trust = hclge_set_vf_trust,
ee9e4424 11444 .set_vf_rate = hclge_set_vf_rate,
8e6de441 11445 .set_vf_mac = hclge_set_vf_mac,
cb10228d 11446 .get_module_eeprom = hclge_get_module_eeprom,
a4de0228 11447 .get_cmdq_stat = hclge_get_cmdq_stat,
46a3df9f
S
11448};
11449
11450static struct hnae3_ae_algo ae_algo = {
11451 .ops = &hclge_ops,
46a3df9f
S
11452 .pdev_id_table = ae_algo_pci_tbl,
11453};
11454
11455static int hclge_init(void)
11456{
11457 pr_info("%s is initializing\n", HCLGE_NAME);
11458
16deaef2 11459 hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
0ea68902
YL
11460 if (!hclge_wq) {
11461 pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
11462 return -ENOMEM;
11463 }
11464
854cf33a
FL
11465 hnae3_register_ae_algo(&ae_algo);
11466
11467 return 0;
46a3df9f
S
11468}
11469
11470static void hclge_exit(void)
11471{
11472 hnae3_unregister_ae_algo(&ae_algo);
0ea68902 11473 destroy_workqueue(hclge_wq);
46a3df9f
S
11474}
11475module_init(hclge_init);
11476module_exit(hclge_exit);
11477
11478MODULE_LICENSE("GPL");
11479MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
11480MODULE_DESCRIPTION("HCLGE Driver");
11481MODULE_VERSION(HCLGE_MOD_VERSION);