drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1// SPDX-License-Identifier: GPL-2.0+
2// Copyright (c) 2016-2017 Hisilicon Limited.
3
4#include <linux/acpi.h>
5#include <linux/device.h>
6#include <linux/etherdevice.h>
7#include <linux/init.h>
8#include <linux/interrupt.h>
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/netdevice.h>
12#include <linux/pci.h>
13#include <linux/platform_device.h>
14#include <linux/if_vlan.h>
15#include <linux/crash_dump.h>
16#include <net/rtnetlink.h>
17#include "hclge_cmd.h"
18#include "hclge_dcb.h"
19#include "hclge_main.h"
20#include "hclge_mbx.h"
21#include "hclge_mdio.h"
22#include "hclge_tm.h"
23#include "hclge_err.h"
24#include "hnae3.h"
25
26#define HCLGE_NAME "hclge"
27#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
29
30#define HCLGE_BUF_SIZE_UNIT 256U
31#define HCLGE_BUF_MUL_BY 2
32#define HCLGE_BUF_DIV_BY 2
33#define NEED_RESERVE_TC_NUM 2
34#define BUF_MAX_PERCENT 100
35#define BUF_RESERVE_PERCENT 90
36
37#define HCLGE_RESET_MAX_FAIL_CNT 5
38#define HCLGE_RESET_SYNC_TIME 100
39#define HCLGE_PF_RESET_SYNC_TIME 20
40#define HCLGE_PF_RESET_SYNC_CNT 1500
41
42/* Get DFX BD number offset */
43#define HCLGE_DFX_BIOS_BD_OFFSET 1
44#define HCLGE_DFX_SSU_0_BD_OFFSET 2
45#define HCLGE_DFX_SSU_1_BD_OFFSET 3
46#define HCLGE_DFX_IGU_BD_OFFSET 4
47#define HCLGE_DFX_RPU_0_BD_OFFSET 5
48#define HCLGE_DFX_RPU_1_BD_OFFSET 6
49#define HCLGE_DFX_NCSI_BD_OFFSET 7
50#define HCLGE_DFX_RTC_BD_OFFSET 8
51#define HCLGE_DFX_PPP_BD_OFFSET 9
52#define HCLGE_DFX_RCB_BD_OFFSET 10
53#define HCLGE_DFX_TQP_BD_OFFSET 11
54#define HCLGE_DFX_SSU_2_BD_OFFSET 12
55
56#define HCLGE_LINK_STATUS_MS 10
57
58#define HCLGE_VF_VPORT_START_NUM 1
59
60static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
61static int hclge_init_vlan_config(struct hclge_dev *hdev);
62static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
63static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
64static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
65static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
66 u16 *allocated_size, bool is_alloc);
67static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
68static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
69static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
70 unsigned long *addr);
71static int hclge_set_default_loopback(struct hclge_dev *hdev);
72
73static struct hnae3_ae_algo ae_algo;
74
75static struct workqueue_struct *hclge_wq;
76
77static const struct pci_device_id ae_algo_pci_tbl[] = {
78 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
79 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
80 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
81 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
82 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
83 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
84 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
85 /* required last entry */
86 {0, }
87};
88
89MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
90
91static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
92 HCLGE_CMDQ_TX_ADDR_H_REG,
93 HCLGE_CMDQ_TX_DEPTH_REG,
94 HCLGE_CMDQ_TX_TAIL_REG,
95 HCLGE_CMDQ_TX_HEAD_REG,
96 HCLGE_CMDQ_RX_ADDR_L_REG,
97 HCLGE_CMDQ_RX_ADDR_H_REG,
98 HCLGE_CMDQ_RX_DEPTH_REG,
99 HCLGE_CMDQ_RX_TAIL_REG,
100 HCLGE_CMDQ_RX_HEAD_REG,
101 HCLGE_VECTOR0_CMDQ_SRC_REG,
102 HCLGE_CMDQ_INTR_STS_REG,
103 HCLGE_CMDQ_INTR_EN_REG,
104 HCLGE_CMDQ_INTR_GEN_REG};
105
106static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
107 HCLGE_VECTOR0_OTER_EN_REG,
108 HCLGE_MISC_RESET_STS_REG,
109 HCLGE_MISC_VECTOR_INT_STS,
110 HCLGE_GLOBAL_RESET_REG,
111 HCLGE_FUN_RST_ING,
112 HCLGE_GRO_EN_REG};
113
114static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
115 HCLGE_RING_RX_ADDR_H_REG,
116 HCLGE_RING_RX_BD_NUM_REG,
117 HCLGE_RING_RX_BD_LENGTH_REG,
118 HCLGE_RING_RX_MERGE_EN_REG,
119 HCLGE_RING_RX_TAIL_REG,
120 HCLGE_RING_RX_HEAD_REG,
121 HCLGE_RING_RX_FBD_NUM_REG,
122 HCLGE_RING_RX_OFFSET_REG,
123 HCLGE_RING_RX_FBD_OFFSET_REG,
124 HCLGE_RING_RX_STASH_REG,
125 HCLGE_RING_RX_BD_ERR_REG,
126 HCLGE_RING_TX_ADDR_L_REG,
127 HCLGE_RING_TX_ADDR_H_REG,
128 HCLGE_RING_TX_BD_NUM_REG,
129 HCLGE_RING_TX_PRIORITY_REG,
130 HCLGE_RING_TX_TC_REG,
131 HCLGE_RING_TX_MERGE_EN_REG,
132 HCLGE_RING_TX_TAIL_REG,
133 HCLGE_RING_TX_HEAD_REG,
134 HCLGE_RING_TX_FBD_NUM_REG,
135 HCLGE_RING_TX_OFFSET_REG,
136 HCLGE_RING_TX_EBD_NUM_REG,
137 HCLGE_RING_TX_EBD_OFFSET_REG,
138 HCLGE_RING_TX_BD_ERR_REG,
139 HCLGE_RING_EN_REG};
140
141static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
142 HCLGE_TQP_INTR_GL0_REG,
143 HCLGE_TQP_INTR_GL1_REG,
144 HCLGE_TQP_INTR_GL2_REG,
145 HCLGE_TQP_INTR_RL_REG};
146
147static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
148 "App Loopback test",
149 "Serdes serial Loopback test",
150 "Serdes parallel Loopback test",
151 "Phy Loopback test"
152};
153
154static const struct hclge_comm_stats_str g_mac_stats_string[] = {
155 {"mac_tx_mac_pause_num",
156 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
157 {"mac_rx_mac_pause_num",
158 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
159 {"mac_tx_control_pkt_num",
160 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
161 {"mac_rx_control_pkt_num",
162 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
163 {"mac_tx_pfc_pkt_num",
164 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
165 {"mac_tx_pfc_pri0_pkt_num",
166 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
167 {"mac_tx_pfc_pri1_pkt_num",
168 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
169 {"mac_tx_pfc_pri2_pkt_num",
170 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
171 {"mac_tx_pfc_pri3_pkt_num",
172 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
173 {"mac_tx_pfc_pri4_pkt_num",
174 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
175 {"mac_tx_pfc_pri5_pkt_num",
176 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
177 {"mac_tx_pfc_pri6_pkt_num",
178 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
179 {"mac_tx_pfc_pri7_pkt_num",
180 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
181 {"mac_rx_pfc_pkt_num",
182 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
183 {"mac_rx_pfc_pri0_pkt_num",
184 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
185 {"mac_rx_pfc_pri1_pkt_num",
186 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
187 {"mac_rx_pfc_pri2_pkt_num",
188 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
189 {"mac_rx_pfc_pri3_pkt_num",
190 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
191 {"mac_rx_pfc_pri4_pkt_num",
192 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
193 {"mac_rx_pfc_pri5_pkt_num",
194 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
195 {"mac_rx_pfc_pri6_pkt_num",
196 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
197 {"mac_rx_pfc_pri7_pkt_num",
198 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
199 {"mac_tx_total_pkt_num",
200 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
201 {"mac_tx_total_oct_num",
202 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
203 {"mac_tx_good_pkt_num",
204 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
205 {"mac_tx_bad_pkt_num",
206 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
207 {"mac_tx_good_oct_num",
208 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
209 {"mac_tx_bad_oct_num",
210 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
211 {"mac_tx_uni_pkt_num",
212 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
213 {"mac_tx_multi_pkt_num",
214 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
215 {"mac_tx_broad_pkt_num",
216 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
217 {"mac_tx_undersize_pkt_num",
218 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
219 {"mac_tx_oversize_pkt_num",
220 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
221 {"mac_tx_64_oct_pkt_num",
222 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
223 {"mac_tx_65_127_oct_pkt_num",
224 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
225 {"mac_tx_128_255_oct_pkt_num",
226 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
227 {"mac_tx_256_511_oct_pkt_num",
228 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
229 {"mac_tx_512_1023_oct_pkt_num",
230 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
231 {"mac_tx_1024_1518_oct_pkt_num",
232 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
233 {"mac_tx_1519_2047_oct_pkt_num",
234 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
235 {"mac_tx_2048_4095_oct_pkt_num",
236 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
237 {"mac_tx_4096_8191_oct_pkt_num",
238 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
239 {"mac_tx_8192_9216_oct_pkt_num",
240 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
241 {"mac_tx_9217_12287_oct_pkt_num",
242 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
243 {"mac_tx_12288_16383_oct_pkt_num",
244 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
245 {"mac_tx_1519_max_good_pkt_num",
246 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
247 {"mac_tx_1519_max_bad_pkt_num",
248 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
249 {"mac_rx_total_pkt_num",
250 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
251 {"mac_rx_total_oct_num",
252 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
253 {"mac_rx_good_pkt_num",
254 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
255 {"mac_rx_bad_pkt_num",
256 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
257 {"mac_rx_good_oct_num",
258 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
259 {"mac_rx_bad_oct_num",
260 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
261 {"mac_rx_uni_pkt_num",
262 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
263 {"mac_rx_multi_pkt_num",
264 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
265 {"mac_rx_broad_pkt_num",
266 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
267 {"mac_rx_undersize_pkt_num",
268 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
269 {"mac_rx_oversize_pkt_num",
270 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
271 {"mac_rx_64_oct_pkt_num",
272 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
273 {"mac_rx_65_127_oct_pkt_num",
274 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
275 {"mac_rx_128_255_oct_pkt_num",
276 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
277 {"mac_rx_256_511_oct_pkt_num",
278 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
279 {"mac_rx_512_1023_oct_pkt_num",
280 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
281 {"mac_rx_1024_1518_oct_pkt_num",
282 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
283 {"mac_rx_1519_2047_oct_pkt_num",
284 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
285 {"mac_rx_2048_4095_oct_pkt_num",
286 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
287 {"mac_rx_4096_8191_oct_pkt_num",
288 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
289 {"mac_rx_8192_9216_oct_pkt_num",
290 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
291 {"mac_rx_9217_12287_oct_pkt_num",
292 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
293 {"mac_rx_12288_16383_oct_pkt_num",
294 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
295 {"mac_rx_1519_max_good_pkt_num",
296 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
297 {"mac_rx_1519_max_bad_pkt_num",
298 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
299
300 {"mac_tx_fragment_pkt_num",
301 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
302 {"mac_tx_undermin_pkt_num",
303 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
304 {"mac_tx_jabber_pkt_num",
305 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
306 {"mac_tx_err_all_pkt_num",
307 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
308 {"mac_tx_from_app_good_pkt_num",
309 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
310 {"mac_tx_from_app_bad_pkt_num",
311 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
312 {"mac_rx_fragment_pkt_num",
313 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
314 {"mac_rx_undermin_pkt_num",
315 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
316 {"mac_rx_jabber_pkt_num",
317 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
318 {"mac_rx_fcs_err_pkt_num",
319 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
320 {"mac_rx_send_app_good_pkt_num",
321 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
322 {"mac_rx_send_app_bad_pkt_num",
323 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
324};
325
326static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
327 {
328 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
329 .ethter_type = cpu_to_le16(ETH_P_LLDP),
330 .mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
331 .i_port_bitmap = 0x1,
332 },
333};
334
335static const u8 hclge_hash_key[] = {
336 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
337 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
338 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
339 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
340 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
341};
342
343static const u32 hclge_dfx_bd_offset_list[] = {
344 HCLGE_DFX_BIOS_BD_OFFSET,
345 HCLGE_DFX_SSU_0_BD_OFFSET,
346 HCLGE_DFX_SSU_1_BD_OFFSET,
347 HCLGE_DFX_IGU_BD_OFFSET,
348 HCLGE_DFX_RPU_0_BD_OFFSET,
349 HCLGE_DFX_RPU_1_BD_OFFSET,
350 HCLGE_DFX_NCSI_BD_OFFSET,
351 HCLGE_DFX_RTC_BD_OFFSET,
352 HCLGE_DFX_PPP_BD_OFFSET,
353 HCLGE_DFX_RCB_BD_OFFSET,
354 HCLGE_DFX_TQP_BD_OFFSET,
355 HCLGE_DFX_SSU_2_BD_OFFSET
356};
357
358static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
359 HCLGE_OPC_DFX_BIOS_COMMON_REG,
360 HCLGE_OPC_DFX_SSU_REG_0,
361 HCLGE_OPC_DFX_SSU_REG_1,
362 HCLGE_OPC_DFX_IGU_EGU_REG,
363 HCLGE_OPC_DFX_RPU_REG_0,
364 HCLGE_OPC_DFX_RPU_REG_1,
365 HCLGE_OPC_DFX_NCSI_REG,
366 HCLGE_OPC_DFX_RTC_REG,
367 HCLGE_OPC_DFX_PPP_REG,
368 HCLGE_OPC_DFX_RCB_REG,
369 HCLGE_OPC_DFX_TQP_REG,
370 HCLGE_OPC_DFX_SSU_REG_2
371};
372
373static const struct key_info meta_data_key_info[] = {
374 { PACKET_TYPE_ID, 6},
375 { IP_FRAGEMENT, 1},
376 { ROCE_TYPE, 1},
377 { NEXT_KEY, 5},
378 { VLAN_NUMBER, 2},
379 { SRC_VPORT, 12},
380 { DST_VPORT, 12},
381 { TUNNEL_PACKET, 1},
382};
383
384static const struct key_info tuple_key_info[] = {
385 { OUTER_DST_MAC, 48},
386 { OUTER_SRC_MAC, 48},
387 { OUTER_VLAN_TAG_FST, 16},
388 { OUTER_VLAN_TAG_SEC, 16},
389 { OUTER_ETH_TYPE, 16},
390 { OUTER_L2_RSV, 16},
391 { OUTER_IP_TOS, 8},
392 { OUTER_IP_PROTO, 8},
393 { OUTER_SRC_IP, 32},
394 { OUTER_DST_IP, 32},
395 { OUTER_L3_RSV, 16},
396 { OUTER_SRC_PORT, 16},
397 { OUTER_DST_PORT, 16},
398 { OUTER_L4_RSV, 32},
399 { OUTER_TUN_VNI, 24},
400 { OUTER_TUN_FLOW_ID, 8},
401 { INNER_DST_MAC, 48},
402 { INNER_SRC_MAC, 48},
403 { INNER_VLAN_TAG_FST, 16},
404 { INNER_VLAN_TAG_SEC, 16},
405 { INNER_ETH_TYPE, 16},
406 { INNER_L2_RSV, 16},
407 { INNER_IP_TOS, 8},
408 { INNER_IP_PROTO, 8},
409 { INNER_SRC_IP, 32},
410 { INNER_DST_IP, 32},
411 { INNER_L3_RSV, 16},
412 { INNER_SRC_PORT, 16},
413 { INNER_DST_PORT, 16},
414 { INNER_L4_RSV, 32},
415};
416
417static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
418{
419#define HCLGE_MAC_CMD_NUM 21
420
421 u64 *data = (u64 *)(&hdev->mac_stats);
422 struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
423 __le64 *desc_data;
424 int i, k, n;
425 int ret;
426
427 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
428 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
429 if (ret) {
430 dev_err(&hdev->pdev->dev,
431 "Get MAC pkt stats fail, status = %d.\n", ret);
432
433 return ret;
434 }
435
436 for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
437 /* for special opcode 0032, only the first desc has the head */
438 if (unlikely(i == 0)) {
439 desc_data = (__le64 *)(&desc[i].data[0]);
440 n = HCLGE_RD_FIRST_STATS_NUM;
441 } else {
442 desc_data = (__le64 *)(&desc[i]);
443 n = HCLGE_RD_OTHER_STATS_NUM;
444 }
445
446 for (k = 0; k < n; k++) {
447 *data += le64_to_cpu(*desc_data);
448 data++;
449 desc_data++;
450 }
451 }
452
453 return 0;
454}
455
456static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
457{
458 u64 *data = (u64 *)(&hdev->mac_stats);
459 struct hclge_desc *desc;
460 __le64 *desc_data;
461 u16 i, k, n;
462 int ret;
463
464 /* This may be called inside atomic sections,
465 * so GFP_ATOMIC is more suitable here
466 */
467 desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
468 if (!desc)
469 return -ENOMEM;
470
471 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
472 ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
473 if (ret) {
474 kfree(desc);
475 return ret;
476 }
477
478 for (i = 0; i < desc_num; i++) {
479 /* for special opcode 0034, only the first desc has the head */
480 if (i == 0) {
481 desc_data = (__le64 *)(&desc[i].data[0]);
482 n = HCLGE_RD_FIRST_STATS_NUM;
483 } else {
484 desc_data = (__le64 *)(&desc[i]);
485 n = HCLGE_RD_OTHER_STATS_NUM;
486 }
487
488 for (k = 0; k < n; k++) {
489 *data += le64_to_cpu(*desc_data);
490 data++;
491 desc_data++;
492 }
493 }
494
495 kfree(desc);
496
497 return 0;
498}
499
500static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
501{
502 struct hclge_desc desc;
503 __le32 *desc_data;
504 u32 reg_num;
505 int ret;
506
507 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
508 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
509 if (ret)
510 return ret;
511
512 desc_data = (__le32 *)(&desc.data[0]);
513 reg_num = le32_to_cpu(*desc_data);
514
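 /* one desc covers the first 3 stats registers; each further desc covers 4, so round the remainder up */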
515 *desc_num = 1 + ((reg_num - 3) >> 2) +
516 (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
517
518 return 0;
519}
520
521static int hclge_mac_update_stats(struct hclge_dev *hdev)
522{
523 u32 desc_num;
524 int ret;
525
526 ret = hclge_mac_query_reg_num(hdev, &desc_num);
527
528 /* The firmware supports the new statistics acquisition method */
529 if (!ret)
530 ret = hclge_mac_update_stats_complete(hdev, desc_num);
531 else if (ret == -EOPNOTSUPP)
532 ret = hclge_mac_update_stats_defective(hdev);
533 else
534 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
535
536 return ret;
537}
538
539static int hclge_tqps_update_stats(struct hnae3_handle *handle)
540{
541 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
542 struct hclge_vport *vport = hclge_get_vport(handle);
543 struct hclge_dev *hdev = vport->back;
544 struct hnae3_queue *queue;
545 struct hclge_desc desc[1];
546 struct hclge_tqp *tqp;
547 int ret, i;
548
549 for (i = 0; i < kinfo->num_tqps; i++) {
550 queue = handle->kinfo.tqp[i];
551 tqp = container_of(queue, struct hclge_tqp, q);
552 /* command : HCLGE_OPC_QUERY_IGU_STAT */
553 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
554 true);
555
556 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
557 ret = hclge_cmd_send(&hdev->hw, desc, 1);
558 if (ret) {
559 dev_err(&hdev->pdev->dev,
560 "Query tqp stat fail, status = %d,queue = %d\n",
561 ret, i);
562 return ret;
563 }
564 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
565 le32_to_cpu(desc[0].data[1]);
566 }
567
568 for (i = 0; i < kinfo->num_tqps; i++) {
569 queue = handle->kinfo.tqp[i];
570 tqp = container_of(queue, struct hclge_tqp, q);
571 /* command : HCLGE_OPC_QUERY_IGU_STAT */
572 hclge_cmd_setup_basic_desc(&desc[0],
573 HCLGE_OPC_QUERY_TX_STATUS,
574 true);
575
576 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
577 ret = hclge_cmd_send(&hdev->hw, desc, 1);
578 if (ret) {
579 dev_err(&hdev->pdev->dev,
580 "Query tqp stat fail, status = %d,queue = %d\n",
581 ret, i);
582 return ret;
583 }
584 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
585 le32_to_cpu(desc[0].data[1]);
586 }
587
588 return 0;
589}
590
591static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
592{
593 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
594 struct hclge_tqp *tqp;
595 u64 *buff = data;
596 int i;
597
598 for (i = 0; i < kinfo->num_tqps; i++) {
599 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
600 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
601 }
602
603 for (i = 0; i < kinfo->num_tqps; i++) {
604 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
605 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
606 }
607
608 return buff;
609}
610
611static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
612{
613 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
614
615 /* each tqp has a TX queue and a RX queue */
616 return kinfo->num_tqps * (2);
617}
618
619static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
620{
621 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
622 u8 *buff = data;
623 int i = 0;
624
625 for (i = 0; i < kinfo->num_tqps; i++) {
626 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
627 struct hclge_tqp, q);
628 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
629 tqp->index);
630 buff = buff + ETH_GSTRING_LEN;
631 }
632
633 for (i = 0; i < kinfo->num_tqps; i++) {
634 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
635 struct hclge_tqp, q);
636 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
637 tqp->index);
638 buff = buff + ETH_GSTRING_LEN;
639 }
640
641 return buff;
642}
643
644static u64 *hclge_comm_get_stats(const void *comm_stats,
645 const struct hclge_comm_stats_str strs[],
646 int size, u64 *data)
647{
648 u64 *buf = data;
649 u32 i;
650
651 for (i = 0; i < size; i++)
652 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
653
654 return buf + size;
655}
656
657static u8 *hclge_comm_get_strings(u32 stringset,
658 const struct hclge_comm_stats_str strs[],
659 int size, u8 *data)
660{
661 char *buff = (char *)data;
662 u32 i;
663
664 if (stringset != ETH_SS_STATS)
665 return buff;
666
667 for (i = 0; i < size; i++) {
668 snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
669 buff = buff + ETH_GSTRING_LEN;
670 }
671
672 return (u8 *)buff;
673}
674
675static void hclge_update_stats_for_all(struct hclge_dev *hdev)
676{
677 struct hnae3_handle *handle;
678 int status;
679
680 handle = &hdev->vport[0].nic;
681 if (handle->client) {
682 status = hclge_tqps_update_stats(handle);
683 if (status) {
684 dev_err(&hdev->pdev->dev,
685 "Update TQPS stats fail, status = %d.\n",
686 status);
687 }
688 }
689
690 status = hclge_mac_update_stats(hdev);
691 if (status)
692 dev_err(&hdev->pdev->dev,
693 "Update MAC stats fail, status = %d.\n", status);
694}
695
696static void hclge_update_stats(struct hnae3_handle *handle,
697 struct net_device_stats *net_stats)
698{
699 struct hclge_vport *vport = hclge_get_vport(handle);
700 struct hclge_dev *hdev = vport->back;
701 int status;
702
703 if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
704 return;
705
706 status = hclge_mac_update_stats(hdev);
707 if (status)
708 dev_err(&hdev->pdev->dev,
709 "Update MAC stats fail, status = %d.\n",
710 status);
711
712 status = hclge_tqps_update_stats(handle);
713 if (status)
714 dev_err(&hdev->pdev->dev,
715 "Update TQPS stats fail, status = %d.\n",
716 status);
717
718 clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
719}
720
721static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
722{
723#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
724 HNAE3_SUPPORT_PHY_LOOPBACK |\
725 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
726 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
727
728 struct hclge_vport *vport = hclge_get_vport(handle);
729 struct hclge_dev *hdev = vport->back;
730 int count = 0;
731
732 /* Loopback test support rules:
733 * mac: only GE mode supports it
734 * serdes: all mac modes support it, including GE/XGE/LGE/CGE
735 * phy: only supported when a phy device exists on the board
736 */
737 if (stringset == ETH_SS_TEST) {
738 /* clear loopback bit flags at first */
739 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
740 if (hdev->pdev->revision >= 0x21 ||
741 hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
742 hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
743 hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
744 count += 1;
745 handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
746 }
747
748 count += 2;
749 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
750 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
751
752 if (hdev->hw.mac.phydev) {
753 count += 1;
754 handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
755 }
756
757 } else if (stringset == ETH_SS_STATS) {
758 count = ARRAY_SIZE(g_mac_stats_string) +
759 hclge_tqps_get_sset_count(handle, stringset);
760 }
761
762 return count;
763}
764
765static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
766 u8 *data)
767{
768 u8 *p = (char *)data;
769 int size;
770
771 if (stringset == ETH_SS_STATS) {
772 size = ARRAY_SIZE(g_mac_stats_string);
773 p = hclge_comm_get_strings(stringset, g_mac_stats_string,
774 size, p);
775 p = hclge_tqps_get_strings(handle, p);
776 } else if (stringset == ETH_SS_TEST) {
777 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
778 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
779 ETH_GSTRING_LEN);
780 p += ETH_GSTRING_LEN;
781 }
782 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
783 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
784 ETH_GSTRING_LEN);
785 p += ETH_GSTRING_LEN;
786 }
787 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
788 memcpy(p,
789 hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
790 ETH_GSTRING_LEN);
791 p += ETH_GSTRING_LEN;
792 }
793 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
794 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
795 ETH_GSTRING_LEN);
796 p += ETH_GSTRING_LEN;
797 }
798 }
799}
800
801static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
802{
803 struct hclge_vport *vport = hclge_get_vport(handle);
804 struct hclge_dev *hdev = vport->back;
805 u64 *p;
806
807 p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
808 ARRAY_SIZE(g_mac_stats_string), data);
809 p = hclge_tqps_get_stats(handle, p);
810}
811
812static void hclge_get_mac_stat(struct hnae3_handle *handle,
813 struct hns3_mac_stats *mac_stats)
814{
815 struct hclge_vport *vport = hclge_get_vport(handle);
816 struct hclge_dev *hdev = vport->back;
817
818 hclge_update_stats(handle, NULL);
819
820 mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
821 mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
822}
823
824static int hclge_parse_func_status(struct hclge_dev *hdev,
825 struct hclge_func_status_cmd *status)
826{
827 if (!(status->pf_state & HCLGE_PF_STATE_DONE))
828 return -EINVAL;
829
830 /* Set the pf to main pf */
831 if (status->pf_state & HCLGE_PF_STATE_MAIN)
832 hdev->flag |= HCLGE_FLAG_MAIN;
833 else
834 hdev->flag &= ~HCLGE_FLAG_MAIN;
835
836 return 0;
837}
838
839static int hclge_query_function_status(struct hclge_dev *hdev)
840{
841#define HCLGE_QUERY_MAX_CNT 5
842
843 struct hclge_func_status_cmd *req;
844 struct hclge_desc desc;
845 int timeout = 0;
846 int ret;
847
848 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
849 req = (struct hclge_func_status_cmd *)desc.data;
850
851 do {
852 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
853 if (ret) {
854 dev_err(&hdev->pdev->dev,
855 "query function status failed %d.\n", ret);
856 return ret;
857 }
858
859 /* Check pf reset is done */
860 if (req->pf_state)
861 break;
862 usleep_range(1000, 2000);
863 } while (timeout++ < HCLGE_QUERY_MAX_CNT);
864
865 return hclge_parse_func_status(hdev, req);
866}
867
868static int hclge_query_pf_resource(struct hclge_dev *hdev)
869{
870 struct hclge_pf_res_cmd *req;
871 struct hclge_desc desc;
872 int ret;
873
874 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
875 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
876 if (ret) {
877 dev_err(&hdev->pdev->dev,
878 "query pf resource failed %d.\n", ret);
879 return ret;
880 }
881
882 req = (struct hclge_pf_res_cmd *)desc.data;
883 hdev->num_tqps = le16_to_cpu(req->tqp_num);
884 hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
885
886 if (req->tx_buf_size)
887 hdev->tx_buf_size =
888 le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
889 else
890 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
891
892 hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
893
894 if (req->dv_buf_size)
895 hdev->dv_buf_size =
896 le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
897 else
898 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
899
900 hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
901
902 if (hnae3_dev_roce_supported(hdev)) {
903 hdev->roce_base_msix_offset =
904 hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
905 HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
906 hdev->num_roce_msi =
907 hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
908 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
909
910 /* nic's msix number is always equal to the roce's. */
911 hdev->num_nic_msi = hdev->num_roce_msi;
912
913 /* PF should have NIC vectors and Roce vectors,
914 * NIC vectors are queued before Roce vectors.
915 */
916 hdev->num_msi = hdev->num_roce_msi +
917 hdev->roce_base_msix_offset;
918 } else {
919 hdev->num_msi =
920 hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
921 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
922
923 hdev->num_nic_msi = hdev->num_msi;
924 }
925
926 if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
927 dev_err(&hdev->pdev->dev,
928 "Just %u msi resources, not enough for pf(min:2).\n",
929 hdev->num_nic_msi);
930 return -EINVAL;
931 }
932
933 return 0;
934}
935
936static int hclge_parse_speed(int speed_cmd, int *speed)
937{
938 switch (speed_cmd) {
939 case 6:
940 *speed = HCLGE_MAC_SPEED_10M;
941 break;
942 case 7:
943 *speed = HCLGE_MAC_SPEED_100M;
944 break;
945 case 0:
946 *speed = HCLGE_MAC_SPEED_1G;
947 break;
948 case 1:
949 *speed = HCLGE_MAC_SPEED_10G;
950 break;
951 case 2:
952 *speed = HCLGE_MAC_SPEED_25G;
953 break;
954 case 3:
955 *speed = HCLGE_MAC_SPEED_40G;
956 break;
957 case 4:
958 *speed = HCLGE_MAC_SPEED_50G;
959 break;
960 case 5:
961 *speed = HCLGE_MAC_SPEED_100G;
962 break;
963 default:
964 return -EINVAL;
965 }
966
967 return 0;
968}
969
970static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
971{
972 struct hclge_vport *vport = hclge_get_vport(handle);
973 struct hclge_dev *hdev = vport->back;
974 u32 speed_ability = hdev->hw.mac.speed_ability;
975 u32 speed_bit = 0;
976
977 switch (speed) {
978 case HCLGE_MAC_SPEED_10M:
979 speed_bit = HCLGE_SUPPORT_10M_BIT;
980 break;
981 case HCLGE_MAC_SPEED_100M:
982 speed_bit = HCLGE_SUPPORT_100M_BIT;
983 break;
984 case HCLGE_MAC_SPEED_1G:
985 speed_bit = HCLGE_SUPPORT_1G_BIT;
986 break;
987 case HCLGE_MAC_SPEED_10G:
988 speed_bit = HCLGE_SUPPORT_10G_BIT;
989 break;
990 case HCLGE_MAC_SPEED_25G:
991 speed_bit = HCLGE_SUPPORT_25G_BIT;
992 break;
993 case HCLGE_MAC_SPEED_40G:
994 speed_bit = HCLGE_SUPPORT_40G_BIT;
995 break;
996 case HCLGE_MAC_SPEED_50G:
997 speed_bit = HCLGE_SUPPORT_50G_BIT;
998 break;
999 case HCLGE_MAC_SPEED_100G:
1000 speed_bit = HCLGE_SUPPORT_100G_BIT;
1001 break;
1002 default:
1003 return -EINVAL;
1004 }
1005
1006 if (speed_bit & speed_ability)
1007 return 0;
1008
1009 return -EINVAL;
1010}
1011
1012static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
1013{
1014 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1015 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1016 mac->supported);
1017 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1018 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1019 mac->supported);
1020 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1021 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1022 mac->supported);
1023 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1024 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1025 mac->supported);
1026 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1027 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1028 mac->supported);
1029}
1030
1031static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
1032{
1033 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1034 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1035 mac->supported);
1036 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1037 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1038 mac->supported);
1039 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1040 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1041 mac->supported);
1042 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1043 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1044 mac->supported);
1045 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1046 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1047 mac->supported);
1048}
1049
1050static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
1051{
1052 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1053 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1054 mac->supported);
1055 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1056 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1057 mac->supported);
1058 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1059 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1060 mac->supported);
1061 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1062 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1063 mac->supported);
1064 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1065 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1066 mac->supported);
1067}
1068
1069static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
1070{
1071 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1072 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1073 mac->supported);
1074 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1075 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1076 mac->supported);
1077 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1078 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1079 mac->supported);
1080 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1081 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1082 mac->supported);
1083 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1084 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1085 mac->supported);
1086 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1087 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1088 mac->supported);
1089}
1090
1091static void hclge_convert_setting_fec(struct hclge_mac *mac)
1092{
1093 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1094 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1095
1096 switch (mac->speed) {
1097 case HCLGE_MAC_SPEED_10G:
1098 case HCLGE_MAC_SPEED_40G:
1099 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1100 mac->supported);
1101 mac->fec_ability =
1102 BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1103 break;
1104 case HCLGE_MAC_SPEED_25G:
1105 case HCLGE_MAC_SPEED_50G:
1106 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1107 mac->supported);
1108 mac->fec_ability =
1109 BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1110 BIT(HNAE3_FEC_AUTO);
1111 break;
1112 case HCLGE_MAC_SPEED_100G:
1113 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1114 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1115 break;
1116 default:
1117 mac->fec_ability = 0;
1118 break;
1119 }
1120}
1121
1122static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1123 u8 speed_ability)
1124{
1125 struct hclge_mac *mac = &hdev->hw.mac;
1126
1127 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1128 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1129 mac->supported);
1130
1131 hclge_convert_setting_sr(mac, speed_ability);
1132 hclge_convert_setting_lr(mac, speed_ability);
1133 hclge_convert_setting_cr(mac, speed_ability);
1134 if (hdev->pdev->revision >= 0x21)
1135 hclge_convert_setting_fec(mac);
1136
1137 linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1138 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1139 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1140}
1141
1142static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1143 u8 speed_ability)
1144{
1145 struct hclge_mac *mac = &hdev->hw.mac;
1146
1147 hclge_convert_setting_kr(mac, speed_ability);
1148 if (hdev->pdev->revision >= 0x21)
1149 hclge_convert_setting_fec(mac);
1150 linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1151 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1152 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1153}
1154
1155static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1156 u8 speed_ability)
1157{
1158 unsigned long *supported = hdev->hw.mac.supported;
1159
1160 /* default to support all speed for GE port */
1161 if (!speed_ability)
1162 speed_ability = HCLGE_SUPPORT_GE;
1163
1164 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1165 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1166 supported);
1167
1168 if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1169 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1170 supported);
1171 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1172 supported);
1173 }
1174
1175 if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1176 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1177 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1178 }
1179
1180 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1181 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1182 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1183 linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1184}
1185
1186static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
1187{
1188 u8 media_type = hdev->hw.mac.media_type;
1189
1190 if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1191 hclge_parse_fiber_link_mode(hdev, speed_ability);
1192 else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1193 hclge_parse_copper_link_mode(hdev, speed_ability);
1194 else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1195 hclge_parse_backplane_link_mode(hdev, speed_ability);
1196}
1197
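/* returns the highest speed present in speed_ability; the fallthrough at the end defaults to 1G */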
1198static u32 hclge_get_max_speed(u8 speed_ability)
1199{
1200 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1201 return HCLGE_MAC_SPEED_100G;
1202
1203 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1204 return HCLGE_MAC_SPEED_50G;
1205
1206 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1207 return HCLGE_MAC_SPEED_40G;
1208
1209 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1210 return HCLGE_MAC_SPEED_25G;
1211
1212 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1213 return HCLGE_MAC_SPEED_10G;
1214
1215 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1216 return HCLGE_MAC_SPEED_1G;
1217
1218 if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1219 return HCLGE_MAC_SPEED_100M;
1220
1221 if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1222 return HCLGE_MAC_SPEED_10M;
1223
1224 return HCLGE_MAC_SPEED_1G;
1225}
1226
1227static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1228{
1229 struct hclge_cfg_param_cmd *req;
1230 u64 mac_addr_tmp_high;
1231 u64 mac_addr_tmp;
1232 unsigned int i;
1233
1234 req = (struct hclge_cfg_param_cmd *)desc[0].data;
1235
1236 /* get the configuration */
1237 cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1238 HCLGE_CFG_VMDQ_M,
1239 HCLGE_CFG_VMDQ_S);
1240 cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1241 HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1242 cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1243 HCLGE_CFG_TQP_DESC_N_M,
1244 HCLGE_CFG_TQP_DESC_N_S);
1245
1246 cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1247 HCLGE_CFG_PHY_ADDR_M,
1248 HCLGE_CFG_PHY_ADDR_S);
1249 cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1250 HCLGE_CFG_MEDIA_TP_M,
1251 HCLGE_CFG_MEDIA_TP_S);
1252 cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1253 HCLGE_CFG_RX_BUF_LEN_M,
1254 HCLGE_CFG_RX_BUF_LEN_S);
1255 /* get mac_address */
1256 mac_addr_tmp = __le32_to_cpu(req->param[2]);
1257 mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1258 HCLGE_CFG_MAC_ADDR_H_M,
1259 HCLGE_CFG_MAC_ADDR_H_S);
1260
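 /* place the high MAC address bits from param[3] above the low 32 bits taken from param[2] */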
1261 mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1262
1263 cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1264 HCLGE_CFG_DEFAULT_SPEED_M,
1265 HCLGE_CFG_DEFAULT_SPEED_S);
1266 cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1267 HCLGE_CFG_RSS_SIZE_M,
1268 HCLGE_CFG_RSS_SIZE_S);
1269
1270 for (i = 0; i < ETH_ALEN; i++)
1271 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1272
1273 req = (struct hclge_cfg_param_cmd *)desc[1].data;
1274 cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1275
1276 cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1277 HCLGE_CFG_SPEED_ABILITY_M,
1278 HCLGE_CFG_SPEED_ABILITY_S);
1279 cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1280 HCLGE_CFG_UMV_TBL_SPACE_M,
1281 HCLGE_CFG_UMV_TBL_SPACE_S);
1282 if (!cfg->umv_space)
1283 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1284}
1285
1286/* hclge_get_cfg: query the static parameter from flash
1287 * @hdev: pointer to struct hclge_dev
1288 * @hcfg: the config structure to be fetched
1289 */
1290static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1291{
1292 struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1293 struct hclge_cfg_param_cmd *req;
1294 unsigned int i;
1295 int ret;
1296
1297 for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1298 u32 offset = 0;
1299
1300 req = (struct hclge_cfg_param_cmd *)desc[i].data;
1301 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1302 true);
1303 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1304 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
46a3df9f 1305 /* Len should be united by 4 bytes when send to hardware */
1306 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1307 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1308 req->offset = cpu_to_le32(offset);
1309 }
1310
1311 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1312 if (ret) {
1313 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1314 return ret;
1315 }
1316
1317 hclge_parse_cfg(hcfg, desc);
1318
1319 return 0;
1320}
1321
1322static int hclge_get_cap(struct hclge_dev *hdev)
1323{
1324 int ret;
1325
1326 ret = hclge_query_function_status(hdev);
1327 if (ret) {
1328 dev_err(&hdev->pdev->dev,
1329 "query function status error %d.\n", ret);
1330 return ret;
1331 }
1332
1333 /* get pf resource */
1334 return hclge_query_pf_resource(hdev);
1335}
1336
1337static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1338{
1339#define HCLGE_MIN_TX_DESC 64
1340#define HCLGE_MIN_RX_DESC 64
1341
1342 if (!is_kdump_kernel())
1343 return;
1344
1345 dev_info(&hdev->pdev->dev,
1346 "Running kdump kernel. Using minimal resources\n");
1347
1348 /* minimal queue pairs equals to the number of vports */
1349 hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1350 hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1351 hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1352}
1353
1354static int hclge_configure(struct hclge_dev *hdev)
1355{
1356 struct hclge_cfg cfg;
1357 unsigned int i;
1358 int ret;
1359
1360 ret = hclge_get_cfg(hdev, &cfg);
1361 if (ret) {
1362 dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
1363 return ret;
1364 }
1365
1366 hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1367 hdev->base_tqp_pid = 0;
1368 hdev->rss_size_max = cfg.rss_size_max;
1369 hdev->rx_buf_len = cfg.rx_buf_len;
1370 ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1371 hdev->hw.mac.media_type = cfg.media_type;
1372 hdev->hw.mac.phy_addr = cfg.phy_addr;
1373 hdev->num_tx_desc = cfg.tqp_desc_num;
1374 hdev->num_rx_desc = cfg.tqp_desc_num;
1375 hdev->tm_info.num_pg = 1;
1376 hdev->tc_max = cfg.tc_num;
1377 hdev->tm_info.hw_pfc_map = 0;
1378 hdev->wanted_umv_size = cfg.umv_space;
1379
1380 if (hnae3_dev_fd_supported(hdev)) {
1381 hdev->fd_en = true;
1382 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1383 }
1384
1385 ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1386 if (ret) {
1387 dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
1388 return ret;
1389 }
1390
1391 hclge_parse_link_mode(hdev, cfg.speed_ability);
1392
1393 hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1394
1395 if ((hdev->tc_max > HNAE3_MAX_TC) ||
1396 (hdev->tc_max < 1)) {
1397 dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
1398 hdev->tc_max);
1399 hdev->tc_max = 1;
1400 }
1401
1402 /* Dev does not support DCB */
1403 if (!hnae3_dev_dcb_supported(hdev)) {
1404 hdev->tc_max = 1;
1405 hdev->pfc_max = 0;
1406 } else {
1407 hdev->pfc_max = hdev->tc_max;
1408 }
1409
1410 hdev->tm_info.num_tc = 1;
1411
1412 /* Currently non-contiguous tc is not supported */
1413 for (i = 0; i < hdev->tm_info.num_tc; i++)
1414 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1415
1416 hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1417
1418 hclge_init_kdump_kernel_config(hdev);
1419
1420 /* Set the init affinity based on pci func number */
1421 i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
1422 i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
1423 cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
1424 &hdev->affinity_mask);
1425
1426 return ret;
1427}
1428
1429static int hclge_config_tso(struct hclge_dev *hdev, unsigned int tso_mss_min,
1430 unsigned int tso_mss_max)
1431{
1432 struct hclge_cfg_tso_status_cmd *req;
1433 struct hclge_desc desc;
1434 u16 tso_mss;
1435
1436 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1437
1438 req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1439
1440 tso_mss = 0;
1441 hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1442 HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1443 req->tso_mss_min = cpu_to_le16(tso_mss);
1444
1445 tso_mss = 0;
1446 hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1447 HCLGE_TSO_MSS_MIN_S, tso_mss_max);
1448 req->tso_mss_max = cpu_to_le16(tso_mss);
1449
1450 return hclge_cmd_send(&hdev->hw, &desc, 1);
1451}
1452
1453static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1454{
1455 struct hclge_cfg_gro_status_cmd *req;
1456 struct hclge_desc desc;
1457 int ret;
1458
1459 if (!hnae3_dev_gro_supported(hdev))
1460 return 0;
1461
1462 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1463 req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1464
1465 req->gro_en = cpu_to_le16(en ? 1 : 0);
1466
1467 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1468 if (ret)
1469 dev_err(&hdev->pdev->dev,
1470 "GRO hardware config cmd failed, ret = %d\n", ret);
1471
1472 return ret;
1473}
1474
1475static int hclge_alloc_tqps(struct hclge_dev *hdev)
1476{
1477 struct hclge_tqp *tqp;
1478 int i;
1479
1480 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1481 sizeof(struct hclge_tqp), GFP_KERNEL);
1482 if (!hdev->htqp)
1483 return -ENOMEM;
1484
1485 tqp = hdev->htqp;
1486
1487 for (i = 0; i < hdev->num_tqps; i++) {
1488 tqp->dev = &hdev->pdev->dev;
1489 tqp->index = i;
1490
1491 tqp->q.ae_algo = &ae_algo;
1492 tqp->q.buf_size = hdev->rx_buf_len;
1493 tqp->q.tx_desc_num = hdev->num_tx_desc;
1494 tqp->q.rx_desc_num = hdev->num_rx_desc;
1495 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1496 i * HCLGE_TQP_REG_SIZE;
1497
1498 tqp++;
1499 }
1500
1501 return 0;
1502}
1503
1504static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1505 u16 tqp_pid, u16 tqp_vid, bool is_pf)
1506{
1507 struct hclge_tqp_map_cmd *req;
1508 struct hclge_desc desc;
1509 int ret;
1510
1511 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1512
1513 req = (struct hclge_tqp_map_cmd *)desc.data;
1514 req->tqp_id = cpu_to_le16(tqp_pid);
1515 req->tqp_vf = func_id;
1516 req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1517 if (!is_pf)
1518 req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1519 req->tqp_vid = cpu_to_le16(tqp_vid);
1520
1521 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1522 if (ret)
1523 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1524
1525 return ret;
1526}
1527
1528static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1529{
1530 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1531 struct hclge_dev *hdev = vport->back;
1532 int i, alloced;
1533
1534 for (i = 0, alloced = 0; i < hdev->num_tqps &&
1535 alloced < num_tqps; i++) {
1536 if (!hdev->htqp[i].alloced) {
1537 hdev->htqp[i].q.handle = &vport->nic;
1538 hdev->htqp[i].q.tqp_index = alloced;
1539 hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1540 hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1541 kinfo->tqp[alloced] = &hdev->htqp[i].q;
1542 hdev->htqp[i].alloced = true;
1543 alloced++;
1544 }
1545 }
1546 vport->alloc_tqps = alloced;
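 /* rss_size is capped by both the hardware maximum and the queues available per TC */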
1547 kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1548 vport->alloc_tqps / hdev->tm_info.num_tc);
1549
1550 /* ensure one to one mapping between irq and queue at default */
1551 kinfo->rss_size = min_t(u16, kinfo->rss_size,
1552 (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1553
1554 return 0;
1555}
1556
1557static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1558 u16 num_tx_desc, u16 num_rx_desc)
1559
1560{
1561 struct hnae3_handle *nic = &vport->nic;
1562 struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1563 struct hclge_dev *hdev = vport->back;
1564 int ret;
1565
1566 kinfo->num_tx_desc = num_tx_desc;
1567 kinfo->num_rx_desc = num_rx_desc;
1568
1569 kinfo->rx_buf_len = hdev->rx_buf_len;
1570
1571 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1572 sizeof(struct hnae3_queue *), GFP_KERNEL);
1573 if (!kinfo->tqp)
1574 return -ENOMEM;
1575
1576 ret = hclge_assign_tqp(vport, num_tqps);
1577 if (ret)
1578 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1579
1580 return ret;
1581}
1582
1583static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1584 struct hclge_vport *vport)
1585{
1586 struct hnae3_handle *nic = &vport->nic;
1587 struct hnae3_knic_private_info *kinfo;
1588 u16 i;
1589
1590 kinfo = &nic->kinfo;
1591 for (i = 0; i < vport->alloc_tqps; i++) {
1592 struct hclge_tqp *q =
1593 container_of(kinfo->tqp[i], struct hclge_tqp, q);
1594 bool is_pf;
1595 int ret;
1596
1597 is_pf = !(vport->vport_id);
1598 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1599 i, is_pf);
1600 if (ret)
1601 return ret;
1602 }
1603
1604 return 0;
1605}
1606
1607static int hclge_map_tqp(struct hclge_dev *hdev)
1608{
1609 struct hclge_vport *vport = hdev->vport;
1610 u16 i, num_vport;
1611
1612 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1613 for (i = 0; i < num_vport; i++) {
1614 int ret;
1615
1616 ret = hclge_map_tqp_to_vport(hdev, vport);
1617 if (ret)
1618 return ret;
1619
1620 vport++;
1621 }
1622
1623 return 0;
1624}
1625
1626static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1627{
1628 struct hnae3_handle *nic = &vport->nic;
1629 struct hclge_dev *hdev = vport->back;
1630 int ret;
1631
1632 nic->pdev = hdev->pdev;
1633 nic->ae_algo = &ae_algo;
1634 nic->numa_node_mask = hdev->numa_node_mask;
1635
1636 ret = hclge_knic_setup(vport, num_tqps,
1637 hdev->num_tx_desc, hdev->num_rx_desc);
1638 if (ret)
1639 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1640
1641 return ret;
1642}
1643
1644static int hclge_alloc_vport(struct hclge_dev *hdev)
1645{
1646 struct pci_dev *pdev = hdev->pdev;
1647 struct hclge_vport *vport;
1648 u32 tqp_main_vport;
1649 u32 tqp_per_vport;
1650 int num_vport, i;
1651 int ret;
1652
1653 /* We need to alloc a vport for main NIC of PF */
1654 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1655
1656 if (hdev->num_tqps < num_vport) {
1657 dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
1658 hdev->num_tqps, num_vport);
1659 return -EINVAL;
1660 }
1661
1662 /* Alloc the same number of TQPs for every vport */
1663 tqp_per_vport = hdev->num_tqps / num_vport;
1664 tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1665
1666 vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1667 GFP_KERNEL);
1668 if (!vport)
1669 return -ENOMEM;
1670
1671 hdev->vport = vport;
1672 hdev->num_alloc_vport = num_vport;
1673
1674 if (IS_ENABLED(CONFIG_PCI_IOV))
1675 hdev->num_alloc_vfs = hdev->num_req_vfs;
1676
1677 for (i = 0; i < num_vport; i++) {
1678 vport->back = hdev;
1679 vport->vport_id = i;
1680 vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1681 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1682 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1683 vport->rxvlan_cfg.rx_vlan_offload_en = true;
c6075b19 1684 INIT_LIST_HEAD(&vport->vlan_list);
6dd86902 1685 INIT_LIST_HEAD(&vport->uc_mac_list);
1686 INIT_LIST_HEAD(&vport->mc_mac_list);
46a3df9f
S
1687
1688 if (i == 0)
1689 ret = hclge_vport_setup(vport, tqp_main_vport);
1690 else
1691 ret = hclge_vport_setup(vport, tqp_per_vport);
1692 if (ret) {
1693 dev_err(&pdev->dev,
1694 "vport setup failed for vport %d, %d\n",
1695 i, ret);
1696 return ret;
1697 }
1698
1699 vport++;
1700 }
1701
1702 return 0;
1703}
1704
acf61ecd
YL
1705static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1706 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f
S
1707{
1708/* TX buffer size is in units of 128 bytes */
1709#define HCLGE_BUF_SIZE_UNIT_SHIFT 7
1710#define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15)
d44f9b63 1711 struct hclge_tx_buff_alloc_cmd *req;
46a3df9f
S
1712 struct hclge_desc desc;
1713 int ret;
1714 u8 i;
1715
d44f9b63 1716 req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
46a3df9f
S
1717
1718 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
f9f07091 1719 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
acf61ecd 1720 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
9ffe79a9 1721
46a3df9f
S
1722 req->tx_pkt_buff[i] =
1723 cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1724 HCLGE_BUF_SIZE_UPDATE_EN_MSK);
9ffe79a9 1725 }
46a3df9f
S
1726
1727 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 1728 if (ret)
46a3df9f
S
1729 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1730 ret);
46a3df9f 1731
3f639907 1732 return ret;
46a3df9f
S
1733}
1734
acf61ecd
YL
1735static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1736 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f 1737{
acf61ecd 1738 int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
46a3df9f 1739
3f639907
JS
1740 if (ret)
1741 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
46a3df9f 1742
3f639907 1743 return ret;
46a3df9f
S
1744}
1745
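/* get the number of TCs enabled in the hardware TC map */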
1a49f3c6 1746static u32 hclge_get_tc_num(struct hclge_dev *hdev)
46a3df9f 1747{
ebaf1908
WL
1748 unsigned int i;
1749 u32 cnt = 0;
46a3df9f
S
1750
1751 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1752 if (hdev->hw_tc_map & BIT(i))
1753 cnt++;
1754 return cnt;
1755}
1756
46a3df9f 1757/* Get the number of PFC-enabled TCs that have a private buffer */
acf61ecd
YL
1758static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1759 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f
S
1760{
1761 struct hclge_priv_buf *priv;
ebaf1908
WL
1762 unsigned int i;
1763 int cnt = 0;
46a3df9f
S
1764
1765 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
acf61ecd 1766 priv = &buf_alloc->priv_buf[i];
46a3df9f
S
1767 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1768 priv->enable)
1769 cnt++;
1770 }
1771
1772 return cnt;
1773}
1774
1775/* Get the number of PFC-disabled TCs that have a private buffer */
acf61ecd
YL
1776static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1777 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f
S
1778{
1779 struct hclge_priv_buf *priv;
ebaf1908
WL
1780 unsigned int i;
1781 int cnt = 0;
46a3df9f
S
1782
1783 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
acf61ecd 1784 priv = &buf_alloc->priv_buf[i];
46a3df9f
S
1785 if (hdev->hw_tc_map & BIT(i) &&
1786 !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1787 priv->enable)
1788 cnt++;
1789 }
1790
1791 return cnt;
1792}
1793
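/* get the total rx private buffer size already assigned to the enabled TCs */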
acf61ecd 1794static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f
S
1795{
1796 struct hclge_priv_buf *priv;
1797 u32 rx_priv = 0;
1798 int i;
1799
1800 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
acf61ecd 1801 priv = &buf_alloc->priv_buf[i];
46a3df9f
S
1802 if (priv->enable)
1803 rx_priv += priv->buf_size;
1804 }
1805 return rx_priv;
1806}
1807
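/* get the total tx buffer size assigned to all TCs */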
acf61ecd 1808static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
9ffe79a9
YL
1809{
1810 u32 i, total_tx_size = 0;
1811
1812 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
acf61ecd 1813 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
9ffe79a9
YL
1814
1815 return total_tx_size;
1816}
1817
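/* check whether the rx buffer left after the private buffers can hold
 * the shared buffer, and if so set up the shared buffer size, waterlines
 * and per-TC thresholds
 */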
acf61ecd
YL
1818static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1819 struct hclge_pkt_buf_alloc *buf_alloc,
1820 u32 rx_all)
46a3df9f 1821{
1a49f3c6
YL
1822 u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1823 u32 tc_num = hclge_get_tc_num(hdev);
b9a400ac 1824 u32 shared_buf, aligned_mps;
46a3df9f
S
1825 u32 rx_priv;
1826 int i;
1827
b9a400ac 1828 aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
46a3df9f 1829
d221df4e 1830 if (hnae3_dev_dcb_supported(hdev))
b37ce587
YM
1831 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1832 hdev->dv_buf_size;
d221df4e 1833 else
b9a400ac 1834 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
368686be 1835 + hdev->dv_buf_size;
d221df4e 1836
db5936db 1837 shared_buf_tc = tc_num * aligned_mps + aligned_mps;
af854724
YL
1838 shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1839 HCLGE_BUF_SIZE_UNIT);
46a3df9f 1840
acf61ecd 1841 rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
af854724 1842 if (rx_all < rx_priv + shared_std)
46a3df9f
S
1843 return false;
1844
b9a400ac 1845 shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
acf61ecd 1846 buf_alloc->s_buf.buf_size = shared_buf;
368686be
YL
1847 if (hnae3_dev_dcb_supported(hdev)) {
1848 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1849 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
b37ce587
YM
1850 - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1851 HCLGE_BUF_SIZE_UNIT);
368686be 1852 } else {
b9a400ac 1853 buf_alloc->s_buf.self.high = aligned_mps +
368686be 1854 HCLGE_NON_DCB_ADDITIONAL_BUF;
1a49f3c6
YL
1855 buf_alloc->s_buf.self.low = aligned_mps;
1856 }
1857
1858 if (hnae3_dev_dcb_supported(hdev)) {
9e15be90
YL
1859 hi_thrd = shared_buf - hdev->dv_buf_size;
1860
1861 if (tc_num <= NEED_RESERVE_TC_NUM)
1862 hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
1863 / BUF_MAX_PERCENT;
1864
1a49f3c6 1865 if (tc_num)
9e15be90 1866 hi_thrd = hi_thrd / tc_num;
1a49f3c6 1867
b37ce587 1868 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
1a49f3c6 1869 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
b37ce587 1870 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
1a49f3c6
YL
1871 } else {
1872 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1873 lo_thrd = aligned_mps;
368686be 1874 }
46a3df9f
S
1875
1876 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1a49f3c6
YL
1877 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1878 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
46a3df9f
S
1879 }
1880
1881 return true;
1882}
1883
acf61ecd
YL
1884static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1885 struct hclge_pkt_buf_alloc *buf_alloc)
9ffe79a9
YL
1886{
1887 u32 i, total_size;
1888
1889 total_size = hdev->pkt_buf_size;
1890
1891 /* alloc tx buffer for all enabled tc */
1892 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
acf61ecd 1893 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
9ffe79a9 1894
b6b4f987
HT
1895 if (hdev->hw_tc_map & BIT(i)) {
1896 if (total_size < hdev->tx_buf_size)
1897 return -ENOMEM;
9ffe79a9 1898
368686be 1899 priv->tx_buf_size = hdev->tx_buf_size;
b6b4f987 1900 } else {
9ffe79a9 1901 priv->tx_buf_size = 0;
b6b4f987 1902 }
9ffe79a9
YL
1903
1904 total_size -= priv->tx_buf_size;
1905 }
1906
1907 return 0;
1908}
1909
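/* assign a private buffer and waterlines to every enabled TC, using the
 * larger or smaller waterline scheme depending on "max", then check that
 * the result still fits into the rx buffer
 */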
8ca754b1
YL
1910static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
1911 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f 1912{
8ca754b1
YL
1913 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1914 u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
ebaf1908 1915 unsigned int i;
46a3df9f 1916
46a3df9f 1917 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
8ca754b1 1918 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
46a3df9f 1919
bb1fe9ea
YL
1920 priv->enable = 0;
1921 priv->wl.low = 0;
1922 priv->wl.high = 0;
1923 priv->buf_size = 0;
1924
1925 if (!(hdev->hw_tc_map & BIT(i)))
1926 continue;
1927
1928 priv->enable = 1;
46a3df9f
S
1929
1930 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
b37ce587 1931 priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
8ca754b1
YL
1932 priv->wl.high = roundup(priv->wl.low + aligned_mps,
1933 HCLGE_BUF_SIZE_UNIT);
46a3df9f
S
1934 } else {
1935 priv->wl.low = 0;
b37ce587
YM
1936 priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
1937 aligned_mps;
46a3df9f 1938 }
8ca754b1
YL
1939
1940 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
46a3df9f
S
1941 }
1942
8ca754b1
YL
1943 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1944}
46a3df9f 1945
8ca754b1
YL
1946static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1947 struct hclge_pkt_buf_alloc *buf_alloc)
1948{
1949 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1950 int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1951 int i;
46a3df9f
S
1952
1953 /* clear the TC private buffers starting from the last TC */
1954 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
8ca754b1 1955 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
ebaf1908 1956 unsigned int mask = BIT((unsigned int)i);
46a3df9f 1957
ebaf1908
WL
1958 if (hdev->hw_tc_map & mask &&
1959 !(hdev->tm_info.hw_pfc_map & mask)) {
46a3df9f
S
1960 /* Clear the private buffer of this non-pfc TC */
1961 priv->wl.low = 0;
1962 priv->wl.high = 0;
1963 priv->buf_size = 0;
1964 priv->enable = 0;
1965 no_pfc_priv_num--;
1966 }
1967
acf61ecd 1968 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
46a3df9f
S
1969 no_pfc_priv_num == 0)
1970 break;
1971 }
1972
8ca754b1
YL
1973 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1974}
46a3df9f 1975
8ca754b1
YL
1976static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1977 struct hclge_pkt_buf_alloc *buf_alloc)
1978{
1979 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1980 int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1981 int i;
46a3df9f
S
1982
1983 /* clear the TC private buffers starting from the last TC */
1984 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
8ca754b1 1985 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
ebaf1908 1986 unsigned int mask = BIT((unsigned int)i);
46a3df9f 1987
ebaf1908
WL
1988 if (hdev->hw_tc_map & mask &&
1989 hdev->tm_info.hw_pfc_map & mask) {
46a3df9f
S
1990 /* Reduce the number of pfc TCs with a private buffer */
1991 priv->wl.low = 0;
1992 priv->enable = 0;
1993 priv->wl.high = 0;
1994 priv->buf_size = 0;
1995 pfc_priv_num--;
1996 }
1997
acf61ecd 1998 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
46a3df9f
S
1999 pfc_priv_num == 0)
2000 break;
2001 }
8ca754b1
YL
2002
2003 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2004}
2005
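/* try to use the whole rx buffer for private buffers only (no shared
 * buffer); this only works if every enabled TC can get at least the
 * minimum required private buffer size
 */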
9e15be90
YL
2006static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2007 struct hclge_pkt_buf_alloc *buf_alloc)
2008{
2009#define COMPENSATE_BUFFER 0x3C00
2010#define COMPENSATE_HALF_MPS_NUM 5
2011#define PRIV_WL_GAP 0x1800
2012
2013 u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2014 u32 tc_num = hclge_get_tc_num(hdev);
2015 u32 half_mps = hdev->mps >> 1;
2016 u32 min_rx_priv;
2017 unsigned int i;
2018
2019 if (tc_num)
2020 rx_priv = rx_priv / tc_num;
2021
2022 if (tc_num <= NEED_RESERVE_TC_NUM)
2023 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2024
2025 min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2026 COMPENSATE_HALF_MPS_NUM * half_mps;
2027 min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2028 rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2029
2030 if (rx_priv < min_rx_priv)
2031 return false;
2032
2033 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2034 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2035
2036 priv->enable = 0;
2037 priv->wl.low = 0;
2038 priv->wl.high = 0;
2039 priv->buf_size = 0;
2040
2041 if (!(hdev->hw_tc_map & BIT(i)))
2042 continue;
2043
2044 priv->enable = 1;
2045 priv->buf_size = rx_priv;
2046 priv->wl.high = rx_priv - hdev->dv_buf_size;
2047 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2048 }
2049
2050 buf_alloc->s_buf.buf_size = 0;
2051
2052 return true;
2053}
2054
8ca754b1
YL
2055/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2056 * @hdev: pointer to struct hclge_dev
2057 * @buf_alloc: pointer to buffer calculation data
2058 * @return: 0: calculation successful, negative: fail
2059 */
2060static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2061 struct hclge_pkt_buf_alloc *buf_alloc)
2062{
2063 /* When DCB is not supported, rx private buffer is not allocated. */
2064 if (!hnae3_dev_dcb_supported(hdev)) {
2065 u32 rx_all = hdev->pkt_buf_size;
2066
2067 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2068 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2069 return -ENOMEM;
2070
2071 return 0;
2072 }
2073
9e15be90
YL
2074 if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2075 return 0;
2076
8ca754b1
YL
2077 if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2078 return 0;
2079
2080 /* try to decrease the buffer size */
2081 if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2082 return 0;
2083
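/* as a last resort, drop the private buffers of non-pfc TCs first and
 * then of pfc TCs until the remaining buffers fit
 */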
2084 if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2085 return 0;
2086
2087 if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
46a3df9f
S
2088 return 0;
2089
2090 return -ENOMEM;
2091}
2092
acf61ecd
YL
2093static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2094 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f 2095{
d44f9b63 2096 struct hclge_rx_priv_buff_cmd *req;
46a3df9f
S
2097 struct hclge_desc desc;
2098 int ret;
2099 int i;
2100
2101 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
d44f9b63 2102 req = (struct hclge_rx_priv_buff_cmd *)desc.data;
46a3df9f
S
2103
2104 /* Alloc private buffer TCs */
2105 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
acf61ecd 2106 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
46a3df9f
S
2107
2108 req->buf_num[i] =
2109 cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2110 req->buf_num[i] |=
5bca3b94 2111 cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
46a3df9f
S
2112 }
2113
b8c8bf47 2114 req->shared_buf =
acf61ecd 2115 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
b8c8bf47
YL
2116 (1 << HCLGE_TC0_PRI_BUF_EN_B));
2117
46a3df9f 2118 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 2119 if (ret)
46a3df9f
S
2120 dev_err(&hdev->pdev->dev,
2121 "rx private buffer alloc cmd failed %d\n", ret);
46a3df9f 2122
3f639907 2123 return ret;
46a3df9f
S
2124}
2125
acf61ecd
YL
2126static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2127 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f
S
2128{
2129 struct hclge_rx_priv_wl_buf *req;
2130 struct hclge_priv_buf *priv;
2131 struct hclge_desc desc[2];
2132 int i, j;
2133 int ret;
2134
2135 for (i = 0; i < 2; i++) {
2136 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2137 false);
2138 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2139
2140 /* The first descriptor set the NEXT bit to 1 */
2141 if (i == 0)
2142 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2143 else
2144 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2145
2146 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
acf61ecd
YL
2147 u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2148
2149 priv = &buf_alloc->priv_buf[idx];
46a3df9f
S
2150 req->tc_wl[j].high =
2151 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2152 req->tc_wl[j].high |=
3738287c 2153 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
46a3df9f
S
2154 req->tc_wl[j].low =
2155 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2156 req->tc_wl[j].low |=
3738287c 2157 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
46a3df9f
S
2158 }
2159 }
2160
2161 /* Send 2 descriptors at one time */
2162 ret = hclge_cmd_send(&hdev->hw, desc, 2);
3f639907 2163 if (ret)
46a3df9f
S
2164 dev_err(&hdev->pdev->dev,
2165 "rx private waterline config cmd failed %d\n",
2166 ret);
3f639907 2167 return ret;
46a3df9f
S
2168}
2169
acf61ecd
YL
2170static int hclge_common_thrd_config(struct hclge_dev *hdev,
2171 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f 2172{
acf61ecd 2173 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
46a3df9f
S
2174 struct hclge_rx_com_thrd *req;
2175 struct hclge_desc desc[2];
2176 struct hclge_tc_thrd *tc;
2177 int i, j;
2178 int ret;
2179
2180 for (i = 0; i < 2; i++) {
2181 hclge_cmd_setup_basic_desc(&desc[i],
2182 HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2183 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2184
2185 /* The first descriptor set the NEXT bit to 1 */
2186 if (i == 0)
2187 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2188 else
2189 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2190
2191 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2192 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2193
2194 req->com_thrd[j].high =
2195 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2196 req->com_thrd[j].high |=
3738287c 2197 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
46a3df9f
S
2198 req->com_thrd[j].low =
2199 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2200 req->com_thrd[j].low |=
3738287c 2201 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
46a3df9f
S
2202 }
2203 }
2204
2205 /* Send 2 descriptors at one time */
2206 ret = hclge_cmd_send(&hdev->hw, desc, 2);
3f639907 2207 if (ret)
46a3df9f
S
2208 dev_err(&hdev->pdev->dev,
2209 "common threshold config cmd failed %d\n", ret);
3f639907 2210 return ret;
46a3df9f
S
2211}
2212
acf61ecd
YL
2213static int hclge_common_wl_config(struct hclge_dev *hdev,
2214 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f 2215{
acf61ecd 2216 struct hclge_shared_buf *buf = &buf_alloc->s_buf;
46a3df9f
S
2217 struct hclge_rx_com_wl *req;
2218 struct hclge_desc desc;
2219 int ret;
2220
2221 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2222
2223 req = (struct hclge_rx_com_wl *)desc.data;
2224 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
3738287c 2225 req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
46a3df9f
S
2226
2227 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
3738287c 2228 req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
46a3df9f
S
2229
2230 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 2231 if (ret)
46a3df9f
S
2232 dev_err(&hdev->pdev->dev,
2233 "common waterline config cmd failed %d\n", ret);
46a3df9f 2234
3f639907 2235 return ret;
46a3df9f
S
2236}
2237
2238int hclge_buffer_alloc(struct hclge_dev *hdev)
2239{
acf61ecd 2240 struct hclge_pkt_buf_alloc *pkt_buf;
46a3df9f
S
2241 int ret;
2242
acf61ecd
YL
2243 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2244 if (!pkt_buf)
46a3df9f
S
2245 return -ENOMEM;
2246
acf61ecd 2247 ret = hclge_tx_buffer_calc(hdev, pkt_buf);
9ffe79a9
YL
2248 if (ret) {
2249 dev_err(&hdev->pdev->dev,
2250 "could not calc tx buffer size for all TCs %d\n", ret);
acf61ecd 2251 goto out;
9ffe79a9
YL
2252 }
2253
acf61ecd 2254 ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
46a3df9f
S
2255 if (ret) {
2256 dev_err(&hdev->pdev->dev,
2257 "could not alloc tx buffers %d\n", ret);
acf61ecd 2258 goto out;
46a3df9f
S
2259 }
2260
acf61ecd 2261 ret = hclge_rx_buffer_calc(hdev, pkt_buf);
46a3df9f
S
2262 if (ret) {
2263 dev_err(&hdev->pdev->dev,
2264 "could not calc rx priv buffer size for all TCs %d\n",
2265 ret);
acf61ecd 2266 goto out;
46a3df9f
S
2267 }
2268
acf61ecd 2269 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
46a3df9f
S
2270 if (ret) {
2271 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2272 ret);
acf61ecd 2273 goto out;
46a3df9f
S
2274 }
2275
2daf4a65 2276 if (hnae3_dev_dcb_supported(hdev)) {
acf61ecd 2277 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2daf4a65
YL
2278 if (ret) {
2279 dev_err(&hdev->pdev->dev,
2280 "could not configure rx private waterline %d\n",
2281 ret);
acf61ecd 2282 goto out;
2daf4a65 2283 }
46a3df9f 2284
acf61ecd 2285 ret = hclge_common_thrd_config(hdev, pkt_buf);
2daf4a65
YL
2286 if (ret) {
2287 dev_err(&hdev->pdev->dev,
2288 "could not configure common threshold %d\n",
2289 ret);
acf61ecd 2290 goto out;
2daf4a65 2291 }
46a3df9f
S
2292 }
2293
acf61ecd
YL
2294 ret = hclge_common_wl_config(hdev, pkt_buf);
2295 if (ret)
46a3df9f
S
2296 dev_err(&hdev->pdev->dev,
2297 "could not configure common waterline %d\n", ret);
46a3df9f 2298
acf61ecd
YL
2299out:
2300 kfree(pkt_buf);
2301 return ret;
46a3df9f
S
2302}
2303
2304static int hclge_init_roce_base_info(struct hclge_vport *vport)
2305{
2306 struct hnae3_handle *roce = &vport->roce;
2307 struct hnae3_handle *nic = &vport->nic;
2308
887c3820 2309 roce->rinfo.num_vectors = vport->back->num_roce_msi;
46a3df9f
S
2310
2311 if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2312 vport->back->num_msi_left == 0)
2313 return -EINVAL;
2314
2315 roce->rinfo.base_vector = vport->back->roce_base_vector;
2316
2317 roce->rinfo.netdev = nic->kinfo.netdev;
2318 roce->rinfo.roce_io_base = vport->back->hw.io_base;
2319
2320 roce->pdev = nic->pdev;
2321 roce->ae_algo = nic->ae_algo;
2322 roce->numa_node_mask = nic->numa_node_mask;
2323
2324 return 0;
2325}
2326
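/* allocate MSI/MSI-X vectors for the PF, accept fewer vectors than
 * requested if that is all the PCI core can provide, and set up the
 * vector bookkeeping arrays
 */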
887c3820 2327static int hclge_init_msi(struct hclge_dev *hdev)
46a3df9f
S
2328{
2329 struct pci_dev *pdev = hdev->pdev;
887c3820
SM
2330 int vectors;
2331 int i;
46a3df9f 2332
580a05f9
YL
2333 vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2334 hdev->num_msi,
887c3820
SM
2335 PCI_IRQ_MSI | PCI_IRQ_MSIX);
2336 if (vectors < 0) {
2337 dev_err(&pdev->dev,
2338 "failed(%d) to allocate MSI/MSI-X vectors\n",
2339 vectors);
2340 return vectors;
46a3df9f 2341 }
887c3820
SM
2342 if (vectors < hdev->num_msi)
2343 dev_warn(&hdev->pdev->dev,
adcf738b 2344 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
887c3820 2345 hdev->num_msi, vectors);
46a3df9f 2346
887c3820
SM
2347 hdev->num_msi = vectors;
2348 hdev->num_msi_left = vectors;
580a05f9 2349
887c3820 2350 hdev->base_msi_vector = pdev->irq;
46a3df9f 2351 hdev->roce_base_vector = hdev->base_msi_vector +
375dd5e4 2352 hdev->roce_base_msix_offset;
46a3df9f 2353
46a3df9f
S
2354 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2355 sizeof(u16), GFP_KERNEL);
887c3820
SM
2356 if (!hdev->vector_status) {
2357 pci_free_irq_vectors(pdev);
46a3df9f 2358 return -ENOMEM;
887c3820 2359 }
46a3df9f
S
2360
2361 for (i = 0; i < hdev->num_msi; i++)
2362 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2363
887c3820
SM
2364 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2365 sizeof(int), GFP_KERNEL);
2366 if (!hdev->vector_irq) {
2367 pci_free_irq_vectors(pdev);
2368 return -ENOMEM;
46a3df9f 2369 }
46a3df9f
S
2370
2371 return 0;
2372}
2373
2d03eacc 2374static u8 hclge_check_speed_dup(u8 duplex, int speed)
46a3df9f 2375{
2d03eacc
YL
2376 if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2377 duplex = HCLGE_MAC_FULL;
46a3df9f 2378
2d03eacc 2379 return duplex;
46a3df9f
S
2380}
2381
2d03eacc
YL
2382static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2383 u8 duplex)
46a3df9f 2384{
d44f9b63 2385 struct hclge_config_mac_speed_dup_cmd *req;
46a3df9f
S
2386 struct hclge_desc desc;
2387 int ret;
2388
d44f9b63 2389 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
46a3df9f
S
2390
2391 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2392
63cbf7a9
YM
2393 if (duplex)
2394 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
46a3df9f
S
2395
2396 switch (speed) {
2397 case HCLGE_MAC_SPEED_10M:
e4e87715
PL
2398 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2399 HCLGE_CFG_SPEED_S, 6);
46a3df9f
S
2400 break;
2401 case HCLGE_MAC_SPEED_100M:
e4e87715
PL
2402 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2403 HCLGE_CFG_SPEED_S, 7);
46a3df9f
S
2404 break;
2405 case HCLGE_MAC_SPEED_1G:
e4e87715
PL
2406 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2407 HCLGE_CFG_SPEED_S, 0);
46a3df9f
S
2408 break;
2409 case HCLGE_MAC_SPEED_10G:
e4e87715
PL
2410 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2411 HCLGE_CFG_SPEED_S, 1);
46a3df9f
S
2412 break;
2413 case HCLGE_MAC_SPEED_25G:
e4e87715
PL
2414 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2415 HCLGE_CFG_SPEED_S, 2);
46a3df9f
S
2416 break;
2417 case HCLGE_MAC_SPEED_40G:
e4e87715
PL
2418 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2419 HCLGE_CFG_SPEED_S, 3);
46a3df9f
S
2420 break;
2421 case HCLGE_MAC_SPEED_50G:
e4e87715
PL
2422 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2423 HCLGE_CFG_SPEED_S, 4);
46a3df9f
S
2424 break;
2425 case HCLGE_MAC_SPEED_100G:
e4e87715
PL
2426 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2427 HCLGE_CFG_SPEED_S, 5);
46a3df9f
S
2428 break;
2429 default:
d7629e74 2430 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
46a3df9f
S
2431 return -EINVAL;
2432 }
2433
e4e87715
PL
2434 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2435 1);
46a3df9f
S
2436
2437 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2438 if (ret) {
2439 dev_err(&hdev->pdev->dev,
2440 "mac speed/duplex config cmd failed %d.\n", ret);
2441 return ret;
2442 }
2443
2d03eacc
YL
2444 return 0;
2445}
2446
2447int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2448{
68e1006f 2449 struct hclge_mac *mac = &hdev->hw.mac;
2d03eacc
YL
2450 int ret;
2451
2452 duplex = hclge_check_speed_dup(duplex, speed);
68e1006f
JS
2453 if (!mac->support_autoneg && mac->speed == speed &&
2454 mac->duplex == duplex)
2d03eacc
YL
2455 return 0;
2456
2457 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2458 if (ret)
2459 return ret;
2460
2461 hdev->hw.mac.speed = speed;
2462 hdev->hw.mac.duplex = duplex;
46a3df9f
S
2463
2464 return 0;
2465}
2466
2467static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2468 u8 duplex)
2469{
2470 struct hclge_vport *vport = hclge_get_vport(handle);
2471 struct hclge_dev *hdev = vport->back;
2472
2473 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2474}
2475
46a3df9f
S
2476static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2477{
d44f9b63 2478 struct hclge_config_auto_neg_cmd *req;
46a3df9f 2479 struct hclge_desc desc;
a90bb9a5 2480 u32 flag = 0;
46a3df9f
S
2481 int ret;
2482
2483 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2484
d44f9b63 2485 req = (struct hclge_config_auto_neg_cmd *)desc.data;
b9a8f883
YL
2486 if (enable)
2487 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
a90bb9a5 2488 req->cfg_an_cmd_flag = cpu_to_le32(flag);
46a3df9f
S
2489
2490 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 2491 if (ret)
46a3df9f
S
2492 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2493 ret);
46a3df9f 2494
3f639907 2495 return ret;
46a3df9f
S
2496}
2497
2498static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2499{
2500 struct hclge_vport *vport = hclge_get_vport(handle);
2501 struct hclge_dev *hdev = vport->back;
2502
22f48e24
JS
2503 if (!hdev->hw.mac.support_autoneg) {
2504 if (enable) {
2505 dev_err(&hdev->pdev->dev,
2506 "autoneg is not supported by current port\n");
2507 return -EOPNOTSUPP;
2508 } else {
2509 return 0;
2510 }
2511 }
2512
46a3df9f
S
2513 return hclge_set_autoneg_en(hdev, enable);
2514}
2515
2516static int hclge_get_autoneg(struct hnae3_handle *handle)
2517{
2518 struct hclge_vport *vport = hclge_get_vport(handle);
2519 struct hclge_dev *hdev = vport->back;
27b5bf49
FL
2520 struct phy_device *phydev = hdev->hw.mac.phydev;
2521
2522 if (phydev)
2523 return phydev->autoneg;
46a3df9f
S
2524
2525 return hdev->hw.mac.autoneg;
2526}
2527
22f48e24
JS
2528static int hclge_restart_autoneg(struct hnae3_handle *handle)
2529{
2530 struct hclge_vport *vport = hclge_get_vport(handle);
2531 struct hclge_dev *hdev = vport->back;
2532 int ret;
2533
2534 dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2535
2536 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2537 if (ret)
2538 return ret;
2539 return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2540}
2541
7786a996
JS
2542static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2543{
2544 struct hclge_vport *vport = hclge_get_vport(handle);
2545 struct hclge_dev *hdev = vport->back;
2546
2547 if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2548 return hclge_set_autoneg_en(hdev, !halt);
2549
2550 return 0;
2551}
2552
7e6ec914
JS
2553static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2554{
2555 struct hclge_config_fec_cmd *req;
2556 struct hclge_desc desc;
2557 int ret;
2558
2559 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2560
2561 req = (struct hclge_config_fec_cmd *)desc.data;
2562 if (fec_mode & BIT(HNAE3_FEC_AUTO))
2563 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2564 if (fec_mode & BIT(HNAE3_FEC_RS))
2565 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2566 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2567 if (fec_mode & BIT(HNAE3_FEC_BASER))
2568 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2569 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2570
2571 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2572 if (ret)
2573 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2574
2575 return ret;
2576}
2577
2578static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2579{
2580 struct hclge_vport *vport = hclge_get_vport(handle);
2581 struct hclge_dev *hdev = vport->back;
2582 struct hclge_mac *mac = &hdev->hw.mac;
2583 int ret;
2584
2585 if (fec_mode && !(mac->fec_ability & fec_mode)) {
2586 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2587 return -EINVAL;
2588 }
2589
2590 ret = hclge_set_fec_hw(hdev, fec_mode);
2591 if (ret)
2592 return ret;
2593
2594 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2595 return 0;
2596}
2597
2598static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2599 u8 *fec_mode)
2600{
2601 struct hclge_vport *vport = hclge_get_vport(handle);
2602 struct hclge_dev *hdev = vport->back;
2603 struct hclge_mac *mac = &hdev->hw.mac;
2604
2605 if (fec_ability)
2606 *fec_ability = mac->fec_ability;
2607 if (fec_mode)
2608 *fec_mode = mac->fec_mode;
2609}
2610
46a3df9f
S
2611static int hclge_mac_init(struct hclge_dev *hdev)
2612{
2613 struct hclge_mac *mac = &hdev->hw.mac;
2614 int ret;
2615
5d497936 2616 hdev->support_sfp_query = true;
2d03eacc
YL
2617 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2618 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2619 hdev->hw.mac.duplex);
60df7e91 2620 if (ret)
46a3df9f 2621 return ret;
46a3df9f 2622
d736fc6c
JS
2623 if (hdev->hw.mac.support_autoneg) {
2624 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
60df7e91 2625 if (ret)
d736fc6c 2626 return ret;
d736fc6c
JS
2627 }
2628
46a3df9f
S
2629 mac->link = 0;
2630
7e6ec914
JS
2631 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2632 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
60df7e91 2633 if (ret)
7e6ec914 2634 return ret;
7e6ec914
JS
2635 }
2636
e6d7d79d
YL
2637 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2638 if (ret) {
2639 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2640 return ret;
2641 }
f9fd82a9 2642
1cbc662d
YM
2643 ret = hclge_set_default_loopback(hdev);
2644 if (ret)
2645 return ret;
2646
e6d7d79d 2647 ret = hclge_buffer_alloc(hdev);
3f639907 2648 if (ret)
f9fd82a9 2649 dev_err(&hdev->pdev->dev,
e6d7d79d 2650 "allocate buffer fail, ret=%d\n", ret);
f9fd82a9 2651
3f639907 2652 return ret;
46a3df9f
S
2653}
2654
c1a81619
SM
2655static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2656{
1c6dfe6f 2657 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
18e24888 2658 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
1c6dfe6f 2659 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
0ea68902 2660 hclge_wq, &hdev->service_task, 0);
c1a81619
SM
2661}
2662
cb1b9f77
SM
2663static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2664{
acfc3d55
HT
2665 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2666 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
1c6dfe6f 2667 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
0ea68902 2668 hclge_wq, &hdev->service_task, 0);
cb1b9f77
SM
2669}
2670
ed8fb4b2 2671void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
46a3df9f 2672{
d5432455
GL
2673 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2674 !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
08125454 2675 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
0ea68902 2676 hclge_wq, &hdev->service_task,
ed8fb4b2 2677 delay_time);
46a3df9f
S
2678}
2679
2680static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2681{
d44f9b63 2682 struct hclge_link_status_cmd *req;
46a3df9f
S
2683 struct hclge_desc desc;
2684 int link_status;
2685 int ret;
2686
2687 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2688 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2689 if (ret) {
2690 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2691 ret);
2692 return ret;
2693 }
2694
d44f9b63 2695 req = (struct hclge_link_status_cmd *)desc.data;
c79301d8 2696 link_status = req->status & HCLGE_LINK_STATUS_UP_M;
46a3df9f
S
2697
2698 return !!link_status;
2699}
2700
2701static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2702{
ebaf1908 2703 unsigned int mac_state;
46a3df9f
S
2704 int link_stat;
2705
582d37bb
PL
2706 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2707 return 0;
2708
46a3df9f
S
2709 mac_state = hclge_get_mac_link_status(hdev);
2710
2711 if (hdev->hw.mac.phydev) {
fd813314 2712 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
46a3df9f
S
2713 link_stat = mac_state &
2714 hdev->hw.mac.phydev->link;
2715 else
2716 link_stat = 0;
2717
2718 } else {
2719 link_stat = mac_state;
2720 }
2721
2722 return !!link_stat;
2723}
2724
2725static void hclge_update_link_status(struct hclge_dev *hdev)
2726{
45e92b7e 2727 struct hnae3_client *rclient = hdev->roce_client;
46a3df9f 2728 struct hnae3_client *client = hdev->nic_client;
45e92b7e 2729 struct hnae3_handle *rhandle;
46a3df9f
S
2730 struct hnae3_handle *handle;
2731 int state;
2732 int i;
2733
2734 if (!client)
2735 return;
1c6dfe6f
YL
2736
2737 if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2738 return;
2739
46a3df9f
S
2740 state = hclge_get_mac_phy_link(hdev);
2741 if (state != hdev->hw.mac.link) {
2742 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2743 handle = &hdev->vport[i].nic;
2744 client->ops->link_status_change(handle, state);
a6345787 2745 hclge_config_mac_tnl_int(hdev, state);
45e92b7e
PL
2746 rhandle = &hdev->vport[i].roce;
2747 if (rclient && rclient->ops->link_status_change)
2748 rclient->ops->link_status_change(rhandle,
2749 state);
46a3df9f
S
2750 }
2751 hdev->hw.mac.link = state;
2752 }
1c6dfe6f
YL
2753
2754 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
46a3df9f
S
2755}
2756
88d10bd6
JS
2757static void hclge_update_port_capability(struct hclge_mac *mac)
2758{
f438bfe9
JS
2759 /* update fec ability by speed */
2760 hclge_convert_setting_fec(mac);
2761
88d10bd6
JS
2762 /* firmware cannot identify the backplane type, so the media type
2763 * read from the configuration can help to handle it
2764 */
2765 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2766 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2767 mac->module_type = HNAE3_MODULE_TYPE_KR;
2768 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2769 mac->module_type = HNAE3_MODULE_TYPE_TP;
2770
db4d3d55 2771 if (mac->support_autoneg) {
88d10bd6
JS
2772 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2773 linkmode_copy(mac->advertising, mac->supported);
2774 } else {
2775 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2776 mac->supported);
2777 linkmode_zero(mac->advertising);
2778 }
2779}
2780
5d497936
PL
2781static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2782{
63cbf7a9 2783 struct hclge_sfp_info_cmd *resp;
5d497936
PL
2784 struct hclge_desc desc;
2785 int ret;
2786
88d10bd6
JS
2787 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2788 resp = (struct hclge_sfp_info_cmd *)desc.data;
5d497936
PL
2789 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2790 if (ret == -EOPNOTSUPP) {
2791 dev_warn(&hdev->pdev->dev,
2792 "IMP do not support get SFP speed %d\n", ret);
2793 return ret;
2794 } else if (ret) {
2795 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2796 return ret;
2797 }
2798
88d10bd6 2799 *speed = le32_to_cpu(resp->speed);
5d497936
PL
2800
2801 return 0;
2802}
2803
88d10bd6 2804static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
46a3df9f 2805{
88d10bd6
JS
2806 struct hclge_sfp_info_cmd *resp;
2807 struct hclge_desc desc;
46a3df9f
S
2808 int ret;
2809
88d10bd6
JS
2810 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2811 resp = (struct hclge_sfp_info_cmd *)desc.data;
2812
2813 resp->query_type = QUERY_ACTIVE_SPEED;
2814
2815 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2816 if (ret == -EOPNOTSUPP) {
2817 dev_warn(&hdev->pdev->dev,
2818 "IMP does not support get SFP info %d\n", ret);
2819 return ret;
2820 } else if (ret) {
2821 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2822 return ret;
2823 }
2824
2af8cb61
GL
2825 /* In some cases, the mac speed got from IMP may be 0, it shouldn't be
2826 * set to mac->speed.
2827 */
2828 if (!le32_to_cpu(resp->speed))
2829 return 0;
2830
88d10bd6
JS
2831 mac->speed = le32_to_cpu(resp->speed);
2832 /* if resp->speed_ability is 0, it means it's an old firmware
2833 * version, so do not update these params
46a3df9f 2834 */
88d10bd6
JS
2835 if (resp->speed_ability) {
2836 mac->module_type = le32_to_cpu(resp->module_type);
2837 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2838 mac->autoneg = resp->autoneg;
2839 mac->support_autoneg = resp->autoneg_ability;
49b12556 2840 mac->speed_type = QUERY_ACTIVE_SPEED;
f438bfe9
JS
2841 if (!resp->active_fec)
2842 mac->fec_mode = 0;
2843 else
2844 mac->fec_mode = BIT(resp->active_fec);
88d10bd6
JS
2845 } else {
2846 mac->speed_type = QUERY_SFP_SPEED;
2847 }
2848
2849 return 0;
2850}
2851
2852static int hclge_update_port_info(struct hclge_dev *hdev)
2853{
2854 struct hclge_mac *mac = &hdev->hw.mac;
2855 int speed = HCLGE_MAC_SPEED_UNKNOWN;
2856 int ret;
2857
2858 /* get the port info from SFP cmd if not copper port */
2859 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
46a3df9f
S
2860 return 0;
2861
88d10bd6 2862 /* if IMP does not support getting SFP/qSFP info, return directly */
5d497936
PL
2863 if (!hdev->support_sfp_query)
2864 return 0;
46a3df9f 2865
88d10bd6
JS
2866 if (hdev->pdev->revision >= 0x21)
2867 ret = hclge_get_sfp_info(hdev, mac);
2868 else
2869 ret = hclge_get_sfp_speed(hdev, &speed);
2870
5d497936
PL
2871 if (ret == -EOPNOTSUPP) {
2872 hdev->support_sfp_query = false;
2873 return ret;
2874 } else if (ret) {
2d03eacc 2875 return ret;
46a3df9f
S
2876 }
2877
88d10bd6
JS
2878 if (hdev->pdev->revision >= 0x21) {
2879 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2880 hclge_update_port_capability(mac);
2881 return 0;
2882 }
2883 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2884 HCLGE_MAC_FULL);
2885 } else {
2886 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2887 return 0; /* do nothing if no SFP */
46a3df9f 2888
88d10bd6
JS
2889 /* must config full duplex for SFP */
2890 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2891 }
46a3df9f
S
2892}
2893
2894static int hclge_get_status(struct hnae3_handle *handle)
2895{
2896 struct hclge_vport *vport = hclge_get_vport(handle);
2897 struct hclge_dev *hdev = vport->back;
2898
2899 hclge_update_link_status(hdev);
2900
2901 return hdev->hw.mac.link;
2902}
2903
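/* map a 0-based VF id to its vport entry (VF vports start at index 1) */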
6430f744
YM
2904static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
2905{
60df7e91 2906 if (!pci_num_vf(hdev->pdev)) {
6430f744
YM
2907 dev_err(&hdev->pdev->dev,
2908 "SRIOV is disabled, can not get vport(%d) info.\n", vf);
2909 return NULL;
2910 }
2911
2912 if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
2913 dev_err(&hdev->pdev->dev,
2914 "vf id(%d) is out of range(0 <= vfid < %d)\n",
2915 vf, pci_num_vf(hdev->pdev));
2916 return NULL;
2917 }
2918
2919 /* VF start from 1 in vport */
2920 vf += HCLGE_VF_VPORT_START_NUM;
2921 return &hdev->vport[vf];
2922}
2923
2924static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
2925 struct ifla_vf_info *ivf)
2926{
2927 struct hclge_vport *vport = hclge_get_vport(handle);
2928 struct hclge_dev *hdev = vport->back;
2929
2930 vport = hclge_get_vf_vport(hdev, vf);
2931 if (!vport)
2932 return -EINVAL;
2933
2934 ivf->vf = vf;
2935 ivf->linkstate = vport->vf_info.link_state;
22044f95 2936 ivf->spoofchk = vport->vf_info.spoofchk;
e196ec75 2937 ivf->trusted = vport->vf_info.trusted;
ee9e4424
YL
2938 ivf->min_tx_rate = 0;
2939 ivf->max_tx_rate = vport->vf_info.max_tx_rate;
89b40c7f
HT
2940 ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
2941 ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
2942 ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
6430f744
YM
2943 ether_addr_copy(ivf->mac, vport->vf_info.mac);
2944
2945 return 0;
2946}
2947
2948static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
2949 int link_state)
2950{
2951 struct hclge_vport *vport = hclge_get_vport(handle);
2952 struct hclge_dev *hdev = vport->back;
2953
2954 vport = hclge_get_vf_vport(hdev, vf);
2955 if (!vport)
2956 return -EINVAL;
2957
2958 vport->vf_info.link_state = link_state;
2959
2960 return 0;
2961}
2962
ca1d7669
SM
2963static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2964{
f6162d44 2965 u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
ca1d7669
SM
2966
2967 /* fetch the events from their corresponding regs */
9ca8d1a7 2968 rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
c1a81619 2969 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
f6162d44
SM
2970 msix_src_reg = hclge_read_dev(&hdev->hw,
2971 HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
c1a81619
SM
2972
2973 /* Assumption: If by any chance reset and mailbox events are reported
2974 * together then we will only process reset event in this go and will
2975 * defer the processing of the mailbox events. Since we will not have
2976 * cleared the RX CMDQ event this time, we will receive another
2977 * interrupt from H/W just for the mailbox.
46ee7350
GL
2978 *
2979 * check for vector0 reset event sources
c1a81619 2980 */
6dd22bbc
HT
2981 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2982 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2983 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2984 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2985 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
f02eb82d 2986 hdev->rst_stats.imp_rst_cnt++;
6dd22bbc
HT
2987 return HCLGE_VECTOR0_EVENT_RST;
2988 }
2989
ca1d7669 2990 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
65e41e7e 2991 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
8d40854f 2992 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
ca1d7669
SM
2993 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2994 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
f02eb82d 2995 hdev->rst_stats.global_rst_cnt++;
ca1d7669
SM
2996 return HCLGE_VECTOR0_EVENT_RST;
2997 }
2998
f6162d44 2999 /* check for vector0 msix event source */
147175c9 3000 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
9bc6ac91 3001 *clearval = msix_src_reg;
f6162d44 3002 return HCLGE_VECTOR0_EVENT_ERR;
147175c9 3003 }
f6162d44 3004
c1a81619
SM
3005 /* check for vector0 mailbox(=CMDQ RX) event source */
3006 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3007 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3008 *clearval = cmdq_src_reg;
3009 return HCLGE_VECTOR0_EVENT_MBX;
3010 }
ca1d7669 3011
147175c9 3012 /* print other vector0 event source */
9bc6ac91
HT
3013 dev_info(&hdev->pdev->dev,
3014 "CMDQ INT status:0x%x, other INT status:0x%x\n",
3015 cmdq_src_reg, msix_src_reg);
3016 *clearval = msix_src_reg;
3017
ca1d7669
SM
3018 return HCLGE_VECTOR0_EVENT_OTHER;
3019}
3020
3021static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3022 u32 regclr)
3023{
c1a81619
SM
3024 switch (event_type) {
3025 case HCLGE_VECTOR0_EVENT_RST:
ca1d7669 3026 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
c1a81619
SM
3027 break;
3028 case HCLGE_VECTOR0_EVENT_MBX:
3029 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3030 break;
fa7a4bd5
JS
3031 default:
3032 break;
c1a81619 3033 }
ca1d7669
SM
3034}
3035
8e52a602
XW
3036static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3037{
3038 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3039 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3040 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3041 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3042 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3043}
3044
466b0c00
L
3045static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3046{
3047 writel(enable ? 1 : 0, vector->addr);
3048}
3049
3050static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3051{
3052 struct hclge_dev *hdev = data;
ebaf1908 3053 u32 clearval = 0;
ca1d7669 3054 u32 event_cause;
466b0c00
L
3055
3056 hclge_enable_vector(&hdev->misc_vector, false);
ca1d7669
SM
3057 event_cause = hclge_check_event_cause(hdev, &clearval);
3058
c1a81619 3059 /* vector 0 interrupt is shared with reset and mailbox source events. */
ca1d7669 3060 switch (event_cause) {
f6162d44
SM
3061 case HCLGE_VECTOR0_EVENT_ERR:
3062 /* we do not know what type of reset is required now. This could
3063 * only be decided after we fetch the type of errors which
3064 * caused this event. Therefore, we will do the following for now:
3065 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
3066 * have deferred the type of reset to be used.
3067 * 2. Schedule the reset service task.
3068 * 3. When service task receives HNAE3_UNKNOWN_RESET type it
3069 * will fetch the correct type of reset. This would be done
3070 * by first decoding the types of errors.
3071 */
3072 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3073 /* fall through */
ca1d7669 3074 case HCLGE_VECTOR0_EVENT_RST:
cb1b9f77 3075 hclge_reset_task_schedule(hdev);
ca1d7669 3076 break;
c1a81619
SM
3077 case HCLGE_VECTOR0_EVENT_MBX:
3078 /* If we are here then,
3079 * 1. Either we are not handling any mbx task and we are not
3080 * scheduled as well
3081 * OR
3082 * 2. We could be handling a mbx task but nothing more is
3083 * scheduled.
3084 * In both cases, we should schedule mbx task as there are more
3085 * mbx messages reported by this interrupt.
3086 */
3087 hclge_mbx_task_schedule(hdev);
f0ad97ac 3088 break;
ca1d7669 3089 default:
f0ad97ac
YL
3090 dev_warn(&hdev->pdev->dev,
3091 "received unknown or unhandled event of vector0\n");
ca1d7669
SM
3092 break;
3093 }
3094
72e2fb07
HT
3095 hclge_clear_event_cause(hdev, event_cause, clearval);
3096
3097 /* Enable the interrupt if it is not caused by reset. And when
3098 * clearval is equal to 0, it means the interrupt status may have been
3099 * cleared by hardware before the driver reads the status register.
3100 * For this case, the vector0 interrupt also should be enabled.
3101 */
9bc6ac91
HT
3102 if (!clearval ||
3103 event_cause == HCLGE_VECTOR0_EVENT_MBX) {
cd8c5c26
YL
3104 hclge_enable_vector(&hdev->misc_vector, true);
3105 }
466b0c00
L
3106
3107 return IRQ_HANDLED;
3108}
3109
3110static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3111{
36cbbdf6
PL
3112 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3113 dev_warn(&hdev->pdev->dev,
3114 "vector(vector_id %d) has been freed.\n", vector_id);
3115 return;
3116 }
3117
466b0c00
L
3118 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3119 hdev->num_msi_left += 1;
3120 hdev->num_msi_used -= 1;
3121}
3122
3123static void hclge_get_misc_vector(struct hclge_dev *hdev)
3124{
3125 struct hclge_misc_vector *vector = &hdev->misc_vector;
3126
3127 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3128
3129 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3130 hdev->vector_status[0] = 0;
3131
3132 hdev->num_msi_left -= 1;
3133 hdev->num_msi_used += 1;
3134}
3135
08125454
YL
3136static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3137 const cpumask_t *mask)
3138{
3139 struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3140 affinity_notify);
3141
3142 cpumask_copy(&hdev->affinity_mask, mask);
3143}
3144
3145static void hclge_irq_affinity_release(struct kref *ref)
3146{
3147}
3148
3149static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3150{
3151 irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3152 &hdev->affinity_mask);
3153
3154 hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3155 hdev->affinity_notify.release = hclge_irq_affinity_release;
3156 irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3157 &hdev->affinity_notify);
3158}
3159
3160static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3161{
3162 irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3163 irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3164}
3165
466b0c00
L
3166static int hclge_misc_irq_init(struct hclge_dev *hdev)
3167{
3168 int ret;
3169
3170 hclge_get_misc_vector(hdev);
3171
ca1d7669 3172 /* this would be explicitly freed in the end */
f97c4d82
YL
3173 snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3174 HCLGE_NAME, pci_name(hdev->pdev));
ca1d7669 3175 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
f97c4d82 3176 0, hdev->misc_vector.name, hdev);
466b0c00
L
3177 if (ret) {
3178 hclge_free_vector(hdev, 0);
3179 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3180 hdev->misc_vector.vector_irq);
3181 }
3182
3183 return ret;
3184}
3185
ca1d7669
SM
3186static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3187{
3188 free_irq(hdev->misc_vector.vector_irq, hdev);
3189 hclge_free_vector(hdev, 0);
3190}
3191
af013903
HT
3192int hclge_notify_client(struct hclge_dev *hdev,
3193 enum hnae3_reset_notify_type type)
4ed340ab
L
3194{
3195 struct hnae3_client *client = hdev->nic_client;
3196 u16 i;
3197
9b2f3477 3198 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
bd9109c9
HT
3199 return 0;
3200
4ed340ab
L
3201 if (!client->ops->reset_notify)
3202 return -EOPNOTSUPP;
3203
3204 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3205 struct hnae3_handle *handle = &hdev->vport[i].nic;
3206 int ret;
3207
3208 ret = client->ops->reset_notify(handle, type);
65e41e7e
HT
3209 if (ret) {
3210 dev_err(&hdev->pdev->dev,
3211 "notify nic client failed %d(%d)\n", type, ret);
4ed340ab 3212 return ret;
65e41e7e 3213 }
4ed340ab
L
3214 }
3215
3216 return 0;
3217}
3218
f403a84f
HT
3219static int hclge_notify_roce_client(struct hclge_dev *hdev,
3220 enum hnae3_reset_notify_type type)
3221{
3222 struct hnae3_client *client = hdev->roce_client;
3223 int ret = 0;
3224 u16 i;
3225
9b2f3477 3226 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
f403a84f
HT
3227 return 0;
3228
3229 if (!client->ops->reset_notify)
3230 return -EOPNOTSUPP;
3231
3232 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3233 struct hnae3_handle *handle = &hdev->vport[i].roce;
3234
3235 ret = client->ops->reset_notify(handle, type);
3236 if (ret) {
3237 dev_err(&hdev->pdev->dev,
3238 "notify roce client failed %d(%d)",
3239 type, ret);
3240 return ret;
3241 }
3242 }
3243
3244 return ret;
3245}
3246
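/* poll the reset status register until the bit for the current reset
 * type is cleared, or give up after HCLGE_RESET_WAIT_CNT tries
 */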
4ed340ab
L
3247static int hclge_reset_wait(struct hclge_dev *hdev)
3248{
3249#define HCLGE_RESET_WATI_MS 100
5bb784e9
HT
3250#define HCLGE_RESET_WAIT_CNT 350
3251
4ed340ab
L
3252 u32 val, reg, reg_bit;
3253 u32 cnt = 0;
3254
3255 switch (hdev->reset_type) {
6dd22bbc
HT
3256 case HNAE3_IMP_RESET:
3257 reg = HCLGE_GLOBAL_RESET_REG;
3258 reg_bit = HCLGE_IMP_RESET_BIT;
3259 break;
4ed340ab
L
3260 case HNAE3_GLOBAL_RESET:
3261 reg = HCLGE_GLOBAL_RESET_REG;
3262 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3263 break;
4ed340ab
L
3264 case HNAE3_FUNC_RESET:
3265 reg = HCLGE_FUN_RST_ING;
3266 reg_bit = HCLGE_FUN_RST_ING_B;
3267 break;
3268 default:
3269 dev_err(&hdev->pdev->dev,
3270 "Wait for unsupported reset type: %d\n",
3271 hdev->reset_type);
3272 return -EINVAL;
3273 }
3274
3275 val = hclge_read_dev(&hdev->hw, reg);
e4e87715 3276 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
4ed340ab
L
3277 msleep(HCLGE_RESET_WATI_MS);
3278 val = hclge_read_dev(&hdev->hw, reg);
3279 cnt++;
3280 }
3281
4ed340ab
L
3282 if (cnt >= HCLGE_RESET_WAIT_CNT) {
3283 dev_warn(&hdev->pdev->dev,
3284 "Wait for reset timeout: %d\n", hdev->reset_type);
3285 return -EBUSY;
3286 }
3287
3288 return 0;
3289}
3290
aa5c4f17
HT
3291static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3292{
3293 struct hclge_vf_rst_cmd *req;
3294 struct hclge_desc desc;
3295
3296 req = (struct hclge_vf_rst_cmd *)desc.data;
3297 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3298 req->dest_vfid = func_id;
3299
3300 if (reset)
3301 req->vf_rst = 0x1;
3302
3303 return hclge_cmd_send(&hdev->hw, &desc, 1);
3304}
3305
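/* set or clear the reset state of every VF and, when asserting reset,
 * inform each alive VF so that it can stop its IO
 */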
e511f17b 3306static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
aa5c4f17
HT
3307{
3308 int i;
3309
3310 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3311 struct hclge_vport *vport = &hdev->vport[i];
3312 int ret;
3313
3314 /* Send cmd to set/clear VF's FUNC_RST_ING */
3315 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3316 if (ret) {
3317 dev_err(&hdev->pdev->dev,
adcf738b 3318 "set vf(%u) rst failed %d!\n",
aa5c4f17
HT
3319 vport->vport_id, ret);
3320 return ret;
3321 }
3322
cc645dfa 3323 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
aa5c4f17
HT
3324 continue;
3325
3326 /* Inform VF to process the reset.
3327 * hclge_inform_reset_assert_to_vf may fail if VF
3328 * driver is not loaded.
3329 */
3330 ret = hclge_inform_reset_assert_to_vf(vport);
3331 if (ret)
3332 dev_warn(&hdev->pdev->dev,
adcf738b 3333 "inform reset to vf(%u) failed %d!\n",
aa5c4f17
HT
3334 vport->vport_id, ret);
3335 }
3336
3337 return 0;
3338}
3339
1c6dfe6f
YL
3340static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3341{
3342 if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3343 test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3344 test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3345 return;
3346
3347 hclge_mbx_handler(hdev);
3348
3349 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3350}
3351
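/* wait until all VFs report they are ready for the PF reset, while still
 * serving mailbox requests from the VFs
 */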
c3106cac 3352static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
427a7bff
HT
3353{
3354 struct hclge_pf_rst_sync_cmd *req;
3355 struct hclge_desc desc;
3356 int cnt = 0;
3357 int ret;
3358
3359 req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3360 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3361
3362 do {
1c6dfe6f
YL
3363 /* the VF needs to bring its netdev down via mbx during PF or FLR reset */
3364 hclge_mailbox_service_task(hdev);
3365
427a7bff
HT
3366 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3367 /* for compatibility with old firmware, wait
3368 * 100 ms for VF to stop IO
3369 */
3370 if (ret == -EOPNOTSUPP) {
3371 msleep(HCLGE_RESET_SYNC_TIME);
c3106cac 3372 return;
427a7bff 3373 } else if (ret) {
c3106cac
HT
3374 dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3375 ret);
3376 return;
427a7bff 3377 } else if (req->all_vf_ready) {
c3106cac 3378 return;
427a7bff
HT
3379 }
3380 msleep(HCLGE_PF_RESET_SYNC_TIME);
3381 hclge_cmd_reuse_desc(&desc, true);
3382 } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3383
c3106cac 3384 dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
427a7bff
HT
3385}
3386
a83d2961
WL
3387void hclge_report_hw_error(struct hclge_dev *hdev,
3388 enum hnae3_hw_error_type type)
3389{
3390 struct hnae3_client *client = hdev->nic_client;
3391 u16 i;
3392
3393 if (!client || !client->ops->process_hw_error ||
3394 !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3395 return;
3396
3397 for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3398 client->ops->process_hw_error(&hdev->vport[i].nic, type);
3399}
3400
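/* report IMP RD poison and CMDQ ECC errors to the client and clear the
 * corresponding interrupt status bits
 */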
3401static void hclge_handle_imp_error(struct hclge_dev *hdev)
3402{
3403 u32 reg_val;
3404
3405 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3406 if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3407 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3408 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3409 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3410 }
3411
3412 if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3413 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3414 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3415 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3416 }
3417}
3418
2bfbd35d 3419int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
4ed340ab
L
3420{
3421 struct hclge_desc desc;
3422 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3423 int ret;
3424
3425 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
e4e87715 3426 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
4ed340ab
L
3427 req->fun_reset_vfid = func_id;
3428
3429 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3430 if (ret)
3431 dev_err(&hdev->pdev->dev,
3432 "send function reset cmd fail, status =%d\n", ret);
3433
3434 return ret;
3435}
3436
f2f432f2 3437static void hclge_do_reset(struct hclge_dev *hdev)
4ed340ab 3438{
4f765d3e 3439 struct hnae3_handle *handle = &hdev->vport[0].nic;
4ed340ab
L
3440 struct pci_dev *pdev = hdev->pdev;
3441 u32 val;
3442
4f765d3e
HT
3443 if (hclge_get_hw_reset_stat(handle)) {
3444 dev_info(&pdev->dev, "hardware reset not finished\n");
3445 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3446 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3447 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3448 return;
3449 }
3450
f2f432f2 3451 switch (hdev->reset_type) {
4ed340ab
L
3452 case HNAE3_GLOBAL_RESET:
3453 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
e4e87715 3454 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
4ed340ab
L
3455 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3456 dev_info(&pdev->dev, "Global Reset requested\n");
3457 break;
4ed340ab
L
3458 case HNAE3_FUNC_RESET:
3459 dev_info(&pdev->dev, "PF Reset requested\n");
cb1b9f77
SM
3460 /* schedule again to check later */
3461 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3462 hclge_reset_task_schedule(hdev);
4ed340ab
L
3463 break;
3464 default:
3465 dev_warn(&pdev->dev,
f2f432f2 3466 "Unsupported reset type: %d\n", hdev->reset_type);
4ed340ab
L
3467 break;
3468 }
3469}
3470
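/* Resolve an UNKNOWN reset request to a concrete type via the MSI-X error
 * handler, then return the highest priority reset level pending in @addr
 * (IMP > GLOBAL > FUNC > FLR). Returns HNAE3_NONE_RESET if the pending
 * level is lower than the reset already being handled.
 */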
123297b7 3471static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
f2f432f2
SM
3472 unsigned long *addr)
3473{
3474 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
123297b7 3475 struct hclge_dev *hdev = ae_dev->priv;
f2f432f2 3476
f6162d44
SM
3477 /* first, resolve any unknown reset type to the known type(s) */
3478 if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
d9b81c96
HT
3479 u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
3480 HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
f6162d44
SM
3481 /* we will intentionally ignore any errors from this function
3482 * as we will end up in *some* reset request in any case
3483 */
d9b81c96
HT
3484 if (hclge_handle_hw_msix_error(hdev, addr))
3485 dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3486 msix_sts_reg);
3487
f6162d44
SM
3488 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3489 /* We deferred the clearing of the error event which caused the
3490 * interrupt, since it was not possible to do that in interrupt
3491 * context (and this is the reason we introduced the new UNKNOWN
3492 * reset type). Now that the errors have been handled and cleared
3493 * in hardware, we can safely enable interrupts. This is an
3494 * exception to the norm.
3495 */
3496 hclge_enable_vector(&hdev->misc_vector, true);
3497 }
3498
f2f432f2 3499 /* return the highest priority reset level amongst all */
7cea834d
HT
3500 if (test_bit(HNAE3_IMP_RESET, addr)) {
3501 rst_level = HNAE3_IMP_RESET;
3502 clear_bit(HNAE3_IMP_RESET, addr);
3503 clear_bit(HNAE3_GLOBAL_RESET, addr);
7cea834d
HT
3504 clear_bit(HNAE3_FUNC_RESET, addr);
3505 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
f2f432f2 3506 rst_level = HNAE3_GLOBAL_RESET;
7cea834d 3507 clear_bit(HNAE3_GLOBAL_RESET, addr);
7cea834d
HT
3508 clear_bit(HNAE3_FUNC_RESET, addr);
3509 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
f2f432f2 3510 rst_level = HNAE3_FUNC_RESET;
7cea834d 3511 clear_bit(HNAE3_FUNC_RESET, addr);
6b9a97ee
HT
3512 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3513 rst_level = HNAE3_FLR_RESET;
3514 clear_bit(HNAE3_FLR_RESET, addr);
7cea834d 3515 }
f2f432f2 3516
0fdf4d30
HT
3517 if (hdev->reset_type != HNAE3_NONE_RESET &&
3518 rst_level < hdev->reset_type)
3519 return HNAE3_NONE_RESET;
3520
f2f432f2
SM
3521 return rst_level;
3522}
3523
cd8c5c26
YL
3524static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3525{
3526 u32 clearval = 0;
3527
3528 switch (hdev->reset_type) {
3529 case HNAE3_IMP_RESET:
3530 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3531 break;
3532 case HNAE3_GLOBAL_RESET:
3533 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3534 break;
cd8c5c26 3535 default:
cd8c5c26
YL
3536 break;
3537 }
3538
3539 if (!clearval)
3540 return;
3541
72e2fb07
HT
3542 /* For revision 0x20, the reset interrupt source
3543 * can only be cleared after the hardware reset is done
3544 */
3545 if (hdev->pdev->revision == 0x20)
3546 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3547 clearval);
3548
cd8c5c26
YL
3549 hclge_enable_vector(&hdev->misc_vector, true);
3550}
3551
6b428b4f
HT
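/* Reset handshake with the firmware: set or clear HCLGE_NIC_SW_RST_RDY in
 * the CSQ depth register to tell the hardware whether the driver's reset
 * preparatory work is done.
 */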
3552static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3553{
3554 u32 reg_val;
3555
3556 reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3557 if (enable)
3558 reg_val |= HCLGE_NIC_SW_RST_RDY;
3559 else
3560 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3561
3562 hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3563}
3564
c7554dcd
HT
3565static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3566{
3567 int ret;
3568
3569 ret = hclge_set_all_vf_rst(hdev, true);
3570 if (ret)
3571 return ret;
3572
3573 hclge_func_reset_sync_vf(hdev);
3574
3575 return 0;
3576}
3577
35d93a30
HT
3578static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3579{
6dd22bbc 3580 u32 reg_val;
35d93a30
HT
3581 int ret = 0;
3582
3583 switch (hdev->reset_type) {
3584 case HNAE3_FUNC_RESET:
c7554dcd
HT
3585 ret = hclge_func_reset_notify_vf(hdev);
3586 if (ret)
3587 return ret;
427a7bff 3588
35d93a30
HT
3589 ret = hclge_func_reset_cmd(hdev, 0);
3590 if (ret) {
3591 dev_err(&hdev->pdev->dev,
141b95d5 3592 "asserting function reset fail %d!\n", ret);
35d93a30
HT
3593 return ret;
3594 }
3595
3596 /* After performing PF reset, it is not necessary to do the
3597 * mailbox handling or send any command to firmware, because
3598 * any mailbox handling or command to firmware is only valid
3599 * after hclge_cmd_init is called.
3600 */
3601 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
f02eb82d 3602 hdev->rst_stats.pf_rst_cnt++;
35d93a30 3603 break;
6b9a97ee 3604 case HNAE3_FLR_RESET:
c7554dcd
HT
3605 ret = hclge_func_reset_notify_vf(hdev);
3606 if (ret)
3607 return ret;
6b9a97ee 3608 break;
6dd22bbc 3609 case HNAE3_IMP_RESET:
a83d2961 3610 hclge_handle_imp_error(hdev);
6dd22bbc
HT
3611 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3612 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3613 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3614 break;
35d93a30
HT
3615 default:
3616 break;
3617 }
3618
ada13ee3
HT
3619 /* inform hardware that preparatory work is done */
3620 msleep(HCLGE_RESET_SYNC_TIME);
6b428b4f 3621 hclge_reset_handshake(hdev, true);
35d93a30
HT
3622 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3623
3624 return ret;
3625}
3626
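/* Handle a failed reset: return true if the reset task should be
 * re-scheduled (a reset is still pending, or the fail counter is below
 * MAX_RESET_FAIL_CNT); return false otherwise, e.g. when a new reset
 * interrupt has arrived or the retry limit has been reached.
 */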
8e9eee78 3627static bool hclge_reset_err_handle(struct hclge_dev *hdev)
65e41e7e
HT
3628{
3629#define MAX_RESET_FAIL_CNT 5
65e41e7e
HT
3630
3631 if (hdev->reset_pending) {
3632 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3633 hdev->reset_pending);
3634 return true;
2336f19d
HT
3635 } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3636 HCLGE_RESET_INT_M) {
65e41e7e 3637 dev_info(&hdev->pdev->dev,
2336f19d 3638 "reset failed because of a new reset interrupt\n");
65e41e7e
HT
3639 hclge_clear_reset_cause(hdev);
3640 return false;
0ecf1f7b
HT
3641 } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3642 hdev->rst_stats.reset_fail_cnt++;
8e9eee78
HT
3643 set_bit(hdev->reset_type, &hdev->reset_pending);
3644 dev_info(&hdev->pdev->dev,
adcf738b 3645 "re-schedule reset task(%u)\n",
0ecf1f7b 3646 hdev->rst_stats.reset_fail_cnt);
8e9eee78 3647 return true;
65e41e7e
HT
3648 }
3649
3650 hclge_clear_reset_cause(hdev);
6b428b4f
HT
3651
3652 /* recover the handshake status when the reset fails */
3653 hclge_reset_handshake(hdev, true);
3654
65e41e7e 3655 dev_err(&hdev->pdev->dev, "Reset fail!\n");
3d77d0cb
HT
3656
3657 hclge_dbg_dump_rst_info(hdev);
3658
d5432455
GL
3659 set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3660
65e41e7e
HT
3661 return false;
3662}
3663
72e2fb07
HT
3664static int hclge_set_rst_done(struct hclge_dev *hdev)
3665{
3666 struct hclge_pf_rst_done_cmd *req;
3667 struct hclge_desc desc;
648db051 3668 int ret;
72e2fb07
HT
3669
3670 req = (struct hclge_pf_rst_done_cmd *)desc.data;
3671 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3672 req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3673
648db051
HT
3674 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3675 /* To be compatible with the old firmware, which does not support
3676 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
3677 * return success
3678 */
3679 if (ret == -EOPNOTSUPP) {
3680 dev_warn(&hdev->pdev->dev,
3681 "current firmware does not support command(0x%x)!\n",
3682 HCLGE_OPC_PF_RST_DONE);
3683 return 0;
3684 } else if (ret) {
3685 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3686 ret);
3687 }
3688
3689 return ret;
72e2fb07
HT
3690}
3691
aa5c4f17
HT
3692static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3693{
3694 int ret = 0;
3695
3696 switch (hdev->reset_type) {
3697 case HNAE3_FUNC_RESET:
6b9a97ee
HT
3698 /* fall through */
3699 case HNAE3_FLR_RESET:
aa5c4f17
HT
3700 ret = hclge_set_all_vf_rst(hdev, false);
3701 break;
72e2fb07
HT
3702 case HNAE3_GLOBAL_RESET:
3703 /* fall through */
3704 case HNAE3_IMP_RESET:
3705 ret = hclge_set_rst_done(hdev);
3706 break;
aa5c4f17
HT
3707 default:
3708 break;
3709 }
3710
6b428b4f
HT
3711 /* clear the handshake status after re-initialization is done */
3712 hclge_reset_handshake(hdev, false);
3713
aa5c4f17
HT
3714 return ret;
3715}
3716
63cbf7a9
YM
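/* Rebuild the NIC stack for a reset: uninitialize the client, reset the
 * ae_dev, then re-initialize and restore the client.
 */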
3717static int hclge_reset_stack(struct hclge_dev *hdev)
3718{
3719 int ret;
3720
3721 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3722 if (ret)
3723 return ret;
3724
3725 ret = hclge_reset_ae_dev(hdev->ae_dev);
3726 if (ret)
3727 return ret;
3728
3729 ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3730 if (ret)
3731 return ret;
3732
3733 return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3734}
3735
d4fa0656 3736static int hclge_reset_prepare(struct hclge_dev *hdev)
f2f432f2 3737{
6871af29 3738 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
65e41e7e 3739 int ret;
9de0b86f 3740
6871af29
JS
3741 /* Initialize ae_dev reset status as well, in case enet layer wants to
3742 * know if device is undergoing reset
3743 */
3744 ae_dev->reset_type = hdev->reset_type;
f02eb82d 3745 hdev->rst_stats.reset_cnt++;
f2f432f2 3746 /* perform reset of the stack & ae device for a client */
65e41e7e
HT
3747 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3748 if (ret)
d4fa0656 3749 return ret;
65e41e7e 3750
6d4fab39 3751 rtnl_lock();
65e41e7e 3752 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
65e41e7e 3753 rtnl_unlock();
65e41e7e 3754 if (ret)
d4fa0656 3755 return ret;
cd8c5c26 3756
d4fa0656
HT
3757 return hclge_reset_prepare_wait(hdev);
3758}
3759
3760static int hclge_reset_rebuild(struct hclge_dev *hdev)
3761{
3762 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3763 enum hnae3_reset_type reset_level;
3764 int ret;
f2f432f2 3765
f02eb82d
HT
3766 hdev->rst_stats.hw_reset_done_cnt++;
3767
65e41e7e
HT
3768 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3769 if (ret)
d4fa0656 3770 return ret;
65e41e7e
HT
3771
3772 rtnl_lock();
63cbf7a9 3773 ret = hclge_reset_stack(hdev);
d4fa0656 3774 rtnl_unlock();
1f609492 3775 if (ret)
d4fa0656 3776 return ret;
1f609492 3777
65e41e7e
HT
3778 hclge_clear_reset_cause(hdev);
3779
aa5c4f17
HT
3780 ret = hclge_reset_prepare_up(hdev);
3781 if (ret)
d4fa0656 3782 return ret;
aa5c4f17 3783
63cbf7a9
YM
3784
3785 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3786 /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
3787 * times
3788 */
0ecf1f7b
HT
3789 if (ret &&
3790 hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
d4fa0656 3791 return ret;
63cbf7a9
YM
3792
3793 rtnl_lock();
65e41e7e 3794 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
6d4fab39 3795 rtnl_unlock();
d4fa0656
HT
3796 if (ret)
3797 return ret;
f403a84f 3798
65e41e7e
HT
3799 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3800 if (ret)
d4fa0656 3801 return ret;
65e41e7e 3802
b644a8d4 3803 hdev->last_reset_time = jiffies;
0ecf1f7b 3804 hdev->rst_stats.reset_fail_cnt = 0;
f02eb82d 3805 hdev->rst_stats.reset_done_cnt++;
b644a8d4 3806 ae_dev->reset_type = HNAE3_NONE_RESET;
d5432455 3807 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
012fcb52
HT
3808
3809 /* if default_reset_request has a higher level reset request,
3810 * it should be handled as soon as possible, since some errors
3811 * need this kind of reset to be fixed.
3812 */
525a294e
HT
3813 reset_level = hclge_get_reset_level(ae_dev,
3814 &hdev->default_reset_request);
3815 if (reset_level != HNAE3_NONE_RESET)
3816 set_bit(reset_level, &hdev->reset_request);
b644a8d4 3817
d4fa0656
HT
3818 return 0;
3819}
3820
3821static void hclge_reset(struct hclge_dev *hdev)
3822{
3823 if (hclge_reset_prepare(hdev))
3824 goto err_reset;
3825
3826 if (hclge_reset_wait(hdev))
3827 goto err_reset;
3828
3829 if (hclge_reset_rebuild(hdev))
3830 goto err_reset;
3831
65e41e7e
HT
3832 return;
3833
65e41e7e 3834err_reset:
8e9eee78 3835 if (hclge_reset_err_handle(hdev))
65e41e7e 3836 hclge_reset_task_schedule(hdev);
f2f432f2
SM
3837}
3838
6ae4e733
SJ
3839static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3840{
3841 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3842 struct hclge_dev *hdev = ae_dev->priv;
3843
3844 /* We might end up getting called broadly because of the two cases below:
3845 * 1. Recoverable error was conveyed through APEI and only way to bring
3846 * normalcy is to reset.
3847 * 2. A new reset request from the stack due to timeout
3848 *
3849 * For the first case, the error event might not have an ae handle available.
3850 * Check if this is a new reset request and we are not here just because
6d4c3981
SM
3851 * last reset attempt did not succeed and watchdog hit us again. We will
3852 * know this if last reset request did not occur very recently (watchdog
3853 * timer = 5*HZ, let us check after a sufficiently large time, say 4*5*HZ).
3854 * In case of new request we reset the "reset level" to PF reset.
9de0b86f
HT
3855 * And if it is a repeat reset request of the most recent one then we
3856 * want to make sure we throttle the reset request. Therefore, we will
3857 * not allow it again before 3*HZ times.
6d4c3981 3858 */
6ae4e733
SJ
3859 if (!handle)
3860 handle = &hdev->vport[0].nic;
3861
b37ce587 3862 if (time_before(jiffies, (hdev->last_reset_time +
012fcb52
HT
3863 HCLGE_RESET_INTERVAL))) {
3864 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
9de0b86f 3865 return;
db4d3d55 3866 } else if (hdev->default_reset_request) {
0742ed7c 3867 hdev->reset_level =
123297b7 3868 hclge_get_reset_level(ae_dev,
720bd583 3869 &hdev->default_reset_request);
db4d3d55 3870 } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
0742ed7c 3871 hdev->reset_level = HNAE3_FUNC_RESET;
db4d3d55 3872 }
4ed340ab 3873
96e65abb 3874 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
0742ed7c 3875 hdev->reset_level);
6d4c3981
SM
3876
3877 /* request reset & schedule reset task */
0742ed7c 3878 set_bit(hdev->reset_level, &hdev->reset_request);
6d4c3981
SM
3879 hclge_reset_task_schedule(hdev);
3880
0742ed7c
HT
3881 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3882 hdev->reset_level++;
4ed340ab
L
3883}
3884
720bd583
HT
3885static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3886 enum hnae3_reset_type rst_type)
3887{
3888 struct hclge_dev *hdev = ae_dev->priv;
3889
3890 set_bit(rst_type, &hdev->default_reset_request);
3891}
3892
65e41e7e
HT
3893static void hclge_reset_timer(struct timer_list *t)
3894{
3895 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3896
012fcb52
HT
3897 /* if default_reset_request has no value, it means that this reset
3898 * request has already been handled, so just return here
3899 */
3900 if (!hdev->default_reset_request)
3901 return;
3902
65e41e7e 3903 dev_info(&hdev->pdev->dev,
e3b84ed2 3904 "triggering reset in reset timer\n");
65e41e7e
HT
3905 hclge_reset_event(hdev->pdev, NULL);
3906}
3907
4ed340ab
L
3908static void hclge_reset_subtask(struct hclge_dev *hdev)
3909{
123297b7
SJ
3910 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3911
f2f432f2
SM
3912 /* check if there is any ongoing reset in the hardware. This status can
3913 * be checked from reset_pending. If there is, we need to wait for the
3914 * hardware to complete the reset.
3915 * a. If we are able to figure out in reasonable time that the hardware
3916 * has fully reset, then we can proceed with the driver and client
3917 * reset.
3918 * b. Otherwise, we can come back later to check this status, so
3919 * reschedule now.
3920 */
0742ed7c 3921 hdev->last_reset_time = jiffies;
123297b7 3922 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
f2f432f2
SM
3923 if (hdev->reset_type != HNAE3_NONE_RESET)
3924 hclge_reset(hdev);
4ed340ab 3925
f2f432f2 3926 /* check if we got any *new* reset requests to be honored */
123297b7 3927 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
f2f432f2
SM
3928 if (hdev->reset_type != HNAE3_NONE_RESET)
3929 hclge_do_reset(hdev);
4ed340ab 3930
4ed340ab
L
3931 hdev->reset_type = HNAE3_NONE_RESET;
3932}
3933
1c6dfe6f 3934static void hclge_reset_service_task(struct hclge_dev *hdev)
466b0c00 3935{
1c6dfe6f
YL
3936 if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
3937 return;
cb1b9f77 3938
8627bded
HT
3939 down(&hdev->reset_sem);
3940 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
cb1b9f77 3941
4ed340ab 3942 hclge_reset_subtask(hdev);
cb1b9f77
SM
3943
3944 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
8627bded 3945 up(&hdev->reset_sem);
466b0c00
L
3946}
3947
a6d818e3
YL
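/* Mark VF vports that have not refreshed last_active_jiffies within the
 * last 8 seconds as not alive, and reset the MPS of non-alive VFs to the
 * default frame size.
 */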
3948static void hclge_update_vport_alive(struct hclge_dev *hdev)
3949{
3950 int i;
3951
3952 /* start from vport 1, since the PF is always alive */
3953 for (i = 1; i < hdev->num_alloc_vport; i++) {
3954 struct hclge_vport *vport = &hdev->vport[i];
3955
3956 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3957 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
818f1675
YL
3958
3959 /* If the VF is not alive, set MPS to the default value */
3960 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3961 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
a6d818e3
YL
3962 }
3963}
3964
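/* Periodic service work: the link status is refreshed on every pass, while
 * the remaining work (vport alive check, statistics, port info, VLAN and
 * aRFS housekeeping) is rate-limited to roughly once per second, with
 * statistics and aRFS expiry further throttled by their own intervals,
 * before the task re-schedules itself.
 */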
1c6dfe6f 3965static void hclge_periodic_service_task(struct hclge_dev *hdev)
46a3df9f 3966{
1c6dfe6f 3967 unsigned long delta = round_jiffies_relative(HZ);
7be1b9f3 3968
1c6dfe6f
YL
3969 /* Always handle the link updating to make sure link state is
3970 * updated when it is triggered by mbx.
3971 */
3972 hclge_update_link_status(hdev);
46a3df9f 3973
1c6dfe6f
YL
3974 if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
3975 delta = jiffies - hdev->last_serv_processed;
3976
3977 if (delta < round_jiffies_relative(HZ)) {
3978 delta = round_jiffies_relative(HZ) - delta;
3979 goto out;
3980 }
c5f65480
JS
3981 }
3982
1c6dfe6f 3983 hdev->serv_processed_cnt++;
a6d818e3 3984 hclge_update_vport_alive(hdev);
1c6dfe6f
YL
3985
3986 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
3987 hdev->last_serv_processed = jiffies;
3988 goto out;
3989 }
3990
3991 if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
3992 hclge_update_stats_for_all(hdev);
3993
3994 hclge_update_port_info(hdev);
fe4144d4 3995 hclge_sync_vlan_filter(hdev);
db4d3d55 3996
1c6dfe6f 3997 if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
d93ed94f 3998 hclge_rfs_filter_expire(hdev);
7be1b9f3 3999
1c6dfe6f
YL
4000 hdev->last_serv_processed = jiffies;
4001
4002out:
4003 hclge_task_schedule(hdev, delta);
4004}
4005
4006static void hclge_service_task(struct work_struct *work)
4007{
4008 struct hclge_dev *hdev =
4009 container_of(work, struct hclge_dev, service_task.work);
4010
4011 hclge_reset_service_task(hdev);
4012 hclge_mailbox_service_task(hdev);
4013 hclge_periodic_service_task(hdev);
4014
4015 /* Handle reset and mbx again in case the periodic task delays the
4016 * handling by calling hclge_task_schedule() in
4017 * hclge_periodic_service_task().
4018 */
4019 hclge_reset_service_task(hdev);
4020 hclge_mailbox_service_task(hdev);
46a3df9f
S
4021}
4022
46a3df9f
S
4023struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4024{
4025 /* VF handle has no client */
4026 if (!handle->client)
4027 return container_of(handle, struct hclge_vport, nic);
4028 else if (handle->client->type == HNAE3_CLIENT_ROCE)
4029 return container_of(handle, struct hclge_vport, roce);
4030 else
4031 return container_of(handle, struct hclge_vport, nic);
4032}
4033
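/* Allocate up to @vector_num MSI-X vectors for the requesting vport from
 * the unused vectors (limited by num_nic_msi and num_msi_left), filling in
 * the irq number and interrupt register address for each one. Returns the
 * number of vectors actually allocated.
 */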
4034static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4035 struct hnae3_vector_info *vector_info)
4036{
4037 struct hclge_vport *vport = hclge_get_vport(handle);
4038 struct hnae3_vector_info *vector = vector_info;
4039 struct hclge_dev *hdev = vport->back;
4040 int alloc = 0;
4041 int i, j;
4042
580a05f9 4043 vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
46a3df9f
S
4044 vector_num = min(hdev->num_msi_left, vector_num);
4045
4046 for (j = 0; j < vector_num; j++) {
4047 for (i = 1; i < hdev->num_msi; i++) {
4048 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4049 vector->vector = pci_irq_vector(hdev->pdev, i);
4050 vector->io_addr = hdev->hw.io_base +
4051 HCLGE_VECTOR_REG_BASE +
4052 (i - 1) * HCLGE_VECTOR_REG_OFFSET +
4053 vport->vport_id *
4054 HCLGE_VECTOR_VF_OFFSET;
4055 hdev->vector_status[i] = vport->vport_id;
887c3820 4056 hdev->vector_irq[i] = vector->vector;
46a3df9f
S
4057
4058 vector++;
4059 alloc++;
4060
4061 break;
4062 }
4063 }
4064 }
4065 hdev->num_msi_left -= alloc;
4066 hdev->num_msi_used += alloc;
4067
4068 return alloc;
4069}
4070
4071static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4072{
4073 int i;
4074
887c3820
SM
4075 for (i = 0; i < hdev->num_msi; i++)
4076 if (vector == hdev->vector_irq[i])
4077 return i;
4078
46a3df9f
S
4079 return -EINVAL;
4080}
4081
0d3e6631
YL
4082static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4083{
4084 struct hclge_vport *vport = hclge_get_vport(handle);
4085 struct hclge_dev *hdev = vport->back;
4086 int vector_id;
4087
4088 vector_id = hclge_get_vector_index(hdev, vector);
4089 if (vector_id < 0) {
4090 dev_err(&hdev->pdev->dev,
6f8e330d 4091 "Get vector index fail. vector = %d\n", vector);
0d3e6631
YL
4092 return vector_id;
4093 }
4094
4095 hclge_free_vector(hdev, vector_id);
4096
4097 return 0;
4098}
4099
46a3df9f
S
4100static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4101{
4102 return HCLGE_RSS_KEY_SIZE;
4103}
4104
4105static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
4106{
4107 return HCLGE_RSS_IND_TBL_SIZE;
4108}
4109
46a3df9f
S
4110static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4111 const u8 hfunc, const u8 *key)
4112{
d44f9b63 4113 struct hclge_rss_config_cmd *req;
ebaf1908 4114 unsigned int key_offset = 0;
46a3df9f 4115 struct hclge_desc desc;
3caf772b 4116 int key_counts;
46a3df9f
S
4117 int key_size;
4118 int ret;
4119
3caf772b 4120 key_counts = HCLGE_RSS_KEY_SIZE;
d44f9b63 4121 req = (struct hclge_rss_config_cmd *)desc.data;
46a3df9f 4122
3caf772b 4123 while (key_counts) {
46a3df9f
S
4124 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4125 false);
4126
4127 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4128 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4129
3caf772b 4130 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
46a3df9f
S
4131 memcpy(req->hash_key,
4132 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4133
3caf772b
YM
4134 key_counts -= key_size;
4135 key_offset++;
46a3df9f
S
4136 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4137 if (ret) {
4138 dev_err(&hdev->pdev->dev,
4139 "Configure RSS config fail, status = %d\n",
4140 ret);
4141 return ret;
4142 }
4143 }
4144 return 0;
4145}
4146
89523cfa 4147static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
46a3df9f 4148{
d44f9b63 4149 struct hclge_rss_indirection_table_cmd *req;
46a3df9f
S
4150 struct hclge_desc desc;
4151 int i, j;
4152 int ret;
4153
d44f9b63 4154 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
46a3df9f
S
4155
4156 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
4157 hclge_cmd_setup_basic_desc
4158 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4159
a90bb9a5
YL
4160 req->start_table_index =
4161 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4162 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
46a3df9f
S
4163
4164 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
4165 req->rss_result[j] =
4166 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4167
4168 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4169 if (ret) {
4170 dev_err(&hdev->pdev->dev,
4171 "Configure rss indir table fail,status = %d\n",
4172 ret);
4173 return ret;
4174 }
4175 }
4176 return 0;
4177}
4178
4179static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4180 u16 *tc_size, u16 *tc_offset)
4181{
d44f9b63 4182 struct hclge_rss_tc_mode_cmd *req;
46a3df9f
S
4183 struct hclge_desc desc;
4184 int ret;
4185 int i;
4186
4187 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
d44f9b63 4188 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
46a3df9f
S
4189
4190 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
a90bb9a5
YL
4191 u16 mode = 0;
4192
e4e87715
PL
4193 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4194 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4195 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4196 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4197 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
a90bb9a5
YL
4198
4199 req->rss_tc_mode[i] = cpu_to_le16(mode);
46a3df9f
S
4200 }
4201
4202 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 4203 if (ret)
46a3df9f
S
4204 dev_err(&hdev->pdev->dev,
4205 "Configure rss tc mode fail, status = %d\n", ret);
46a3df9f 4206
3f639907 4207 return ret;
46a3df9f
S
4208}
4209
232fc64b
PL
4210static void hclge_get_rss_type(struct hclge_vport *vport)
4211{
4212 if (vport->rss_tuple_sets.ipv4_tcp_en ||
4213 vport->rss_tuple_sets.ipv4_udp_en ||
4214 vport->rss_tuple_sets.ipv4_sctp_en ||
4215 vport->rss_tuple_sets.ipv6_tcp_en ||
4216 vport->rss_tuple_sets.ipv6_udp_en ||
4217 vport->rss_tuple_sets.ipv6_sctp_en)
4218 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4219 else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4220 vport->rss_tuple_sets.ipv6_fragment_en)
4221 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4222 else
4223 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4224}
4225
46a3df9f
S
4226static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4227{
d44f9b63 4228 struct hclge_rss_input_tuple_cmd *req;
46a3df9f
S
4229 struct hclge_desc desc;
4230 int ret;
4231
4232 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4233
d44f9b63 4234 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
6f2af429
YL
4235
4236 /* Get the tuple cfg from pf */
4237 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4238 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4239 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4240 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4241 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4242 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4243 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4244 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
232fc64b 4245 hclge_get_rss_type(&hdev->vport[0]);
46a3df9f 4246 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 4247 if (ret)
46a3df9f
S
4248 dev_err(&hdev->pdev->dev,
4249 "Configure rss input fail, status = %d\n", ret);
3f639907 4250 return ret;
46a3df9f
S
4251}
4252
4253static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4254 u8 *key, u8 *hfunc)
4255{
4256 struct hclge_vport *vport = hclge_get_vport(handle);
46a3df9f
S
4257 int i;
4258
4259 /* Get hash algorithm */
775501a1
JS
4260 if (hfunc) {
4261 switch (vport->rss_algo) {
4262 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4263 *hfunc = ETH_RSS_HASH_TOP;
4264 break;
4265 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4266 *hfunc = ETH_RSS_HASH_XOR;
4267 break;
4268 default:
4269 *hfunc = ETH_RSS_HASH_UNKNOWN;
4270 break;
4271 }
4272 }
46a3df9f
S
4273
4274 /* Get the RSS Key required by the user */
4275 if (key)
4276 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4277
4278 /* Get indirect table */
4279 if (indir)
4280 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4281 indir[i] = vport->rss_indirection_tbl[i];
4282
4283 return 0;
4284}
4285
4286static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4287 const u8 *key, const u8 hfunc)
4288{
4289 struct hclge_vport *vport = hclge_get_vport(handle);
4290 struct hclge_dev *hdev = vport->back;
4291 u8 hash_algo;
4292 int ret, i;
4293
4294 /* Set the RSS hash key if specified by the user */
4295 if (key) {
775501a1
JS
4296 switch (hfunc) {
4297 case ETH_RSS_HASH_TOP:
46a3df9f 4298 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
775501a1
JS
4299 break;
4300 case ETH_RSS_HASH_XOR:
4301 hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4302 break;
4303 case ETH_RSS_HASH_NO_CHANGE:
4304 hash_algo = vport->rss_algo;
4305 break;
4306 default:
46a3df9f 4307 return -EINVAL;
775501a1
JS
4308 }
4309
46a3df9f
S
4310 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4311 if (ret)
4312 return ret;
89523cfa
YL
4313
4314 /* Update the shadow RSS key with the user specified key */
4315 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4316 vport->rss_algo = hash_algo;
46a3df9f
S
4317 }
4318
4319 /* Update the shadow RSS table with user specified qids */
4320 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4321 vport->rss_indirection_tbl[i] = indir[i];
4322
4323 /* Update the hardware */
89523cfa 4324 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
46a3df9f
S
4325}
4326
f7db940a
L
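/* Translate the ethtool RXH_* flags in @nfc->data into the driver's
 * HCLGE_*_BIT tuple mask; SCTP flows additionally enable HCLGE_V_TAG_BIT.
 */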
4327static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4328{
4329 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4330
4331 if (nfc->data & RXH_L4_B_2_3)
4332 hash_sets |= HCLGE_D_PORT_BIT;
4333 else
4334 hash_sets &= ~HCLGE_D_PORT_BIT;
4335
4336 if (nfc->data & RXH_IP_SRC)
4337 hash_sets |= HCLGE_S_IP_BIT;
4338 else
4339 hash_sets &= ~HCLGE_S_IP_BIT;
4340
4341 if (nfc->data & RXH_IP_DST)
4342 hash_sets |= HCLGE_D_IP_BIT;
4343 else
4344 hash_sets &= ~HCLGE_D_IP_BIT;
4345
4346 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4347 hash_sets |= HCLGE_V_TAG_BIT;
4348
4349 return hash_sets;
4350}
4351
4352static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4353 struct ethtool_rxnfc *nfc)
4354{
4355 struct hclge_vport *vport = hclge_get_vport(handle);
4356 struct hclge_dev *hdev = vport->back;
4357 struct hclge_rss_input_tuple_cmd *req;
4358 struct hclge_desc desc;
4359 u8 tuple_sets;
4360 int ret;
4361
4362 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4363 RXH_L4_B_0_1 | RXH_L4_B_2_3))
4364 return -EINVAL;
4365
4366 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
6f2af429 4367 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
f7db940a 4368
6f2af429
YL
4369 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4370 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4371 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4372 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4373 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4374 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4375 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4376 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
f7db940a
L
4377
4378 tuple_sets = hclge_get_rss_hash_bits(nfc);
4379 switch (nfc->flow_type) {
4380 case TCP_V4_FLOW:
4381 req->ipv4_tcp_en = tuple_sets;
4382 break;
4383 case TCP_V6_FLOW:
4384 req->ipv6_tcp_en = tuple_sets;
4385 break;
4386 case UDP_V4_FLOW:
4387 req->ipv4_udp_en = tuple_sets;
4388 break;
4389 case UDP_V6_FLOW:
4390 req->ipv6_udp_en = tuple_sets;
4391 break;
4392 case SCTP_V4_FLOW:
4393 req->ipv4_sctp_en = tuple_sets;
4394 break;
4395 case SCTP_V6_FLOW:
4396 if ((nfc->data & RXH_L4_B_0_1) ||
4397 (nfc->data & RXH_L4_B_2_3))
4398 return -EINVAL;
4399
4400 req->ipv6_sctp_en = tuple_sets;
4401 break;
4402 case IPV4_FLOW:
4403 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4404 break;
4405 case IPV6_FLOW:
4406 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4407 break;
4408 default:
4409 return -EINVAL;
4410 }
4411
4412 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6f2af429 4413 if (ret) {
f7db940a
L
4414 dev_err(&hdev->pdev->dev,
4415 "Set rss tuple fail, status = %d\n", ret);
6f2af429
YL
4416 return ret;
4417 }
f7db940a 4418
6f2af429
YL
4419 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4420 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4421 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4422 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4423 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4424 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4425 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4426 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
232fc64b 4427 hclge_get_rss_type(vport);
6f2af429 4428 return 0;
f7db940a
L
4429}
4430
07d29954
L
4431static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4432 struct ethtool_rxnfc *nfc)
4433{
4434 struct hclge_vport *vport = hclge_get_vport(handle);
07d29954 4435 u8 tuple_sets;
07d29954
L
4436
4437 nfc->data = 0;
4438
07d29954
L
4439 switch (nfc->flow_type) {
4440 case TCP_V4_FLOW:
6f2af429 4441 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
07d29954
L
4442 break;
4443 case UDP_V4_FLOW:
6f2af429 4444 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
07d29954
L
4445 break;
4446 case TCP_V6_FLOW:
6f2af429 4447 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
07d29954
L
4448 break;
4449 case UDP_V6_FLOW:
6f2af429 4450 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
07d29954
L
4451 break;
4452 case SCTP_V4_FLOW:
6f2af429 4453 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
07d29954
L
4454 break;
4455 case SCTP_V6_FLOW:
6f2af429 4456 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
07d29954
L
4457 break;
4458 case IPV4_FLOW:
4459 case IPV6_FLOW:
4460 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4461 break;
4462 default:
4463 return -EINVAL;
4464 }
4465
4466 if (!tuple_sets)
4467 return 0;
4468
4469 if (tuple_sets & HCLGE_D_PORT_BIT)
4470 nfc->data |= RXH_L4_B_2_3;
4471 if (tuple_sets & HCLGE_S_PORT_BIT)
4472 nfc->data |= RXH_L4_B_0_1;
4473 if (tuple_sets & HCLGE_D_IP_BIT)
4474 nfc->data |= RXH_IP_DST;
4475 if (tuple_sets & HCLGE_S_IP_BIT)
4476 nfc->data |= RXH_IP_SRC;
4477
4478 return 0;
4479}
4480
46a3df9f
S
4481static int hclge_get_tc_size(struct hnae3_handle *handle)
4482{
4483 struct hclge_vport *vport = hclge_get_vport(handle);
4484 struct hclge_dev *hdev = vport->back;
4485
4486 return hdev->rss_size_max;
4487}
4488
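/* Write vport 0's shadow RSS configuration (indirection table, hash key
 * and algorithm, input tuples) to hardware, then configure the per-TC RSS
 * mode based on the allocated rss_size.
 */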
77f255c1 4489int hclge_rss_init_hw(struct hclge_dev *hdev)
46a3df9f 4490{
46a3df9f 4491 struct hclge_vport *vport = hdev->vport;
268f5dfa
YL
4492 u8 *rss_indir = vport[0].rss_indirection_tbl;
4493 u16 rss_size = vport[0].alloc_rss_size;
354d0fab
PL
4494 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4495 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
268f5dfa
YL
4496 u8 *key = vport[0].rss_hash_key;
4497 u8 hfunc = vport[0].rss_algo;
46a3df9f 4498 u16 tc_valid[HCLGE_MAX_TC_NUM];
268f5dfa 4499 u16 roundup_size;
ebaf1908
WL
4500 unsigned int i;
4501 int ret;
68ece54e 4502
46a3df9f
S
4503 ret = hclge_set_rss_indir_table(hdev, rss_indir);
4504 if (ret)
268f5dfa 4505 return ret;
46a3df9f 4506
46a3df9f
S
4507 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4508 if (ret)
268f5dfa 4509 return ret;
46a3df9f
S
4510
4511 ret = hclge_set_rss_input_tuple(hdev);
4512 if (ret)
268f5dfa 4513 return ret;
46a3df9f 4514
68ece54e
YL
4515 /* Each TC has the same queue size, and the tc_size set to hardware is
4516 * the log2 of the roundup power of two of rss_size; the actual queue
4517 * size is limited by the indirection table.
4518 */
4519 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4520 dev_err(&hdev->pdev->dev,
adcf738b 4521 "Configure rss tc size failed, invalid TC_SIZE = %u\n",
68ece54e 4522 rss_size);
268f5dfa 4523 return -EINVAL;
68ece54e
YL
4524 }
4525
4526 roundup_size = roundup_pow_of_two(rss_size);
4527 roundup_size = ilog2(roundup_size);
4528
46a3df9f 4529 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
68ece54e 4530 tc_valid[i] = 0;
46a3df9f 4531
68ece54e
YL
4532 if (!(hdev->hw_tc_map & BIT(i)))
4533 continue;
4534
4535 tc_valid[i] = 1;
4536 tc_size[i] = roundup_size;
4537 tc_offset[i] = rss_size * i;
46a3df9f 4538 }
68ece54e 4539
268f5dfa
YL
4540 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4541}
46a3df9f 4542
268f5dfa
YL
4543void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4544{
4545 struct hclge_vport *vport = hdev->vport;
4546 int i, j;
46a3df9f 4547
268f5dfa
YL
4548 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4549 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4550 vport[j].rss_indirection_tbl[i] =
4551 i % vport[j].alloc_rss_size;
4552 }
4553}
4554
4555static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4556{
472d7ece 4557 int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
268f5dfa 4558 struct hclge_vport *vport = hdev->vport;
472d7ece
JS
4559
4560 if (hdev->pdev->revision >= 0x21)
4561 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
268f5dfa 4562
268f5dfa
YL
4563 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4564 vport[i].rss_tuple_sets.ipv4_tcp_en =
4565 HCLGE_RSS_INPUT_TUPLE_OTHER;
4566 vport[i].rss_tuple_sets.ipv4_udp_en =
4567 HCLGE_RSS_INPUT_TUPLE_OTHER;
4568 vport[i].rss_tuple_sets.ipv4_sctp_en =
4569 HCLGE_RSS_INPUT_TUPLE_SCTP;
4570 vport[i].rss_tuple_sets.ipv4_fragment_en =
4571 HCLGE_RSS_INPUT_TUPLE_OTHER;
4572 vport[i].rss_tuple_sets.ipv6_tcp_en =
4573 HCLGE_RSS_INPUT_TUPLE_OTHER;
4574 vport[i].rss_tuple_sets.ipv6_udp_en =
4575 HCLGE_RSS_INPUT_TUPLE_OTHER;
4576 vport[i].rss_tuple_sets.ipv6_sctp_en =
4577 HCLGE_RSS_INPUT_TUPLE_SCTP;
4578 vport[i].rss_tuple_sets.ipv6_fragment_en =
4579 HCLGE_RSS_INPUT_TUPLE_OTHER;
4580
472d7ece 4581 vport[i].rss_algo = rss_algo;
ea739c90 4582
472d7ece
JS
4583 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4584 HCLGE_RSS_KEY_SIZE);
268f5dfa
YL
4585 }
4586
4587 hclge_rss_indir_init_cfg(hdev);
46a3df9f
S
4588}
4589
84e095d6
SM
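/* Map (@en == true) or unmap (@en == false) the TQP rings in @ring_chain
 * to interrupt vector @vector_id, batching up to
 * HCLGE_VECTOR_ELEMENTS_PER_CMD ring entries per firmware command.
 */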
4590int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4591 int vector_id, bool en,
4592 struct hnae3_ring_chain_node *ring_chain)
46a3df9f
S
4593{
4594 struct hclge_dev *hdev = vport->back;
46a3df9f
S
4595 struct hnae3_ring_chain_node *node;
4596 struct hclge_desc desc;
37417c66
GL
4597 struct hclge_ctrl_vector_chain_cmd *req =
4598 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
84e095d6
SM
4599 enum hclge_cmd_status status;
4600 enum hclge_opcode_type op;
4601 u16 tqp_type_and_id;
46a3df9f
S
4602 int i;
4603
84e095d6
SM
4604 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4605 hclge_cmd_setup_basic_desc(&desc, op, false);
46a3df9f
S
4606 req->int_vector_id = vector_id;
4607
4608 i = 0;
4609 for (node = ring_chain; node; node = node->next) {
84e095d6 4610 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
e4e87715
PL
4611 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
4612 HCLGE_INT_TYPE_S,
4613 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4614 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4615 HCLGE_TQP_ID_S, node->tqp_index);
4616 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4617 HCLGE_INT_GL_IDX_S,
4618 hnae3_get_field(node->int_gl_idx,
4619 HNAE3_RING_GL_IDX_M,
4620 HNAE3_RING_GL_IDX_S));
84e095d6 4621 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
46a3df9f
S
4622 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4623 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
84e095d6 4624 req->vfid = vport->vport_id;
46a3df9f 4625
84e095d6
SM
4626 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4627 if (status) {
46a3df9f
S
4628 dev_err(&hdev->pdev->dev,
4629 "Map TQP fail, status is %d.\n",
84e095d6
SM
4630 status);
4631 return -EIO;
46a3df9f
S
4632 }
4633 i = 0;
4634
4635 hclge_cmd_setup_basic_desc(&desc,
84e095d6 4636 op,
46a3df9f
S
4637 false);
4638 req->int_vector_id = vector_id;
4639 }
4640 }
4641
4642 if (i > 0) {
4643 req->int_cause_num = i;
84e095d6
SM
4644 req->vfid = vport->vport_id;
4645 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4646 if (status) {
46a3df9f 4647 dev_err(&hdev->pdev->dev,
84e095d6
SM
4648 "Map TQP fail, status is %d.\n", status);
4649 return -EIO;
46a3df9f
S
4650 }
4651 }
4652
4653 return 0;
4654}
4655
9b2f3477 4656static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
84e095d6 4657 struct hnae3_ring_chain_node *ring_chain)
46a3df9f
S
4658{
4659 struct hclge_vport *vport = hclge_get_vport(handle);
4660 struct hclge_dev *hdev = vport->back;
4661 int vector_id;
4662
4663 vector_id = hclge_get_vector_index(hdev, vector);
4664 if (vector_id < 0) {
4665 dev_err(&hdev->pdev->dev,
7ab2b53e 4666 "failed to get vector index. vector=%d\n", vector);
46a3df9f
S
4667 return vector_id;
4668 }
4669
84e095d6 4670 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
46a3df9f
S
4671}
4672
9b2f3477 4673static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
84e095d6 4674 struct hnae3_ring_chain_node *ring_chain)
46a3df9f
S
4675{
4676 struct hclge_vport *vport = hclge_get_vport(handle);
4677 struct hclge_dev *hdev = vport->back;
84e095d6 4678 int vector_id, ret;
46a3df9f 4679
b50ae26c
PL
4680 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4681 return 0;
4682
46a3df9f
S
4683 vector_id = hclge_get_vector_index(hdev, vector);
4684 if (vector_id < 0) {
4685 dev_err(&handle->pdev->dev,
4686 "Get vector index fail. ret =%d\n", vector_id);
4687 return vector_id;
4688 }
4689
84e095d6 4690 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
0d3e6631 4691 if (ret)
84e095d6
SM
4692 dev_err(&handle->pdev->dev,
4693 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
9b2f3477 4694 vector_id, ret);
46a3df9f 4695
0d3e6631 4696 return ret;
46a3df9f
S
4697}
4698
e196ec75
JS
4699static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4700 struct hclge_promisc_param *param)
46a3df9f 4701{
d44f9b63 4702 struct hclge_promisc_cfg_cmd *req;
46a3df9f
S
4703 struct hclge_desc desc;
4704 int ret;
4705
4706 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4707
d44f9b63 4708 req = (struct hclge_promisc_cfg_cmd *)desc.data;
46a3df9f 4709 req->vf_id = param->vf_id;
96c0e861
PL
4710
4711 /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4712 * pdev revision(0x20); newer revisions support them. The value of
4713 * these two fields will not return an error when the driver sends
4714 * commands to the firmware in revision(0x20).
4715 */
4716 req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4717 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
46a3df9f
S
4718
4719 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 4720 if (ret)
46a3df9f
S
4721 dev_err(&hdev->pdev->dev,
4722 "Set promisc mode fail, status is %d.\n", ret);
3f639907
JS
4723
4724 return ret;
46a3df9f
S
4725}
4726
e196ec75
JS
4727static void hclge_promisc_param_init(struct hclge_promisc_param *param,
4728 bool en_uc, bool en_mc, bool en_bc,
4729 int vport_id)
46a3df9f
S
4730{
4731 if (!param)
4732 return;
4733
4734 memset(param, 0, sizeof(struct hclge_promisc_param));
4735 if (en_uc)
4736 param->enable = HCLGE_PROMISC_EN_UC;
4737 if (en_mc)
4738 param->enable |= HCLGE_PROMISC_EN_MC;
4739 if (en_bc)
4740 param->enable |= HCLGE_PROMISC_EN_BC;
4741 param->vf_id = vport_id;
4742}
4743
e196ec75
JS
4744int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
4745 bool en_mc_pmc, bool en_bc_pmc)
4746{
4747 struct hclge_dev *hdev = vport->back;
4748 struct hclge_promisc_param param;
4749
4750 hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4751 vport->vport_id);
4752 return hclge_cmd_set_promisc_mode(hdev, &param);
4753}
4754
7fa6be4f
HT
4755static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4756 bool en_mc_pmc)
46a3df9f
S
4757{
4758 struct hclge_vport *vport = hclge_get_vport(handle);
28673b33 4759 bool en_bc_pmc = true;
46a3df9f 4760
28673b33
JS
4761 /* For revision 0x20, if broadcast promisc is enabled, the vlan filter is
4762 * always bypassed. So broadcast promisc should be disabled until the
4763 * user enables promisc mode
4764 */
4765 if (handle->pdev->revision == 0x20)
4766 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4767
e196ec75
JS
4768 return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
4769 en_bc_pmc);
46a3df9f
S
4770}
4771
d695964d
JS
4772static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4773{
4774 struct hclge_get_fd_mode_cmd *req;
4775 struct hclge_desc desc;
4776 int ret;
4777
4778 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4779
4780 req = (struct hclge_get_fd_mode_cmd *)desc.data;
4781
4782 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4783 if (ret) {
4784 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4785 return ret;
4786 }
4787
4788 *fd_mode = req->mode;
4789
4790 return ret;
4791}
4792
4793static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4794 u32 *stage1_entry_num,
4795 u32 *stage2_entry_num,
4796 u16 *stage1_counter_num,
4797 u16 *stage2_counter_num)
4798{
4799 struct hclge_get_fd_allocation_cmd *req;
4800 struct hclge_desc desc;
4801 int ret;
4802
4803 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4804
4805 req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4806
4807 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4808 if (ret) {
4809 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4810 ret);
4811 return ret;
4812 }
4813
4814 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4815 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4816 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4817 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4818
4819 return ret;
4820}
4821
4822static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4823{
4824 struct hclge_set_fd_key_config_cmd *req;
4825 struct hclge_fd_key_cfg *stage;
4826 struct hclge_desc desc;
4827 int ret;
4828
4829 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4830
4831 req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4832 stage = &hdev->fd_cfg.key_cfg[stage_num];
4833 req->stage = stage_num;
4834 req->key_select = stage->key_sel;
4835 req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4836 req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4837 req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4838 req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4839 req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4840 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4841
4842 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4843 if (ret)
4844 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4845
4846 return ret;
4847}
4848
4849static int hclge_init_fd_config(struct hclge_dev *hdev)
4850{
4851#define LOW_2_WORDS 0x03
4852 struct hclge_fd_key_cfg *key_cfg;
4853 int ret;
4854
4855 if (!hnae3_dev_fd_supported(hdev))
4856 return 0;
4857
4858 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4859 if (ret)
4860 return ret;
4861
4862 switch (hdev->fd_cfg.fd_mode) {
4863 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4864 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4865 break;
4866 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4867 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4868 break;
4869 default:
4870 dev_err(&hdev->pdev->dev,
adcf738b 4871 "Unsupported flow director mode %u\n",
d695964d
JS
4872 hdev->fd_cfg.fd_mode);
4873 return -EOPNOTSUPP;
4874 }
4875
d695964d
JS
4876 hdev->fd_cfg.proto_support =
4877 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4878 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4879 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4880 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4881 key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4882 key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4883 key_cfg->outer_sipv6_word_en = 0;
4884 key_cfg->outer_dipv6_word_en = 0;
4885
4886 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4887 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4888 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4889 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4890
4891 /* If the max 400-bit key is used, we can also support the ether type tuple */
4892 if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4893 hdev->fd_cfg.proto_support |= ETHER_FLOW;
4894 key_cfg->tuple_active |=
4895 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4896 }
4897
4898 /* roce_type is used to filter roce frames
4899 * dst_vport is used to specify the rule
4900 */
4901 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4902
4903 ret = hclge_get_fd_allocation(hdev,
4904 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4905 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4906 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4907 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4908 if (ret)
4909 return ret;
4910
4911 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4912}
4913
11732868
JS
4914static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4915 int loc, u8 *key, bool is_add)
4916{
4917 struct hclge_fd_tcam_config_1_cmd *req1;
4918 struct hclge_fd_tcam_config_2_cmd *req2;
4919 struct hclge_fd_tcam_config_3_cmd *req3;
4920 struct hclge_desc desc[3];
4921 int ret;
4922
4923 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4924 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4925 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4926 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4927 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4928
4929 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4930 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4931 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4932
4933 req1->stage = stage;
4934 req1->xy_sel = sel_x ? 1 : 0;
4935 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4936 req1->index = cpu_to_le32(loc);
4937 req1->entry_vld = sel_x ? is_add : 0;
4938
4939 if (key) {
4940 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4941 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4942 sizeof(req2->tcam_data));
4943 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4944 sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4945 }
4946
4947 ret = hclge_cmd_send(&hdev->hw, desc, 3);
4948 if (ret)
4949 dev_err(&hdev->pdev->dev,
4950 "config tcam key fail, ret=%d\n",
4951 ret);
4952
4953 return ret;
4954}
4955
4956static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4957 struct hclge_fd_ad_data *action)
4958{
4959 struct hclge_fd_ad_config_cmd *req;
4960 struct hclge_desc desc;
4961 u64 ad_data = 0;
4962 int ret;
4963
4964 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4965
4966 req = (struct hclge_fd_ad_config_cmd *)desc.data;
4967 req->index = cpu_to_le32(loc);
4968 req->stage = stage;
4969
4970 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4971 action->write_rule_id_to_bd);
4972 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4973 action->rule_id);
4974 ad_data <<= 32;
4975 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4976 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4977 action->forward_to_direct_queue);
4978 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4979 action->queue_id);
4980 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4981 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4982 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4983 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4984 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4985 action->counter_id);
4986
4987 req->ad_data = cpu_to_le64(ad_data);
4988 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4989 if (ret)
4990 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4991
4992 return ret;
4993}
4994
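/* Convert one tuple of the flow director rule into the TCAM x/y key
 * format. Returns true when the tuple occupies key space (so the caller
 * advances the key pointers by the tuple size), false when the tuple bit
 * is not handled.
 */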
4995static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4996 struct hclge_fd_rule *rule)
4997{
4998 u16 tmp_x_s, tmp_y_s;
4999 u32 tmp_x_l, tmp_y_l;
5000 int i;
5001
5002 if (rule->unused_tuple & tuple_bit)
5003 return true;
5004
5005 switch (tuple_bit) {
5006 case 0:
5007 return false;
5008 case BIT(INNER_DST_MAC):
e91e388c
JS
5009 for (i = 0; i < ETH_ALEN; i++) {
5010 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
11732868 5011 rule->tuples_mask.dst_mac[i]);
e91e388c 5012 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
11732868
JS
5013 rule->tuples_mask.dst_mac[i]);
5014 }
5015
5016 return true;
5017 case BIT(INNER_SRC_MAC):
e91e388c
JS
5018 for (i = 0; i < ETH_ALEN; i++) {
5019 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
11732868 5020 rule->tuples_mask.src_mac[i]);
e91e388c 5021 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
11732868
JS
5022 rule->tuples_mask.src_mac[i]);
5023 }
5024
5025 return true;
5026 case BIT(INNER_VLAN_TAG_FST):
5027 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
5028 rule->tuples_mask.vlan_tag1);
5029 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
5030 rule->tuples_mask.vlan_tag1);
5031 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5032 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5033
5034 return true;
5035 case BIT(INNER_ETH_TYPE):
5036 calc_x(tmp_x_s, rule->tuples.ether_proto,
5037 rule->tuples_mask.ether_proto);
5038 calc_y(tmp_y_s, rule->tuples.ether_proto,
5039 rule->tuples_mask.ether_proto);
5040 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5041 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5042
5043 return true;
5044 case BIT(INNER_IP_TOS):
5045 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5046 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5047
5048 return true;
5049 case BIT(INNER_IP_PROTO):
5050 calc_x(*key_x, rule->tuples.ip_proto,
5051 rule->tuples_mask.ip_proto);
5052 calc_y(*key_y, rule->tuples.ip_proto,
5053 rule->tuples_mask.ip_proto);
5054
5055 return true;
5056 case BIT(INNER_SRC_IP):
e91e388c
JS
5057 calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
5058 rule->tuples_mask.src_ip[IPV4_INDEX]);
5059 calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
5060 rule->tuples_mask.src_ip[IPV4_INDEX]);
11732868
JS
5061 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5062 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5063
5064 return true;
5065 case BIT(INNER_DST_IP):
e91e388c
JS
5066 calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
5067 rule->tuples_mask.dst_ip[IPV4_INDEX]);
5068 calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
5069 rule->tuples_mask.dst_ip[IPV4_INDEX]);
11732868
JS
5070 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5071 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5072
5073 return true;
5074 case BIT(INNER_SRC_PORT):
5075 calc_x(tmp_x_s, rule->tuples.src_port,
5076 rule->tuples_mask.src_port);
5077 calc_y(tmp_y_s, rule->tuples.src_port,
5078 rule->tuples_mask.src_port);
5079 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5080 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5081
5082 return true;
5083 case BIT(INNER_DST_PORT):
5084 calc_x(tmp_x_s, rule->tuples.dst_port,
5085 rule->tuples_mask.dst_port);
5086 calc_y(tmp_y_s, rule->tuples.dst_port,
5087 rule->tuples_mask.dst_port);
5088 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5089 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5090
5091 return true;
5092 default:
5093 return false;
5094 }
5095}
5096
5097static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5098 u8 vf_id, u8 network_port_id)
5099{
5100 u32 port_number = 0;
5101
5102 if (port_type == HOST_PORT) {
5103 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5104 pf_id);
5105 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5106 vf_id);
5107 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5108 } else {
5109 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5110 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5111 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5112 }
5113
5114 return port_number;
5115}
5116
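/* Build the meta data portion of the TCAM key: pack the active meta data
 * fields (packet type and destination vport number) into @key_x/@key_y,
 * left-aligned so that the meta data occupies the MSB end of the key.
 */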
5117static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5118 __le32 *key_x, __le32 *key_y,
5119 struct hclge_fd_rule *rule)
5120{
5121 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5122 u8 cur_pos = 0, tuple_size, shift_bits;
ebaf1908 5123 unsigned int i;
11732868
JS
5124
5125 for (i = 0; i < MAX_META_DATA; i++) {
5126 tuple_size = meta_data_key_info[i].key_length;
5127 tuple_bit = key_cfg->meta_data_active & BIT(i);
5128
5129 switch (tuple_bit) {
5130 case BIT(ROCE_TYPE):
5131 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5132 cur_pos += tuple_size;
5133 break;
5134 case BIT(DST_VPORT):
5135 port_number = hclge_get_port_number(HOST_PORT, 0,
5136 rule->vf_id, 0);
5137 hnae3_set_field(meta_data,
5138 GENMASK(cur_pos + tuple_size - 1, cur_pos),
5139 cur_pos, port_number);
5140 cur_pos += tuple_size;
5141 break;
5142 default:
5143 break;
5144 }
5145 }
5146
5147 calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5148 calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5149 shift_bits = sizeof(meta_data) * 8 - cur_pos;
5150
5151 *key_x = cpu_to_le32(tmp_x << shift_bits);
5152 *key_y = cpu_to_le32(tmp_y << shift_bits);
5153}
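/* Editor's note (illustrative): the active meta data fields are packed into
 * 'meta_data' from bit 0 upwards, and the final shift by
 * shift_bits = 32 - cur_pos moves the used bits to the most significant end
 * of the 32-bit word, so the meta data ends up in the MSB region of the key.
 * For example, assuming cur_pos == 12 after the loop (field widths come from
 * meta_data_key_info[], so 12 is only an assumed value), shift_bits == 20 and
 * only bits 31..20 of *key_x and *key_y carry meta data.
 */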
5154
5155 /* A complete key consists of a meta data key and a tuple key.
5156 * The meta data key is stored in the MSB region, and the tuple key is
5157 * stored in the LSB region; unused bits are filled with 0.
5158 */
5159static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5160 struct hclge_fd_rule *rule)
5161{
5162 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5163 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5164 u8 *cur_key_x, *cur_key_y;
ebaf1908
WL
5165 unsigned int i;
5166 int ret, tuple_size;
11732868
JS
5167 u8 meta_data_region;
5168
5169 memset(key_x, 0, sizeof(key_x));
5170 memset(key_y, 0, sizeof(key_y));
5171 cur_key_x = key_x;
5172 cur_key_y = key_y;
5173
5174 for (i = 0; i < MAX_TUPLE; i++) {
5175 bool tuple_valid;
5176 u32 check_tuple;
5177
5178 tuple_size = tuple_key_info[i].key_length / 8;
5179 check_tuple = key_cfg->tuple_active & BIT(i);
5180
5181 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5182 cur_key_y, rule);
5183 if (tuple_valid) {
5184 cur_key_x += tuple_size;
5185 cur_key_y += tuple_size;
5186 }
5187 }
5188
5189 meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5190 MAX_META_DATA_LENGTH / 8;
5191
5192 hclge_fd_convert_meta_data(key_cfg,
5193 (__le32 *)(key_x + meta_data_region),
5194 (__le32 *)(key_y + meta_data_region),
5195 rule);
5196
5197 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5198 true);
5199 if (ret) {
5200 dev_err(&hdev->pdev->dev,
adcf738b 5201 "fd key_y config fail, loc=%u, ret=%d\n",
11732868
JS
5202 rule->location, ret);
5203 return ret;
5204 }
5205
5206 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5207 true);
5208 if (ret)
5209 dev_err(&hdev->pdev->dev,
adcf738b 5210 "fd key_x config fail, loc=%u, ret=%d\n",
11732868
JS
5211 rule->location, ret);
5212 return ret;
5213}
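/* Editor's sketch of the final key layout (not part of the driver), expanding
 * on the comment above hclge_config_key():
 *
 *	byte 0                     meta_data_region     max_key_length/8 - 1
 *	+--------------------------+---------+---------------------------+
 *	| tuple key (grows upward) | 0 ..... |      meta data key        |
 *	+--------------------------+---------+---------------------------+
 *
 * cur_key_x/cur_key_y advance by tuple_size for every tuple that
 * hclge_fd_convert_tuple() reports as valid, while the meta data is written
 * at the fixed offset meta_data_region derived from MAX_META_DATA_LENGTH.
 */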
5214
5215static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5216 struct hclge_fd_rule *rule)
5217{
5218 struct hclge_fd_ad_data ad_data;
5219
5220 ad_data.ad_id = rule->location;
5221
5222 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5223 ad_data.drop_packet = true;
5224 ad_data.forward_to_direct_queue = false;
5225 ad_data.queue_id = 0;
5226 } else {
5227 ad_data.drop_packet = false;
5228 ad_data.forward_to_direct_queue = true;
5229 ad_data.queue_id = rule->queue_id;
5230 }
5231
5232 ad_data.use_counter = false;
5233 ad_data.counter_id = 0;
5234
5235 ad_data.use_next_stage = false;
5236 ad_data.next_input_key = 0;
5237
5238 ad_data.write_rule_id_to_bd = true;
5239 ad_data.rule_id = rule->location;
5240
5241 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5242}
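/* Editor's note (illustrative): the action data programs one of two paths for
 * a matched packet, as set up above:
 *
 *	drop:    drop_packet = true,  forward_to_direct_queue = false
 *	forward: drop_packet = false, forward_to_direct_queue = true,
 *	         queue_id = rule->queue_id
 *
 * write_rule_id_to_bd/rule_id ask the hardware to record rule->location for
 * matched packets (the field names are the basis for this reading); the same
 * location value is later passed to rps_may_expire_flow() by
 * hclge_rfs_filter_expire().
 */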
5243
dd74f815
JS
5244static int hclge_fd_check_spec(struct hclge_dev *hdev,
5245 struct ethtool_rx_flow_spec *fs, u32 *unused)
5246{
5247 struct ethtool_tcpip4_spec *tcp_ip4_spec;
5248 struct ethtool_usrip4_spec *usr_ip4_spec;
5249 struct ethtool_tcpip6_spec *tcp_ip6_spec;
5250 struct ethtool_usrip6_spec *usr_ip6_spec;
5251 struct ethhdr *ether_spec;
5252
5253 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5254 return -EINVAL;
5255
5256 if (!(fs->flow_type & hdev->fd_cfg.proto_support))
5257 return -EOPNOTSUPP;
5258
5259 if ((fs->flow_type & FLOW_EXT) &&
5260 (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5261 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5262 return -EOPNOTSUPP;
5263 }
5264
5265 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5266 case SCTP_V4_FLOW:
5267 case TCP_V4_FLOW:
5268 case UDP_V4_FLOW:
5269 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
5270 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5271
5272 if (!tcp_ip4_spec->ip4src)
5273 *unused |= BIT(INNER_SRC_IP);
5274
5275 if (!tcp_ip4_spec->ip4dst)
5276 *unused |= BIT(INNER_DST_IP);
5277
5278 if (!tcp_ip4_spec->psrc)
5279 *unused |= BIT(INNER_SRC_PORT);
5280
5281 if (!tcp_ip4_spec->pdst)
5282 *unused |= BIT(INNER_DST_PORT);
5283
5284 if (!tcp_ip4_spec->tos)
5285 *unused |= BIT(INNER_IP_TOS);
5286
5287 break;
5288 case IP_USER_FLOW:
5289 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
5290 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5291 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5292
5293 if (!usr_ip4_spec->ip4src)
5294 *unused |= BIT(INNER_SRC_IP);
5295
5296 if (!usr_ip4_spec->ip4dst)
5297 *unused |= BIT(INNER_DST_IP);
5298
5299 if (!usr_ip4_spec->tos)
5300 *unused |= BIT(INNER_IP_TOS);
5301
5302 if (!usr_ip4_spec->proto)
5303 *unused |= BIT(INNER_IP_PROTO);
5304
5305 if (usr_ip4_spec->l4_4_bytes)
5306 return -EOPNOTSUPP;
5307
5308 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
5309 return -EOPNOTSUPP;
5310
5311 break;
5312 case SCTP_V6_FLOW:
5313 case TCP_V6_FLOW:
5314 case UDP_V6_FLOW:
5315 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
5316 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5317 BIT(INNER_IP_TOS);
5318
e91e388c 5319 /* check whether src/dst ip address is used */
dd74f815
JS
5320 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
5321 !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
5322 *unused |= BIT(INNER_SRC_IP);
5323
5324 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
5325 !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
5326 *unused |= BIT(INNER_DST_IP);
5327
5328 if (!tcp_ip6_spec->psrc)
5329 *unused |= BIT(INNER_SRC_PORT);
5330
5331 if (!tcp_ip6_spec->pdst)
5332 *unused |= BIT(INNER_DST_PORT);
5333
5334 if (tcp_ip6_spec->tclass)
5335 return -EOPNOTSUPP;
5336
5337 break;
5338 case IPV6_USER_FLOW:
5339 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
5340 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5341 BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
5342 BIT(INNER_DST_PORT);
5343
e91e388c 5344 /* check whether src/dst ip address is used */
dd74f815
JS
5345 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
5346 !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
5347 *unused |= BIT(INNER_SRC_IP);
5348
5349 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
5350 !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
5351 *unused |= BIT(INNER_DST_IP);
5352
5353 if (!usr_ip6_spec->l4_proto)
5354 *unused |= BIT(INNER_IP_PROTO);
5355
5356 if (usr_ip6_spec->tclass)
5357 return -EOPNOTSUPP;
5358
5359 if (usr_ip6_spec->l4_4_bytes)
5360 return -EOPNOTSUPP;
5361
5362 break;
5363 case ETHER_FLOW:
5364 ether_spec = &fs->h_u.ether_spec;
5365 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5366 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5367 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5368
5369 if (is_zero_ether_addr(ether_spec->h_source))
5370 *unused |= BIT(INNER_SRC_MAC);
5371
5372 if (is_zero_ether_addr(ether_spec->h_dest))
5373 *unused |= BIT(INNER_DST_MAC);
5374
5375 if (!ether_spec->h_proto)
5376 *unused |= BIT(INNER_ETH_TYPE);
5377
5378 break;
5379 default:
5380 return -EOPNOTSUPP;
5381 }
5382
5383 if ((fs->flow_type & FLOW_EXT)) {
5384 if (fs->h_ext.vlan_etype)
5385 return -EOPNOTSUPP;
5386 if (!fs->h_ext.vlan_tci)
5387 *unused |= BIT(INNER_VLAN_TAG_FST);
5388
5389 if (fs->m_ext.vlan_tci) {
5390 if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
5391 return -EINVAL;
5392 }
5393 } else {
5394 *unused |= BIT(INNER_VLAN_TAG_FST);
5395 }
5396
5397 if (fs->flow_type & FLOW_MAC_EXT) {
5398 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
5399 return -EOPNOTSUPP;
5400
5401 if (is_zero_ether_addr(fs->h_ext.h_dest))
5402 *unused |= BIT(INNER_DST_MAC);
5403 else
5404 *unused &= ~(BIT(INNER_DST_MAC));
5405 }
5406
5407 return 0;
5408}
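/* Editor's worked example (illustrative): for a TCP_V4_FLOW spec that only
 * fills ip4dst and pdst, with neither FLOW_EXT nor FLOW_MAC_EXT set, the
 * resulting unused mask is
 *
 *	BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | BIT(INNER_SRC_IP) |
 *	BIT(INNER_SRC_PORT) | BIT(INNER_IP_TOS) | BIT(INNER_VLAN_TAG_FST)
 *
 * so the destination ip, destination port, ethernet type and ip protocol
 * tuples are the ones left to be programmed into the key.
 */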
5409
5410static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5411{
5412 struct hclge_fd_rule *rule = NULL;
5413 struct hlist_node *node2;
5414
44122887 5415 spin_lock_bh(&hdev->fd_rule_lock);
dd74f815
JS
5416 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5417 if (rule->location >= location)
5418 break;
5419 }
5420
44122887
JS
5421 spin_unlock_bh(&hdev->fd_rule_lock);
5422
dd74f815
JS
5423 return rule && rule->location == location;
5424}
5425
44122887 5426 /* must be called with fd_rule_lock held */
dd74f815
JS
5427static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5428 struct hclge_fd_rule *new_rule,
5429 u16 location,
5430 bool is_add)
5431{
5432 struct hclge_fd_rule *rule = NULL, *parent = NULL;
5433 struct hlist_node *node2;
5434
5435 if (is_add && !new_rule)
5436 return -EINVAL;
5437
5438 hlist_for_each_entry_safe(rule, node2,
5439 &hdev->fd_rule_list, rule_node) {
5440 if (rule->location >= location)
5441 break;
5442 parent = rule;
5443 }
5444
5445 if (rule && rule->location == location) {
5446 hlist_del(&rule->rule_node);
5447 kfree(rule);
5448 hdev->hclge_fd_rule_num--;
5449
44122887
JS
5450 if (!is_add) {
5451 if (!hdev->hclge_fd_rule_num)
5452 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5453 clear_bit(location, hdev->fd_bmap);
dd74f815 5454
44122887
JS
5455 return 0;
5456 }
dd74f815
JS
5457 } else if (!is_add) {
5458 dev_err(&hdev->pdev->dev,
adcf738b 5459 "delete fail, rule %u does not exist\n",
dd74f815
JS
5460 location);
5461 return -EINVAL;
5462 }
5463
5464 INIT_HLIST_NODE(&new_rule->rule_node);
5465
5466 if (parent)
5467 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5468 else
5469 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5470
44122887 5471 set_bit(location, hdev->fd_bmap);
dd74f815 5472 hdev->hclge_fd_rule_num++;
44122887 5473 hdev->fd_active_type = new_rule->rule_type;
dd74f815
JS
5474
5475 return 0;
5476}
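/* Editor's sketch (not part of the driver): per the comment above, callers
 * are expected to hold fd_rule_lock around the list update, e.g.
 *
 *	spin_lock_bh(&hdev->fd_rule_lock);
 *	ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
 *	spin_unlock_bh(&hdev->fd_rule_lock);
 *
 * which is the pattern used by hclge_del_fd_entry() and, via
 * hclge_fd_config_rule(), by hclge_add_fd_entry() below.
 */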
5477
5478static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5479 struct ethtool_rx_flow_spec *fs,
5480 struct hclge_fd_rule *rule)
5481{
5482 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5483
5484 switch (flow_type) {
5485 case SCTP_V4_FLOW:
5486 case TCP_V4_FLOW:
5487 case UDP_V4_FLOW:
e91e388c 5488 rule->tuples.src_ip[IPV4_INDEX] =
dd74f815 5489 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
e91e388c 5490 rule->tuples_mask.src_ip[IPV4_INDEX] =
dd74f815
JS
5491 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5492
e91e388c 5493 rule->tuples.dst_ip[IPV4_INDEX] =
dd74f815 5494 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
e91e388c 5495 rule->tuples_mask.dst_ip[IPV4_INDEX] =
dd74f815
JS
5496 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5497
5498 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5499 rule->tuples_mask.src_port =
5500 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5501
5502 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5503 rule->tuples_mask.dst_port =
5504 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5505
5506 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5507 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5508
5509 rule->tuples.ether_proto = ETH_P_IP;
5510 rule->tuples_mask.ether_proto = 0xFFFF;
5511
5512 break;
5513 case IP_USER_FLOW:
e91e388c 5514 rule->tuples.src_ip[IPV4_INDEX] =
dd74f815 5515 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
e91e388c 5516 rule->tuples_mask.src_ip[IPV4_INDEX] =
dd74f815
JS
5517 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5518
e91e388c 5519 rule->tuples.dst_ip[IPV4_INDEX] =
dd74f815 5520 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
e91e388c 5521 rule->tuples_mask.dst_ip[IPV4_INDEX] =
dd74f815
JS
5522 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5523
5524 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5525 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5526
5527 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5528 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5529
5530 rule->tuples.ether_proto = ETH_P_IP;
5531 rule->tuples_mask.ether_proto = 0xFFFF;
5532
5533 break;
5534 case SCTP_V6_FLOW:
5535 case TCP_V6_FLOW:
5536 case UDP_V6_FLOW:
5537 be32_to_cpu_array(rule->tuples.src_ip,
e91e388c 5538 fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
dd74f815 5539 be32_to_cpu_array(rule->tuples_mask.src_ip,
e91e388c 5540 fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
dd74f815
JS
5541
5542 be32_to_cpu_array(rule->tuples.dst_ip,
e91e388c 5543 fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
dd74f815 5544 be32_to_cpu_array(rule->tuples_mask.dst_ip,
e91e388c 5545 fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
dd74f815
JS
5546
5547 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5548 rule->tuples_mask.src_port =
5549 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5550
5551 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5552 rule->tuples_mask.dst_port =
5553 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5554
5555 rule->tuples.ether_proto = ETH_P_IPV6;
5556 rule->tuples_mask.ether_proto = 0xFFFF;
5557
5558 break;
5559 case IPV6_USER_FLOW:
5560 be32_to_cpu_array(rule->tuples.src_ip,
e91e388c 5561 fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
dd74f815 5562 be32_to_cpu_array(rule->tuples_mask.src_ip,
e91e388c 5563 fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
dd74f815
JS
5564
5565 be32_to_cpu_array(rule->tuples.dst_ip,
e91e388c 5566 fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
dd74f815 5567 be32_to_cpu_array(rule->tuples_mask.dst_ip,
e91e388c 5568 fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
dd74f815
JS
5569
5570 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5571 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5572
5573 rule->tuples.ether_proto = ETH_P_IPV6;
5574 rule->tuples_mask.ether_proto = 0xFFFF;
5575
5576 break;
5577 case ETHER_FLOW:
5578 ether_addr_copy(rule->tuples.src_mac,
5579 fs->h_u.ether_spec.h_source);
5580 ether_addr_copy(rule->tuples_mask.src_mac,
5581 fs->m_u.ether_spec.h_source);
5582
5583 ether_addr_copy(rule->tuples.dst_mac,
5584 fs->h_u.ether_spec.h_dest);
5585 ether_addr_copy(rule->tuples_mask.dst_mac,
5586 fs->m_u.ether_spec.h_dest);
5587
5588 rule->tuples.ether_proto =
5589 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5590 rule->tuples_mask.ether_proto =
5591 be16_to_cpu(fs->m_u.ether_spec.h_proto);
5592
5593 break;
5594 default:
5595 return -EOPNOTSUPP;
5596 }
5597
5598 switch (flow_type) {
5599 case SCTP_V4_FLOW:
5600 case SCTP_V6_FLOW:
5601 rule->tuples.ip_proto = IPPROTO_SCTP;
5602 rule->tuples_mask.ip_proto = 0xFF;
5603 break;
5604 case TCP_V4_FLOW:
5605 case TCP_V6_FLOW:
5606 rule->tuples.ip_proto = IPPROTO_TCP;
5607 rule->tuples_mask.ip_proto = 0xFF;
5608 break;
5609 case UDP_V4_FLOW:
5610 case UDP_V6_FLOW:
5611 rule->tuples.ip_proto = IPPROTO_UDP;
5612 rule->tuples_mask.ip_proto = 0xFF;
5613 break;
5614 default:
5615 break;
5616 }
5617
5618 if ((fs->flow_type & FLOW_EXT)) {
5619 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5620 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5621 }
5622
5623 if (fs->flow_type & FLOW_MAC_EXT) {
5624 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5625 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5626 }
5627
5628 return 0;
5629}
5630
44122887
JS
5631 /* must be called with fd_rule_lock held */
5632static int hclge_fd_config_rule(struct hclge_dev *hdev,
5633 struct hclge_fd_rule *rule)
5634{
5635 int ret;
5636
5637 if (!rule) {
5638 dev_err(&hdev->pdev->dev,
5639 "The flow director rule is NULL\n");
5640 return -EINVAL;
5641 }
5642
5643 /* it never fails here, so there is no need to check the return value */
5644 hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5645
5646 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5647 if (ret)
5648 goto clear_rule;
5649
5650 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5651 if (ret)
5652 goto clear_rule;
5653
5654 return 0;
5655
5656clear_rule:
5657 hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5658 return ret;
5659}
5660
dd74f815
JS
5661static int hclge_add_fd_entry(struct hnae3_handle *handle,
5662 struct ethtool_rxnfc *cmd)
5663{
5664 struct hclge_vport *vport = hclge_get_vport(handle);
5665 struct hclge_dev *hdev = vport->back;
5666 u16 dst_vport_id = 0, q_index = 0;
5667 struct ethtool_rx_flow_spec *fs;
5668 struct hclge_fd_rule *rule;
5669 u32 unused = 0;
5670 u8 action;
5671 int ret;
5672
5673 if (!hnae3_dev_fd_supported(hdev))
5674 return -EOPNOTSUPP;
5675
9abeb7d8 5676 if (!hdev->fd_en) {
dd74f815
JS
5677 dev_warn(&hdev->pdev->dev,
5678 "Please enable flow director first\n");
5679 return -EOPNOTSUPP;
5680 }
5681
5682 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5683
5684 ret = hclge_fd_check_spec(hdev, fs, &unused);
5685 if (ret) {
5686 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
5687 return ret;
5688 }
5689
5690 if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5691 action = HCLGE_FD_ACTION_DROP_PACKET;
5692 } else {
5693 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5694 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5695 u16 tqps;
5696
0285dbae
JS
5697 if (vf > hdev->num_req_vfs) {
5698 dev_err(&hdev->pdev->dev,
adcf738b 5699 "Error: vf id (%u) > max vf num (%u)\n",
0285dbae
JS
5700 vf, hdev->num_req_vfs);
5701 return -EINVAL;
5702 }
5703
dd74f815
JS
5704 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5705 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5706
5707 if (ring >= tqps) {
5708 dev_err(&hdev->pdev->dev,
adcf738b 5709 "Error: queue id (%u) > max tqp num (%u)\n",
dd74f815
JS
5710 ring, tqps - 1);
5711 return -EINVAL;
5712 }
5713
dd74f815
JS
5714 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5715 q_index = ring;
5716 }
5717
5718 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5719 if (!rule)
5720 return -ENOMEM;
5721
5722 ret = hclge_fd_get_tuple(hdev, fs, rule);
44122887
JS
5723 if (ret) {
5724 kfree(rule);
5725 return ret;
5726 }
dd74f815
JS
5727
5728 rule->flow_type = fs->flow_type;
5729
5730 rule->location = fs->location;
5731 rule->unused_tuple = unused;
5732 rule->vf_id = dst_vport_id;
5733 rule->queue_id = q_index;
5734 rule->action = action;
44122887 5735 rule->rule_type = HCLGE_FD_EP_ACTIVE;
dd74f815 5736
d93ed94f
JS
5737 /* to avoid rule conflicts, when the user configures a rule via
5738 * ethtool, we need to clear all arfs rules
5739 */
5740 hclge_clear_arfs_rules(handle);
5741
44122887
JS
5742 spin_lock_bh(&hdev->fd_rule_lock);
5743 ret = hclge_fd_config_rule(hdev, rule);
dd74f815 5744
44122887 5745 spin_unlock_bh(&hdev->fd_rule_lock);
dd74f815 5746
dd74f815
JS
5747 return ret;
5748}
5749
5750static int hclge_del_fd_entry(struct hnae3_handle *handle,
5751 struct ethtool_rxnfc *cmd)
5752{
5753 struct hclge_vport *vport = hclge_get_vport(handle);
5754 struct hclge_dev *hdev = vport->back;
5755 struct ethtool_rx_flow_spec *fs;
5756 int ret;
5757
5758 if (!hnae3_dev_fd_supported(hdev))
5759 return -EOPNOTSUPP;
5760
5761 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5762
5763 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5764 return -EINVAL;
5765
5766 if (!hclge_fd_rule_exist(hdev, fs->location)) {
5767 dev_err(&hdev->pdev->dev,
39edaf24 5768 "Delete fail, rule %u does not exist\n", fs->location);
dd74f815
JS
5769 return -ENOENT;
5770 }
5771
9b2f3477
WL
5772 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
5773 NULL, false);
dd74f815
JS
5774 if (ret)
5775 return ret;
5776
44122887
JS
5777 spin_lock_bh(&hdev->fd_rule_lock);
5778 ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5779
5780 spin_unlock_bh(&hdev->fd_rule_lock);
5781
5782 return ret;
dd74f815
JS
5783}
5784
6871af29
JS
5785static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5786 bool clear_list)
5787{
5788 struct hclge_vport *vport = hclge_get_vport(handle);
5789 struct hclge_dev *hdev = vport->back;
5790 struct hclge_fd_rule *rule;
5791 struct hlist_node *node;
44122887 5792 u16 location;
6871af29
JS
5793
5794 if (!hnae3_dev_fd_supported(hdev))
5795 return;
5796
44122887
JS
5797 spin_lock_bh(&hdev->fd_rule_lock);
5798 for_each_set_bit(location, hdev->fd_bmap,
5799 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5800 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5801 NULL, false);
5802
6871af29
JS
5803 if (clear_list) {
5804 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5805 rule_node) {
6871af29
JS
5806 hlist_del(&rule->rule_node);
5807 kfree(rule);
6871af29 5808 }
44122887
JS
5809 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5810 hdev->hclge_fd_rule_num = 0;
5811 bitmap_zero(hdev->fd_bmap,
5812 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6871af29 5813 }
44122887
JS
5814
5815 spin_unlock_bh(&hdev->fd_rule_lock);
6871af29
JS
5816}
5817
5818static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5819{
5820 struct hclge_vport *vport = hclge_get_vport(handle);
5821 struct hclge_dev *hdev = vport->back;
5822 struct hclge_fd_rule *rule;
5823 struct hlist_node *node;
5824 int ret;
5825
65e41e7e
HT
5826 /* Return ok here, because reset error handling will check this
5827 * return value. If an error is returned here, the reset process will
5828 * fail.
5829 */
6871af29 5830 if (!hnae3_dev_fd_supported(hdev))
65e41e7e 5831 return 0;
6871af29 5832
8edc2285 5833 /* if fd is disabled, the rules should not be restored during reset */
9abeb7d8 5834 if (!hdev->fd_en)
8edc2285
JS
5835 return 0;
5836
44122887 5837 spin_lock_bh(&hdev->fd_rule_lock);
6871af29
JS
5838 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5839 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5840 if (!ret)
5841 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5842
5843 if (ret) {
5844 dev_warn(&hdev->pdev->dev,
adcf738b 5845 "Restore rule %u failed, remove it\n",
6871af29 5846 rule->location);
44122887 5847 clear_bit(rule->location, hdev->fd_bmap);
6871af29
JS
5848 hlist_del(&rule->rule_node);
5849 kfree(rule);
5850 hdev->hclge_fd_rule_num--;
5851 }
5852 }
44122887
JS
5853
5854 if (hdev->hclge_fd_rule_num)
5855 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5856
5857 spin_unlock_bh(&hdev->fd_rule_lock);
5858
6871af29
JS
5859 return 0;
5860}
5861
05c2314f
JS
5862static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5863 struct ethtool_rxnfc *cmd)
5864{
5865 struct hclge_vport *vport = hclge_get_vport(handle);
5866 struct hclge_dev *hdev = vport->back;
5867
5868 if (!hnae3_dev_fd_supported(hdev))
5869 return -EOPNOTSUPP;
5870
5871 cmd->rule_cnt = hdev->hclge_fd_rule_num;
5872 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5873
5874 return 0;
5875}
5876
5877static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5878 struct ethtool_rxnfc *cmd)
5879{
5880 struct hclge_vport *vport = hclge_get_vport(handle);
5881 struct hclge_fd_rule *rule = NULL;
5882 struct hclge_dev *hdev = vport->back;
5883 struct ethtool_rx_flow_spec *fs;
5884 struct hlist_node *node2;
5885
5886 if (!hnae3_dev_fd_supported(hdev))
5887 return -EOPNOTSUPP;
5888
5889 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5890
44122887
JS
5891 spin_lock_bh(&hdev->fd_rule_lock);
5892
05c2314f
JS
5893 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5894 if (rule->location >= fs->location)
5895 break;
5896 }
5897
44122887
JS
5898 if (!rule || fs->location != rule->location) {
5899 spin_unlock_bh(&hdev->fd_rule_lock);
5900
05c2314f 5901 return -ENOENT;
44122887 5902 }
05c2314f
JS
5903
5904 fs->flow_type = rule->flow_type;
5905 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5906 case SCTP_V4_FLOW:
5907 case TCP_V4_FLOW:
5908 case UDP_V4_FLOW:
5909 fs->h_u.tcp_ip4_spec.ip4src =
e91e388c 5910 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
05c2314f 5911 fs->m_u.tcp_ip4_spec.ip4src =
e91e388c
JS
5912 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5913 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
05c2314f
JS
5914
5915 fs->h_u.tcp_ip4_spec.ip4dst =
e91e388c 5916 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
05c2314f 5917 fs->m_u.tcp_ip4_spec.ip4dst =
e91e388c
JS
5918 rule->unused_tuple & BIT(INNER_DST_IP) ?
5919 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
05c2314f
JS
5920
5921 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5922 fs->m_u.tcp_ip4_spec.psrc =
5923 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5924 0 : cpu_to_be16(rule->tuples_mask.src_port);
5925
5926 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5927 fs->m_u.tcp_ip4_spec.pdst =
5928 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5929 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5930
5931 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5932 fs->m_u.tcp_ip4_spec.tos =
5933 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5934 0 : rule->tuples_mask.ip_tos;
5935
5936 break;
5937 case IP_USER_FLOW:
5938 fs->h_u.usr_ip4_spec.ip4src =
e91e388c 5939 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
05c2314f 5940 fs->m_u.usr_ip4_spec.ip4src =
e91e388c
JS
5941 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5942 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
05c2314f
JS
5943
5944 fs->h_u.usr_ip4_spec.ip4dst =
e91e388c 5945 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
05c2314f 5946 fs->m_u.usr_ip4_spec.ip4dst =
e91e388c
JS
5947 rule->unused_tuple & BIT(INNER_DST_IP) ?
5948 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
05c2314f
JS
5949
5950 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5951 fs->m_u.usr_ip4_spec.tos =
5952 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5953 0 : rule->tuples_mask.ip_tos;
5954
5955 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5956 fs->m_u.usr_ip4_spec.proto =
5957 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5958 0 : rule->tuples_mask.ip_proto;
5959
5960 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5961
5962 break;
5963 case SCTP_V6_FLOW:
5964 case TCP_V6_FLOW:
5965 case UDP_V6_FLOW:
5966 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
e91e388c 5967 rule->tuples.src_ip, IPV6_SIZE);
05c2314f 5968 if (rule->unused_tuple & BIT(INNER_SRC_IP))
e91e388c
JS
5969 memset(fs->m_u.tcp_ip6_spec.ip6src, 0,
5970 sizeof(int) * IPV6_SIZE);
05c2314f
JS
5971 else
5972 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
e91e388c 5973 rule->tuples_mask.src_ip, IPV6_SIZE);
05c2314f
JS
5974
5975 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
e91e388c 5976 rule->tuples.dst_ip, IPV6_SIZE);
05c2314f 5977 if (rule->unused_tuple & BIT(INNER_DST_IP))
e91e388c
JS
5978 memset(fs->m_u.tcp_ip6_spec.ip6dst, 0,
5979 sizeof(int) * IPV6_SIZE);
05c2314f
JS
5980 else
5981 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
e91e388c 5982 rule->tuples_mask.dst_ip, IPV6_SIZE);
05c2314f
JS
5983
5984 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5985 fs->m_u.tcp_ip6_spec.psrc =
5986 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5987 0 : cpu_to_be16(rule->tuples_mask.src_port);
5988
5989 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5990 fs->m_u.tcp_ip6_spec.pdst =
5991 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5992 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5993
5994 break;
5995 case IPV6_USER_FLOW:
5996 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
e91e388c 5997 rule->tuples.src_ip, IPV6_SIZE);
05c2314f 5998 if (rule->unused_tuple & BIT(INNER_SRC_IP))
e91e388c
JS
5999 memset(fs->m_u.usr_ip6_spec.ip6src, 0,
6000 sizeof(int) * IPV6_SIZE);
05c2314f
JS
6001 else
6002 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
e91e388c 6003 rule->tuples_mask.src_ip, IPV6_SIZE);
05c2314f
JS
6004
6005 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
e91e388c 6006 rule->tuples.dst_ip, IPV6_SIZE);
05c2314f 6007 if (rule->unused_tuple & BIT(INNER_DST_IP))
e91e388c
JS
6008 memset(fs->m_u.usr_ip6_spec.ip6dst, 0,
6009 sizeof(int) * IPV6_SIZE);
05c2314f
JS
6010 else
6011 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
e91e388c 6012 rule->tuples_mask.dst_ip, IPV6_SIZE);
05c2314f
JS
6013
6014 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
6015 fs->m_u.usr_ip6_spec.l4_proto =
6016 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6017 0 : rule->tuples_mask.ip_proto;
6018
6019 break;
6020 case ETHER_FLOW:
6021 ether_addr_copy(fs->h_u.ether_spec.h_source,
6022 rule->tuples.src_mac);
6023 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6024 eth_zero_addr(fs->m_u.ether_spec.h_source);
6025 else
6026 ether_addr_copy(fs->m_u.ether_spec.h_source,
6027 rule->tuples_mask.src_mac);
6028
6029 ether_addr_copy(fs->h_u.ether_spec.h_dest,
6030 rule->tuples.dst_mac);
6031 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6032 eth_zero_addr(fs->m_u.ether_spec.h_dest);
6033 else
6034 ether_addr_copy(fs->m_u.ether_spec.h_dest,
6035 rule->tuples_mask.dst_mac);
6036
6037 fs->h_u.ether_spec.h_proto =
6038 cpu_to_be16(rule->tuples.ether_proto);
6039 fs->m_u.ether_spec.h_proto =
6040 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6041 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6042
6043 break;
6044 default:
44122887 6045 spin_unlock_bh(&hdev->fd_rule_lock);
05c2314f
JS
6046 return -EOPNOTSUPP;
6047 }
6048
6049 if (fs->flow_type & FLOW_EXT) {
6050 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6051 fs->m_ext.vlan_tci =
6052 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6053 cpu_to_be16(VLAN_VID_MASK) :
6054 cpu_to_be16(rule->tuples_mask.vlan_tag1);
6055 }
6056
6057 if (fs->flow_type & FLOW_MAC_EXT) {
6058 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6059 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6060 eth_zero_addr(fs->m_u.ether_spec.h_dest);
6061 else
6062 ether_addr_copy(fs->m_u.ether_spec.h_dest,
6063 rule->tuples_mask.dst_mac);
6064 }
6065
6066 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6067 fs->ring_cookie = RX_CLS_FLOW_DISC;
6068 } else {
6069 u64 vf_id;
6070
6071 fs->ring_cookie = rule->queue_id;
6072 vf_id = rule->vf_id;
6073 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6074 fs->ring_cookie |= vf_id;
6075 }
6076
44122887
JS
6077 spin_unlock_bh(&hdev->fd_rule_lock);
6078
05c2314f
JS
6079 return 0;
6080}
6081
6082static int hclge_get_all_rules(struct hnae3_handle *handle,
6083 struct ethtool_rxnfc *cmd, u32 *rule_locs)
6084{
6085 struct hclge_vport *vport = hclge_get_vport(handle);
6086 struct hclge_dev *hdev = vport->back;
6087 struct hclge_fd_rule *rule;
6088 struct hlist_node *node2;
6089 int cnt = 0;
6090
6091 if (!hnae3_dev_fd_supported(hdev))
6092 return -EOPNOTSUPP;
6093
6094 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6095
44122887 6096 spin_lock_bh(&hdev->fd_rule_lock);
05c2314f
JS
6097 hlist_for_each_entry_safe(rule, node2,
6098 &hdev->fd_rule_list, rule_node) {
44122887
JS
6099 if (cnt == cmd->rule_cnt) {
6100 spin_unlock_bh(&hdev->fd_rule_lock);
05c2314f 6101 return -EMSGSIZE;
44122887 6102 }
05c2314f
JS
6103
6104 rule_locs[cnt] = rule->location;
6105 cnt++;
6106 }
6107
44122887
JS
6108 spin_unlock_bh(&hdev->fd_rule_lock);
6109
05c2314f
JS
6110 cmd->rule_cnt = cnt;
6111
6112 return 0;
6113}
6114
d93ed94f
JS
6115static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6116 struct hclge_fd_rule_tuples *tuples)
6117{
47327c93
GH
6118#define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
6119#define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
6120
d93ed94f
JS
6121 tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6122 tuples->ip_proto = fkeys->basic.ip_proto;
6123 tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6124
6125 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6126 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6127 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6128 } else {
47327c93
GH
6129 int i;
6130
6131 for (i = 0; i < IPV6_SIZE; i++) {
6132 tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
6133 tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
6134 }
d93ed94f
JS
6135 }
6136}
6137
6138 /* traverse all rules, check whether an existing rule has the same tuples */
6139static struct hclge_fd_rule *
6140hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6141 const struct hclge_fd_rule_tuples *tuples)
6142{
6143 struct hclge_fd_rule *rule = NULL;
6144 struct hlist_node *node;
6145
6146 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6147 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6148 return rule;
6149 }
6150
6151 return NULL;
6152}
6153
6154static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6155 struct hclge_fd_rule *rule)
6156{
6157 rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6158 BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6159 BIT(INNER_SRC_PORT);
6160 rule->action = 0;
6161 rule->vf_id = 0;
6162 rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6163 if (tuples->ether_proto == ETH_P_IP) {
6164 if (tuples->ip_proto == IPPROTO_TCP)
6165 rule->flow_type = TCP_V4_FLOW;
6166 else
6167 rule->flow_type = UDP_V4_FLOW;
6168 } else {
6169 if (tuples->ip_proto == IPPROTO_TCP)
6170 rule->flow_type = TCP_V6_FLOW;
6171 else
6172 rule->flow_type = UDP_V6_FLOW;
6173 }
6174 memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6175 memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
6176}
6177
6178static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6179 u16 flow_id, struct flow_keys *fkeys)
6180{
d93ed94f
JS
6181 struct hclge_vport *vport = hclge_get_vport(handle);
6182 struct hclge_fd_rule_tuples new_tuples;
6183 struct hclge_dev *hdev = vport->back;
6184 struct hclge_fd_rule *rule;
6185 u16 tmp_queue_id;
6186 u16 bit_id;
6187 int ret;
6188
6189 if (!hnae3_dev_fd_supported(hdev))
6190 return -EOPNOTSUPP;
6191
6192 memset(&new_tuples, 0, sizeof(new_tuples));
6193 hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6194
6195 spin_lock_bh(&hdev->fd_rule_lock);
6196
6197 /* when an fd rule added by the user already exists,
6198 * arfs should not work
6199 */
6200 if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6201 spin_unlock_bh(&hdev->fd_rule_lock);
6202
6203 return -EOPNOTSUPP;
6204 }
6205
6206 /* check whether a flow director filter exists for this flow:
6207 * if not, create a new filter for it;
6208 * if a filter exists with a different queue id, modify the filter;
6209 * if a filter exists with the same queue id, do nothing
6210 */
6211 rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6212 if (!rule) {
6213 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6214 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6215 spin_unlock_bh(&hdev->fd_rule_lock);
6216
6217 return -ENOSPC;
6218 }
6219
d659f9f6 6220 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
d93ed94f
JS
6221 if (!rule) {
6222 spin_unlock_bh(&hdev->fd_rule_lock);
6223
6224 return -ENOMEM;
6225 }
6226
6227 set_bit(bit_id, hdev->fd_bmap);
6228 rule->location = bit_id;
6229 rule->flow_id = flow_id;
6230 rule->queue_id = queue_id;
6231 hclge_fd_build_arfs_rule(&new_tuples, rule);
6232 ret = hclge_fd_config_rule(hdev, rule);
6233
6234 spin_unlock_bh(&hdev->fd_rule_lock);
6235
6236 if (ret)
6237 return ret;
6238
6239 return rule->location;
6240 }
6241
6242 spin_unlock_bh(&hdev->fd_rule_lock);
6243
6244 if (rule->queue_id == queue_id)
6245 return rule->location;
6246
6247 tmp_queue_id = rule->queue_id;
6248 rule->queue_id = queue_id;
6249 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6250 if (ret) {
6251 rule->queue_id = tmp_queue_id;
6252 return ret;
6253 }
6254
6255 return rule->location;
d93ed94f
JS
6256}
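/* Editor's note (illustrative): on success this returns rule->location, which
 * the aRFS core uses as the filter id. A hedged sketch of a hypothetical
 * rx-flow-steer handler built on top of it (names and plumbing here are
 * assumptions, not the actual hns3 enet code):
 *
 *	static int example_rx_flow_steer(struct hnae3_handle *h,
 *					 const struct sk_buff *skb,
 *					 u16 rxq_index, u32 flow_id)
 *	{
 *		struct flow_keys fkeys;
 *
 *		if (!skb_flow_dissect_flow_keys(skb, &fkeys, 0))
 *			return -EPROTONOSUPPORT;
 *
 *		return hclge_add_fd_entry_by_arfs(h, rxq_index, flow_id,
 *						  &fkeys);
 *	}
 */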
6257
6258static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6259{
6260#ifdef CONFIG_RFS_ACCEL
6261 struct hnae3_handle *handle = &hdev->vport[0].nic;
6262 struct hclge_fd_rule *rule;
6263 struct hlist_node *node;
6264 HLIST_HEAD(del_list);
6265
6266 spin_lock_bh(&hdev->fd_rule_lock);
6267 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6268 spin_unlock_bh(&hdev->fd_rule_lock);
6269 return;
6270 }
6271 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6272 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6273 rule->flow_id, rule->location)) {
6274 hlist_del_init(&rule->rule_node);
6275 hlist_add_head(&rule->rule_node, &del_list);
6276 hdev->hclge_fd_rule_num--;
6277 clear_bit(rule->location, hdev->fd_bmap);
6278 }
6279 }
6280 spin_unlock_bh(&hdev->fd_rule_lock);
6281
6282 hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6283 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6284 rule->location, NULL, false);
6285 kfree(rule);
6286 }
6287#endif
6288}
6289
6290static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6291{
6292#ifdef CONFIG_RFS_ACCEL
6293 struct hclge_vport *vport = hclge_get_vport(handle);
6294 struct hclge_dev *hdev = vport->back;
6295
6296 if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6297 hclge_del_all_fd_entries(handle, true);
6298#endif
6299}
6300
4d60291b
HT
6301static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6302{
6303 struct hclge_vport *vport = hclge_get_vport(handle);
6304 struct hclge_dev *hdev = vport->back;
6305
6306 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6307 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6308}
6309
6310static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6311{
6312 struct hclge_vport *vport = hclge_get_vport(handle);
6313 struct hclge_dev *hdev = vport->back;
6314
6315 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6316}
6317
6318static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6319{
6320 struct hclge_vport *vport = hclge_get_vport(handle);
6321 struct hclge_dev *hdev = vport->back;
6322
f02eb82d 6323 return hdev->rst_stats.hw_reset_done_cnt;
4d60291b
HT
6324}
6325
c17852a8
JS
6326static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
6327{
6328 struct hclge_vport *vport = hclge_get_vport(handle);
6329 struct hclge_dev *hdev = vport->back;
44122887 6330 bool clear;
c17852a8 6331
9abeb7d8 6332 hdev->fd_en = enable;
1483fa49 6333 clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
c17852a8 6334 if (!enable)
44122887 6335 hclge_del_all_fd_entries(handle, clear);
c17852a8
JS
6336 else
6337 hclge_restore_fd_entries(handle);
6338}
6339
46a3df9f
S
6340static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
6341{
6342 struct hclge_desc desc;
d44f9b63
YL
6343 struct hclge_config_mac_mode_cmd *req =
6344 (struct hclge_config_mac_mode_cmd *)desc.data;
a90bb9a5 6345 u32 loop_en = 0;
46a3df9f
S
6346 int ret;
6347
6348 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
b9a8f883
YL
6349
6350 if (enable) {
6351 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
6352 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
6353 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
6354 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
6355 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
6356 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
6357 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
6358 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
6359 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
6360 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
6361 }
6362
a90bb9a5 6363 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
46a3df9f
S
6364
6365 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6366 if (ret)
6367 dev_err(&hdev->pdev->dev,
6368 "mac enable fail, ret =%d.\n", ret);
6369}
6370
dd2956ea
YM
6371static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
6372 u8 switch_param, u8 param_mask)
6373{
6374 struct hclge_mac_vlan_switch_cmd *req;
6375 struct hclge_desc desc;
6376 u32 func_id;
6377 int ret;
6378
6379 func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
6380 req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
71c5e83b
GH
6381
6382 /* read current config parameter */
dd2956ea 6383 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
71c5e83b 6384 true);
dd2956ea
YM
6385 req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
6386 req->func_id = cpu_to_le32(func_id);
71c5e83b
GH
6387
6388 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6389 if (ret) {
6390 dev_err(&hdev->pdev->dev,
6391 "read mac vlan switch parameter fail, ret = %d\n", ret);
6392 return ret;
6393 }
6394
6395 /* modify and write new config parameter */
6396 hclge_cmd_reuse_desc(&desc, false);
6397 req->switch_param = (req->switch_param & param_mask) | switch_param;
dd2956ea
YM
6398 req->param_mask = param_mask;
6399
6400 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6401 if (ret)
6402 dev_err(&hdev->pdev->dev,
6403 "set mac vlan switch parameter fail, ret = %d\n", ret);
6404 return ret;
6405}
6406
c9765a89
YM
6407static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
6408 int link_ret)
6409{
6410#define HCLGE_PHY_LINK_STATUS_NUM 200
6411
6412 struct phy_device *phydev = hdev->hw.mac.phydev;
6413 int i = 0;
6414 int ret;
6415
6416 do {
6417 ret = phy_read_status(phydev);
6418 if (ret) {
6419 dev_err(&hdev->pdev->dev,
6420 "phy update link status fail, ret = %d\n", ret);
6421 return;
6422 }
6423
6424 if (phydev->link == link_ret)
6425 break;
6426
6427 msleep(HCLGE_LINK_STATUS_MS);
6428 } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
6429}
6430
6431static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
6432{
6433#define HCLGE_MAC_LINK_STATUS_NUM 100
6434
6435 int i = 0;
6436 int ret;
6437
6438 do {
6439 ret = hclge_get_mac_link_status(hdev);
6440 if (ret < 0)
6441 return ret;
6442 else if (ret == link_ret)
6443 return 0;
6444
6445 msleep(HCLGE_LINK_STATUS_MS);
6446 } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
6447 return -EBUSY;
6448}
6449
6450static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
6451 bool is_phy)
6452{
6453#define HCLGE_LINK_STATUS_DOWN 0
6454#define HCLGE_LINK_STATUS_UP 1
6455
6456 int link_ret;
6457
6458 link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
6459
6460 if (is_phy)
6461 hclge_phy_link_status_wait(hdev, link_ret);
6462
6463 return hclge_mac_link_status_wait(hdev, link_ret);
6464}
6465
eb66d503 6466static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
c39c4d98 6467{
c39c4d98 6468 struct hclge_config_mac_mode_cmd *req;
c39c4d98
YL
6469 struct hclge_desc desc;
6470 u32 loop_en;
6471 int ret;
6472
e4d68dae
YL
6473 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
6474 /* 1. Read out the current MAC mode config */
6475 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
6476 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6477 if (ret) {
6478 dev_err(&hdev->pdev->dev,
6479 "mac loopback get fail, ret =%d.\n", ret);
6480 return ret;
6481 }
c39c4d98 6482
e4d68dae
YL
6483 /* 2. Then set up the loopback flag */
6484 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
e4e87715 6485 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
0f29fc23
YL
6486 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
6487 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
e4d68dae
YL
6488
6489 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
c39c4d98 6490
e4d68dae
YL
6491 /* 3. Config mac work mode with the loopback flag
6492 * and its original configuration parameters
6493 */
6494 hclge_cmd_reuse_desc(&desc, false);
6495 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6496 if (ret)
6497 dev_err(&hdev->pdev->dev,
6498 "mac loopback set fail, ret =%d.\n", ret);
6499 return ret;
6500}
c39c4d98 6501
1cbc662d 6502static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
4dc13b96 6503 enum hnae3_loop loop_mode)
5fd50ac3
PL
6504{
6505#define HCLGE_SERDES_RETRY_MS 10
6506#define HCLGE_SERDES_RETRY_NUM 100
350fda0a 6507
5fd50ac3
PL
6508 struct hclge_serdes_lb_cmd *req;
6509 struct hclge_desc desc;
6510 int ret, i = 0;
4dc13b96 6511 u8 loop_mode_b;
5fd50ac3 6512
d0d72bac 6513 req = (struct hclge_serdes_lb_cmd *)desc.data;
5fd50ac3
PL
6514 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
6515
4dc13b96
FL
6516 switch (loop_mode) {
6517 case HNAE3_LOOP_SERIAL_SERDES:
6518 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6519 break;
6520 case HNAE3_LOOP_PARALLEL_SERDES:
6521 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
6522 break;
6523 default:
6524 dev_err(&hdev->pdev->dev,
6525 "unsupported serdes loopback mode %d\n", loop_mode);
6526 return -ENOTSUPP;
6527 }
6528
5fd50ac3 6529 if (en) {
4dc13b96
FL
6530 req->enable = loop_mode_b;
6531 req->mask = loop_mode_b;
5fd50ac3 6532 } else {
4dc13b96 6533 req->mask = loop_mode_b;
5fd50ac3
PL
6534 }
6535
6536 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6537 if (ret) {
6538 dev_err(&hdev->pdev->dev,
6539 "serdes loopback set fail, ret = %d\n", ret);
6540 return ret;
6541 }
6542
6543 do {
6544 msleep(HCLGE_SERDES_RETRY_MS);
6545 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
6546 true);
6547 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6548 if (ret) {
6549 dev_err(&hdev->pdev->dev,
6550 "serdes loopback get fail, ret = %d\n", ret);
6551 return ret;
6552 }
6553 } while (++i < HCLGE_SERDES_RETRY_NUM &&
6554 !(req->result & HCLGE_CMD_SERDES_DONE_B));
6555
6556 if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
6557 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
6558 return -EBUSY;
6559 } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
6560 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
6561 return -EIO;
6562 }
1cbc662d
YM
6563 return ret;
6564}
6565
6566static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
6567 enum hnae3_loop loop_mode)
6568{
6569 int ret;
6570
6571 ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
6572 if (ret)
6573 return ret;
5fd50ac3 6574
0f29fc23 6575 hclge_cfg_mac_mode(hdev, en);
350fda0a 6576
60df7e91 6577 ret = hclge_mac_phy_link_status_wait(hdev, en, false);
c9765a89
YM
6578 if (ret)
6579 dev_err(&hdev->pdev->dev,
6580 "serdes loopback config mac mode timeout\n");
6581
6582 return ret;
6583}
350fda0a 6584
c9765a89
YM
6585static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
6586 struct phy_device *phydev)
6587{
6588 int ret;
350fda0a 6589
c9765a89
YM
6590 if (!phydev->suspended) {
6591 ret = phy_suspend(phydev);
6592 if (ret)
6593 return ret;
6594 }
6595
6596 ret = phy_resume(phydev);
6597 if (ret)
6598 return ret;
6599
6600 return phy_loopback(phydev, true);
6601}
6602
6603static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
6604 struct phy_device *phydev)
6605{
6606 int ret;
6607
6608 ret = phy_loopback(phydev, false);
6609 if (ret)
6610 return ret;
6611
6612 return phy_suspend(phydev);
6613}
6614
6615static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
6616{
6617 struct phy_device *phydev = hdev->hw.mac.phydev;
6618 int ret;
6619
6620 if (!phydev)
6621 return -ENOTSUPP;
6622
6623 if (en)
6624 ret = hclge_enable_phy_loopback(hdev, phydev);
6625 else
6626 ret = hclge_disable_phy_loopback(hdev, phydev);
6627 if (ret) {
6628 dev_err(&hdev->pdev->dev,
6629 "set phy loopback fail, ret = %d\n", ret);
6630 return ret;
6631 }
6632
6633 hclge_cfg_mac_mode(hdev, en);
6634
60df7e91 6635 ret = hclge_mac_phy_link_status_wait(hdev, en, true);
c9765a89
YM
6636 if (ret)
6637 dev_err(&hdev->pdev->dev,
6638 "phy loopback config mac mode timeout\n");
6639
6640 return ret;
5fd50ac3
PL
6641}
6642
ebaf1908 6643static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
0f29fc23
YL
6644 int stream_id, bool enable)
6645{
6646 struct hclge_desc desc;
6647 struct hclge_cfg_com_tqp_queue_cmd *req =
6648 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6649 int ret;
6650
6651 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6652 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6653 req->stream_id = cpu_to_le16(stream_id);
ebaf1908
WL
6654 if (enable)
6655 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
0f29fc23
YL
6656
6657 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6658 if (ret)
6659 dev_err(&hdev->pdev->dev,
6660 "Tqp enable fail, status =%d.\n", ret);
6661 return ret;
6662}
6663
e4d68dae
YL
6664static int hclge_set_loopback(struct hnae3_handle *handle,
6665 enum hnae3_loop loop_mode, bool en)
6666{
6667 struct hclge_vport *vport = hclge_get_vport(handle);
205a24ca 6668 struct hnae3_knic_private_info *kinfo;
e4d68dae 6669 struct hclge_dev *hdev = vport->back;
0f29fc23 6670 int i, ret;
e4d68dae 6671
dd2956ea
YM
6672 /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
6673 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
6674 * the same, the packets are looped back in the SSU. If SSU loopback
6675 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
6676 */
6677 if (hdev->pdev->revision >= 0x21) {
6678 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
6679
6680 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
6681 HCLGE_SWITCH_ALW_LPBK_MASK);
6682 if (ret)
6683 return ret;
6684 }
6685
e4d68dae 6686 switch (loop_mode) {
eb66d503
FL
6687 case HNAE3_LOOP_APP:
6688 ret = hclge_set_app_loopback(hdev, en);
c39c4d98 6689 break;
4dc13b96
FL
6690 case HNAE3_LOOP_SERIAL_SERDES:
6691 case HNAE3_LOOP_PARALLEL_SERDES:
6692 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
5fd50ac3 6693 break;
c9765a89
YM
6694 case HNAE3_LOOP_PHY:
6695 ret = hclge_set_phy_loopback(hdev, en);
6696 break;
c39c4d98
YL
6697 default:
6698 ret = -ENOTSUPP;
6699 dev_err(&hdev->pdev->dev,
6700 "loop_mode %d is not supported\n", loop_mode);
6701 break;
6702 }
6703
47ef6dec
JS
6704 if (ret)
6705 return ret;
6706
205a24ca
HT
6707 kinfo = &vport->nic.kinfo;
6708 for (i = 0; i < kinfo->num_tqps; i++) {
0f29fc23
YL
6709 ret = hclge_tqp_enable(hdev, i, 0, en);
6710 if (ret)
6711 return ret;
6712 }
46a3df9f 6713
0f29fc23 6714 return 0;
46a3df9f
S
6715}
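/* Editor's note (illustrative): a loopback self-test would typically bracket
 * the test traffic with this op, e.g.
 *
 *	hclge_set_loopback(handle, HNAE3_LOOP_APP, true);
 *	... send and verify test frames ...
 *	hclge_set_loopback(handle, HNAE3_LOOP_APP, false);
 *
 * On revision 0x21 and later, enabling any loop mode also clears the SSU
 * "allow loopback" switch parameter (see the comment at the top of the
 * function), and every TQP stream is enabled or disabled to match.
 */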
6716
1cbc662d
YM
6717static int hclge_set_default_loopback(struct hclge_dev *hdev)
6718{
6719 int ret;
6720
6721 ret = hclge_set_app_loopback(hdev, false);
6722 if (ret)
6723 return ret;
6724
6725 ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
6726 if (ret)
6727 return ret;
6728
6729 return hclge_cfg_serdes_loopback(hdev, false,
6730 HNAE3_LOOP_PARALLEL_SERDES);
6731}
6732
46a3df9f
S
6733static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6734{
6735 struct hclge_vport *vport = hclge_get_vport(handle);
205a24ca 6736 struct hnae3_knic_private_info *kinfo;
46a3df9f
S
6737 struct hnae3_queue *queue;
6738 struct hclge_tqp *tqp;
6739 int i;
6740
205a24ca
HT
6741 kinfo = &vport->nic.kinfo;
6742 for (i = 0; i < kinfo->num_tqps; i++) {
46a3df9f
S
6743 queue = handle->kinfo.tqp[i];
6744 tqp = container_of(queue, struct hclge_tqp, q);
6745 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6746 }
6747}
6748
1c6dfe6f
YL
6749static void hclge_flush_link_update(struct hclge_dev *hdev)
6750{
6751#define HCLGE_FLUSH_LINK_TIMEOUT 100000
6752
6753 unsigned long last = hdev->serv_processed_cnt;
6754 int i = 0;
6755
6756 while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
6757 i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
6758 last == hdev->serv_processed_cnt)
6759 usleep_range(1, 1);
6760}
6761
8cdb992f
JS
6762static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6763{
6764 struct hclge_vport *vport = hclge_get_vport(handle);
6765 struct hclge_dev *hdev = vport->back;
6766
6767 if (enable) {
ed8fb4b2 6768 hclge_task_schedule(hdev, round_jiffies_relative(HZ));
8cdb992f 6769 } else {
1c6dfe6f 6770 /* Set the DOWN flag here to disable link updating */
7be1b9f3 6771 set_bit(HCLGE_STATE_DOWN, &hdev->state);
1c6dfe6f
YL
6772
6773 /* flush memory to make sure DOWN is seen by service task */
6774 smp_mb__before_atomic();
6775 hclge_flush_link_update(hdev);
8cdb992f
JS
6776 }
6777}
6778
46a3df9f
S
6779static int hclge_ae_start(struct hnae3_handle *handle)
6780{
6781 struct hclge_vport *vport = hclge_get_vport(handle);
6782 struct hclge_dev *hdev = vport->back;
46a3df9f 6783
46a3df9f
S
6784 /* mac enable */
6785 hclge_cfg_mac_mode(hdev, true);
6786 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
be8d8cdb 6787 hdev->hw.mac.link = 0;
46a3df9f 6788
b50ae26c
PL
6789 /* reset tqp stats */
6790 hclge_reset_tqp_stats(handle);
6791
b01b7cf1 6792 hclge_mac_start_phy(hdev);
46a3df9f 6793
46a3df9f
S
6794 return 0;
6795}
6796
6797static void hclge_ae_stop(struct hnae3_handle *handle)
6798{
6799 struct hclge_vport *vport = hclge_get_vport(handle);
6800 struct hclge_dev *hdev = vport->back;
39cfbc9c 6801 int i;
46a3df9f 6802
2f7e4896
FL
6803 set_bit(HCLGE_STATE_DOWN, &hdev->state);
6804
d93ed94f
JS
6805 hclge_clear_arfs_rules(handle);
6806
35d93a30
HT
6807 /* If it is not a PF reset, the firmware will disable the MAC,
6808 * so it only needs to stop the phy here.
6809 */
6810 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6811 hdev->reset_type != HNAE3_FUNC_RESET) {
9617f668 6812 hclge_mac_stop_phy(hdev);
ed8fb4b2 6813 hclge_update_link_status(hdev);
b50ae26c 6814 return;
9617f668 6815 }
b50ae26c 6816
39cfbc9c
HT
6817 for (i = 0; i < handle->kinfo.num_tqps; i++)
6818 hclge_reset_tqp(handle, i);
6819
20981a1e
HT
6820 hclge_config_mac_tnl_int(hdev, false);
6821
46a3df9f
S
6822 /* Mac disable */
6823 hclge_cfg_mac_mode(hdev, false);
6824
6825 hclge_mac_stop_phy(hdev);
6826
6827 /* reset tqp stats */
6828 hclge_reset_tqp_stats(handle);
f30dfddc 6829 hclge_update_link_status(hdev);
46a3df9f
S
6830}
6831
a6d818e3
YL
6832int hclge_vport_start(struct hclge_vport *vport)
6833{
6834 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6835 vport->last_active_jiffies = jiffies;
6836 return 0;
6837}
6838
6839void hclge_vport_stop(struct hclge_vport *vport)
6840{
6841 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6842}
6843
6844static int hclge_client_start(struct hnae3_handle *handle)
6845{
6846 struct hclge_vport *vport = hclge_get_vport(handle);
6847
6848 return hclge_vport_start(vport);
6849}
6850
6851static void hclge_client_stop(struct hnae3_handle *handle)
6852{
6853 struct hclge_vport *vport = hclge_get_vport(handle);
6854
6855 hclge_vport_stop(vport);
6856}
6857
46a3df9f
S
6858static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6859 u16 cmdq_resp, u8 resp_code,
6860 enum hclge_mac_vlan_tbl_opcode op)
6861{
6862 struct hclge_dev *hdev = vport->back;
46a3df9f
S
6863
6864 if (cmdq_resp) {
6865 dev_err(&hdev->pdev->dev,
adcf738b 6866 "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
46a3df9f
S
6867 cmdq_resp);
6868 return -EIO;
6869 }
6870
6871 if (op == HCLGE_MAC_VLAN_ADD) {
6872 if ((!resp_code) || (resp_code == 1)) {
6e4139f6 6873 return 0;
b37ce587 6874 } else if (resp_code == HCLGE_ADD_UC_OVERFLOW) {
46a3df9f
S
6875 dev_err(&hdev->pdev->dev,
6876 "add mac addr failed for uc_overflow.\n");
6e4139f6 6877 return -ENOSPC;
b37ce587 6878 } else if (resp_code == HCLGE_ADD_MC_OVERFLOW) {
46a3df9f
S
6879 dev_err(&hdev->pdev->dev,
6880 "add mac addr failed for mc_overflow.\n");
6e4139f6 6881 return -ENOSPC;
46a3df9f 6882 }
6e4139f6
JS
6883
6884 dev_err(&hdev->pdev->dev,
6885 "add mac addr failed for undefined, code=%u.\n",
6886 resp_code);
6887 return -EIO;
46a3df9f
S
6888 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
6889 if (!resp_code) {
6e4139f6 6890 return 0;
46a3df9f 6891 } else if (resp_code == 1) {
46a3df9f
S
6892 dev_dbg(&hdev->pdev->dev,
6893 "remove mac addr failed for miss.\n");
6e4139f6 6894 return -ENOENT;
46a3df9f 6895 }
6e4139f6
JS
6896
6897 dev_err(&hdev->pdev->dev,
6898 "remove mac addr failed for undefined, code=%u.\n",
6899 resp_code);
6900 return -EIO;
46a3df9f
S
6901 } else if (op == HCLGE_MAC_VLAN_LKUP) {
6902 if (!resp_code) {
6e4139f6 6903 return 0;
46a3df9f 6904 } else if (resp_code == 1) {
46a3df9f
S
6905 dev_dbg(&hdev->pdev->dev,
6906 "lookup mac addr failed for miss.\n");
6e4139f6 6907 return -ENOENT;
46a3df9f 6908 }
6e4139f6 6909
46a3df9f 6910 dev_err(&hdev->pdev->dev,
6e4139f6
JS
6911 "lookup mac addr failed for undefined, code=%u.\n",
6912 resp_code);
6913 return -EIO;
46a3df9f
S
6914 }
6915
6e4139f6
JS
6916 dev_err(&hdev->pdev->dev,
6917 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
6918
6919 return -EINVAL;
46a3df9f
S
6920}
6921
6922static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
6923{
b37ce587
YM
6924#define HCLGE_VF_NUM_IN_FIRST_DESC 192
6925
b9a8f883
YL
6926 unsigned int word_num;
6927 unsigned int bit_num;
46a3df9f
S
6928
6929 if (vfid > 255 || vfid < 0)
6930 return -EIO;
6931
b37ce587 6932 if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
46a3df9f
S
6933 word_num = vfid / 32;
6934 bit_num = vfid % 32;
6935 if (clr)
a90bb9a5 6936 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
46a3df9f 6937 else
a90bb9a5 6938 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
46a3df9f 6939 } else {
b37ce587 6940 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
46a3df9f
S
6941 bit_num = vfid % 32;
6942 if (clr)
a90bb9a5 6943 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
46a3df9f 6944 else
a90bb9a5 6945 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
46a3df9f
S
6946 }
6947
6948 return 0;
6949}
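/* Editor's worked example (illustrative): the function id bitmap spans
 * desc[1] (ids 0..191) and desc[2] (ids 192..255), 32 ids per data word.
 * For instance:
 *
 *	vfid = 40  -> desc[1].data[1], bit 8   (40 / 32 = 1, 40 % 32 = 8)
 *	vfid = 200 -> desc[2].data[0], bit 8   ((200 - 192) / 32 = 0,
 *	                                        200 % 32 = 8)
 */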
6950
6951static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
6952{
6953#define HCLGE_DESC_NUMBER 3
6954#define HCLGE_FUNC_NUMBER_PER_DESC 6
6955 int i, j;
6956
6c39d527 6957 for (i = 1; i < HCLGE_DESC_NUMBER; i++)
46a3df9f
S
6958 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
6959 if (desc[i].data[j])
6960 return false;
6961
6962 return true;
6963}
6964
d44f9b63 6965static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
3a586422 6966 const u8 *addr, bool is_mc)
46a3df9f
S
6967{
6968 const unsigned char *mac_addr = addr;
6969 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
6970 (mac_addr[0]) | (mac_addr[1] << 8);
6971 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
6972
3a586422
WL
6973 hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6974 if (is_mc) {
6975 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
6976 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6977 }
6978
46a3df9f
S
6979 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
6980 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
6981}
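/* Worked example of the address packing above, assuming the MAC address
 * 00:11:22:33:44:55 (illustration only):
 *
 *   high_val = 0x22 << 16 | 0x33 << 24 | 0x00 | 0x11 << 8 = 0x33221100
 *   low_val  = 0x44 | 0x55 << 8                           = 0x5544
 *
 * so mac_addr_hi32 carries MAC bytes 3..0 and mac_addr_lo16 carries
 * bytes 5..4, both converted to little-endian for the command descriptor.
 */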
6982
46a3df9f 6983static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
d44f9b63 6984 struct hclge_mac_vlan_tbl_entry_cmd *req)
46a3df9f
S
6985{
6986 struct hclge_dev *hdev = vport->back;
6987 struct hclge_desc desc;
6988 u8 resp_code;
a90bb9a5 6989 u16 retval;
46a3df9f
S
6990 int ret;
6991
6992 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
6993
d44f9b63 6994 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
46a3df9f
S
6995
6996 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6997 if (ret) {
6998 dev_err(&hdev->pdev->dev,
6999 "del mac addr failed for cmd_send, ret =%d.\n",
7000 ret);
7001 return ret;
7002 }
a90bb9a5
YL
7003 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7004 retval = le16_to_cpu(desc.retval);
46a3df9f 7005
a90bb9a5 7006 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
46a3df9f
S
7007 HCLGE_MAC_VLAN_REMOVE);
7008}
7009
7010static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
d44f9b63 7011 struct hclge_mac_vlan_tbl_entry_cmd *req,
46a3df9f
S
7012 struct hclge_desc *desc,
7013 bool is_mc)
7014{
7015 struct hclge_dev *hdev = vport->back;
7016 u8 resp_code;
a90bb9a5 7017 u16 retval;
46a3df9f
S
7018 int ret;
7019
7020 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
7021 if (is_mc) {
7022 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7023 memcpy(desc[0].data,
7024 req,
d44f9b63 7025 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
46a3df9f
S
7026 hclge_cmd_setup_basic_desc(&desc[1],
7027 HCLGE_OPC_MAC_VLAN_ADD,
7028 true);
7029 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7030 hclge_cmd_setup_basic_desc(&desc[2],
7031 HCLGE_OPC_MAC_VLAN_ADD,
7032 true);
7033 ret = hclge_cmd_send(&hdev->hw, desc, 3);
7034 } else {
7035 memcpy(desc[0].data,
7036 req,
d44f9b63 7037 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
46a3df9f
S
7038 ret = hclge_cmd_send(&hdev->hw, desc, 1);
7039 }
7040 if (ret) {
7041 dev_err(&hdev->pdev->dev,
7042 "lookup mac addr failed for cmd_send, ret =%d.\n",
7043 ret);
7044 return ret;
7045 }
a90bb9a5
YL
7046 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
7047 retval = le16_to_cpu(desc[0].retval);
46a3df9f 7048
a90bb9a5 7049 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
46a3df9f
S
7050 HCLGE_MAC_VLAN_LKUP);
7051}
7052
7053static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
d44f9b63 7054 struct hclge_mac_vlan_tbl_entry_cmd *req,
46a3df9f
S
7055 struct hclge_desc *mc_desc)
7056{
7057 struct hclge_dev *hdev = vport->back;
7058 int cfg_status;
7059 u8 resp_code;
a90bb9a5 7060 u16 retval;
46a3df9f
S
7061 int ret;
7062
7063 if (!mc_desc) {
7064 struct hclge_desc desc;
7065
7066 hclge_cmd_setup_basic_desc(&desc,
7067 HCLGE_OPC_MAC_VLAN_ADD,
7068 false);
d44f9b63
YL
7069 memcpy(desc.data, req,
7070 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
46a3df9f 7071 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
a90bb9a5
YL
7072 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7073 retval = le16_to_cpu(desc.retval);
7074
7075 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
46a3df9f
S
7076 resp_code,
7077 HCLGE_MAC_VLAN_ADD);
7078 } else {
c3b6f755 7079 hclge_cmd_reuse_desc(&mc_desc[0], false);
46a3df9f 7080 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
c3b6f755 7081 hclge_cmd_reuse_desc(&mc_desc[1], false);
46a3df9f 7082 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
c3b6f755 7083 hclge_cmd_reuse_desc(&mc_desc[2], false);
46a3df9f
S
7084 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
7085 memcpy(mc_desc[0].data, req,
d44f9b63 7086 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
46a3df9f 7087 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
a90bb9a5
YL
7088 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
7089 retval = le16_to_cpu(mc_desc[0].retval);
7090
7091 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
46a3df9f
S
7092 resp_code,
7093 HCLGE_MAC_VLAN_ADD);
7094 }
7095
7096 if (ret) {
7097 dev_err(&hdev->pdev->dev,
7098 "add mac addr failed for cmd_send, ret =%d.\n",
7099 ret);
7100 return ret;
7101 }
7102
7103 return cfg_status;
7104}
7105
39932473
JS
7106static int hclge_init_umv_space(struct hclge_dev *hdev)
7107{
7108 u16 allocated_size = 0;
7109 int ret;
7110
7111 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
7112 true);
7113 if (ret)
7114 return ret;
7115
7116 if (allocated_size < hdev->wanted_umv_size)
7117 dev_warn(&hdev->pdev->dev,
adcf738b 7118 "Alloc umv space failed, want %u, get %u\n",
39932473
JS
7119 hdev->wanted_umv_size, allocated_size);
7120
7121 mutex_init(&hdev->umv_mutex);
7122 hdev->max_umv_size = allocated_size;
e91e388c
JS
7123 /* divide max_umv_size by (hdev->num_req_vfs + 2) in order to
7124 * reserve some unicast mac vlan table entries to be shared by
7125 * the pf and its vfs.
7126 */
39932473
JS
7127 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
7128 hdev->share_umv_size = hdev->priv_umv_size +
7129 hdev->max_umv_size % (hdev->num_req_vfs + 2);
7130
7131 return 0;
7132}
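/* Example of the split above, assuming the firmware granted
 * allocated_size = 300 entries and hdev->num_req_vfs = 6 (illustrative
 * numbers only, not taken from real hardware):
 *
 *   priv_umv_size  = 300 / (6 + 2)      = 37   private quota per PF/VF
 *   share_umv_size = 37 + 300 % (6 + 2) = 41   pool shared by PF and VFs
 */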
7133
7134static int hclge_uninit_umv_space(struct hclge_dev *hdev)
7135{
7136 int ret;
7137
7138 if (hdev->max_umv_size > 0) {
7139 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
7140 false);
7141 if (ret)
7142 return ret;
7143 hdev->max_umv_size = 0;
7144 }
7145 mutex_destroy(&hdev->umv_mutex);
7146
7147 return 0;
7148}
7149
7150static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
7151 u16 *allocated_size, bool is_alloc)
7152{
7153 struct hclge_umv_spc_alc_cmd *req;
7154 struct hclge_desc desc;
7155 int ret;
7156
7157 req = (struct hclge_umv_spc_alc_cmd *)desc.data;
7158 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
63cbf7a9
YM
7159 if (!is_alloc)
7160 hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, 1);
7161
39932473
JS
7162 req->space_size = cpu_to_le32(space_size);
7163
7164 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7165 if (ret) {
7166 dev_err(&hdev->pdev->dev,
7167 "%s umv space failed for cmd_send, ret =%d\n",
7168 is_alloc ? "allocate" : "free", ret);
7169 return ret;
7170 }
7171
7172 if (is_alloc && allocated_size)
7173 *allocated_size = le32_to_cpu(desc.data[1]);
7174
7175 return 0;
7176}
7177
7178static void hclge_reset_umv_space(struct hclge_dev *hdev)
7179{
7180 struct hclge_vport *vport;
7181 int i;
7182
7183 for (i = 0; i < hdev->num_alloc_vport; i++) {
7184 vport = &hdev->vport[i];
7185 vport->used_umv_num = 0;
7186 }
7187
7188 mutex_lock(&hdev->umv_mutex);
7189 hdev->share_umv_size = hdev->priv_umv_size +
7190 hdev->max_umv_size % (hdev->num_req_vfs + 2);
7191 mutex_unlock(&hdev->umv_mutex);
7192}
7193
7194static bool hclge_is_umv_space_full(struct hclge_vport *vport)
7195{
7196 struct hclge_dev *hdev = vport->back;
7197 bool is_full;
7198
7199 mutex_lock(&hdev->umv_mutex);
7200 is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
7201 hdev->share_umv_size == 0);
7202 mutex_unlock(&hdev->umv_mutex);
7203
7204 return is_full;
7205}
7206
7207static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
7208{
7209 struct hclge_dev *hdev = vport->back;
7210
7211 mutex_lock(&hdev->umv_mutex);
7212 if (is_free) {
7213 if (vport->used_umv_num > hdev->priv_umv_size)
7214 hdev->share_umv_size++;
54a395b6 7215
7216 if (vport->used_umv_num > 0)
7217 vport->used_umv_num--;
39932473 7218 } else {
54a395b6 7219 if (vport->used_umv_num >= hdev->priv_umv_size &&
7220 hdev->share_umv_size > 0)
39932473
JS
7221 hdev->share_umv_size--;
7222 vport->used_umv_num++;
7223 }
7224 mutex_unlock(&hdev->umv_mutex);
7225}
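/* Example of the quota accounting above, assuming priv_umv_size = 32
 * (illustrative value): a vport first consumes its own 32 private entries
 * (used_umv_num goes 0 -> 32); the 33rd address is charged to the shared
 * pool (share_umv_size--). Freeing walks back in the same order, returning
 * entries to the shared pool first while used_umv_num still exceeds the
 * private quota.
 */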
7226
46a3df9f
S
7227static int hclge_add_uc_addr(struct hnae3_handle *handle,
7228 const unsigned char *addr)
7229{
7230 struct hclge_vport *vport = hclge_get_vport(handle);
7231
7232 return hclge_add_uc_addr_common(vport, addr);
7233}
7234
7235int hclge_add_uc_addr_common(struct hclge_vport *vport,
7236 const unsigned char *addr)
7237{
7238 struct hclge_dev *hdev = vport->back;
d44f9b63 7239 struct hclge_mac_vlan_tbl_entry_cmd req;
d07b6bb4 7240 struct hclge_desc desc;
a90bb9a5 7241 u16 egress_port = 0;
aa7a795e 7242 int ret;
46a3df9f
S
7243
7244 /* mac addr check */
7245 if (is_zero_ether_addr(addr) ||
7246 is_broadcast_ether_addr(addr) ||
7247 is_multicast_ether_addr(addr)) {
7248 dev_err(&hdev->pdev->dev,
7249 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
9b2f3477 7250 addr, is_zero_ether_addr(addr),
46a3df9f
S
7251 is_broadcast_ether_addr(addr),
7252 is_multicast_ether_addr(addr));
7253 return -EINVAL;
7254 }
7255
7256 memset(&req, 0, sizeof(req));
a90bb9a5 7257
e4e87715
PL
7258 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7259 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
a90bb9a5
YL
7260
7261 req.egress_port = cpu_to_le16(egress_port);
46a3df9f 7262
3a586422 7263 hclge_prepare_mac_addr(&req, addr, false);
46a3df9f 7264
d07b6bb4
JS
7265 /* Lookup the mac address in the mac_vlan table, and add
7266 * it if the entry is nonexistent. Duplicate unicast entries
7267 * are not allowed in the mac vlan table.
7268 */
7269 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
39932473
JS
7270 if (ret == -ENOENT) {
7271 if (!hclge_is_umv_space_full(vport)) {
7272 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
7273 if (!ret)
7274 hclge_update_umv_space(vport, false);
7275 return ret;
7276 }
7277
7278 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
7279 hdev->priv_umv_size);
7280
7281 return -ENOSPC;
7282 }
d07b6bb4
JS
7283
7284 /* check if we just hit the duplicate */
72110b56 7285 if (!ret) {
adcf738b 7286 dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
72110b56
PL
7287 vport->vport_id, addr);
7288 return 0;
7289 }
d07b6bb4
JS
7290
7291 dev_err(&hdev->pdev->dev,
7292 "PF failed to add unicast entry(%pM) in the MAC table\n",
7293 addr);
46a3df9f 7294
aa7a795e 7295 return ret;
46a3df9f
S
7296}
7297
7298static int hclge_rm_uc_addr(struct hnae3_handle *handle,
7299 const unsigned char *addr)
7300{
7301 struct hclge_vport *vport = hclge_get_vport(handle);
7302
7303 return hclge_rm_uc_addr_common(vport, addr);
7304}
7305
7306int hclge_rm_uc_addr_common(struct hclge_vport *vport,
7307 const unsigned char *addr)
7308{
7309 struct hclge_dev *hdev = vport->back;
d44f9b63 7310 struct hclge_mac_vlan_tbl_entry_cmd req;
aa7a795e 7311 int ret;
46a3df9f
S
7312
7313 /* mac addr check */
7314 if (is_zero_ether_addr(addr) ||
7315 is_broadcast_ether_addr(addr) ||
7316 is_multicast_ether_addr(addr)) {
9b2f3477
WL
7317 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
7318 addr);
46a3df9f
S
7319 return -EINVAL;
7320 }
7321
7322 memset(&req, 0, sizeof(req));
e4e87715 7323 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
3a586422 7324 hclge_prepare_mac_addr(&req, addr, false);
aa7a795e 7325 ret = hclge_remove_mac_vlan_tbl(vport, &req);
39932473
JS
7326 if (!ret)
7327 hclge_update_umv_space(vport, true);
46a3df9f 7328
aa7a795e 7329 return ret;
46a3df9f
S
7330}
7331
7332static int hclge_add_mc_addr(struct hnae3_handle *handle,
7333 const unsigned char *addr)
7334{
7335 struct hclge_vport *vport = hclge_get_vport(handle);
7336
a10829c4 7337 return hclge_add_mc_addr_common(vport, addr);
46a3df9f
S
7338}
7339
7340int hclge_add_mc_addr_common(struct hclge_vport *vport,
7341 const unsigned char *addr)
7342{
7343 struct hclge_dev *hdev = vport->back;
d44f9b63 7344 struct hclge_mac_vlan_tbl_entry_cmd req;
46a3df9f 7345 struct hclge_desc desc[3];
46a3df9f
S
7346 int status;
7347
7348 /* mac addr check */
7349 if (!is_multicast_ether_addr(addr)) {
7350 dev_err(&hdev->pdev->dev,
7351 "Add mc mac err! invalid mac:%pM.\n",
7352 addr);
7353 return -EINVAL;
7354 }
7355 memset(&req, 0, sizeof(req));
e4e87715 7356 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
3a586422 7357 hclge_prepare_mac_addr(&req, addr, true);
46a3df9f 7358 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
63cbf7a9 7359 if (status) {
46a3df9f
S
7360 /* This mac addr does not exist, add a new entry for it */
7361 memset(desc[0].data, 0, sizeof(desc[0].data));
7362 memset(desc[1].data, 0, sizeof(desc[0].data));
7363 memset(desc[2].data, 0, sizeof(desc[0].data));
46a3df9f 7364 }
63cbf7a9
YM
7365 status = hclge_update_desc_vfid(desc, vport->vport_id, false);
7366 if (status)
7367 return status;
7368 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
46a3df9f 7369
1f6db589
JS
7370 if (status == -ENOSPC)
7371 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
46a3df9f
S
7372
7373 return status;
7374}
7375
7376static int hclge_rm_mc_addr(struct hnae3_handle *handle,
7377 const unsigned char *addr)
7378{
7379 struct hclge_vport *vport = hclge_get_vport(handle);
7380
7381 return hclge_rm_mc_addr_common(vport, addr);
7382}
7383
7384int hclge_rm_mc_addr_common(struct hclge_vport *vport,
7385 const unsigned char *addr)
7386{
7387 struct hclge_dev *hdev = vport->back;
d44f9b63 7388 struct hclge_mac_vlan_tbl_entry_cmd req;
46a3df9f
S
7389 enum hclge_cmd_status status;
7390 struct hclge_desc desc[3];
46a3df9f
S
7391
7392 /* mac addr check */
7393 if (!is_multicast_ether_addr(addr)) {
7394 dev_dbg(&hdev->pdev->dev,
7395 "Remove mc mac err! invalid mac:%pM.\n",
7396 addr);
7397 return -EINVAL;
7398 }
7399
7400 memset(&req, 0, sizeof(req));
e4e87715 7401 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
3a586422 7402 hclge_prepare_mac_addr(&req, addr, true);
46a3df9f
S
7403 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7404 if (!status) {
7405 /* This mac addr exists, remove this handle's VFID for it */
63cbf7a9
YM
7406 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
7407 if (status)
7408 return status;
46a3df9f
S
7409
7410 if (hclge_is_all_function_id_zero(desc))
7411 /* All the vfids are zero, so delete this entry */
7412 status = hclge_remove_mac_vlan_tbl(vport, &req);
7413 else
7414 /* Not all the vfids are zero, just update the vfids */
7415 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7416
7417 } else {
40cca1c5
XW
7418 /* Maybe this mac address is in the mta table, but it cannot be
7419 * deleted here because an mta entry represents an address
7420 * range rather than a specific address. The delete action for
7421 * all entries will take effect in update_mta_status, called by
7422 * hns3_nic_set_rx_mode.
7423 */
7424 status = 0;
46a3df9f
S
7425 }
7426
46a3df9f
S
7427 return status;
7428}
7429
6dd86902 7430void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
7431 enum HCLGE_MAC_ADDR_TYPE mac_type)
7432{
7433 struct hclge_vport_mac_addr_cfg *mac_cfg;
7434 struct list_head *list;
7435
7436 if (!vport->vport_id)
7437 return;
7438
7439 mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
7440 if (!mac_cfg)
7441 return;
7442
7443 mac_cfg->hd_tbl_status = true;
7444 memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
7445
7446 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7447 &vport->uc_mac_list : &vport->mc_mac_list;
7448
7449 list_add_tail(&mac_cfg->node, list);
7450}
7451
7452void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
7453 bool is_write_tbl,
7454 enum HCLGE_MAC_ADDR_TYPE mac_type)
7455{
7456 struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7457 struct list_head *list;
7458 bool uc_flag, mc_flag;
7459
7460 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7461 &vport->uc_mac_list : &vport->mc_mac_list;
7462
7463 uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
7464 mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
7465
7466 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
db4d3d55 7467 if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
6dd86902 7468 if (uc_flag && mac_cfg->hd_tbl_status)
7469 hclge_rm_uc_addr_common(vport, mac_addr);
7470
7471 if (mc_flag && mac_cfg->hd_tbl_status)
7472 hclge_rm_mc_addr_common(vport, mac_addr);
7473
7474 list_del(&mac_cfg->node);
7475 kfree(mac_cfg);
7476 break;
7477 }
7478 }
7479}
7480
7481void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
7482 enum HCLGE_MAC_ADDR_TYPE mac_type)
7483{
7484 struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7485 struct list_head *list;
7486
7487 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7488 &vport->uc_mac_list : &vport->mc_mac_list;
7489
7490 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7491 if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
7492 hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
7493
7494 if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
7495 hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
7496
7497 mac_cfg->hd_tbl_status = false;
7498 if (is_del_list) {
7499 list_del(&mac_cfg->node);
7500 kfree(mac_cfg);
7501 }
7502 }
7503}
7504
7505void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
7506{
7507 struct hclge_vport_mac_addr_cfg *mac, *tmp;
7508 struct hclge_vport *vport;
7509 int i;
7510
6dd86902 7511 for (i = 0; i < hdev->num_alloc_vport; i++) {
7512 vport = &hdev->vport[i];
7513 list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
7514 list_del(&mac->node);
7515 kfree(mac);
7516 }
7517
7518 list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
7519 list_del(&mac->node);
7520 kfree(mac);
7521 }
7522 }
6dd86902 7523}
7524
f5aac71c
FL
7525static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
7526 u16 cmdq_resp, u8 resp_code)
7527{
7528#define HCLGE_ETHERTYPE_SUCCESS_ADD 0
7529#define HCLGE_ETHERTYPE_ALREADY_ADD 1
7530#define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
7531#define HCLGE_ETHERTYPE_KEY_CONFLICT 3
7532
7533 int return_status;
7534
7535 if (cmdq_resp) {
7536 dev_err(&hdev->pdev->dev,
adcf738b 7537 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
f5aac71c
FL
7538 cmdq_resp);
7539 return -EIO;
7540 }
7541
7542 switch (resp_code) {
7543 case HCLGE_ETHERTYPE_SUCCESS_ADD:
7544 case HCLGE_ETHERTYPE_ALREADY_ADD:
7545 return_status = 0;
7546 break;
7547 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
7548 dev_err(&hdev->pdev->dev,
7549 "add mac ethertype failed for manager table overflow.\n");
7550 return_status = -EIO;
7551 break;
7552 case HCLGE_ETHERTYPE_KEY_CONFLICT:
7553 dev_err(&hdev->pdev->dev,
7554 "add mac ethertype failed for key conflict.\n");
7555 return_status = -EIO;
7556 break;
7557 default:
7558 dev_err(&hdev->pdev->dev,
adcf738b 7559 "add mac ethertype failed for undefined, code=%u.\n",
f5aac71c
FL
7560 resp_code);
7561 return_status = -EIO;
7562 }
7563
7564 return return_status;
7565}
7566
8e6de441
HT
7567static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
7568 u8 *mac_addr)
7569{
7570 struct hclge_mac_vlan_tbl_entry_cmd req;
7571 struct hclge_dev *hdev = vport->back;
7572 struct hclge_desc desc;
7573 u16 egress_port = 0;
7574 int i;
7575
7576 if (is_zero_ether_addr(mac_addr))
7577 return false;
7578
7579 memset(&req, 0, sizeof(req));
7580 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7581 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7582 req.egress_port = cpu_to_le16(egress_port);
7583 hclge_prepare_mac_addr(&req, mac_addr, false);
7584
7585 if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
7586 return true;
7587
7588 vf_idx += HCLGE_VF_VPORT_START_NUM;
7589 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
7590 if (i != vf_idx &&
7591 ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
7592 return true;
7593
7594 return false;
7595}
7596
7597static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
7598 u8 *mac_addr)
7599{
7600 struct hclge_vport *vport = hclge_get_vport(handle);
7601 struct hclge_dev *hdev = vport->back;
7602
7603 vport = hclge_get_vf_vport(hdev, vf);
7604 if (!vport)
7605 return -EINVAL;
7606
7607 if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
7608 dev_info(&hdev->pdev->dev,
7609 "Specified MAC(=%pM) is same as before, no change committed!\n",
7610 mac_addr);
7611 return 0;
7612 }
7613
7614 if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
7615 dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
7616 mac_addr);
7617 return -EEXIST;
7618 }
7619
7620 ether_addr_copy(vport->vf_info.mac, mac_addr);
7621 dev_info(&hdev->pdev->dev,
7622 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
7623 vf, mac_addr);
7624
7625 return hclge_inform_reset_assert_to_vf(vport);
7626}
7627
f5aac71c
FL
7628static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
7629 const struct hclge_mac_mgr_tbl_entry_cmd *req)
7630{
7631 struct hclge_desc desc;
7632 u8 resp_code;
7633 u16 retval;
7634 int ret;
7635
7636 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
7637 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
7638
7639 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7640 if (ret) {
7641 dev_err(&hdev->pdev->dev,
7642 "add mac ethertype failed for cmd_send, ret =%d.\n",
7643 ret);
7644 return ret;
7645 }
7646
7647 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7648 retval = le16_to_cpu(desc.retval);
7649
7650 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
7651}
7652
7653static int init_mgr_tbl(struct hclge_dev *hdev)
7654{
7655 int ret;
7656 int i;
7657
7658 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
7659 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
7660 if (ret) {
7661 dev_err(&hdev->pdev->dev,
7662 "add mac ethertype failed, ret =%d.\n",
7663 ret);
7664 return ret;
7665 }
7666 }
7667
7668 return 0;
7669}
7670
46a3df9f
S
7671static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
7672{
7673 struct hclge_vport *vport = hclge_get_vport(handle);
7674 struct hclge_dev *hdev = vport->back;
7675
7676 ether_addr_copy(p, hdev->hw.mac.mac_addr);
7677}
7678
59098055
FL
7679static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
7680 bool is_first)
46a3df9f
S
7681{
7682 const unsigned char *new_addr = (const unsigned char *)p;
7683 struct hclge_vport *vport = hclge_get_vport(handle);
7684 struct hclge_dev *hdev = vport->back;
18838d0c 7685 int ret;
46a3df9f
S
7686
7687 /* mac addr check */
7688 if (is_zero_ether_addr(new_addr) ||
7689 is_broadcast_ether_addr(new_addr) ||
7690 is_multicast_ether_addr(new_addr)) {
7691 dev_err(&hdev->pdev->dev,
ed5b255b 7692 "Change uc mac err! invalid mac:%pM.\n",
46a3df9f
S
7693 new_addr);
7694 return -EINVAL;
7695 }
7696
962e31bd
YL
7697 if ((!is_first || is_kdump_kernel()) &&
7698 hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
18838d0c 7699 dev_warn(&hdev->pdev->dev,
59098055 7700 "remove old uc mac address fail.\n");
46a3df9f 7701
18838d0c
FL
7702 ret = hclge_add_uc_addr(handle, new_addr);
7703 if (ret) {
7704 dev_err(&hdev->pdev->dev,
7705 "add uc mac address fail, ret =%d.\n",
7706 ret);
7707
59098055
FL
7708 if (!is_first &&
7709 hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
18838d0c 7710 dev_err(&hdev->pdev->dev,
59098055 7711 "restore uc mac address fail.\n");
18838d0c
FL
7712
7713 return -EIO;
46a3df9f
S
7714 }
7715
e98d7183 7716 ret = hclge_pause_addr_cfg(hdev, new_addr);
18838d0c
FL
7717 if (ret) {
7718 dev_err(&hdev->pdev->dev,
7719 "configure mac pause address fail, ret =%d.\n",
7720 ret);
7721 return -EIO;
7722 }
7723
7724 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
7725
7726 return 0;
46a3df9f
S
7727}
7728
26483246
XW
7729static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
7730 int cmd)
7731{
7732 struct hclge_vport *vport = hclge_get_vport(handle);
7733 struct hclge_dev *hdev = vport->back;
7734
7735 if (!hdev->hw.mac.phydev)
7736 return -EOPNOTSUPP;
7737
7738 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
7739}
7740
46a3df9f 7741static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
30ebc576 7742 u8 fe_type, bool filter_en, u8 vf_id)
46a3df9f 7743{
d44f9b63 7744 struct hclge_vlan_filter_ctrl_cmd *req;
46a3df9f
S
7745 struct hclge_desc desc;
7746 int ret;
7747
903b85d3
JS
7748 /* read current vlan filter parameter */
7749 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
d44f9b63 7750 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
46a3df9f 7751 req->vlan_type = vlan_type;
30ebc576 7752 req->vf_id = vf_id;
46a3df9f 7753
903b85d3
JS
7754 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7755 if (ret) {
7756 dev_err(&hdev->pdev->dev,
7757 "failed to get vlan filter config, ret = %d.\n", ret);
7758 return ret;
7759 }
7760
7761 /* modify and write new config parameter */
7762 hclge_cmd_reuse_desc(&desc, false);
7763 req->vlan_fe = filter_en ?
7764 (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
7765
46a3df9f 7766 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 7767 if (ret)
903b85d3 7768 dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
46a3df9f 7769 ret);
46a3df9f 7770
3f639907 7771 return ret;
46a3df9f
S
7772}
7773
391b5e93
JS
7774#define HCLGE_FILTER_TYPE_VF 0
7775#define HCLGE_FILTER_TYPE_PORT 1
64d114f0
ZL
7776#define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0)
7777#define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0)
7778#define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1)
7779#define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
7780#define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3)
7781#define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \
7782 | HCLGE_FILTER_FE_ROCE_EGRESS_B)
7783#define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \
7784 | HCLGE_FILTER_FE_ROCE_INGRESS_B)
391b5e93
JS
7785
7786static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
7787{
7788 struct hclge_vport *vport = hclge_get_vport(handle);
7789 struct hclge_dev *hdev = vport->back;
7790
64d114f0
ZL
7791 if (hdev->pdev->revision >= 0x21) {
7792 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
30ebc576 7793 HCLGE_FILTER_FE_EGRESS, enable, 0);
64d114f0 7794 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
30ebc576 7795 HCLGE_FILTER_FE_INGRESS, enable, 0);
64d114f0
ZL
7796 } else {
7797 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
30ebc576
JS
7798 HCLGE_FILTER_FE_EGRESS_V1_B, enable,
7799 0);
64d114f0 7800 }
c60edc17
JS
7801 if (enable)
7802 handle->netdev_flags |= HNAE3_VLAN_FLTR;
7803 else
7804 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
391b5e93
JS
7805}
7806
ebaf1908 7807static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
70a21490 7808 bool is_kill, u16 vlan,
dc8131d8 7809 __be16 proto)
46a3df9f 7810{
22044f95 7811 struct hclge_vport *vport = &hdev->vport[vfid];
d44f9b63
YL
7812 struct hclge_vlan_filter_vf_cfg_cmd *req0;
7813 struct hclge_vlan_filter_vf_cfg_cmd *req1;
46a3df9f
S
7814 struct hclge_desc desc[2];
7815 u8 vf_byte_val;
7816 u8 vf_byte_off;
7817 int ret;
7818
81a9255e 7819 /* if the vf vlan table is full, firmware will disable the vf vlan filter; it
22044f95
JS
7820 * is neither possible nor necessary to add a new vlan id to the vf vlan filter.
7821 * If spoof check is enabled and the vf vlan table is full, a new vlan shouldn't
7822 * be added, because tx packets with these vlan ids will be dropped.
81a9255e 7823 */
22044f95
JS
7824 if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
7825 if (vport->vf_info.spoofchk && vlan) {
7826 dev_err(&hdev->pdev->dev,
7827 "Can't add vlan due to spoof check is on and vf vlan table is full\n");
7828 return -EPERM;
7829 }
81a9255e 7830 return 0;
22044f95 7831 }
81a9255e 7832
46a3df9f
S
7833 hclge_cmd_setup_basic_desc(&desc[0],
7834 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7835 hclge_cmd_setup_basic_desc(&desc[1],
7836 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7837
7838 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7839
7840 vf_byte_off = vfid / 8;
7841 vf_byte_val = 1 << (vfid % 8);
7842
d44f9b63
YL
7843 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
7844 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
46a3df9f 7845
a90bb9a5 7846 req0->vlan_id = cpu_to_le16(vlan);
46a3df9f
S
7847 req0->vlan_cfg = is_kill;
7848
7849 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
7850 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
7851 else
7852 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
7853
7854 ret = hclge_cmd_send(&hdev->hw, desc, 2);
7855 if (ret) {
7856 dev_err(&hdev->pdev->dev,
7857 "Send vf vlan command fail, ret =%d.\n",
7858 ret);
7859 return ret;
7860 }
7861
7862 if (!is_kill) {
6c251711 7863#define HCLGE_VF_VLAN_NO_ENTRY 2
46a3df9f
S
7864 if (!req0->resp_code || req0->resp_code == 1)
7865 return 0;
7866
6c251711 7867 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
81a9255e 7868 set_bit(vfid, hdev->vf_vlan_full);
6c251711
YL
7869 dev_warn(&hdev->pdev->dev,
7870 "vf vlan table is full, vf vlan filter is disabled\n");
7871 return 0;
7872 }
7873
46a3df9f 7874 dev_err(&hdev->pdev->dev,
adcf738b 7875 "Add vf vlan filter fail, ret =%u.\n",
46a3df9f
S
7876 req0->resp_code);
7877 } else {
41dafea2 7878#define HCLGE_VF_VLAN_DEL_NO_FOUND 1
46a3df9f
S
7879 if (!req0->resp_code)
7880 return 0;
7881
d0c31df2
JS
7882 /* vf vlan filter is disabled when the vf vlan table is full,
7883 * so new vlan ids will not be added into the vf vlan table.
7884 * Just return 0 without warning, to avoid massive verbose
7885 * print logs at unload time.
7886 */
7887 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
41dafea2 7888 return 0;
41dafea2 7889
46a3df9f 7890 dev_err(&hdev->pdev->dev,
adcf738b 7891 "Kill vf vlan filter fail, ret =%u.\n",
46a3df9f
S
7892 req0->resp_code);
7893 }
7894
7895 return -EIO;
7896}
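/* Example of the vf bitmap placement above, for vfid = 21 (illustration
 * only): vf_byte_off = 21 / 8 = 2 and vf_byte_val = 1 << (21 % 8) = 0x20,
 * so bit 5 of vf_bitmap[2] is set in req0 (entries land in req1 only once
 * vf_byte_off reaches HCLGE_MAX_VF_BYTES).
 */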
7897
dc8131d8
YL
7898static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
7899 u16 vlan_id, bool is_kill)
46a3df9f 7900{
d44f9b63 7901 struct hclge_vlan_filter_pf_cfg_cmd *req;
46a3df9f
S
7902 struct hclge_desc desc;
7903 u8 vlan_offset_byte_val;
7904 u8 vlan_offset_byte;
7905 u8 vlan_offset_160;
7906 int ret;
7907
7908 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
7909
d6ad7c53
GL
7910 vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
7911 vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
7912 HCLGE_VLAN_BYTE_SIZE;
7913 vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
46a3df9f 7914
d44f9b63 7915 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
46a3df9f
S
7916 req->vlan_offset = vlan_offset_160;
7917 req->vlan_cfg = is_kill;
7918 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
7919
7920 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
dc8131d8
YL
7921 if (ret)
7922 dev_err(&hdev->pdev->dev,
7923 "port vlan command, send fail, ret =%d.\n", ret);
7924 return ret;
7925}
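/* Worked example of the offsets above, assuming HCLGE_VLAN_ID_OFFSET_STEP
 * is 160 and HCLGE_VLAN_BYTE_SIZE is 8 (as the field name vlan_offset_160
 * suggests; the macros are defined outside this excerpt), for
 * vlan_id = 1000:
 *
 *   vlan_offset_160      = 1000 / 160       = 6
 *   vlan_offset_byte     = (1000 % 160) / 8 = 5
 *   vlan_offset_byte_val = 1 << (1000 % 8)  = 0x01
 *
 * i.e. bit 0 of byte 5 within the block that covers vlans 960..1119.
 */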
7926
7927static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
70a21490 7928 u16 vport_id, u16 vlan_id,
dc8131d8
YL
7929 bool is_kill)
7930{
7931 u16 vport_idx, vport_num = 0;
7932 int ret;
7933
daaa8521
YL
7934 if (is_kill && !vlan_id)
7935 return 0;
7936
dc8131d8 7937 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
70a21490 7938 proto);
46a3df9f
S
7939 if (ret) {
7940 dev_err(&hdev->pdev->dev,
adcf738b 7941 "Set %u vport vlan filter config fail, ret =%d.\n",
dc8131d8 7942 vport_id, ret);
46a3df9f
S
7943 return ret;
7944 }
7945
dc8131d8
YL
7946 /* vlan 0 may be added twice when 8021q module is enabled */
7947 if (!is_kill && !vlan_id &&
7948 test_bit(vport_id, hdev->vlan_table[vlan_id]))
7949 return 0;
7950
7951 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
46a3df9f 7952 dev_err(&hdev->pdev->dev,
adcf738b 7953 "Add port vlan failed, vport %u is already in vlan %u\n",
dc8131d8
YL
7954 vport_id, vlan_id);
7955 return -EINVAL;
46a3df9f
S
7956 }
7957
dc8131d8
YL
7958 if (is_kill &&
7959 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
7960 dev_err(&hdev->pdev->dev,
adcf738b 7961 "Delete port vlan failed, vport %u is not in vlan %u\n",
dc8131d8
YL
7962 vport_id, vlan_id);
7963 return -EINVAL;
7964 }
7965
54e97d11 7966 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
dc8131d8
YL
7967 vport_num++;
7968
7969 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
7970 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
7971 is_kill);
7972
7973 return ret;
7974}
7975
5f6ea83f
PL
7976static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
7977{
7978 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
7979 struct hclge_vport_vtag_tx_cfg_cmd *req;
7980 struct hclge_dev *hdev = vport->back;
7981 struct hclge_desc desc;
d9c0f275 7982 u16 bmap_index;
5f6ea83f
PL
7983 int status;
7984
7985 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
7986
7987 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
7988 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
7989 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
e4e87715
PL
7990 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
7991 vcfg->accept_tag1 ? 1 : 0);
7992 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
7993 vcfg->accept_untag1 ? 1 : 0);
7994 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
7995 vcfg->accept_tag2 ? 1 : 0);
7996 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
7997 vcfg->accept_untag2 ? 1 : 0);
7998 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
7999 vcfg->insert_tag1_en ? 1 : 0);
8000 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
8001 vcfg->insert_tag2_en ? 1 : 0);
8002 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
5f6ea83f
PL
8003
8004 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
d9c0f275
JS
8005 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8006 HCLGE_VF_NUM_PER_BYTE;
8007 req->vf_bitmap[bmap_index] =
8008 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
5f6ea83f
PL
8009
8010 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8011 if (status)
8012 dev_err(&hdev->pdev->dev,
8013 "Send port txvlan cfg command fail, ret =%d\n",
8014 status);
8015
8016 return status;
8017}
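/* Example of the vf_offset/bmap_index computation above, assuming
 * HCLGE_VF_NUM_PER_CMD = 64 and HCLGE_VF_NUM_PER_BYTE = 8 (illustrative
 * values; the macros are defined outside this excerpt), for vport_id = 21:
 *
 *   vf_offset  = 21 / 64        = 0
 *   bmap_index = (21 % 64) / 8  = 2
 *   bit value  = 1U << (21 % 8) = 0x20
 *
 * The rx counterpart below uses the same layout.
 */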
8018
8019static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
8020{
8021 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
8022 struct hclge_vport_vtag_rx_cfg_cmd *req;
8023 struct hclge_dev *hdev = vport->back;
8024 struct hclge_desc desc;
d9c0f275 8025 u16 bmap_index;
5f6ea83f
PL
8026 int status;
8027
8028 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
8029
8030 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
e4e87715
PL
8031 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
8032 vcfg->strip_tag1_en ? 1 : 0);
8033 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
8034 vcfg->strip_tag2_en ? 1 : 0);
8035 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
8036 vcfg->vlan1_vlan_prionly ? 1 : 0);
8037 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
8038 vcfg->vlan2_vlan_prionly ? 1 : 0);
5f6ea83f
PL
8039
8040 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
d9c0f275
JS
8041 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8042 HCLGE_VF_NUM_PER_BYTE;
8043 req->vf_bitmap[bmap_index] =
8044 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
5f6ea83f
PL
8045
8046 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8047 if (status)
8048 dev_err(&hdev->pdev->dev,
8049 "Send port rxvlan cfg command fail, ret =%d\n",
8050 status);
8051
8052 return status;
8053}
8054
741fca16
JS
8055static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
8056 u16 port_base_vlan_state,
8057 u16 vlan_tag)
8058{
8059 int ret;
8060
8061 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8062 vport->txvlan_cfg.accept_tag1 = true;
8063 vport->txvlan_cfg.insert_tag1_en = false;
8064 vport->txvlan_cfg.default_tag1 = 0;
8065 } else {
8066 vport->txvlan_cfg.accept_tag1 = false;
8067 vport->txvlan_cfg.insert_tag1_en = true;
8068 vport->txvlan_cfg.default_tag1 = vlan_tag;
8069 }
8070
8071 vport->txvlan_cfg.accept_untag1 = true;
8072
8073 /* accept_tag2 and accept_untag2 are not supported on
8074 * pdev revision(0x20); newer revisions support them, but
8075 * these two fields cannot be configured by the user.
8076 */
8077 vport->txvlan_cfg.accept_tag2 = true;
8078 vport->txvlan_cfg.accept_untag2 = true;
8079 vport->txvlan_cfg.insert_tag2_en = false;
8080 vport->txvlan_cfg.default_tag2 = 0;
8081
8082 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8083 vport->rxvlan_cfg.strip_tag1_en = false;
8084 vport->rxvlan_cfg.strip_tag2_en =
8085 vport->rxvlan_cfg.rx_vlan_offload_en;
8086 } else {
8087 vport->rxvlan_cfg.strip_tag1_en =
8088 vport->rxvlan_cfg.rx_vlan_offload_en;
8089 vport->rxvlan_cfg.strip_tag2_en = true;
8090 }
8091 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8092 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8093
8094 ret = hclge_set_vlan_tx_offload_cfg(vport);
8095 if (ret)
8096 return ret;
8097
8098 return hclge_set_vlan_rx_offload_cfg(vport);
8099}
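/* Summary of the configuration chosen above, as implemented:
 *
 *   port base vlan DISABLE: tx accepts tag1 from the stack and inserts
 *   nothing; rx leaves tag1 alone and strips tag2 only when rx vlan
 *   offload is enabled.
 *
 *   port base vlan ENABLE/MODIFY: tx rejects tag1 from the stack and
 *   inserts vlan_tag as the default tag1; rx strips tag1 according to the
 *   rx vlan offload setting and always strips tag2.
 */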
8100
5f6ea83f
PL
8101static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
8102{
8103 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
8104 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
8105 struct hclge_desc desc;
8106 int status;
8107
8108 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
8109 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
8110 rx_req->ot_fst_vlan_type =
8111 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
8112 rx_req->ot_sec_vlan_type =
8113 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
8114 rx_req->in_fst_vlan_type =
8115 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
8116 rx_req->in_sec_vlan_type =
8117 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
8118
8119 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8120 if (status) {
8121 dev_err(&hdev->pdev->dev,
8122 "Send rxvlan protocol type command fail, ret =%d\n",
8123 status);
8124 return status;
8125 }
8126
8127 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
8128
d0d72bac 8129 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
5f6ea83f
PL
8130 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
8131 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
8132
8133 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8134 if (status)
8135 dev_err(&hdev->pdev->dev,
8136 "Send txvlan protocol type command fail, ret =%d\n",
8137 status);
8138
8139 return status;
8140}
8141
46a3df9f
S
8142static int hclge_init_vlan_config(struct hclge_dev *hdev)
8143{
5f6ea83f
PL
8144#define HCLGE_DEF_VLAN_TYPE 0x8100
8145
c60edc17 8146 struct hnae3_handle *handle = &hdev->vport[0].nic;
5f6ea83f 8147 struct hclge_vport *vport;
46a3df9f 8148 int ret;
5f6ea83f
PL
8149 int i;
8150
64d114f0 8151 if (hdev->pdev->revision >= 0x21) {
30ebc576
JS
8152 /* for revision 0x21, vf vlan filter is per function */
8153 for (i = 0; i < hdev->num_alloc_vport; i++) {
8154 vport = &hdev->vport[i];
8155 ret = hclge_set_vlan_filter_ctrl(hdev,
8156 HCLGE_FILTER_TYPE_VF,
8157 HCLGE_FILTER_FE_EGRESS,
8158 true,
8159 vport->vport_id);
8160 if (ret)
8161 return ret;
8162 }
46a3df9f 8163
64d114f0 8164 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
30ebc576
JS
8165 HCLGE_FILTER_FE_INGRESS, true,
8166 0);
64d114f0
ZL
8167 if (ret)
8168 return ret;
8169 } else {
8170 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8171 HCLGE_FILTER_FE_EGRESS_V1_B,
30ebc576 8172 true, 0);
64d114f0
ZL
8173 if (ret)
8174 return ret;
8175 }
46a3df9f 8176
c60edc17
JS
8177 handle->netdev_flags |= HNAE3_VLAN_FLTR;
8178
5f6ea83f
PL
8179 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8180 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8181 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8182 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8183 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
8184 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
8185
8186 ret = hclge_set_vlan_protocol_type(hdev);
5e43aef8
L
8187 if (ret)
8188 return ret;
46a3df9f 8189
5f6ea83f 8190 for (i = 0; i < hdev->num_alloc_vport; i++) {
741fca16 8191 u16 vlan_tag;
dcb35cce 8192
741fca16
JS
8193 vport = &hdev->vport[i];
8194 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
5f6ea83f 8195
741fca16
JS
8196 ret = hclge_vlan_offload_cfg(vport,
8197 vport->port_base_vlan_cfg.state,
8198 vlan_tag);
5f6ea83f
PL
8199 if (ret)
8200 return ret;
8201 }
8202
dc8131d8 8203 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
46a3df9f
S
8204}
8205
21e043cd
JS
8206static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8207 bool writen_to_tbl)
c6075b19 8208{
8209 struct hclge_vport_vlan_cfg *vlan;
8210
c6075b19 8211 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
8212 if (!vlan)
8213 return;
8214
21e043cd 8215 vlan->hd_tbl_status = writen_to_tbl;
c6075b19 8216 vlan->vlan_id = vlan_id;
8217
8218 list_add_tail(&vlan->node, &vport->vlan_list);
8219}
8220
21e043cd
JS
8221static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
8222{
8223 struct hclge_vport_vlan_cfg *vlan, *tmp;
8224 struct hclge_dev *hdev = vport->back;
8225 int ret;
8226
8227 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8228 if (!vlan->hd_tbl_status) {
8229 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8230 vport->vport_id,
70a21490 8231 vlan->vlan_id, false);
21e043cd
JS
8232 if (ret) {
8233 dev_err(&hdev->pdev->dev,
8234 "restore vport vlan list failed, ret=%d\n",
8235 ret);
8236 return ret;
8237 }
8238 }
8239 vlan->hd_tbl_status = true;
8240 }
8241
8242 return 0;
8243}
8244
8245static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8246 bool is_write_tbl)
c6075b19 8247{
8248 struct hclge_vport_vlan_cfg *vlan, *tmp;
8249 struct hclge_dev *hdev = vport->back;
8250
8251 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8252 if (vlan->vlan_id == vlan_id) {
8253 if (is_write_tbl && vlan->hd_tbl_status)
8254 hclge_set_vlan_filter_hw(hdev,
8255 htons(ETH_P_8021Q),
8256 vport->vport_id,
70a21490 8257 vlan_id,
c6075b19 8258 true);
8259
8260 list_del(&vlan->node);
8261 kfree(vlan);
8262 break;
8263 }
8264 }
8265}
8266
8267void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
8268{
8269 struct hclge_vport_vlan_cfg *vlan, *tmp;
8270 struct hclge_dev *hdev = vport->back;
8271
8272 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8273 if (vlan->hd_tbl_status)
8274 hclge_set_vlan_filter_hw(hdev,
8275 htons(ETH_P_8021Q),
8276 vport->vport_id,
70a21490 8277 vlan->vlan_id,
c6075b19 8278 true);
8279
8280 vlan->hd_tbl_status = false;
8281 if (is_del_list) {
8282 list_del(&vlan->node);
8283 kfree(vlan);
8284 }
8285 }
23b4201d 8286 clear_bit(vport->vport_id, hdev->vf_vlan_full);
c6075b19 8287}
8288
8289void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
8290{
8291 struct hclge_vport_vlan_cfg *vlan, *tmp;
8292 struct hclge_vport *vport;
8293 int i;
8294
c6075b19 8295 for (i = 0; i < hdev->num_alloc_vport; i++) {
8296 vport = &hdev->vport[i];
8297 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8298 list_del(&vlan->node);
8299 kfree(vlan);
8300 }
8301 }
c6075b19 8302}
8303
b524b38f
JS
8304static void hclge_restore_vlan_table(struct hnae3_handle *handle)
8305{
8306 struct hclge_vport *vport = hclge_get_vport(handle);
8307 struct hclge_vport_vlan_cfg *vlan, *tmp;
8308 struct hclge_dev *hdev = vport->back;
b943e033 8309 u16 vlan_proto;
b524b38f
JS
8310 u16 state, vlan_id;
8311 int i;
8312
b524b38f
JS
8313 for (i = 0; i < hdev->num_alloc_vport; i++) {
8314 vport = &hdev->vport[i];
8315 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
8316 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
b524b38f
JS
8317 state = vport->port_base_vlan_cfg.state;
8318
8319 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
8320 hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
70a21490 8321 vport->vport_id, vlan_id,
b524b38f
JS
8322 false);
8323 continue;
8324 }
8325
8326 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
22044f95
JS
8327 int ret;
8328
8329 if (!vlan->hd_tbl_status)
8330 continue;
8331 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8332 vport->vport_id,
8333 vlan->vlan_id, false);
8334 if (ret)
8335 break;
b524b38f
JS
8336 }
8337 }
b524b38f
JS
8338}
8339
b2641e2a 8340int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
052ece6d
PL
8341{
8342 struct hclge_vport *vport = hclge_get_vport(handle);
8343
44e626f7
JS
8344 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8345 vport->rxvlan_cfg.strip_tag1_en = false;
8346 vport->rxvlan_cfg.strip_tag2_en = enable;
8347 } else {
8348 vport->rxvlan_cfg.strip_tag1_en = enable;
8349 vport->rxvlan_cfg.strip_tag2_en = true;
8350 }
052ece6d
PL
8351 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8352 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
44e626f7 8353 vport->rxvlan_cfg.rx_vlan_offload_en = enable;
052ece6d
PL
8354
8355 return hclge_set_vlan_rx_offload_cfg(vport);
8356}
8357
21e043cd
JS
8358static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
8359 u16 port_base_vlan_state,
8360 struct hclge_vlan_info *new_info,
8361 struct hclge_vlan_info *old_info)
8362{
8363 struct hclge_dev *hdev = vport->back;
8364 int ret;
8365
8366 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
8367 hclge_rm_vport_all_vlan_table(vport, false);
8368 return hclge_set_vlan_filter_hw(hdev,
8369 htons(new_info->vlan_proto),
8370 vport->vport_id,
8371 new_info->vlan_tag,
70a21490 8372 false);
21e043cd
JS
8373 }
8374
8375 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
8376 vport->vport_id, old_info->vlan_tag,
70a21490 8377 true);
21e043cd
JS
8378 if (ret)
8379 return ret;
8380
8381 return hclge_add_vport_all_vlan_table(vport);
8382}
8383
8384int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
8385 struct hclge_vlan_info *vlan_info)
8386{
8387 struct hnae3_handle *nic = &vport->nic;
8388 struct hclge_vlan_info *old_vlan_info;
8389 struct hclge_dev *hdev = vport->back;
8390 int ret;
8391
8392 old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
8393
8394 ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
8395 if (ret)
8396 return ret;
8397
8398 if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
8399 /* add new VLAN tag */
8a9a654b
JS
8400 ret = hclge_set_vlan_filter_hw(hdev,
8401 htons(vlan_info->vlan_proto),
21e043cd
JS
8402 vport->vport_id,
8403 vlan_info->vlan_tag,
70a21490 8404 false);
21e043cd
JS
8405 if (ret)
8406 return ret;
8407
8408 /* remove old VLAN tag */
8a9a654b
JS
8409 ret = hclge_set_vlan_filter_hw(hdev,
8410 htons(old_vlan_info->vlan_proto),
21e043cd
JS
8411 vport->vport_id,
8412 old_vlan_info->vlan_tag,
70a21490 8413 true);
21e043cd
JS
8414 if (ret)
8415 return ret;
8416
8417 goto update;
8418 }
8419
8420 ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
8421 old_vlan_info);
8422 if (ret)
8423 return ret;
8424
8425 /* update state only when disable/enable port based VLAN */
8426 vport->port_base_vlan_cfg.state = state;
8427 if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
8428 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
8429 else
8430 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
8431
8432update:
8433 vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
8434 vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
8435 vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
8436
8437 return 0;
8438}
8439
8440static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
8441 enum hnae3_port_base_vlan_state state,
8442 u16 vlan)
8443{
8444 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8445 if (!vlan)
8446 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8447 else
8448 return HNAE3_PORT_BASE_VLAN_ENABLE;
8449 } else {
8450 if (!vlan)
8451 return HNAE3_PORT_BASE_VLAN_DISABLE;
8452 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
8453 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8454 else
8455 return HNAE3_PORT_BASE_VLAN_MODIFY;
8456 }
8457}
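/* Decision table for the helper above (derived from the code):
 *
 *   current state   requested vlan           result
 *   DISABLE         0                        NOCHANGE
 *   DISABLE         non-zero                 ENABLE
 *   ENABLE          0                        DISABLE
 *   ENABLE          same as current tag      NOCHANGE
 *   ENABLE          different tag            MODIFY
 */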
8458
8459static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
8460 u16 vlan, u8 qos, __be16 proto)
8461{
8462 struct hclge_vport *vport = hclge_get_vport(handle);
8463 struct hclge_dev *hdev = vport->back;
8464 struct hclge_vlan_info vlan_info;
8465 u16 state;
8466 int ret;
8467
8468 if (hdev->pdev->revision == 0x20)
8469 return -EOPNOTSUPP;
8470
1c985508
JS
8471 vport = hclge_get_vf_vport(hdev, vfid);
8472 if (!vport)
8473 return -EINVAL;
8474
21e043cd 8475 /* qos is a 3-bit value, so it cannot be bigger than 7 */
1c985508 8476 if (vlan > VLAN_N_VID - 1 || qos > 7)
21e043cd
JS
8477 return -EINVAL;
8478 if (proto != htons(ETH_P_8021Q))
8479 return -EPROTONOSUPPORT;
8480
21e043cd
JS
8481 state = hclge_get_port_base_vlan_state(vport,
8482 vport->port_base_vlan_cfg.state,
8483 vlan);
8484 if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
8485 return 0;
8486
8487 vlan_info.vlan_tag = vlan;
8488 vlan_info.qos = qos;
8489 vlan_info.vlan_proto = ntohs(proto);
8490
92f11ea1
JS
8491 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
8492 return hclge_update_port_base_vlan_cfg(vport, state,
8493 &vlan_info);
8494 } else {
8495 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
1c985508 8496 vport->vport_id, state,
92f11ea1
JS
8497 vlan, qos,
8498 ntohs(proto));
8499 return ret;
8500 }
21e043cd
JS
8501}
8502
59359fc8
JS
8503static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
8504{
8505 struct hclge_vlan_info *vlan_info;
8506 struct hclge_vport *vport;
8507 int ret;
8508 int vf;
8509
8510 /* clear port base vlan for all vf */
8511 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
8512 vport = &hdev->vport[vf];
8513 vlan_info = &vport->port_base_vlan_cfg.vlan_info;
8514
8515 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8516 vport->vport_id,
8517 vlan_info->vlan_tag, true);
8518 if (ret)
8519 dev_err(&hdev->pdev->dev,
8520 "failed to clear vf vlan for vf%d, ret = %d\n",
8521 vf - HCLGE_VF_VPORT_START_NUM, ret);
8522 }
8523}
8524
21e043cd
JS
8525int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
8526 u16 vlan_id, bool is_kill)
8527{
8528 struct hclge_vport *vport = hclge_get_vport(handle);
8529 struct hclge_dev *hdev = vport->back;
8530 bool writen_to_tbl = false;
8531 int ret = 0;
8532
fe4144d4
JS
8533 /* When the device is resetting, firmware is unable to handle
8534 * the mailbox. Just record the vlan id, and remove it after
8535 * the reset has finished.
8536 */
8537 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
8538 set_bit(vlan_id, vport->vlan_del_fail_bmap);
8539 return -EBUSY;
8540 }
8541
46ee7350 8542 /* when port base vlan is enabled, we use the port base vlan as the vlan
fe4144d4
JS
8543 * filter entry. In this case, we don't update the vlan filter table
8544 * when the user adds a new vlan or removes an existing vlan; we just
8545 * update the vport vlan list. The vlan ids in the vlan list will not be
8546 * written into the vlan filter table until port base vlan is disabled
21e043cd
JS
8547 */
8548 if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8549 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
70a21490 8550 vlan_id, is_kill);
21e043cd
JS
8551 writen_to_tbl = true;
8552 }
8553
fe4144d4
JS
8554 if (!ret) {
8555 if (is_kill)
8556 hclge_rm_vport_vlan_table(vport, vlan_id, false);
8557 else
8558 hclge_add_vport_vlan_table(vport, vlan_id,
8559 writen_to_tbl);
8560 } else if (is_kill) {
46ee7350 8561 /* when removing the hw vlan filter failed, record the vlan id,
fe4144d4
JS
8562 * and try to remove it from hw later, to stay consistent
8563 * with the stack
8564 */
8565 set_bit(vlan_id, vport->vlan_del_fail_bmap);
8566 }
8567 return ret;
8568}
21e043cd 8569
fe4144d4
JS
8570static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
8571{
8572#define HCLGE_MAX_SYNC_COUNT 60
21e043cd 8573
fe4144d4
JS
8574 int i, ret, sync_cnt = 0;
8575 u16 vlan_id;
8576
8577 /* start from vport 1 for PF is always alive */
8578 for (i = 0; i < hdev->num_alloc_vport; i++) {
8579 struct hclge_vport *vport = &hdev->vport[i];
8580
8581 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
8582 VLAN_N_VID);
8583 while (vlan_id != VLAN_N_VID) {
8584 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8585 vport->vport_id, vlan_id,
70a21490 8586 true);
fe4144d4
JS
8587 if (ret && ret != -EINVAL)
8588 return;
8589
8590 clear_bit(vlan_id, vport->vlan_del_fail_bmap);
8591 hclge_rm_vport_vlan_table(vport, vlan_id, false);
8592
8593 sync_cnt++;
8594 if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
8595 return;
8596
8597 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
8598 VLAN_N_VID);
8599 }
8600 }
21e043cd
JS
8601}
8602
e6d7d79d 8603static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
46a3df9f 8604{
d44f9b63 8605 struct hclge_config_max_frm_size_cmd *req;
46a3df9f 8606 struct hclge_desc desc;
46a3df9f 8607
46a3df9f
S
8608 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
8609
d44f9b63 8610 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
e6d7d79d 8611 req->max_frm_size = cpu_to_le16(new_mps);
8fc7346c 8612 req->min_frm_size = HCLGE_MAC_MIN_FRAME;
46a3df9f 8613
e6d7d79d 8614 return hclge_cmd_send(&hdev->hw, &desc, 1);
46a3df9f
S
8615}
8616
dd72140c
FL
8617static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
8618{
8619 struct hclge_vport *vport = hclge_get_vport(handle);
818f1675
YL
8620
8621 return hclge_set_vport_mtu(vport, new_mtu);
8622}
8623
8624int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
8625{
dd72140c 8626 struct hclge_dev *hdev = vport->back;
63cbf7a9 8627 int i, max_frm_size, ret;
dd72140c 8628
9e690456 8629 /* HW supports 2 layers of vlan */
e6d7d79d
YL
8630 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
8631 if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
8632 max_frm_size > HCLGE_MAC_MAX_FRAME)
8633 return -EINVAL;
8634
818f1675
YL
8635 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
8636 mutex_lock(&hdev->vport_lock);
8637 /* VF's mps must fit within hdev->mps */
8638 if (vport->vport_id && max_frm_size > hdev->mps) {
8639 mutex_unlock(&hdev->vport_lock);
8640 return -EINVAL;
8641 } else if (vport->vport_id) {
8642 vport->mps = max_frm_size;
8643 mutex_unlock(&hdev->vport_lock);
8644 return 0;
8645 }
8646
8647 /* PF's mps must not be less than the mps of any VF */
8648 for (i = 1; i < hdev->num_alloc_vport; i++)
8649 if (max_frm_size < hdev->vport[i].mps) {
8650 mutex_unlock(&hdev->vport_lock);
8651 return -EINVAL;
8652 }
8653
cdca4c48
YL
8654 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
8655
e6d7d79d 8656 ret = hclge_set_mac_mtu(hdev, max_frm_size);
dd72140c
FL
8657 if (ret) {
8658 dev_err(&hdev->pdev->dev,
8659 "Change mtu fail, ret =%d\n", ret);
818f1675 8660 goto out;
dd72140c
FL
8661 }
8662
e6d7d79d 8663 hdev->mps = max_frm_size;
818f1675 8664 vport->mps = max_frm_size;
e6d7d79d 8665
dd72140c
FL
8666 ret = hclge_buffer_alloc(hdev);
8667 if (ret)
8668 dev_err(&hdev->pdev->dev,
8669 "Allocate buffer fail, ret =%d\n", ret);
8670
818f1675 8671out:
cdca4c48 8672 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
818f1675 8673 mutex_unlock(&hdev->vport_lock);
dd72140c
FL
8674 return ret;
8675}
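/* Worked example of the frame size check above: for new_mtu = 1500,
 * max_frm_size = 1500 + ETH_HLEN(14) + ETH_FCS_LEN(4) + 2 * VLAN_HLEN(4)
 *              = 1526 bytes,
 * which must lie within [HCLGE_MAC_MIN_FRAME, HCLGE_MAC_MAX_FRAME] and,
 * when set for a VF, must not exceed the PF's current mps.
 */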
8676
46a3df9f
S
8677static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
8678 bool enable)
8679{
d44f9b63 8680 struct hclge_reset_tqp_queue_cmd *req;
46a3df9f
S
8681 struct hclge_desc desc;
8682 int ret;
8683
8684 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
8685
d44f9b63 8686 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
46a3df9f 8687 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
b9a8f883
YL
8688 if (enable)
8689 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
46a3df9f
S
8690
8691 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8692 if (ret) {
8693 dev_err(&hdev->pdev->dev,
8694 "Send tqp reset cmd error, status =%d\n", ret);
8695 return ret;
8696 }
8697
8698 return 0;
8699}
8700
8701static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
8702{
d44f9b63 8703 struct hclge_reset_tqp_queue_cmd *req;
46a3df9f
S
8704 struct hclge_desc desc;
8705 int ret;
8706
8707 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
8708
d44f9b63 8709 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
46a3df9f
S
8710 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8711
8712 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8713 if (ret) {
8714 dev_err(&hdev->pdev->dev,
8715 "Get reset status error, status =%d\n", ret);
8716 return ret;
8717 }
8718
e4e87715 8719 return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
46a3df9f
S
8720}
8721
0c29d191 8722u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
814e0274
PL
8723{
8724 struct hnae3_queue *queue;
8725 struct hclge_tqp *tqp;
8726
8727 queue = handle->kinfo.tqp[queue_id];
8728 tqp = container_of(queue, struct hclge_tqp, q);
8729
8730 return tqp->index;
8731}
8732
7fa6be4f 8733int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
46a3df9f
S
8734{
8735 struct hclge_vport *vport = hclge_get_vport(handle);
8736 struct hclge_dev *hdev = vport->back;
8737 int reset_try_times = 0;
8738 int reset_status;
814e0274 8739 u16 queue_gid;
63cbf7a9 8740 int ret;
46a3df9f 8741
814e0274
PL
8742 queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
8743
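 /* Reset flow: disable the queue, assert the TQP soft reset, poll
  * until hardware reports the reset has taken effect, then deassert.
  */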
46a3df9f
S
8744 ret = hclge_tqp_enable(hdev, queue_id, 0, false);
8745 if (ret) {
7fa6be4f
HT
8746 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
8747 return ret;
46a3df9f
S
8748 }
8749
814e0274 8750 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
46a3df9f 8751 if (ret) {
7fa6be4f
HT
8752 dev_err(&hdev->pdev->dev,
8753 "Send reset tqp cmd fail, ret = %d\n", ret);
8754 return ret;
46a3df9f
S
8755 }
8756
46a3df9f 8757 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
814e0274 8758 reset_status = hclge_get_reset_status(hdev, queue_gid);
46a3df9f
S
8759 if (reset_status)
8760 break;
e8df45c2
ZL
8761
8762 /* Wait for tqp hw reset */
8763 usleep_range(1000, 1200);
46a3df9f
S
8764 }
8765
8766 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
7fa6be4f
HT
8767 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
8768 return ret;
46a3df9f
S
8769 }
8770
814e0274 8771 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7fa6be4f
HT
8772 if (ret)
8773 dev_err(&hdev->pdev->dev,
8774 "Deassert the soft reset fail, ret = %d\n", ret);
8775
8776 return ret;
46a3df9f
S
8777}
8778
1a426f8b
PL
8779void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
8780{
8781 struct hclge_dev *hdev = vport->back;
8782 int reset_try_times = 0;
8783 int reset_status;
8784 u16 queue_gid;
8785 int ret;
8786
8787 queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
8788
8789 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8790 if (ret) {
8791 dev_warn(&hdev->pdev->dev,
8792 "Send reset tqp cmd fail, ret = %d\n", ret);
8793 return;
8794 }
8795
1a426f8b 8796 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
1a426f8b
PL
8797 reset_status = hclge_get_reset_status(hdev, queue_gid);
8798 if (reset_status)
8799 break;
e8df45c2
ZL
8800
8801 /* Wait for tqp hw reset */
8802 usleep_range(1000, 1200);
1a426f8b
PL
8803 }
8804
8805 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8806 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
8807 return;
8808 }
8809
8810 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8811 if (ret)
8812 dev_warn(&hdev->pdev->dev,
8813 "Deassert the soft reset fail, ret = %d\n", ret);
8814}
8815
46a3df9f
S
8816static u32 hclge_get_fw_version(struct hnae3_handle *handle)
8817{
8818 struct hclge_vport *vport = hclge_get_vport(handle);
8819 struct hclge_dev *hdev = vport->back;
8820
8821 return hdev->fw_version;
8822}
8823
61387774
PL
8824static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8825{
8826 struct phy_device *phydev = hdev->hw.mac.phydev;
8827
8828 if (!phydev)
8829 return;
8830
70814e81 8831 phy_set_asym_pause(phydev, rx_en, tx_en);
61387774
PL
8832}
8833
8834static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8835{
61387774
PL
8836 int ret;
8837
40173a2e 8838 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
61387774 8839 return 0;
61387774
PL
8840
8841 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
aacbe27e
YL
8842 if (ret)
8843 dev_err(&hdev->pdev->dev,
8844 "configure pauseparam error, ret = %d.\n", ret);
61387774 8845
aacbe27e 8846 return ret;
61387774
PL
8847}
8848
1770a7a3
PL
8849int hclge_cfg_flowctrl(struct hclge_dev *hdev)
8850{
8851 struct phy_device *phydev = hdev->hw.mac.phydev;
8852 u16 remote_advertising = 0;
63cbf7a9 8853 u16 local_advertising;
1770a7a3
PL
8854 u32 rx_pause, tx_pause;
8855 u8 flowctl;
8856
8857 if (!phydev->link || !phydev->autoneg)
8858 return 0;
8859
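 /* Resolve the pause mode from the local and link-partner
  * advertisements, then program the MAC pause configuration.
  */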
3c1bcc86 8860 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
1770a7a3
PL
8861
8862 if (phydev->pause)
8863 remote_advertising = LPA_PAUSE_CAP;
8864
8865 if (phydev->asym_pause)
8866 remote_advertising |= LPA_PAUSE_ASYM;
8867
8868 flowctl = mii_resolve_flowctrl_fdx(local_advertising,
8869 remote_advertising);
8870 tx_pause = flowctl & FLOW_CTRL_TX;
8871 rx_pause = flowctl & FLOW_CTRL_RX;
8872
8873 if (phydev->duplex == HCLGE_MAC_HALF) {
8874 tx_pause = 0;
8875 rx_pause = 0;
8876 }
8877
8878 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
8879}
8880
46a3df9f
S
8881static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
8882 u32 *rx_en, u32 *tx_en)
8883{
8884 struct hclge_vport *vport = hclge_get_vport(handle);
8885 struct hclge_dev *hdev = vport->back;
fb89629f 8886 struct phy_device *phydev = hdev->hw.mac.phydev;
46a3df9f 8887
fb89629f 8888 *auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
46a3df9f
S
8889
8890 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8891 *rx_en = 0;
8892 *tx_en = 0;
8893 return;
8894 }
8895
8896 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
8897 *rx_en = 1;
8898 *tx_en = 0;
8899 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
8900 *tx_en = 1;
8901 *rx_en = 0;
8902 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
8903 *rx_en = 1;
8904 *tx_en = 1;
8905 } else {
8906 *rx_en = 0;
8907 *tx_en = 0;
8908 }
8909}
8910
aacbe27e
YL
8911static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
8912 u32 rx_en, u32 tx_en)
8913{
8914 if (rx_en && tx_en)
8915 hdev->fc_mode_last_time = HCLGE_FC_FULL;
8916 else if (rx_en && !tx_en)
8917 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
8918 else if (!rx_en && tx_en)
8919 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
8920 else
8921 hdev->fc_mode_last_time = HCLGE_FC_NONE;
8922
8923 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
8924}
8925
61387774
PL
8926static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
8927 u32 rx_en, u32 tx_en)
8928{
8929 struct hclge_vport *vport = hclge_get_vport(handle);
8930 struct hclge_dev *hdev = vport->back;
8931 struct phy_device *phydev = hdev->hw.mac.phydev;
8932 u32 fc_autoneg;
8933
fb89629f
JS
8934 if (phydev) {
8935 fc_autoneg = hclge_get_autoneg(handle);
8936 if (auto_neg != fc_autoneg) {
8937 dev_info(&hdev->pdev->dev,
8938 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
8939 return -EOPNOTSUPP;
8940 }
61387774
PL
8941 }
8942
8943 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8944 dev_info(&hdev->pdev->dev,
8945 "Priority flow control enabled. Cannot set link flow control.\n");
8946 return -EOPNOTSUPP;
8947 }
8948
8949 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
8950
aacbe27e
YL
8951 hclge_record_user_pauseparam(hdev, rx_en, tx_en);
8952
fb89629f 8953 if (!auto_neg)
61387774
PL
8954 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
8955
22f48e24
JS
8956 if (phydev)
8957 return phy_start_aneg(phydev);
8958
fb89629f 8959 return -EOPNOTSUPP;
61387774
PL
8960}
8961
46a3df9f
S
8962static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
8963 u8 *auto_neg, u32 *speed, u8 *duplex)
8964{
8965 struct hclge_vport *vport = hclge_get_vport(handle);
8966 struct hclge_dev *hdev = vport->back;
8967
8968 if (speed)
8969 *speed = hdev->hw.mac.speed;
8970 if (duplex)
8971 *duplex = hdev->hw.mac.duplex;
8972 if (auto_neg)
8973 *auto_neg = hdev->hw.mac.autoneg;
8974}
8975
88d10bd6
JS
8976static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
8977 u8 *module_type)
46a3df9f
S
8978{
8979 struct hclge_vport *vport = hclge_get_vport(handle);
8980 struct hclge_dev *hdev = vport->back;
8981
8982 if (media_type)
8983 *media_type = hdev->hw.mac.media_type;
88d10bd6
JS
8984
8985 if (module_type)
8986 *module_type = hdev->hw.mac.module_type;
46a3df9f
S
8987}
8988
8989static void hclge_get_mdix_mode(struct hnae3_handle *handle,
8990 u8 *tp_mdix_ctrl, u8 *tp_mdix)
8991{
8992 struct hclge_vport *vport = hclge_get_vport(handle);
8993 struct hclge_dev *hdev = vport->back;
8994 struct phy_device *phydev = hdev->hw.mac.phydev;
ebaf1908
WL
8995 int mdix_ctrl, mdix, is_resolved;
8996 unsigned int retval;
46a3df9f
S
8997
8998 if (!phydev) {
8999 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
9000 *tp_mdix = ETH_TP_MDI_INVALID;
9001 return;
9002 }
9003
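 /* Select the MDI/MDIX page to read the crossover control and status
  * registers, then switch the PHY back to the copper page.
  */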
9004 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
9005
9006 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
e4e87715
PL
9007 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
9008 HCLGE_PHY_MDIX_CTRL_S);
46a3df9f
S
9009
9010 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
e4e87715
PL
9011 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
9012 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
46a3df9f
S
9013
9014 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
9015
9016 switch (mdix_ctrl) {
9017 case 0x0:
9018 *tp_mdix_ctrl = ETH_TP_MDI;
9019 break;
9020 case 0x1:
9021 *tp_mdix_ctrl = ETH_TP_MDI_X;
9022 break;
9023 case 0x3:
9024 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
9025 break;
9026 default:
9027 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
9028 break;
9029 }
9030
9031 if (!is_resolved)
9032 *tp_mdix = ETH_TP_MDI_INVALID;
9033 else if (mdix)
9034 *tp_mdix = ETH_TP_MDI_X;
9035 else
9036 *tp_mdix = ETH_TP_MDI;
9037}
9038
bb87be87
YL
9039static void hclge_info_show(struct hclge_dev *hdev)
9040{
9041 struct device *dev = &hdev->pdev->dev;
9042
9043 dev_info(dev, "PF info begin:\n");
9044
adcf738b
GL
9045 dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
9046 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
9047 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
9048 dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
9049 dev_info(dev, "Numbers of vmdp vports: %u\n", hdev->num_vmdq_vport);
9050 dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
9051 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
9052 dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
9053 dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
9054 dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
bb87be87
YL
9055 dev_info(dev, "This is %s PF\n",
9056 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
9057 dev_info(dev, "DCB %s\n",
9058 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
9059 dev_info(dev, "MQPRIO %s\n",
9060 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
9061
9062 dev_info(dev, "PF info end.\n");
9063}
9064
994e04f1
HT
9065static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
9066 struct hclge_vport *vport)
9067{
9068 struct hnae3_client *client = vport->nic.client;
9069 struct hclge_dev *hdev = ae_dev->priv;
0bfdf286 9070 int rst_cnt = hdev->rst_stats.reset_cnt;
994e04f1
HT
9071 int ret;
9072
9073 ret = client->ops->init_instance(&vport->nic);
9074 if (ret)
9075 return ret;
9076
9077 set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
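 /* If a reset started while the client instance was initializing,
  * roll back the registration and return -EBUSY.
  */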
7cf9c069
HT
9078 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9079 rst_cnt != hdev->rst_stats.reset_cnt) {
9080 ret = -EBUSY;
9081 goto init_nic_err;
9082 }
9083
00ea6e5f
WL
9084 /* Enable nic hw error interrupts */
9085 ret = hclge_config_nic_hw_error(hdev, true);
bcf643c5 9086 if (ret) {
00ea6e5f
WL
9087 dev_err(&ae_dev->pdev->dev,
9088 "fail(%d) to enable hw error interrupts\n", ret);
bcf643c5
WL
9089 goto init_nic_err;
9090 }
9091
9092 hnae3_set_client_init_flag(client, ae_dev, 1);
00ea6e5f 9093
994e04f1
HT
9094 if (netif_msg_drv(&hdev->vport->nic))
9095 hclge_info_show(hdev);
9096
00ea6e5f 9097 return ret;
7cf9c069
HT
9098
9099init_nic_err:
9100 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9101 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9102 msleep(HCLGE_WAIT_RESET_DONE);
9103
9104 client->ops->uninit_instance(&vport->nic, 0);
9105
9106 return ret;
994e04f1
HT
9107}
9108
9109static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
9110 struct hclge_vport *vport)
9111{
9112 struct hnae3_client *client = vport->roce.client;
9113 struct hclge_dev *hdev = ae_dev->priv;
7cf9c069 9114 int rst_cnt;
994e04f1
HT
9115 int ret;
9116
9117 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
9118 !hdev->nic_client)
9119 return 0;
9120
9121 client = hdev->roce_client;
9122 ret = hclge_init_roce_base_info(vport);
9123 if (ret)
9124 return ret;
9125
7cf9c069 9126 rst_cnt = hdev->rst_stats.reset_cnt;
994e04f1
HT
9127 ret = client->ops->init_instance(&vport->roce);
9128 if (ret)
9129 return ret;
9130
9131 set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
7cf9c069
HT
9132 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9133 rst_cnt != hdev->rst_stats.reset_cnt) {
9134 ret = -EBUSY;
9135 goto init_roce_err;
9136 }
9137
72fcd2be
HT
9138 /* Enable roce ras interrupts */
9139 ret = hclge_config_rocee_ras_interrupt(hdev, true);
9140 if (ret) {
9141 dev_err(&ae_dev->pdev->dev,
9142 "fail(%d) to enable roce ras interrupts\n", ret);
9143 goto init_roce_err;
9144 }
9145
994e04f1
HT
9146 hnae3_set_client_init_flag(client, ae_dev, 1);
9147
9148 return 0;
7cf9c069
HT
9149
9150init_roce_err:
9151 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9152 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9153 msleep(HCLGE_WAIT_RESET_DONE);
9154
9155 hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
9156
9157 return ret;
994e04f1
HT
9158}
9159
46a3df9f
S
9160static int hclge_init_client_instance(struct hnae3_client *client,
9161 struct hnae3_ae_dev *ae_dev)
9162{
9163 struct hclge_dev *hdev = ae_dev->priv;
9164 struct hclge_vport *vport;
9165 int i, ret;
9166
9167 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9168 vport = &hdev->vport[i];
9169
9170 switch (client->type) {
9171 case HNAE3_CLIENT_KNIC:
46a3df9f
S
9172 hdev->nic_client = client;
9173 vport->nic.client = client;
994e04f1 9174 ret = hclge_init_nic_client_instance(ae_dev, vport);
46a3df9f 9175 if (ret)
49dd8054 9176 goto clear_nic;
46a3df9f 9177
994e04f1
HT
9178 ret = hclge_init_roce_client_instance(ae_dev, vport);
9179 if (ret)
9180 goto clear_roce;
46a3df9f 9181
46a3df9f
S
9182 break;
9183 case HNAE3_CLIENT_ROCE:
e92a0843 9184 if (hnae3_dev_roce_supported(hdev)) {
46a3df9f
S
9185 hdev->roce_client = client;
9186 vport->roce.client = client;
9187 }
9188
994e04f1
HT
9189 ret = hclge_init_roce_client_instance(ae_dev, vport);
9190 if (ret)
9191 goto clear_roce;
fa7a4bd5
JS
9192
9193 break;
9194 default:
9195 return -EINVAL;
46a3df9f
S
9196 }
9197 }
9198
37417c66 9199 return 0;
49dd8054
JS
9200
9201clear_nic:
9202 hdev->nic_client = NULL;
9203 vport->nic.client = NULL;
9204 return ret;
9205clear_roce:
9206 hdev->roce_client = NULL;
9207 vport->roce.client = NULL;
9208 return ret;
46a3df9f
S
9209}
9210
9211static void hclge_uninit_client_instance(struct hnae3_client *client,
9212 struct hnae3_ae_dev *ae_dev)
9213{
9214 struct hclge_dev *hdev = ae_dev->priv;
9215 struct hclge_vport *vport;
9216 int i;
9217
9218 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9219 vport = &hdev->vport[i];
a17dcf3f 9220 if (hdev->roce_client) {
2a0bfc36 9221 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
7cf9c069
HT
9222 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9223 msleep(HCLGE_WAIT_RESET_DONE);
9224
46a3df9f
S
9225 hdev->roce_client->ops->uninit_instance(&vport->roce,
9226 0);
a17dcf3f
L
9227 hdev->roce_client = NULL;
9228 vport->roce.client = NULL;
9229 }
46a3df9f
S
9230 if (client->type == HNAE3_CLIENT_ROCE)
9231 return;
49dd8054 9232 if (hdev->nic_client && client->ops->uninit_instance) {
bd9109c9 9233 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
7cf9c069
HT
9234 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9235 msleep(HCLGE_WAIT_RESET_DONE);
9236
46a3df9f 9237 client->ops->uninit_instance(&vport->nic, 0);
a17dcf3f
L
9238 hdev->nic_client = NULL;
9239 vport->nic.client = NULL;
9240 }
46a3df9f
S
9241 }
9242}
9243
9244static int hclge_pci_init(struct hclge_dev *hdev)
9245{
9246 struct pci_dev *pdev = hdev->pdev;
9247 struct hclge_hw *hw;
9248 int ret;
9249
9250 ret = pci_enable_device(pdev);
9251 if (ret) {
9252 dev_err(&pdev->dev, "failed to enable PCI device\n");
3e249d3b 9253 return ret;
46a3df9f
S
9254 }
9255
9256 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9257 if (ret) {
9258 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9259 if (ret) {
9260 dev_err(&pdev->dev,
9261 "can't set consistent PCI DMA");
9262 goto err_disable_device;
9263 }
9264 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
9265 }
9266
9267 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
9268 if (ret) {
9269 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
9270 goto err_disable_device;
9271 }
9272
9273 pci_set_master(pdev);
9274 hw = &hdev->hw;
46a3df9f
S
9275 hw->io_base = pcim_iomap(pdev, 2, 0);
9276 if (!hw->io_base) {
9277 dev_err(&pdev->dev, "Can't map configuration register space\n");
9278 ret = -ENOMEM;
9279 goto err_clr_master;
9280 }
9281
709eb41a
L
9282 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
9283
46a3df9f
S
9284 return 0;
9285err_clr_master:
9286 pci_clear_master(pdev);
9287 pci_release_regions(pdev);
9288err_disable_device:
9289 pci_disable_device(pdev);
46a3df9f
S
9290
9291 return ret;
9292}
9293
9294static void hclge_pci_uninit(struct hclge_dev *hdev)
9295{
9296 struct pci_dev *pdev = hdev->pdev;
9297
6a814413 9298 pcim_iounmap(pdev, hdev->hw.io_base);
887c3820 9299 pci_free_irq_vectors(pdev);
46a3df9f
S
9300 pci_clear_master(pdev);
9301 pci_release_mem_regions(pdev);
9302 pci_disable_device(pdev);
9303}
9304
48569cda
PL
9305static void hclge_state_init(struct hclge_dev *hdev)
9306{
9307 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
9308 set_bit(HCLGE_STATE_DOWN, &hdev->state);
9309 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
9310 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
d5432455 9311 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
48569cda
PL
9312 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
9313 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
9314}
9315
9316static void hclge_state_uninit(struct hclge_dev *hdev)
9317{
9318 set_bit(HCLGE_STATE_DOWN, &hdev->state);
acfc3d55 9319 set_bit(HCLGE_STATE_REMOVING, &hdev->state);
48569cda 9320
65e41e7e
HT
9321 if (hdev->reset_timer.function)
9322 del_timer_sync(&hdev->reset_timer);
7be1b9f3
YL
9323 if (hdev->service_task.work.func)
9324 cancel_delayed_work_sync(&hdev->service_task);
48569cda
PL
9325}
9326
6b9a97ee
HT
9327static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
9328{
8627bded
HT
9329#define HCLGE_FLR_RETRY_WAIT_MS 500
9330#define HCLGE_FLR_RETRY_CNT 5
6b9a97ee 9331
8627bded
HT
9332 struct hclge_dev *hdev = ae_dev->priv;
9333 int retry_cnt = 0;
9334 int ret;
6b9a97ee 9335
8627bded
HT
9336retry:
9337 down(&hdev->reset_sem);
9338 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9339 hdev->reset_type = HNAE3_FLR_RESET;
9340 ret = hclge_reset_prepare(hdev);
9341 if (ret) {
9342 dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
9343 ret);
9344 if (hdev->reset_pending ||
9345 retry_cnt++ < HCLGE_FLR_RETRY_CNT) {
9346 dev_err(&hdev->pdev->dev,
9347 "reset_pending:0x%lx, retry_cnt:%d\n",
9348 hdev->reset_pending, retry_cnt);
9349 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9350 up(&hdev->reset_sem);
9351 msleep(HCLGE_FLR_RETRY_WAIT_MS);
9352 goto retry;
9353 }
9354 }
6b9a97ee 9355
8627bded
HT
9356 /* disable misc vector before FLR done */
9357 hclge_enable_vector(&hdev->misc_vector, false);
9358 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
9359 hdev->rst_stats.flr_rst_cnt++;
6b9a97ee
HT
9360}
9361
9362static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
9363{
9364 struct hclge_dev *hdev = ae_dev->priv;
8627bded
HT
9365 int ret;
9366
9367 hclge_enable_vector(&hdev->misc_vector, true);
6b9a97ee 9368
8627bded
HT
9369 ret = hclge_reset_rebuild(hdev);
9370 if (ret)
9371 dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
9372
9373 hdev->reset_type = HNAE3_NONE_RESET;
9374 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9375 up(&hdev->reset_sem);
6b9a97ee
HT
9376}
9377
31bb229d
PL
9378static void hclge_clear_resetting_state(struct hclge_dev *hdev)
9379{
9380 u16 i;
9381
9382 for (i = 0; i < hdev->num_alloc_vport; i++) {
9383 struct hclge_vport *vport = &hdev->vport[i];
9384 int ret;
9385
9386 /* Send cmd to clear VF's FUNC_RST_ING */
9387 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
9388 if (ret)
9389 dev_warn(&hdev->pdev->dev,
adcf738b 9390 "clear vf(%u) rst failed %d!\n",
31bb229d
PL
9391 vport->vport_id, ret);
9392 }
9393}
9394
46a3df9f
S
9395static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
9396{
9397 struct pci_dev *pdev = ae_dev->pdev;
46a3df9f
S
9398 struct hclge_dev *hdev;
9399 int ret;
9400
9401 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
9402 if (!hdev) {
9403 ret = -ENOMEM;
ffd5656e 9404 goto out;
46a3df9f
S
9405 }
9406
46a3df9f
S
9407 hdev->pdev = pdev;
9408 hdev->ae_dev = ae_dev;
4ed340ab 9409 hdev->reset_type = HNAE3_NONE_RESET;
0742ed7c 9410 hdev->reset_level = HNAE3_FUNC_RESET;
46a3df9f 9411 ae_dev->priv = hdev;
9e690456
GH
9412
 9413 /* HW supports 2 layer vlan */
e6d7d79d 9414 hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
46a3df9f 9415
818f1675 9416 mutex_init(&hdev->vport_lock);
44122887 9417 spin_lock_init(&hdev->fd_rule_lock);
8627bded 9418 sema_init(&hdev->reset_sem, 1);
818f1675 9419
46a3df9f 9420 ret = hclge_pci_init(hdev);
60df7e91 9421 if (ret)
ffd5656e 9422 goto out;
46a3df9f 9423
3efb960f
L
9424 /* Firmware command queue initialize */
9425 ret = hclge_cmd_queue_init(hdev);
60df7e91 9426 if (ret)
ffd5656e 9427 goto err_pci_uninit;
3efb960f
L
9428
9429 /* Firmware command initialize */
46a3df9f
S
9430 ret = hclge_cmd_init(hdev);
9431 if (ret)
ffd5656e 9432 goto err_cmd_uninit;
46a3df9f
S
9433
9434 ret = hclge_get_cap(hdev);
60df7e91 9435 if (ret)
ffd5656e 9436 goto err_cmd_uninit;
46a3df9f
S
9437
9438 ret = hclge_configure(hdev);
9439 if (ret) {
9440 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
ffd5656e 9441 goto err_cmd_uninit;
46a3df9f
S
9442 }
9443
887c3820 9444 ret = hclge_init_msi(hdev);
46a3df9f 9445 if (ret) {
887c3820 9446 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
ffd5656e 9447 goto err_cmd_uninit;
46a3df9f
S
9448 }
9449
466b0c00 9450 ret = hclge_misc_irq_init(hdev);
60df7e91 9451 if (ret)
ffd5656e 9452 goto err_msi_uninit;
466b0c00 9453
46a3df9f
S
9454 ret = hclge_alloc_tqps(hdev);
9455 if (ret) {
9456 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
ffd5656e 9457 goto err_msi_irq_uninit;
46a3df9f
S
9458 }
9459
9460 ret = hclge_alloc_vport(hdev);
60df7e91 9461 if (ret)
ffd5656e 9462 goto err_msi_irq_uninit;
46a3df9f 9463
7df7dad6 9464 ret = hclge_map_tqp(hdev);
60df7e91 9465 if (ret)
2312e050 9466 goto err_msi_irq_uninit;
7df7dad6 9467
c5ef83cb
HT
9468 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
9469 ret = hclge_mac_mdio_config(hdev);
60df7e91 9470 if (ret)
2312e050 9471 goto err_msi_irq_uninit;
cf9cca2d 9472 }
9473
39932473 9474 ret = hclge_init_umv_space(hdev);
60df7e91 9475 if (ret)
9fc55413 9476 goto err_mdiobus_unreg;
39932473 9477
46a3df9f
S
9478 ret = hclge_mac_init(hdev);
9479 if (ret) {
9480 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
ffd5656e 9481 goto err_mdiobus_unreg;
46a3df9f 9482 }
46a3df9f
S
9483
9484 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
9485 if (ret) {
9486 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
ffd5656e 9487 goto err_mdiobus_unreg;
46a3df9f
S
9488 }
9489
b26a6fea
PL
9490 ret = hclge_config_gro(hdev, true);
9491 if (ret)
9492 goto err_mdiobus_unreg;
9493
46a3df9f
S
9494 ret = hclge_init_vlan_config(hdev);
9495 if (ret) {
9496 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
ffd5656e 9497 goto err_mdiobus_unreg;
46a3df9f
S
9498 }
9499
9500 ret = hclge_tm_schd_init(hdev);
9501 if (ret) {
9502 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
ffd5656e 9503 goto err_mdiobus_unreg;
68ece54e
YL
9504 }
9505
268f5dfa 9506 hclge_rss_init_cfg(hdev);
68ece54e
YL
9507 ret = hclge_rss_init_hw(hdev);
9508 if (ret) {
9509 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
ffd5656e 9510 goto err_mdiobus_unreg;
46a3df9f
S
9511 }
9512
f5aac71c
FL
9513 ret = init_mgr_tbl(hdev);
9514 if (ret) {
9515 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
ffd5656e 9516 goto err_mdiobus_unreg;
f5aac71c
FL
9517 }
9518
d695964d
JS
9519 ret = hclge_init_fd_config(hdev);
9520 if (ret) {
9521 dev_err(&pdev->dev,
9522 "fd table init fail, ret=%d\n", ret);
9523 goto err_mdiobus_unreg;
9524 }
9525
a6345787
WL
9526 INIT_KFIFO(hdev->mac_tnl_log);
9527
cacde272
YL
9528 hclge_dcb_ops_set(hdev);
9529
65e41e7e 9530 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
7be1b9f3 9531 INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
46a3df9f 9532
08125454
YL
9533 /* Setup affinity after service timer setup because add_timer_on
9534 * is called in affinity notify.
9535 */
9536 hclge_misc_affinity_setup(hdev);
9537
8e52a602 9538 hclge_clear_all_event_cause(hdev);
31bb229d 9539 hclge_clear_resetting_state(hdev);
8e52a602 9540
e4193e24
SJ
 9541 /* Log and clear the hw errors that have already occurred */
9542 hclge_handle_all_hns_hw_errors(ae_dev);
9543
e3b84ed2
SJ
 9544 /* request delayed reset for the error recovery because an immediate
 9545 * global reset on a PF would affect the pending initialization of other PFs
9546 */
9547 if (ae_dev->hw_err_reset_req) {
9548 enum hnae3_reset_type reset_level;
9549
9550 reset_level = hclge_get_reset_level(ae_dev,
9551 &ae_dev->hw_err_reset_req);
9552 hclge_set_def_reset_request(ae_dev, reset_level);
9553 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
9554 }
9555
466b0c00
L
9556 /* Enable MISC vector(vector0) */
9557 hclge_enable_vector(&hdev->misc_vector, true);
9558
48569cda 9559 hclge_state_init(hdev);
0742ed7c 9560 hdev->last_reset_time = jiffies;
46a3df9f 9561
08d80a4c
HT
9562 dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
9563 HCLGE_DRIVER_NAME);
9564
1c6dfe6f
YL
9565 hclge_task_schedule(hdev, round_jiffies_relative(HZ));
9566
46a3df9f
S
9567 return 0;
9568
ffd5656e
HT
9569err_mdiobus_unreg:
9570 if (hdev->hw.mac.phydev)
9571 mdiobus_unregister(hdev->hw.mac.mdio_bus);
ffd5656e
HT
9572err_msi_irq_uninit:
9573 hclge_misc_irq_uninit(hdev);
9574err_msi_uninit:
9575 pci_free_irq_vectors(pdev);
9576err_cmd_uninit:
232d0d55 9577 hclge_cmd_uninit(hdev);
ffd5656e 9578err_pci_uninit:
6a814413 9579 pcim_iounmap(pdev, hdev->hw.io_base);
ffd5656e 9580 pci_clear_master(pdev);
46a3df9f 9581 pci_release_regions(pdev);
ffd5656e 9582 pci_disable_device(pdev);
ffd5656e 9583out:
46a3df9f
S
9584 return ret;
9585}
9586
c6dc5213 9587static void hclge_stats_clear(struct hclge_dev *hdev)
9588{
1c6dfe6f 9589 memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
c6dc5213 9590}
9591
22044f95
JS
9592static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
9593{
9594 return hclge_config_switch_param(hdev, vf, enable,
9595 HCLGE_SWITCH_ANTI_SPOOF_MASK);
9596}
9597
9598static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
9599{
9600 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9601 HCLGE_FILTER_FE_NIC_INGRESS_B,
9602 enable, vf);
9603}
9604
9605static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
9606{
9607 int ret;
9608
9609 ret = hclge_set_mac_spoofchk(hdev, vf, enable);
9610 if (ret) {
9611 dev_err(&hdev->pdev->dev,
9612 "Set vf %d mac spoof check %s failed, ret=%d\n",
9613 vf, enable ? "on" : "off", ret);
9614 return ret;
9615 }
9616
9617 ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
9618 if (ret)
9619 dev_err(&hdev->pdev->dev,
9620 "Set vf %d vlan spoof check %s failed, ret=%d\n",
9621 vf, enable ? "on" : "off", ret);
9622
9623 return ret;
9624}
9625
9626static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
9627 bool enable)
9628{
9629 struct hclge_vport *vport = hclge_get_vport(handle);
9630 struct hclge_dev *hdev = vport->back;
9631 u32 new_spoofchk = enable ? 1 : 0;
9632 int ret;
9633
9634 if (hdev->pdev->revision == 0x20)
9635 return -EOPNOTSUPP;
9636
9637 vport = hclge_get_vf_vport(hdev, vf);
9638 if (!vport)
9639 return -EINVAL;
9640
9641 if (vport->vf_info.spoofchk == new_spoofchk)
9642 return 0;
9643
9644 if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
9645 dev_warn(&hdev->pdev->dev,
9646 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
9647 vf);
9648 else if (enable && hclge_is_umv_space_full(vport))
9649 dev_warn(&hdev->pdev->dev,
9650 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
9651 vf);
9652
9653 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
9654 if (ret)
9655 return ret;
9656
9657 vport->vf_info.spoofchk = new_spoofchk;
9658 return 0;
9659}
9660
9661static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
9662{
9663 struct hclge_vport *vport = hdev->vport;
9664 int ret;
9665 int i;
9666
9667 if (hdev->pdev->revision == 0x20)
9668 return 0;
9669
9670 /* resume the vf spoof check state after reset */
9671 for (i = 0; i < hdev->num_alloc_vport; i++) {
9672 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
9673 vport->vf_info.spoofchk);
9674 if (ret)
9675 return ret;
9676
9677 vport++;
9678 }
9679
9680 return 0;
9681}
9682
e196ec75
JS
9683static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
9684{
9685 struct hclge_vport *vport = hclge_get_vport(handle);
9686 struct hclge_dev *hdev = vport->back;
9687 u32 new_trusted = enable ? 1 : 0;
9688 bool en_bc_pmc;
9689 int ret;
9690
9691 vport = hclge_get_vf_vport(hdev, vf);
9692 if (!vport)
9693 return -EINVAL;
9694
9695 if (vport->vf_info.trusted == new_trusted)
9696 return 0;
9697
9698 /* Disable promisc mode for VF if it is not trusted any more. */
9699 if (!enable && vport->vf_info.promisc_enable) {
9700 en_bc_pmc = hdev->pdev->revision != 0x20;
9701 ret = hclge_set_vport_promisc_mode(vport, false, false,
9702 en_bc_pmc);
9703 if (ret)
9704 return ret;
9705 vport->vf_info.promisc_enable = 0;
9706 hclge_inform_vf_promisc_info(vport);
9707 }
9708
9709 vport->vf_info.trusted = new_trusted;
9710
9711 return 0;
9712}
9713
ee9e4424
YL
9714static void hclge_reset_vf_rate(struct hclge_dev *hdev)
9715{
9716 int ret;
9717 int vf;
9718
9719 /* reset vf rate to default value */
9720 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
9721 struct hclge_vport *vport = &hdev->vport[vf];
9722
9723 vport->vf_info.max_tx_rate = 0;
9724 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
9725 if (ret)
9726 dev_err(&hdev->pdev->dev,
9727 "vf%d failed to reset to default, ret=%d\n",
9728 vf - HCLGE_VF_VPORT_START_NUM, ret);
9729 }
9730}
9731
9732static int hclge_vf_rate_param_check(struct hclge_dev *hdev, int vf,
9733 int min_tx_rate, int max_tx_rate)
9734{
9735 if (min_tx_rate != 0 ||
9736 max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
9737 dev_err(&hdev->pdev->dev,
9738 "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
9739 min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
9740 return -EINVAL;
9741 }
9742
9743 return 0;
9744}
9745
9746static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
9747 int min_tx_rate, int max_tx_rate, bool force)
9748{
9749 struct hclge_vport *vport = hclge_get_vport(handle);
9750 struct hclge_dev *hdev = vport->back;
9751 int ret;
9752
9753 ret = hclge_vf_rate_param_check(hdev, vf, min_tx_rate, max_tx_rate);
9754 if (ret)
9755 return ret;
9756
9757 vport = hclge_get_vf_vport(hdev, vf);
9758 if (!vport)
9759 return -EINVAL;
9760
9761 if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
9762 return 0;
9763
9764 ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
9765 if (ret)
9766 return ret;
9767
9768 vport->vf_info.max_tx_rate = max_tx_rate;
9769
9770 return 0;
9771}
9772
9773static int hclge_resume_vf_rate(struct hclge_dev *hdev)
9774{
9775 struct hnae3_handle *handle = &hdev->vport->nic;
9776 struct hclge_vport *vport;
9777 int ret;
9778 int vf;
9779
9780 /* resume the vf max_tx_rate after reset */
9781 for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
9782 vport = hclge_get_vf_vport(hdev, vf);
9783 if (!vport)
9784 return -EINVAL;
9785
 9786 /* zero means max rate; after reset, firmware has already set it to
 9787 * max rate, so just continue.
9788 */
9789 if (!vport->vf_info.max_tx_rate)
9790 continue;
9791
9792 ret = hclge_set_vf_rate(handle, vf, 0,
9793 vport->vf_info.max_tx_rate, true);
9794 if (ret) {
9795 dev_err(&hdev->pdev->dev,
9796 "vf%d failed to resume tx_rate:%u, ret=%d\n",
9797 vf, vport->vf_info.max_tx_rate, ret);
9798 return ret;
9799 }
9800 }
9801
9802 return 0;
9803}
9804
a6d818e3
YL
9805static void hclge_reset_vport_state(struct hclge_dev *hdev)
9806{
9807 struct hclge_vport *vport = hdev->vport;
9808 int i;
9809
9810 for (i = 0; i < hdev->num_alloc_vport; i++) {
0f14c5b1 9811 hclge_vport_stop(vport);
a6d818e3
YL
9812 vport++;
9813 }
9814}
9815
4ed340ab
L
9816static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
9817{
9818 struct hclge_dev *hdev = ae_dev->priv;
9819 struct pci_dev *pdev = ae_dev->pdev;
9820 int ret;
9821
9822 set_bit(HCLGE_STATE_DOWN, &hdev->state);
9823
c6dc5213 9824 hclge_stats_clear(hdev);
dc8131d8 9825 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
81a9255e 9826 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
c6dc5213 9827
4ed340ab
L
9828 ret = hclge_cmd_init(hdev);
9829 if (ret) {
9830 dev_err(&pdev->dev, "Cmd queue init failed\n");
9831 return ret;
9832 }
9833
4ed340ab
L
9834 ret = hclge_map_tqp(hdev);
9835 if (ret) {
9836 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
9837 return ret;
9838 }
9839
39932473
JS
9840 hclge_reset_umv_space(hdev);
9841
4ed340ab
L
9842 ret = hclge_mac_init(hdev);
9843 if (ret) {
9844 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
9845 return ret;
9846 }
9847
4ed340ab
L
9848 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
9849 if (ret) {
9850 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
9851 return ret;
9852 }
9853
b26a6fea
PL
9854 ret = hclge_config_gro(hdev, true);
9855 if (ret)
9856 return ret;
9857
4ed340ab
L
9858 ret = hclge_init_vlan_config(hdev);
9859 if (ret) {
9860 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
9861 return ret;
9862 }
9863
44e59e37 9864 ret = hclge_tm_init_hw(hdev, true);
4ed340ab 9865 if (ret) {
f31c1ba6 9866 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
4ed340ab
L
9867 return ret;
9868 }
9869
9870 ret = hclge_rss_init_hw(hdev);
9871 if (ret) {
9872 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
9873 return ret;
9874 }
9875
d0db7ed3
YM
9876 ret = init_mgr_tbl(hdev);
9877 if (ret) {
9878 dev_err(&pdev->dev,
9879 "failed to reinit manager table, ret = %d\n", ret);
9880 return ret;
9881 }
9882
d695964d
JS
9883 ret = hclge_init_fd_config(hdev);
9884 if (ret) {
9b2f3477 9885 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
d695964d
JS
9886 return ret;
9887 }
9888
4fdd0bca
JS
 9889 /* Log and clear the hw errors that have already occurred */
9890 hclge_handle_all_hns_hw_errors(ae_dev);
9891
f3fa4a94 9892 /* Re-enable the hw error interrupts because
00ea6e5f 9893 * the interrupts get disabled on global reset.
01865a50 9894 */
00ea6e5f 9895 ret = hclge_config_nic_hw_error(hdev, true);
f3fa4a94
SJ
9896 if (ret) {
9897 dev_err(&pdev->dev,
00ea6e5f
WL
9898 "fail(%d) to re-enable NIC hw error interrupts\n",
9899 ret);
f3fa4a94
SJ
9900 return ret;
9901 }
01865a50 9902
00ea6e5f
WL
9903 if (hdev->roce_client) {
9904 ret = hclge_config_rocee_ras_interrupt(hdev, true);
9905 if (ret) {
9906 dev_err(&pdev->dev,
9907 "fail(%d) to re-enable roce ras interrupts\n",
9908 ret);
9909 return ret;
9910 }
9911 }
9912
a6d818e3 9913 hclge_reset_vport_state(hdev);
22044f95
JS
9914 ret = hclge_reset_vport_spoofchk(hdev);
9915 if (ret)
9916 return ret;
a6d818e3 9917
ee9e4424
YL
9918 ret = hclge_resume_vf_rate(hdev);
9919 if (ret)
9920 return ret;
9921
4ed340ab
L
9922 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
9923 HCLGE_DRIVER_NAME);
9924
9925 return 0;
9926}
9927
46a3df9f
S
9928static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
9929{
9930 struct hclge_dev *hdev = ae_dev->priv;
9931 struct hclge_mac *mac = &hdev->hw.mac;
9932
ee9e4424 9933 hclge_reset_vf_rate(hdev);
59359fc8 9934 hclge_clear_vf_vlan(hdev);
08125454 9935 hclge_misc_affinity_teardown(hdev);
48569cda 9936 hclge_state_uninit(hdev);
46a3df9f
S
9937
9938 if (mac->phydev)
9939 mdiobus_unregister(mac->mdio_bus);
9940
39932473
JS
9941 hclge_uninit_umv_space(hdev);
9942
466b0c00
L
9943 /* Disable MISC vector(vector0) */
9944 hclge_enable_vector(&hdev->misc_vector, false);
8e52a602
XW
9945 synchronize_irq(hdev->misc_vector.vector_irq);
9946
00ea6e5f 9947 /* Disable all hw interrupts */
a6345787 9948 hclge_config_mac_tnl_int(hdev, false);
00ea6e5f
WL
9949 hclge_config_nic_hw_error(hdev, false);
9950 hclge_config_rocee_ras_interrupt(hdev, false);
9951
232d0d55 9952 hclge_cmd_uninit(hdev);
ca1d7669 9953 hclge_misc_irq_uninit(hdev);
46a3df9f 9954 hclge_pci_uninit(hdev);
818f1675 9955 mutex_destroy(&hdev->vport_lock);
6dd86902 9956 hclge_uninit_vport_mac_table(hdev);
c6075b19 9957 hclge_uninit_vport_vlan_table(hdev);
46a3df9f
S
9958 ae_dev->priv = NULL;
9959}
9960
482d2e9c
PL
9961static u32 hclge_get_max_channels(struct hnae3_handle *handle)
9962{
9963 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9964 struct hclge_vport *vport = hclge_get_vport(handle);
9965 struct hclge_dev *hdev = vport->back;
9966
c3b9c50d
HT
9967 return min_t(u32, hdev->rss_size_max,
9968 vport->alloc_tqps / kinfo->num_tc);
482d2e9c
PL
9969}
9970
9971static void hclge_get_channels(struct hnae3_handle *handle,
9972 struct ethtool_channels *ch)
9973{
482d2e9c
PL
9974 ch->max_combined = hclge_get_max_channels(handle);
9975 ch->other_count = 1;
9976 ch->max_other = 1;
c3b9c50d 9977 ch->combined_count = handle->kinfo.rss_size;
482d2e9c
PL
9978}
9979
09f2af64 9980static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
0d43bf45 9981 u16 *alloc_tqps, u16 *max_rss_size)
09f2af64
PL
9982{
9983 struct hclge_vport *vport = hclge_get_vport(handle);
9984 struct hclge_dev *hdev = vport->back;
09f2af64 9985
0d43bf45 9986 *alloc_tqps = vport->alloc_tqps;
09f2af64
PL
9987 *max_rss_size = hdev->rss_size_max;
9988}
9989
90c68a41
YL
9990static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
9991 bool rxfh_configured)
09f2af64
PL
9992{
9993 struct hclge_vport *vport = hclge_get_vport(handle);
9994 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
354d0fab 9995 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
09f2af64 9996 struct hclge_dev *hdev = vport->back;
354d0fab 9997 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
adcf738b
GL
9998 u16 cur_rss_size = kinfo->rss_size;
9999 u16 cur_tqps = kinfo->num_tqps;
09f2af64 10000 u16 tc_valid[HCLGE_MAX_TC_NUM];
09f2af64
PL
10001 u16 roundup_size;
10002 u32 *rss_indir;
ebaf1908
WL
10003 unsigned int i;
10004 int ret;
09f2af64 10005
672ad0ed 10006 kinfo->req_rss_size = new_tqps_num;
09f2af64 10007
672ad0ed 10008 ret = hclge_tm_vport_map_update(hdev);
09f2af64 10009 if (ret) {
672ad0ed 10010 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
09f2af64
PL
10011 return ret;
10012 }
10013
10014 roundup_size = roundup_pow_of_two(kinfo->rss_size);
10015 roundup_size = ilog2(roundup_size);
10016 /* Set the RSS TC mode according to the new RSS size */
10017 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
10018 tc_valid[i] = 0;
10019
10020 if (!(hdev->hw_tc_map & BIT(i)))
10021 continue;
10022
10023 tc_valid[i] = 1;
10024 tc_size[i] = roundup_size;
10025 tc_offset[i] = kinfo->rss_size * i;
10026 }
10027 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
10028 if (ret)
10029 return ret;
10030
90c68a41
YL
 10031 /* RSS indirection table has been configured by user */
10032 if (rxfh_configured)
10033 goto out;
10034
09f2af64
PL
10035 /* Reinitializes the rss indirect table according to the new RSS size */
10036 rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
10037 if (!rss_indir)
10038 return -ENOMEM;
10039
10040 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
10041 rss_indir[i] = i % kinfo->rss_size;
10042
10043 ret = hclge_set_rss(handle, rss_indir, NULL, 0);
10044 if (ret)
10045 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
10046 ret);
10047
10048 kfree(rss_indir);
10049
90c68a41 10050out:
09f2af64
PL
10051 if (!ret)
10052 dev_info(&hdev->pdev->dev,
adcf738b 10053 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
09f2af64
PL
10054 cur_rss_size, kinfo->rss_size,
10055 cur_tqps, kinfo->rss_size * kinfo->num_tc);
10056
10057 return ret;
10058}
10059
77b34110
FL
10060static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
10061 u32 *regs_num_64_bit)
10062{
10063 struct hclge_desc desc;
10064 u32 total_num;
10065 int ret;
10066
10067 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
10068 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10069 if (ret) {
10070 dev_err(&hdev->pdev->dev,
10071 "Query register number cmd failed, ret = %d.\n", ret);
10072 return ret;
10073 }
10074
10075 *regs_num_32_bit = le32_to_cpu(desc.data[0]);
10076 *regs_num_64_bit = le32_to_cpu(desc.data[1]);
10077
10078 total_num = *regs_num_32_bit + *regs_num_64_bit;
10079 if (!total_num)
10080 return -EINVAL;
10081
10082 return 0;
10083}
10084
10085static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10086 void *data)
10087{
10088#define HCLGE_32_BIT_REG_RTN_DATANUM 8
b37ce587 10089#define HCLGE_32_BIT_DESC_NODATA_LEN 2
77b34110
FL
10090
10091 struct hclge_desc *desc;
10092 u32 *reg_val = data;
10093 __le32 *desc_data;
b37ce587 10094 int nodata_num;
77b34110
FL
10095 int cmd_num;
10096 int i, k, n;
10097 int ret;
10098
10099 if (regs_num == 0)
10100 return 0;
10101
b37ce587
YM
10102 nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
10103 cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
10104 HCLGE_32_BIT_REG_RTN_DATANUM);
77b34110
FL
10105 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10106 if (!desc)
10107 return -ENOMEM;
10108
10109 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
10110 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10111 if (ret) {
10112 dev_err(&hdev->pdev->dev,
10113 "Query 32 bit register cmd failed, ret = %d.\n", ret);
10114 kfree(desc);
10115 return ret;
10116 }
10117
10118 for (i = 0; i < cmd_num; i++) {
10119 if (i == 0) {
10120 desc_data = (__le32 *)(&desc[i].data[0]);
b37ce587 10121 n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
77b34110
FL
10122 } else {
10123 desc_data = (__le32 *)(&desc[i]);
10124 n = HCLGE_32_BIT_REG_RTN_DATANUM;
10125 }
10126 for (k = 0; k < n; k++) {
10127 *reg_val++ = le32_to_cpu(*desc_data++);
10128
10129 regs_num--;
10130 if (!regs_num)
10131 break;
10132 }
10133 }
10134
10135 kfree(desc);
10136 return 0;
10137}
10138
10139static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10140 void *data)
10141{
10142#define HCLGE_64_BIT_REG_RTN_DATANUM 4
b37ce587 10143#define HCLGE_64_BIT_DESC_NODATA_LEN 1
77b34110
FL
10144
10145 struct hclge_desc *desc;
10146 u64 *reg_val = data;
10147 __le64 *desc_data;
b37ce587 10148 int nodata_len;
77b34110
FL
10149 int cmd_num;
10150 int i, k, n;
10151 int ret;
10152
10153 if (regs_num == 0)
10154 return 0;
10155
b37ce587
YM
10156 nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
10157 cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
10158 HCLGE_64_BIT_REG_RTN_DATANUM);
77b34110
FL
10159 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10160 if (!desc)
10161 return -ENOMEM;
10162
10163 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
10164 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10165 if (ret) {
10166 dev_err(&hdev->pdev->dev,
10167 "Query 64 bit register cmd failed, ret = %d.\n", ret);
10168 kfree(desc);
10169 return ret;
10170 }
10171
10172 for (i = 0; i < cmd_num; i++) {
10173 if (i == 0) {
10174 desc_data = (__le64 *)(&desc[i].data[0]);
b37ce587 10175 n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
77b34110
FL
10176 } else {
10177 desc_data = (__le64 *)(&desc[i]);
10178 n = HCLGE_64_BIT_REG_RTN_DATANUM;
10179 }
10180 for (k = 0; k < n; k++) {
10181 *reg_val++ = le64_to_cpu(*desc_data++);
10182
10183 regs_num--;
10184 if (!regs_num)
10185 break;
10186 }
10187 }
10188
10189 kfree(desc);
10190 return 0;
10191}
10192
ea4750ca 10193#define MAX_SEPARATE_NUM 4
ddb54554 10194#define SEPARATOR_VALUE 0xFDFCFBFA
ea4750ca
JS
10195#define REG_NUM_PER_LINE 4
10196#define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
ddb54554
GH
10197#define REG_SEPARATOR_LINE 1
10198#define REG_NUM_REMAIN_MASK 3
10199#define BD_LIST_MAX_NUM 30
ea4750ca 10200
ddb54554 10201int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
77b34110 10202{
ddb54554
GH
 10203 /* prepare 4 commands to query DFX BD number */
10204 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true);
10205 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10206 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true);
10207 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10208 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true);
10209 desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10210 hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true);
10211
10212 return hclge_cmd_send(&hdev->hw, desc, 4);
10213}
10214
10215static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
10216 int *bd_num_list,
10217 u32 type_num)
10218{
ddb54554 10219 u32 entries_per_desc, desc_index, index, offset, i;
9027d043 10220 struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
77b34110
FL
10221 int ret;
10222
ddb54554 10223 ret = hclge_query_bd_num_cmd_send(hdev, desc);
77b34110
FL
10224 if (ret) {
10225 dev_err(&hdev->pdev->dev,
ddb54554
GH
10226 "Get dfx bd num fail, status is %d.\n", ret);
10227 return ret;
77b34110
FL
10228 }
10229
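 /* Each BD count sits at a fixed offset in the reply; convert the
  * offset into a descriptor index and a data-word index.
  */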
ddb54554
GH
10230 entries_per_desc = ARRAY_SIZE(desc[0].data);
10231 for (i = 0; i < type_num; i++) {
10232 offset = hclge_dfx_bd_offset_list[i];
10233 index = offset % entries_per_desc;
10234 desc_index = offset / entries_per_desc;
10235 bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
10236 }
ea4750ca 10237
ddb54554 10238 return ret;
77b34110
FL
10239}
10240
ddb54554
GH
10241static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
10242 struct hclge_desc *desc_src, int bd_num,
10243 enum hclge_opcode_type cmd)
77b34110 10244{
ddb54554
GH
10245 struct hclge_desc *desc = desc_src;
10246 int i, ret;
10247
10248 hclge_cmd_setup_basic_desc(desc, cmd, true);
10249 for (i = 0; i < bd_num - 1; i++) {
10250 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10251 desc++;
10252 hclge_cmd_setup_basic_desc(desc, cmd, true);
10253 }
10254
10255 desc = desc_src;
10256 ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
10257 if (ret)
10258 dev_err(&hdev->pdev->dev,
10259 "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
10260 cmd, ret);
10261
10262 return ret;
10263}
10264
10265static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
10266 void *data)
10267{
10268 int entries_per_desc, reg_num, separator_num, desc_index, index, i;
10269 struct hclge_desc *desc = desc_src;
ea4750ca 10270 u32 *reg = data;
ddb54554
GH
10271
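 /* Copy register words out of the descriptors, then pad the tail with
  * SEPARATOR_VALUE so the dump stays aligned to REG_NUM_PER_LINE.
  */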
10272 entries_per_desc = ARRAY_SIZE(desc->data);
10273 reg_num = entries_per_desc * bd_num;
10274 separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
10275 for (i = 0; i < reg_num; i++) {
10276 index = i % entries_per_desc;
10277 desc_index = i / entries_per_desc;
10278 *reg++ = le32_to_cpu(desc[desc_index].data[index]);
10279 }
10280 for (i = 0; i < separator_num; i++)
10281 *reg++ = SEPARATOR_VALUE;
10282
10283 return reg_num + separator_num;
10284}
10285
10286static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
10287{
10288 u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10289 int data_len_per_desc, data_len, bd_num, i;
10290 int bd_num_list[BD_LIST_MAX_NUM];
77b34110
FL
10291 int ret;
10292
ddb54554
GH
10293 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10294 if (ret) {
10295 dev_err(&hdev->pdev->dev,
10296 "Get dfx reg bd num fail, status is %d.\n", ret);
10297 return ret;
10298 }
77b34110 10299
c593642c 10300 data_len_per_desc = sizeof_field(struct hclge_desc, data);
ddb54554
GH
10301 *len = 0;
10302 for (i = 0; i < dfx_reg_type_num; i++) {
10303 bd_num = bd_num_list[i];
10304 data_len = data_len_per_desc * bd_num;
10305 *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
10306 }
10307
10308 return ret;
10309}
10310
10311static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
10312{
10313 u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10314 int bd_num, bd_num_max, buf_len, i;
10315 int bd_num_list[BD_LIST_MAX_NUM];
10316 struct hclge_desc *desc_src;
10317 u32 *reg = data;
10318 int ret;
10319
10320 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
77b34110
FL
10321 if (ret) {
10322 dev_err(&hdev->pdev->dev,
ddb54554
GH
10323 "Get dfx reg bd num fail, status is %d.\n", ret);
10324 return ret;
10325 }
10326
10327 bd_num_max = bd_num_list[0];
10328 for (i = 1; i < dfx_reg_type_num; i++)
10329 bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
10330
10331 buf_len = sizeof(*desc_src) * bd_num_max;
10332 desc_src = kzalloc(buf_len, GFP_KERNEL);
322cb97c 10333 if (!desc_src)
ddb54554 10334 return -ENOMEM;
77b34110 10335
ddb54554
GH
10336 for (i = 0; i < dfx_reg_type_num; i++) {
10337 bd_num = bd_num_list[i];
10338 ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
10339 hclge_dfx_reg_opcode_list[i]);
10340 if (ret) {
10341 dev_err(&hdev->pdev->dev,
10342 "Get dfx reg fail, status is %d.\n", ret);
10343 break;
10344 }
10345
10346 reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
10347 }
10348
10349 kfree(desc_src);
10350 return ret;
10351}
10352
10353static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
10354 struct hnae3_knic_private_info *kinfo)
10355{
10356#define HCLGE_RING_REG_OFFSET 0x200
10357#define HCLGE_RING_INT_REG_OFFSET 0x4
10358
10359 int i, j, reg_num, separator_num;
10360 int data_num_sum;
10361 u32 *reg = data;
10362
ea4750ca 10363 /* fetching per-PF register values from the PF PCIe register space */
ddb54554
GH
10364 reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
10365 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10366 for (i = 0; i < reg_num; i++)
ea4750ca
JS
10367 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
10368 for (i = 0; i < separator_num; i++)
10369 *reg++ = SEPARATOR_VALUE;
ddb54554 10370 data_num_sum = reg_num + separator_num;
ea4750ca 10371
ddb54554
GH
10372 reg_num = ARRAY_SIZE(common_reg_addr_list);
10373 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10374 for (i = 0; i < reg_num; i++)
ea4750ca
JS
10375 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
10376 for (i = 0; i < separator_num; i++)
10377 *reg++ = SEPARATOR_VALUE;
ddb54554 10378 data_num_sum += reg_num + separator_num;
ea4750ca 10379
ddb54554
GH
10380 reg_num = ARRAY_SIZE(ring_reg_addr_list);
10381 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
ea4750ca 10382 for (j = 0; j < kinfo->num_tqps; j++) {
ddb54554 10383 for (i = 0; i < reg_num; i++)
ea4750ca
JS
10384 *reg++ = hclge_read_dev(&hdev->hw,
10385 ring_reg_addr_list[i] +
ddb54554 10386 HCLGE_RING_REG_OFFSET * j);
ea4750ca
JS
10387 for (i = 0; i < separator_num; i++)
10388 *reg++ = SEPARATOR_VALUE;
10389 }
ddb54554 10390 data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
ea4750ca 10391
ddb54554
GH
10392 reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
10393 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
ea4750ca 10394 for (j = 0; j < hdev->num_msi_used - 1; j++) {
ddb54554 10395 for (i = 0; i < reg_num; i++)
ea4750ca
JS
10396 *reg++ = hclge_read_dev(&hdev->hw,
10397 tqp_intr_reg_addr_list[i] +
ddb54554 10398 HCLGE_RING_INT_REG_OFFSET * j);
ea4750ca
JS
10399 for (i = 0; i < separator_num; i++)
10400 *reg++ = SEPARATOR_VALUE;
10401 }
ddb54554
GH
10402 data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
10403
10404 return data_num_sum;
10405}
10406
10407static int hclge_get_regs_len(struct hnae3_handle *handle)
10408{
10409 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
10410 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10411 struct hclge_vport *vport = hclge_get_vport(handle);
10412 struct hclge_dev *hdev = vport->back;
10413 int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
10414 int regs_lines_32_bit, regs_lines_64_bit;
10415 int ret;
10416
10417 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
10418 if (ret) {
10419 dev_err(&hdev->pdev->dev,
10420 "Get register number failed, ret = %d.\n", ret);
10421 return ret;
10422 }
10423
10424 ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
10425 if (ret) {
10426 dev_err(&hdev->pdev->dev,
10427 "Get dfx reg len failed, ret = %d.\n", ret);
10428 return ret;
10429 }
10430
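 /* Each register block is rounded up to whole REG_LEN_PER_LINE lines
  * plus a separator line, so size the dump length accordingly.
  */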
10431 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
10432 REG_SEPARATOR_LINE;
10433 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
10434 REG_SEPARATOR_LINE;
10435 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
10436 REG_SEPARATOR_LINE;
10437 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
10438 REG_SEPARATOR_LINE;
10439 regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
10440 REG_SEPARATOR_LINE;
10441 regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
10442 REG_SEPARATOR_LINE;
10443
10444 return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
10445 tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
10446 regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
10447}
10448
10449static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
10450 void *data)
10451{
10452 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10453 struct hclge_vport *vport = hclge_get_vport(handle);
10454 struct hclge_dev *hdev = vport->back;
10455 u32 regs_num_32_bit, regs_num_64_bit;
10456 int i, reg_num, separator_num, ret;
10457 u32 *reg = data;
10458
10459 *version = hdev->fw_version;
10460
10461 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
10462 if (ret) {
10463 dev_err(&hdev->pdev->dev,
10464 "Get register number failed, ret = %d.\n", ret);
10465 return;
10466 }
10467
10468 reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
ea4750ca 10469
ea4750ca 10470 ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
77b34110
FL
10471 if (ret) {
10472 dev_err(&hdev->pdev->dev,
10473 "Get 32 bit register failed, ret = %d.\n", ret);
10474 return;
10475 }
ddb54554
GH
10476 reg_num = regs_num_32_bit;
10477 reg += reg_num;
10478 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10479 for (i = 0; i < separator_num; i++)
10480 *reg++ = SEPARATOR_VALUE;
77b34110 10481
ea4750ca 10482 ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
ddb54554 10483 if (ret) {
77b34110
FL
10484 dev_err(&hdev->pdev->dev,
10485 "Get 64 bit register failed, ret = %d.\n", ret);
ddb54554
GH
10486 return;
10487 }
10488 reg_num = regs_num_64_bit * 2;
10489 reg += reg_num;
10490 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10491 for (i = 0; i < separator_num; i++)
10492 *reg++ = SEPARATOR_VALUE;
10493
10494 ret = hclge_get_dfx_reg(hdev, reg);
10495 if (ret)
10496 dev_err(&hdev->pdev->dev,
10497 "Get dfx register failed, ret = %d.\n", ret);
77b34110
FL
10498}
10499
f6f75abc 10500static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
07f8e940
JS
10501{
10502 struct hclge_set_led_state_cmd *req;
10503 struct hclge_desc desc;
10504 int ret;
10505
10506 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
10507
10508 req = (struct hclge_set_led_state_cmd *)desc.data;
e4e87715
PL
10509 hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
10510 HCLGE_LED_LOCATE_STATE_S, locate_led_status);
07f8e940
JS
10511
10512 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10513 if (ret)
10514 dev_err(&hdev->pdev->dev,
10515 "Send set led state cmd error, ret =%d\n", ret);
10516
10517 return ret;
10518}
10519
10520enum hclge_led_status {
10521 HCLGE_LED_OFF,
10522 HCLGE_LED_ON,
10523 HCLGE_LED_NO_CHANGE = 0xFF,
10524};
10525
static int hclge_set_led_id(struct hnae3_handle *handle,
			    enum ethtool_phys_id_state status)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	switch (status) {
	case ETHTOOL_ID_ACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_ON);
	case ETHTOOL_ID_INACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
	default:
		return -EINVAL;
	}
}

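/* Copy the MAC's supported and advertised link-mode bitmaps into the
 * caller's ethtool bitmaps, word by word.
 */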
static void hclge_get_link_mode(struct hnae3_handle *handle,
				unsigned long *supported,
				unsigned long *advertising)
{
	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	unsigned int idx = 0;

	for (; idx < size; idx++) {
		supported[idx] = hdev->hw.mac.supported[idx];
		advertising[idx] = hdev->hw.mac.advertising[idx];
	}
}

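/* ae_ops hook that toggles hardware GRO for this function via
 * hclge_config_gro().
 */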
static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_config_gro(hdev, enable);
}

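/* PF implementation of the hnae3_ae_ops interface.  The hns3 client
 * drivers do not call these functions by name; a client holding a
 * struct hnae3_handle *h typically reaches them through the handle,
 * e.g. (sketch, not code from this file):
 *
 *	ret = h->ae_algo->ops->set_mtu(h, new_mtu);
 */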
static const struct hnae3_ae_ops hclge_ops = {
	.init_ae_dev = hclge_init_ae_dev,
	.uninit_ae_dev = hclge_uninit_ae_dev,
	.flr_prepare = hclge_flr_prepare,
	.flr_done = hclge_flr_done,
	.init_client_instance = hclge_init_client_instance,
	.uninit_client_instance = hclge_uninit_client_instance,
	.map_ring_to_vector = hclge_map_ring_to_vector,
	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
	.get_vector = hclge_get_vector,
	.put_vector = hclge_put_vector,
	.set_promisc_mode = hclge_set_promisc_mode,
	.set_loopback = hclge_set_loopback,
	.start = hclge_ae_start,
	.stop = hclge_ae_stop,
	.client_start = hclge_client_start,
	.client_stop = hclge_client_stop,
	.get_status = hclge_get_status,
	.get_ksettings_an_result = hclge_get_ksettings_an_result,
	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
	.get_media_type = hclge_get_media_type,
	.check_port_speed = hclge_check_port_speed,
	.get_fec = hclge_get_fec,
	.set_fec = hclge_set_fec,
	.get_rss_key_size = hclge_get_rss_key_size,
	.get_rss_indir_size = hclge_get_rss_indir_size,
	.get_rss = hclge_get_rss,
	.set_rss = hclge_set_rss,
	.set_rss_tuple = hclge_set_rss_tuple,
	.get_rss_tuple = hclge_get_rss_tuple,
	.get_tc_size = hclge_get_tc_size,
	.get_mac_addr = hclge_get_mac_addr,
	.set_mac_addr = hclge_set_mac_addr,
	.do_ioctl = hclge_do_ioctl,
	.add_uc_addr = hclge_add_uc_addr,
	.rm_uc_addr = hclge_rm_uc_addr,
	.add_mc_addr = hclge_add_mc_addr,
	.rm_mc_addr = hclge_rm_mc_addr,
	.set_autoneg = hclge_set_autoneg,
	.get_autoneg = hclge_get_autoneg,
	.restart_autoneg = hclge_restart_autoneg,
	.halt_autoneg = hclge_halt_autoneg,
	.get_pauseparam = hclge_get_pauseparam,
	.set_pauseparam = hclge_set_pauseparam,
	.set_mtu = hclge_set_mtu,
	.reset_queue = hclge_reset_tqp,
	.get_stats = hclge_get_stats,
	.get_mac_stats = hclge_get_mac_stat,
	.update_stats = hclge_update_stats,
	.get_strings = hclge_get_strings,
	.get_sset_count = hclge_get_sset_count,
	.get_fw_version = hclge_get_fw_version,
	.get_mdix_mode = hclge_get_mdix_mode,
	.enable_vlan_filter = hclge_enable_vlan_filter,
	.set_vlan_filter = hclge_set_vlan_filter,
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
	.reset_event = hclge_reset_event,
	.get_reset_level = hclge_get_reset_level,
	.set_default_reset_request = hclge_set_def_reset_request,
	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
	.set_channels = hclge_set_channels,
	.get_channels = hclge_get_channels,
	.get_regs_len = hclge_get_regs_len,
	.get_regs = hclge_get_regs,
	.set_led_id = hclge_set_led_id,
	.get_link_mode = hclge_get_link_mode,
	.add_fd_entry = hclge_add_fd_entry,
	.del_fd_entry = hclge_del_fd_entry,
	.del_all_fd_entries = hclge_del_all_fd_entries,
	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
	.get_fd_rule_info = hclge_get_fd_rule_info,
	.get_fd_all_rules = hclge_get_all_rules,
	.restore_fd_rules = hclge_restore_fd_entries,
	.enable_fd = hclge_enable_fd,
	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
	.dbg_run_cmd = hclge_dbg_run_cmd,
	.handle_hw_ras_error = hclge_handle_hw_ras_error,
	.get_hw_reset_stat = hclge_get_hw_reset_stat,
	.ae_dev_resetting = hclge_ae_dev_resetting,
	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
	.set_gro_en = hclge_gro_en,
	.get_global_queue_id = hclge_covert_handle_qid_global,
	.set_timer_task = hclge_set_timer_task,
	.mac_connect_phy = hclge_mac_connect_phy,
	.mac_disconnect_phy = hclge_mac_disconnect_phy,
	.restore_vlan_table = hclge_restore_vlan_table,
	.get_vf_config = hclge_get_vf_config,
	.set_vf_link_state = hclge_set_vf_link_state,
	.set_vf_spoofchk = hclge_set_vf_spoofchk,
	.set_vf_trust = hclge_set_vf_trust,
	.set_vf_rate = hclge_set_vf_rate,
	.set_vf_mac = hclge_set_vf_mac,
};

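/* Binds hclge_ops to the PCI IDs in ae_algo_pci_tbl so the hnae3 framework
 * can attach this algorithm to matching PF devices.
 */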
static struct hnae3_ae_algo ae_algo = {
	.ops = &hclge_ops,
	.pdev_id_table = ae_algo_pci_tbl,
};

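/* Module init: create the hclge workqueue (WQ_MEM_RECLAIM, used by the
 * service and reset tasks elsewhere in this driver) and register the
 * algorithm with the hnae3 framework.
 */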
static int hclge_init(void)
{
	pr_info("%s is initializing\n", HCLGE_NAME);

	hclge_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, HCLGE_NAME);
	if (!hclge_wq) {
		pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
		return -ENOMEM;
	}

	hnae3_register_ae_algo(&ae_algo);

	return 0;
}

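/* Module exit: unregister from hnae3 before destroying the workqueue,
 * presumably so that no ae_dev can queue new work once the queue is gone.
 */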
static void hclge_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algo);
	destroy_workqueue(hclge_wq);
}
module_init(hclge_init);
module_exit(hclge_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);