net: hns3: remove unnecessary MAC enable in app loopback
linux-block.git: drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1// SPDX-License-Identifier: GPL-2.0+
2// Copyright (c) 2016-2017 Hisilicon Limited.
3
4#include <linux/acpi.h>
5#include <linux/device.h>
6#include <linux/etherdevice.h>
7#include <linux/init.h>
8#include <linux/interrupt.h>
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/netdevice.h>
12#include <linux/pci.h>
13#include <linux/platform_device.h>
2866ccb2 14#include <linux/if_vlan.h>
962e31bd 15#include <linux/crash_dump.h>
f2f432f2 16#include <net/rtnetlink.h>
46a3df9f 17#include "hclge_cmd.h"
cacde272 18#include "hclge_dcb.h"
46a3df9f 19#include "hclge_main.h"
dde1a86e 20#include "hclge_mbx.h"
21#include "hclge_mdio.h"
22#include "hclge_tm.h"
5a9f0eac 23#include "hclge_err.h"
24#include "hnae3.h"
25
26#define HCLGE_NAME "hclge"
27#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
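/* HCLGE_MAC_STATS_FIELD_OFF() gives the byte offset of a counter within
 * struct hclge_mac_stats, and HCLGE_STATS_READ() reads the u64 counter found
 * at such an offset; the g_mac_stats_string table below pairs each ethtool
 * statistic name with its offset so the two macros can be used together.
 */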
46a3df9f 29
ebaf1908 30#define HCLGE_BUF_SIZE_UNIT 256U
31#define HCLGE_BUF_MUL_BY 2
32#define HCLGE_BUF_DIV_BY 2
33#define NEED_RESERVE_TC_NUM 2
34#define BUF_MAX_PERCENT 100
35#define BUF_RESERVE_PERCENT 90
b9a400ac 36
63cbf7a9 37#define HCLGE_RESET_MAX_FAIL_CNT 5
38#define HCLGE_RESET_SYNC_TIME 100
39#define HCLGE_PF_RESET_SYNC_TIME 20
40#define HCLGE_PF_RESET_SYNC_CNT 1500
63cbf7a9 41
42/* Get DFX BD number offset */
43#define HCLGE_DFX_BIOS_BD_OFFSET 1
44#define HCLGE_DFX_SSU_0_BD_OFFSET 2
45#define HCLGE_DFX_SSU_1_BD_OFFSET 3
46#define HCLGE_DFX_IGU_BD_OFFSET 4
47#define HCLGE_DFX_RPU_0_BD_OFFSET 5
48#define HCLGE_DFX_RPU_1_BD_OFFSET 6
49#define HCLGE_DFX_NCSI_BD_OFFSET 7
50#define HCLGE_DFX_RTC_BD_OFFSET 8
51#define HCLGE_DFX_PPP_BD_OFFSET 9
52#define HCLGE_DFX_RCB_BD_OFFSET 10
53#define HCLGE_DFX_TQP_BD_OFFSET 11
54#define HCLGE_DFX_SSU_2_BD_OFFSET 12
55
56#define HCLGE_LINK_STATUS_MS 10
57
58#define HCLGE_VF_VPORT_START_NUM 1
59
e6d7d79d 60static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
46a3df9f 61static int hclge_init_vlan_config(struct hclge_dev *hdev);
fe4144d4 62static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
4ed340ab 63static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
4f765d3e 64static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
65static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
66static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
67static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
68 unsigned long *addr);
1cbc662d 69static int hclge_set_default_loopback(struct hclge_dev *hdev);
46a3df9f 70
ee4bcd3b 71static void hclge_sync_mac_table(struct hclge_dev *hdev);
039ba863 72static void hclge_restore_hw_table(struct hclge_dev *hdev);
c631c696 73static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
ee4bcd3b 74
75static struct hnae3_ae_algo ae_algo;
76
77static struct workqueue_struct *hclge_wq;
78
79static const struct pci_device_id ae_algo_pci_tbl[] = {
80 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
81 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
82 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
83 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
84 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
85 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
86 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
e92a0843 87 /* required last entry */
88 {0, }
89};
90
91MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
92
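/* The register address tables below group the command-queue, misc-vector,
 * per-ring and per-vector interrupt register offsets; they are presumably
 * walked by the driver's register-dump support (not shown in this section).
 */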
93static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
94 HCLGE_CMDQ_TX_ADDR_H_REG,
95 HCLGE_CMDQ_TX_DEPTH_REG,
96 HCLGE_CMDQ_TX_TAIL_REG,
97 HCLGE_CMDQ_TX_HEAD_REG,
98 HCLGE_CMDQ_RX_ADDR_L_REG,
99 HCLGE_CMDQ_RX_ADDR_H_REG,
100 HCLGE_CMDQ_RX_DEPTH_REG,
101 HCLGE_CMDQ_RX_TAIL_REG,
102 HCLGE_CMDQ_RX_HEAD_REG,
103 HCLGE_VECTOR0_CMDQ_SRC_REG,
104 HCLGE_CMDQ_INTR_STS_REG,
105 HCLGE_CMDQ_INTR_EN_REG,
106 HCLGE_CMDQ_INTR_GEN_REG};
107
108static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
109 HCLGE_VECTOR0_OTER_EN_REG,
110 HCLGE_MISC_RESET_STS_REG,
111 HCLGE_MISC_VECTOR_INT_STS,
112 HCLGE_GLOBAL_RESET_REG,
113 HCLGE_FUN_RST_ING,
114 HCLGE_GRO_EN_REG};
115
116static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
117 HCLGE_RING_RX_ADDR_H_REG,
118 HCLGE_RING_RX_BD_NUM_REG,
119 HCLGE_RING_RX_BD_LENGTH_REG,
120 HCLGE_RING_RX_MERGE_EN_REG,
121 HCLGE_RING_RX_TAIL_REG,
122 HCLGE_RING_RX_HEAD_REG,
123 HCLGE_RING_RX_FBD_NUM_REG,
124 HCLGE_RING_RX_OFFSET_REG,
125 HCLGE_RING_RX_FBD_OFFSET_REG,
126 HCLGE_RING_RX_STASH_REG,
127 HCLGE_RING_RX_BD_ERR_REG,
128 HCLGE_RING_TX_ADDR_L_REG,
129 HCLGE_RING_TX_ADDR_H_REG,
130 HCLGE_RING_TX_BD_NUM_REG,
131 HCLGE_RING_TX_PRIORITY_REG,
132 HCLGE_RING_TX_TC_REG,
133 HCLGE_RING_TX_MERGE_EN_REG,
134 HCLGE_RING_TX_TAIL_REG,
135 HCLGE_RING_TX_HEAD_REG,
136 HCLGE_RING_TX_FBD_NUM_REG,
137 HCLGE_RING_TX_OFFSET_REG,
138 HCLGE_RING_TX_EBD_NUM_REG,
139 HCLGE_RING_TX_EBD_OFFSET_REG,
140 HCLGE_RING_TX_BD_ERR_REG,
141 HCLGE_RING_EN_REG};
142
143static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
144 HCLGE_TQP_INTR_GL0_REG,
145 HCLGE_TQP_INTR_GL1_REG,
146 HCLGE_TQP_INTR_GL2_REG,
147 HCLGE_TQP_INTR_RL_REG};
148
46a3df9f 149static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
eb66d503 150 "App Loopback test",
151 "Serdes serial Loopback test",
152 "Serdes parallel Loopback test",
153 "Phy Loopback test"
154};
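/* The selftest strings above are indexed by the HNAE3_LOOP_* loopback types
 * (app, serial serdes, parallel serdes, phy), as done in hclge_get_strings().
 */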
155
156static const struct hclge_comm_stats_str g_mac_stats_string[] = {
157 {"mac_tx_mac_pause_num",
158 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
159 {"mac_rx_mac_pause_num",
160 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
d174ea75 161 {"mac_tx_control_pkt_num",
162 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
163 {"mac_rx_control_pkt_num",
164 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
165 {"mac_tx_pfc_pkt_num",
166 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
167 {"mac_tx_pfc_pri0_pkt_num",
168 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
169 {"mac_tx_pfc_pri1_pkt_num",
170 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
171 {"mac_tx_pfc_pri2_pkt_num",
172 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
173 {"mac_tx_pfc_pri3_pkt_num",
174 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
175 {"mac_tx_pfc_pri4_pkt_num",
176 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
177 {"mac_tx_pfc_pri5_pkt_num",
178 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
179 {"mac_tx_pfc_pri6_pkt_num",
180 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
181 {"mac_tx_pfc_pri7_pkt_num",
182 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
d174ea75 183 {"mac_rx_pfc_pkt_num",
184 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
185 {"mac_rx_pfc_pri0_pkt_num",
186 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
187 {"mac_rx_pfc_pri1_pkt_num",
188 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
189 {"mac_rx_pfc_pri2_pkt_num",
190 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
191 {"mac_rx_pfc_pri3_pkt_num",
192 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
193 {"mac_rx_pfc_pri4_pkt_num",
194 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
195 {"mac_rx_pfc_pri5_pkt_num",
196 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
197 {"mac_rx_pfc_pri6_pkt_num",
198 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
199 {"mac_rx_pfc_pri7_pkt_num",
200 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
201 {"mac_tx_total_pkt_num",
202 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
203 {"mac_tx_total_oct_num",
204 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
205 {"mac_tx_good_pkt_num",
206 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
207 {"mac_tx_bad_pkt_num",
208 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
209 {"mac_tx_good_oct_num",
210 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
211 {"mac_tx_bad_oct_num",
212 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
213 {"mac_tx_uni_pkt_num",
214 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
215 {"mac_tx_multi_pkt_num",
216 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
217 {"mac_tx_broad_pkt_num",
218 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
219 {"mac_tx_undersize_pkt_num",
220 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
221 {"mac_tx_oversize_pkt_num",
222 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
223 {"mac_tx_64_oct_pkt_num",
224 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
225 {"mac_tx_65_127_oct_pkt_num",
226 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
227 {"mac_tx_128_255_oct_pkt_num",
228 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
229 {"mac_tx_256_511_oct_pkt_num",
230 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
231 {"mac_tx_512_1023_oct_pkt_num",
232 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
233 {"mac_tx_1024_1518_oct_pkt_num",
234 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
235 {"mac_tx_1519_2047_oct_pkt_num",
236 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
237 {"mac_tx_2048_4095_oct_pkt_num",
238 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
239 {"mac_tx_4096_8191_oct_pkt_num",
240 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
241 {"mac_tx_8192_9216_oct_pkt_num",
242 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
243 {"mac_tx_9217_12287_oct_pkt_num",
244 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
245 {"mac_tx_12288_16383_oct_pkt_num",
246 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
247 {"mac_tx_1519_max_good_pkt_num",
248 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
249 {"mac_tx_1519_max_bad_pkt_num",
250 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
251 {"mac_rx_total_pkt_num",
252 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
253 {"mac_rx_total_oct_num",
254 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
255 {"mac_rx_good_pkt_num",
256 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
257 {"mac_rx_bad_pkt_num",
258 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
259 {"mac_rx_good_oct_num",
260 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
261 {"mac_rx_bad_oct_num",
262 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
263 {"mac_rx_uni_pkt_num",
264 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
265 {"mac_rx_multi_pkt_num",
266 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
267 {"mac_rx_broad_pkt_num",
268 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
269 {"mac_rx_undersize_pkt_num",
270 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
271 {"mac_rx_oversize_pkt_num",
272 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
273 {"mac_rx_64_oct_pkt_num",
274 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
275 {"mac_rx_65_127_oct_pkt_num",
276 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
277 {"mac_rx_128_255_oct_pkt_num",
278 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
279 {"mac_rx_256_511_oct_pkt_num",
280 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
281 {"mac_rx_512_1023_oct_pkt_num",
282 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
283 {"mac_rx_1024_1518_oct_pkt_num",
284 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
285 {"mac_rx_1519_2047_oct_pkt_num",
286 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
287 {"mac_rx_2048_4095_oct_pkt_num",
288 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
289 {"mac_rx_4096_8191_oct_pkt_num",
290 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
291 {"mac_rx_8192_9216_oct_pkt_num",
292 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
293 {"mac_rx_9217_12287_oct_pkt_num",
294 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
295 {"mac_rx_12288_16383_oct_pkt_num",
296 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
297 {"mac_rx_1519_max_good_pkt_num",
298 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
299 {"mac_rx_1519_max_bad_pkt_num",
300 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
46a3df9f 301
302 {"mac_tx_fragment_pkt_num",
303 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
304 {"mac_tx_undermin_pkt_num",
305 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
306 {"mac_tx_jabber_pkt_num",
307 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
308 {"mac_tx_err_all_pkt_num",
309 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
310 {"mac_tx_from_app_good_pkt_num",
311 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
312 {"mac_tx_from_app_bad_pkt_num",
313 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
314 {"mac_rx_fragment_pkt_num",
315 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
316 {"mac_rx_undermin_pkt_num",
317 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
318 {"mac_rx_jabber_pkt_num",
319 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
320 {"mac_rx_fcs_err_pkt_num",
321 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
322 {"mac_rx_send_app_good_pkt_num",
323 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
324 {"mac_rx_send_app_bad_pkt_num",
325 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
326};
327
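/* Static MAC manager table: its single entry matches the LLDP nearest-bridge
 * multicast address 01:80:c2:00:00:0e together with ethertype ETH_P_LLDP, so
 * LLDP frames can be handled on the port(s) selected by i_port_bitmap.
 */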
328static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
329 {
330 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
7efffc64 331 .ethter_type = cpu_to_le16(ETH_P_LLDP),
0e02a53d 332 .mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
333 .i_port_bitmap = 0x1,
334 },
335};
336
337static const u8 hclge_hash_key[] = {
338 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
339 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
340 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
341 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
342 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
343};
344
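/* The two DFX tables below are index-aligned: entry i of
 * hclge_dfx_bd_offset_list holds the BD number offset for the module that is
 * queried with entry i of hclge_dfx_reg_opcode_list.
 */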
345static const u32 hclge_dfx_bd_offset_list[] = {
346 HCLGE_DFX_BIOS_BD_OFFSET,
347 HCLGE_DFX_SSU_0_BD_OFFSET,
348 HCLGE_DFX_SSU_1_BD_OFFSET,
349 HCLGE_DFX_IGU_BD_OFFSET,
350 HCLGE_DFX_RPU_0_BD_OFFSET,
351 HCLGE_DFX_RPU_1_BD_OFFSET,
352 HCLGE_DFX_NCSI_BD_OFFSET,
353 HCLGE_DFX_RTC_BD_OFFSET,
354 HCLGE_DFX_PPP_BD_OFFSET,
355 HCLGE_DFX_RCB_BD_OFFSET,
356 HCLGE_DFX_TQP_BD_OFFSET,
357 HCLGE_DFX_SSU_2_BD_OFFSET
358};
359
360static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
361 HCLGE_OPC_DFX_BIOS_COMMON_REG,
362 HCLGE_OPC_DFX_SSU_REG_0,
363 HCLGE_OPC_DFX_SSU_REG_1,
364 HCLGE_OPC_DFX_IGU_EGU_REG,
365 HCLGE_OPC_DFX_RPU_REG_0,
366 HCLGE_OPC_DFX_RPU_REG_1,
367 HCLGE_OPC_DFX_NCSI_REG,
368 HCLGE_OPC_DFX_RTC_REG,
369 HCLGE_OPC_DFX_PPP_REG,
370 HCLGE_OPC_DFX_RCB_REG,
371 HCLGE_OPC_DFX_TQP_REG,
372 HCLGE_OPC_DFX_SSU_REG_2
373};
374
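/* Each key_info entry below pairs a key field with what appears to be the
 * field's width in bits (48 for MAC addresses, 32 for IPv4 addresses, 16 for
 * ports and VLAN tags, and so on).
 */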
375static const struct key_info meta_data_key_info[] = {
376 { PACKET_TYPE_ID, 6},
377 { IP_FRAGEMENT, 1},
378 { ROCE_TYPE, 1},
379 { NEXT_KEY, 5},
380 { VLAN_NUMBER, 2},
381 { SRC_VPORT, 12},
382 { DST_VPORT, 12},
383 { TUNNEL_PACKET, 1},
384};
385
386static const struct key_info tuple_key_info[] = {
387 { OUTER_DST_MAC, 48},
388 { OUTER_SRC_MAC, 48},
389 { OUTER_VLAN_TAG_FST, 16},
390 { OUTER_VLAN_TAG_SEC, 16},
391 { OUTER_ETH_TYPE, 16},
392 { OUTER_L2_RSV, 16},
393 { OUTER_IP_TOS, 8},
394 { OUTER_IP_PROTO, 8},
395 { OUTER_SRC_IP, 32},
396 { OUTER_DST_IP, 32},
397 { OUTER_L3_RSV, 16},
398 { OUTER_SRC_PORT, 16},
399 { OUTER_DST_PORT, 16},
400 { OUTER_L4_RSV, 32},
401 { OUTER_TUN_VNI, 24},
402 { OUTER_TUN_FLOW_ID, 8},
403 { INNER_DST_MAC, 48},
404 { INNER_SRC_MAC, 48},
405 { INNER_VLAN_TAG_FST, 16},
406 { INNER_VLAN_TAG_SEC, 16},
407 { INNER_ETH_TYPE, 16},
408 { INNER_L2_RSV, 16},
409 { INNER_IP_TOS, 8},
410 { INNER_IP_PROTO, 8},
411 { INNER_SRC_IP, 32},
412 { INNER_DST_IP, 32},
413 { INNER_L3_RSV, 16},
414 { INNER_SRC_PORT, 16},
415 { INNER_DST_PORT, 16},
416 { INNER_L4_RSV, 32},
417};
418
d174ea75 419static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
46a3df9f 420{
91f384f6 421#define HCLGE_MAC_CMD_NUM 21
46a3df9f 422
1c6dfe6f 423 u64 *data = (u64 *)(&hdev->mac_stats);
46a3df9f 424 struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
a90bb9a5 425 __le64 *desc_data;
426 int i, k, n;
427 int ret;
428
429 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
430 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
431 if (ret) {
432 dev_err(&hdev->pdev->dev,
433 "Get MAC pkt stats fail, status = %d.\n", ret);
434
435 return ret;
436 }
437
438 for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
d174ea75 439 /* for special opcode 0032, only the first desc has the head */
46a3df9f 440 if (unlikely(i == 0)) {
a90bb9a5 441 desc_data = (__le64 *)(&desc[i].data[0]);
d174ea75 442 n = HCLGE_RD_FIRST_STATS_NUM;
46a3df9f 443 } else {
a90bb9a5 444 desc_data = (__le64 *)(&desc[i]);
d174ea75 445 n = HCLGE_RD_OTHER_STATS_NUM;
46a3df9f 446 }
d174ea75 447
46a3df9f 448 for (k = 0; k < n; k++) {
d174ea75 449 *data += le64_to_cpu(*desc_data);
450 data++;
451 desc_data++;
452 }
453 }
454
455 return 0;
456}
457
d174ea75 458static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
459{
1c6dfe6f 460 u64 *data = (u64 *)(&hdev->mac_stats);
d174ea75 461 struct hclge_desc *desc;
462 __le64 *desc_data;
463 u16 i, k, n;
464 int ret;
465
466 /* This may be called inside atomic sections,
467 * so GFP_ATOMIC is more suitable here
468 */
469 desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
470 if (!desc)
471 return -ENOMEM;
9e6717af 472
d174ea75 473 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
474 ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
475 if (ret) {
476 kfree(desc);
477 return ret;
478 }
479
480 for (i = 0; i < desc_num; i++) {
481 /* for special opcode 0034, only the first desc has the head */
482 if (i == 0) {
483 desc_data = (__le64 *)(&desc[i].data[0]);
484 n = HCLGE_RD_FIRST_STATS_NUM;
485 } else {
486 desc_data = (__le64 *)(&desc[i]);
487 n = HCLGE_RD_OTHER_STATS_NUM;
488 }
489
490 for (k = 0; k < n; k++) {
491 *data += le64_to_cpu(*desc_data);
492 data++;
493 desc_data++;
494 }
495 }
496
497 kfree(desc);
498
499 return 0;
500}
501
502static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
503{
504 struct hclge_desc desc;
505 __le32 *desc_data;
506 u32 reg_num;
507 int ret;
508
509 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
510 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
511 if (ret)
512 return ret;
513
514 desc_data = (__le32 *)(&desc.data[0]);
515 reg_num = le32_to_cpu(*desc_data);
516
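/* Convert the register count into a descriptor count; the expression below
 * is equivalent to 1 + DIV_ROUND_UP(reg_num - 3, 4), i.e. the first
 * descriptor covers three registers and every following one covers four.
 */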
517 *desc_num = 1 + ((reg_num - 3) >> 2) +
518 (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
519
520 return 0;
521}
522
523static int hclge_mac_update_stats(struct hclge_dev *hdev)
524{
525 u32 desc_num;
526 int ret;
527
528 ret = hclge_mac_query_reg_num(hdev, &desc_num);
529
530 /* The firmware supports the new statistics acquisition method */
531 if (!ret)
532 ret = hclge_mac_update_stats_complete(hdev, desc_num);
533 else if (ret == -EOPNOTSUPP)
534 ret = hclge_mac_update_stats_defective(hdev);
535 else
536 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
537
538 return ret;
539}
540
541static int hclge_tqps_update_stats(struct hnae3_handle *handle)
542{
543 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
544 struct hclge_vport *vport = hclge_get_vport(handle);
545 struct hclge_dev *hdev = vport->back;
546 struct hnae3_queue *queue;
547 struct hclge_desc desc[1];
548 struct hclge_tqp *tqp;
549 int ret, i;
550
551 for (i = 0; i < kinfo->num_tqps; i++) {
552 queue = handle->kinfo.tqp[i];
553 tqp = container_of(queue, struct hclge_tqp, q);
554 /* command : HCLGE_OPC_QUERY_RX_STATS */
4279b4d5 555 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
556 true);
557
a90bb9a5 558 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
559 ret = hclge_cmd_send(&hdev->hw, desc, 1);
560 if (ret) {
561 dev_err(&hdev->pdev->dev,
562 "Query tqp stat fail, status = %d,queue = %d\n",
9b2f3477 563 ret, i);
564 return ret;
565 }
566 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
cf72fa63 567 le32_to_cpu(desc[0].data[1]);
568 }
569
570 for (i = 0; i < kinfo->num_tqps; i++) {
571 queue = handle->kinfo.tqp[i];
572 tqp = container_of(queue, struct hclge_tqp, q);
573 /* command : HCLGE_OPC_QUERY_TX_STATS */
574 hclge_cmd_setup_basic_desc(&desc[0],
4279b4d5 575 HCLGE_OPC_QUERY_TX_STATS,
576 true);
577
a90bb9a5 578 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
579 ret = hclge_cmd_send(&hdev->hw, desc, 1);
580 if (ret) {
581 dev_err(&hdev->pdev->dev,
582 "Query tqp stat fail, status = %d,queue = %d\n",
583 ret, i);
584 return ret;
585 }
586 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
cf72fa63 587 le32_to_cpu(desc[0].data[1]);
588 }
589
590 return 0;
591}
592
593static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
594{
595 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
596 struct hclge_tqp *tqp;
597 u64 *buff = data;
598 int i;
599
600 for (i = 0; i < kinfo->num_tqps; i++) {
601 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
a90bb9a5 602 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
603 }
604
605 for (i = 0; i < kinfo->num_tqps; i++) {
606 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
a90bb9a5 607 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
608 }
609
610 return buff;
611}
612
613static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
614{
615 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
616
9b2f3477 617 /* each tqp has one TX queue and one RX queue */
618 return kinfo->num_tqps * (2);
619}
620
621static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
622{
623 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
624 u8 *buff = data;
625 int i = 0;
626
627 for (i = 0; i < kinfo->num_tqps; i++) {
628 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
629 struct hclge_tqp, q);
0c218123 630 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
631 tqp->index);
632 buff = buff + ETH_GSTRING_LEN;
633 }
634
635 for (i = 0; i < kinfo->num_tqps; i++) {
636 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
637 struct hclge_tqp, q);
0c218123 638 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
639 tqp->index);
640 buff = buff + ETH_GSTRING_LEN;
641 }
642
643 return buff;
644}
645
ebaf1908 646static u64 *hclge_comm_get_stats(const void *comm_stats,
647 const struct hclge_comm_stats_str strs[],
648 int size, u64 *data)
649{
650 u64 *buf = data;
651 u32 i;
652
653 for (i = 0; i < size; i++)
654 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
655
656 return buf + size;
657}
658
659static u8 *hclge_comm_get_strings(u32 stringset,
660 const struct hclge_comm_stats_str strs[],
661 int size, u8 *data)
662{
663 char *buff = (char *)data;
664 u32 i;
665
666 if (stringset != ETH_SS_STATS)
667 return buff;
668
669 for (i = 0; i < size; i++) {
18d219b7 670 snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
671 buff = buff + ETH_GSTRING_LEN;
672 }
673
674 return (u8 *)buff;
675}
676
677static void hclge_update_stats_for_all(struct hclge_dev *hdev)
678{
679 struct hnae3_handle *handle;
680 int status;
681
682 handle = &hdev->vport[0].nic;
683 if (handle->client) {
684 status = hclge_tqps_update_stats(handle);
685 if (status) {
686 dev_err(&hdev->pdev->dev,
687 "Update TQPS stats fail, status = %d.\n",
688 status);
689 }
690 }
691
692 status = hclge_mac_update_stats(hdev);
693 if (status)
694 dev_err(&hdev->pdev->dev,
695 "Update MAC stats fail, status = %d.\n", status);
696}
697
698static void hclge_update_stats(struct hnae3_handle *handle,
699 struct net_device_stats *net_stats)
700{
701 struct hclge_vport *vport = hclge_get_vport(handle);
702 struct hclge_dev *hdev = vport->back;
703 int status;
704
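/* Serialize statistics updates: if another update is already in progress,
 * return and let it finish.
 */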
705 if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
706 return;
707
708 status = hclge_mac_update_stats(hdev);
709 if (status)
710 dev_err(&hdev->pdev->dev,
711 "Update MAC stats fail, status = %d.\n",
712 status);
713
714 status = hclge_tqps_update_stats(handle);
715 if (status)
716 dev_err(&hdev->pdev->dev,
717 "Update TQPS stats fail, status = %d.\n",
718 status);
719
c5f65480 720 clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
721}
722
723static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
724{
725#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
726 HNAE3_SUPPORT_PHY_LOOPBACK |\
727 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
728 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
729
730 struct hclge_vport *vport = hclge_get_vport(handle);
731 struct hclge_dev *hdev = vport->back;
732 int count = 0;
733
734 /* Loopback test support rules:
735 * mac: only GE mode is supported
736 * serdes: supported in all MAC modes, including GE/XGE/LGE/CGE
737 * phy: only supported when a PHY device exists on the board
738 */
739 if (stringset == ETH_SS_TEST) {
740 /* clear loopback bit flags at first */
741 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
3ff6cde8 742 if (hdev->pdev->revision >= 0x21 ||
4dc13b96 743 hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
744 hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
745 hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
746 count += 1;
eb66d503 747 handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
46a3df9f 748 }
5fd50ac3 749
750 count += 2;
751 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
752 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
753
754 if (hdev->hw.mac.phydev) {
755 count += 1;
756 handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
757 }
758
759 } else if (stringset == ETH_SS_STATS) {
760 count = ARRAY_SIZE(g_mac_stats_string) +
761 hclge_tqps_get_sset_count(handle, stringset);
762 }
763
764 return count;
765}
766
9b2f3477 767static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
768 u8 *data)
769{
770 u8 *p = (char *)data;
771 int size;
772
773 if (stringset == ETH_SS_STATS) {
774 size = ARRAY_SIZE(g_mac_stats_string);
775 p = hclge_comm_get_strings(stringset, g_mac_stats_string,
776 size, p);
777 p = hclge_tqps_get_strings(handle, p);
778 } else if (stringset == ETH_SS_TEST) {
eb66d503 779 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
9b2f3477 780 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
781 ETH_GSTRING_LEN);
782 p += ETH_GSTRING_LEN;
783 }
4dc13b96 784 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
9b2f3477 785 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
786 ETH_GSTRING_LEN);
787 p += ETH_GSTRING_LEN;
788 }
789 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
790 memcpy(p,
791 hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
792 ETH_GSTRING_LEN);
793 p += ETH_GSTRING_LEN;
794 }
795 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
9b2f3477 796 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
797 ETH_GSTRING_LEN);
798 p += ETH_GSTRING_LEN;
799 }
800 }
801}
802
803static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
804{
805 struct hclge_vport *vport = hclge_get_vport(handle);
806 struct hclge_dev *hdev = vport->back;
807 u64 *p;
808
1c6dfe6f 809 p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
9b2f3477 810 ARRAY_SIZE(g_mac_stats_string), data);
811 p = hclge_tqps_get_stats(handle, p);
812}
813
814static void hclge_get_mac_stat(struct hnae3_handle *handle,
815 struct hns3_mac_stats *mac_stats)
816{
817 struct hclge_vport *vport = hclge_get_vport(handle);
818 struct hclge_dev *hdev = vport->back;
819
820 hclge_update_stats(handle, NULL);
821
822 mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
823 mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
824}
825
46a3df9f 826static int hclge_parse_func_status(struct hclge_dev *hdev,
d44f9b63 827 struct hclge_func_status_cmd *status)
46a3df9f 828{
829#define HCLGE_MAC_ID_MASK 0xF
830
831 if (!(status->pf_state & HCLGE_PF_STATE_DONE))
832 return -EINVAL;
833
834 /* Set the pf to main pf */
835 if (status->pf_state & HCLGE_PF_STATE_MAIN)
836 hdev->flag |= HCLGE_FLAG_MAIN;
837 else
838 hdev->flag &= ~HCLGE_FLAG_MAIN;
839
ded45d40 840 hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
841 return 0;
842}
843
844static int hclge_query_function_status(struct hclge_dev *hdev)
845{
846#define HCLGE_QUERY_MAX_CNT 5
847
d44f9b63 848 struct hclge_func_status_cmd *req;
849 struct hclge_desc desc;
850 int timeout = 0;
851 int ret;
852
853 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
d44f9b63 854 req = (struct hclge_func_status_cmd *)desc.data;
855
856 do {
857 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
858 if (ret) {
859 dev_err(&hdev->pdev->dev,
9b2f3477 860 "query function status failed %d.\n", ret);
861 return ret;
862 }
863
864 /* Check pf reset is done */
865 if (req->pf_state)
866 break;
867 usleep_range(1000, 2000);
b37ce587 868 } while (timeout++ < HCLGE_QUERY_MAX_CNT);
46a3df9f 869
60df7e91 870 return hclge_parse_func_status(hdev, req);
871}
872
873static int hclge_query_pf_resource(struct hclge_dev *hdev)
874{
d44f9b63 875 struct hclge_pf_res_cmd *req;
876 struct hclge_desc desc;
877 int ret;
878
879 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
880 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
881 if (ret) {
882 dev_err(&hdev->pdev->dev,
883 "query pf resource failed %d.\n", ret);
884 return ret;
885 }
886
d44f9b63 887 req = (struct hclge_pf_res_cmd *)desc.data;
888 hdev->num_tqps = le16_to_cpu(req->tqp_num);
889 hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
46a3df9f 890
891 if (req->tx_buf_size)
892 hdev->tx_buf_size =
60df7e91 893 le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
894 else
895 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
896
897 hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
898
899 if (req->dv_buf_size)
900 hdev->dv_buf_size =
60df7e91 901 le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
902 else
903 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
904
905 hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
906
e92a0843 907 if (hnae3_dev_roce_supported(hdev)) {
375dd5e4 908 hdev->roce_base_msix_offset =
60df7e91 909 hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
375dd5e4 910 HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
887c3820 911 hdev->num_roce_msi =
60df7e91 912 hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
e4e87715 913 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
46a3df9f 914
915 /* the NIC's MSI-X vector count always equals the RoCE's. */
916 hdev->num_nic_msi = hdev->num_roce_msi;
917
918 /* PF should have NIC vectors and Roce vectors,
919 * NIC vectors are queued before Roce vectors.
920 */
9b2f3477 921 hdev->num_msi = hdev->num_roce_msi +
375dd5e4 922 hdev->roce_base_msix_offset;
923 } else {
924 hdev->num_msi =
60df7e91 925 hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
e4e87715 926 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
927
928 hdev->num_nic_msi = hdev->num_msi;
929 }
930
931 if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
932 dev_err(&hdev->pdev->dev,
933 "Just %u msi resources, not enough for pf(min:2).\n",
934 hdev->num_nic_msi);
935 return -EINVAL;
936 }
937
938 return 0;
939}
940
941static int hclge_parse_speed(int speed_cmd, int *speed)
942{
943 switch (speed_cmd) {
944 case 6:
945 *speed = HCLGE_MAC_SPEED_10M;
946 break;
947 case 7:
948 *speed = HCLGE_MAC_SPEED_100M;
949 break;
950 case 0:
951 *speed = HCLGE_MAC_SPEED_1G;
952 break;
953 case 1:
954 *speed = HCLGE_MAC_SPEED_10G;
955 break;
956 case 2:
957 *speed = HCLGE_MAC_SPEED_25G;
958 break;
959 case 3:
960 *speed = HCLGE_MAC_SPEED_40G;
961 break;
962 case 4:
963 *speed = HCLGE_MAC_SPEED_50G;
964 break;
965 case 5:
966 *speed = HCLGE_MAC_SPEED_100G;
967 break;
968 default:
969 return -EINVAL;
970 }
971
972 return 0;
973}
974
975static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
976{
977 struct hclge_vport *vport = hclge_get_vport(handle);
978 struct hclge_dev *hdev = vport->back;
979 u32 speed_ability = hdev->hw.mac.speed_ability;
980 u32 speed_bit = 0;
981
982 switch (speed) {
983 case HCLGE_MAC_SPEED_10M:
984 speed_bit = HCLGE_SUPPORT_10M_BIT;
985 break;
986 case HCLGE_MAC_SPEED_100M:
987 speed_bit = HCLGE_SUPPORT_100M_BIT;
988 break;
989 case HCLGE_MAC_SPEED_1G:
990 speed_bit = HCLGE_SUPPORT_1G_BIT;
991 break;
992 case HCLGE_MAC_SPEED_10G:
993 speed_bit = HCLGE_SUPPORT_10G_BIT;
994 break;
995 case HCLGE_MAC_SPEED_25G:
996 speed_bit = HCLGE_SUPPORT_25G_BIT;
997 break;
998 case HCLGE_MAC_SPEED_40G:
999 speed_bit = HCLGE_SUPPORT_40G_BIT;
1000 break;
1001 case HCLGE_MAC_SPEED_50G:
1002 speed_bit = HCLGE_SUPPORT_50G_BIT;
1003 break;
1004 case HCLGE_MAC_SPEED_100G:
1005 speed_bit = HCLGE_SUPPORT_100G_BIT;
1006 break;
1007 default:
1008 return -EINVAL;
1009 }
1010
1011 if (speed_bit & speed_ability)
1012 return 0;
1013
1014 return -EINVAL;
1015}
1016
88d10bd6 1017static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
0979aa0b 1018{
0979aa0b 1019 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
db68ca0e 1020 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1021 mac->supported);
1022 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1023 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1024 mac->supported);
1025 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1026 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1027 mac->supported);
1028 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1029 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1030 mac->supported);
1031 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1032 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1033 mac->supported);
1034}
0979aa0b 1035
1036static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
1037{
1038 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1039 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1040 mac->supported);
0979aa0b 1041 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
db68ca0e 1042 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1043 mac->supported);
1044 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1045 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1046 mac->supported);
1047 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1048 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1049 mac->supported);
1050 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1051 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1052 mac->supported);
1053}
0979aa0b 1054
1055static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
1056{
1057 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1058 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1059 mac->supported);
1060 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1061 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1062 mac->supported);
1063 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1064 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1065 mac->supported);
0979aa0b 1066 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1067 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1068 mac->supported);
1069 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1070 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1071 mac->supported);
1072}
0979aa0b 1073
1074static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
1075{
1076 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1077 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1078 mac->supported);
1079 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1080 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1081 mac->supported);
1082 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1083 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1084 mac->supported);
1085 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1086 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1087 mac->supported);
1088 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1089 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1090 mac->supported);
0979aa0b 1091 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1092 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1093 mac->supported);
1094}
0979aa0b 1095
1096static void hclge_convert_setting_fec(struct hclge_mac *mac)
1097{
1098 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1099 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1100
1101 switch (mac->speed) {
1102 case HCLGE_MAC_SPEED_10G:
1103 case HCLGE_MAC_SPEED_40G:
1104 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1105 mac->supported);
1106 mac->fec_ability =
1107 BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1108 break;
1109 case HCLGE_MAC_SPEED_25G:
1110 case HCLGE_MAC_SPEED_50G:
1111 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1112 mac->supported);
1113 mac->fec_ability =
1114 BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1115 BIT(HNAE3_FEC_AUTO);
1116 break;
1117 case HCLGE_MAC_SPEED_100G:
1118 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1119 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1120 break;
1121 default:
1122 mac->fec_ability = 0;
1123 break;
1124 }
1125}
1126
1127static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1128 u8 speed_ability)
1129{
1130 struct hclge_mac *mac = &hdev->hw.mac;
1131
1132 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1133 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1134 mac->supported);
1135
1136 hclge_convert_setting_sr(mac, speed_ability);
1137 hclge_convert_setting_lr(mac, speed_ability);
1138 hclge_convert_setting_cr(mac, speed_ability);
1139 if (hdev->pdev->revision >= 0x21)
1140 hclge_convert_setting_fec(mac);
1141
1142 linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1143 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
7e6ec914 1144 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1145}
1146
1147static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1148 u8 speed_ability)
1149{
1150 struct hclge_mac *mac = &hdev->hw.mac;
1151
1152 hclge_convert_setting_kr(mac, speed_ability);
1153 if (hdev->pdev->revision >= 0x21)
1154 hclge_convert_setting_fec(mac);
1155 linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1156 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
7e6ec914 1157 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1158}
1159
1160static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1161 u8 speed_ability)
1162{
1163 unsigned long *supported = hdev->hw.mac.supported;
1164
1165 /* default to support all speed for GE port */
1166 if (!speed_ability)
1167 speed_ability = HCLGE_SUPPORT_GE;
1168
1169 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1170 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1171 supported);
1172
1173 if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1174 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1175 supported);
1176 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1177 supported);
1178 }
1179
1180 if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1181 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1182 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1183 }
1184
1185 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1186 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1187 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
bc3781ed 1188 linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1189}
1190
1191static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
1192{
1193 u8 media_type = hdev->hw.mac.media_type;
1194
1195 if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1196 hclge_parse_fiber_link_mode(hdev, speed_ability);
1197 else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1198 hclge_parse_copper_link_mode(hdev, speed_ability);
1199 else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1200 hclge_parse_backplane_link_mode(hdev, speed_ability);
0979aa0b 1201}
37417c66 1202
1203static u32 hclge_get_max_speed(u8 speed_ability)
1204{
1205 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1206 return HCLGE_MAC_SPEED_100G;
1207
1208 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1209 return HCLGE_MAC_SPEED_50G;
1210
1211 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1212 return HCLGE_MAC_SPEED_40G;
1213
1214 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1215 return HCLGE_MAC_SPEED_25G;
1216
1217 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1218 return HCLGE_MAC_SPEED_10G;
1219
1220 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1221 return HCLGE_MAC_SPEED_1G;
1222
1223 if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1224 return HCLGE_MAC_SPEED_100M;
1225
1226 if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1227 return HCLGE_MAC_SPEED_10M;
1228
1229 return HCLGE_MAC_SPEED_1G;
1230}
1231
1232static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1233{
d44f9b63 1234 struct hclge_cfg_param_cmd *req;
1235 u64 mac_addr_tmp_high;
1236 u64 mac_addr_tmp;
ebaf1908 1237 unsigned int i;
46a3df9f 1238
d44f9b63 1239 req = (struct hclge_cfg_param_cmd *)desc[0].data;
1240
1241 /* get the configuration */
1242 cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1243 HCLGE_CFG_VMDQ_M,
1244 HCLGE_CFG_VMDQ_S);
1245 cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1246 HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1247 cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1248 HCLGE_CFG_TQP_DESC_N_M,
1249 HCLGE_CFG_TQP_DESC_N_S);
1250
1251 cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1252 HCLGE_CFG_PHY_ADDR_M,
1253 HCLGE_CFG_PHY_ADDR_S);
1254 cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1255 HCLGE_CFG_MEDIA_TP_M,
1256 HCLGE_CFG_MEDIA_TP_S);
1257 cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1258 HCLGE_CFG_RX_BUF_LEN_M,
1259 HCLGE_CFG_RX_BUF_LEN_S);
1260 /* get mac_address */
1261 mac_addr_tmp = __le32_to_cpu(req->param[2]);
1262 mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1263 HCLGE_CFG_MAC_ADDR_H_M,
1264 HCLGE_CFG_MAC_ADDR_H_S);
1265
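/* The low 32 bits of the MAC address come from param[2] and the upper bits
 * from param[3]; the double shift below (<< 31, then << 1) simply shifts the
 * high part left by 32.
 */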
1266 mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1267
1268 cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1269 HCLGE_CFG_DEFAULT_SPEED_M,
1270 HCLGE_CFG_DEFAULT_SPEED_S);
1271 cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1272 HCLGE_CFG_RSS_SIZE_M,
1273 HCLGE_CFG_RSS_SIZE_S);
0e7a40cd 1274
1275 for (i = 0; i < ETH_ALEN; i++)
1276 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1277
d44f9b63 1278 req = (struct hclge_cfg_param_cmd *)desc[1].data;
46a3df9f 1279 cfg->numa_node_map = __le32_to_cpu(req->param[0]);
0979aa0b 1280
1281 cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1282 HCLGE_CFG_SPEED_ABILITY_M,
1283 HCLGE_CFG_SPEED_ABILITY_S);
1284 cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1285 HCLGE_CFG_UMV_TBL_SPACE_M,
1286 HCLGE_CFG_UMV_TBL_SPACE_S);
1287 if (!cfg->umv_space)
1288 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1289}
1290
1291/* hclge_get_cfg: query the static parameters from flash
1292 * @hdev: pointer to struct hclge_dev
1293 * @hcfg: the config structure to be filled in
1294 */
1295static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1296{
1297 struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
d44f9b63 1298 struct hclge_cfg_param_cmd *req;
1299 unsigned int i;
1300 int ret;
1301
1302 for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1303 u32 offset = 0;
1304
d44f9b63 1305 req = (struct hclge_cfg_param_cmd *)desc[i].data;
46a3df9f
S
1306 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1307 true);
1308 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1309 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
46a3df9f 1310 /* Len should be in units of 4 bytes when sent to hardware */
1311 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1312 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
a90bb9a5 1313 req->offset = cpu_to_le32(offset);
1314 }
1315
1316 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1317 if (ret) {
3f639907 1318 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1319 return ret;
1320 }
1321
1322 hclge_parse_cfg(hcfg, desc);
3f639907 1323
1324 return 0;
1325}
1326
1327static int hclge_get_cap(struct hclge_dev *hdev)
1328{
1329 int ret;
1330
1331 ret = hclge_query_function_status(hdev);
1332 if (ret) {
1333 dev_err(&hdev->pdev->dev,
1334 "query function status error %d.\n", ret);
1335 return ret;
1336 }
1337
1338 /* get pf resource */
60df7e91 1339 return hclge_query_pf_resource(hdev);
1340}
1341
1342static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1343{
1344#define HCLGE_MIN_TX_DESC 64
1345#define HCLGE_MIN_RX_DESC 64
1346
1347 if (!is_kdump_kernel())
1348 return;
1349
1350 dev_info(&hdev->pdev->dev,
1351 "Running kdump kernel. Using minimal resources\n");
1352
1353 /* the minimal number of queue pairs equals the number of vports */
1354 hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1355 hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1356 hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1357}
1358
1359static int hclge_configure(struct hclge_dev *hdev)
1360{
1361 struct hclge_cfg cfg;
1362 unsigned int i;
1363 int ret;
1364
1365 ret = hclge_get_cfg(hdev, &cfg);
727f514b 1366 if (ret)
46a3df9f 1367 return ret;
1368
1369 hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1370 hdev->base_tqp_pid = 0;
0e7a40cd 1371 hdev->rss_size_max = cfg.rss_size_max;
46a3df9f 1372 hdev->rx_buf_len = cfg.rx_buf_len;
fbbb1536 1373 ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
46a3df9f 1374 hdev->hw.mac.media_type = cfg.media_type;
2a4776e1 1375 hdev->hw.mac.phy_addr = cfg.phy_addr;
1376 hdev->num_tx_desc = cfg.tqp_desc_num;
1377 hdev->num_rx_desc = cfg.tqp_desc_num;
46a3df9f 1378 hdev->tm_info.num_pg = 1;
cacde272 1379 hdev->tc_max = cfg.tc_num;
46a3df9f 1380 hdev->tm_info.hw_pfc_map = 0;
39932473 1381 hdev->wanted_umv_size = cfg.umv_space;
46a3df9f 1382
44122887 1383 if (hnae3_dev_fd_supported(hdev)) {
9abeb7d8 1384 hdev->fd_en = true;
1385 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1386 }
9abeb7d8 1387
1388 ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1389 if (ret) {
1390 dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
1391 return ret;
1392 }
1393
1394 hclge_parse_link_mode(hdev, cfg.speed_ability);
1395
1396 hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1397
1398 if ((hdev->tc_max > HNAE3_MAX_TC) ||
1399 (hdev->tc_max < 1)) {
adcf738b 1400 dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
1401 hdev->tc_max);
1402 hdev->tc_max = 1;
1403 }
1404
1405 /* Dev does not support DCB */
1406 if (!hnae3_dev_dcb_supported(hdev)) {
1407 hdev->tc_max = 1;
1408 hdev->pfc_max = 0;
1409 } else {
1410 hdev->pfc_max = hdev->tc_max;
1411 }
1412
a2987975 1413 hdev->tm_info.num_tc = 1;
cacde272 1414
46a3df9f 1415 /* Non-contiguous TC maps are currently not supported */
cacde272 1416 for (i = 0; i < hdev->tm_info.num_tc; i++)
e4e87715 1417 hnae3_set_bit(hdev->hw_tc_map, i, 1);
46a3df9f 1418
71b83869 1419 hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
46a3df9f 1420
1421 hclge_init_kdump_kernel_config(hdev);
1422
1423 /* Set the init affinity based on pci func number */
1424 i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
1425 i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
1426 cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
1427 &hdev->affinity_mask);
1428
1429 return ret;
1430}
1431
1432static int hclge_config_tso(struct hclge_dev *hdev, unsigned int tso_mss_min,
1433 unsigned int tso_mss_max)
46a3df9f 1434{
d44f9b63 1435 struct hclge_cfg_tso_status_cmd *req;
46a3df9f 1436 struct hclge_desc desc;
a90bb9a5 1437 u16 tso_mss;
1438
1439 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1440
d44f9b63 1441 req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1442
1443 tso_mss = 0;
1444 hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1445 HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1446 req->tso_mss_min = cpu_to_le16(tso_mss);
1447
1448 tso_mss = 0;
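/* Note: the max MSS word reuses the HCLGE_TSO_MSS_MIN_M/S field layout; only
 * the destination field (tso_mss_max) differs from the block above.
 */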
1449 hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1450 HCLGE_TSO_MSS_MIN_S, tso_mss_max);
a90bb9a5 1451 req->tso_mss_max = cpu_to_le16(tso_mss);
1452
1453 return hclge_cmd_send(&hdev->hw, &desc, 1);
1454}
1455
1456static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1457{
1458 struct hclge_cfg_gro_status_cmd *req;
1459 struct hclge_desc desc;
1460 int ret;
1461
1462 if (!hnae3_dev_gro_supported(hdev))
1463 return 0;
1464
1465 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1466 req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1467
1468 req->gro_en = cpu_to_le16(en ? 1 : 0);
1469
1470 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1471 if (ret)
1472 dev_err(&hdev->pdev->dev,
1473 "GRO hardware config cmd failed, ret = %d\n", ret);
1474
1475 return ret;
1476}
1477
1478static int hclge_alloc_tqps(struct hclge_dev *hdev)
1479{
1480 struct hclge_tqp *tqp;
1481 int i;
1482
1483 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1484 sizeof(struct hclge_tqp), GFP_KERNEL);
1485 if (!hdev->htqp)
1486 return -ENOMEM;
1487
1488 tqp = hdev->htqp;
1489
1490 for (i = 0; i < hdev->num_tqps; i++) {
1491 tqp->dev = &hdev->pdev->dev;
1492 tqp->index = i;
1493
1494 tqp->q.ae_algo = &ae_algo;
1495 tqp->q.buf_size = hdev->rx_buf_len;
1496 tqp->q.tx_desc_num = hdev->num_tx_desc;
1497 tqp->q.rx_desc_num = hdev->num_rx_desc;
1498 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1499 i * HCLGE_TQP_REG_SIZE;
1500
1501 tqp++;
1502 }
1503
1504 return 0;
1505}
1506
1507static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1508 u16 tqp_pid, u16 tqp_vid, bool is_pf)
1509{
d44f9b63 1510 struct hclge_tqp_map_cmd *req;
1511 struct hclge_desc desc;
1512 int ret;
1513
1514 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1515
d44f9b63 1516 req = (struct hclge_tqp_map_cmd *)desc.data;
46a3df9f 1517 req->tqp_id = cpu_to_le16(tqp_pid);
a90bb9a5 1518 req->tqp_vf = func_id;
1519 req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1520 if (!is_pf)
1521 req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1522 req->tqp_vid = cpu_to_le16(tqp_vid);
1523
1524 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1525 if (ret)
1526 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
46a3df9f 1527
3f639907 1528 return ret;
1529}
1530
672ad0ed 1531static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
46a3df9f 1532{
128b900d 1533 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
46a3df9f 1534 struct hclge_dev *hdev = vport->back;
7df7dad6 1535 int i, alloced;
1536
1537 for (i = 0, alloced = 0; i < hdev->num_tqps &&
672ad0ed 1538 alloced < num_tqps; i++) {
1539 if (!hdev->htqp[i].alloced) {
1540 hdev->htqp[i].q.handle = &vport->nic;
1541 hdev->htqp[i].q.tqp_index = alloced;
1542 hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1543 hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
128b900d 1544 kinfo->tqp[alloced] = &hdev->htqp[i].q;
46a3df9f 1545 hdev->htqp[i].alloced = true;
1546 alloced++;
1547 }
1548 }
1549 vport->alloc_tqps = alloced;
1550 kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1551 vport->alloc_tqps / hdev->tm_info.num_tc);
46a3df9f 1552
1553 /* by default, ensure a one-to-one mapping between irq and queue */
1554 kinfo->rss_size = min_t(u16, kinfo->rss_size,
1555 (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1556
1557 return 0;
1558}
1559
1560static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1561 u16 num_tx_desc, u16 num_rx_desc)
1562
1563{
1564 struct hnae3_handle *nic = &vport->nic;
1565 struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1566 struct hclge_dev *hdev = vport->back;
af958827 1567 int ret;
46a3df9f 1568
1569 kinfo->num_tx_desc = num_tx_desc;
1570 kinfo->num_rx_desc = num_rx_desc;
1571
46a3df9f 1572 kinfo->rx_buf_len = hdev->rx_buf_len;
46a3df9f 1573
672ad0ed 1574 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1575 sizeof(struct hnae3_queue *), GFP_KERNEL);
1576 if (!kinfo->tqp)
1577 return -ENOMEM;
1578
672ad0ed 1579 ret = hclge_assign_tqp(vport, num_tqps);
3f639907 1580 if (ret)
46a3df9f 1581 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
46a3df9f 1582
3f639907 1583 return ret;
1584}
1585
1586static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1587 struct hclge_vport *vport)
1588{
1589 struct hnae3_handle *nic = &vport->nic;
1590 struct hnae3_knic_private_info *kinfo;
1591 u16 i;
1592
1593 kinfo = &nic->kinfo;
205a24ca 1594 for (i = 0; i < vport->alloc_tqps; i++) {
1595 struct hclge_tqp *q =
1596 container_of(kinfo->tqp[i], struct hclge_tqp, q);
1597 bool is_pf;
1598 int ret;
1599
1600 is_pf = !(vport->vport_id);
1601 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1602 i, is_pf);
1603 if (ret)
1604 return ret;
1605 }
1606
1607 return 0;
1608}
1609
1610static int hclge_map_tqp(struct hclge_dev *hdev)
1611{
1612 struct hclge_vport *vport = hdev->vport;
1613 u16 i, num_vport;
1614
1615 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1616 for (i = 0; i < num_vport; i++) {
1617 int ret;
1618
1619 ret = hclge_map_tqp_to_vport(hdev, vport);
1620 if (ret)
1621 return ret;
1622
1623 vport++;
1624 }
1625
1626 return 0;
1627}
1628
1629static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1630{
1631 struct hnae3_handle *nic = &vport->nic;
1632 struct hclge_dev *hdev = vport->back;
1633 int ret;
1634
1635 nic->pdev = hdev->pdev;
1636 nic->ae_algo = &ae_algo;
1637 nic->numa_node_mask = hdev->numa_node_mask;
1638
1639 ret = hclge_knic_setup(vport, num_tqps,
1640 hdev->num_tx_desc, hdev->num_rx_desc);
1641 if (ret)
1642 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
46a3df9f 1643
b69c9737 1644 return ret;
1645}
1646
1647static int hclge_alloc_vport(struct hclge_dev *hdev)
1648{
1649 struct pci_dev *pdev = hdev->pdev;
1650 struct hclge_vport *vport;
1651 u32 tqp_main_vport;
1652 u32 tqp_per_vport;
1653 int num_vport, i;
1654 int ret;
1655
1656 /* We need to alloc a vport for main NIC of PF */
1657 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1658
38e62046 1659 if (hdev->num_tqps < num_vport) {
adcf738b 1660 dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
1661 hdev->num_tqps, num_vport);
1662 return -EINVAL;
1663 }
1664
1665 /* Alloc the same number of TQPs for every vport */
1666 tqp_per_vport = hdev->num_tqps / num_vport;
1667 tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
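/* The main vport (the PF's own NIC, vport 0) additionally absorbs any
 * remainder so that every TQP ends up assigned.
 */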
1668
1669 vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1670 GFP_KERNEL);
1671 if (!vport)
1672 return -ENOMEM;
1673
1674 hdev->vport = vport;
1675 hdev->num_alloc_vport = num_vport;
1676
1677 if (IS_ENABLED(CONFIG_PCI_IOV))
1678 hdev->num_alloc_vfs = hdev->num_req_vfs;
1679
1680 for (i = 0; i < num_vport; i++) {
1681 vport->back = hdev;
1682 vport->vport_id = i;
6430f744 1683 vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
818f1675 1684 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
741fca16
JS
1685 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1686 vport->rxvlan_cfg.rx_vlan_offload_en = true;
c6075b19 1687 INIT_LIST_HEAD(&vport->vlan_list);
6dd86902 1688 INIT_LIST_HEAD(&vport->uc_mac_list);
1689 INIT_LIST_HEAD(&vport->mc_mac_list);
ee4bcd3b 1690 spin_lock_init(&vport->mac_list_lock);
46a3df9f
S
1691
1692 if (i == 0)
1693 ret = hclge_vport_setup(vport, tqp_main_vport);
1694 else
1695 ret = hclge_vport_setup(vport, tqp_per_vport);
1696 if (ret) {
1697 dev_err(&pdev->dev,
1698 "vport setup failed for vport %d, %d\n",
1699 i, ret);
1700 return ret;
1701 }
1702
1703 vport++;
1704 }
1705
1706 return 0;
1707}
1708
acf61ecd
YL
1709static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1710 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f
S
1711{
1712/* TX buffer size is allocated in units of 128 bytes */
1713#define HCLGE_BUF_SIZE_UNIT_SHIFT 7
1714#define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15)
d44f9b63 1715 struct hclge_tx_buff_alloc_cmd *req;
46a3df9f
S
1716 struct hclge_desc desc;
1717 int ret;
1718 u8 i;
1719
d44f9b63 1720 req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
46a3df9f
S
1721
1722 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
f9f07091 1723 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
acf61ecd 1724 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
9ffe79a9 1725
46a3df9f
S
1726 req->tx_pkt_buff[i] =
1727 cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1728 HCLGE_BUF_SIZE_UPDATE_EN_MSK);
9ffe79a9 1729 }
46a3df9f
S
1730
1731 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 1732 if (ret)
46a3df9f
S
1733 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1734 ret);
46a3df9f 1735
3f639907 1736 return ret;
46a3df9f
S
1737}
1738
acf61ecd
YL
1739static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1740 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f 1741{
acf61ecd 1742 int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
46a3df9f 1743
3f639907
JS
1744 if (ret)
1745 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
46a3df9f 1746
3f639907 1747 return ret;
46a3df9f
S
1748}
1749
1a49f3c6 1750static u32 hclge_get_tc_num(struct hclge_dev *hdev)
46a3df9f 1751{
ebaf1908
WL
1752 unsigned int i;
1753 u32 cnt = 0;
46a3df9f
S
1754
1755 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1756 if (hdev->hw_tc_map & BIT(i))
1757 cnt++;
1758 return cnt;
1759}
1760
46a3df9f 1761/* Get the number of PFC-enabled TCs that have a private buffer */
acf61ecd
YL
1762static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1763 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f
S
1764{
1765 struct hclge_priv_buf *priv;
ebaf1908
WL
1766 unsigned int i;
1767 int cnt = 0;
46a3df9f
S
1768
1769 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
acf61ecd 1770 priv = &buf_alloc->priv_buf[i];
46a3df9f
S
1771 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1772 priv->enable)
1773 cnt++;
1774 }
1775
1776 return cnt;
1777}
1778
1779/* Get the number of PFC-disabled TCs that have a private buffer */
acf61ecd
YL
1780static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1781 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f
S
1782{
1783 struct hclge_priv_buf *priv;
ebaf1908
WL
1784 unsigned int i;
1785 int cnt = 0;
46a3df9f
S
1786
1787 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
acf61ecd 1788 priv = &buf_alloc->priv_buf[i];
46a3df9f
S
1789 if (hdev->hw_tc_map & BIT(i) &&
1790 !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1791 priv->enable)
1792 cnt++;
1793 }
1794
1795 return cnt;
1796}
1797
acf61ecd 1798static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f
S
1799{
1800 struct hclge_priv_buf *priv;
1801 u32 rx_priv = 0;
1802 int i;
1803
1804 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
acf61ecd 1805 priv = &buf_alloc->priv_buf[i];
46a3df9f
S
1806 if (priv->enable)
1807 rx_priv += priv->buf_size;
1808 }
1809 return rx_priv;
1810}
1811
acf61ecd 1812static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
9ffe79a9
YL
1813{
1814 u32 i, total_tx_size = 0;
1815
1816 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
acf61ecd 1817 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
9ffe79a9
YL
1818
1819 return total_tx_size;
1820}
1821
acf61ecd
YL
1822static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1823 struct hclge_pkt_buf_alloc *buf_alloc,
1824 u32 rx_all)
46a3df9f 1825{
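 /* Check whether the rx buffer left after the TC private buffers are
  * allocated is enough for the shared buffer; if so, set up the shared
  * buffer size, watermarks and per-TC thresholds.
  */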
1a49f3c6
YL
1826 u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1827 u32 tc_num = hclge_get_tc_num(hdev);
b9a400ac 1828 u32 shared_buf, aligned_mps;
46a3df9f
S
1829 u32 rx_priv;
1830 int i;
1831
b9a400ac 1832 aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
46a3df9f 1833
d221df4e 1834 if (hnae3_dev_dcb_supported(hdev))
b37ce587
YM
1835 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1836 hdev->dv_buf_size;
d221df4e 1837 else
b9a400ac 1838 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
368686be 1839 + hdev->dv_buf_size;
d221df4e 1840
db5936db 1841 shared_buf_tc = tc_num * aligned_mps + aligned_mps;
af854724
YL
1842 shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1843 HCLGE_BUF_SIZE_UNIT);
46a3df9f 1844
acf61ecd 1845 rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
af854724 1846 if (rx_all < rx_priv + shared_std)
46a3df9f
S
1847 return false;
1848
b9a400ac 1849 shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
acf61ecd 1850 buf_alloc->s_buf.buf_size = shared_buf;
368686be
YL
1851 if (hnae3_dev_dcb_supported(hdev)) {
1852 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1853 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
b37ce587
YM
1854 - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1855 HCLGE_BUF_SIZE_UNIT);
368686be 1856 } else {
b9a400ac 1857 buf_alloc->s_buf.self.high = aligned_mps +
368686be 1858 HCLGE_NON_DCB_ADDITIONAL_BUF;
1a49f3c6
YL
1859 buf_alloc->s_buf.self.low = aligned_mps;
1860 }
1861
1862 if (hnae3_dev_dcb_supported(hdev)) {
9e15be90
YL
1863 hi_thrd = shared_buf - hdev->dv_buf_size;
1864
1865 if (tc_num <= NEED_RESERVE_TC_NUM)
1866 hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
1867 / BUF_MAX_PERCENT;
1868
1a49f3c6 1869 if (tc_num)
9e15be90 1870 hi_thrd = hi_thrd / tc_num;
1a49f3c6 1871
b37ce587 1872 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
1a49f3c6 1873 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
b37ce587 1874 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
1a49f3c6
YL
1875 } else {
1876 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1877 lo_thrd = aligned_mps;
368686be 1878 }
46a3df9f
S
1879
1880 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1a49f3c6
YL
1881 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1882 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
46a3df9f
S
1883 }
1884
1885 return true;
1886}
1887
acf61ecd
YL
1888static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1889 struct hclge_pkt_buf_alloc *buf_alloc)
9ffe79a9
YL
1890{
1891 u32 i, total_size;
1892
1893 total_size = hdev->pkt_buf_size;
1894
1895 /* alloc tx buffer for all enabled tc */
1896 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
acf61ecd 1897 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
9ffe79a9 1898
b6b4f987
HT
1899 if (hdev->hw_tc_map & BIT(i)) {
1900 if (total_size < hdev->tx_buf_size)
1901 return -ENOMEM;
9ffe79a9 1902
368686be 1903 priv->tx_buf_size = hdev->tx_buf_size;
b6b4f987 1904 } else {
9ffe79a9 1905 priv->tx_buf_size = 0;
b6b4f987 1906 }
9ffe79a9
YL
1907
1908 total_size -= priv->tx_buf_size;
1909 }
1910
1911 return 0;
1912}
1913
8ca754b1
YL
1914static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
1915 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f 1916{
8ca754b1
YL
1917 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1918 u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
ebaf1908 1919 unsigned int i;
46a3df9f 1920
46a3df9f 1921 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
8ca754b1 1922 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
46a3df9f 1923
bb1fe9ea
YL
1924 priv->enable = 0;
1925 priv->wl.low = 0;
1926 priv->wl.high = 0;
1927 priv->buf_size = 0;
1928
1929 if (!(hdev->hw_tc_map & BIT(i)))
1930 continue;
1931
1932 priv->enable = 1;
46a3df9f
S
1933
1934 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
b37ce587 1935 priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
8ca754b1
YL
1936 priv->wl.high = roundup(priv->wl.low + aligned_mps,
1937 HCLGE_BUF_SIZE_UNIT);
46a3df9f
S
1938 } else {
1939 priv->wl.low = 0;
b37ce587
YM
1940 priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
1941 aligned_mps;
46a3df9f 1942 }
8ca754b1
YL
1943
1944 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
46a3df9f
S
1945 }
1946
8ca754b1
YL
1947 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1948}
46a3df9f 1949
8ca754b1
YL
1950static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1951 struct hclge_pkt_buf_alloc *buf_alloc)
1952{
1953 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1954 int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1955 int i;
46a3df9f
S
1956
1957 /* clear the TCs starting from the last one */
1958 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
8ca754b1 1959 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
ebaf1908 1960 unsigned int mask = BIT((unsigned int)i);
46a3df9f 1961
ebaf1908
WL
1962 if (hdev->hw_tc_map & mask &&
1963 !(hdev->tm_info.hw_pfc_map & mask)) {
46a3df9f
S
1964 /* Clear the private buffer of this non-PFC TC */
1965 priv->wl.low = 0;
1966 priv->wl.high = 0;
1967 priv->buf_size = 0;
1968 priv->enable = 0;
1969 no_pfc_priv_num--;
1970 }
1971
acf61ecd 1972 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
46a3df9f
S
1973 no_pfc_priv_num == 0)
1974 break;
1975 }
1976
8ca754b1
YL
1977 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1978}
46a3df9f 1979
8ca754b1
YL
1980static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1981 struct hclge_pkt_buf_alloc *buf_alloc)
1982{
1983 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1984 int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1985 int i;
46a3df9f
S
1986
1987 /* clear the TCs starting from the last one */
1988 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
8ca754b1 1989 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
ebaf1908 1990 unsigned int mask = BIT((unsigned int)i);
46a3df9f 1991
ebaf1908
WL
1992 if (hdev->hw_tc_map & mask &&
1993 hdev->tm_info.hw_pfc_map & mask) {
46a3df9f
S
1994 /* Reduce the number of PFC TCs with a private buffer */
1995 priv->wl.low = 0;
1996 priv->enable = 0;
1997 priv->wl.high = 0;
1998 priv->buf_size = 0;
1999 pfc_priv_num--;
2000 }
2001
acf61ecd 2002 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
46a3df9f
S
2003 pfc_priv_num == 0)
2004 break;
2005 }
8ca754b1
YL
2006
2007 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2008}
2009
9e15be90
YL
2010static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2011 struct hclge_pkt_buf_alloc *buf_alloc)
2012{
2013#define COMPENSATE_BUFFER 0x3C00
2014#define COMPENSATE_HALF_MPS_NUM 5
2015#define PRIV_WL_GAP 0x1800
2016
2017 u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2018 u32 tc_num = hclge_get_tc_num(hdev);
2019 u32 half_mps = hdev->mps >> 1;
2020 u32 min_rx_priv;
2021 unsigned int i;
2022
2023 if (tc_num)
2024 rx_priv = rx_priv / tc_num;
2025
2026 if (tc_num <= NEED_RESERVE_TC_NUM)
2027 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2028
2029 min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2030 COMPENSATE_HALF_MPS_NUM * half_mps;
2031 min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2032 rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2033
2034 if (rx_priv < min_rx_priv)
2035 return false;
2036
2037 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2038 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2039
2040 priv->enable = 0;
2041 priv->wl.low = 0;
2042 priv->wl.high = 0;
2043 priv->buf_size = 0;
2044
2045 if (!(hdev->hw_tc_map & BIT(i)))
2046 continue;
2047
2048 priv->enable = 1;
2049 priv->buf_size = rx_priv;
2050 priv->wl.high = rx_priv - hdev->dv_buf_size;
2051 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2052 }
2053
2054 buf_alloc->s_buf.buf_size = 0;
2055
2056 return true;
2057}
2058
8ca754b1
YL
2059/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2060 * @hdev: pointer to struct hclge_dev
2061 * @buf_alloc: pointer to buffer calculation data
2062 * @return: 0: calculation successful, negative: fail
2063 */
2064static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2065 struct hclge_pkt_buf_alloc *buf_alloc)
2066{
2067 /* When DCB is not supported, rx private buffer is not allocated. */
2068 if (!hnae3_dev_dcb_supported(hdev)) {
2069 u32 rx_all = hdev->pkt_buf_size;
2070
2071 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2072 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2073 return -ENOMEM;
2074
2075 return 0;
2076 }
2077
9e15be90
YL
2078 if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2079 return 0;
2080
8ca754b1
YL
2081 if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2082 return 0;
2083
2084 /* try to decrease the buffer size */
2085 if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2086 return 0;
2087
2088 if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2089 return 0;
2090
2091 if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
46a3df9f
S
2092 return 0;
2093
2094 return -ENOMEM;
2095}
2096
acf61ecd
YL
2097static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2098 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f 2099{
d44f9b63 2100 struct hclge_rx_priv_buff_cmd *req;
46a3df9f
S
2101 struct hclge_desc desc;
2102 int ret;
2103 int i;
2104
2105 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
d44f9b63 2106 req = (struct hclge_rx_priv_buff_cmd *)desc.data;
46a3df9f
S
2107
2108 /* Alloc private buffer TCs */
2109 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
acf61ecd 2110 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
46a3df9f
S
2111
2112 req->buf_num[i] =
2113 cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2114 req->buf_num[i] |=
5bca3b94 2115 cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
46a3df9f
S
2116 }
2117
b8c8bf47 2118 req->shared_buf =
acf61ecd 2119 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
b8c8bf47
YL
2120 (1 << HCLGE_TC0_PRI_BUF_EN_B));
2121
46a3df9f 2122 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 2123 if (ret)
46a3df9f
S
2124 dev_err(&hdev->pdev->dev,
2125 "rx private buffer alloc cmd failed %d\n", ret);
46a3df9f 2126
3f639907 2127 return ret;
46a3df9f
S
2128}
2129
acf61ecd
YL
2130static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2131 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f
S
2132{
2133 struct hclge_rx_priv_wl_buf *req;
2134 struct hclge_priv_buf *priv;
2135 struct hclge_desc desc[2];
2136 int i, j;
2137 int ret;
2138
2139 for (i = 0; i < 2; i++) {
2140 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2141 false);
2142 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2143
2144 /* The first descriptor set the NEXT bit to 1 */
2145 if (i == 0)
2146 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2147 else
2148 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2149
2150 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
acf61ecd
YL
2151 u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2152
2153 priv = &buf_alloc->priv_buf[idx];
46a3df9f
S
2154 req->tc_wl[j].high =
2155 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2156 req->tc_wl[j].high |=
3738287c 2157 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
46a3df9f
S
2158 req->tc_wl[j].low =
2159 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2160 req->tc_wl[j].low |=
3738287c 2161 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
46a3df9f
S
2162 }
2163 }
2164
2165 /* Send 2 descriptors at one time */
2166 ret = hclge_cmd_send(&hdev->hw, desc, 2);
3f639907 2167 if (ret)
46a3df9f
S
2168 dev_err(&hdev->pdev->dev,
2169 "rx private waterline config cmd failed %d\n",
2170 ret);
3f639907 2171 return ret;
46a3df9f
S
2172}
2173
acf61ecd
YL
2174static int hclge_common_thrd_config(struct hclge_dev *hdev,
2175 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f 2176{
acf61ecd 2177 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
46a3df9f
S
2178 struct hclge_rx_com_thrd *req;
2179 struct hclge_desc desc[2];
2180 struct hclge_tc_thrd *tc;
2181 int i, j;
2182 int ret;
2183
2184 for (i = 0; i < 2; i++) {
2185 hclge_cmd_setup_basic_desc(&desc[i],
2186 HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2187 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2188
2189 /* The first descriptor set the NEXT bit to 1 */
2190 if (i == 0)
2191 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2192 else
2193 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2194
2195 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2196 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2197
2198 req->com_thrd[j].high =
2199 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2200 req->com_thrd[j].high |=
3738287c 2201 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
46a3df9f
S
2202 req->com_thrd[j].low =
2203 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2204 req->com_thrd[j].low |=
3738287c 2205 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
46a3df9f
S
2206 }
2207 }
2208
2209 /* Send 2 descriptors at one time */
2210 ret = hclge_cmd_send(&hdev->hw, desc, 2);
3f639907 2211 if (ret)
46a3df9f
S
2212 dev_err(&hdev->pdev->dev,
2213 "common threshold config cmd failed %d\n", ret);
3f639907 2214 return ret;
46a3df9f
S
2215}
2216
acf61ecd
YL
2217static int hclge_common_wl_config(struct hclge_dev *hdev,
2218 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f 2219{
acf61ecd 2220 struct hclge_shared_buf *buf = &buf_alloc->s_buf;
46a3df9f
S
2221 struct hclge_rx_com_wl *req;
2222 struct hclge_desc desc;
2223 int ret;
2224
2225 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2226
2227 req = (struct hclge_rx_com_wl *)desc.data;
2228 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
3738287c 2229 req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
46a3df9f
S
2230
2231 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
3738287c 2232 req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
46a3df9f
S
2233
2234 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 2235 if (ret)
46a3df9f
S
2236 dev_err(&hdev->pdev->dev,
2237 "common waterline config cmd failed %d\n", ret);
46a3df9f 2238
3f639907 2239 return ret;
46a3df9f
S
2240}
2241
2242int hclge_buffer_alloc(struct hclge_dev *hdev)
2243{
acf61ecd 2244 struct hclge_pkt_buf_alloc *pkt_buf;
46a3df9f
S
2245 int ret;
2246
acf61ecd
YL
2247 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2248 if (!pkt_buf)
46a3df9f
S
2249 return -ENOMEM;
2250
acf61ecd 2251 ret = hclge_tx_buffer_calc(hdev, pkt_buf);
9ffe79a9
YL
2252 if (ret) {
2253 dev_err(&hdev->pdev->dev,
2254 "could not calc tx buffer size for all TCs %d\n", ret);
acf61ecd 2255 goto out;
9ffe79a9
YL
2256 }
2257
acf61ecd 2258 ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
46a3df9f
S
2259 if (ret) {
2260 dev_err(&hdev->pdev->dev,
2261 "could not alloc tx buffers %d\n", ret);
acf61ecd 2262 goto out;
46a3df9f
S
2263 }
2264
acf61ecd 2265 ret = hclge_rx_buffer_calc(hdev, pkt_buf);
46a3df9f
S
2266 if (ret) {
2267 dev_err(&hdev->pdev->dev,
2268 "could not calc rx priv buffer size for all TCs %d\n",
2269 ret);
acf61ecd 2270 goto out;
46a3df9f
S
2271 }
2272
acf61ecd 2273 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
46a3df9f
S
2274 if (ret) {
2275 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2276 ret);
acf61ecd 2277 goto out;
46a3df9f
S
2278 }
2279
2daf4a65 2280 if (hnae3_dev_dcb_supported(hdev)) {
acf61ecd 2281 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2daf4a65
YL
2282 if (ret) {
2283 dev_err(&hdev->pdev->dev,
2284 "could not configure rx private waterline %d\n",
2285 ret);
acf61ecd 2286 goto out;
2daf4a65 2287 }
46a3df9f 2288
acf61ecd 2289 ret = hclge_common_thrd_config(hdev, pkt_buf);
2daf4a65
YL
2290 if (ret) {
2291 dev_err(&hdev->pdev->dev,
2292 "could not configure common threshold %d\n",
2293 ret);
acf61ecd 2294 goto out;
2daf4a65 2295 }
46a3df9f
S
2296 }
2297
acf61ecd
YL
2298 ret = hclge_common_wl_config(hdev, pkt_buf);
2299 if (ret)
46a3df9f
S
2300 dev_err(&hdev->pdev->dev,
2301 "could not configure common waterline %d\n", ret);
46a3df9f 2302
acf61ecd
YL
2303out:
2304 kfree(pkt_buf);
2305 return ret;
46a3df9f
S
2306}
2307
2308static int hclge_init_roce_base_info(struct hclge_vport *vport)
2309{
2310 struct hnae3_handle *roce = &vport->roce;
2311 struct hnae3_handle *nic = &vport->nic;
2312
887c3820 2313 roce->rinfo.num_vectors = vport->back->num_roce_msi;
46a3df9f
S
2314
2315 if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2316 vport->back->num_msi_left == 0)
2317 return -EINVAL;
2318
2319 roce->rinfo.base_vector = vport->back->roce_base_vector;
2320
2321 roce->rinfo.netdev = nic->kinfo.netdev;
2322 roce->rinfo.roce_io_base = vport->back->hw.io_base;
2323
2324 roce->pdev = nic->pdev;
2325 roce->ae_algo = nic->ae_algo;
2326 roce->numa_node_mask = nic->numa_node_mask;
2327
2328 return 0;
2329}
2330
887c3820 2331static int hclge_init_msi(struct hclge_dev *hdev)
46a3df9f
S
2332{
2333 struct pci_dev *pdev = hdev->pdev;
887c3820
SM
2334 int vectors;
2335 int i;
46a3df9f 2336
580a05f9
YL
2337 vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2338 hdev->num_msi,
887c3820
SM
2339 PCI_IRQ_MSI | PCI_IRQ_MSIX);
2340 if (vectors < 0) {
2341 dev_err(&pdev->dev,
2342 "failed(%d) to allocate MSI/MSI-X vectors\n",
2343 vectors);
2344 return vectors;
46a3df9f 2345 }
887c3820
SM
2346 if (vectors < hdev->num_msi)
2347 dev_warn(&hdev->pdev->dev,
adcf738b 2348 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
887c3820 2349 hdev->num_msi, vectors);
46a3df9f 2350
887c3820
SM
2351 hdev->num_msi = vectors;
2352 hdev->num_msi_left = vectors;
580a05f9 2353
887c3820 2354 hdev->base_msi_vector = pdev->irq;
46a3df9f 2355 hdev->roce_base_vector = hdev->base_msi_vector +
375dd5e4 2356 hdev->roce_base_msix_offset;
46a3df9f 2357
46a3df9f
S
2358 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2359 sizeof(u16), GFP_KERNEL);
887c3820
SM
2360 if (!hdev->vector_status) {
2361 pci_free_irq_vectors(pdev);
46a3df9f 2362 return -ENOMEM;
887c3820 2363 }
46a3df9f
S
2364
2365 for (i = 0; i < hdev->num_msi; i++)
2366 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2367
887c3820
SM
2368 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2369 sizeof(int), GFP_KERNEL);
2370 if (!hdev->vector_irq) {
2371 pci_free_irq_vectors(pdev);
2372 return -ENOMEM;
46a3df9f 2373 }
46a3df9f
S
2374
2375 return 0;
2376}
2377
2d03eacc 2378static u8 hclge_check_speed_dup(u8 duplex, int speed)
46a3df9f 2379{
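 /* duplex is forced to full for any speed other than 10M and 100M */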
2d03eacc
YL
2380 if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2381 duplex = HCLGE_MAC_FULL;
46a3df9f 2382
2d03eacc 2383 return duplex;
46a3df9f
S
2384}
2385
2d03eacc
YL
2386static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2387 u8 duplex)
46a3df9f 2388{
d44f9b63 2389 struct hclge_config_mac_speed_dup_cmd *req;
46a3df9f
S
2390 struct hclge_desc desc;
2391 int ret;
2392
d44f9b63 2393 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
46a3df9f
S
2394
2395 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2396
63cbf7a9
YM
2397 if (duplex)
2398 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
46a3df9f
S
2399
2400 switch (speed) {
2401 case HCLGE_MAC_SPEED_10M:
e4e87715
PL
2402 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2403 HCLGE_CFG_SPEED_S, 6);
46a3df9f
S
2404 break;
2405 case HCLGE_MAC_SPEED_100M:
e4e87715
PL
2406 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2407 HCLGE_CFG_SPEED_S, 7);
46a3df9f
S
2408 break;
2409 case HCLGE_MAC_SPEED_1G:
e4e87715
PL
2410 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2411 HCLGE_CFG_SPEED_S, 0);
46a3df9f
S
2412 break;
2413 case HCLGE_MAC_SPEED_10G:
e4e87715
PL
2414 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2415 HCLGE_CFG_SPEED_S, 1);
46a3df9f
S
2416 break;
2417 case HCLGE_MAC_SPEED_25G:
e4e87715
PL
2418 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2419 HCLGE_CFG_SPEED_S, 2);
46a3df9f
S
2420 break;
2421 case HCLGE_MAC_SPEED_40G:
e4e87715
PL
2422 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2423 HCLGE_CFG_SPEED_S, 3);
46a3df9f
S
2424 break;
2425 case HCLGE_MAC_SPEED_50G:
e4e87715
PL
2426 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2427 HCLGE_CFG_SPEED_S, 4);
46a3df9f
S
2428 break;
2429 case HCLGE_MAC_SPEED_100G:
e4e87715
PL
2430 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2431 HCLGE_CFG_SPEED_S, 5);
46a3df9f
S
2432 break;
2433 default:
d7629e74 2434 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
46a3df9f
S
2435 return -EINVAL;
2436 }
2437
e4e87715
PL
2438 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2439 1);
46a3df9f
S
2440
2441 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2442 if (ret) {
2443 dev_err(&hdev->pdev->dev,
2444 "mac speed/duplex config cmd failed %d.\n", ret);
2445 return ret;
2446 }
2447
2d03eacc
YL
2448 return 0;
2449}
2450
2451int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2452{
68e1006f 2453 struct hclge_mac *mac = &hdev->hw.mac;
2d03eacc
YL
2454 int ret;
2455
2456 duplex = hclge_check_speed_dup(duplex, speed);
68e1006f
JS
2457 if (!mac->support_autoneg && mac->speed == speed &&
2458 mac->duplex == duplex)
2d03eacc
YL
2459 return 0;
2460
2461 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2462 if (ret)
2463 return ret;
2464
2465 hdev->hw.mac.speed = speed;
2466 hdev->hw.mac.duplex = duplex;
46a3df9f
S
2467
2468 return 0;
2469}
2470
2471static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2472 u8 duplex)
2473{
2474 struct hclge_vport *vport = hclge_get_vport(handle);
2475 struct hclge_dev *hdev = vport->back;
2476
2477 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2478}
2479
46a3df9f
S
2480static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2481{
d44f9b63 2482 struct hclge_config_auto_neg_cmd *req;
46a3df9f 2483 struct hclge_desc desc;
a90bb9a5 2484 u32 flag = 0;
46a3df9f
S
2485 int ret;
2486
2487 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2488
d44f9b63 2489 req = (struct hclge_config_auto_neg_cmd *)desc.data;
b9a8f883
YL
2490 if (enable)
2491 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
a90bb9a5 2492 req->cfg_an_cmd_flag = cpu_to_le32(flag);
46a3df9f
S
2493
2494 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 2495 if (ret)
46a3df9f
S
2496 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2497 ret);
46a3df9f 2498
3f639907 2499 return ret;
46a3df9f
S
2500}
2501
2502static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2503{
2504 struct hclge_vport *vport = hclge_get_vport(handle);
2505 struct hclge_dev *hdev = vport->back;
2506
22f48e24
JS
2507 if (!hdev->hw.mac.support_autoneg) {
2508 if (enable) {
2509 dev_err(&hdev->pdev->dev,
2510 "autoneg is not supported by current port\n");
2511 return -EOPNOTSUPP;
2512 } else {
2513 return 0;
2514 }
2515 }
2516
46a3df9f
S
2517 return hclge_set_autoneg_en(hdev, enable);
2518}
2519
2520static int hclge_get_autoneg(struct hnae3_handle *handle)
2521{
2522 struct hclge_vport *vport = hclge_get_vport(handle);
2523 struct hclge_dev *hdev = vport->back;
27b5bf49
FL
2524 struct phy_device *phydev = hdev->hw.mac.phydev;
2525
2526 if (phydev)
2527 return phydev->autoneg;
46a3df9f
S
2528
2529 return hdev->hw.mac.autoneg;
2530}
2531
22f48e24
JS
2532static int hclge_restart_autoneg(struct hnae3_handle *handle)
2533{
2534 struct hclge_vport *vport = hclge_get_vport(handle);
2535 struct hclge_dev *hdev = vport->back;
2536 int ret;
2537
2538 dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2539
2540 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2541 if (ret)
2542 return ret;
2543 return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2544}
2545
7786a996
JS
2546static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2547{
2548 struct hclge_vport *vport = hclge_get_vport(handle);
2549 struct hclge_dev *hdev = vport->back;
2550
2551 if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2552 return hclge_set_autoneg_en(hdev, !halt);
2553
2554 return 0;
2555}
2556
7e6ec914
JS
2557static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2558{
2559 struct hclge_config_fec_cmd *req;
2560 struct hclge_desc desc;
2561 int ret;
2562
2563 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2564
2565 req = (struct hclge_config_fec_cmd *)desc.data;
2566 if (fec_mode & BIT(HNAE3_FEC_AUTO))
2567 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2568 if (fec_mode & BIT(HNAE3_FEC_RS))
2569 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2570 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2571 if (fec_mode & BIT(HNAE3_FEC_BASER))
2572 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2573 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2574
2575 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2576 if (ret)
2577 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2578
2579 return ret;
2580}
2581
2582static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2583{
2584 struct hclge_vport *vport = hclge_get_vport(handle);
2585 struct hclge_dev *hdev = vport->back;
2586 struct hclge_mac *mac = &hdev->hw.mac;
2587 int ret;
2588
2589 if (fec_mode && !(mac->fec_ability & fec_mode)) {
2590 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2591 return -EINVAL;
2592 }
2593
2594 ret = hclge_set_fec_hw(hdev, fec_mode);
2595 if (ret)
2596 return ret;
2597
2598 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2599 return 0;
2600}
2601
2602static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2603 u8 *fec_mode)
2604{
2605 struct hclge_vport *vport = hclge_get_vport(handle);
2606 struct hclge_dev *hdev = vport->back;
2607 struct hclge_mac *mac = &hdev->hw.mac;
2608
2609 if (fec_ability)
2610 *fec_ability = mac->fec_ability;
2611 if (fec_mode)
2612 *fec_mode = mac->fec_mode;
2613}
2614
46a3df9f
S
2615static int hclge_mac_init(struct hclge_dev *hdev)
2616{
2617 struct hclge_mac *mac = &hdev->hw.mac;
2618 int ret;
2619
5d497936 2620 hdev->support_sfp_query = true;
2d03eacc
YL
2621 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2622 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2623 hdev->hw.mac.duplex);
60df7e91 2624 if (ret)
46a3df9f 2625 return ret;
46a3df9f 2626
d736fc6c
JS
2627 if (hdev->hw.mac.support_autoneg) {
2628 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
60df7e91 2629 if (ret)
d736fc6c 2630 return ret;
d736fc6c
JS
2631 }
2632
46a3df9f
S
2633 mac->link = 0;
2634
7e6ec914
JS
2635 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2636 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
60df7e91 2637 if (ret)
7e6ec914 2638 return ret;
7e6ec914
JS
2639 }
2640
e6d7d79d
YL
2641 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2642 if (ret) {
2643 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2644 return ret;
2645 }
f9fd82a9 2646
1cbc662d
YM
2647 ret = hclge_set_default_loopback(hdev);
2648 if (ret)
2649 return ret;
2650
e6d7d79d 2651 ret = hclge_buffer_alloc(hdev);
3f639907 2652 if (ret)
f9fd82a9 2653 dev_err(&hdev->pdev->dev,
e6d7d79d 2654 "allocate buffer fail, ret=%d\n", ret);
f9fd82a9 2655
3f639907 2656 return ret;
46a3df9f
S
2657}
2658
c1a81619
SM
2659static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2660{
1c6dfe6f 2661 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
18e24888 2662 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
1c6dfe6f 2663 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
0ea68902 2664 hclge_wq, &hdev->service_task, 0);
c1a81619
SM
2665}
2666
cb1b9f77
SM
2667static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2668{
acfc3d55
HT
2669 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2670 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
1c6dfe6f 2671 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
0ea68902 2672 hclge_wq, &hdev->service_task, 0);
cb1b9f77
SM
2673}
2674
ed8fb4b2 2675void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
46a3df9f 2676{
d5432455
GL
2677 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2678 !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
08125454 2679 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
0ea68902 2680 hclge_wq, &hdev->service_task,
ed8fb4b2 2681 delay_time);
46a3df9f
S
2682}
2683
2684static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2685{
d44f9b63 2686 struct hclge_link_status_cmd *req;
46a3df9f
S
2687 struct hclge_desc desc;
2688 int link_status;
2689 int ret;
2690
2691 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2692 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2693 if (ret) {
2694 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2695 ret);
2696 return ret;
2697 }
2698
d44f9b63 2699 req = (struct hclge_link_status_cmd *)desc.data;
c79301d8 2700 link_status = req->status & HCLGE_LINK_STATUS_UP_M;
46a3df9f
S
2701
2702 return !!link_status;
2703}
2704
2705static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2706{
ebaf1908 2707 unsigned int mac_state;
46a3df9f
S
2708 int link_stat;
2709
582d37bb
PL
2710 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2711 return 0;
2712
46a3df9f
S
2713 mac_state = hclge_get_mac_link_status(hdev);
2714
2715 if (hdev->hw.mac.phydev) {
fd813314 2716 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
46a3df9f
S
2717 link_stat = mac_state &
2718 hdev->hw.mac.phydev->link;
2719 else
2720 link_stat = 0;
2721
2722 } else {
2723 link_stat = mac_state;
2724 }
2725
2726 return !!link_stat;
2727}
2728
2729static void hclge_update_link_status(struct hclge_dev *hdev)
2730{
45e92b7e 2731 struct hnae3_client *rclient = hdev->roce_client;
46a3df9f 2732 struct hnae3_client *client = hdev->nic_client;
45e92b7e 2733 struct hnae3_handle *rhandle;
46a3df9f
S
2734 struct hnae3_handle *handle;
2735 int state;
2736 int i;
2737
2738 if (!client)
2739 return;
1c6dfe6f
YL
2740
2741 if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2742 return;
2743
46a3df9f
S
2744 state = hclge_get_mac_phy_link(hdev);
2745 if (state != hdev->hw.mac.link) {
2746 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2747 handle = &hdev->vport[i].nic;
2748 client->ops->link_status_change(handle, state);
a6345787 2749 hclge_config_mac_tnl_int(hdev, state);
45e92b7e
PL
2750 rhandle = &hdev->vport[i].roce;
2751 if (rclient && rclient->ops->link_status_change)
2752 rclient->ops->link_status_change(rhandle,
2753 state);
46a3df9f
S
2754 }
2755 hdev->hw.mac.link = state;
2756 }
1c6dfe6f
YL
2757
2758 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
46a3df9f
S
2759}
2760
88d10bd6
JS
2761static void hclge_update_port_capability(struct hclge_mac *mac)
2762{
f438bfe9
JS
2763 /* update fec ability by speed */
2764 hclge_convert_setting_fec(mac);
2765
88d10bd6
JS
2766 /* firmware cannot identify the backplane type; the media type
2767 * read from the configuration helps to handle it
2768 */
2769 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2770 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2771 mac->module_type = HNAE3_MODULE_TYPE_KR;
2772 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2773 mac->module_type = HNAE3_MODULE_TYPE_TP;
2774
db4d3d55 2775 if (mac->support_autoneg) {
88d10bd6
JS
2776 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2777 linkmode_copy(mac->advertising, mac->supported);
2778 } else {
2779 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2780 mac->supported);
2781 linkmode_zero(mac->advertising);
2782 }
2783}
2784
5d497936
PL
2785static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2786{
63cbf7a9 2787 struct hclge_sfp_info_cmd *resp;
5d497936
PL
2788 struct hclge_desc desc;
2789 int ret;
2790
88d10bd6
JS
2791 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2792 resp = (struct hclge_sfp_info_cmd *)desc.data;
5d497936
PL
2793 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2794 if (ret == -EOPNOTSUPP) {
2795 dev_warn(&hdev->pdev->dev,
2796 "IMP do not support get SFP speed %d\n", ret);
2797 return ret;
2798 } else if (ret) {
2799 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2800 return ret;
2801 }
2802
88d10bd6 2803 *speed = le32_to_cpu(resp->speed);
5d497936
PL
2804
2805 return 0;
2806}
2807
88d10bd6 2808static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
46a3df9f 2809{
88d10bd6
JS
2810 struct hclge_sfp_info_cmd *resp;
2811 struct hclge_desc desc;
46a3df9f
S
2812 int ret;
2813
88d10bd6
JS
2814 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2815 resp = (struct hclge_sfp_info_cmd *)desc.data;
2816
2817 resp->query_type = QUERY_ACTIVE_SPEED;
2818
2819 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2820 if (ret == -EOPNOTSUPP) {
2821 dev_warn(&hdev->pdev->dev,
2822 "IMP does not support get SFP info %d\n", ret);
2823 return ret;
2824 } else if (ret) {
2825 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2826 return ret;
2827 }
2828
2af8cb61
GL
2829 /* In some cases, the MAC speed fetched from the IMP may be 0; it
2830 * should not be written to mac->speed.
2831 */
2832 if (!le32_to_cpu(resp->speed))
2833 return 0;
2834
88d10bd6
JS
2835 mac->speed = le32_to_cpu(resp->speed);
2836 /* if resp->speed_ability is 0, it means it's an old version
2837 * firmware, do not update these params
46a3df9f 2838 */
88d10bd6
JS
2839 if (resp->speed_ability) {
2840 mac->module_type = le32_to_cpu(resp->module_type);
2841 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2842 mac->autoneg = resp->autoneg;
2843 mac->support_autoneg = resp->autoneg_ability;
49b12556 2844 mac->speed_type = QUERY_ACTIVE_SPEED;
f438bfe9
JS
2845 if (!resp->active_fec)
2846 mac->fec_mode = 0;
2847 else
2848 mac->fec_mode = BIT(resp->active_fec);
88d10bd6
JS
2849 } else {
2850 mac->speed_type = QUERY_SFP_SPEED;
2851 }
2852
2853 return 0;
2854}
2855
2856static int hclge_update_port_info(struct hclge_dev *hdev)
2857{
2858 struct hclge_mac *mac = &hdev->hw.mac;
2859 int speed = HCLGE_MAC_SPEED_UNKNOWN;
2860 int ret;
2861
2862 /* get the port info from SFP cmd if not copper port */
2863 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
46a3df9f
S
2864 return 0;
2865
88d10bd6 2866 /* if IMP does not support get SFP/qSFP info, return directly */
5d497936
PL
2867 if (!hdev->support_sfp_query)
2868 return 0;
46a3df9f 2869
88d10bd6
JS
2870 if (hdev->pdev->revision >= 0x21)
2871 ret = hclge_get_sfp_info(hdev, mac);
2872 else
2873 ret = hclge_get_sfp_speed(hdev, &speed);
2874
5d497936
PL
2875 if (ret == -EOPNOTSUPP) {
2876 hdev->support_sfp_query = false;
2877 return ret;
2878 } else if (ret) {
2d03eacc 2879 return ret;
46a3df9f
S
2880 }
2881
88d10bd6
JS
2882 if (hdev->pdev->revision >= 0x21) {
2883 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2884 hclge_update_port_capability(mac);
2885 return 0;
2886 }
2887 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2888 HCLGE_MAC_FULL);
2889 } else {
2890 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2891 return 0; /* do nothing if no SFP */
46a3df9f 2892
88d10bd6
JS
2893 /* must config full duplex for SFP */
2894 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2895 }
46a3df9f
S
2896}
2897
2898static int hclge_get_status(struct hnae3_handle *handle)
2899{
2900 struct hclge_vport *vport = hclge_get_vport(handle);
2901 struct hclge_dev *hdev = vport->back;
2902
2903 hclge_update_link_status(hdev);
2904
2905 return hdev->hw.mac.link;
2906}
2907
6430f744
YM
2908static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
2909{
60df7e91 2910 if (!pci_num_vf(hdev->pdev)) {
6430f744
YM
2911 dev_err(&hdev->pdev->dev,
2912 "SRIOV is disabled, can not get vport(%d) info.\n", vf);
2913 return NULL;
2914 }
2915
2916 if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
2917 dev_err(&hdev->pdev->dev,
2918 "vf id(%d) is out of range(0 <= vfid < %d)\n",
2919 vf, pci_num_vf(hdev->pdev));
2920 return NULL;
2921 }
2922
2923 /* VF start from 1 in vport */
2924 vf += HCLGE_VF_VPORT_START_NUM;
2925 return &hdev->vport[vf];
2926}
2927
2928static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
2929 struct ifla_vf_info *ivf)
2930{
2931 struct hclge_vport *vport = hclge_get_vport(handle);
2932 struct hclge_dev *hdev = vport->back;
2933
2934 vport = hclge_get_vf_vport(hdev, vf);
2935 if (!vport)
2936 return -EINVAL;
2937
2938 ivf->vf = vf;
2939 ivf->linkstate = vport->vf_info.link_state;
22044f95 2940 ivf->spoofchk = vport->vf_info.spoofchk;
e196ec75 2941 ivf->trusted = vport->vf_info.trusted;
ee9e4424
YL
2942 ivf->min_tx_rate = 0;
2943 ivf->max_tx_rate = vport->vf_info.max_tx_rate;
89b40c7f
HT
2944 ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
2945 ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
2946 ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
6430f744
YM
2947 ether_addr_copy(ivf->mac, vport->vf_info.mac);
2948
2949 return 0;
2950}
2951
2952static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
2953 int link_state)
2954{
2955 struct hclge_vport *vport = hclge_get_vport(handle);
2956 struct hclge_dev *hdev = vport->back;
2957
2958 vport = hclge_get_vf_vport(hdev, vf);
2959 if (!vport)
2960 return -EINVAL;
2961
2962 vport->vf_info.link_state = link_state;
2963
2964 return 0;
2965}
2966
ca1d7669
SM
2967static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2968{
5705b451 2969 u32 cmdq_src_reg, msix_src_reg;
ca1d7669
SM
2970
2971 /* fetch the events from their corresponding regs */
c1a81619 2972 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
5705b451 2973 msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
c1a81619
SM
2974
2975 /* Assumption: if reset and mailbox events happen to be reported
2976 * together, we only process the reset event in this pass and defer
2977 * the processing of the mailbox events. Since we have not cleared the
2978 * RX CMDQ event this time, we will receive another interrupt from the
2979 * H/W just for the mailbox.
46ee7350
GL
2980 *
2981 * check for vector0 reset event sources
c1a81619 2982 */
5705b451 2983 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
6dd22bbc
HT
2984 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2985 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2986 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2987 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
f02eb82d 2988 hdev->rst_stats.imp_rst_cnt++;
6dd22bbc
HT
2989 return HCLGE_VECTOR0_EVENT_RST;
2990 }
2991
5705b451 2992 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
65e41e7e 2993 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
8d40854f 2994 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
ca1d7669
SM
2995 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2996 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
f02eb82d 2997 hdev->rst_stats.global_rst_cnt++;
ca1d7669
SM
2998 return HCLGE_VECTOR0_EVENT_RST;
2999 }
3000
f6162d44 3001 /* check for vector0 msix event source */
147175c9 3002 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
9bc6ac91 3003 *clearval = msix_src_reg;
f6162d44 3004 return HCLGE_VECTOR0_EVENT_ERR;
147175c9 3005 }
f6162d44 3006
c1a81619
SM
3007 /* check for vector0 mailbox(=CMDQ RX) event source */
3008 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3009 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3010 *clearval = cmdq_src_reg;
3011 return HCLGE_VECTOR0_EVENT_MBX;
3012 }
ca1d7669 3013
147175c9 3014 /* print other vector0 event source */
9bc6ac91
HT
3015 dev_info(&hdev->pdev->dev,
3016 "CMDQ INT status:0x%x, other INT status:0x%x\n",
3017 cmdq_src_reg, msix_src_reg);
3018 *clearval = msix_src_reg;
3019
ca1d7669
SM
3020 return HCLGE_VECTOR0_EVENT_OTHER;
3021}
3022
3023static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3024 u32 regclr)
3025{
c1a81619
SM
3026 switch (event_type) {
3027 case HCLGE_VECTOR0_EVENT_RST:
ca1d7669 3028 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
c1a81619
SM
3029 break;
3030 case HCLGE_VECTOR0_EVENT_MBX:
3031 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3032 break;
fa7a4bd5
JS
3033 default:
3034 break;
c1a81619 3035 }
ca1d7669
SM
3036}
3037
8e52a602
XW
3038static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3039{
3040 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3041 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3042 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3043 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3044 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3045}
3046
466b0c00
L
3047static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3048{
3049 writel(enable ? 1 : 0, vector->addr);
3050}
3051
3052static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3053{
3054 struct hclge_dev *hdev = data;
ebaf1908 3055 u32 clearval = 0;
ca1d7669 3056 u32 event_cause;
466b0c00
L
3057
3058 hclge_enable_vector(&hdev->misc_vector, false);
ca1d7669
SM
3059 event_cause = hclge_check_event_cause(hdev, &clearval);
3060
c1a81619 3061 /* vector 0 interrupt is shared with reset and mailbox source events. */
ca1d7669 3062 switch (event_cause) {
f6162d44
SM
3063 case HCLGE_VECTOR0_EVENT_ERR:
3064 /* we do not know what type of reset is required now. This could
3065 * only be decided after we fetch the type of errors which
3066 * caused this event. Therefore, we will do below for now:
3067 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
3068 * have deferred the type of reset to be used.
3069 * 2. Schedule the reset service task.
3070 * 3. When service task receives HNAE3_UNKNOWN_RESET type it
3071 * will fetch the correct type of reset. This would be done
3072 * by first decoding the types of errors.
3073 */
3074 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3075 /* fall through */
ca1d7669 3076 case HCLGE_VECTOR0_EVENT_RST:
cb1b9f77 3077 hclge_reset_task_schedule(hdev);
ca1d7669 3078 break;
c1a81619
SM
3079 case HCLGE_VECTOR0_EVENT_MBX:
3080 /* If we are here then either:
3081 * 1. we are not handling any mbx task and no mbx task is
3082 * scheduled either,
3083 * or
3084 * 2. we are handling a mbx task but nothing more is
3085 * scheduled.
3086 * In both cases, we should schedule the mbx task as there are
3087 * more mbx messages reported by this interrupt.
3088 */
3089 hclge_mbx_task_schedule(hdev);
f0ad97ac 3090 break;
ca1d7669 3091 default:
f0ad97ac
YL
3092 dev_warn(&hdev->pdev->dev,
3093 "received unknown or unhandled event of vector0\n");
ca1d7669
SM
3094 break;
3095 }
3096
72e2fb07
HT
3097 hclge_clear_event_cause(hdev, event_cause, clearval);
3098
3099 /* Enable the interrupt if it is not caused by reset. When clearval
3100 * equals 0, the interrupt status may have been cleared by hardware
3101 * before the driver read the status register; in this case the
3102 * vector0 interrupt should also be enabled.
3103 */
9bc6ac91
HT
3104 if (!clearval ||
3105 event_cause == HCLGE_VECTOR0_EVENT_MBX) {
cd8c5c26
YL
3106 hclge_enable_vector(&hdev->misc_vector, true);
3107 }
466b0c00
L
3108
3109 return IRQ_HANDLED;
3110}
3111
3112static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3113{
36cbbdf6
PL
3114 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3115 dev_warn(&hdev->pdev->dev,
3116 "vector(vector_id %d) has been freed.\n", vector_id);
3117 return;
3118 }
3119
466b0c00
L
3120 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3121 hdev->num_msi_left += 1;
3122 hdev->num_msi_used -= 1;
3123}
3124
3125static void hclge_get_misc_vector(struct hclge_dev *hdev)
3126{
3127 struct hclge_misc_vector *vector = &hdev->misc_vector;
3128
3129 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3130
3131 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3132 hdev->vector_status[0] = 0;
3133
3134 hdev->num_msi_left -= 1;
3135 hdev->num_msi_used += 1;
3136}
3137
08125454
YL
3138static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3139 const cpumask_t *mask)
3140{
3141 struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3142 affinity_notify);
3143
3144 cpumask_copy(&hdev->affinity_mask, mask);
3145}
3146
3147static void hclge_irq_affinity_release(struct kref *ref)
3148{
3149}
3150
3151static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3152{
3153 irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3154 &hdev->affinity_mask);
3155
3156 hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3157 hdev->affinity_notify.release = hclge_irq_affinity_release;
3158 irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3159 &hdev->affinity_notify);
3160}
3161
3162static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3163{
3164 irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3165 irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3166}
3167
466b0c00
L
3168static int hclge_misc_irq_init(struct hclge_dev *hdev)
3169{
3170 int ret;
3171
3172 hclge_get_misc_vector(hdev);
3173
ca1d7669 3174 /* this would be explicitly freed in the end */
f97c4d82
YL
3175 snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3176 HCLGE_NAME, pci_name(hdev->pdev));
ca1d7669 3177 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
f97c4d82 3178 0, hdev->misc_vector.name, hdev);
466b0c00
L
3179 if (ret) {
3180 hclge_free_vector(hdev, 0);
3181 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3182 hdev->misc_vector.vector_irq);
3183 }
3184
3185 return ret;
3186}
3187
ca1d7669
SM
3188static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3189{
3190 free_irq(hdev->misc_vector.vector_irq, hdev);
3191 hclge_free_vector(hdev, 0);
3192}
3193
af013903
HT
3194int hclge_notify_client(struct hclge_dev *hdev,
3195 enum hnae3_reset_notify_type type)
4ed340ab
L
3196{
3197 struct hnae3_client *client = hdev->nic_client;
3198 u16 i;
3199
9b2f3477 3200 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
bd9109c9
HT
3201 return 0;
3202
4ed340ab
L
3203 if (!client->ops->reset_notify)
3204 return -EOPNOTSUPP;
3205
3206 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3207 struct hnae3_handle *handle = &hdev->vport[i].nic;
3208 int ret;
3209
3210 ret = client->ops->reset_notify(handle, type);
65e41e7e
HT
3211 if (ret) {
3212 dev_err(&hdev->pdev->dev,
3213 "notify nic client failed %d(%d)\n", type, ret);
4ed340ab 3214 return ret;
65e41e7e 3215 }
4ed340ab
L
3216 }
3217
3218 return 0;
3219}
3220
f403a84f
HT
3221static int hclge_notify_roce_client(struct hclge_dev *hdev,
3222 enum hnae3_reset_notify_type type)
3223{
3224 struct hnae3_client *client = hdev->roce_client;
3225 int ret = 0;
3226 u16 i;
3227
9b2f3477 3228 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
f403a84f
HT
3229 return 0;
3230
3231 if (!client->ops->reset_notify)
3232 return -EOPNOTSUPP;
3233
3234 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3235 struct hnae3_handle *handle = &hdev->vport[i].roce;
3236
3237 ret = client->ops->reset_notify(handle, type);
3238 if (ret) {
3239 dev_err(&hdev->pdev->dev,
3240 "notify roce client failed %d(%d)",
3241 type, ret);
3242 return ret;
3243 }
3244 }
3245
3246 return ret;
3247}
3248
4ed340ab
L
3249static int hclge_reset_wait(struct hclge_dev *hdev)
3250{
3251#define HCLGE_RESET_WATI_MS 100
5bb784e9
HT
3252#define HCLGE_RESET_WAIT_CNT 350
3253
4ed340ab
L
3254 u32 val, reg, reg_bit;
3255 u32 cnt = 0;
3256
3257 switch (hdev->reset_type) {
6dd22bbc
HT
3258 case HNAE3_IMP_RESET:
3259 reg = HCLGE_GLOBAL_RESET_REG;
3260 reg_bit = HCLGE_IMP_RESET_BIT;
3261 break;
4ed340ab
L
3262 case HNAE3_GLOBAL_RESET:
3263 reg = HCLGE_GLOBAL_RESET_REG;
3264 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3265 break;
4ed340ab
L
3266 case HNAE3_FUNC_RESET:
3267 reg = HCLGE_FUN_RST_ING;
3268 reg_bit = HCLGE_FUN_RST_ING_B;
3269 break;
3270 default:
3271 dev_err(&hdev->pdev->dev,
3272 "Wait for unsupported reset type: %d\n",
3273 hdev->reset_type);
3274 return -EINVAL;
3275 }
3276
3277 val = hclge_read_dev(&hdev->hw, reg);
e4e87715 3278 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
4ed340ab
L
3279 msleep(HCLGE_RESET_WATI_MS);
3280 val = hclge_read_dev(&hdev->hw, reg);
3281 cnt++;
3282 }
3283
4ed340ab
L
3284 if (cnt >= HCLGE_RESET_WAIT_CNT) {
3285 dev_warn(&hdev->pdev->dev,
3286 "Wait for reset timeout: %d\n", hdev->reset_type);
3287 return -EBUSY;
3288 }
3289
3290 return 0;
3291}
3292
aa5c4f17
HT
3293static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3294{
3295 struct hclge_vf_rst_cmd *req;
3296 struct hclge_desc desc;
3297
3298 req = (struct hclge_vf_rst_cmd *)desc.data;
3299 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3300 req->dest_vfid = func_id;
3301
3302 if (reset)
3303 req->vf_rst = 0x1;
3304
3305 return hclge_cmd_send(&hdev->hw, &desc, 1);
3306}
3307
e511f17b 3308static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
aa5c4f17
HT
3309{
3310 int i;
3311
3312 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3313 struct hclge_vport *vport = &hdev->vport[i];
3314 int ret;
3315
3316 /* Send cmd to set/clear VF's FUNC_RST_ING */
3317 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3318 if (ret) {
3319 dev_err(&hdev->pdev->dev,
adcf738b 3320 "set vf(%u) rst failed %d!\n",
aa5c4f17
HT
3321 vport->vport_id, ret);
3322 return ret;
3323 }
3324
cc645dfa 3325 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
aa5c4f17
HT
3326 continue;
3327
3328 /* Inform VF to process the reset.
3329 * hclge_inform_reset_assert_to_vf may fail if VF
3330 * driver is not loaded.
3331 */
3332 ret = hclge_inform_reset_assert_to_vf(vport);
3333 if (ret)
3334 dev_warn(&hdev->pdev->dev,
adcf738b 3335 "inform reset to vf(%u) failed %d!\n",
aa5c4f17
HT
3336 vport->vport_id, ret);
3337 }
3338
3339 return 0;
3340}
3341
1c6dfe6f
YL
3342static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3343{
3344 if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3345 test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3346 test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3347 return;
3348
3349 hclge_mbx_handler(hdev);
3350
3351 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3352}
3353
c3106cac 3354static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
427a7bff
HT
3355{
3356 struct hclge_pf_rst_sync_cmd *req;
3357 struct hclge_desc desc;
3358 int cnt = 0;
3359 int ret;
3360
3361 req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3362 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3363
3364 do {
1c6dfe6f
YL
3365 /* the VF needs to bring its netdev down via mailbox during PF or FLR reset */
3366 hclge_mailbox_service_task(hdev);
3367
427a7bff
HT
3368 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3369 /* for compatibility with old firmware, wait
3370 * 100 ms for the VF to stop IO
3371 */
3372 if (ret == -EOPNOTSUPP) {
3373 msleep(HCLGE_RESET_SYNC_TIME);
c3106cac 3374 return;
427a7bff 3375 } else if (ret) {
c3106cac
HT
3376 dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3377 ret);
3378 return;
427a7bff 3379 } else if (req->all_vf_ready) {
c3106cac 3380 return;
427a7bff
HT
3381 }
3382 msleep(HCLGE_PF_RESET_SYNC_TIME);
3383 hclge_cmd_reuse_desc(&desc, true);
3384 } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3385
c3106cac 3386 dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
427a7bff
HT
3387}
3388
a83d2961
WL
3389void hclge_report_hw_error(struct hclge_dev *hdev,
3390 enum hnae3_hw_error_type type)
3391{
3392 struct hnae3_client *client = hdev->nic_client;
3393 u16 i;
3394
3395 if (!client || !client->ops->process_hw_error ||
3396 !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3397 return;
3398
3399 for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3400 client->ops->process_hw_error(&hdev->vport[i].nic, type);
3401}
3402
3403static void hclge_handle_imp_error(struct hclge_dev *hdev)
3404{
3405 u32 reg_val;
3406
3407 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3408 if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3409 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3410 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3411 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3412 }
3413
3414 if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3415 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3416 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3417 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3418 }
3419}
3420
2bfbd35d 3421int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
4ed340ab
L
3422{
3423 struct hclge_desc desc;
3424 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3425 int ret;
3426
3427 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
e4e87715 3428 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
4ed340ab
L
3429 req->fun_reset_vfid = func_id;
3430
3431 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3432 if (ret)
3433 dev_err(&hdev->pdev->dev,
3434 "send function reset cmd fail, status =%d\n", ret);
3435
3436 return ret;
3437}
3438
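/* Trigger the requested reset if no hardware reset is already in progress:
 * write the global reset bit for a global reset, or re-schedule the reset
 * task for a PF (function) reset.
 */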
f2f432f2 3439static void hclge_do_reset(struct hclge_dev *hdev)
4ed340ab 3440{
4f765d3e 3441 struct hnae3_handle *handle = &hdev->vport[0].nic;
4ed340ab
L
3442 struct pci_dev *pdev = hdev->pdev;
3443 u32 val;
3444
4f765d3e 3445 if (hclge_get_hw_reset_stat(handle)) {
8de91e92 3446 dev_info(&pdev->dev, "hardware reset not finish\n");
4f765d3e
HT
3447 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3448 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3449 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3450 return;
3451 }
3452
f2f432f2 3453 switch (hdev->reset_type) {
4ed340ab 3454 case HNAE3_GLOBAL_RESET:
8de91e92 3455 dev_info(&pdev->dev, "global reset requested\n");
4ed340ab 3456 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
e4e87715 3457 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
4ed340ab 3458 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
4ed340ab 3459 break;
4ed340ab 3460 case HNAE3_FUNC_RESET:
8de91e92 3461 dev_info(&pdev->dev, "PF reset requested\n");
cb1b9f77
SM
3462 /* schedule again to check later */
3463 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3464 hclge_reset_task_schedule(hdev);
4ed340ab
L
3465 break;
3466 default:
3467 dev_warn(&pdev->dev,
8de91e92 3468 "unsupported reset type: %d\n", hdev->reset_type);
4ed340ab
L
3469 break;
3470 }
3471}
3472
123297b7 3473static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
f2f432f2
SM
3474 unsigned long *addr)
3475{
3476 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
123297b7 3477 struct hclge_dev *hdev = ae_dev->priv;
f2f432f2 3478
f6162d44
SM
3479 /* first, resolve any unknown reset type to the known type(s) */
3480 if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
d9b81c96 3481 u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
5705b451 3482 HCLGE_MISC_VECTOR_INT_STS);
f6162d44
SM
3483 /* we will intentionally ignore any errors from this function
3484 * as we will end up in *some* reset request in any case
3485 */
d9b81c96
HT
3486 if (hclge_handle_hw_msix_error(hdev, addr))
3487 dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3488 msix_sts_reg);
3489
f6162d44
SM
3490 clear_bit(HNAE3_UNKNOWN_RESET, addr);
 3491 /* We deferred the clearing of the error event which caused
 3492 * the interrupt, since it was not possible to do that in
 3493 * interrupt context (and this is the reason we introduced
 3494 * the new UNKNOWN reset type). Now that the errors have been
 3495 * handled and cleared in hardware, we can safely enable
 3496 * interrupts. This is an exception to the norm.
3497 */
3498 hclge_enable_vector(&hdev->misc_vector, true);
3499 }
3500
f2f432f2 3501 /* return the highest priority reset level amongst all */
7cea834d
HT
3502 if (test_bit(HNAE3_IMP_RESET, addr)) {
3503 rst_level = HNAE3_IMP_RESET;
3504 clear_bit(HNAE3_IMP_RESET, addr);
3505 clear_bit(HNAE3_GLOBAL_RESET, addr);
7cea834d
HT
3506 clear_bit(HNAE3_FUNC_RESET, addr);
3507 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
f2f432f2 3508 rst_level = HNAE3_GLOBAL_RESET;
7cea834d 3509 clear_bit(HNAE3_GLOBAL_RESET, addr);
7cea834d
HT
3510 clear_bit(HNAE3_FUNC_RESET, addr);
3511 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
f2f432f2 3512 rst_level = HNAE3_FUNC_RESET;
7cea834d 3513 clear_bit(HNAE3_FUNC_RESET, addr);
6b9a97ee
HT
3514 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3515 rst_level = HNAE3_FLR_RESET;
3516 clear_bit(HNAE3_FLR_RESET, addr);
7cea834d 3517 }
f2f432f2 3518
0fdf4d30
HT
3519 if (hdev->reset_type != HNAE3_NONE_RESET &&
3520 rst_level < hdev->reset_type)
3521 return HNAE3_NONE_RESET;
3522
f2f432f2
SM
3523 return rst_level;
3524}
3525
cd8c5c26
YL
3526static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3527{
3528 u32 clearval = 0;
3529
3530 switch (hdev->reset_type) {
3531 case HNAE3_IMP_RESET:
3532 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3533 break;
3534 case HNAE3_GLOBAL_RESET:
3535 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3536 break;
cd8c5c26 3537 default:
cd8c5c26
YL
3538 break;
3539 }
3540
3541 if (!clearval)
3542 return;
3543
72e2fb07
HT
3544 /* For revision 0x20, the reset interrupt source
 3545 * can only be cleared after the hardware reset is done
3546 */
3547 if (hdev->pdev->revision == 0x20)
3548 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3549 clearval);
3550
cd8c5c26
YL
3551 hclge_enable_vector(&hdev->misc_vector, true);
3552}
3553
6b428b4f
HT
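/* Set or clear the software reset ready bit in the CSQ depth register,
 * which is used as a handshake with the hardware during reset.
 */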
3554static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3555{
3556 u32 reg_val;
3557
3558 reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3559 if (enable)
3560 reg_val |= HCLGE_NIC_SW_RST_RDY;
3561 else
3562 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3563
3564 hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3565}
3566
c7554dcd
HT
3567static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3568{
3569 int ret;
3570
3571 ret = hclge_set_all_vf_rst(hdev, true);
3572 if (ret)
3573 return ret;
3574
3575 hclge_func_reset_sync_vf(hdev);
3576
3577 return 0;
3578}
3579
35d93a30
HT
3580static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3581{
6dd22bbc 3582 u32 reg_val;
35d93a30
HT
3583 int ret = 0;
3584
3585 switch (hdev->reset_type) {
3586 case HNAE3_FUNC_RESET:
c7554dcd
HT
3587 ret = hclge_func_reset_notify_vf(hdev);
3588 if (ret)
3589 return ret;
427a7bff 3590
35d93a30
HT
3591 ret = hclge_func_reset_cmd(hdev, 0);
3592 if (ret) {
3593 dev_err(&hdev->pdev->dev,
141b95d5 3594 "asserting function reset fail %d!\n", ret);
35d93a30
HT
3595 return ret;
3596 }
3597
 3598 /* After performing PF reset, it is not necessary to do the
3599 * mailbox handling or send any command to firmware, because
3600 * any mailbox handling or command to firmware is only valid
3601 * after hclge_cmd_init is called.
3602 */
3603 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
f02eb82d 3604 hdev->rst_stats.pf_rst_cnt++;
35d93a30 3605 break;
6b9a97ee 3606 case HNAE3_FLR_RESET:
c7554dcd
HT
3607 ret = hclge_func_reset_notify_vf(hdev);
3608 if (ret)
3609 return ret;
6b9a97ee 3610 break;
6dd22bbc 3611 case HNAE3_IMP_RESET:
a83d2961 3612 hclge_handle_imp_error(hdev);
6dd22bbc
HT
3613 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3614 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3615 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3616 break;
35d93a30
HT
3617 default:
3618 break;
3619 }
3620
ada13ee3
HT
3621 /* inform hardware that preparatory work is done */
3622 msleep(HCLGE_RESET_SYNC_TIME);
6b428b4f 3623 hclge_reset_handshake(hdev, true);
35d93a30
HT
3624 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3625
3626 return ret;
3627}
3628
8e9eee78 3629static bool hclge_reset_err_handle(struct hclge_dev *hdev)
65e41e7e
HT
3630{
3631#define MAX_RESET_FAIL_CNT 5
65e41e7e
HT
3632
3633 if (hdev->reset_pending) {
3634 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3635 hdev->reset_pending);
3636 return true;
2336f19d
HT
3637 } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3638 HCLGE_RESET_INT_M) {
65e41e7e 3639 dev_info(&hdev->pdev->dev,
2336f19d 3640 "reset failed because new reset interrupt\n");
65e41e7e
HT
3641 hclge_clear_reset_cause(hdev);
3642 return false;
0ecf1f7b
HT
3643 } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3644 hdev->rst_stats.reset_fail_cnt++;
8e9eee78
HT
3645 set_bit(hdev->reset_type, &hdev->reset_pending);
3646 dev_info(&hdev->pdev->dev,
adcf738b 3647 "re-schedule reset task(%u)\n",
0ecf1f7b 3648 hdev->rst_stats.reset_fail_cnt);
8e9eee78 3649 return true;
65e41e7e
HT
3650 }
3651
3652 hclge_clear_reset_cause(hdev);
6b428b4f
HT
3653
 3654 /* recover the handshake status when the reset fails */
3655 hclge_reset_handshake(hdev, true);
3656
65e41e7e 3657 dev_err(&hdev->pdev->dev, "Reset fail!\n");
3d77d0cb
HT
3658
3659 hclge_dbg_dump_rst_info(hdev);
3660
d5432455
GL
3661 set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3662
65e41e7e
HT
3663 return false;
3664}
3665
72e2fb07
HT
3666static int hclge_set_rst_done(struct hclge_dev *hdev)
3667{
3668 struct hclge_pf_rst_done_cmd *req;
3669 struct hclge_desc desc;
648db051 3670 int ret;
72e2fb07
HT
3671
3672 req = (struct hclge_pf_rst_done_cmd *)desc.data;
3673 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3674 req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3675
648db051
HT
3676 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3677 /* To be compatible with the old firmware, which does not support
3678 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
3679 * return success
3680 */
3681 if (ret == -EOPNOTSUPP) {
3682 dev_warn(&hdev->pdev->dev,
3683 "current firmware does not support command(0x%x)!\n",
3684 HCLGE_OPC_PF_RST_DONE);
3685 return 0;
3686 } else if (ret) {
3687 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3688 ret);
3689 }
3690
3691 return ret;
72e2fb07
HT
3692}
3693
aa5c4f17
HT
3694static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3695{
3696 int ret = 0;
3697
3698 switch (hdev->reset_type) {
3699 case HNAE3_FUNC_RESET:
6b9a97ee
HT
3700 /* fall through */
3701 case HNAE3_FLR_RESET:
aa5c4f17
HT
3702 ret = hclge_set_all_vf_rst(hdev, false);
3703 break;
72e2fb07
HT
3704 case HNAE3_GLOBAL_RESET:
3705 /* fall through */
3706 case HNAE3_IMP_RESET:
3707 ret = hclge_set_rst_done(hdev);
3708 break;
aa5c4f17
HT
3709 default:
3710 break;
3711 }
3712
6b428b4f
HT
 3713 /* clear the handshake status after re-initialization is done */
3714 hclge_reset_handshake(hdev, false);
3715
aa5c4f17
HT
3716 return ret;
3717}
3718
63cbf7a9
YM
3719static int hclge_reset_stack(struct hclge_dev *hdev)
3720{
3721 int ret;
3722
3723 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3724 if (ret)
3725 return ret;
3726
3727 ret = hclge_reset_ae_dev(hdev->ae_dev);
3728 if (ret)
3729 return ret;
3730
039ba863 3731 return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
63cbf7a9
YM
3732}
3733
d4fa0656 3734static int hclge_reset_prepare(struct hclge_dev *hdev)
f2f432f2 3735{
65e41e7e 3736 int ret;
9de0b86f 3737
f02eb82d 3738 hdev->rst_stats.reset_cnt++;
f2f432f2 3739 /* perform reset of the stack & ae device for a client */
65e41e7e
HT
3740 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3741 if (ret)
d4fa0656 3742 return ret;
65e41e7e 3743
6d4fab39 3744 rtnl_lock();
65e41e7e 3745 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
65e41e7e 3746 rtnl_unlock();
65e41e7e 3747 if (ret)
d4fa0656 3748 return ret;
cd8c5c26 3749
d4fa0656
HT
3750 return hclge_reset_prepare_wait(hdev);
3751}
3752
3753static int hclge_reset_rebuild(struct hclge_dev *hdev)
3754{
3755 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3756 enum hnae3_reset_type reset_level;
3757 int ret;
f2f432f2 3758
f02eb82d
HT
3759 hdev->rst_stats.hw_reset_done_cnt++;
3760
65e41e7e
HT
3761 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3762 if (ret)
d4fa0656 3763 return ret;
65e41e7e
HT
3764
3765 rtnl_lock();
63cbf7a9 3766 ret = hclge_reset_stack(hdev);
d4fa0656 3767 rtnl_unlock();
1f609492 3768 if (ret)
d4fa0656 3769 return ret;
1f609492 3770
65e41e7e
HT
3771 hclge_clear_reset_cause(hdev);
3772
63cbf7a9
YM
3773 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3774 /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
3775 * times
3776 */
0ecf1f7b
HT
3777 if (ret &&
3778 hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
d4fa0656 3779 return ret;
63cbf7a9 3780
60c800c6
YM
3781 ret = hclge_reset_prepare_up(hdev);
3782 if (ret)
3783 return ret;
3784
63cbf7a9 3785 rtnl_lock();
65e41e7e 3786 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
6d4fab39 3787 rtnl_unlock();
d4fa0656
HT
3788 if (ret)
3789 return ret;
f403a84f 3790
65e41e7e
HT
3791 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3792 if (ret)
d4fa0656 3793 return ret;
65e41e7e 3794
b644a8d4 3795 hdev->last_reset_time = jiffies;
0ecf1f7b 3796 hdev->rst_stats.reset_fail_cnt = 0;
f02eb82d 3797 hdev->rst_stats.reset_done_cnt++;
d5432455 3798 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
012fcb52
HT
3799
3800 /* if default_reset_request has a higher level reset request,
 3801 * it should be handled as soon as possible, since some errors
 3802 * need this kind of reset to be fixed.
3803 */
525a294e
HT
3804 reset_level = hclge_get_reset_level(ae_dev,
3805 &hdev->default_reset_request);
3806 if (reset_level != HNAE3_NONE_RESET)
3807 set_bit(reset_level, &hdev->reset_request);
b644a8d4 3808
d4fa0656
HT
3809 return 0;
3810}
3811
3812static void hclge_reset(struct hclge_dev *hdev)
3813{
3814 if (hclge_reset_prepare(hdev))
3815 goto err_reset;
3816
3817 if (hclge_reset_wait(hdev))
3818 goto err_reset;
3819
3820 if (hclge_reset_rebuild(hdev))
3821 goto err_reset;
3822
65e41e7e
HT
3823 return;
3824
65e41e7e 3825err_reset:
8e9eee78 3826 if (hclge_reset_err_handle(hdev))
65e41e7e 3827 hclge_reset_task_schedule(hdev);
f2f432f2
SM
3828}
3829
6ae4e733
SJ
3830static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3831{
3832 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3833 struct hclge_dev *hdev = ae_dev->priv;
3834
 3835 /* We might end up getting called broadly because of the two cases below:
3836 * 1. Recoverable error was conveyed through APEI and only way to bring
3837 * normalcy is to reset.
3838 * 2. A new reset request from the stack due to timeout
3839 *
 3840 * For the first case, the error event might not have an ae handle available.
 3841 * Check if this is a new reset request and we are not here just because the
6d4c3981
SM
3842 * last reset attempt did not succeed and watchdog hit us again. We will
 3843 * know this if the last reset request did not occur very recently (watchdog
 3844 * timer = 5*HZ, let us check after a sufficiently long time, say 4*5*HZ).
3845 * In case of new request we reset the "reset level" to PF reset.
9de0b86f
HT
3846 * And if it is a repeat reset request of the most recent one then we
3847 * want to make sure we throttle the reset request. Therefore, we will
3848 * not allow it again before 3*HZ times.
6d4c3981 3849 */
6ae4e733
SJ
3850 if (!handle)
3851 handle = &hdev->vport[0].nic;
3852
b37ce587 3853 if (time_before(jiffies, (hdev->last_reset_time +
012fcb52
HT
3854 HCLGE_RESET_INTERVAL))) {
3855 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
9de0b86f 3856 return;
db4d3d55 3857 } else if (hdev->default_reset_request) {
0742ed7c 3858 hdev->reset_level =
123297b7 3859 hclge_get_reset_level(ae_dev,
720bd583 3860 &hdev->default_reset_request);
db4d3d55 3861 } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
0742ed7c 3862 hdev->reset_level = HNAE3_FUNC_RESET;
db4d3d55 3863 }
4ed340ab 3864
96e65abb 3865 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
0742ed7c 3866 hdev->reset_level);
6d4c3981
SM
3867
3868 /* request reset & schedule reset task */
0742ed7c 3869 set_bit(hdev->reset_level, &hdev->reset_request);
6d4c3981
SM
3870 hclge_reset_task_schedule(hdev);
3871
0742ed7c
HT
3872 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3873 hdev->reset_level++;
4ed340ab
L
3874}
3875
720bd583
HT
3876static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3877 enum hnae3_reset_type rst_type)
3878{
3879 struct hclge_dev *hdev = ae_dev->priv;
3880
3881 set_bit(rst_type, &hdev->default_reset_request);
3882}
3883
65e41e7e
HT
3884static void hclge_reset_timer(struct timer_list *t)
3885{
3886 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3887
012fcb52
HT
3888 /* if default_reset_request has no value, it means that this reset
 3889 * request has already been handled, so just return here
3890 */
3891 if (!hdev->default_reset_request)
3892 return;
3893
65e41e7e 3894 dev_info(&hdev->pdev->dev,
e3b84ed2 3895 "triggering reset in reset timer\n");
65e41e7e
HT
3896 hclge_reset_event(hdev->pdev, NULL);
3897}
3898
4ed340ab
L
3899static void hclge_reset_subtask(struct hclge_dev *hdev)
3900{
123297b7
SJ
3901 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3902
f2f432f2
SM
3903 /* check if there is any ongoing reset in the hardware. This status can
 3904 * be checked from reset_pending. If there is one, then we need to wait
 3905 * for hardware to complete the reset.
 3906 * a. If we are able to figure out in reasonable time that hardware
 3907 * has fully reset, then we can proceed with driver and client
3908 * reset.
3909 * b. else, we can come back later to check this status so re-sched
3910 * now.
3911 */
0742ed7c 3912 hdev->last_reset_time = jiffies;
123297b7 3913 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
f2f432f2
SM
3914 if (hdev->reset_type != HNAE3_NONE_RESET)
3915 hclge_reset(hdev);
4ed340ab 3916
f2f432f2 3917 /* check if we got any *new* reset requests to be honored */
123297b7 3918 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
f2f432f2
SM
3919 if (hdev->reset_type != HNAE3_NONE_RESET)
3920 hclge_do_reset(hdev);
4ed340ab 3921
4ed340ab
L
3922 hdev->reset_type = HNAE3_NONE_RESET;
3923}
3924
1c6dfe6f 3925static void hclge_reset_service_task(struct hclge_dev *hdev)
466b0c00 3926{
1c6dfe6f
YL
3927 if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
3928 return;
cb1b9f77 3929
8627bded
HT
3930 down(&hdev->reset_sem);
3931 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
cb1b9f77 3932
4ed340ab 3933 hclge_reset_subtask(hdev);
cb1b9f77
SM
3934
3935 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
8627bded 3936 up(&hdev->reset_sem);
466b0c00
L
3937}
3938
a6d818e3
YL
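/* Clear the ALIVE flag of any VF vport that has been inactive for more
 * than 8 seconds, and restore its MPS to the default in that case.
 */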
3939static void hclge_update_vport_alive(struct hclge_dev *hdev)
3940{
3941 int i;
3942
 3943 /* start from vport 1 since the PF is always alive */
3944 for (i = 1; i < hdev->num_alloc_vport; i++) {
3945 struct hclge_vport *vport = &hdev->vport[i];
3946
3947 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3948 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
818f1675
YL
3949
3950 /* If vf is not alive, set to default value */
3951 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3952 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
a6d818e3
YL
3953 }
3954}
3955
1c6dfe6f 3956static void hclge_periodic_service_task(struct hclge_dev *hdev)
46a3df9f 3957{
1c6dfe6f 3958 unsigned long delta = round_jiffies_relative(HZ);
7be1b9f3 3959
1c6dfe6f
YL
3960 /* Always handle the link updating to make sure link state is
3961 * updated when it is triggered by mbx.
3962 */
3963 hclge_update_link_status(hdev);
ee4bcd3b 3964 hclge_sync_mac_table(hdev);
c631c696 3965 hclge_sync_promisc_mode(hdev);
46a3df9f 3966
1c6dfe6f
YL
3967 if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
3968 delta = jiffies - hdev->last_serv_processed;
3969
3970 if (delta < round_jiffies_relative(HZ)) {
3971 delta = round_jiffies_relative(HZ) - delta;
3972 goto out;
3973 }
c5f65480
JS
3974 }
3975
1c6dfe6f 3976 hdev->serv_processed_cnt++;
a6d818e3 3977 hclge_update_vport_alive(hdev);
1c6dfe6f
YL
3978
3979 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
3980 hdev->last_serv_processed = jiffies;
3981 goto out;
3982 }
3983
3984 if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
3985 hclge_update_stats_for_all(hdev);
3986
3987 hclge_update_port_info(hdev);
fe4144d4 3988 hclge_sync_vlan_filter(hdev);
db4d3d55 3989
1c6dfe6f 3990 if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
d93ed94f 3991 hclge_rfs_filter_expire(hdev);
7be1b9f3 3992
1c6dfe6f
YL
3993 hdev->last_serv_processed = jiffies;
3994
3995out:
3996 hclge_task_schedule(hdev, delta);
3997}
3998
3999static void hclge_service_task(struct work_struct *work)
4000{
4001 struct hclge_dev *hdev =
4002 container_of(work, struct hclge_dev, service_task.work);
4003
4004 hclge_reset_service_task(hdev);
4005 hclge_mailbox_service_task(hdev);
4006 hclge_periodic_service_task(hdev);
4007
 4008 /* Handle reset and mbx again in case the periodic task delays the
4009 * handling by calling hclge_task_schedule() in
4010 * hclge_periodic_service_task().
4011 */
4012 hclge_reset_service_task(hdev);
4013 hclge_mailbox_service_task(hdev);
46a3df9f
S
4014}
4015
46a3df9f
S
4016struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4017{
4018 /* VF handle has no client */
4019 if (!handle->client)
4020 return container_of(handle, struct hclge_vport, nic);
4021 else if (handle->client->type == HNAE3_CLIENT_ROCE)
4022 return container_of(handle, struct hclge_vport, roce);
4023 else
4024 return container_of(handle, struct hclge_vport, nic);
4025}
4026
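/* Allocate up to vector_num unused MSI-X vectors for this vport, filling
 * in the irq number and vector register address for each one, and return
 * how many were actually allocated.
 */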
4027static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4028 struct hnae3_vector_info *vector_info)
4029{
4030 struct hclge_vport *vport = hclge_get_vport(handle);
4031 struct hnae3_vector_info *vector = vector_info;
4032 struct hclge_dev *hdev = vport->back;
4033 int alloc = 0;
4034 int i, j;
4035
580a05f9 4036 vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
46a3df9f
S
4037 vector_num = min(hdev->num_msi_left, vector_num);
4038
4039 for (j = 0; j < vector_num; j++) {
4040 for (i = 1; i < hdev->num_msi; i++) {
4041 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4042 vector->vector = pci_irq_vector(hdev->pdev, i);
4043 vector->io_addr = hdev->hw.io_base +
4044 HCLGE_VECTOR_REG_BASE +
4045 (i - 1) * HCLGE_VECTOR_REG_OFFSET +
4046 vport->vport_id *
4047 HCLGE_VECTOR_VF_OFFSET;
4048 hdev->vector_status[i] = vport->vport_id;
887c3820 4049 hdev->vector_irq[i] = vector->vector;
46a3df9f
S
4050
4051 vector++;
4052 alloc++;
4053
4054 break;
4055 }
4056 }
4057 }
4058 hdev->num_msi_left -= alloc;
4059 hdev->num_msi_used += alloc;
4060
4061 return alloc;
4062}
4063
4064static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4065{
4066 int i;
4067
887c3820
SM
4068 for (i = 0; i < hdev->num_msi; i++)
4069 if (vector == hdev->vector_irq[i])
4070 return i;
4071
46a3df9f
S
4072 return -EINVAL;
4073}
4074
0d3e6631
YL
4075static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4076{
4077 struct hclge_vport *vport = hclge_get_vport(handle);
4078 struct hclge_dev *hdev = vport->back;
4079 int vector_id;
4080
4081 vector_id = hclge_get_vector_index(hdev, vector);
4082 if (vector_id < 0) {
4083 dev_err(&hdev->pdev->dev,
6f8e330d 4084 "Get vector index fail. vector = %d\n", vector);
0d3e6631
YL
4085 return vector_id;
4086 }
4087
4088 hclge_free_vector(hdev, vector_id);
4089
4090 return 0;
4091}
4092
46a3df9f
S
4093static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4094{
4095 return HCLGE_RSS_KEY_SIZE;
4096}
4097
4098static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
4099{
4100 return HCLGE_RSS_IND_TBL_SIZE;
4101}
4102
46a3df9f
S
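/* Program the RSS hash algorithm and hash key into hardware; the key is
 * written in chunks of HCLGE_RSS_HASH_KEY_NUM bytes, one command per chunk.
 */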
4103static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4104 const u8 hfunc, const u8 *key)
4105{
d44f9b63 4106 struct hclge_rss_config_cmd *req;
ebaf1908 4107 unsigned int key_offset = 0;
46a3df9f 4108 struct hclge_desc desc;
3caf772b 4109 int key_counts;
46a3df9f
S
4110 int key_size;
4111 int ret;
4112
3caf772b 4113 key_counts = HCLGE_RSS_KEY_SIZE;
d44f9b63 4114 req = (struct hclge_rss_config_cmd *)desc.data;
46a3df9f 4115
3caf772b 4116 while (key_counts) {
46a3df9f
S
4117 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4118 false);
4119
4120 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4121 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4122
3caf772b 4123 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
46a3df9f
S
4124 memcpy(req->hash_key,
4125 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4126
3caf772b
YM
4127 key_counts -= key_size;
4128 key_offset++;
46a3df9f
S
4129 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4130 if (ret) {
4131 dev_err(&hdev->pdev->dev,
4132 "Configure RSS config fail, status = %d\n",
4133 ret);
4134 return ret;
4135 }
4136 }
4137 return 0;
4138}
4139
89523cfa 4140static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
46a3df9f 4141{
d44f9b63 4142 struct hclge_rss_indirection_table_cmd *req;
46a3df9f
S
4143 struct hclge_desc desc;
4144 int i, j;
4145 int ret;
4146
d44f9b63 4147 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
46a3df9f
S
4148
4149 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
4150 hclge_cmd_setup_basic_desc
4151 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4152
a90bb9a5
YL
4153 req->start_table_index =
4154 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4155 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
46a3df9f
S
4156
4157 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
4158 req->rss_result[j] =
4159 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4160
4161 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4162 if (ret) {
4163 dev_err(&hdev->pdev->dev,
4164 "Configure rss indir table fail,status = %d\n",
4165 ret);
4166 return ret;
4167 }
4168 }
4169 return 0;
4170}
4171
4172static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4173 u16 *tc_size, u16 *tc_offset)
4174{
d44f9b63 4175 struct hclge_rss_tc_mode_cmd *req;
46a3df9f
S
4176 struct hclge_desc desc;
4177 int ret;
4178 int i;
4179
4180 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
d44f9b63 4181 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
46a3df9f
S
4182
4183 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
a90bb9a5
YL
4184 u16 mode = 0;
4185
e4e87715
PL
4186 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4187 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4188 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4189 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4190 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
a90bb9a5
YL
4191
4192 req->rss_tc_mode[i] = cpu_to_le16(mode);
46a3df9f
S
4193 }
4194
4195 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 4196 if (ret)
46a3df9f
S
4197 dev_err(&hdev->pdev->dev,
4198 "Configure rss tc mode fail, status = %d\n", ret);
46a3df9f 4199
3f639907 4200 return ret;
46a3df9f
S
4201}
4202
232fc64b
PL
4203static void hclge_get_rss_type(struct hclge_vport *vport)
4204{
4205 if (vport->rss_tuple_sets.ipv4_tcp_en ||
4206 vport->rss_tuple_sets.ipv4_udp_en ||
4207 vport->rss_tuple_sets.ipv4_sctp_en ||
4208 vport->rss_tuple_sets.ipv6_tcp_en ||
4209 vport->rss_tuple_sets.ipv6_udp_en ||
4210 vport->rss_tuple_sets.ipv6_sctp_en)
4211 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4212 else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4213 vport->rss_tuple_sets.ipv6_fragment_en)
4214 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4215 else
4216 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4217}
4218
46a3df9f
S
4219static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4220{
d44f9b63 4221 struct hclge_rss_input_tuple_cmd *req;
46a3df9f
S
4222 struct hclge_desc desc;
4223 int ret;
4224
4225 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4226
d44f9b63 4227 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
6f2af429
YL
4228
4229 /* Get the tuple cfg from pf */
4230 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4231 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4232 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4233 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4234 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4235 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4236 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4237 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
232fc64b 4238 hclge_get_rss_type(&hdev->vport[0]);
46a3df9f 4239 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 4240 if (ret)
46a3df9f
S
4241 dev_err(&hdev->pdev->dev,
4242 "Configure rss input fail, status = %d\n", ret);
3f639907 4243 return ret;
46a3df9f
S
4244}
4245
4246static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4247 u8 *key, u8 *hfunc)
4248{
4249 struct hclge_vport *vport = hclge_get_vport(handle);
46a3df9f
S
4250 int i;
4251
4252 /* Get hash algorithm */
775501a1
JS
4253 if (hfunc) {
4254 switch (vport->rss_algo) {
4255 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4256 *hfunc = ETH_RSS_HASH_TOP;
4257 break;
4258 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4259 *hfunc = ETH_RSS_HASH_XOR;
4260 break;
4261 default:
4262 *hfunc = ETH_RSS_HASH_UNKNOWN;
4263 break;
4264 }
4265 }
46a3df9f
S
4266
4267 /* Get the RSS Key required by the user */
4268 if (key)
4269 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4270
4271 /* Get indirect table */
4272 if (indir)
4273 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4274 indir[i] = vport->rss_indirection_tbl[i];
4275
4276 return 0;
4277}
4278
4279static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4280 const u8 *key, const u8 hfunc)
4281{
4282 struct hclge_vport *vport = hclge_get_vport(handle);
4283 struct hclge_dev *hdev = vport->back;
4284 u8 hash_algo;
4285 int ret, i;
4286
 4287 /* Set the RSS Hash Key if specified by the user */
4288 if (key) {
775501a1
JS
4289 switch (hfunc) {
4290 case ETH_RSS_HASH_TOP:
46a3df9f 4291 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
775501a1
JS
4292 break;
4293 case ETH_RSS_HASH_XOR:
4294 hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4295 break;
4296 case ETH_RSS_HASH_NO_CHANGE:
4297 hash_algo = vport->rss_algo;
4298 break;
4299 default:
46a3df9f 4300 return -EINVAL;
775501a1
JS
4301 }
4302
46a3df9f
S
4303 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4304 if (ret)
4305 return ret;
89523cfa
YL
4306
 4307 /* Update the shadow RSS key with the user specified key */
4308 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4309 vport->rss_algo = hash_algo;
46a3df9f
S
4310 }
4311
4312 /* Update the shadow RSS table with user specified qids */
4313 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4314 vport->rss_indirection_tbl[i] = indir[i];
4315
4316 /* Update the hardware */
89523cfa 4317 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
46a3df9f
S
4318}
4319
f7db940a
L
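/* Translate the ethtool RXH_* flags into the driver's tuple bits
 * (source/destination IP and L4 port, plus the vlan tag for SCTP flows).
 */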
4320static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4321{
4322 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4323
4324 if (nfc->data & RXH_L4_B_2_3)
4325 hash_sets |= HCLGE_D_PORT_BIT;
4326 else
4327 hash_sets &= ~HCLGE_D_PORT_BIT;
4328
4329 if (nfc->data & RXH_IP_SRC)
4330 hash_sets |= HCLGE_S_IP_BIT;
4331 else
4332 hash_sets &= ~HCLGE_S_IP_BIT;
4333
4334 if (nfc->data & RXH_IP_DST)
4335 hash_sets |= HCLGE_D_IP_BIT;
4336 else
4337 hash_sets &= ~HCLGE_D_IP_BIT;
4338
4339 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4340 hash_sets |= HCLGE_V_TAG_BIT;
4341
4342 return hash_sets;
4343}
4344
4345static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4346 struct ethtool_rxnfc *nfc)
4347{
4348 struct hclge_vport *vport = hclge_get_vport(handle);
4349 struct hclge_dev *hdev = vport->back;
4350 struct hclge_rss_input_tuple_cmd *req;
4351 struct hclge_desc desc;
4352 u8 tuple_sets;
4353 int ret;
4354
4355 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4356 RXH_L4_B_0_1 | RXH_L4_B_2_3))
4357 return -EINVAL;
4358
4359 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
6f2af429 4360 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
f7db940a 4361
6f2af429
YL
4362 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4363 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4364 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4365 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4366 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4367 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4368 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4369 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
f7db940a
L
4370
4371 tuple_sets = hclge_get_rss_hash_bits(nfc);
4372 switch (nfc->flow_type) {
4373 case TCP_V4_FLOW:
4374 req->ipv4_tcp_en = tuple_sets;
4375 break;
4376 case TCP_V6_FLOW:
4377 req->ipv6_tcp_en = tuple_sets;
4378 break;
4379 case UDP_V4_FLOW:
4380 req->ipv4_udp_en = tuple_sets;
4381 break;
4382 case UDP_V6_FLOW:
4383 req->ipv6_udp_en = tuple_sets;
4384 break;
4385 case SCTP_V4_FLOW:
4386 req->ipv4_sctp_en = tuple_sets;
4387 break;
4388 case SCTP_V6_FLOW:
4389 if ((nfc->data & RXH_L4_B_0_1) ||
4390 (nfc->data & RXH_L4_B_2_3))
4391 return -EINVAL;
4392
4393 req->ipv6_sctp_en = tuple_sets;
4394 break;
4395 case IPV4_FLOW:
4396 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4397 break;
4398 case IPV6_FLOW:
4399 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4400 break;
4401 default:
4402 return -EINVAL;
4403 }
4404
4405 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6f2af429 4406 if (ret) {
f7db940a
L
4407 dev_err(&hdev->pdev->dev,
4408 "Set rss tuple fail, status = %d\n", ret);
6f2af429
YL
4409 return ret;
4410 }
f7db940a 4411
6f2af429
YL
4412 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4413 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4414 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4415 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4416 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4417 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4418 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4419 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
232fc64b 4420 hclge_get_rss_type(vport);
6f2af429 4421 return 0;
f7db940a
L
4422}
4423
07d29954
L
4424static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4425 struct ethtool_rxnfc *nfc)
4426{
4427 struct hclge_vport *vport = hclge_get_vport(handle);
07d29954 4428 u8 tuple_sets;
07d29954
L
4429
4430 nfc->data = 0;
4431
07d29954
L
4432 switch (nfc->flow_type) {
4433 case TCP_V4_FLOW:
6f2af429 4434 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
07d29954
L
4435 break;
4436 case UDP_V4_FLOW:
6f2af429 4437 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
07d29954
L
4438 break;
4439 case TCP_V6_FLOW:
6f2af429 4440 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
07d29954
L
4441 break;
4442 case UDP_V6_FLOW:
6f2af429 4443 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
07d29954
L
4444 break;
4445 case SCTP_V4_FLOW:
6f2af429 4446 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
07d29954
L
4447 break;
4448 case SCTP_V6_FLOW:
6f2af429 4449 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
07d29954
L
4450 break;
4451 case IPV4_FLOW:
4452 case IPV6_FLOW:
4453 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4454 break;
4455 default:
4456 return -EINVAL;
4457 }
4458
4459 if (!tuple_sets)
4460 return 0;
4461
4462 if (tuple_sets & HCLGE_D_PORT_BIT)
4463 nfc->data |= RXH_L4_B_2_3;
4464 if (tuple_sets & HCLGE_S_PORT_BIT)
4465 nfc->data |= RXH_L4_B_0_1;
4466 if (tuple_sets & HCLGE_D_IP_BIT)
4467 nfc->data |= RXH_IP_DST;
4468 if (tuple_sets & HCLGE_S_IP_BIT)
4469 nfc->data |= RXH_IP_SRC;
4470
4471 return 0;
4472}
4473
46a3df9f
S
4474static int hclge_get_tc_size(struct hnae3_handle *handle)
4475{
4476 struct hclge_vport *vport = hclge_get_vport(handle);
4477 struct hclge_dev *hdev = vport->back;
4478
4479 return hdev->rss_size_max;
4480}
4481
77f255c1 4482int hclge_rss_init_hw(struct hclge_dev *hdev)
46a3df9f 4483{
46a3df9f 4484 struct hclge_vport *vport = hdev->vport;
268f5dfa
YL
4485 u8 *rss_indir = vport[0].rss_indirection_tbl;
4486 u16 rss_size = vport[0].alloc_rss_size;
354d0fab
PL
4487 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4488 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
268f5dfa
YL
4489 u8 *key = vport[0].rss_hash_key;
4490 u8 hfunc = vport[0].rss_algo;
46a3df9f 4491 u16 tc_valid[HCLGE_MAX_TC_NUM];
268f5dfa 4492 u16 roundup_size;
ebaf1908
WL
4493 unsigned int i;
4494 int ret;
68ece54e 4495
46a3df9f
S
4496 ret = hclge_set_rss_indir_table(hdev, rss_indir);
4497 if (ret)
268f5dfa 4498 return ret;
46a3df9f 4499
46a3df9f
S
4500 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4501 if (ret)
268f5dfa 4502 return ret;
46a3df9f
S
4503
4504 ret = hclge_set_rss_input_tuple(hdev);
4505 if (ret)
268f5dfa 4506 return ret;
46a3df9f 4507
68ece54e
YL
 4508 /* Each TC has the same queue size, and the tc_size set to hardware is
 4509 * the log2 of the roundup power of two of rss_size; the actual queue
 4510 * size is limited by the indirection table.
4511 */
4512 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4513 dev_err(&hdev->pdev->dev,
adcf738b 4514 "Configure rss tc size failed, invalid TC_SIZE = %u\n",
68ece54e 4515 rss_size);
268f5dfa 4516 return -EINVAL;
68ece54e
YL
4517 }
4518
4519 roundup_size = roundup_pow_of_two(rss_size);
4520 roundup_size = ilog2(roundup_size);
4521
46a3df9f 4522 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
68ece54e 4523 tc_valid[i] = 0;
46a3df9f 4524
68ece54e
YL
4525 if (!(hdev->hw_tc_map & BIT(i)))
4526 continue;
4527
4528 tc_valid[i] = 1;
4529 tc_size[i] = roundup_size;
4530 tc_offset[i] = rss_size * i;
46a3df9f 4531 }
68ece54e 4532
268f5dfa
YL
4533 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4534}
46a3df9f 4535
268f5dfa
YL
4536void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4537{
4538 struct hclge_vport *vport = hdev->vport;
4539 int i, j;
46a3df9f 4540
268f5dfa
YL
4541 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4542 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4543 vport[j].rss_indirection_tbl[i] =
4544 i % vport[j].alloc_rss_size;
4545 }
4546}
4547
4548static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4549{
472d7ece 4550 int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
268f5dfa 4551 struct hclge_vport *vport = hdev->vport;
472d7ece
JS
4552
4553 if (hdev->pdev->revision >= 0x21)
4554 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
268f5dfa 4555
268f5dfa
YL
4556 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4557 vport[i].rss_tuple_sets.ipv4_tcp_en =
4558 HCLGE_RSS_INPUT_TUPLE_OTHER;
4559 vport[i].rss_tuple_sets.ipv4_udp_en =
4560 HCLGE_RSS_INPUT_TUPLE_OTHER;
4561 vport[i].rss_tuple_sets.ipv4_sctp_en =
4562 HCLGE_RSS_INPUT_TUPLE_SCTP;
4563 vport[i].rss_tuple_sets.ipv4_fragment_en =
4564 HCLGE_RSS_INPUT_TUPLE_OTHER;
4565 vport[i].rss_tuple_sets.ipv6_tcp_en =
4566 HCLGE_RSS_INPUT_TUPLE_OTHER;
4567 vport[i].rss_tuple_sets.ipv6_udp_en =
4568 HCLGE_RSS_INPUT_TUPLE_OTHER;
4569 vport[i].rss_tuple_sets.ipv6_sctp_en =
4570 HCLGE_RSS_INPUT_TUPLE_SCTP;
4571 vport[i].rss_tuple_sets.ipv6_fragment_en =
4572 HCLGE_RSS_INPUT_TUPLE_OTHER;
4573
472d7ece 4574 vport[i].rss_algo = rss_algo;
ea739c90 4575
472d7ece
JS
4576 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4577 HCLGE_RSS_KEY_SIZE);
268f5dfa
YL
4578 }
4579
4580 hclge_rss_indir_init_cfg(hdev);
46a3df9f
S
4581}
4582
84e095d6
SM
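/* Map (en = true) or unmap (en = false) every ring in the chain to the
 * given vector, batching up to HCLGE_VECTOR_ELEMENTS_PER_CMD rings per
 * firmware command.
 */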
4583int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4584 int vector_id, bool en,
4585 struct hnae3_ring_chain_node *ring_chain)
46a3df9f
S
4586{
4587 struct hclge_dev *hdev = vport->back;
46a3df9f
S
4588 struct hnae3_ring_chain_node *node;
4589 struct hclge_desc desc;
37417c66
GL
4590 struct hclge_ctrl_vector_chain_cmd *req =
4591 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
84e095d6
SM
4592 enum hclge_cmd_status status;
4593 enum hclge_opcode_type op;
4594 u16 tqp_type_and_id;
46a3df9f
S
4595 int i;
4596
84e095d6
SM
4597 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4598 hclge_cmd_setup_basic_desc(&desc, op, false);
46a3df9f
S
4599 req->int_vector_id = vector_id;
4600
4601 i = 0;
4602 for (node = ring_chain; node; node = node->next) {
84e095d6 4603 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
e4e87715
PL
4604 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
4605 HCLGE_INT_TYPE_S,
4606 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4607 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4608 HCLGE_TQP_ID_S, node->tqp_index);
4609 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4610 HCLGE_INT_GL_IDX_S,
4611 hnae3_get_field(node->int_gl_idx,
4612 HNAE3_RING_GL_IDX_M,
4613 HNAE3_RING_GL_IDX_S));
84e095d6 4614 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
46a3df9f
S
4615 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4616 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
84e095d6 4617 req->vfid = vport->vport_id;
46a3df9f 4618
84e095d6
SM
4619 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4620 if (status) {
46a3df9f
S
4621 dev_err(&hdev->pdev->dev,
4622 "Map TQP fail, status is %d.\n",
84e095d6
SM
4623 status);
4624 return -EIO;
46a3df9f
S
4625 }
4626 i = 0;
4627
4628 hclge_cmd_setup_basic_desc(&desc,
84e095d6 4629 op,
46a3df9f
S
4630 false);
4631 req->int_vector_id = vector_id;
4632 }
4633 }
4634
4635 if (i > 0) {
4636 req->int_cause_num = i;
84e095d6
SM
4637 req->vfid = vport->vport_id;
4638 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4639 if (status) {
46a3df9f 4640 dev_err(&hdev->pdev->dev,
84e095d6
SM
4641 "Map TQP fail, status is %d.\n", status);
4642 return -EIO;
46a3df9f
S
4643 }
4644 }
4645
4646 return 0;
4647}
4648
9b2f3477 4649static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
84e095d6 4650 struct hnae3_ring_chain_node *ring_chain)
46a3df9f
S
4651{
4652 struct hclge_vport *vport = hclge_get_vport(handle);
4653 struct hclge_dev *hdev = vport->back;
4654 int vector_id;
4655
4656 vector_id = hclge_get_vector_index(hdev, vector);
4657 if (vector_id < 0) {
4658 dev_err(&hdev->pdev->dev,
7ab2b53e 4659 "failed to get vector index. vector=%d\n", vector);
46a3df9f
S
4660 return vector_id;
4661 }
4662
84e095d6 4663 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
46a3df9f
S
4664}
4665
9b2f3477 4666static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
84e095d6 4667 struct hnae3_ring_chain_node *ring_chain)
46a3df9f
S
4668{
4669 struct hclge_vport *vport = hclge_get_vport(handle);
4670 struct hclge_dev *hdev = vport->back;
84e095d6 4671 int vector_id, ret;
46a3df9f 4672
b50ae26c
PL
4673 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4674 return 0;
4675
46a3df9f
S
4676 vector_id = hclge_get_vector_index(hdev, vector);
4677 if (vector_id < 0) {
4678 dev_err(&handle->pdev->dev,
4679 "Get vector index fail. ret =%d\n", vector_id);
4680 return vector_id;
4681 }
4682
84e095d6 4683 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
0d3e6631 4684 if (ret)
84e095d6
SM
4685 dev_err(&handle->pdev->dev,
4686 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
9b2f3477 4687 vector_id, ret);
46a3df9f 4688
0d3e6631 4689 return ret;
46a3df9f
S
4690}
4691
e196ec75
JS
4692static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4693 struct hclge_promisc_param *param)
46a3df9f 4694{
d44f9b63 4695 struct hclge_promisc_cfg_cmd *req;
46a3df9f
S
4696 struct hclge_desc desc;
4697 int ret;
4698
4699 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4700
d44f9b63 4701 req = (struct hclge_promisc_cfg_cmd *)desc.data;
46a3df9f 4702 req->vf_id = param->vf_id;
96c0e861
PL
4703
4704 /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4705 * pdev revision(0x20), new revision support them. The
4706 * value of this two fields will not return error when driver
4707 * send command to fireware in revision(0x20).
4708 */
4709 req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4710 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
46a3df9f
S
4711
4712 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 4713 if (ret)
46a3df9f 4714 dev_err(&hdev->pdev->dev,
c631c696
JS
4715 "failed to set vport %d promisc mode, ret = %d.\n",
4716 param->vf_id, ret);
3f639907
JS
4717
4718 return ret;
46a3df9f
S
4719}
4720
e196ec75
JS
4721static void hclge_promisc_param_init(struct hclge_promisc_param *param,
4722 bool en_uc, bool en_mc, bool en_bc,
4723 int vport_id)
46a3df9f
S
4724{
4725 if (!param)
4726 return;
4727
4728 memset(param, 0, sizeof(struct hclge_promisc_param));
4729 if (en_uc)
4730 param->enable = HCLGE_PROMISC_EN_UC;
4731 if (en_mc)
4732 param->enable |= HCLGE_PROMISC_EN_MC;
4733 if (en_bc)
4734 param->enable |= HCLGE_PROMISC_EN_BC;
4735 param->vf_id = vport_id;
4736}
4737
e196ec75
JS
4738int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
4739 bool en_mc_pmc, bool en_bc_pmc)
4740{
4741 struct hclge_dev *hdev = vport->back;
4742 struct hclge_promisc_param param;
4743
4744 hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4745 vport->vport_id);
4746 return hclge_cmd_set_promisc_mode(hdev, &param);
4747}
4748
7fa6be4f
HT
4749static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4750 bool en_mc_pmc)
46a3df9f
S
4751{
4752 struct hclge_vport *vport = hclge_get_vport(handle);
28673b33 4753 bool en_bc_pmc = true;
46a3df9f 4754
28673b33
JS
 4755 /* For revision 0x20, if broadcast promisc is enabled, the vlan filter is
 4756 * always bypassed. So broadcast promisc should be disabled until the
 4757 * user enables promisc mode
4758 */
4759 if (handle->pdev->revision == 0x20)
4760 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4761
e196ec75
JS
4762 return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
4763 en_bc_pmc);
46a3df9f
S
4764}
4765
c631c696
JS
4766static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
4767{
4768 struct hclge_vport *vport = hclge_get_vport(handle);
4769 struct hclge_dev *hdev = vport->back;
4770
4771 set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
4772}
4773
d695964d
JS
4774static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4775{
4776 struct hclge_get_fd_mode_cmd *req;
4777 struct hclge_desc desc;
4778 int ret;
4779
4780 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4781
4782 req = (struct hclge_get_fd_mode_cmd *)desc.data;
4783
4784 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4785 if (ret) {
4786 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4787 return ret;
4788 }
4789
4790 *fd_mode = req->mode;
4791
4792 return ret;
4793}
4794
4795static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4796 u32 *stage1_entry_num,
4797 u32 *stage2_entry_num,
4798 u16 *stage1_counter_num,
4799 u16 *stage2_counter_num)
4800{
4801 struct hclge_get_fd_allocation_cmd *req;
4802 struct hclge_desc desc;
4803 int ret;
4804
4805 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4806
4807 req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4808
4809 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4810 if (ret) {
4811 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4812 ret);
4813 return ret;
4814 }
4815
4816 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4817 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4818 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4819 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4820
4821 return ret;
4822}
4823
84944d5c
GL
4824static int hclge_set_fd_key_config(struct hclge_dev *hdev,
4825 enum HCLGE_FD_STAGE stage_num)
d695964d
JS
4826{
4827 struct hclge_set_fd_key_config_cmd *req;
4828 struct hclge_fd_key_cfg *stage;
4829 struct hclge_desc desc;
4830 int ret;
4831
4832 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4833
4834 req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4835 stage = &hdev->fd_cfg.key_cfg[stage_num];
4836 req->stage = stage_num;
4837 req->key_select = stage->key_sel;
4838 req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4839 req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4840 req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4841 req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4842 req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4843 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4844
4845 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4846 if (ret)
4847 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4848
4849 return ret;
4850}
4851
4852static int hclge_init_fd_config(struct hclge_dev *hdev)
4853{
4854#define LOW_2_WORDS 0x03
4855 struct hclge_fd_key_cfg *key_cfg;
4856 int ret;
4857
4858 if (!hnae3_dev_fd_supported(hdev))
4859 return 0;
4860
4861 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4862 if (ret)
4863 return ret;
4864
4865 switch (hdev->fd_cfg.fd_mode) {
4866 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4867 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4868 break;
4869 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4870 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4871 break;
4872 default:
4873 dev_err(&hdev->pdev->dev,
adcf738b 4874 "Unsupported flow director mode %u\n",
d695964d
JS
4875 hdev->fd_cfg.fd_mode);
4876 return -EOPNOTSUPP;
4877 }
4878
d695964d
JS
4879 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4880 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE,
4881 key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4882 key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4883 key_cfg->outer_sipv6_word_en = 0;
4884 key_cfg->outer_dipv6_word_en = 0;
4885
4886 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4887 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4888 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4889 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4890
 4891 /* If using the max 400-bit key, we can support tuples for ether type */
16505f87 4892 if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1)
d695964d
JS
4893 key_cfg->tuple_active |=
4894 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
d695964d
JS
4895
4896 /* roce_type is used to filter roce frames
4897 * dst_vport is used to specify the rule
4898 */
4899 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4900
4901 ret = hclge_get_fd_allocation(hdev,
4902 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4903 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4904 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4905 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4906 if (ret)
4907 return ret;
4908
4909 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4910}
4911
11732868
JS
4912static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4913 int loc, u8 *key, bool is_add)
4914{
4915 struct hclge_fd_tcam_config_1_cmd *req1;
4916 struct hclge_fd_tcam_config_2_cmd *req2;
4917 struct hclge_fd_tcam_config_3_cmd *req3;
4918 struct hclge_desc desc[3];
4919 int ret;
4920
4921 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4922 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4923 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4924 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4925 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4926
4927 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4928 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4929 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4930
4931 req1->stage = stage;
4932 req1->xy_sel = sel_x ? 1 : 0;
4933 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4934 req1->index = cpu_to_le32(loc);
4935 req1->entry_vld = sel_x ? is_add : 0;
4936
4937 if (key) {
4938 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4939 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4940 sizeof(req2->tcam_data));
4941 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4942 sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4943 }
4944
4945 ret = hclge_cmd_send(&hdev->hw, desc, 3);
4946 if (ret)
4947 dev_err(&hdev->pdev->dev,
4948 "config tcam key fail, ret=%d\n",
4949 ret);
4950
4951 return ret;
4952}
4953
4954static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4955 struct hclge_fd_ad_data *action)
4956{
4957 struct hclge_fd_ad_config_cmd *req;
4958 struct hclge_desc desc;
4959 u64 ad_data = 0;
4960 int ret;
4961
4962 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4963
4964 req = (struct hclge_fd_ad_config_cmd *)desc.data;
4965 req->index = cpu_to_le32(loc);
4966 req->stage = stage;
4967
4968 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4969 action->write_rule_id_to_bd);
4970 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4971 action->rule_id);
4972 ad_data <<= 32;
4973 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4974 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4975 action->forward_to_direct_queue);
4976 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4977 action->queue_id);
4978 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4979 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4980 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4981 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4982 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4983 action->counter_id);
4984
4985 req->ad_data = cpu_to_le64(ad_data);
4986 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4987 if (ret)
4988 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4989
4990 return ret;
4991}
4992
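/* Fill the key_x/key_y pair for one tuple of a flow director rule from
 * the tuple value and its mask; returns true when the tuple was handled
 * (or is unused), false for an unrecognized tuple bit.
 */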
4993static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4994 struct hclge_fd_rule *rule)
4995{
4996 u16 tmp_x_s, tmp_y_s;
4997 u32 tmp_x_l, tmp_y_l;
4998 int i;
4999
5000 if (rule->unused_tuple & tuple_bit)
5001 return true;
5002
5003 switch (tuple_bit) {
11732868 5004 case BIT(INNER_DST_MAC):
e91e388c
JS
5005 for (i = 0; i < ETH_ALEN; i++) {
5006 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
11732868 5007 rule->tuples_mask.dst_mac[i]);
e91e388c 5008 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
11732868
JS
5009 rule->tuples_mask.dst_mac[i]);
5010 }
5011
5012 return true;
5013 case BIT(INNER_SRC_MAC):
e91e388c
JS
5014 for (i = 0; i < ETH_ALEN; i++) {
5015 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
11732868 5016 rule->tuples.src_mac[i]);
e91e388c 5017 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
11732868
JS
5018 rule->tuples.src_mac[i]);
5019 }
5020
5021 return true;
5022 case BIT(INNER_VLAN_TAG_FST):
5023 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
5024 rule->tuples_mask.vlan_tag1);
5025 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
5026 rule->tuples_mask.vlan_tag1);
5027 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5028 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5029
5030 return true;
5031 case BIT(INNER_ETH_TYPE):
5032 calc_x(tmp_x_s, rule->tuples.ether_proto,
5033 rule->tuples_mask.ether_proto);
5034 calc_y(tmp_y_s, rule->tuples.ether_proto,
5035 rule->tuples_mask.ether_proto);
5036 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5037 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5038
5039 return true;
5040 case BIT(INNER_IP_TOS):
5041 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5042 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5043
5044 return true;
5045 case BIT(INNER_IP_PROTO):
5046 calc_x(*key_x, rule->tuples.ip_proto,
5047 rule->tuples_mask.ip_proto);
5048 calc_y(*key_y, rule->tuples.ip_proto,
5049 rule->tuples_mask.ip_proto);
5050
5051 return true;
5052 case BIT(INNER_SRC_IP):
e91e388c
JS
5053 calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
5054 rule->tuples_mask.src_ip[IPV4_INDEX]);
5055 calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
5056 rule->tuples_mask.src_ip[IPV4_INDEX]);
11732868
JS
5057 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5058 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5059
5060 return true;
5061 case BIT(INNER_DST_IP):
e91e388c
JS
5062 calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
5063 rule->tuples_mask.dst_ip[IPV4_INDEX]);
5064 calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
5065 rule->tuples_mask.dst_ip[IPV4_INDEX]);
11732868
JS
5066 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5067 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5068
5069 return true;
5070 case BIT(INNER_SRC_PORT):
5071 calc_x(tmp_x_s, rule->tuples.src_port,
5072 rule->tuples_mask.src_port);
5073 calc_y(tmp_y_s, rule->tuples.src_port,
5074 rule->tuples_mask.src_port);
5075 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5076 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5077
5078 return true;
5079 case BIT(INNER_DST_PORT):
5080 calc_x(tmp_x_s, rule->tuples.dst_port,
5081 rule->tuples_mask.dst_port);
5082 calc_y(tmp_y_s, rule->tuples.dst_port,
5083 rule->tuples_mask.dst_port);
5084 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5085 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5086
5087 return true;
5088 default:
5089 return false;
5090 }
5091}
5092
5093static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5094 u8 vf_id, u8 network_port_id)
5095{
5096 u32 port_number = 0;
5097
5098 if (port_type == HOST_PORT) {
5099 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5100 pf_id);
5101 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5102 vf_id);
5103 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5104 } else {
5105 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5106 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5107 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5108 }
5109
5110 return port_number;
5111}
5112
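/* Build the meta data part of the flow director key (packet type and
 * destination vport) and left-align it into the key_x/key_y words.
 */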
5113static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5114 __le32 *key_x, __le32 *key_y,
5115 struct hclge_fd_rule *rule)
5116{
5117 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5118 u8 cur_pos = 0, tuple_size, shift_bits;
ebaf1908 5119 unsigned int i;
11732868
JS
5120
5121 for (i = 0; i < MAX_META_DATA; i++) {
5122 tuple_size = meta_data_key_info[i].key_length;
5123 tuple_bit = key_cfg->meta_data_active & BIT(i);
5124
5125 switch (tuple_bit) {
5126 case BIT(ROCE_TYPE):
5127 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5128 cur_pos += tuple_size;
5129 break;
5130 case BIT(DST_VPORT):
5131 port_number = hclge_get_port_number(HOST_PORT, 0,
5132 rule->vf_id, 0);
5133 hnae3_set_field(meta_data,
5134 GENMASK(cur_pos + tuple_size, cur_pos),
5135 cur_pos, port_number);
5136 cur_pos += tuple_size;
5137 break;
5138 default:
5139 break;
5140 }
5141 }
5142
5143 calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5144 calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5145 shift_bits = sizeof(meta_data) * 8 - cur_pos;
5146
5147 *key_x = cpu_to_le32(tmp_x << shift_bits);
5148 *key_y = cpu_to_le32(tmp_y << shift_bits);
5149}
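
At the end of hclge_fd_convert_meta_data(), the accumulated fields occupy the low cur_pos bits of the 32-bit meta_data word, so the final shift left-justifies them into the most-significant bits before they are written into the key. A minimal arithmetic sketch of that shift, using a made-up cur_pos, is below.

/* Illustrative sketch: left-justifying a partially filled 32-bit meta word.
 * cur_pos is a made-up example value here.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t meta_data = 0x15;	/* 5 valid low-order bits */
	unsigned int cur_pos = 5;
	unsigned int shift_bits = sizeof(meta_data) * 8 - cur_pos;

	printf("shifted meta_data = 0x%08x\n", meta_data << shift_bits);
	return 0;
}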
5150
5151/* A complete key is combined with meta data key and tuple key.
5152 * Meta data key is stored at the MSB region, and tuple key is stored at
5153 * the LSB region, unused bits will be filled 0.
5154 */
5155static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5156 struct hclge_fd_rule *rule)
5157{
5158 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5159 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5160 u8 *cur_key_x, *cur_key_y;
11732868 5161 u8 meta_data_region;
84944d5c
GL
5162 u8 tuple_size;
5163 int ret;
5164 u32 i;
11732868
JS
5165
5166 memset(key_x, 0, sizeof(key_x));
5167 memset(key_y, 0, sizeof(key_y));
5168 cur_key_x = key_x;
5169 cur_key_y = key_y;
5170
5171 for (i = 0; i < MAX_TUPLE; i++) {
5172 bool tuple_valid;
5173 u32 check_tuple;
5174
5175 tuple_size = tuple_key_info[i].key_length / 8;
5176 check_tuple = key_cfg->tuple_active & BIT(i);
5177
5178 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5179 cur_key_y, rule);
5180 if (tuple_valid) {
5181 cur_key_x += tuple_size;
5182 cur_key_y += tuple_size;
5183 }
5184 }
5185
5186 meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5187 MAX_META_DATA_LENGTH / 8;
5188
5189 hclge_fd_convert_meta_data(key_cfg,
5190 (__le32 *)(key_x + meta_data_region),
5191 (__le32 *)(key_y + meta_data_region),
5192 rule);
5193
5194 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5195 true);
5196 if (ret) {
5197 dev_err(&hdev->pdev->dev,
adcf738b 5198 "fd key_y config fail, loc=%u, ret=%d\n",
11732868
JS
5199 rule->queue_id, ret);
5200 return ret;
5201 }
5202
5203 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5204 true);
5205 if (ret)
5206 dev_err(&hdev->pdev->dev,
adcf738b 5207 "fd key_x config fail, loc=%u, ret=%d\n",
11732868
JS
5208 rule->queue_id, ret);
5209 return ret;
5210}
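
As the comment above says, hclge_config_key() builds the tuple key upward from the low bytes of the key buffer and drops the meta data into the last MAX_META_DATA_LENGTH / 8 bytes, at offset meta_data_region. A tiny sketch of that offset calculation with hypothetical sizes (the real values come from hclge_main.h) is below.

/* Illustrative sketch: byte offset of the meta data (MSB) region, using
 * hypothetical key sizes.
 */
#include <stdio.h>

int main(void)
{
	unsigned int max_key_length = 400;	/* bits, hypothetical */
	unsigned int max_meta_data_length = 32;	/* bits, hypothetical */
	unsigned int meta_data_region = max_key_length / 8 -
					max_meta_data_length / 8;

	printf("meta data starts at byte %u of the key\n", meta_data_region);
	return 0;
}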
5211
5212static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5213 struct hclge_fd_rule *rule)
5214{
5215 struct hclge_fd_ad_data ad_data;
5216
5217 ad_data.ad_id = rule->location;
5218
5219 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5220 ad_data.drop_packet = true;
5221 ad_data.forward_to_direct_queue = false;
5222 ad_data.queue_id = 0;
5223 } else {
5224 ad_data.drop_packet = false;
5225 ad_data.forward_to_direct_queue = true;
5226 ad_data.queue_id = rule->queue_id;
5227 }
5228
5229 ad_data.use_counter = false;
5230 ad_data.counter_id = 0;
5231
5232 ad_data.use_next_stage = false;
5233 ad_data.next_input_key = 0;
5234
5235 ad_data.write_rule_id_to_bd = true;
5236 ad_data.rule_id = rule->location;
5237
5238 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5239}
5240
736fc0e1
JS
5241static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
5242 u32 *unused_tuple)
dd74f815 5243{
736fc0e1 5244 if (!spec || !unused_tuple)
dd74f815
JS
5245 return -EINVAL;
5246
736fc0e1 5247 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
dd74f815 5248
736fc0e1
JS
5249 if (!spec->ip4src)
5250 *unused_tuple |= BIT(INNER_SRC_IP);
dd74f815 5251
736fc0e1
JS
5252 if (!spec->ip4dst)
5253 *unused_tuple |= BIT(INNER_DST_IP);
dd74f815 5254
736fc0e1
JS
5255 if (!spec->psrc)
5256 *unused_tuple |= BIT(INNER_SRC_PORT);
dd74f815 5257
736fc0e1
JS
5258 if (!spec->pdst)
5259 *unused_tuple |= BIT(INNER_DST_PORT);
dd74f815 5260
736fc0e1
JS
5261 if (!spec->tos)
5262 *unused_tuple |= BIT(INNER_IP_TOS);
dd74f815 5263
736fc0e1
JS
5264 return 0;
5265}
dd74f815 5266
736fc0e1
JS
5267static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
5268 u32 *unused_tuple)
5269{
5270 if (!spec || !unused_tuple)
5271 return -EINVAL;
dd74f815 5272
736fc0e1
JS
5273 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5274 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
dd74f815 5275
736fc0e1
JS
5276 if (!spec->ip4src)
5277 *unused_tuple |= BIT(INNER_SRC_IP);
dd74f815 5278
736fc0e1
JS
5279 if (!spec->ip4dst)
5280 *unused_tuple |= BIT(INNER_DST_IP);
dd74f815 5281
736fc0e1
JS
5282 if (!spec->tos)
5283 *unused_tuple |= BIT(INNER_IP_TOS);
dd74f815 5284
736fc0e1
JS
5285 if (!spec->proto)
5286 *unused_tuple |= BIT(INNER_IP_PROTO);
dd74f815 5287
736fc0e1
JS
5288 if (spec->l4_4_bytes)
5289 return -EOPNOTSUPP;
dd74f815 5290
736fc0e1
JS
5291 if (spec->ip_ver != ETH_RX_NFC_IP4)
5292 return -EOPNOTSUPP;
dd74f815 5293
736fc0e1
JS
5294 return 0;
5295}
dd74f815 5296
736fc0e1
JS
5297static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
5298 u32 *unused_tuple)
5299{
5300 if (!spec || !unused_tuple)
5301 return -EINVAL;
dd74f815 5302
736fc0e1
JS
5303 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5304 BIT(INNER_IP_TOS);
dd74f815 5305
736fc0e1
JS
5306 /* check whether the src/dst ip addresses are used */
5307 if (!spec->ip6src[0] && !spec->ip6src[1] &&
5308 !spec->ip6src[2] && !spec->ip6src[3])
5309 *unused_tuple |= BIT(INNER_SRC_IP);
dd74f815 5310
736fc0e1
JS
5311 if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
5312 !spec->ip6dst[2] && !spec->ip6dst[3])
5313 *unused_tuple |= BIT(INNER_DST_IP);
dd74f815 5314
736fc0e1
JS
5315 if (!spec->psrc)
5316 *unused_tuple |= BIT(INNER_SRC_PORT);
dd74f815 5317
736fc0e1
JS
5318 if (!spec->pdst)
5319 *unused_tuple |= BIT(INNER_DST_PORT);
dd74f815 5320
736fc0e1
JS
5321 if (spec->tclass)
5322 return -EOPNOTSUPP;
dd74f815 5323
736fc0e1
JS
5324 return 0;
5325}
dd74f815 5326
736fc0e1
JS
5327static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
5328 u32 *unused_tuple)
5329{
5330 if (!spec || !unused_tuple)
5331 return -EINVAL;
dd74f815 5332
736fc0e1
JS
5333 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5334 BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
dd74f815 5335
736fc0e1
JS
5336 /* check whether the src/dst ip addresses are used */
5337 if (!spec->ip6src[0] && !spec->ip6src[1] &&
5338 !spec->ip6src[2] && !spec->ip6src[3])
5339 *unused_tuple |= BIT(INNER_SRC_IP);
dd74f815 5340
736fc0e1
JS
5341 if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
5342 !spec->ip6dst[2] && !spec->ip6dst[3])
5343 *unused_tuple |= BIT(INNER_DST_IP);
dd74f815 5344
736fc0e1
JS
5345 if (!spec->l4_proto)
5346 *unused_tuple |= BIT(INNER_IP_PROTO);
dd74f815 5347
736fc0e1
JS
5348 if (spec->tclass)
5349 return -EOPNOTSUPP;
dd74f815 5350
736fc0e1 5351 if (spec->l4_4_bytes)
dd74f815 5352 return -EOPNOTSUPP;
dd74f815 5353
736fc0e1
JS
5354 return 0;
5355}
5356
5357static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
5358{
5359 if (!spec || !unused_tuple)
5360 return -EINVAL;
5361
5362 *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5363 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5364 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5365
5366 if (is_zero_ether_addr(spec->h_source))
5367 *unused_tuple |= BIT(INNER_SRC_MAC);
5368
5369 if (is_zero_ether_addr(spec->h_dest))
5370 *unused_tuple |= BIT(INNER_DST_MAC);
5371
5372 if (!spec->h_proto)
5373 *unused_tuple |= BIT(INNER_ETH_TYPE);
5374
5375 return 0;
5376}
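
All of the hclge_fd_check_*_tuple() helpers above follow the same pattern: start from the tuples the flow type can never carry, then set a bit for every field the user left zeroed, so *unused_tuple ends up describing everything the rule does not match on. The toy program below mimics that accumulation with hypothetical bit names.

/* Illustrative sketch: building an "unused tuple" bitmap the way the
 * hclge_fd_check_*_tuple() helpers do. The bit positions are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

enum { DEMO_SRC_IP, DEMO_DST_IP, DEMO_SRC_PORT, DEMO_DST_PORT };

int main(void)
{
	uint32_t ip4src = 0xc0a80001;	/* 192.168.0.1 -> used */
	uint16_t psrc = 0;		/* not specified -> unused */
	uint32_t unused = 0;

	if (!ip4src)
		unused |= 1u << DEMO_SRC_IP;
	if (!psrc)
		unused |= 1u << DEMO_SRC_PORT;

	printf("unused tuple bitmap = 0x%x\n", unused);
	return 0;
}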
5377
5378static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
5379 struct ethtool_rx_flow_spec *fs,
5380 u32 *unused_tuple)
5381{
0b4bdc55 5382 if (fs->flow_type & FLOW_EXT) {
a3ca5e90
GL
5383 if (fs->h_ext.vlan_etype) {
5384 dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
dd74f815 5385 return -EOPNOTSUPP;
a3ca5e90
GL
5386 }
5387
dd74f815 5388 if (!fs->h_ext.vlan_tci)
736fc0e1 5389 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
dd74f815 5390
736fc0e1 5391 if (fs->m_ext.vlan_tci &&
a3ca5e90
GL
5392 be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
5393 dev_err(&hdev->pdev->dev,
5394 "failed to config vlan_tci, invalid vlan_tci: %u, max is %u.\n",
5395 ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
736fc0e1 5396 return -EINVAL;
a3ca5e90 5397 }
dd74f815 5398 } else {
736fc0e1 5399 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
dd74f815
JS
5400 }
5401
5402 if (fs->flow_type & FLOW_MAC_EXT) {
16505f87 5403 if (hdev->fd_cfg.fd_mode !=
a3ca5e90
GL
5404 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5405 dev_err(&hdev->pdev->dev,
5406 "FLOW_MAC_EXT is not supported in current fd mode!\n");
dd74f815 5407 return -EOPNOTSUPP;
a3ca5e90 5408 }
dd74f815
JS
5409
5410 if (is_zero_ether_addr(fs->h_ext.h_dest))
736fc0e1 5411 *unused_tuple |= BIT(INNER_DST_MAC);
dd74f815 5412 else
0b4bdc55 5413 *unused_tuple &= ~BIT(INNER_DST_MAC);
dd74f815
JS
5414 }
5415
5416 return 0;
5417}
5418
736fc0e1
JS
5419static int hclge_fd_check_spec(struct hclge_dev *hdev,
5420 struct ethtool_rx_flow_spec *fs,
5421 u32 *unused_tuple)
5422{
16505f87 5423 u32 flow_type;
736fc0e1
JS
5424 int ret;
5425
a3ca5e90
GL
5426 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5427 dev_err(&hdev->pdev->dev,
5428 "failed to config fd rules, invalid rule location: %u, max is %u\n.",
5429 fs->location,
5430 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
736fc0e1 5431 return -EINVAL;
a3ca5e90 5432 }
736fc0e1 5433
736fc0e1
JS
5434 if ((fs->flow_type & FLOW_EXT) &&
5435 (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5436 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5437 return -EOPNOTSUPP;
5438 }
5439
16505f87
GL
5440 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5441 switch (flow_type) {
736fc0e1
JS
5442 case SCTP_V4_FLOW:
5443 case TCP_V4_FLOW:
5444 case UDP_V4_FLOW:
5445 ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
5446 unused_tuple);
5447 break;
5448 case IP_USER_FLOW:
5449 ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
5450 unused_tuple);
5451 break;
5452 case SCTP_V6_FLOW:
5453 case TCP_V6_FLOW:
5454 case UDP_V6_FLOW:
5455 ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
5456 unused_tuple);
5457 break;
5458 case IPV6_USER_FLOW:
5459 ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
5460 unused_tuple);
5461 break;
5462 case ETHER_FLOW:
5463 if (hdev->fd_cfg.fd_mode !=
5464 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5465 dev_err(&hdev->pdev->dev,
5466 "ETHER_FLOW is not supported in current fd mode!\n");
5467 return -EOPNOTSUPP;
5468 }
5469
5470 ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
5471 unused_tuple);
5472 break;
5473 default:
a3ca5e90
GL
5474 dev_err(&hdev->pdev->dev,
5475 "unsupported protocol type, protocol type = %#x\n",
5476 flow_type);
736fc0e1
JS
5477 return -EOPNOTSUPP;
5478 }
5479
a3ca5e90
GL
5480 if (ret) {
5481 dev_err(&hdev->pdev->dev,
5482 "failed to check flow union tuple, ret = %d\n",
5483 ret);
736fc0e1 5484 return ret;
a3ca5e90 5485 }
736fc0e1
JS
5486
5487 return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
5488}
5489
dd74f815
JS
5490static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5491{
5492 struct hclge_fd_rule *rule = NULL;
5493 struct hlist_node *node2;
5494
44122887 5495 spin_lock_bh(&hdev->fd_rule_lock);
dd74f815
JS
5496 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5497 if (rule->location >= location)
5498 break;
5499 }
5500
44122887
JS
5501 spin_unlock_bh(&hdev->fd_rule_lock);
5502
dd74f815
JS
5503 return rule && rule->location == location;
5504}
5505
44122887 5506/* make sure this is called while holding fd_rule_lock */
dd74f815
JS
5507static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5508 struct hclge_fd_rule *new_rule,
5509 u16 location,
5510 bool is_add)
5511{
5512 struct hclge_fd_rule *rule = NULL, *parent = NULL;
5513 struct hlist_node *node2;
5514
5515 if (is_add && !new_rule)
5516 return -EINVAL;
5517
5518 hlist_for_each_entry_safe(rule, node2,
5519 &hdev->fd_rule_list, rule_node) {
5520 if (rule->location >= location)
5521 break;
5522 parent = rule;
5523 }
5524
5525 if (rule && rule->location == location) {
5526 hlist_del(&rule->rule_node);
5527 kfree(rule);
5528 hdev->hclge_fd_rule_num--;
5529
44122887
JS
5530 if (!is_add) {
5531 if (!hdev->hclge_fd_rule_num)
5532 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5533 clear_bit(location, hdev->fd_bmap);
dd74f815 5534
44122887
JS
5535 return 0;
5536 }
dd74f815
JS
5537 } else if (!is_add) {
5538 dev_err(&hdev->pdev->dev,
adcf738b 5539 "delete fail, rule %u is inexistent\n",
dd74f815
JS
5540 location);
5541 return -EINVAL;
5542 }
5543
5544 INIT_HLIST_NODE(&new_rule->rule_node);
5545
5546 if (parent)
5547 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5548 else
5549 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5550
44122887 5551 set_bit(location, hdev->fd_bmap);
dd74f815 5552 hdev->hclge_fd_rule_num++;
44122887 5553 hdev->fd_active_type = new_rule->rule_type;
dd74f815
JS
5554
5555 return 0;
5556}
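
hclge_fd_update_rule_list() keeps the rule list sorted by location: it walks until it finds the first node with location >= the target, remembering the previous node as parent, and then either removes the matching node or links the new rule behind parent (or at the head). The user-space sketch below shows the same ordered insert on a plain singly linked list instead of the kernel hlist.

/* Illustrative sketch: ordered insert by "location", mirroring the walk in
 * hclge_fd_update_rule_list() but with a plain singly linked list.
 */
#include <stdio.h>

struct demo_rule {
	unsigned int location;
	struct demo_rule *next;
};

static void demo_insert(struct demo_rule **head, struct demo_rule *new_rule)
{
	struct demo_rule *cur = *head, *parent = NULL;

	while (cur && cur->location < new_rule->location) {
		parent = cur;
		cur = cur->next;
	}

	if (parent) {
		new_rule->next = parent->next;
		parent->next = new_rule;
	} else {
		new_rule->next = *head;
		*head = new_rule;
	}
}

int main(void)
{
	struct demo_rule r1 = { 4, NULL }, r2 = { 1, NULL }, r3 = { 9, NULL };
	struct demo_rule *head = NULL, *cur;

	demo_insert(&head, &r1);
	demo_insert(&head, &r2);
	demo_insert(&head, &r3);

	for (cur = head; cur; cur = cur->next)
		printf("location %u\n", cur->location);	/* 1, 4, 9 */
	return 0;
}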
5557
5558static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5559 struct ethtool_rx_flow_spec *fs,
5560 struct hclge_fd_rule *rule)
5561{
5562 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5563
5564 switch (flow_type) {
5565 case SCTP_V4_FLOW:
5566 case TCP_V4_FLOW:
5567 case UDP_V4_FLOW:
e91e388c 5568 rule->tuples.src_ip[IPV4_INDEX] =
dd74f815 5569 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
e91e388c 5570 rule->tuples_mask.src_ip[IPV4_INDEX] =
dd74f815
JS
5571 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5572
e91e388c 5573 rule->tuples.dst_ip[IPV4_INDEX] =
dd74f815 5574 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
e91e388c 5575 rule->tuples_mask.dst_ip[IPV4_INDEX] =
dd74f815
JS
5576 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5577
5578 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5579 rule->tuples_mask.src_port =
5580 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5581
5582 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5583 rule->tuples_mask.dst_port =
5584 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5585
5586 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5587 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5588
5589 rule->tuples.ether_proto = ETH_P_IP;
5590 rule->tuples_mask.ether_proto = 0xFFFF;
5591
5592 break;
5593 case IP_USER_FLOW:
e91e388c 5594 rule->tuples.src_ip[IPV4_INDEX] =
dd74f815 5595 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
e91e388c 5596 rule->tuples_mask.src_ip[IPV4_INDEX] =
dd74f815
JS
5597 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5598
e91e388c 5599 rule->tuples.dst_ip[IPV4_INDEX] =
dd74f815 5600 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
e91e388c 5601 rule->tuples_mask.dst_ip[IPV4_INDEX] =
dd74f815
JS
5602 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5603
5604 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5605 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5606
5607 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5608 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5609
5610 rule->tuples.ether_proto = ETH_P_IP;
5611 rule->tuples_mask.ether_proto = 0xFFFF;
5612
5613 break;
5614 case SCTP_V6_FLOW:
5615 case TCP_V6_FLOW:
5616 case UDP_V6_FLOW:
5617 be32_to_cpu_array(rule->tuples.src_ip,
e91e388c 5618 fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
dd74f815 5619 be32_to_cpu_array(rule->tuples_mask.src_ip,
e91e388c 5620 fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
dd74f815
JS
5621
5622 be32_to_cpu_array(rule->tuples.dst_ip,
e91e388c 5623 fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
dd74f815 5624 be32_to_cpu_array(rule->tuples_mask.dst_ip,
e91e388c 5625 fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
dd74f815
JS
5626
5627 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5628 rule->tuples_mask.src_port =
5629 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5630
5631 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5632 rule->tuples_mask.dst_port =
5633 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5634
5635 rule->tuples.ether_proto = ETH_P_IPV6;
5636 rule->tuples_mask.ether_proto = 0xFFFF;
5637
5638 break;
5639 case IPV6_USER_FLOW:
5640 be32_to_cpu_array(rule->tuples.src_ip,
e91e388c 5641 fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
dd74f815 5642 be32_to_cpu_array(rule->tuples_mask.src_ip,
e91e388c 5643 fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
dd74f815
JS
5644
5645 be32_to_cpu_array(rule->tuples.dst_ip,
e91e388c 5646 fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
dd74f815 5647 be32_to_cpu_array(rule->tuples_mask.dst_ip,
e91e388c 5648 fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
dd74f815
JS
5649
5650 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5651 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5652
5653 rule->tuples.ether_proto = ETH_P_IPV6;
5654 rule->tuples_mask.ether_proto = 0xFFFF;
5655
5656 break;
5657 case ETHER_FLOW:
5658 ether_addr_copy(rule->tuples.src_mac,
5659 fs->h_u.ether_spec.h_source);
5660 ether_addr_copy(rule->tuples_mask.src_mac,
5661 fs->m_u.ether_spec.h_source);
5662
5663 ether_addr_copy(rule->tuples.dst_mac,
5664 fs->h_u.ether_spec.h_dest);
5665 ether_addr_copy(rule->tuples_mask.dst_mac,
5666 fs->m_u.ether_spec.h_dest);
5667
5668 rule->tuples.ether_proto =
5669 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5670 rule->tuples_mask.ether_proto =
5671 be16_to_cpu(fs->m_u.ether_spec.h_proto);
5672
5673 break;
5674 default:
5675 return -EOPNOTSUPP;
5676 }
5677
5678 switch (flow_type) {
5679 case SCTP_V4_FLOW:
5680 case SCTP_V6_FLOW:
5681 rule->tuples.ip_proto = IPPROTO_SCTP;
5682 rule->tuples_mask.ip_proto = 0xFF;
5683 break;
5684 case TCP_V4_FLOW:
5685 case TCP_V6_FLOW:
5686 rule->tuples.ip_proto = IPPROTO_TCP;
5687 rule->tuples_mask.ip_proto = 0xFF;
5688 break;
5689 case UDP_V4_FLOW:
5690 case UDP_V6_FLOW:
5691 rule->tuples.ip_proto = IPPROTO_UDP;
5692 rule->tuples_mask.ip_proto = 0xFF;
5693 break;
5694 default:
5695 break;
5696 }
5697
0b4bdc55 5698 if (fs->flow_type & FLOW_EXT) {
dd74f815
JS
5699 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5700 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5701 }
5702
5703 if (fs->flow_type & FLOW_MAC_EXT) {
5704 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5705 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5706 }
5707
5708 return 0;
5709}
5710
44122887
JS
5711/* make sure this is called while holding fd_rule_lock */
5712static int hclge_fd_config_rule(struct hclge_dev *hdev,
5713 struct hclge_fd_rule *rule)
5714{
5715 int ret;
5716
5717 if (!rule) {
5718 dev_err(&hdev->pdev->dev,
5719 "The flow director rule is NULL\n");
5720 return -EINVAL;
5721 }
5722
5723 /* it never fails here, so there is no need to check the return value */
5724 hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5725
5726 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5727 if (ret)
5728 goto clear_rule;
5729
5730 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5731 if (ret)
5732 goto clear_rule;
5733
5734 return 0;
5735
5736clear_rule:
5737 hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5738 return ret;
5739}
5740
dd74f815
JS
5741static int hclge_add_fd_entry(struct hnae3_handle *handle,
5742 struct ethtool_rxnfc *cmd)
5743{
5744 struct hclge_vport *vport = hclge_get_vport(handle);
5745 struct hclge_dev *hdev = vport->back;
5746 u16 dst_vport_id = 0, q_index = 0;
5747 struct ethtool_rx_flow_spec *fs;
5748 struct hclge_fd_rule *rule;
5749 u32 unused = 0;
5750 u8 action;
5751 int ret;
5752
a3ca5e90
GL
5753 if (!hnae3_dev_fd_supported(hdev)) {
5754 dev_err(&hdev->pdev->dev,
5755 "flow table director is not supported\n");
dd74f815 5756 return -EOPNOTSUPP;
a3ca5e90 5757 }
dd74f815 5758
9abeb7d8 5759 if (!hdev->fd_en) {
a3ca5e90
GL
5760 dev_err(&hdev->pdev->dev,
5761 "please enable flow director first\n");
dd74f815
JS
5762 return -EOPNOTSUPP;
5763 }
5764
5765 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5766
5767 ret = hclge_fd_check_spec(hdev, fs, &unused);
a3ca5e90 5768 if (ret)
dd74f815 5769 return ret;
dd74f815
JS
5770
5771 if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5772 action = HCLGE_FD_ACTION_DROP_PACKET;
5773 } else {
5774 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5775 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5776 u16 tqps;
5777
0285dbae
JS
5778 if (vf > hdev->num_req_vfs) {
5779 dev_err(&hdev->pdev->dev,
adcf738b 5780 "Error: vf id (%u) > max vf num (%u)\n",
0285dbae
JS
5781 vf, hdev->num_req_vfs);
5782 return -EINVAL;
5783 }
5784
dd74f815
JS
5785 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5786 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5787
5788 if (ring >= tqps) {
5789 dev_err(&hdev->pdev->dev,
adcf738b 5790 "Error: queue id (%u) > max tqp num (%u)\n",
dd74f815
JS
5791 ring, tqps - 1);
5792 return -EINVAL;
5793 }
5794
dd74f815
JS
5795 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5796 q_index = ring;
5797 }
5798
5799 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5800 if (!rule)
5801 return -ENOMEM;
5802
5803 ret = hclge_fd_get_tuple(hdev, fs, rule);
44122887
JS
5804 if (ret) {
5805 kfree(rule);
5806 return ret;
5807 }
dd74f815
JS
5808
5809 rule->flow_type = fs->flow_type;
dd74f815
JS
5810 rule->location = fs->location;
5811 rule->unused_tuple = unused;
5812 rule->vf_id = dst_vport_id;
5813 rule->queue_id = q_index;
5814 rule->action = action;
44122887 5815 rule->rule_type = HCLGE_FD_EP_ACTIVE;
dd74f815 5816
d93ed94f
JS
5817 /* to avoid rule conflicts, when the user configures rules via ethtool,
5818 * we need to clear all arfs rules
5819 */
5820 hclge_clear_arfs_rules(handle);
5821
44122887
JS
5822 spin_lock_bh(&hdev->fd_rule_lock);
5823 ret = hclge_fd_config_rule(hdev, rule);
dd74f815 5824
44122887 5825 spin_unlock_bh(&hdev->fd_rule_lock);
dd74f815 5826
dd74f815
JS
5827 return ret;
5828}
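
In hclge_add_fd_entry(), the ethtool ring_cookie packs the target queue in its low 32 bits and an optional VF index above bit 32; the driver unpacks it with ethtool_get_flow_spec_ring() and ethtool_get_flow_spec_ring_vf(). The standalone sketch below performs the same decomposition; the DEMO_* masks mirror the uapi <linux/ethtool.h> layout as assumed here, so treat the exact constants as an assumption.

/* Illustrative sketch: splitting an ethtool ring_cookie into queue and VF.
 * The masks and shift follow the assumed uapi layout (VF index above bit 32).
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_RING_MASK		0x00000000ffffffffULL
#define DEMO_RING_VF_MASK	0x000000ff00000000ULL
#define DEMO_RING_VF_OFF	32

int main(void)
{
	uint64_t ring_cookie = (2ULL << DEMO_RING_VF_OFF) | 7ULL; /* VF 2, queue 7 */
	uint32_t ring = ring_cookie & DEMO_RING_MASK;
	uint8_t vf = (ring_cookie & DEMO_RING_VF_MASK) >> DEMO_RING_VF_OFF;

	printf("vf = %u, ring = %u\n", vf, ring);
	return 0;
}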
5829
5830static int hclge_del_fd_entry(struct hnae3_handle *handle,
5831 struct ethtool_rxnfc *cmd)
5832{
5833 struct hclge_vport *vport = hclge_get_vport(handle);
5834 struct hclge_dev *hdev = vport->back;
5835 struct ethtool_rx_flow_spec *fs;
5836 int ret;
5837
5838 if (!hnae3_dev_fd_supported(hdev))
5839 return -EOPNOTSUPP;
5840
5841 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5842
5843 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5844 return -EINVAL;
5845
5846 if (!hclge_fd_rule_exist(hdev, fs->location)) {
5847 dev_err(&hdev->pdev->dev,
39edaf24 5848 "Delete fail, rule %u is inexistent\n", fs->location);
dd74f815
JS
5849 return -ENOENT;
5850 }
5851
9b2f3477
WL
5852 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
5853 NULL, false);
dd74f815
JS
5854 if (ret)
5855 return ret;
5856
44122887
JS
5857 spin_lock_bh(&hdev->fd_rule_lock);
5858 ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5859
5860 spin_unlock_bh(&hdev->fd_rule_lock);
5861
5862 return ret;
dd74f815
JS
5863}
5864
6871af29
JS
5865static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5866 bool clear_list)
5867{
5868 struct hclge_vport *vport = hclge_get_vport(handle);
5869 struct hclge_dev *hdev = vport->back;
5870 struct hclge_fd_rule *rule;
5871 struct hlist_node *node;
44122887 5872 u16 location;
6871af29
JS
5873
5874 if (!hnae3_dev_fd_supported(hdev))
5875 return;
5876
44122887
JS
5877 spin_lock_bh(&hdev->fd_rule_lock);
5878 for_each_set_bit(location, hdev->fd_bmap,
5879 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5880 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5881 NULL, false);
5882
6871af29
JS
5883 if (clear_list) {
5884 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5885 rule_node) {
6871af29
JS
5886 hlist_del(&rule->rule_node);
5887 kfree(rule);
6871af29 5888 }
44122887
JS
5889 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5890 hdev->hclge_fd_rule_num = 0;
5891 bitmap_zero(hdev->fd_bmap,
5892 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6871af29 5893 }
44122887
JS
5894
5895 spin_unlock_bh(&hdev->fd_rule_lock);
6871af29
JS
5896}
5897
5898static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5899{
5900 struct hclge_vport *vport = hclge_get_vport(handle);
5901 struct hclge_dev *hdev = vport->back;
5902 struct hclge_fd_rule *rule;
5903 struct hlist_node *node;
5904 int ret;
5905
65e41e7e
HT
5906 /* Return ok here, because reset error handling will check this
5907 * return value. If error is returned here, the reset process will
5908 * fail.
5909 */
6871af29 5910 if (!hnae3_dev_fd_supported(hdev))
65e41e7e 5911 return 0;
6871af29 5912
8edc2285 5913 /* if fd is disabled, it should not be restored during reset */
9abeb7d8 5914 if (!hdev->fd_en)
8edc2285
JS
5915 return 0;
5916
44122887 5917 spin_lock_bh(&hdev->fd_rule_lock);
6871af29
JS
5918 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5919 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5920 if (!ret)
5921 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5922
5923 if (ret) {
5924 dev_warn(&hdev->pdev->dev,
adcf738b 5925 "Restore rule %u failed, remove it\n",
6871af29 5926 rule->location);
44122887 5927 clear_bit(rule->location, hdev->fd_bmap);
6871af29
JS
5928 hlist_del(&rule->rule_node);
5929 kfree(rule);
5930 hdev->hclge_fd_rule_num--;
5931 }
5932 }
44122887
JS
5933
5934 if (hdev->hclge_fd_rule_num)
5935 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5936
5937 spin_unlock_bh(&hdev->fd_rule_lock);
5938
6871af29
JS
5939 return 0;
5940}
5941
05c2314f
JS
5942static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5943 struct ethtool_rxnfc *cmd)
5944{
5945 struct hclge_vport *vport = hclge_get_vport(handle);
5946 struct hclge_dev *hdev = vport->back;
5947
5948 if (!hnae3_dev_fd_supported(hdev))
5949 return -EOPNOTSUPP;
5950
5951 cmd->rule_cnt = hdev->hclge_fd_rule_num;
5952 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5953
5954 return 0;
5955}
5956
fa663c09
JS
5957static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
5958 struct ethtool_tcpip4_spec *spec,
5959 struct ethtool_tcpip4_spec *spec_mask)
5960{
5961 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5962 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
5963 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5964
5965 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5966 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
5967 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5968
5969 spec->psrc = cpu_to_be16(rule->tuples.src_port);
5970 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5971 0 : cpu_to_be16(rule->tuples_mask.src_port);
5972
5973 spec->pdst = cpu_to_be16(rule->tuples.dst_port);
5974 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
5975 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5976
5977 spec->tos = rule->tuples.ip_tos;
5978 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
5979 0 : rule->tuples_mask.ip_tos;
5980}
5981
5982static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
5983 struct ethtool_usrip4_spec *spec,
5984 struct ethtool_usrip4_spec *spec_mask)
5985{
5986 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5987 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
5988 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5989
5990 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5991 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
5992 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5993
5994 spec->tos = rule->tuples.ip_tos;
5995 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
5996 0 : rule->tuples_mask.ip_tos;
5997
5998 spec->proto = rule->tuples.ip_proto;
5999 spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6000 0 : rule->tuples_mask.ip_proto;
6001
6002 spec->ip_ver = ETH_RX_NFC_IP4;
6003}
6004
6005static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6006 struct ethtool_tcpip6_spec *spec,
6007 struct ethtool_tcpip6_spec *spec_mask)
6008{
6009 cpu_to_be32_array(spec->ip6src,
6010 rule->tuples.src_ip, IPV6_SIZE);
6011 cpu_to_be32_array(spec->ip6dst,
6012 rule->tuples.dst_ip, IPV6_SIZE);
6013 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6014 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6015 else
6016 cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6017 IPV6_SIZE);
6018
6019 if (rule->unused_tuple & BIT(INNER_DST_IP))
6020 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6021 else
6022 cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6023 IPV6_SIZE);
6024
6025 spec->psrc = cpu_to_be16(rule->tuples.src_port);
6026 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6027 0 : cpu_to_be16(rule->tuples_mask.src_port);
6028
6029 spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6030 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6031 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6032}
6033
6034static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6035 struct ethtool_usrip6_spec *spec,
6036 struct ethtool_usrip6_spec *spec_mask)
6037{
6038 cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6039 cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6040 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6041 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6042 else
6043 cpu_to_be32_array(spec_mask->ip6src,
6044 rule->tuples_mask.src_ip, IPV6_SIZE);
6045
6046 if (rule->unused_tuple & BIT(INNER_DST_IP))
6047 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6048 else
6049 cpu_to_be32_array(spec_mask->ip6dst,
6050 rule->tuples_mask.dst_ip, IPV6_SIZE);
6051
6052 spec->l4_proto = rule->tuples.ip_proto;
6053 spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6054 0 : rule->tuples_mask.ip_proto;
6055}
6056
6057static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6058 struct ethhdr *spec,
6059 struct ethhdr *spec_mask)
6060{
6061 ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6062 ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6063
6064 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6065 eth_zero_addr(spec_mask->h_source);
6066 else
6067 ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6068
6069 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6070 eth_zero_addr(spec_mask->h_dest);
6071 else
6072 ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6073
6074 spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6075 spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6076 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6077}
6078
6079static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
6080 struct hclge_fd_rule *rule)
6081{
6082 if (fs->flow_type & FLOW_EXT) {
6083 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6084 fs->m_ext.vlan_tci =
6085 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6086 cpu_to_be16(VLAN_VID_MASK) :
6087 cpu_to_be16(rule->tuples_mask.vlan_tag1);
6088 }
6089
6090 if (fs->flow_type & FLOW_MAC_EXT) {
6091 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6092 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6093 eth_zero_addr(fs->m_u.ether_spec.h_dest);
6094 else
6095 ether_addr_copy(fs->m_u.ether_spec.h_dest,
6096 rule->tuples_mask.dst_mac);
6097 }
6098}
6099
05c2314f
JS
6100static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
6101 struct ethtool_rxnfc *cmd)
6102{
6103 struct hclge_vport *vport = hclge_get_vport(handle);
6104 struct hclge_fd_rule *rule = NULL;
6105 struct hclge_dev *hdev = vport->back;
6106 struct ethtool_rx_flow_spec *fs;
6107 struct hlist_node *node2;
6108
6109 if (!hnae3_dev_fd_supported(hdev))
6110 return -EOPNOTSUPP;
6111
6112 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6113
44122887
JS
6114 spin_lock_bh(&hdev->fd_rule_lock);
6115
05c2314f
JS
6116 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
6117 if (rule->location >= fs->location)
6118 break;
6119 }
6120
44122887
JS
6121 if (!rule || fs->location != rule->location) {
6122 spin_unlock_bh(&hdev->fd_rule_lock);
6123
05c2314f 6124 return -ENOENT;
44122887 6125 }
05c2314f
JS
6126
6127 fs->flow_type = rule->flow_type;
6128 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
6129 case SCTP_V4_FLOW:
6130 case TCP_V4_FLOW:
6131 case UDP_V4_FLOW:
fa663c09
JS
6132 hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
6133 &fs->m_u.tcp_ip4_spec);
05c2314f
JS
6134 break;
6135 case IP_USER_FLOW:
fa663c09
JS
6136 hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
6137 &fs->m_u.usr_ip4_spec);
05c2314f
JS
6138 break;
6139 case SCTP_V6_FLOW:
6140 case TCP_V6_FLOW:
6141 case UDP_V6_FLOW:
fa663c09
JS
6142 hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
6143 &fs->m_u.tcp_ip6_spec);
05c2314f
JS
6144 break;
6145 case IPV6_USER_FLOW:
fa663c09
JS
6146 hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
6147 &fs->m_u.usr_ip6_spec);
05c2314f 6148 break;
fa663c09
JS
6149 /* The flow type of the fd rule has been checked before it was added to
6150 * the rule list. As all other flow types have been handled, it must be
6151 * ETHER_FLOW for the default case
6152 */
05c2314f 6153 default:
fa663c09
JS
6154 hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
6155 &fs->m_u.ether_spec);
6156 break;
05c2314f
JS
6157 }
6158
fa663c09 6159 hclge_fd_get_ext_info(fs, rule);
05c2314f
JS
6160
6161 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6162 fs->ring_cookie = RX_CLS_FLOW_DISC;
6163 } else {
6164 u64 vf_id;
6165
6166 fs->ring_cookie = rule->queue_id;
6167 vf_id = rule->vf_id;
6168 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6169 fs->ring_cookie |= vf_id;
6170 }
6171
44122887
JS
6172 spin_unlock_bh(&hdev->fd_rule_lock);
6173
05c2314f
JS
6174 return 0;
6175}
6176
6177static int hclge_get_all_rules(struct hnae3_handle *handle,
6178 struct ethtool_rxnfc *cmd, u32 *rule_locs)
6179{
6180 struct hclge_vport *vport = hclge_get_vport(handle);
6181 struct hclge_dev *hdev = vport->back;
6182 struct hclge_fd_rule *rule;
6183 struct hlist_node *node2;
6184 int cnt = 0;
6185
6186 if (!hnae3_dev_fd_supported(hdev))
6187 return -EOPNOTSUPP;
6188
6189 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6190
44122887 6191 spin_lock_bh(&hdev->fd_rule_lock);
05c2314f
JS
6192 hlist_for_each_entry_safe(rule, node2,
6193 &hdev->fd_rule_list, rule_node) {
44122887
JS
6194 if (cnt == cmd->rule_cnt) {
6195 spin_unlock_bh(&hdev->fd_rule_lock);
05c2314f 6196 return -EMSGSIZE;
44122887 6197 }
05c2314f
JS
6198
6199 rule_locs[cnt] = rule->location;
6200 cnt++;
6201 }
6202
44122887
JS
6203 spin_unlock_bh(&hdev->fd_rule_lock);
6204
05c2314f
JS
6205 cmd->rule_cnt = cnt;
6206
6207 return 0;
6208}
6209
d93ed94f
JS
6210static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6211 struct hclge_fd_rule_tuples *tuples)
6212{
47327c93
GH
6213#define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
6214#define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
6215
d93ed94f
JS
6216 tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6217 tuples->ip_proto = fkeys->basic.ip_proto;
6218 tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6219
6220 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6221 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6222 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6223 } else {
47327c93
GH
6224 int i;
6225
6226 for (i = 0; i < IPV6_SIZE; i++) {
6227 tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
6228 tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
6229 }
d93ed94f
JS
6230 }
6231}
6232
6233/* traverse all rules, check whether an existed rule has the same tuples */
6234static struct hclge_fd_rule *
6235hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6236 const struct hclge_fd_rule_tuples *tuples)
6237{
6238 struct hclge_fd_rule *rule = NULL;
6239 struct hlist_node *node;
6240
6241 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6242 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6243 return rule;
6244 }
6245
6246 return NULL;
6247}
6248
6249static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6250 struct hclge_fd_rule *rule)
6251{
6252 rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6253 BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6254 BIT(INNER_SRC_PORT);
6255 rule->action = 0;
6256 rule->vf_id = 0;
6257 rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6258 if (tuples->ether_proto == ETH_P_IP) {
6259 if (tuples->ip_proto == IPPROTO_TCP)
6260 rule->flow_type = TCP_V4_FLOW;
6261 else
6262 rule->flow_type = UDP_V4_FLOW;
6263 } else {
6264 if (tuples->ip_proto == IPPROTO_TCP)
6265 rule->flow_type = TCP_V6_FLOW;
6266 else
6267 rule->flow_type = UDP_V6_FLOW;
6268 }
6269 memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6270 memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
6271}
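
For aRFS rules, hclge_fd_build_arfs_rule() derives the ethtool flow type purely from the L3 protocol (IPv4 vs. IPv6) and the L4 protocol (TCP vs. UDP). The tiny sketch below mirrors that two-way branch with stand-in enum values rather than the real TCP_V4_FLOW etc. constants.

/* Illustrative sketch: picking a flow type from the L3/L4 protocol pair,
 * mirroring the branches in hclge_fd_build_arfs_rule(). Enum values are
 * stand-ins, not the ethtool constants.
 */
#include <stdio.h>

enum demo_flow { DEMO_TCP_V4, DEMO_UDP_V4, DEMO_TCP_V6, DEMO_UDP_V6 };

static enum demo_flow demo_pick(int is_ipv4, int is_tcp)
{
	if (is_ipv4)
		return is_tcp ? DEMO_TCP_V4 : DEMO_UDP_V4;
	return is_tcp ? DEMO_TCP_V6 : DEMO_UDP_V6;
}

int main(void)
{
	printf("flow type = %d\n", demo_pick(1, 1));	/* IPv4 + TCP */
	return 0;
}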
6272
6273static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6274 u16 flow_id, struct flow_keys *fkeys)
6275{
d93ed94f
JS
6276 struct hclge_vport *vport = hclge_get_vport(handle);
6277 struct hclge_fd_rule_tuples new_tuples;
6278 struct hclge_dev *hdev = vport->back;
6279 struct hclge_fd_rule *rule;
6280 u16 tmp_queue_id;
6281 u16 bit_id;
6282 int ret;
6283
6284 if (!hnae3_dev_fd_supported(hdev))
6285 return -EOPNOTSUPP;
6286
6287 memset(&new_tuples, 0, sizeof(new_tuples));
6288 hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6289
6290 spin_lock_bh(&hdev->fd_rule_lock);
6291
6292 /* when an fd rule added by the user already exists,
6293 * arfs should not work
6294 */
6295 if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6296 spin_unlock_bh(&hdev->fd_rule_lock);
d93ed94f
JS
6297 return -EOPNOTSUPP;
6298 }
6299
6300 /* check whether a flow director filter exists for this flow;
6301 * if not, create a new filter for it;
6302 * if a filter exists with a different queue id, modify the filter;
6303 * if a filter exists with the same queue id, do nothing
6304 */
6305 rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6306 if (!rule) {
6307 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6308 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6309 spin_unlock_bh(&hdev->fd_rule_lock);
d93ed94f
JS
6310 return -ENOSPC;
6311 }
6312
d659f9f6 6313 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
d93ed94f
JS
6314 if (!rule) {
6315 spin_unlock_bh(&hdev->fd_rule_lock);
d93ed94f
JS
6316 return -ENOMEM;
6317 }
6318
6319 set_bit(bit_id, hdev->fd_bmap);
6320 rule->location = bit_id;
6321 rule->flow_id = flow_id;
6322 rule->queue_id = queue_id;
6323 hclge_fd_build_arfs_rule(&new_tuples, rule);
6324 ret = hclge_fd_config_rule(hdev, rule);
6325
6326 spin_unlock_bh(&hdev->fd_rule_lock);
6327
6328 if (ret)
6329 return ret;
6330
6331 return rule->location;
6332 }
6333
6334 spin_unlock_bh(&hdev->fd_rule_lock);
6335
6336 if (rule->queue_id == queue_id)
6337 return rule->location;
6338
6339 tmp_queue_id = rule->queue_id;
6340 rule->queue_id = queue_id;
6341 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6342 if (ret) {
6343 rule->queue_id = tmp_queue_id;
6344 return ret;
6345 }
6346
6347 return rule->location;
d93ed94f
JS
6348}
6349
6350static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6351{
6352#ifdef CONFIG_RFS_ACCEL
6353 struct hnae3_handle *handle = &hdev->vport[0].nic;
6354 struct hclge_fd_rule *rule;
6355 struct hlist_node *node;
6356 HLIST_HEAD(del_list);
6357
6358 spin_lock_bh(&hdev->fd_rule_lock);
6359 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6360 spin_unlock_bh(&hdev->fd_rule_lock);
6361 return;
6362 }
6363 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6364 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6365 rule->flow_id, rule->location)) {
6366 hlist_del_init(&rule->rule_node);
6367 hlist_add_head(&rule->rule_node, &del_list);
6368 hdev->hclge_fd_rule_num--;
6369 clear_bit(rule->location, hdev->fd_bmap);
6370 }
6371 }
6372 spin_unlock_bh(&hdev->fd_rule_lock);
6373
6374 hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6375 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6376 rule->location, NULL, false);
6377 kfree(rule);
6378 }
6379#endif
6380}
6381
6382static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6383{
6384#ifdef CONFIG_RFS_ACCEL
6385 struct hclge_vport *vport = hclge_get_vport(handle);
6386 struct hclge_dev *hdev = vport->back;
6387
6388 if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6389 hclge_del_all_fd_entries(handle, true);
6390#endif
6391}
6392
4d60291b
HT
6393static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6394{
6395 struct hclge_vport *vport = hclge_get_vport(handle);
6396 struct hclge_dev *hdev = vport->back;
6397
6398 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6399 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6400}
6401
a4de0228
HT
6402static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
6403{
6404 struct hclge_vport *vport = hclge_get_vport(handle);
6405 struct hclge_dev *hdev = vport->back;
6406
6407 return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
6408}
6409
4d60291b
HT
6410static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6411{
6412 struct hclge_vport *vport = hclge_get_vport(handle);
6413 struct hclge_dev *hdev = vport->back;
6414
6415 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6416}
6417
6418static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6419{
6420 struct hclge_vport *vport = hclge_get_vport(handle);
6421 struct hclge_dev *hdev = vport->back;
6422
f02eb82d 6423 return hdev->rst_stats.hw_reset_done_cnt;
4d60291b
HT
6424}
6425
c17852a8
JS
6426static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
6427{
6428 struct hclge_vport *vport = hclge_get_vport(handle);
6429 struct hclge_dev *hdev = vport->back;
44122887 6430 bool clear;
c17852a8 6431
9abeb7d8 6432 hdev->fd_en = enable;
1483fa49 6433 clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
c17852a8 6434 if (!enable)
44122887 6435 hclge_del_all_fd_entries(handle, clear);
c17852a8
JS
6436 else
6437 hclge_restore_fd_entries(handle);
6438}
6439
46a3df9f
S
6440static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
6441{
6442 struct hclge_desc desc;
d44f9b63
YL
6443 struct hclge_config_mac_mode_cmd *req =
6444 (struct hclge_config_mac_mode_cmd *)desc.data;
a90bb9a5 6445 u32 loop_en = 0;
46a3df9f
S
6446 int ret;
6447
6448 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
b9a8f883
YL
6449
6450 if (enable) {
6451 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
6452 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
6453 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
6454 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
6455 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
6456 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
6457 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
6458 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
6459 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
6460 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
6461 }
6462
a90bb9a5 6463 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
46a3df9f
S
6464
6465 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6466 if (ret)
6467 dev_err(&hdev->pdev->dev,
6468 "mac enable fail, ret =%d.\n", ret);
6469}
6470
dd2956ea
YM
6471static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
6472 u8 switch_param, u8 param_mask)
6473{
6474 struct hclge_mac_vlan_switch_cmd *req;
6475 struct hclge_desc desc;
6476 u32 func_id;
6477 int ret;
6478
6479 func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
6480 req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
71c5e83b
GH
6481
6482 /* read current config parameter */
dd2956ea 6483 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
71c5e83b 6484 true);
dd2956ea
YM
6485 req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
6486 req->func_id = cpu_to_le32(func_id);
71c5e83b
GH
6487
6488 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6489 if (ret) {
6490 dev_err(&hdev->pdev->dev,
6491 "read mac vlan switch parameter fail, ret = %d\n", ret);
6492 return ret;
6493 }
6494
6495 /* modify and write new config parameter */
6496 hclge_cmd_reuse_desc(&desc, false);
6497 req->switch_param = (req->switch_param & param_mask) | switch_param;
dd2956ea
YM
6498 req->param_mask = param_mask;
6499
6500 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6501 if (ret)
6502 dev_err(&hdev->pdev->dev,
6503 "set mac vlan switch parameter fail, ret = %d\n", ret);
6504 return ret;
6505}
6506
c9765a89
YM
6507static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
6508 int link_ret)
6509{
6510#define HCLGE_PHY_LINK_STATUS_NUM 200
6511
6512 struct phy_device *phydev = hdev->hw.mac.phydev;
6513 int i = 0;
6514 int ret;
6515
6516 do {
6517 ret = phy_read_status(phydev);
6518 if (ret) {
6519 dev_err(&hdev->pdev->dev,
6520 "phy update link status fail, ret = %d\n", ret);
6521 return;
6522 }
6523
6524 if (phydev->link == link_ret)
6525 break;
6526
6527 msleep(HCLGE_LINK_STATUS_MS);
6528 } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
6529}
6530
6531static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
6532{
6533#define HCLGE_MAC_LINK_STATUS_NUM 100
6534
6535 int i = 0;
6536 int ret;
6537
6538 do {
6539 ret = hclge_get_mac_link_status(hdev);
6540 if (ret < 0)
6541 return ret;
6542 else if (ret == link_ret)
6543 return 0;
6544
6545 msleep(HCLGE_LINK_STATUS_MS);
6546 } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
6547 return -EBUSY;
6548}
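
hclge_phy_link_status_wait() and hclge_mac_link_status_wait() share one poll-sleep-retry shape: query the link state, return early once it matches the expected value, otherwise sleep HCLGE_LINK_STATUS_MS and give up (-EBUSY) after a fixed number of attempts. The user-space sketch below captures that pattern; the polled condition and timing values are placeholders.

/* Illustrative sketch: bounded poll-and-sleep loop in the style of
 * hclge_mac_link_status_wait(). The condition checked here is a stand-in.
 */
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define DEMO_POLL_MS	10
#define DEMO_POLL_NUM	100

static bool demo_condition_met(int attempt)
{
	return attempt >= 3;	/* pretend the link comes up on the 4th poll */
}

static int demo_wait(void)
{
	int i;

	for (i = 0; i < DEMO_POLL_NUM; i++) {
		if (demo_condition_met(i))
			return 0;
		usleep(DEMO_POLL_MS * 1000);
	}
	return -1;		/* timed out, like returning -EBUSY */
}

int main(void)
{
	printf("wait result: %d\n", demo_wait());
	return 0;
}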
6549
6550static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
6551 bool is_phy)
6552{
6553#define HCLGE_LINK_STATUS_DOWN 0
6554#define HCLGE_LINK_STATUS_UP 1
6555
6556 int link_ret;
6557
6558 link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
6559
6560 if (is_phy)
6561 hclge_phy_link_status_wait(hdev, link_ret);
6562
6563 return hclge_mac_link_status_wait(hdev, link_ret);
6564}
6565
eb66d503 6566static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
c39c4d98 6567{
c39c4d98 6568 struct hclge_config_mac_mode_cmd *req;
c39c4d98
YL
6569 struct hclge_desc desc;
6570 u32 loop_en;
6571 int ret;
6572
e4d68dae
YL
6573 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
6574 /* 1 Read out the MAC mode config at first */
6575 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
6576 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6577 if (ret) {
6578 dev_err(&hdev->pdev->dev,
6579 "mac loopback get fail, ret =%d.\n", ret);
6580 return ret;
6581 }
c39c4d98 6582
e4d68dae
YL
6583 /* 2 Then setup the loopback flag */
6584 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
e4e87715 6585 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
e4d68dae
YL
6586
6587 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
c39c4d98 6588
e4d68dae
YL
6589 /* 3 Config mac work mode with loopback flag
6590 * and its original configure parameters
6591 */
6592 hclge_cmd_reuse_desc(&desc, false);
6593 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6594 if (ret)
6595 dev_err(&hdev->pdev->dev,
6596 "mac loopback set fail, ret =%d.\n", ret);
6597 return ret;
6598}
c39c4d98 6599
1cbc662d 6600static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
4dc13b96 6601 enum hnae3_loop loop_mode)
5fd50ac3
PL
6602{
6603#define HCLGE_SERDES_RETRY_MS 10
6604#define HCLGE_SERDES_RETRY_NUM 100
350fda0a 6605
5fd50ac3
PL
6606 struct hclge_serdes_lb_cmd *req;
6607 struct hclge_desc desc;
6608 int ret, i = 0;
4dc13b96 6609 u8 loop_mode_b;
5fd50ac3 6610
d0d72bac 6611 req = (struct hclge_serdes_lb_cmd *)desc.data;
5fd50ac3
PL
6612 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
6613
4dc13b96
FL
6614 switch (loop_mode) {
6615 case HNAE3_LOOP_SERIAL_SERDES:
6616 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6617 break;
6618 case HNAE3_LOOP_PARALLEL_SERDES:
6619 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
6620 break;
6621 default:
6622 dev_err(&hdev->pdev->dev,
6623 "unsupported serdes loopback mode %d\n", loop_mode);
6624 return -ENOTSUPP;
6625 }
6626
5fd50ac3 6627 if (en) {
4dc13b96
FL
6628 req->enable = loop_mode_b;
6629 req->mask = loop_mode_b;
5fd50ac3 6630 } else {
4dc13b96 6631 req->mask = loop_mode_b;
5fd50ac3
PL
6632 }
6633
6634 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6635 if (ret) {
6636 dev_err(&hdev->pdev->dev,
6637 "serdes loopback set fail, ret = %d\n", ret);
6638 return ret;
6639 }
6640
6641 do {
6642 msleep(HCLGE_SERDES_RETRY_MS);
6643 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
6644 true);
6645 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6646 if (ret) {
6647 dev_err(&hdev->pdev->dev,
6648 "serdes loopback get, ret = %d\n", ret);
6649 return ret;
6650 }
6651 } while (++i < HCLGE_SERDES_RETRY_NUM &&
6652 !(req->result & HCLGE_CMD_SERDES_DONE_B));
6653
6654 if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
6655 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
6656 return -EBUSY;
6657 } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
6658 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
6659 return -EIO;
6660 }
1cbc662d
YM
6661 return ret;
6662}
6663
6664static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
6665 enum hnae3_loop loop_mode)
6666{
6667 int ret;
6668
6669 ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
6670 if (ret)
6671 return ret;
5fd50ac3 6672
0f29fc23 6673 hclge_cfg_mac_mode(hdev, en);
350fda0a 6674
60df7e91 6675 ret = hclge_mac_phy_link_status_wait(hdev, en, false);
c9765a89
YM
6676 if (ret)
6677 dev_err(&hdev->pdev->dev,
6678 "serdes loopback config mac mode timeout\n");
6679
6680 return ret;
6681}
350fda0a 6682
c9765a89
YM
6683static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
6684 struct phy_device *phydev)
6685{
6686 int ret;
350fda0a 6687
c9765a89
YM
6688 if (!phydev->suspended) {
6689 ret = phy_suspend(phydev);
6690 if (ret)
6691 return ret;
6692 }
6693
6694 ret = phy_resume(phydev);
6695 if (ret)
6696 return ret;
6697
6698 return phy_loopback(phydev, true);
6699}
6700
6701static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
6702 struct phy_device *phydev)
6703{
6704 int ret;
6705
6706 ret = phy_loopback(phydev, false);
6707 if (ret)
6708 return ret;
6709
6710 return phy_suspend(phydev);
6711}
6712
6713static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
6714{
6715 struct phy_device *phydev = hdev->hw.mac.phydev;
6716 int ret;
6717
6718 if (!phydev)
6719 return -ENOTSUPP;
6720
6721 if (en)
6722 ret = hclge_enable_phy_loopback(hdev, phydev);
6723 else
6724 ret = hclge_disable_phy_loopback(hdev, phydev);
6725 if (ret) {
6726 dev_err(&hdev->pdev->dev,
6727 "set phy loopback fail, ret = %d\n", ret);
6728 return ret;
6729 }
6730
6731 hclge_cfg_mac_mode(hdev, en);
6732
60df7e91 6733 ret = hclge_mac_phy_link_status_wait(hdev, en, true);
c9765a89
YM
6734 if (ret)
6735 dev_err(&hdev->pdev->dev,
6736 "phy loopback config mac mode timeout\n");
6737
6738 return ret;
5fd50ac3
PL
6739}
6740
ebaf1908 6741static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
0f29fc23
YL
6742 int stream_id, bool enable)
6743{
6744 struct hclge_desc desc;
6745 struct hclge_cfg_com_tqp_queue_cmd *req =
6746 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6747 int ret;
6748
6749 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6750 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6751 req->stream_id = cpu_to_le16(stream_id);
ebaf1908
WL
6752 if (enable)
6753 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
0f29fc23
YL
6754
6755 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6756 if (ret)
6757 dev_err(&hdev->pdev->dev,
6758 "Tqp enable fail, status =%d.\n", ret);
6759 return ret;
6760}
6761
e4d68dae
YL
6762static int hclge_set_loopback(struct hnae3_handle *handle,
6763 enum hnae3_loop loop_mode, bool en)
6764{
6765 struct hclge_vport *vport = hclge_get_vport(handle);
205a24ca 6766 struct hnae3_knic_private_info *kinfo;
e4d68dae 6767 struct hclge_dev *hdev = vport->back;
0f29fc23 6768 int i, ret;
e4d68dae 6769
dd2956ea
YM
6770 /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
6771 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
6772 * the same, the packets are looped back in the SSU. If SSU loopback
6773 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
6774 */
6775 if (hdev->pdev->revision >= 0x21) {
6776 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
6777
6778 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
6779 HCLGE_SWITCH_ALW_LPBK_MASK);
6780 if (ret)
6781 return ret;
6782 }
6783
e4d68dae 6784 switch (loop_mode) {
eb66d503
FL
6785 case HNAE3_LOOP_APP:
6786 ret = hclge_set_app_loopback(hdev, en);
c39c4d98 6787 break;
4dc13b96
FL
6788 case HNAE3_LOOP_SERIAL_SERDES:
6789 case HNAE3_LOOP_PARALLEL_SERDES:
6790 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
5fd50ac3 6791 break;
c9765a89
YM
6792 case HNAE3_LOOP_PHY:
6793 ret = hclge_set_phy_loopback(hdev, en);
6794 break;
c39c4d98
YL
6795 default:
6796 ret = -ENOTSUPP;
6797 dev_err(&hdev->pdev->dev,
6798 "loop_mode %d is not supported\n", loop_mode);
6799 break;
6800 }
6801
47ef6dec
JS
6802 if (ret)
6803 return ret;
6804
205a24ca
HT
6805 kinfo = &vport->nic.kinfo;
6806 for (i = 0; i < kinfo->num_tqps; i++) {
0f29fc23
YL
6807 ret = hclge_tqp_enable(hdev, i, 0, en);
6808 if (ret)
6809 return ret;
6810 }
46a3df9f 6811
0f29fc23 6812 return 0;
46a3df9f
S
6813}
6814
1cbc662d
YM
6815static int hclge_set_default_loopback(struct hclge_dev *hdev)
6816{
6817 int ret;
6818
6819 ret = hclge_set_app_loopback(hdev, false);
6820 if (ret)
6821 return ret;
6822
6823 ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
6824 if (ret)
6825 return ret;
6826
6827 return hclge_cfg_serdes_loopback(hdev, false,
6828 HNAE3_LOOP_PARALLEL_SERDES);
6829}
6830
46a3df9f
S
6831static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6832{
6833 struct hclge_vport *vport = hclge_get_vport(handle);
205a24ca 6834 struct hnae3_knic_private_info *kinfo;
46a3df9f
S
6835 struct hnae3_queue *queue;
6836 struct hclge_tqp *tqp;
6837 int i;
6838
205a24ca
HT
6839 kinfo = &vport->nic.kinfo;
6840 for (i = 0; i < kinfo->num_tqps; i++) {
46a3df9f
S
6841 queue = handle->kinfo.tqp[i];
6842 tqp = container_of(queue, struct hclge_tqp, q);
6843 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6844 }
6845}
6846
1c6dfe6f
YL
6847static void hclge_flush_link_update(struct hclge_dev *hdev)
6848{
6849#define HCLGE_FLUSH_LINK_TIMEOUT 100000
6850
6851 unsigned long last = hdev->serv_processed_cnt;
6852 int i = 0;
6853
6854 while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
6855 i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
6856 last == hdev->serv_processed_cnt)
6857 usleep_range(1, 1);
6858}
6859
8cdb992f
JS
6860static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6861{
6862 struct hclge_vport *vport = hclge_get_vport(handle);
6863 struct hclge_dev *hdev = vport->back;
6864
6865 if (enable) {
a9775bb6 6866 hclge_task_schedule(hdev, 0);
8cdb992f 6867 } else {
1c6dfe6f 6868 /* Set the DOWN flag here to disable link updating */
7be1b9f3 6869 set_bit(HCLGE_STATE_DOWN, &hdev->state);
1c6dfe6f
YL
6870
6871 /* flush memory to make sure DOWN is seen by service task */
6872 smp_mb__before_atomic();
6873 hclge_flush_link_update(hdev);
8cdb992f
JS
6874 }
6875}
6876
46a3df9f
S
6877static int hclge_ae_start(struct hnae3_handle *handle)
6878{
6879 struct hclge_vport *vport = hclge_get_vport(handle);
6880 struct hclge_dev *hdev = vport->back;
46a3df9f 6881
46a3df9f
S
6882 /* mac enable */
6883 hclge_cfg_mac_mode(hdev, true);
6884 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
be8d8cdb 6885 hdev->hw.mac.link = 0;
46a3df9f 6886
b50ae26c
PL
6887 /* reset tqp stats */
6888 hclge_reset_tqp_stats(handle);
6889
b01b7cf1 6890 hclge_mac_start_phy(hdev);
46a3df9f 6891
46a3df9f
S
6892 return 0;
6893}
6894
6895static void hclge_ae_stop(struct hnae3_handle *handle)
6896{
6897 struct hclge_vport *vport = hclge_get_vport(handle);
6898 struct hclge_dev *hdev = vport->back;
39cfbc9c 6899 int i;
46a3df9f 6900
2f7e4896
FL
6901 set_bit(HCLGE_STATE_DOWN, &hdev->state);
6902
d93ed94f
JS
6903 hclge_clear_arfs_rules(handle);
6904
35d93a30
HT
6905 /* If it is not PF reset, the firmware will disable the MAC,
6906 * so it only needs to stop the phy here.
6907 */
6908 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6909 hdev->reset_type != HNAE3_FUNC_RESET) {
9617f668 6910 hclge_mac_stop_phy(hdev);
ed8fb4b2 6911 hclge_update_link_status(hdev);
b50ae26c 6912 return;
9617f668 6913 }
b50ae26c 6914
39cfbc9c
HT
6915 for (i = 0; i < handle->kinfo.num_tqps; i++)
6916 hclge_reset_tqp(handle, i);
6917
20981a1e
HT
6918 hclge_config_mac_tnl_int(hdev, false);
6919
46a3df9f
S
6920 /* Mac disable */
6921 hclge_cfg_mac_mode(hdev, false);
6922
6923 hclge_mac_stop_phy(hdev);
6924
6925 /* reset tqp stats */
6926 hclge_reset_tqp_stats(handle);
f30dfddc 6927 hclge_update_link_status(hdev);
46a3df9f
S
6928}
6929
a6d818e3
YL
6930int hclge_vport_start(struct hclge_vport *vport)
6931{
ee4bcd3b
JS
6932 struct hclge_dev *hdev = vport->back;
6933
a6d818e3
YL
6934 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6935 vport->last_active_jiffies = jiffies;
ee4bcd3b 6936
039ba863
JS
6937 if (test_bit(vport->vport_id, hdev->vport_config_block)) {
6938 if (vport->vport_id) {
6939 hclge_restore_mac_table_common(vport);
6940 hclge_restore_vport_vlan_table(vport);
6941 } else {
6942 hclge_restore_hw_table(hdev);
6943 }
6944 }
ee4bcd3b
JS
6945
6946 clear_bit(vport->vport_id, hdev->vport_config_block);
6947
a6d818e3
YL
6948 return 0;
6949}
6950
6951void hclge_vport_stop(struct hclge_vport *vport)
6952{
6953 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6954}
6955
6956static int hclge_client_start(struct hnae3_handle *handle)
6957{
6958 struct hclge_vport *vport = hclge_get_vport(handle);
6959
6960 return hclge_vport_start(vport);
6961}
6962
6963static void hclge_client_stop(struct hnae3_handle *handle)
6964{
6965 struct hclge_vport *vport = hclge_get_vport(handle);
6966
6967 hclge_vport_stop(vport);
6968}
6969
46a3df9f
S
6970static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6971 u16 cmdq_resp, u8 resp_code,
6972 enum hclge_mac_vlan_tbl_opcode op)
6973{
6974 struct hclge_dev *hdev = vport->back;
46a3df9f
S
6975
6976 if (cmdq_resp) {
6977 dev_err(&hdev->pdev->dev,
adcf738b 6978 "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
46a3df9f
S
6979 cmdq_resp);
6980 return -EIO;
6981 }
6982
6983 if (op == HCLGE_MAC_VLAN_ADD) {
c631c696 6984 if (!resp_code || resp_code == 1)
6e4139f6 6985 return 0;
c631c696
JS
6986 else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
6987 resp_code == HCLGE_ADD_MC_OVERFLOW)
6e4139f6 6988 return -ENOSPC;
6e4139f6
JS
6989
6990 dev_err(&hdev->pdev->dev,
6991 "add mac addr failed for undefined, code=%u.\n",
6992 resp_code);
6993 return -EIO;
46a3df9f
S
6994 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
6995 if (!resp_code) {
6e4139f6 6996 return 0;
46a3df9f 6997 } else if (resp_code == 1) {
46a3df9f
S
6998 dev_dbg(&hdev->pdev->dev,
6999 "remove mac addr failed for miss.\n");
6e4139f6 7000 return -ENOENT;
46a3df9f 7001 }
6e4139f6
JS
7002
7003 dev_err(&hdev->pdev->dev,
7004 "remove mac addr failed for undefined, code=%u.\n",
7005 resp_code);
7006 return -EIO;
46a3df9f
S
7007 } else if (op == HCLGE_MAC_VLAN_LKUP) {
7008 if (!resp_code) {
6e4139f6 7009 return 0;
46a3df9f 7010 } else if (resp_code == 1) {
46a3df9f
S
7011 dev_dbg(&hdev->pdev->dev,
7012 "lookup mac addr failed for miss.\n");
6e4139f6 7013 return -ENOENT;
46a3df9f 7014 }
6e4139f6 7015
46a3df9f 7016 dev_err(&hdev->pdev->dev,
6e4139f6
JS
7017 "lookup mac addr failed for undefined, code=%u.\n",
7018 resp_code);
7019 return -EIO;
46a3df9f
S
7020 }
7021
6e4139f6
JS
7022 dev_err(&hdev->pdev->dev,
7023 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
7024
7025 return -EINVAL;
46a3df9f
S
7026}
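/* Editor's note: a standalone sketch (not driver code) of the response-code
 * decoding done by hclge_get_mac_vlan_cmd_status() for the ADD operation:
 * 0 and 1 mean success, the unicast/multicast overflow codes map to -ENOSPC,
 * and anything else is treated as -EIO. The overflow values below are
 * placeholders; the real ones come from HCLGE_ADD_UC_OVERFLOW and
 * HCLGE_ADD_MC_OVERFLOW, which are not visible in this file.
 */
#include <errno.h>
#include <stdio.h>

#define ADD_UC_OVERFLOW	2	/* placeholder for HCLGE_ADD_UC_OVERFLOW */
#define ADD_MC_OVERFLOW	3	/* placeholder for HCLGE_ADD_MC_OVERFLOW */

static int decode_add_resp(int resp_code)
{
	if (resp_code == 0 || resp_code == 1)
		return 0;
	if (resp_code == ADD_UC_OVERFLOW || resp_code == ADD_MC_OVERFLOW)
		return -ENOSPC;
	return -EIO;
}

int main(void)
{
	printf("%d %d %d\n", decode_add_resp(0), decode_add_resp(ADD_UC_OVERFLOW),
	       decode_add_resp(9));
	return 0;
}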
7027
7028static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
7029{
b37ce587
YM
7030#define HCLGE_VF_NUM_IN_FIRST_DESC 192
7031
b9a8f883
YL
7032 unsigned int word_num;
7033 unsigned int bit_num;
46a3df9f
S
7034
7035 if (vfid > 255 || vfid < 0)
7036 return -EIO;
7037
b37ce587 7038 if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
46a3df9f
S
7039 word_num = vfid / 32;
7040 bit_num = vfid % 32;
7041 if (clr)
a90bb9a5 7042 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
46a3df9f 7043 else
a90bb9a5 7044 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
46a3df9f 7045 } else {
b37ce587 7046 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
46a3df9f
S
7047 bit_num = vfid % 32;
7048 if (clr)
a90bb9a5 7049 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
46a3df9f 7050 else
a90bb9a5 7051 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
46a3df9f
S
7052 }
7053
7054 return 0;
7055}
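/* Editor's note: a standalone sketch (not driver code) of the vfid-to-bitmap
 * mapping performed by hclge_update_desc_vfid() above: function ids 0..191
 * land in the six 32-bit data words of desc[1], and ids 192..255 land in
 * desc[2]. The helper name vfid_to_bit_pos() is hypothetical.
 */
#include <stdio.h>

struct bit_pos {
	int desc_index;	/* 1 or 2 */
	int word;	/* 32-bit word within the descriptor data */
	int bit;	/* bit within that word */
};

static struct bit_pos vfid_to_bit_pos(int vfid)
{
	struct bit_pos pos;

	if (vfid < 192) {
		pos.desc_index = 1;
		pos.word = vfid / 32;
	} else {
		pos.desc_index = 2;
		pos.word = (vfid - 192) / 32;
	}
	pos.bit = vfid % 32;

	return pos;
}

int main(void)
{
	int samples[] = { 0, 31, 191, 192, 255 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		struct bit_pos pos = vfid_to_bit_pos(samples[i]);

		printf("vfid %3d -> desc[%d] word %d bit %d\n",
		       samples[i], pos.desc_index, pos.word, pos.bit);
	}
	return 0;
}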
7056
7057static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
7058{
7059#define HCLGE_DESC_NUMBER 3
7060#define HCLGE_FUNC_NUMBER_PER_DESC 6
7061 int i, j;
7062
6c39d527 7063 for (i = 1; i < HCLGE_DESC_NUMBER; i++)
46a3df9f
S
7064 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
7065 if (desc[i].data[j])
7066 return false;
7067
7068 return true;
7069}
7070
d44f9b63 7071static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
3a586422 7072 const u8 *addr, bool is_mc)
46a3df9f
S
7073{
7074 const unsigned char *mac_addr = addr;
7075 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
7076 (mac_addr[0]) | (mac_addr[1] << 8);
7077 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
7078
3a586422
WL
7079 hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7080 if (is_mc) {
7081 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
7082 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7083 }
7084
46a3df9f
S
7085 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
7086 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
7087}
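/* Editor's note: a standalone sketch (not driver code) showing how
 * hclge_prepare_mac_addr() above packs a 6-byte MAC address into the table
 * entry: bytes 0..3 form a 32-bit value (byte 0 in the lowest bits) and bytes
 * 4..5 form a 16-bit value, both written little-endian to the descriptor.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x18, 0x2d, 0x11, 0x22, 0x33 };

	uint32_t hi = mac[0] | (mac[1] << 8) | (mac[2] << 16) |
		      ((uint32_t)mac[3] << 24);
	uint16_t lo = mac[4] | (mac[5] << 8);

	/* for 00:18:2d:11:22:33 this prints hi=0x112d1800 lo=0x3322 */
	printf("hi=0x%08x lo=0x%04x\n", hi, lo);
	return 0;
}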
7088
46a3df9f 7089static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
d44f9b63 7090 struct hclge_mac_vlan_tbl_entry_cmd *req)
46a3df9f
S
7091{
7092 struct hclge_dev *hdev = vport->back;
7093 struct hclge_desc desc;
7094 u8 resp_code;
a90bb9a5 7095 u16 retval;
46a3df9f
S
7096 int ret;
7097
7098 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
7099
d44f9b63 7100 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
46a3df9f
S
7101
7102 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7103 if (ret) {
7104 dev_err(&hdev->pdev->dev,
7105 "del mac addr failed for cmd_send, ret =%d.\n",
7106 ret);
7107 return ret;
7108 }
a90bb9a5
YL
7109 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7110 retval = le16_to_cpu(desc.retval);
46a3df9f 7111
a90bb9a5 7112 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
46a3df9f
S
7113 HCLGE_MAC_VLAN_REMOVE);
7114}
7115
7116static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
d44f9b63 7117 struct hclge_mac_vlan_tbl_entry_cmd *req,
46a3df9f
S
7118 struct hclge_desc *desc,
7119 bool is_mc)
7120{
7121 struct hclge_dev *hdev = vport->back;
7122 u8 resp_code;
a90bb9a5 7123 u16 retval;
46a3df9f
S
7124 int ret;
7125
7126 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
7127 if (is_mc) {
7128 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7129 memcpy(desc[0].data,
7130 req,
d44f9b63 7131 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
46a3df9f
S
7132 hclge_cmd_setup_basic_desc(&desc[1],
7133 HCLGE_OPC_MAC_VLAN_ADD,
7134 true);
7135 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7136 hclge_cmd_setup_basic_desc(&desc[2],
7137 HCLGE_OPC_MAC_VLAN_ADD,
7138 true);
7139 ret = hclge_cmd_send(&hdev->hw, desc, 3);
7140 } else {
7141 memcpy(desc[0].data,
7142 req,
d44f9b63 7143 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
46a3df9f
S
7144 ret = hclge_cmd_send(&hdev->hw, desc, 1);
7145 }
7146 if (ret) {
7147 dev_err(&hdev->pdev->dev,
7148 "lookup mac addr failed for cmd_send, ret =%d.\n",
7149 ret);
7150 return ret;
7151 }
a90bb9a5
YL
7152 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
7153 retval = le16_to_cpu(desc[0].retval);
46a3df9f 7154
a90bb9a5 7155 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
46a3df9f
S
7156 HCLGE_MAC_VLAN_LKUP);
7157}
7158
7159static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
d44f9b63 7160 struct hclge_mac_vlan_tbl_entry_cmd *req,
46a3df9f
S
7161 struct hclge_desc *mc_desc)
7162{
7163 struct hclge_dev *hdev = vport->back;
7164 int cfg_status;
7165 u8 resp_code;
a90bb9a5 7166 u16 retval;
46a3df9f
S
7167 int ret;
7168
7169 if (!mc_desc) {
7170 struct hclge_desc desc;
7171
7172 hclge_cmd_setup_basic_desc(&desc,
7173 HCLGE_OPC_MAC_VLAN_ADD,
7174 false);
d44f9b63
YL
7175 memcpy(desc.data, req,
7176 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
46a3df9f 7177 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
a90bb9a5
YL
7178 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7179 retval = le16_to_cpu(desc.retval);
7180
7181 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
46a3df9f
S
7182 resp_code,
7183 HCLGE_MAC_VLAN_ADD);
7184 } else {
c3b6f755 7185 hclge_cmd_reuse_desc(&mc_desc[0], false);
46a3df9f 7186 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
c3b6f755 7187 hclge_cmd_reuse_desc(&mc_desc[1], false);
46a3df9f 7188 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
c3b6f755 7189 hclge_cmd_reuse_desc(&mc_desc[2], false);
46a3df9f
S
7190 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
7191 memcpy(mc_desc[0].data, req,
d44f9b63 7192 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
46a3df9f 7193 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
a90bb9a5
YL
7194 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
7195 retval = le16_to_cpu(mc_desc[0].retval);
7196
7197 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
46a3df9f
S
7198 resp_code,
7199 HCLGE_MAC_VLAN_ADD);
7200 }
7201
7202 if (ret) {
7203 dev_err(&hdev->pdev->dev,
7204 "add mac addr failed for cmd_send, ret =%d.\n",
7205 ret);
7206 return ret;
7207 }
7208
7209 return cfg_status;
7210}
7211
39932473 7212static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
c1c5f66e 7213 u16 *allocated_size)
39932473
JS
7214{
7215 struct hclge_umv_spc_alc_cmd *req;
7216 struct hclge_desc desc;
7217 int ret;
7218
7219 req = (struct hclge_umv_spc_alc_cmd *)desc.data;
7220 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
63cbf7a9 7221
39932473
JS
7222 req->space_size = cpu_to_le32(space_size);
7223
7224 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7225 if (ret) {
c1c5f66e
JS
7226 dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
7227 ret);
39932473
JS
7228 return ret;
7229 }
7230
3fd8dc26 7231 *allocated_size = le32_to_cpu(desc.data[1]);
39932473
JS
7232
7233 return 0;
7234}
7235
1ac0e6c2
JS
7236static int hclge_init_umv_space(struct hclge_dev *hdev)
7237{
7238 u16 allocated_size = 0;
7239 int ret;
7240
c1c5f66e 7241 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
1ac0e6c2
JS
7242 if (ret)
7243 return ret;
7244
7245 if (allocated_size < hdev->wanted_umv_size)
7246 dev_warn(&hdev->pdev->dev,
7247 "failed to alloc umv space, want %u, get %u\n",
7248 hdev->wanted_umv_size, allocated_size);
7249
1ac0e6c2
JS
7250 hdev->max_umv_size = allocated_size;
7251 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
7252 hdev->share_umv_size = hdev->priv_umv_size +
7253 hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7254
7255 return 0;
7256}
7257
39932473
JS
7258static void hclge_reset_umv_space(struct hclge_dev *hdev)
7259{
7260 struct hclge_vport *vport;
7261 int i;
7262
7263 for (i = 0; i < hdev->num_alloc_vport; i++) {
7264 vport = &hdev->vport[i];
7265 vport->used_umv_num = 0;
7266 }
7267
7d0b3451 7268 mutex_lock(&hdev->vport_lock);
39932473 7269 hdev->share_umv_size = hdev->priv_umv_size +
4c58f592 7270 hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7d0b3451 7271 mutex_unlock(&hdev->vport_lock);
39932473
JS
7272}
7273
7d0b3451 7274static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
39932473
JS
7275{
7276 struct hclge_dev *hdev = vport->back;
7277 bool is_full;
7278
7d0b3451
JS
7279 if (need_lock)
7280 mutex_lock(&hdev->vport_lock);
7281
39932473
JS
7282 is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
7283 hdev->share_umv_size == 0);
7d0b3451
JS
7284
7285 if (need_lock)
7286 mutex_unlock(&hdev->vport_lock);
39932473
JS
7287
7288 return is_full;
7289}
7290
7291static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
7292{
7293 struct hclge_dev *hdev = vport->back;
7294
39932473
JS
7295 if (is_free) {
7296 if (vport->used_umv_num > hdev->priv_umv_size)
7297 hdev->share_umv_size++;
54a395b6 7298
7299 if (vport->used_umv_num > 0)
7300 vport->used_umv_num--;
39932473 7301 } else {
54a395b6 7302 if (vport->used_umv_num >= hdev->priv_umv_size &&
7303 hdev->share_umv_size > 0)
39932473
JS
7304 hdev->share_umv_size--;
7305 vport->used_umv_num++;
7306 }
39932473
JS
7307}
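/* Editor's note: a standalone sketch (not driver code) of the unicast MAC
 * (UMV) space accounting implemented by hclge_init_umv_space(),
 * hclge_is_umv_space_full() and hclge_update_umv_space(): every vport owns a
 * private quota, and any entries beyond it are charged to a shared pool. The
 * struct and function names below are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

struct umv_space {
	unsigned int priv_size;		/* per-vport private quota */
	unsigned int share_size;	/* remaining shared pool */
	unsigned int used[4];		/* per-vport used counters */
};

static bool umv_full(struct umv_space *s, int vport)
{
	return s->used[vport] >= s->priv_size && s->share_size == 0;
}

static void umv_take(struct umv_space *s, int vport)
{
	/* entries beyond the private quota consume the shared pool */
	if (s->used[vport] >= s->priv_size && s->share_size > 0)
		s->share_size--;
	s->used[vport]++;
}

static void umv_give(struct umv_space *s, int vport)
{
	if (s->used[vport] > s->priv_size)
		s->share_size++;
	if (s->used[vport] > 0)
		s->used[vport]--;
}

int main(void)
{
	/* e.g. 256 entries split among 3 vports: 64 private each, and 64 plus
	 * the remainder form the shared pool (mirrors hclge_init_umv_space())
	 */
	struct umv_space s = { .priv_size = 64, .share_size = 64 };

	umv_take(&s, 0);
	umv_give(&s, 0);
	printf("full? %d, share left %u\n", umv_full(&s, 0), s.share_size);
	return 0;
}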
7308
ee4bcd3b
JS
7309static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
7310 const u8 *mac_addr)
7311{
7312 struct hclge_mac_node *mac_node, *tmp;
7313
7314 list_for_each_entry_safe(mac_node, tmp, list, node)
7315 if (ether_addr_equal(mac_addr, mac_node->mac_addr))
7316 return mac_node;
7317
7318 return NULL;
7319}
7320
7321static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
7322 enum HCLGE_MAC_NODE_STATE state)
7323{
7324 switch (state) {
7325 /* from set_rx_mode or tmp_add_list */
7326 case HCLGE_MAC_TO_ADD:
7327 if (mac_node->state == HCLGE_MAC_TO_DEL)
7328 mac_node->state = HCLGE_MAC_ACTIVE;
7329 break;
7330 /* only from set_rx_mode */
7331 case HCLGE_MAC_TO_DEL:
7332 if (mac_node->state == HCLGE_MAC_TO_ADD) {
7333 list_del(&mac_node->node);
7334 kfree(mac_node);
7335 } else {
7336 mac_node->state = HCLGE_MAC_TO_DEL;
7337 }
7338 break;
7339 /* only from tmp_add_list, the mac_node->state won't be
7340 * ACTIVE.
7341 */
7342 case HCLGE_MAC_ACTIVE:
7343 if (mac_node->state == HCLGE_MAC_TO_ADD)
7344 mac_node->state = HCLGE_MAC_ACTIVE;
7345
7346 break;
7347 }
7348}
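/* Editor's note: a standalone sketch (not driver code) of the MAC node state
 * machine driven by hclge_update_mac_node() above. A node is either TO_ADD
 * (needs writing to hardware), TO_DEL (needs removing from hardware) or
 * ACTIVE (already in hardware). The enum and function names are hypothetical;
 * DELETED stands for the list_del() + kfree() done in the real code.
 */
#include <stdio.h>

enum node_state { TO_ADD, TO_DEL, ACTIVE, DELETED };

static enum node_state apply_request(enum node_state cur, enum node_state req)
{
	switch (req) {
	case TO_ADD:			/* from set_rx_mode or tmp_add_list */
		return cur == TO_DEL ? ACTIVE : cur;
	case TO_DEL:			/* only from set_rx_mode */
		return cur == TO_ADD ? DELETED : TO_DEL;
	case ACTIVE:			/* only from tmp_add_list */
		return cur == TO_ADD ? ACTIVE : cur;
	default:
		return cur;
	}
}

int main(void)
{
	/* an address queued for delete is re-added before the delete ran:
	 * it is still in hardware, so it simply becomes ACTIVE again
	 */
	printf("%d\n", apply_request(TO_DEL, TO_ADD));	/* prints 2 (ACTIVE) */
	return 0;
}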
7349
7350int hclge_update_mac_list(struct hclge_vport *vport,
7351 enum HCLGE_MAC_NODE_STATE state,
7352 enum HCLGE_MAC_ADDR_TYPE mac_type,
7353 const unsigned char *addr)
7354{
7355 struct hclge_dev *hdev = vport->back;
7356 struct hclge_mac_node *mac_node;
7357 struct list_head *list;
7358
7359 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7360 &vport->uc_mac_list : &vport->mc_mac_list;
7361
7362 spin_lock_bh(&vport->mac_list_lock);
7363
 7364	/* if the mac addr is already in the mac list, there is no need to
 7365	 * add a new one; just check the mac addr state and convert it to a
 7366	 * new state, remove it, or do nothing.
7367 */
7368 mac_node = hclge_find_mac_node(list, addr);
7369 if (mac_node) {
7370 hclge_update_mac_node(mac_node, state);
7371 spin_unlock_bh(&vport->mac_list_lock);
7372 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
7373 return 0;
7374 }
7375
 7376	/* if this address was never added, it is unnecessary to delete it */
7377 if (state == HCLGE_MAC_TO_DEL) {
7378 spin_unlock_bh(&vport->mac_list_lock);
7379 dev_err(&hdev->pdev->dev,
7380 "failed to delete address %pM from mac list\n",
7381 addr);
7382 return -ENOENT;
7383 }
7384
7385 mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
7386 if (!mac_node) {
7387 spin_unlock_bh(&vport->mac_list_lock);
7388 return -ENOMEM;
7389 }
7390
7391 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
7392
7393 mac_node->state = state;
7394 ether_addr_copy(mac_node->mac_addr, addr);
7395 list_add_tail(&mac_node->node, list);
7396
7397 spin_unlock_bh(&vport->mac_list_lock);
7398
7399 return 0;
7400}
7401
46a3df9f
S
7402static int hclge_add_uc_addr(struct hnae3_handle *handle,
7403 const unsigned char *addr)
7404{
7405 struct hclge_vport *vport = hclge_get_vport(handle);
7406
ee4bcd3b
JS
7407 return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
7408 addr);
46a3df9f
S
7409}
7410
7411int hclge_add_uc_addr_common(struct hclge_vport *vport,
7412 const unsigned char *addr)
7413{
7414 struct hclge_dev *hdev = vport->back;
d44f9b63 7415 struct hclge_mac_vlan_tbl_entry_cmd req;
d07b6bb4 7416 struct hclge_desc desc;
a90bb9a5 7417 u16 egress_port = 0;
aa7a795e 7418 int ret;
46a3df9f
S
7419
7420 /* mac addr check */
7421 if (is_zero_ether_addr(addr) ||
7422 is_broadcast_ether_addr(addr) ||
7423 is_multicast_ether_addr(addr)) {
7424 dev_err(&hdev->pdev->dev,
7425 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
9b2f3477 7426 addr, is_zero_ether_addr(addr),
46a3df9f
S
7427 is_broadcast_ether_addr(addr),
7428 is_multicast_ether_addr(addr));
7429 return -EINVAL;
7430 }
7431
7432 memset(&req, 0, sizeof(req));
a90bb9a5 7433
e4e87715
PL
7434 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7435 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
a90bb9a5
YL
7436
7437 req.egress_port = cpu_to_le16(egress_port);
46a3df9f 7438
3a586422 7439 hclge_prepare_mac_addr(&req, addr, false);
46a3df9f 7440
d07b6bb4
JS
 7441	/* Look up the mac address in the mac_vlan table, and add
 7442	 * it if the entry does not exist. Duplicate unicast entries
 7443	 * are not allowed in the mac vlan table.
7444 */
7445 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
39932473 7446 if (ret == -ENOENT) {
7d0b3451
JS
7447 mutex_lock(&hdev->vport_lock);
7448 if (!hclge_is_umv_space_full(vport, false)) {
39932473
JS
7449 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
7450 if (!ret)
7451 hclge_update_umv_space(vport, false);
7d0b3451 7452 mutex_unlock(&hdev->vport_lock);
39932473
JS
7453 return ret;
7454 }
7d0b3451 7455 mutex_unlock(&hdev->vport_lock);
39932473 7456
c631c696
JS
7457 if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
7458 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
7459 hdev->priv_umv_size);
39932473
JS
7460
7461 return -ENOSPC;
7462 }
d07b6bb4
JS
7463
7464 /* check if we just hit the duplicate */
72110b56 7465 if (!ret) {
adcf738b 7466 dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
72110b56
PL
7467 vport->vport_id, addr);
7468 return 0;
7469 }
d07b6bb4
JS
7470
7471 dev_err(&hdev->pdev->dev,
7472 "PF failed to add unicast entry(%pM) in the MAC table\n",
7473 addr);
46a3df9f 7474
aa7a795e 7475 return ret;
46a3df9f
S
7476}
7477
7478static int hclge_rm_uc_addr(struct hnae3_handle *handle,
7479 const unsigned char *addr)
7480{
7481 struct hclge_vport *vport = hclge_get_vport(handle);
7482
ee4bcd3b
JS
7483 return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
7484 addr);
46a3df9f
S
7485}
7486
7487int hclge_rm_uc_addr_common(struct hclge_vport *vport,
7488 const unsigned char *addr)
7489{
7490 struct hclge_dev *hdev = vport->back;
d44f9b63 7491 struct hclge_mac_vlan_tbl_entry_cmd req;
aa7a795e 7492 int ret;
46a3df9f
S
7493
7494 /* mac addr check */
7495 if (is_zero_ether_addr(addr) ||
7496 is_broadcast_ether_addr(addr) ||
7497 is_multicast_ether_addr(addr)) {
9b2f3477
WL
7498 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
7499 addr);
46a3df9f
S
7500 return -EINVAL;
7501 }
7502
7503 memset(&req, 0, sizeof(req));
e4e87715 7504 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
3a586422 7505 hclge_prepare_mac_addr(&req, addr, false);
aa7a795e 7506 ret = hclge_remove_mac_vlan_tbl(vport, &req);
7d0b3451
JS
7507 if (!ret) {
7508 mutex_lock(&hdev->vport_lock);
39932473 7509 hclge_update_umv_space(vport, true);
7d0b3451
JS
7510 mutex_unlock(&hdev->vport_lock);
7511 } else if (ret == -ENOENT) {
ee4bcd3b 7512 ret = 0;
7d0b3451 7513 }
46a3df9f 7514
aa7a795e 7515 return ret;
46a3df9f
S
7516}
7517
7518static int hclge_add_mc_addr(struct hnae3_handle *handle,
7519 const unsigned char *addr)
7520{
7521 struct hclge_vport *vport = hclge_get_vport(handle);
7522
ee4bcd3b
JS
7523 return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
7524 addr);
46a3df9f
S
7525}
7526
7527int hclge_add_mc_addr_common(struct hclge_vport *vport,
7528 const unsigned char *addr)
7529{
7530 struct hclge_dev *hdev = vport->back;
d44f9b63 7531 struct hclge_mac_vlan_tbl_entry_cmd req;
46a3df9f 7532 struct hclge_desc desc[3];
46a3df9f
S
7533 int status;
7534
7535 /* mac addr check */
7536 if (!is_multicast_ether_addr(addr)) {
7537 dev_err(&hdev->pdev->dev,
7538 "Add mc mac err! invalid mac:%pM.\n",
7539 addr);
7540 return -EINVAL;
7541 }
7542 memset(&req, 0, sizeof(req));
3a586422 7543 hclge_prepare_mac_addr(&req, addr, true);
46a3df9f 7544 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
63cbf7a9 7545 if (status) {
46a3df9f
S
 7546		/* This mac addr does not exist, add a new entry for it */
7547 memset(desc[0].data, 0, sizeof(desc[0].data));
7548 memset(desc[1].data, 0, sizeof(desc[0].data));
7549 memset(desc[2].data, 0, sizeof(desc[0].data));
46a3df9f 7550 }
63cbf7a9
YM
7551 status = hclge_update_desc_vfid(desc, vport->vport_id, false);
7552 if (status)
7553 return status;
7554 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
46a3df9f 7555
c631c696
JS
 7556	/* if the table already overflowed, do not print each time */
7557 if (status == -ENOSPC &&
7558 !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
1f6db589 7559 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
46a3df9f
S
7560
7561 return status;
7562}
7563
7564static int hclge_rm_mc_addr(struct hnae3_handle *handle,
7565 const unsigned char *addr)
7566{
7567 struct hclge_vport *vport = hclge_get_vport(handle);
7568
ee4bcd3b
JS
7569 return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
7570 addr);
46a3df9f
S
7571}
7572
7573int hclge_rm_mc_addr_common(struct hclge_vport *vport,
7574 const unsigned char *addr)
7575{
7576 struct hclge_dev *hdev = vport->back;
d44f9b63 7577 struct hclge_mac_vlan_tbl_entry_cmd req;
46a3df9f
S
7578 enum hclge_cmd_status status;
7579 struct hclge_desc desc[3];
46a3df9f
S
7580
7581 /* mac addr check */
7582 if (!is_multicast_ether_addr(addr)) {
7583 dev_dbg(&hdev->pdev->dev,
7584 "Remove mc mac err! invalid mac:%pM.\n",
7585 addr);
7586 return -EINVAL;
7587 }
7588
7589 memset(&req, 0, sizeof(req));
3a586422 7590 hclge_prepare_mac_addr(&req, addr, true);
46a3df9f
S
7591 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7592 if (!status) {
 7593		/* This mac addr exists, remove this handle's VFID from it */
63cbf7a9
YM
7594 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
7595 if (status)
7596 return status;
46a3df9f
S
7597
7598 if (hclge_is_all_function_id_zero(desc))
 7599			/* All the vfids are zero, so this entry needs to be deleted */
7600 status = hclge_remove_mac_vlan_tbl(vport, &req);
7601 else
 7602			/* Not all the vfids are zero, so update the vfid */
7603 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7604
ee4bcd3b 7605 } else if (status == -ENOENT) {
40cca1c5 7606 status = 0;
46a3df9f
S
7607 }
7608
46a3df9f
S
7609 return status;
7610}
7611
ee4bcd3b
JS
7612static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
7613 struct list_head *list,
7614 int (*sync)(struct hclge_vport *,
7615 const unsigned char *))
6dd86902 7616{
ee4bcd3b
JS
7617 struct hclge_mac_node *mac_node, *tmp;
7618 int ret;
6dd86902 7619
ee4bcd3b
JS
7620 list_for_each_entry_safe(mac_node, tmp, list, node) {
7621 ret = sync(vport, mac_node->mac_addr);
7622 if (!ret) {
7623 mac_node->state = HCLGE_MAC_ACTIVE;
7624 } else {
7625 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
7626 &vport->state);
7627 break;
7628 }
7629 }
7630}
6dd86902 7631
ee4bcd3b
JS
7632static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
7633 struct list_head *list,
7634 int (*unsync)(struct hclge_vport *,
7635 const unsigned char *))
7636{
7637 struct hclge_mac_node *mac_node, *tmp;
7638 int ret;
6dd86902 7639
ee4bcd3b
JS
7640 list_for_each_entry_safe(mac_node, tmp, list, node) {
7641 ret = unsync(vport, mac_node->mac_addr);
7642 if (!ret || ret == -ENOENT) {
7643 list_del(&mac_node->node);
7644 kfree(mac_node);
7645 } else {
7646 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
7647 &vport->state);
7648 break;
7649 }
7650 }
7651}
6dd86902 7652
c631c696 7653static bool hclge_sync_from_add_list(struct list_head *add_list,
ee4bcd3b
JS
7654 struct list_head *mac_list)
7655{
7656 struct hclge_mac_node *mac_node, *tmp, *new_node;
c631c696 7657 bool all_added = true;
6dd86902 7658
ee4bcd3b 7659 list_for_each_entry_safe(mac_node, tmp, add_list, node) {
c631c696
JS
7660 if (mac_node->state == HCLGE_MAC_TO_ADD)
7661 all_added = false;
7662
ee4bcd3b
JS
 7663		/* if the mac address from tmp_add_list is not in the
 7664		 * uc/mc_mac_list, it means a TO_DEL request was received
 7665		 * during the time window of adding the mac address into the
 7666		 * mac table. If the mac_node state is ACTIVE, change it to
 7667		 * TO_DEL so it will be removed next time; otherwise it must
 7668		 * be TO_ADD, meaning this address has not been added into
 7669		 * the mac table yet, so just remove the mac node.
7670 */
7671 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
7672 if (new_node) {
7673 hclge_update_mac_node(new_node, mac_node->state);
7674 list_del(&mac_node->node);
7675 kfree(mac_node);
7676 } else if (mac_node->state == HCLGE_MAC_ACTIVE) {
7677 mac_node->state = HCLGE_MAC_TO_DEL;
7678 list_del(&mac_node->node);
7679 list_add_tail(&mac_node->node, mac_list);
7680 } else {
7681 list_del(&mac_node->node);
7682 kfree(mac_node);
7683 }
7684 }
c631c696
JS
7685
7686 return all_added;
6dd86902 7687}
7688
ee4bcd3b
JS
7689static void hclge_sync_from_del_list(struct list_head *del_list,
7690 struct list_head *mac_list)
6dd86902 7691{
ee4bcd3b 7692 struct hclge_mac_node *mac_node, *tmp, *new_node;
6dd86902 7693
ee4bcd3b
JS
7694 list_for_each_entry_safe(mac_node, tmp, del_list, node) {
7695 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
7696 if (new_node) {
 7697		/* If the mac addr exists in the mac list, it means
 7698		 * a new TO_ADD request was received during the time
 7699		 * window of configuring the mac address. Since the mac
 7700		 * node state is TO_ADD and the address is still in the
 7701		 * hardware (because the delete failed), we just need
 7702		 * to change the mac node state to ACTIVE.
7703 */
7704 new_node->state = HCLGE_MAC_ACTIVE;
7705 list_del(&mac_node->node);
7706 kfree(mac_node);
7707 } else {
7708 list_del(&mac_node->node);
7709 list_add_tail(&mac_node->node, mac_list);
7710 }
7711 }
7712}
6dd86902 7713
c631c696
JS
7714static void hclge_update_overflow_flags(struct hclge_vport *vport,
7715 enum HCLGE_MAC_ADDR_TYPE mac_type,
7716 bool is_all_added)
7717{
7718 if (mac_type == HCLGE_MAC_ADDR_UC) {
7719 if (is_all_added)
7720 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
7721 else
7722 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
7723 } else {
7724 if (is_all_added)
7725 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
7726 else
7727 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
7728 }
7729}
7730
ee4bcd3b
JS
7731static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
7732 enum HCLGE_MAC_ADDR_TYPE mac_type)
7733{
7734 struct hclge_mac_node *mac_node, *tmp, *new_node;
7735 struct list_head tmp_add_list, tmp_del_list;
7736 struct list_head *list;
c631c696 7737 bool all_added;
6dd86902 7738
ee4bcd3b
JS
7739 INIT_LIST_HEAD(&tmp_add_list);
7740 INIT_LIST_HEAD(&tmp_del_list);
6dd86902 7741
ee4bcd3b
JS
 7742	/* move the mac addrs to the tmp_add_list and tmp_del_list, so that
 7743	 * we can add/delete these mac addrs outside the spin lock
7744 */
7745 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7746 &vport->uc_mac_list : &vport->mc_mac_list;
6dd86902 7747
ee4bcd3b
JS
7748 spin_lock_bh(&vport->mac_list_lock);
7749
7750 list_for_each_entry_safe(mac_node, tmp, list, node) {
7751 switch (mac_node->state) {
7752 case HCLGE_MAC_TO_DEL:
7753 list_del(&mac_node->node);
7754 list_add_tail(&mac_node->node, &tmp_del_list);
7755 break;
7756 case HCLGE_MAC_TO_ADD:
7757 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
7758 if (!new_node)
7759 goto stop_traverse;
7760 ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
7761 new_node->state = mac_node->state;
7762 list_add_tail(&new_node->node, &tmp_add_list);
7763 break;
7764 default:
6dd86902 7765 break;
7766 }
7767 }
ee4bcd3b
JS
7768
7769stop_traverse:
7770 spin_unlock_bh(&vport->mac_list_lock);
7771
7772 /* delete first, in order to get max mac table space for adding */
7773 if (mac_type == HCLGE_MAC_ADDR_UC) {
7774 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
7775 hclge_rm_uc_addr_common);
7776 hclge_sync_vport_mac_list(vport, &tmp_add_list,
7777 hclge_add_uc_addr_common);
7778 } else {
7779 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
7780 hclge_rm_mc_addr_common);
7781 hclge_sync_vport_mac_list(vport, &tmp_add_list,
7782 hclge_add_mc_addr_common);
7783 }
7784
 7785	/* if adding/deleting some mac addresses failed, move them back to
 7786	 * the mac_list and retry next time.
7787 */
7788 spin_lock_bh(&vport->mac_list_lock);
7789
7790 hclge_sync_from_del_list(&tmp_del_list, list);
c631c696 7791 all_added = hclge_sync_from_add_list(&tmp_add_list, list);
ee4bcd3b
JS
7792
7793 spin_unlock_bh(&vport->mac_list_lock);
c631c696
JS
7794
7795 hclge_update_overflow_flags(vport, mac_type, all_added);
ee4bcd3b
JS
7796}
7797
7798static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
7799{
7800 struct hclge_dev *hdev = vport->back;
7801
7802 if (test_bit(vport->vport_id, hdev->vport_config_block))
7803 return false;
7804
7805 if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
7806 return true;
7807
7808 return false;
7809}
7810
7811static void hclge_sync_mac_table(struct hclge_dev *hdev)
7812{
7813 int i;
7814
7815 for (i = 0; i < hdev->num_alloc_vport; i++) {
7816 struct hclge_vport *vport = &hdev->vport[i];
7817
7818 if (!hclge_need_sync_mac_table(vport))
7819 continue;
7820
7821 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
7822 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
7823 }
6dd86902 7824}
7825
7826void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
7827 enum HCLGE_MAC_ADDR_TYPE mac_type)
7828{
ee4bcd3b
JS
7829 int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
7830 struct hclge_mac_node *mac_cfg, *tmp;
7831 struct hclge_dev *hdev = vport->back;
7832 struct list_head tmp_del_list, *list;
7833 int ret;
6dd86902 7834
ee4bcd3b
JS
7835 if (mac_type == HCLGE_MAC_ADDR_UC) {
7836 list = &vport->uc_mac_list;
7837 unsync = hclge_rm_uc_addr_common;
7838 } else {
7839 list = &vport->mc_mac_list;
7840 unsync = hclge_rm_mc_addr_common;
7841 }
6dd86902 7842
ee4bcd3b 7843 INIT_LIST_HEAD(&tmp_del_list);
6dd86902 7844
ee4bcd3b
JS
7845 if (!is_del_list)
7846 set_bit(vport->vport_id, hdev->vport_config_block);
6dd86902 7847
ee4bcd3b
JS
7848 spin_lock_bh(&vport->mac_list_lock);
7849
7850 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7851 switch (mac_cfg->state) {
7852 case HCLGE_MAC_TO_DEL:
7853 case HCLGE_MAC_ACTIVE:
6dd86902 7854 list_del(&mac_cfg->node);
ee4bcd3b
JS
7855 list_add_tail(&mac_cfg->node, &tmp_del_list);
7856 break;
7857 case HCLGE_MAC_TO_ADD:
7858 if (is_del_list) {
7859 list_del(&mac_cfg->node);
7860 kfree(mac_cfg);
7861 }
7862 break;
6dd86902 7863 }
7864 }
ee4bcd3b
JS
7865
7866 spin_unlock_bh(&vport->mac_list_lock);
7867
7868 list_for_each_entry_safe(mac_cfg, tmp, &tmp_del_list, node) {
7869 ret = unsync(vport, mac_cfg->mac_addr);
7870 if (!ret || ret == -ENOENT) {
 7871			/* clear all mac addrs from hardware, but keep these
 7872			 * mac addrs in the mac list, and restore them after
 7873			 * the vf reset is finished.
7874 */
7875 if (!is_del_list &&
7876 mac_cfg->state == HCLGE_MAC_ACTIVE) {
7877 mac_cfg->state = HCLGE_MAC_TO_ADD;
7878 } else {
7879 list_del(&mac_cfg->node);
7880 kfree(mac_cfg);
7881 }
7882 } else if (is_del_list) {
7883 mac_cfg->state = HCLGE_MAC_TO_DEL;
7884 }
7885 }
7886
7887 spin_lock_bh(&vport->mac_list_lock);
7888
7889 hclge_sync_from_del_list(&tmp_del_list, list);
7890
7891 spin_unlock_bh(&vport->mac_list_lock);
7892}
7893
 7894 /* remove all mac addresses when uninitializing */
7895static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
7896 enum HCLGE_MAC_ADDR_TYPE mac_type)
7897{
7898 struct hclge_mac_node *mac_node, *tmp;
7899 struct hclge_dev *hdev = vport->back;
7900 struct list_head tmp_del_list, *list;
7901
7902 INIT_LIST_HEAD(&tmp_del_list);
7903
7904 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7905 &vport->uc_mac_list : &vport->mc_mac_list;
7906
7907 spin_lock_bh(&vport->mac_list_lock);
7908
7909 list_for_each_entry_safe(mac_node, tmp, list, node) {
7910 switch (mac_node->state) {
7911 case HCLGE_MAC_TO_DEL:
7912 case HCLGE_MAC_ACTIVE:
7913 list_del(&mac_node->node);
7914 list_add_tail(&mac_node->node, &tmp_del_list);
7915 break;
7916 case HCLGE_MAC_TO_ADD:
7917 list_del(&mac_node->node);
7918 kfree(mac_node);
7919 break;
7920 }
7921 }
7922
7923 spin_unlock_bh(&vport->mac_list_lock);
7924
7925 if (mac_type == HCLGE_MAC_ADDR_UC)
7926 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
7927 hclge_rm_uc_addr_common);
7928 else
7929 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
7930 hclge_rm_mc_addr_common);
7931
7932 if (!list_empty(&tmp_del_list))
7933 dev_warn(&hdev->pdev->dev,
7934 "uninit %s mac list for vport %u not completely.\n",
7935 mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
7936 vport->vport_id);
7937
7938 list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
7939 list_del(&mac_node->node);
7940 kfree(mac_node);
7941 }
6dd86902 7942}
7943
ee4bcd3b 7944static void hclge_uninit_mac_table(struct hclge_dev *hdev)
6dd86902 7945{
6dd86902 7946 struct hclge_vport *vport;
7947 int i;
7948
6dd86902 7949 for (i = 0; i < hdev->num_alloc_vport; i++) {
7950 vport = &hdev->vport[i];
ee4bcd3b
JS
7951 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
7952 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
6dd86902 7953 }
6dd86902 7954}
7955
f5aac71c
FL
7956static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
7957 u16 cmdq_resp, u8 resp_code)
7958{
7959#define HCLGE_ETHERTYPE_SUCCESS_ADD 0
7960#define HCLGE_ETHERTYPE_ALREADY_ADD 1
7961#define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
7962#define HCLGE_ETHERTYPE_KEY_CONFLICT 3
7963
7964 int return_status;
7965
7966 if (cmdq_resp) {
7967 dev_err(&hdev->pdev->dev,
adcf738b 7968 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
f5aac71c
FL
7969 cmdq_resp);
7970 return -EIO;
7971 }
7972
7973 switch (resp_code) {
7974 case HCLGE_ETHERTYPE_SUCCESS_ADD:
7975 case HCLGE_ETHERTYPE_ALREADY_ADD:
7976 return_status = 0;
7977 break;
7978 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
7979 dev_err(&hdev->pdev->dev,
7980 "add mac ethertype failed for manager table overflow.\n");
7981 return_status = -EIO;
7982 break;
7983 case HCLGE_ETHERTYPE_KEY_CONFLICT:
7984 dev_err(&hdev->pdev->dev,
7985 "add mac ethertype failed for key conflict.\n");
7986 return_status = -EIO;
7987 break;
7988 default:
7989 dev_err(&hdev->pdev->dev,
adcf738b 7990 "add mac ethertype failed for undefined, code=%u.\n",
f5aac71c
FL
7991 resp_code);
7992 return_status = -EIO;
7993 }
7994
7995 return return_status;
7996}
7997
8e6de441
HT
7998static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
7999 u8 *mac_addr)
8000{
8001 struct hclge_mac_vlan_tbl_entry_cmd req;
8002 struct hclge_dev *hdev = vport->back;
8003 struct hclge_desc desc;
8004 u16 egress_port = 0;
8005 int i;
8006
8007 if (is_zero_ether_addr(mac_addr))
8008 return false;
8009
8010 memset(&req, 0, sizeof(req));
8011 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8012 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8013 req.egress_port = cpu_to_le16(egress_port);
8014 hclge_prepare_mac_addr(&req, mac_addr, false);
8015
8016 if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
8017 return true;
8018
8019 vf_idx += HCLGE_VF_VPORT_START_NUM;
8020 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
8021 if (i != vf_idx &&
8022 ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
8023 return true;
8024
8025 return false;
8026}
8027
8028static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
8029 u8 *mac_addr)
8030{
8031 struct hclge_vport *vport = hclge_get_vport(handle);
8032 struct hclge_dev *hdev = vport->back;
8033
8034 vport = hclge_get_vf_vport(hdev, vf);
8035 if (!vport)
8036 return -EINVAL;
8037
8038 if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
8039 dev_info(&hdev->pdev->dev,
8040 "Specified MAC(=%pM) is same as before, no change committed!\n",
8041 mac_addr);
8042 return 0;
8043 }
8044
8045 if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
8046 dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
8047 mac_addr);
8048 return -EEXIST;
8049 }
8050
8051 ether_addr_copy(vport->vf_info.mac, mac_addr);
8e6de441 8052
90913670
YL
8053 if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
8054 dev_info(&hdev->pdev->dev,
8055 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
8056 vf, mac_addr);
8057 return hclge_inform_reset_assert_to_vf(vport);
8058 }
8059
8060 dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
8061 vf, mac_addr);
8062 return 0;
8e6de441
HT
8063}
8064
f5aac71c
FL
8065static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
8066 const struct hclge_mac_mgr_tbl_entry_cmd *req)
8067{
8068 struct hclge_desc desc;
8069 u8 resp_code;
8070 u16 retval;
8071 int ret;
8072
8073 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
8074 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
8075
8076 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8077 if (ret) {
8078 dev_err(&hdev->pdev->dev,
8079 "add mac ethertype failed for cmd_send, ret =%d.\n",
8080 ret);
8081 return ret;
8082 }
8083
8084 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8085 retval = le16_to_cpu(desc.retval);
8086
8087 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
8088}
8089
8090static int init_mgr_tbl(struct hclge_dev *hdev)
8091{
8092 int ret;
8093 int i;
8094
8095 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
8096 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
8097 if (ret) {
8098 dev_err(&hdev->pdev->dev,
8099 "add mac ethertype failed, ret =%d.\n",
8100 ret);
8101 return ret;
8102 }
8103 }
8104
8105 return 0;
8106}
8107
46a3df9f
S
8108static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
8109{
8110 struct hclge_vport *vport = hclge_get_vport(handle);
8111 struct hclge_dev *hdev = vport->back;
8112
8113 ether_addr_copy(p, hdev->hw.mac.mac_addr);
8114}
8115
ee4bcd3b
JS
8116int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
8117 const u8 *old_addr, const u8 *new_addr)
8118{
8119 struct list_head *list = &vport->uc_mac_list;
8120 struct hclge_mac_node *old_node, *new_node;
8121
8122 new_node = hclge_find_mac_node(list, new_addr);
8123 if (!new_node) {
8124 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8125 if (!new_node)
8126 return -ENOMEM;
8127
8128 new_node->state = HCLGE_MAC_TO_ADD;
8129 ether_addr_copy(new_node->mac_addr, new_addr);
8130 list_add(&new_node->node, list);
8131 } else {
8132 if (new_node->state == HCLGE_MAC_TO_DEL)
8133 new_node->state = HCLGE_MAC_ACTIVE;
8134
 8135		/* make sure the new addr is at the list head, so that the dev
 8136		 * addr is not skipped when re-adding into the mac table due to
 8137		 * the umv space limitation after a global/imp reset, which
 8138		 * clears the mac table in hardware.
8139 */
8140 list_move(&new_node->node, list);
8141 }
8142
8143 if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
8144 old_node = hclge_find_mac_node(list, old_addr);
8145 if (old_node) {
8146 if (old_node->state == HCLGE_MAC_TO_ADD) {
8147 list_del(&old_node->node);
8148 kfree(old_node);
8149 } else {
8150 old_node->state = HCLGE_MAC_TO_DEL;
8151 }
8152 }
8153 }
8154
8155 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8156
8157 return 0;
8158}
8159
59098055
FL
8160static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
8161 bool is_first)
46a3df9f
S
8162{
8163 const unsigned char *new_addr = (const unsigned char *)p;
8164 struct hclge_vport *vport = hclge_get_vport(handle);
8165 struct hclge_dev *hdev = vport->back;
ee4bcd3b 8166 unsigned char *old_addr = NULL;
18838d0c 8167 int ret;
46a3df9f
S
8168
8169 /* mac addr check */
8170 if (is_zero_ether_addr(new_addr) ||
8171 is_broadcast_ether_addr(new_addr) ||
8172 is_multicast_ether_addr(new_addr)) {
8173 dev_err(&hdev->pdev->dev,
ee4bcd3b 8174 "change uc mac err! invalid mac: %pM.\n",
46a3df9f
S
8175 new_addr);
8176 return -EINVAL;
8177 }
8178
ee4bcd3b 8179 ret = hclge_pause_addr_cfg(hdev, new_addr);
18838d0c
FL
8180 if (ret) {
8181 dev_err(&hdev->pdev->dev,
ee4bcd3b 8182 "failed to configure mac pause address, ret = %d\n",
18838d0c 8183 ret);
ee4bcd3b 8184 return ret;
46a3df9f
S
8185 }
8186
ee4bcd3b
JS
8187 if (!is_first)
8188 old_addr = hdev->hw.mac.mac_addr;
8189
8190 spin_lock_bh(&vport->mac_list_lock);
8191 ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
18838d0c
FL
8192 if (ret) {
8193 dev_err(&hdev->pdev->dev,
ee4bcd3b
JS
8194 "failed to change the mac addr:%pM, ret = %d\n",
8195 new_addr, ret);
8196 spin_unlock_bh(&vport->mac_list_lock);
8197
8198 if (!is_first)
8199 hclge_pause_addr_cfg(hdev, old_addr);
18838d0c 8200
ee4bcd3b
JS
8201 return ret;
8202 }
 8203	/* we must update the dev addr under the spin lock, to prevent the
 8204	 * dev addr from being removed by the set_rx_mode path.
8205 */
18838d0c 8206 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
ee4bcd3b
JS
8207 spin_unlock_bh(&vport->mac_list_lock);
8208
8209 hclge_task_schedule(hdev, 0);
18838d0c
FL
8210
8211 return 0;
46a3df9f
S
8212}
8213
26483246
XW
8214static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
8215 int cmd)
8216{
8217 struct hclge_vport *vport = hclge_get_vport(handle);
8218 struct hclge_dev *hdev = vport->back;
8219
8220 if (!hdev->hw.mac.phydev)
8221 return -EOPNOTSUPP;
8222
8223 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
8224}
8225
46a3df9f 8226static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
30ebc576 8227 u8 fe_type, bool filter_en, u8 vf_id)
46a3df9f 8228{
d44f9b63 8229 struct hclge_vlan_filter_ctrl_cmd *req;
46a3df9f
S
8230 struct hclge_desc desc;
8231 int ret;
8232
903b85d3
JS
8233 /* read current vlan filter parameter */
8234 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
d44f9b63 8235 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
46a3df9f 8236 req->vlan_type = vlan_type;
30ebc576 8237 req->vf_id = vf_id;
46a3df9f 8238
903b85d3
JS
8239 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8240 if (ret) {
8241 dev_err(&hdev->pdev->dev,
8242 "failed to get vlan filter config, ret = %d.\n", ret);
8243 return ret;
8244 }
8245
8246 /* modify and write new config parameter */
8247 hclge_cmd_reuse_desc(&desc, false);
8248 req->vlan_fe = filter_en ?
8249 (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
8250
46a3df9f 8251 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 8252 if (ret)
903b85d3 8253 dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
46a3df9f 8254 ret);
46a3df9f 8255
3f639907 8256 return ret;
46a3df9f
S
8257}
8258
391b5e93
JS
8259#define HCLGE_FILTER_TYPE_VF 0
8260#define HCLGE_FILTER_TYPE_PORT 1
64d114f0
ZL
8261#define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0)
8262#define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0)
8263#define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1)
8264#define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
8265#define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3)
8266#define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \
8267 | HCLGE_FILTER_FE_ROCE_EGRESS_B)
8268#define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \
8269 | HCLGE_FILTER_FE_ROCE_INGRESS_B)
391b5e93
JS
8270
8271static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
8272{
8273 struct hclge_vport *vport = hclge_get_vport(handle);
8274 struct hclge_dev *hdev = vport->back;
8275
64d114f0
ZL
8276 if (hdev->pdev->revision >= 0x21) {
8277 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
30ebc576 8278 HCLGE_FILTER_FE_EGRESS, enable, 0);
64d114f0 8279 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
30ebc576 8280 HCLGE_FILTER_FE_INGRESS, enable, 0);
64d114f0
ZL
8281 } else {
8282 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
30ebc576
JS
8283 HCLGE_FILTER_FE_EGRESS_V1_B, enable,
8284 0);
64d114f0 8285 }
c60edc17
JS
8286 if (enable)
8287 handle->netdev_flags |= HNAE3_VLAN_FLTR;
8288 else
8289 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
391b5e93
JS
8290}
8291
ebaf1908 8292static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
70a21490 8293 bool is_kill, u16 vlan,
dc8131d8 8294 __be16 proto)
46a3df9f 8295{
22044f95 8296 struct hclge_vport *vport = &hdev->vport[vfid];
d44f9b63
YL
8297 struct hclge_vlan_filter_vf_cfg_cmd *req0;
8298 struct hclge_vlan_filter_vf_cfg_cmd *req1;
46a3df9f
S
8299 struct hclge_desc desc[2];
8300 u8 vf_byte_val;
8301 u8 vf_byte_off;
8302 int ret;
8303
81a9255e 8304	/* if the vf vlan table is full, the firmware will disable the vf vlan filter;
22044f95
JS
 8305	 * it is then impossible and unnecessary to add a new vlan id to the vf vlan filter.
 8306	 * If spoof check is enabled and the vf vlan table is full, a new vlan should not be
 8307	 * added, because tx packets with these vlan ids will be dropped.
81a9255e 8308 */
22044f95
JS
8309 if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
8310 if (vport->vf_info.spoofchk && vlan) {
8311 dev_err(&hdev->pdev->dev,
8312 "Can't add vlan due to spoof check is on and vf vlan table is full\n");
8313 return -EPERM;
8314 }
81a9255e 8315 return 0;
22044f95 8316 }
81a9255e 8317
46a3df9f
S
8318 hclge_cmd_setup_basic_desc(&desc[0],
8319 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
8320 hclge_cmd_setup_basic_desc(&desc[1],
8321 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
8322
8323 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8324
8325 vf_byte_off = vfid / 8;
8326 vf_byte_val = 1 << (vfid % 8);
8327
d44f9b63
YL
8328 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
8329 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
46a3df9f 8330
a90bb9a5 8331 req0->vlan_id = cpu_to_le16(vlan);
46a3df9f
S
8332 req0->vlan_cfg = is_kill;
8333
8334 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
8335 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
8336 else
8337 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
8338
8339 ret = hclge_cmd_send(&hdev->hw, desc, 2);
8340 if (ret) {
8341 dev_err(&hdev->pdev->dev,
8342 "Send vf vlan command fail, ret =%d.\n",
8343 ret);
8344 return ret;
8345 }
8346
8347 if (!is_kill) {
6c251711 8348#define HCLGE_VF_VLAN_NO_ENTRY 2
46a3df9f
S
8349 if (!req0->resp_code || req0->resp_code == 1)
8350 return 0;
8351
6c251711 8352 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
81a9255e 8353 set_bit(vfid, hdev->vf_vlan_full);
6c251711
YL
8354 dev_warn(&hdev->pdev->dev,
8355 "vf vlan table is full, vf vlan filter is disabled\n");
8356 return 0;
8357 }
8358
46a3df9f 8359 dev_err(&hdev->pdev->dev,
adcf738b 8360 "Add vf vlan filter fail, ret =%u.\n",
46a3df9f
S
8361 req0->resp_code);
8362 } else {
41dafea2 8363#define HCLGE_VF_VLAN_DEL_NO_FOUND 1
46a3df9f
S
8364 if (!req0->resp_code)
8365 return 0;
8366
d0c31df2
JS
 8367		/* the vf vlan filter is disabled when the vf vlan table is
 8368		 * full, so a new vlan id will not be added into the vf vlan
 8369		 * table. Just return 0 without a warning, to avoid massive
 8370		 * verbose logs when unloading.
8371 */
8372 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
41dafea2 8373 return 0;
41dafea2 8374
46a3df9f 8375 dev_err(&hdev->pdev->dev,
adcf738b 8376 "Kill vf vlan filter fail, ret =%u.\n",
46a3df9f
S
8377 req0->resp_code);
8378 }
8379
8380 return -EIO;
8381}
8382
dc8131d8
YL
8383static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
8384 u16 vlan_id, bool is_kill)
46a3df9f 8385{
d44f9b63 8386 struct hclge_vlan_filter_pf_cfg_cmd *req;
46a3df9f
S
8387 struct hclge_desc desc;
8388 u8 vlan_offset_byte_val;
8389 u8 vlan_offset_byte;
8390 u8 vlan_offset_160;
8391 int ret;
8392
8393 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
8394
d6ad7c53
GL
8395 vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
8396 vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
8397 HCLGE_VLAN_BYTE_SIZE;
8398 vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
46a3df9f 8399
d44f9b63 8400 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
46a3df9f
S
8401 req->vlan_offset = vlan_offset_160;
8402 req->vlan_cfg = is_kill;
8403 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
8404
8405 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
dc8131d8
YL
8406 if (ret)
8407 dev_err(&hdev->pdev->dev,
8408 "port vlan command, send fail, ret =%d.\n", ret);
8409 return ret;
8410}
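/* Editor's note: a standalone sketch (not driver code) of the vlan_id mapping
 * in hclge_set_port_vlan_filter() above: each command covers one block of
 * VLAN IDs and sets a single bit inside a byte-wise bitmap. The real
 * constants are not shown in this file; the values below (160 IDs per block,
 * as the name vlan_offset_160 suggests, and 8 bits per byte) are assumptions
 * for illustration only.
 */
#include <stdio.h>

#define VLAN_ID_OFFSET_STEP	160	/* assumed HCLGE_VLAN_ID_OFFSET_STEP */
#define VLAN_BYTE_SIZE		8	/* assumed HCLGE_VLAN_BYTE_SIZE */

int main(void)
{
	unsigned int vlan_id = 1234;

	unsigned int offset_160 = vlan_id / VLAN_ID_OFFSET_STEP;
	unsigned int byte = (vlan_id % VLAN_ID_OFFSET_STEP) / VLAN_BYTE_SIZE;
	unsigned int bit_val = 1u << (vlan_id % VLAN_BYTE_SIZE);

	/* vlan 1234 -> block 7, byte 14, bit value 0x04 (bit 2) */
	printf("block %u byte %u bit 0x%02x\n", offset_160, byte, bit_val);
	return 0;
}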
8411
8412static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
70a21490 8413 u16 vport_id, u16 vlan_id,
dc8131d8
YL
8414 bool is_kill)
8415{
8416 u16 vport_idx, vport_num = 0;
8417 int ret;
8418
daaa8521
YL
8419 if (is_kill && !vlan_id)
8420 return 0;
8421
dc8131d8 8422 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
70a21490 8423 proto);
46a3df9f
S
8424 if (ret) {
8425 dev_err(&hdev->pdev->dev,
adcf738b 8426 "Set %u vport vlan filter config fail, ret =%d.\n",
dc8131d8 8427 vport_id, ret);
46a3df9f
S
8428 return ret;
8429 }
8430
dc8131d8
YL
8431 /* vlan 0 may be added twice when 8021q module is enabled */
8432 if (!is_kill && !vlan_id &&
8433 test_bit(vport_id, hdev->vlan_table[vlan_id]))
8434 return 0;
8435
8436 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
46a3df9f 8437 dev_err(&hdev->pdev->dev,
adcf738b 8438 "Add port vlan failed, vport %u is already in vlan %u\n",
dc8131d8
YL
8439 vport_id, vlan_id);
8440 return -EINVAL;
46a3df9f
S
8441 }
8442
dc8131d8
YL
8443 if (is_kill &&
8444 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
8445 dev_err(&hdev->pdev->dev,
adcf738b 8446 "Delete port vlan failed, vport %u is not in vlan %u\n",
dc8131d8
YL
8447 vport_id, vlan_id);
8448 return -EINVAL;
8449 }
8450
54e97d11 8451 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
dc8131d8
YL
8452 vport_num++;
8453
8454 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
8455 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
8456 is_kill);
8457
8458 return ret;
8459}
8460
5f6ea83f
PL
8461static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
8462{
8463 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
8464 struct hclge_vport_vtag_tx_cfg_cmd *req;
8465 struct hclge_dev *hdev = vport->back;
8466 struct hclge_desc desc;
d9c0f275 8467 u16 bmap_index;
5f6ea83f
PL
8468 int status;
8469
8470 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
8471
8472 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
8473 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
8474 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
e4e87715
PL
8475 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
8476 vcfg->accept_tag1 ? 1 : 0);
8477 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
8478 vcfg->accept_untag1 ? 1 : 0);
8479 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
8480 vcfg->accept_tag2 ? 1 : 0);
8481 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
8482 vcfg->accept_untag2 ? 1 : 0);
8483 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
8484 vcfg->insert_tag1_en ? 1 : 0);
8485 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
8486 vcfg->insert_tag2_en ? 1 : 0);
8487 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
5f6ea83f
PL
8488
8489 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
d9c0f275
JS
8490 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8491 HCLGE_VF_NUM_PER_BYTE;
8492 req->vf_bitmap[bmap_index] =
8493 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
5f6ea83f
PL
8494
8495 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8496 if (status)
8497 dev_err(&hdev->pdev->dev,
8498 "Send port txvlan cfg command fail, ret =%d\n",
8499 status);
8500
8501 return status;
8502}
8503
8504static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
8505{
8506 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
8507 struct hclge_vport_vtag_rx_cfg_cmd *req;
8508 struct hclge_dev *hdev = vport->back;
8509 struct hclge_desc desc;
d9c0f275 8510 u16 bmap_index;
5f6ea83f
PL
8511 int status;
8512
8513 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
8514
8515 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
e4e87715
PL
8516 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
8517 vcfg->strip_tag1_en ? 1 : 0);
8518 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
8519 vcfg->strip_tag2_en ? 1 : 0);
8520 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
8521 vcfg->vlan1_vlan_prionly ? 1 : 0);
8522 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
8523 vcfg->vlan2_vlan_prionly ? 1 : 0);
5f6ea83f
PL
8524
8525 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
d9c0f275
JS
8526 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8527 HCLGE_VF_NUM_PER_BYTE;
8528 req->vf_bitmap[bmap_index] =
8529 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
5f6ea83f
PL
8530
8531 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8532 if (status)
8533 dev_err(&hdev->pdev->dev,
8534 "Send port rxvlan cfg command fail, ret =%d\n",
8535 status);
8536
8537 return status;
8538}
8539
741fca16
JS
8540static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
8541 u16 port_base_vlan_state,
8542 u16 vlan_tag)
8543{
8544 int ret;
8545
8546 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8547 vport->txvlan_cfg.accept_tag1 = true;
8548 vport->txvlan_cfg.insert_tag1_en = false;
8549 vport->txvlan_cfg.default_tag1 = 0;
8550 } else {
8551 vport->txvlan_cfg.accept_tag1 = false;
8552 vport->txvlan_cfg.insert_tag1_en = true;
8553 vport->txvlan_cfg.default_tag1 = vlan_tag;
8554 }
8555
8556 vport->txvlan_cfg.accept_untag1 = true;
8557
 8558	/* accept_tag2 and accept_untag2 are not supported on
 8559	 * pdev revision(0x20); newer revisions support them,
 8560	 * but these two fields cannot be configured by the user.
8561 */
8562 vport->txvlan_cfg.accept_tag2 = true;
8563 vport->txvlan_cfg.accept_untag2 = true;
8564 vport->txvlan_cfg.insert_tag2_en = false;
8565 vport->txvlan_cfg.default_tag2 = 0;
8566
8567 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8568 vport->rxvlan_cfg.strip_tag1_en = false;
8569 vport->rxvlan_cfg.strip_tag2_en =
8570 vport->rxvlan_cfg.rx_vlan_offload_en;
8571 } else {
8572 vport->rxvlan_cfg.strip_tag1_en =
8573 vport->rxvlan_cfg.rx_vlan_offload_en;
8574 vport->rxvlan_cfg.strip_tag2_en = true;
8575 }
8576 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8577 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8578
8579 ret = hclge_set_vlan_tx_offload_cfg(vport);
8580 if (ret)
8581 return ret;
8582
8583 return hclge_set_vlan_rx_offload_cfg(vport);
8584}
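/* Editor's note: a standalone sketch (not driver code) summarising how
 * hclge_vlan_offload_cfg() above chooses the tag1/tag2 handling from the
 * port-based VLAN state. The struct and function names are hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct vlan_offload {
	bool tx_accept_tag1, tx_insert_tag1;
	uint16_t tx_default_tag1;
	bool rx_strip_tag1, rx_strip_tag2;
};

static struct vlan_offload pick_cfg(bool port_vlan_enabled,
				    bool rx_offload_en, uint16_t port_tag)
{
	struct vlan_offload c;

	if (!port_vlan_enabled) {
		/* no port VLAN: pass tagged frames through, strip per offload */
		c.tx_accept_tag1 = true;
		c.tx_insert_tag1 = false;
		c.tx_default_tag1 = 0;
		c.rx_strip_tag1 = false;
		c.rx_strip_tag2 = rx_offload_en;
	} else {
		/* port VLAN: insert the port tag on tx, always strip it on rx */
		c.tx_accept_tag1 = false;
		c.tx_insert_tag1 = true;
		c.tx_default_tag1 = port_tag;
		c.rx_strip_tag1 = rx_offload_en;
		c.rx_strip_tag2 = true;
	}
	return c;
}

int main(void)
{
	struct vlan_offload c = pick_cfg(true, true, 100);

	printf("insert tag1=%d default=%u strip1=%d strip2=%d\n",
	       c.tx_insert_tag1, c.tx_default_tag1,
	       c.rx_strip_tag1, c.rx_strip_tag2);
	return 0;
}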
8585
5f6ea83f
PL
8586static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
8587{
8588 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
8589 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
8590 struct hclge_desc desc;
8591 int status;
8592
8593 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
8594 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
8595 rx_req->ot_fst_vlan_type =
8596 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
8597 rx_req->ot_sec_vlan_type =
8598 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
8599 rx_req->in_fst_vlan_type =
8600 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
8601 rx_req->in_sec_vlan_type =
8602 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
8603
8604 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8605 if (status) {
8606 dev_err(&hdev->pdev->dev,
8607 "Send rxvlan protocol type command fail, ret =%d\n",
8608 status);
8609 return status;
8610 }
8611
8612 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
8613
d0d72bac 8614 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
5f6ea83f
PL
8615 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
8616 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
8617
8618 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8619 if (status)
8620 dev_err(&hdev->pdev->dev,
8621 "Send txvlan protocol type command fail, ret =%d\n",
8622 status);
8623
8624 return status;
8625}
8626
46a3df9f
S
8627static int hclge_init_vlan_config(struct hclge_dev *hdev)
8628{
5f6ea83f
PL
8629#define HCLGE_DEF_VLAN_TYPE 0x8100
8630
c60edc17 8631 struct hnae3_handle *handle = &hdev->vport[0].nic;
5f6ea83f 8632 struct hclge_vport *vport;
46a3df9f 8633 int ret;
5f6ea83f
PL
8634 int i;
8635
64d114f0 8636 if (hdev->pdev->revision >= 0x21) {
30ebc576
JS
8637 /* for revision 0x21, vf vlan filter is per function */
8638 for (i = 0; i < hdev->num_alloc_vport; i++) {
8639 vport = &hdev->vport[i];
8640 ret = hclge_set_vlan_filter_ctrl(hdev,
8641 HCLGE_FILTER_TYPE_VF,
8642 HCLGE_FILTER_FE_EGRESS,
8643 true,
8644 vport->vport_id);
8645 if (ret)
8646 return ret;
8647 }
46a3df9f 8648
64d114f0 8649 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
30ebc576
JS
8650 HCLGE_FILTER_FE_INGRESS, true,
8651 0);
64d114f0
ZL
8652 if (ret)
8653 return ret;
8654 } else {
8655 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8656 HCLGE_FILTER_FE_EGRESS_V1_B,
30ebc576 8657 true, 0);
64d114f0
ZL
8658 if (ret)
8659 return ret;
8660 }
46a3df9f 8661
c60edc17
JS
8662 handle->netdev_flags |= HNAE3_VLAN_FLTR;
8663
5f6ea83f
PL
8664 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8665 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8666 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8667 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8668 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
8669 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
8670
8671 ret = hclge_set_vlan_protocol_type(hdev);
5e43aef8
L
8672 if (ret)
8673 return ret;
46a3df9f 8674
5f6ea83f 8675 for (i = 0; i < hdev->num_alloc_vport; i++) {
741fca16 8676 u16 vlan_tag;
dcb35cce 8677
741fca16
JS
8678 vport = &hdev->vport[i];
8679 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
5f6ea83f 8680
741fca16
JS
8681 ret = hclge_vlan_offload_cfg(vport,
8682 vport->port_base_vlan_cfg.state,
8683 vlan_tag);
5f6ea83f
PL
8684 if (ret)
8685 return ret;
8686 }
8687
dc8131d8 8688 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
46a3df9f
S
8689}
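/* Summary of hclge_init_vlan_config() above: on revision 0x21 and later
 * hardware the VF VLAN filter is enabled per function (egress) plus a
 * port-level ingress filter, while revision 0x20 only offers the single
 * V1 egress filter. All RX/TX VLAN protocol types then default to 0x8100
 * (802.1Q), and each vport's port-base-VLAN offload configuration is
 * applied.
 */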
8690
21e043cd
JS
8691static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8692 bool writen_to_tbl)
c6075b19 8693{
8694 struct hclge_vport_vlan_cfg *vlan;
8695
c6075b19 8696 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
8697 if (!vlan)
8698 return;
8699
21e043cd 8700 vlan->hd_tbl_status = writen_to_tbl;
c6075b19 8701 vlan->vlan_id = vlan_id;
8702
8703 list_add_tail(&vlan->node, &vport->vlan_list);
8704}
8705
21e043cd
JS
8706static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
8707{
8708 struct hclge_vport_vlan_cfg *vlan, *tmp;
8709 struct hclge_dev *hdev = vport->back;
8710 int ret;
8711
8712 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8713 if (!vlan->hd_tbl_status) {
8714 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8715 vport->vport_id,
70a21490 8716 vlan->vlan_id, false);
21e043cd
JS
8717 if (ret) {
8718 dev_err(&hdev->pdev->dev,
8719 "restore vport vlan list failed, ret=%d\n",
8720 ret);
8721 return ret;
8722 }
8723 }
8724 vlan->hd_tbl_status = true;
8725 }
8726
8727 return 0;
8728}
8729
8730static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8731 bool is_write_tbl)
c6075b19 8732{
8733 struct hclge_vport_vlan_cfg *vlan, *tmp;
8734 struct hclge_dev *hdev = vport->back;
8735
8736 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8737 if (vlan->vlan_id == vlan_id) {
8738 if (is_write_tbl && vlan->hd_tbl_status)
8739 hclge_set_vlan_filter_hw(hdev,
8740 htons(ETH_P_8021Q),
8741 vport->vport_id,
70a21490 8742 vlan_id,
c6075b19 8743 true);
8744
8745 list_del(&vlan->node);
8746 kfree(vlan);
8747 break;
8748 }
8749 }
8750}
8751
8752void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
8753{
8754 struct hclge_vport_vlan_cfg *vlan, *tmp;
8755 struct hclge_dev *hdev = vport->back;
8756
8757 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8758 if (vlan->hd_tbl_status)
8759 hclge_set_vlan_filter_hw(hdev,
8760 htons(ETH_P_8021Q),
8761 vport->vport_id,
70a21490 8762 vlan->vlan_id,
c6075b19 8763 true);
8764
8765 vlan->hd_tbl_status = false;
8766 if (is_del_list) {
8767 list_del(&vlan->node);
8768 kfree(vlan);
8769 }
8770 }
23b4201d 8771 clear_bit(vport->vport_id, hdev->vf_vlan_full);
c6075b19 8772}
8773
8774void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
8775{
8776 struct hclge_vport_vlan_cfg *vlan, *tmp;
8777 struct hclge_vport *vport;
8778 int i;
8779
c6075b19 8780 for (i = 0; i < hdev->num_alloc_vport; i++) {
8781 vport = &hdev->vport[i];
8782 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8783 list_del(&vlan->node);
8784 kfree(vlan);
8785 }
8786 }
c6075b19 8787}
8788
039ba863 8789void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
b524b38f 8790{
b524b38f
JS
8791 struct hclge_vport_vlan_cfg *vlan, *tmp;
8792 struct hclge_dev *hdev = vport->back;
b943e033 8793 u16 vlan_proto;
039ba863
JS
8794 u16 vlan_id;
8795 u16 state;
8796 int ret;
b524b38f 8797
039ba863
JS
8798 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
8799 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8800 state = vport->port_base_vlan_cfg.state;
b524b38f 8801
039ba863
JS
8802 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
8803 clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
8804 hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
8805 vport->vport_id, vlan_id,
8806 false);
8807 return;
8808 }
22044f95 8809
039ba863
JS
8810 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8811 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8812 vport->vport_id,
8813 vlan->vlan_id, false);
8814 if (ret)
8815 break;
8816 vlan->hd_tbl_status = true;
b524b38f 8817 }
b524b38f
JS
8818}
8819
ee4bcd3b
JS
8820/* For global reset and imp reset, hardware will clear the mac table,
8821 * so we change the mac address state from ACTIVE to TO_ADD, then they
 8822 * can be restored in the service task after the reset completes. Furthermore,
 8823 * the mac addresses with state TO_DEL or DEL_FAIL do not need to be
 8824 * restored after reset, so just remove these mac nodes from mac_list.
8825 */
8826static void hclge_mac_node_convert_for_reset(struct list_head *list)
8827{
8828 struct hclge_mac_node *mac_node, *tmp;
8829
8830 list_for_each_entry_safe(mac_node, tmp, list, node) {
8831 if (mac_node->state == HCLGE_MAC_ACTIVE) {
8832 mac_node->state = HCLGE_MAC_TO_ADD;
8833 } else if (mac_node->state == HCLGE_MAC_TO_DEL) {
8834 list_del(&mac_node->node);
8835 kfree(mac_node);
8836 }
8837 }
8838}
8839
8840void hclge_restore_mac_table_common(struct hclge_vport *vport)
8841{
8842 spin_lock_bh(&vport->mac_list_lock);
8843
8844 hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
8845 hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
8846 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8847
8848 spin_unlock_bh(&vport->mac_list_lock);
8849}
8850
039ba863
JS
8851static void hclge_restore_hw_table(struct hclge_dev *hdev)
8852{
8853 struct hclge_vport *vport = &hdev->vport[0];
8854 struct hnae3_handle *handle = &vport->nic;
8855
8856 hclge_restore_mac_table_common(vport);
8857 hclge_restore_vport_vlan_table(vport);
8858 set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
8859
8860 hclge_restore_fd_entries(handle);
8861}
8862
b2641e2a 8863int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
052ece6d
PL
8864{
8865 struct hclge_vport *vport = hclge_get_vport(handle);
8866
44e626f7
JS
8867 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8868 vport->rxvlan_cfg.strip_tag1_en = false;
8869 vport->rxvlan_cfg.strip_tag2_en = enable;
8870 } else {
8871 vport->rxvlan_cfg.strip_tag1_en = enable;
8872 vport->rxvlan_cfg.strip_tag2_en = true;
8873 }
052ece6d
PL
8874 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8875 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
44e626f7 8876 vport->rxvlan_cfg.rx_vlan_offload_en = enable;
052ece6d
PL
8877
8878 return hclge_set_vlan_rx_offload_cfg(vport);
8879}
8880
21e043cd
JS
8881static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
8882 u16 port_base_vlan_state,
8883 struct hclge_vlan_info *new_info,
8884 struct hclge_vlan_info *old_info)
8885{
8886 struct hclge_dev *hdev = vport->back;
8887 int ret;
8888
8889 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
8890 hclge_rm_vport_all_vlan_table(vport, false);
8891 return hclge_set_vlan_filter_hw(hdev,
8892 htons(new_info->vlan_proto),
8893 vport->vport_id,
8894 new_info->vlan_tag,
70a21490 8895 false);
21e043cd
JS
8896 }
8897
8898 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
8899 vport->vport_id, old_info->vlan_tag,
70a21490 8900 true);
21e043cd
JS
8901 if (ret)
8902 return ret;
8903
8904 return hclge_add_vport_all_vlan_table(vport);
8905}
8906
8907int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
8908 struct hclge_vlan_info *vlan_info)
8909{
8910 struct hnae3_handle *nic = &vport->nic;
8911 struct hclge_vlan_info *old_vlan_info;
8912 struct hclge_dev *hdev = vport->back;
8913 int ret;
8914
8915 old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
8916
8917 ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
8918 if (ret)
8919 return ret;
8920
8921 if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
8922 /* add new VLAN tag */
8a9a654b
JS
8923 ret = hclge_set_vlan_filter_hw(hdev,
8924 htons(vlan_info->vlan_proto),
21e043cd
JS
8925 vport->vport_id,
8926 vlan_info->vlan_tag,
70a21490 8927 false);
21e043cd
JS
8928 if (ret)
8929 return ret;
8930
8931 /* remove old VLAN tag */
8a9a654b
JS
8932 ret = hclge_set_vlan_filter_hw(hdev,
8933 htons(old_vlan_info->vlan_proto),
21e043cd
JS
8934 vport->vport_id,
8935 old_vlan_info->vlan_tag,
70a21490 8936 true);
21e043cd
JS
8937 if (ret)
8938 return ret;
8939
8940 goto update;
8941 }
8942
8943 ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
8944 old_vlan_info);
8945 if (ret)
8946 return ret;
8947
 8948	/* update state only when disabling/enabling port based VLAN */
8949 vport->port_base_vlan_cfg.state = state;
8950 if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
8951 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
8952 else
8953 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
8954
8955update:
8956 vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
8957 vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
8958 vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
8959
8960 return 0;
8961}
8962
8963static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
8964 enum hnae3_port_base_vlan_state state,
8965 u16 vlan)
8966{
8967 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8968 if (!vlan)
8969 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8970 else
8971 return HNAE3_PORT_BASE_VLAN_ENABLE;
8972 } else {
8973 if (!vlan)
8974 return HNAE3_PORT_BASE_VLAN_DISABLE;
8975 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
8976 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8977 else
8978 return HNAE3_PORT_BASE_VLAN_MODIFY;
8979 }
8980}
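/* Decision table for hclge_get_port_base_vlan_state() above:
 *
 *   current state   requested vlan        result
 *   DISABLE         0                     NOCHANGE
 *   DISABLE         non-zero              ENABLE
 *   ENABLE          0                     DISABLE
 *   ENABLE          same as current tag   NOCHANGE
 *   ENABLE          different tag         MODIFY
 */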
8981
8982static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
8983 u16 vlan, u8 qos, __be16 proto)
8984{
8985 struct hclge_vport *vport = hclge_get_vport(handle);
8986 struct hclge_dev *hdev = vport->back;
8987 struct hclge_vlan_info vlan_info;
8988 u16 state;
8989 int ret;
8990
8991 if (hdev->pdev->revision == 0x20)
8992 return -EOPNOTSUPP;
8993
1c985508
JS
8994 vport = hclge_get_vf_vport(hdev, vfid);
8995 if (!vport)
8996 return -EINVAL;
8997
21e043cd 8998	/* qos is a 3-bit value, so it cannot be bigger than 7 */
1c985508 8999 if (vlan > VLAN_N_VID - 1 || qos > 7)
21e043cd
JS
9000 return -EINVAL;
9001 if (proto != htons(ETH_P_8021Q))
9002 return -EPROTONOSUPPORT;
9003
21e043cd
JS
9004 state = hclge_get_port_base_vlan_state(vport,
9005 vport->port_base_vlan_cfg.state,
9006 vlan);
9007 if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
9008 return 0;
9009
9010 vlan_info.vlan_tag = vlan;
9011 vlan_info.qos = qos;
9012 vlan_info.vlan_proto = ntohs(proto);
9013
92f11ea1
JS
9014 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
9015 return hclge_update_port_base_vlan_cfg(vport, state,
9016 &vlan_info);
9017 } else {
9018 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
1c985508 9019 vport->vport_id, state,
92f11ea1
JS
9020 vlan, qos,
9021 ntohs(proto));
9022 return ret;
9023 }
21e043cd
JS
9024}
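/* hclge_set_vf_vlan_filter() above is limited to revision 0x21+ hardware.
 * It validates vlan (< VLAN_N_VID), qos (<= 7) and proto (802.1Q only),
 * computes the port base VLAN transition, and then either applies it
 * directly when the VF is not alive or pushes the new setting to the VF
 * via hclge_push_vf_port_base_vlan_info().
 */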
9025
59359fc8
JS
9026static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
9027{
9028 struct hclge_vlan_info *vlan_info;
9029 struct hclge_vport *vport;
9030 int ret;
9031 int vf;
9032
9033 /* clear port base vlan for all vf */
9034 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
9035 vport = &hdev->vport[vf];
9036 vlan_info = &vport->port_base_vlan_cfg.vlan_info;
9037
9038 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9039 vport->vport_id,
9040 vlan_info->vlan_tag, true);
9041 if (ret)
9042 dev_err(&hdev->pdev->dev,
9043 "failed to clear vf vlan for vf%d, ret = %d\n",
9044 vf - HCLGE_VF_VPORT_START_NUM, ret);
9045 }
9046}
9047
21e043cd
JS
9048int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
9049 u16 vlan_id, bool is_kill)
9050{
9051 struct hclge_vport *vport = hclge_get_vport(handle);
9052 struct hclge_dev *hdev = vport->back;
9053 bool writen_to_tbl = false;
9054 int ret = 0;
9055
fe4144d4
JS
 9056	/* When the device is resetting, the firmware is unable to handle the
 9057	 * mailbox. Just record the vlan id, and remove it after the
 9058	 * reset has finished.
9059 */
9060 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
9061 set_bit(vlan_id, vport->vlan_del_fail_bmap);
9062 return -EBUSY;
9063 }
9064
46ee7350 9065	/* when port base vlan is enabled, we use port base vlan as the vlan
fe4144d4
JS
 9066	 * filter entry. In this case, we don't update the vlan filter table
 9067	 * when the user adds a new vlan or removes an existing vlan, just update
 9068	 * the vport vlan list. The vlan ids in the vlan list will not be written
 9069	 * to the vlan filter table until port base vlan is disabled
21e043cd
JS
9070 */
9071 if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9072 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
70a21490 9073 vlan_id, is_kill);
21e043cd
JS
9074 writen_to_tbl = true;
9075 }
9076
fe4144d4
JS
9077 if (!ret) {
9078 if (is_kill)
9079 hclge_rm_vport_vlan_table(vport, vlan_id, false);
9080 else
9081 hclge_add_vport_vlan_table(vport, vlan_id,
9082 writen_to_tbl);
9083 } else if (is_kill) {
46ee7350 9084		/* when removing the hw vlan filter fails, record the vlan id,
fe4144d4
JS
 9085		 * and try to remove it from hw later, to be consistent
 9086		 * with the stack
9087 */
9088 set_bit(vlan_id, vport->vlan_del_fail_bmap);
9089 }
9090 return ret;
9091}
21e043cd 9092
fe4144d4
JS
9093static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
9094{
9095#define HCLGE_MAX_SYNC_COUNT 60
21e043cd 9096
fe4144d4
JS
9097 int i, ret, sync_cnt = 0;
9098 u16 vlan_id;
9099
9100 /* start from vport 1 for PF is always alive */
9101 for (i = 0; i < hdev->num_alloc_vport; i++) {
9102 struct hclge_vport *vport = &hdev->vport[i];
9103
9104 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9105 VLAN_N_VID);
9106 while (vlan_id != VLAN_N_VID) {
9107 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9108 vport->vport_id, vlan_id,
70a21490 9109 true);
fe4144d4
JS
9110 if (ret && ret != -EINVAL)
9111 return;
9112
9113 clear_bit(vlan_id, vport->vlan_del_fail_bmap);
9114 hclge_rm_vport_vlan_table(vport, vlan_id, false);
9115
9116 sync_cnt++;
9117 if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
9118 return;
9119
9120 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9121 VLAN_N_VID);
9122 }
9123 }
21e043cd
JS
9124}
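/* hclge_sync_vlan_filter() retries VLAN deletions that could not be
 * applied earlier (recorded in vlan_del_fail_bmap, e.g. while a reset was
 * in progress). At most HCLGE_MAX_SYNC_COUNT (60) entries are removed per
 * invocation, so a single service-task pass stays bounded.
 */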
9125
e6d7d79d 9126static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
46a3df9f 9127{
d44f9b63 9128 struct hclge_config_max_frm_size_cmd *req;
46a3df9f 9129 struct hclge_desc desc;
46a3df9f 9130
46a3df9f
S
9131 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
9132
d44f9b63 9133 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
e6d7d79d 9134 req->max_frm_size = cpu_to_le16(new_mps);
8fc7346c 9135 req->min_frm_size = HCLGE_MAC_MIN_FRAME;
46a3df9f 9136
e6d7d79d 9137 return hclge_cmd_send(&hdev->hw, &desc, 1);
46a3df9f
S
9138}
9139
dd72140c
FL
9140static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
9141{
9142 struct hclge_vport *vport = hclge_get_vport(handle);
818f1675
YL
9143
9144 return hclge_set_vport_mtu(vport, new_mtu);
9145}
9146
9147int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
9148{
dd72140c 9149 struct hclge_dev *hdev = vport->back;
63cbf7a9 9150 int i, max_frm_size, ret;
dd72140c 9151
9e690456 9152	/* HW supports 2-layer vlan */
e6d7d79d
YL
9153 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
9154 if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
9155 max_frm_size > HCLGE_MAC_MAX_FRAME)
9156 return -EINVAL;
9157
818f1675
YL
9158 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
9159 mutex_lock(&hdev->vport_lock);
9160 /* VF's mps must fit within hdev->mps */
9161 if (vport->vport_id && max_frm_size > hdev->mps) {
9162 mutex_unlock(&hdev->vport_lock);
9163 return -EINVAL;
9164 } else if (vport->vport_id) {
9165 vport->mps = max_frm_size;
9166 mutex_unlock(&hdev->vport_lock);
9167 return 0;
9168 }
9169
 9170	/* PF's mps must be greater than the VF's mps */
9171 for (i = 1; i < hdev->num_alloc_vport; i++)
9172 if (max_frm_size < hdev->vport[i].mps) {
9173 mutex_unlock(&hdev->vport_lock);
9174 return -EINVAL;
9175 }
9176
cdca4c48
YL
9177 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
9178
e6d7d79d 9179 ret = hclge_set_mac_mtu(hdev, max_frm_size);
dd72140c
FL
9180 if (ret) {
9181 dev_err(&hdev->pdev->dev,
9182 "Change mtu fail, ret =%d\n", ret);
818f1675 9183 goto out;
dd72140c
FL
9184 }
9185
e6d7d79d 9186 hdev->mps = max_frm_size;
818f1675 9187 vport->mps = max_frm_size;
e6d7d79d 9188
dd72140c
FL
9189 ret = hclge_buffer_alloc(hdev);
9190 if (ret)
9191 dev_err(&hdev->pdev->dev,
9192 "Allocate buffer fail, ret =%d\n", ret);
9193
818f1675 9194out:
cdca4c48 9195 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
818f1675 9196 mutex_unlock(&hdev->vport_lock);
dd72140c
FL
9197 return ret;
9198}
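/* Worked example for the MTU handling above, assuming the standard kernel
 * values ETH_HLEN = 14, ETH_FCS_LEN = 4 and VLAN_HLEN = 4: an MTU of 1500
 * gives max_frm_size = 1500 + 14 + 4 + 2 * 4 = 1526 bytes. A VF may only
 * set an mps within the PF's mps, while changing the PF's mps reprograms
 * the MAC frame size and re-allocates buffers with the client notified
 * DOWN/UP around the change.
 */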
9199
46a3df9f
S
9200static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
9201 bool enable)
9202{
d44f9b63 9203 struct hclge_reset_tqp_queue_cmd *req;
46a3df9f
S
9204 struct hclge_desc desc;
9205 int ret;
9206
9207 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
9208
d44f9b63 9209 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
46a3df9f 9210 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
b9a8f883
YL
9211 if (enable)
9212 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
46a3df9f
S
9213
9214 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9215 if (ret) {
9216 dev_err(&hdev->pdev->dev,
9217 "Send tqp reset cmd error, status =%d\n", ret);
9218 return ret;
9219 }
9220
9221 return 0;
9222}
9223
9224static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
9225{
d44f9b63 9226 struct hclge_reset_tqp_queue_cmd *req;
46a3df9f
S
9227 struct hclge_desc desc;
9228 int ret;
9229
9230 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
9231
d44f9b63 9232 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
46a3df9f
S
9233 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
9234
9235 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9236 if (ret) {
9237 dev_err(&hdev->pdev->dev,
9238 "Get reset status error, status =%d\n", ret);
9239 return ret;
9240 }
9241
e4e87715 9242 return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
46a3df9f
S
9243}
9244
0c29d191 9245u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
814e0274
PL
9246{
9247 struct hnae3_queue *queue;
9248 struct hclge_tqp *tqp;
9249
9250 queue = handle->kinfo.tqp[queue_id];
9251 tqp = container_of(queue, struct hclge_tqp, q);
9252
9253 return tqp->index;
9254}
9255
7fa6be4f 9256int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
46a3df9f
S
9257{
9258 struct hclge_vport *vport = hclge_get_vport(handle);
9259 struct hclge_dev *hdev = vport->back;
9260 int reset_try_times = 0;
9261 int reset_status;
814e0274 9262 u16 queue_gid;
63cbf7a9 9263 int ret;
46a3df9f 9264
814e0274
PL
9265 queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
9266
46a3df9f
S
9267 ret = hclge_tqp_enable(hdev, queue_id, 0, false);
9268 if (ret) {
7fa6be4f
HT
9269 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
9270 return ret;
46a3df9f
S
9271 }
9272
814e0274 9273 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
46a3df9f 9274 if (ret) {
7fa6be4f
HT
9275 dev_err(&hdev->pdev->dev,
9276 "Send reset tqp cmd fail, ret = %d\n", ret);
9277 return ret;
46a3df9f
S
9278 }
9279
46a3df9f 9280 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
814e0274 9281 reset_status = hclge_get_reset_status(hdev, queue_gid);
46a3df9f
S
9282 if (reset_status)
9283 break;
e8df45c2
ZL
9284
9285 /* Wait for tqp hw reset */
9286 usleep_range(1000, 1200);
46a3df9f
S
9287 }
9288
9289 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
7fa6be4f
HT
9290 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
9291 return ret;
46a3df9f
S
9292 }
9293
814e0274 9294 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7fa6be4f
HT
9295 if (ret)
9296 dev_err(&hdev->pdev->dev,
9297 "Deassert the soft reset fail, ret = %d\n", ret);
9298
9299 return ret;
46a3df9f
S
9300}
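/* TQP reset sequence above: disable the queue, assert the per-queue reset
 * via HCLGE_OPC_RESET_TQP_QUEUE, poll the ready_to_reset bit up to
 * HCLGE_TQP_RESET_TRY_TIMES times (sleeping roughly 1ms between polls),
 * then de-assert the soft reset.
 */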
9301
1a426f8b
PL
9302void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
9303{
9304 struct hclge_dev *hdev = vport->back;
9305 int reset_try_times = 0;
9306 int reset_status;
9307 u16 queue_gid;
9308 int ret;
9309
9310 queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
9311
9312 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
9313 if (ret) {
9314 dev_warn(&hdev->pdev->dev,
9315 "Send reset tqp cmd fail, ret = %d\n", ret);
9316 return;
9317 }
9318
1a426f8b 9319 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
1a426f8b
PL
9320 reset_status = hclge_get_reset_status(hdev, queue_gid);
9321 if (reset_status)
9322 break;
e8df45c2
ZL
9323
9324 /* Wait for tqp hw reset */
9325 usleep_range(1000, 1200);
1a426f8b
PL
9326 }
9327
9328 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
9329 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
9330 return;
9331 }
9332
9333 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
9334 if (ret)
9335 dev_warn(&hdev->pdev->dev,
9336 "Deassert the soft reset fail, ret = %d\n", ret);
9337}
9338
46a3df9f
S
9339static u32 hclge_get_fw_version(struct hnae3_handle *handle)
9340{
9341 struct hclge_vport *vport = hclge_get_vport(handle);
9342 struct hclge_dev *hdev = vport->back;
9343
9344 return hdev->fw_version;
9345}
9346
61387774
PL
9347static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
9348{
9349 struct phy_device *phydev = hdev->hw.mac.phydev;
9350
9351 if (!phydev)
9352 return;
9353
70814e81 9354 phy_set_asym_pause(phydev, rx_en, tx_en);
61387774
PL
9355}
9356
9357static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
9358{
61387774
PL
9359 int ret;
9360
40173a2e 9361 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
61387774 9362 return 0;
61387774
PL
9363
9364 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
aacbe27e
YL
9365 if (ret)
9366 dev_err(&hdev->pdev->dev,
9367 "configure pauseparam error, ret = %d.\n", ret);
61387774 9368
aacbe27e 9369 return ret;
61387774
PL
9370}
9371
1770a7a3
PL
9372int hclge_cfg_flowctrl(struct hclge_dev *hdev)
9373{
9374 struct phy_device *phydev = hdev->hw.mac.phydev;
9375 u16 remote_advertising = 0;
63cbf7a9 9376 u16 local_advertising;
1770a7a3
PL
9377 u32 rx_pause, tx_pause;
9378 u8 flowctl;
9379
9380 if (!phydev->link || !phydev->autoneg)
9381 return 0;
9382
3c1bcc86 9383 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
1770a7a3
PL
9384
9385 if (phydev->pause)
9386 remote_advertising = LPA_PAUSE_CAP;
9387
9388 if (phydev->asym_pause)
9389 remote_advertising |= LPA_PAUSE_ASYM;
9390
9391 flowctl = mii_resolve_flowctrl_fdx(local_advertising,
9392 remote_advertising);
9393 tx_pause = flowctl & FLOW_CTRL_TX;
9394 rx_pause = flowctl & FLOW_CTRL_RX;
9395
9396 if (phydev->duplex == HCLGE_MAC_HALF) {
9397 tx_pause = 0;
9398 rx_pause = 0;
9399 }
9400
9401 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
9402}
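/* Pause resolution in hclge_cfg_flowctrl() follows the usual autoneg rules:
 * the local advertisement comes from phydev->advertising, the remote one
 * from phydev->pause/asym_pause, and mii_resolve_flowctrl_fdx() picks the
 * TX/RX pause combination. For example, when both link partners advertise
 * symmetric pause, pause is enabled in both directions; on half duplex it
 * is forced off.
 */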
9403
46a3df9f
S
9404static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
9405 u32 *rx_en, u32 *tx_en)
9406{
9407 struct hclge_vport *vport = hclge_get_vport(handle);
9408 struct hclge_dev *hdev = vport->back;
fb89629f 9409 struct phy_device *phydev = hdev->hw.mac.phydev;
46a3df9f 9410
fb89629f 9411 *auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
46a3df9f
S
9412
9413 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
9414 *rx_en = 0;
9415 *tx_en = 0;
9416 return;
9417 }
9418
9419 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
9420 *rx_en = 1;
9421 *tx_en = 0;
9422 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
9423 *tx_en = 1;
9424 *rx_en = 0;
9425 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
9426 *rx_en = 1;
9427 *tx_en = 1;
9428 } else {
9429 *rx_en = 0;
9430 *tx_en = 0;
9431 }
9432}
9433
aacbe27e
YL
9434static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
9435 u32 rx_en, u32 tx_en)
9436{
9437 if (rx_en && tx_en)
9438 hdev->fc_mode_last_time = HCLGE_FC_FULL;
9439 else if (rx_en && !tx_en)
9440 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
9441 else if (!rx_en && tx_en)
9442 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
9443 else
9444 hdev->fc_mode_last_time = HCLGE_FC_NONE;
9445
9446 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
9447}
9448
61387774
PL
9449static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
9450 u32 rx_en, u32 tx_en)
9451{
9452 struct hclge_vport *vport = hclge_get_vport(handle);
9453 struct hclge_dev *hdev = vport->back;
9454 struct phy_device *phydev = hdev->hw.mac.phydev;
9455 u32 fc_autoneg;
9456
fb89629f
JS
9457 if (phydev) {
9458 fc_autoneg = hclge_get_autoneg(handle);
9459 if (auto_neg != fc_autoneg) {
9460 dev_info(&hdev->pdev->dev,
9461 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
9462 return -EOPNOTSUPP;
9463 }
61387774
PL
9464 }
9465
9466 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
9467 dev_info(&hdev->pdev->dev,
9468 "Priority flow control enabled. Cannot set link flow control.\n");
9469 return -EOPNOTSUPP;
9470 }
9471
9472 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
9473
aacbe27e
YL
9474 hclge_record_user_pauseparam(hdev, rx_en, tx_en);
9475
fb89629f 9476 if (!auto_neg)
61387774
PL
9477 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
9478
22f48e24
JS
9479 if (phydev)
9480 return phy_start_aneg(phydev);
9481
fb89629f 9482 return -EOPNOTSUPP;
61387774
PL
9483}
9484
46a3df9f
S
9485static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
9486 u8 *auto_neg, u32 *speed, u8 *duplex)
9487{
9488 struct hclge_vport *vport = hclge_get_vport(handle);
9489 struct hclge_dev *hdev = vport->back;
9490
9491 if (speed)
9492 *speed = hdev->hw.mac.speed;
9493 if (duplex)
9494 *duplex = hdev->hw.mac.duplex;
9495 if (auto_neg)
9496 *auto_neg = hdev->hw.mac.autoneg;
9497}
9498
88d10bd6
JS
9499static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
9500 u8 *module_type)
46a3df9f
S
9501{
9502 struct hclge_vport *vport = hclge_get_vport(handle);
9503 struct hclge_dev *hdev = vport->back;
9504
a9775bb6
GH
 9505	/* When the nic is down, the service task is not running and doesn't
 9506	 * update the port information every second. Query the port information
 9507	 * before returning the media type, to ensure the media information is correct.
9508 */
9509 hclge_update_port_info(hdev);
9510
46a3df9f
S
9511 if (media_type)
9512 *media_type = hdev->hw.mac.media_type;
88d10bd6
JS
9513
9514 if (module_type)
9515 *module_type = hdev->hw.mac.module_type;
46a3df9f
S
9516}
9517
9518static void hclge_get_mdix_mode(struct hnae3_handle *handle,
9519 u8 *tp_mdix_ctrl, u8 *tp_mdix)
9520{
9521 struct hclge_vport *vport = hclge_get_vport(handle);
9522 struct hclge_dev *hdev = vport->back;
9523 struct phy_device *phydev = hdev->hw.mac.phydev;
ebaf1908
WL
9524 int mdix_ctrl, mdix, is_resolved;
9525 unsigned int retval;
46a3df9f
S
9526
9527 if (!phydev) {
9528 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
9529 *tp_mdix = ETH_TP_MDI_INVALID;
9530 return;
9531 }
9532
9533 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
9534
9535 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
e4e87715
PL
9536 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
9537 HCLGE_PHY_MDIX_CTRL_S);
46a3df9f
S
9538
9539 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
e4e87715
PL
9540 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
9541 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
46a3df9f
S
9542
9543 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
9544
9545 switch (mdix_ctrl) {
9546 case 0x0:
9547 *tp_mdix_ctrl = ETH_TP_MDI;
9548 break;
9549 case 0x1:
9550 *tp_mdix_ctrl = ETH_TP_MDI_X;
9551 break;
9552 case 0x3:
9553 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
9554 break;
9555 default:
9556 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
9557 break;
9558 }
9559
9560 if (!is_resolved)
9561 *tp_mdix = ETH_TP_MDI_INVALID;
9562 else if (mdix)
9563 *tp_mdix = ETH_TP_MDI_X;
9564 else
9565 *tp_mdix = ETH_TP_MDI;
9566}
9567
bb87be87
YL
9568static void hclge_info_show(struct hclge_dev *hdev)
9569{
9570 struct device *dev = &hdev->pdev->dev;
9571
9572 dev_info(dev, "PF info begin:\n");
9573
adcf738b
GL
9574 dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
9575 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
9576 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
9577 dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
9578 dev_info(dev, "Numbers of vmdp vports: %u\n", hdev->num_vmdq_vport);
9579 dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
9580 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
9581 dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
9582 dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
9583 dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
bb87be87
YL
9584 dev_info(dev, "This is %s PF\n",
9585 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
9586 dev_info(dev, "DCB %s\n",
9587 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
9588 dev_info(dev, "MQPRIO %s\n",
9589 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
9590
9591 dev_info(dev, "PF info end.\n");
9592}
9593
994e04f1
HT
9594static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
9595 struct hclge_vport *vport)
9596{
9597 struct hnae3_client *client = vport->nic.client;
9598 struct hclge_dev *hdev = ae_dev->priv;
0bfdf286 9599 int rst_cnt = hdev->rst_stats.reset_cnt;
994e04f1
HT
9600 int ret;
9601
9602 ret = client->ops->init_instance(&vport->nic);
9603 if (ret)
9604 return ret;
9605
9606 set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
7cf9c069
HT
9607 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9608 rst_cnt != hdev->rst_stats.reset_cnt) {
9609 ret = -EBUSY;
9610 goto init_nic_err;
9611 }
9612
00ea6e5f
WL
9613 /* Enable nic hw error interrupts */
9614 ret = hclge_config_nic_hw_error(hdev, true);
bcf643c5 9615 if (ret) {
00ea6e5f
WL
9616 dev_err(&ae_dev->pdev->dev,
9617 "fail(%d) to enable hw error interrupts\n", ret);
bcf643c5
WL
9618 goto init_nic_err;
9619 }
9620
9621 hnae3_set_client_init_flag(client, ae_dev, 1);
00ea6e5f 9622
994e04f1
HT
9623 if (netif_msg_drv(&hdev->vport->nic))
9624 hclge_info_show(hdev);
9625
00ea6e5f 9626 return ret;
7cf9c069
HT
9627
9628init_nic_err:
9629 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9630 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9631 msleep(HCLGE_WAIT_RESET_DONE);
9632
9633 client->ops->uninit_instance(&vport->nic, 0);
9634
9635 return ret;
994e04f1
HT
9636}
9637
9638static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
9639 struct hclge_vport *vport)
9640{
994e04f1 9641 struct hclge_dev *hdev = ae_dev->priv;
31a57fde 9642 struct hnae3_client *client;
7cf9c069 9643 int rst_cnt;
994e04f1
HT
9644 int ret;
9645
9646 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
9647 !hdev->nic_client)
9648 return 0;
9649
9650 client = hdev->roce_client;
9651 ret = hclge_init_roce_base_info(vport);
9652 if (ret)
9653 return ret;
9654
7cf9c069 9655 rst_cnt = hdev->rst_stats.reset_cnt;
994e04f1
HT
9656 ret = client->ops->init_instance(&vport->roce);
9657 if (ret)
9658 return ret;
9659
9660 set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
7cf9c069
HT
9661 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9662 rst_cnt != hdev->rst_stats.reset_cnt) {
9663 ret = -EBUSY;
9664 goto init_roce_err;
9665 }
9666
72fcd2be
HT
9667 /* Enable roce ras interrupts */
9668 ret = hclge_config_rocee_ras_interrupt(hdev, true);
9669 if (ret) {
9670 dev_err(&ae_dev->pdev->dev,
9671 "fail(%d) to enable roce ras interrupts\n", ret);
9672 goto init_roce_err;
9673 }
9674
994e04f1
HT
9675 hnae3_set_client_init_flag(client, ae_dev, 1);
9676
9677 return 0;
7cf9c069
HT
9678
9679init_roce_err:
9680 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9681 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9682 msleep(HCLGE_WAIT_RESET_DONE);
9683
9684 hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
9685
9686 return ret;
994e04f1
HT
9687}
9688
46a3df9f
S
9689static int hclge_init_client_instance(struct hnae3_client *client,
9690 struct hnae3_ae_dev *ae_dev)
9691{
9692 struct hclge_dev *hdev = ae_dev->priv;
9693 struct hclge_vport *vport;
9694 int i, ret;
9695
9696 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9697 vport = &hdev->vport[i];
9698
9699 switch (client->type) {
9700 case HNAE3_CLIENT_KNIC:
46a3df9f
S
9701 hdev->nic_client = client;
9702 vport->nic.client = client;
994e04f1 9703 ret = hclge_init_nic_client_instance(ae_dev, vport);
46a3df9f 9704 if (ret)
49dd8054 9705 goto clear_nic;
46a3df9f 9706
994e04f1
HT
9707 ret = hclge_init_roce_client_instance(ae_dev, vport);
9708 if (ret)
9709 goto clear_roce;
46a3df9f 9710
46a3df9f
S
9711 break;
9712 case HNAE3_CLIENT_ROCE:
e92a0843 9713 if (hnae3_dev_roce_supported(hdev)) {
46a3df9f
S
9714 hdev->roce_client = client;
9715 vport->roce.client = client;
9716 }
9717
994e04f1
HT
9718 ret = hclge_init_roce_client_instance(ae_dev, vport);
9719 if (ret)
9720 goto clear_roce;
fa7a4bd5
JS
9721
9722 break;
9723 default:
9724 return -EINVAL;
46a3df9f
S
9725 }
9726 }
9727
37417c66 9728 return 0;
49dd8054
JS
9729
9730clear_nic:
9731 hdev->nic_client = NULL;
9732 vport->nic.client = NULL;
9733 return ret;
9734clear_roce:
9735 hdev->roce_client = NULL;
9736 vport->roce.client = NULL;
9737 return ret;
46a3df9f
S
9738}
9739
9740static void hclge_uninit_client_instance(struct hnae3_client *client,
9741 struct hnae3_ae_dev *ae_dev)
9742{
9743 struct hclge_dev *hdev = ae_dev->priv;
9744 struct hclge_vport *vport;
9745 int i;
9746
9747 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9748 vport = &hdev->vport[i];
a17dcf3f 9749 if (hdev->roce_client) {
2a0bfc36 9750 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
7cf9c069
HT
9751 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9752 msleep(HCLGE_WAIT_RESET_DONE);
9753
46a3df9f
S
9754 hdev->roce_client->ops->uninit_instance(&vport->roce,
9755 0);
a17dcf3f
L
9756 hdev->roce_client = NULL;
9757 vport->roce.client = NULL;
9758 }
46a3df9f
S
9759 if (client->type == HNAE3_CLIENT_ROCE)
9760 return;
49dd8054 9761 if (hdev->nic_client && client->ops->uninit_instance) {
bd9109c9 9762 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
7cf9c069
HT
9763 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9764 msleep(HCLGE_WAIT_RESET_DONE);
9765
46a3df9f 9766 client->ops->uninit_instance(&vport->nic, 0);
a17dcf3f
L
9767 hdev->nic_client = NULL;
9768 vport->nic.client = NULL;
9769 }
46a3df9f
S
9770 }
9771}
9772
9773static int hclge_pci_init(struct hclge_dev *hdev)
9774{
9775 struct pci_dev *pdev = hdev->pdev;
9776 struct hclge_hw *hw;
9777 int ret;
9778
9779 ret = pci_enable_device(pdev);
9780 if (ret) {
9781 dev_err(&pdev->dev, "failed to enable PCI device\n");
3e249d3b 9782 return ret;
46a3df9f
S
9783 }
9784
9785 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9786 if (ret) {
9787 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9788 if (ret) {
9789 dev_err(&pdev->dev,
9790 "can't set consistent PCI DMA");
9791 goto err_disable_device;
9792 }
9793 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
9794 }
9795
9796 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
9797 if (ret) {
9798 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
9799 goto err_disable_device;
9800 }
9801
9802 pci_set_master(pdev);
9803 hw = &hdev->hw;
46a3df9f
S
9804 hw->io_base = pcim_iomap(pdev, 2, 0);
9805 if (!hw->io_base) {
9806 dev_err(&pdev->dev, "Can't map configuration register space\n");
9807 ret = -ENOMEM;
9808 goto err_clr_master;
9809 }
9810
709eb41a
L
9811 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
9812
46a3df9f
S
9813 return 0;
9814err_clr_master:
9815 pci_clear_master(pdev);
9816 pci_release_regions(pdev);
9817err_disable_device:
9818 pci_disable_device(pdev);
46a3df9f
S
9819
9820 return ret;
9821}
9822
9823static void hclge_pci_uninit(struct hclge_dev *hdev)
9824{
9825 struct pci_dev *pdev = hdev->pdev;
9826
6a814413 9827 pcim_iounmap(pdev, hdev->hw.io_base);
887c3820 9828 pci_free_irq_vectors(pdev);
46a3df9f
S
9829 pci_clear_master(pdev);
9830 pci_release_mem_regions(pdev);
9831 pci_disable_device(pdev);
9832}
9833
48569cda
PL
9834static void hclge_state_init(struct hclge_dev *hdev)
9835{
9836 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
9837 set_bit(HCLGE_STATE_DOWN, &hdev->state);
9838 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
9839 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
d5432455 9840 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
48569cda
PL
9841 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
9842 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
9843}
9844
9845static void hclge_state_uninit(struct hclge_dev *hdev)
9846{
9847 set_bit(HCLGE_STATE_DOWN, &hdev->state);
acfc3d55 9848 set_bit(HCLGE_STATE_REMOVING, &hdev->state);
48569cda 9849
65e41e7e
HT
9850 if (hdev->reset_timer.function)
9851 del_timer_sync(&hdev->reset_timer);
7be1b9f3
YL
9852 if (hdev->service_task.work.func)
9853 cancel_delayed_work_sync(&hdev->service_task);
48569cda
PL
9854}
9855
6b9a97ee
HT
9856static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
9857{
8627bded
HT
9858#define HCLGE_FLR_RETRY_WAIT_MS 500
9859#define HCLGE_FLR_RETRY_CNT 5
6b9a97ee 9860
8627bded
HT
9861 struct hclge_dev *hdev = ae_dev->priv;
9862 int retry_cnt = 0;
9863 int ret;
6b9a97ee 9864
8627bded
HT
9865retry:
9866 down(&hdev->reset_sem);
9867 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9868 hdev->reset_type = HNAE3_FLR_RESET;
9869 ret = hclge_reset_prepare(hdev);
9870 if (ret) {
9871 dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
9872 ret);
9873 if (hdev->reset_pending ||
9874 retry_cnt++ < HCLGE_FLR_RETRY_CNT) {
9875 dev_err(&hdev->pdev->dev,
9876 "reset_pending:0x%lx, retry_cnt:%d\n",
9877 hdev->reset_pending, retry_cnt);
9878 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9879 up(&hdev->reset_sem);
9880 msleep(HCLGE_FLR_RETRY_WAIT_MS);
9881 goto retry;
9882 }
9883 }
6b9a97ee 9884
8627bded
HT
9885 /* disable misc vector before FLR done */
9886 hclge_enable_vector(&hdev->misc_vector, false);
9887 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
9888 hdev->rst_stats.flr_rst_cnt++;
6b9a97ee
HT
9889}
9890
9891static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
9892{
9893 struct hclge_dev *hdev = ae_dev->priv;
8627bded
HT
9894 int ret;
9895
9896 hclge_enable_vector(&hdev->misc_vector, true);
6b9a97ee 9897
8627bded
HT
9898 ret = hclge_reset_rebuild(hdev);
9899 if (ret)
9900 dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
9901
9902 hdev->reset_type = HNAE3_NONE_RESET;
9903 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9904 up(&hdev->reset_sem);
6b9a97ee
HT
9905}
9906
31bb229d
PL
9907static void hclge_clear_resetting_state(struct hclge_dev *hdev)
9908{
9909 u16 i;
9910
9911 for (i = 0; i < hdev->num_alloc_vport; i++) {
9912 struct hclge_vport *vport = &hdev->vport[i];
9913 int ret;
9914
9915 /* Send cmd to clear VF's FUNC_RST_ING */
9916 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
9917 if (ret)
9918 dev_warn(&hdev->pdev->dev,
adcf738b 9919 "clear vf(%u) rst failed %d!\n",
31bb229d
PL
9920 vport->vport_id, ret);
9921 }
9922}
9923
46a3df9f
S
9924static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
9925{
9926 struct pci_dev *pdev = ae_dev->pdev;
46a3df9f
S
9927 struct hclge_dev *hdev;
9928 int ret;
9929
9930 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
9931 if (!hdev) {
9932 ret = -ENOMEM;
ffd5656e 9933 goto out;
46a3df9f
S
9934 }
9935
46a3df9f
S
9936 hdev->pdev = pdev;
9937 hdev->ae_dev = ae_dev;
4ed340ab 9938 hdev->reset_type = HNAE3_NONE_RESET;
0742ed7c 9939 hdev->reset_level = HNAE3_FUNC_RESET;
46a3df9f 9940 ae_dev->priv = hdev;
9e690456
GH
9941
 9942	/* HW supports 2-layer vlan */
e6d7d79d 9943 hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
46a3df9f 9944
818f1675 9945 mutex_init(&hdev->vport_lock);
44122887 9946 spin_lock_init(&hdev->fd_rule_lock);
8627bded 9947 sema_init(&hdev->reset_sem, 1);
818f1675 9948
46a3df9f 9949 ret = hclge_pci_init(hdev);
60df7e91 9950 if (ret)
ffd5656e 9951 goto out;
46a3df9f 9952
3efb960f
L
9953 /* Firmware command queue initialize */
9954 ret = hclge_cmd_queue_init(hdev);
60df7e91 9955 if (ret)
ffd5656e 9956 goto err_pci_uninit;
3efb960f
L
9957
9958 /* Firmware command initialize */
46a3df9f
S
9959 ret = hclge_cmd_init(hdev);
9960 if (ret)
ffd5656e 9961 goto err_cmd_uninit;
46a3df9f
S
9962
9963 ret = hclge_get_cap(hdev);
60df7e91 9964 if (ret)
ffd5656e 9965 goto err_cmd_uninit;
46a3df9f
S
9966
9967 ret = hclge_configure(hdev);
9968 if (ret) {
9969 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
ffd5656e 9970 goto err_cmd_uninit;
46a3df9f
S
9971 }
9972
887c3820 9973 ret = hclge_init_msi(hdev);
46a3df9f 9974 if (ret) {
887c3820 9975 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
ffd5656e 9976 goto err_cmd_uninit;
46a3df9f
S
9977 }
9978
466b0c00 9979 ret = hclge_misc_irq_init(hdev);
60df7e91 9980 if (ret)
ffd5656e 9981 goto err_msi_uninit;
466b0c00 9982
46a3df9f
S
9983 ret = hclge_alloc_tqps(hdev);
9984 if (ret) {
9985 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
ffd5656e 9986 goto err_msi_irq_uninit;
46a3df9f
S
9987 }
9988
9989 ret = hclge_alloc_vport(hdev);
60df7e91 9990 if (ret)
ffd5656e 9991 goto err_msi_irq_uninit;
46a3df9f 9992
7df7dad6 9993 ret = hclge_map_tqp(hdev);
60df7e91 9994 if (ret)
2312e050 9995 goto err_msi_irq_uninit;
7df7dad6 9996
c5ef83cb
HT
9997 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
9998 ret = hclge_mac_mdio_config(hdev);
60df7e91 9999 if (ret)
2312e050 10000 goto err_msi_irq_uninit;
cf9cca2d 10001 }
10002
39932473 10003 ret = hclge_init_umv_space(hdev);
60df7e91 10004 if (ret)
9fc55413 10005 goto err_mdiobus_unreg;
39932473 10006
46a3df9f
S
10007 ret = hclge_mac_init(hdev);
10008 if (ret) {
10009 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
ffd5656e 10010 goto err_mdiobus_unreg;
46a3df9f 10011 }
46a3df9f
S
10012
10013 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
10014 if (ret) {
10015 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
ffd5656e 10016 goto err_mdiobus_unreg;
46a3df9f
S
10017 }
10018
b26a6fea
PL
10019 ret = hclge_config_gro(hdev, true);
10020 if (ret)
10021 goto err_mdiobus_unreg;
10022
46a3df9f
S
10023 ret = hclge_init_vlan_config(hdev);
10024 if (ret) {
10025 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
ffd5656e 10026 goto err_mdiobus_unreg;
46a3df9f
S
10027 }
10028
10029 ret = hclge_tm_schd_init(hdev);
10030 if (ret) {
10031 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
ffd5656e 10032 goto err_mdiobus_unreg;
68ece54e
YL
10033 }
10034
268f5dfa 10035 hclge_rss_init_cfg(hdev);
68ece54e
YL
10036 ret = hclge_rss_init_hw(hdev);
10037 if (ret) {
10038 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
ffd5656e 10039 goto err_mdiobus_unreg;
46a3df9f
S
10040 }
10041
f5aac71c
FL
10042 ret = init_mgr_tbl(hdev);
10043 if (ret) {
10044 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
ffd5656e 10045 goto err_mdiobus_unreg;
f5aac71c
FL
10046 }
10047
d695964d
JS
10048 ret = hclge_init_fd_config(hdev);
10049 if (ret) {
10050 dev_err(&pdev->dev,
10051 "fd table init fail, ret=%d\n", ret);
10052 goto err_mdiobus_unreg;
10053 }
10054
a6345787
WL
10055 INIT_KFIFO(hdev->mac_tnl_log);
10056
cacde272
YL
10057 hclge_dcb_ops_set(hdev);
10058
65e41e7e 10059 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
7be1b9f3 10060 INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
46a3df9f 10061
08125454
YL
 10062	/* Set up affinity after the service timer setup because add_timer_on
 10063	 * is called in the affinity notify.
10064 */
10065 hclge_misc_affinity_setup(hdev);
10066
8e52a602 10067 hclge_clear_all_event_cause(hdev);
31bb229d 10068 hclge_clear_resetting_state(hdev);
8e52a602 10069
e4193e24
SJ
10070 /* Log and clear the hw errors those already occurred */
10071 hclge_handle_all_hns_hw_errors(ae_dev);
10072
e3b84ed2
SJ
 10073	/* request a delayed reset for the error recovery because an immediate
 10074	 * global reset on a PF may affect the pending initialization of other PFs
10075 */
10076 if (ae_dev->hw_err_reset_req) {
10077 enum hnae3_reset_type reset_level;
10078
10079 reset_level = hclge_get_reset_level(ae_dev,
10080 &ae_dev->hw_err_reset_req);
10081 hclge_set_def_reset_request(ae_dev, reset_level);
10082 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
10083 }
10084
466b0c00
L
10085 /* Enable MISC vector(vector0) */
10086 hclge_enable_vector(&hdev->misc_vector, true);
10087
48569cda 10088 hclge_state_init(hdev);
0742ed7c 10089 hdev->last_reset_time = jiffies;
46a3df9f 10090
08d80a4c
HT
10091 dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
10092 HCLGE_DRIVER_NAME);
10093
1c6dfe6f
YL
10094 hclge_task_schedule(hdev, round_jiffies_relative(HZ));
10095
46a3df9f
S
10096 return 0;
10097
ffd5656e
HT
10098err_mdiobus_unreg:
10099 if (hdev->hw.mac.phydev)
10100 mdiobus_unregister(hdev->hw.mac.mdio_bus);
ffd5656e
HT
10101err_msi_irq_uninit:
10102 hclge_misc_irq_uninit(hdev);
10103err_msi_uninit:
10104 pci_free_irq_vectors(pdev);
10105err_cmd_uninit:
232d0d55 10106 hclge_cmd_uninit(hdev);
ffd5656e 10107err_pci_uninit:
6a814413 10108 pcim_iounmap(pdev, hdev->hw.io_base);
ffd5656e 10109 pci_clear_master(pdev);
46a3df9f 10110 pci_release_regions(pdev);
ffd5656e 10111 pci_disable_device(pdev);
ffd5656e 10112out:
46a3df9f
S
10113 return ret;
10114}
10115
c6dc5213 10116static void hclge_stats_clear(struct hclge_dev *hdev)
10117{
1c6dfe6f 10118 memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
c6dc5213 10119}
10120
22044f95
JS
10121static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10122{
10123 return hclge_config_switch_param(hdev, vf, enable,
10124 HCLGE_SWITCH_ANTI_SPOOF_MASK);
10125}
10126
10127static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10128{
10129 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
10130 HCLGE_FILTER_FE_NIC_INGRESS_B,
10131 enable, vf);
10132}
10133
10134static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
10135{
10136 int ret;
10137
10138 ret = hclge_set_mac_spoofchk(hdev, vf, enable);
10139 if (ret) {
10140 dev_err(&hdev->pdev->dev,
10141 "Set vf %d mac spoof check %s failed, ret=%d\n",
10142 vf, enable ? "on" : "off", ret);
10143 return ret;
10144 }
10145
10146 ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
10147 if (ret)
10148 dev_err(&hdev->pdev->dev,
10149 "Set vf %d vlan spoof check %s failed, ret=%d\n",
10150 vf, enable ? "on" : "off", ret);
10151
10152 return ret;
10153}
10154
10155static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
10156 bool enable)
10157{
10158 struct hclge_vport *vport = hclge_get_vport(handle);
10159 struct hclge_dev *hdev = vport->back;
10160 u32 new_spoofchk = enable ? 1 : 0;
10161 int ret;
10162
10163 if (hdev->pdev->revision == 0x20)
10164 return -EOPNOTSUPP;
10165
10166 vport = hclge_get_vf_vport(hdev, vf);
10167 if (!vport)
10168 return -EINVAL;
10169
10170 if (vport->vf_info.spoofchk == new_spoofchk)
10171 return 0;
10172
10173 if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
10174 dev_warn(&hdev->pdev->dev,
10175 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
10176 vf);
7d0b3451 10177 else if (enable && hclge_is_umv_space_full(vport, true))
22044f95
JS
10178 dev_warn(&hdev->pdev->dev,
10179 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
10180 vf);
10181
10182 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
10183 if (ret)
10184 return ret;
10185
10186 vport->vf_info.spoofchk = new_spoofchk;
10187 return 0;
10188}
10189
10190static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
10191{
10192 struct hclge_vport *vport = hdev->vport;
10193 int ret;
10194 int i;
10195
10196 if (hdev->pdev->revision == 0x20)
10197 return 0;
10198
10199 /* resume the vf spoof check state after reset */
10200 for (i = 0; i < hdev->num_alloc_vport; i++) {
10201 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
10202 vport->vf_info.spoofchk);
10203 if (ret)
10204 return ret;
10205
10206 vport++;
10207 }
10208
10209 return 0;
10210}
10211
e196ec75
JS
10212static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
10213{
10214 struct hclge_vport *vport = hclge_get_vport(handle);
10215 struct hclge_dev *hdev = vport->back;
10216 u32 new_trusted = enable ? 1 : 0;
10217 bool en_bc_pmc;
10218 int ret;
10219
10220 vport = hclge_get_vf_vport(hdev, vf);
10221 if (!vport)
10222 return -EINVAL;
10223
10224 if (vport->vf_info.trusted == new_trusted)
10225 return 0;
10226
10227 /* Disable promisc mode for VF if it is not trusted any more. */
10228 if (!enable && vport->vf_info.promisc_enable) {
10229 en_bc_pmc = hdev->pdev->revision != 0x20;
10230 ret = hclge_set_vport_promisc_mode(vport, false, false,
10231 en_bc_pmc);
10232 if (ret)
10233 return ret;
10234 vport->vf_info.promisc_enable = 0;
10235 hclge_inform_vf_promisc_info(vport);
10236 }
10237
10238 vport->vf_info.trusted = new_trusted;
10239
10240 return 0;
10241}
10242
ee9e4424
YL
10243static void hclge_reset_vf_rate(struct hclge_dev *hdev)
10244{
10245 int ret;
10246 int vf;
10247
10248 /* reset vf rate to default value */
10249 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10250 struct hclge_vport *vport = &hdev->vport[vf];
10251
10252 vport->vf_info.max_tx_rate = 0;
10253 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
10254 if (ret)
10255 dev_err(&hdev->pdev->dev,
10256 "vf%d failed to reset to default, ret=%d\n",
10257 vf - HCLGE_VF_VPORT_START_NUM, ret);
10258 }
10259}
10260
10261static int hclge_vf_rate_param_check(struct hclge_dev *hdev, int vf,
10262 int min_tx_rate, int max_tx_rate)
10263{
10264 if (min_tx_rate != 0 ||
10265 max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
10266 dev_err(&hdev->pdev->dev,
10267 "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
10268 min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
10269 return -EINVAL;
10270 }
10271
10272 return 0;
10273}
10274
10275static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
10276 int min_tx_rate, int max_tx_rate, bool force)
10277{
10278 struct hclge_vport *vport = hclge_get_vport(handle);
10279 struct hclge_dev *hdev = vport->back;
10280 int ret;
10281
10282 ret = hclge_vf_rate_param_check(hdev, vf, min_tx_rate, max_tx_rate);
10283 if (ret)
10284 return ret;
10285
10286 vport = hclge_get_vf_vport(hdev, vf);
10287 if (!vport)
10288 return -EINVAL;
10289
10290 if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
10291 return 0;
10292
10293 ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
10294 if (ret)
10295 return ret;
10296
10297 vport->vf_info.max_tx_rate = max_tx_rate;
10298
10299 return 0;
10300}
10301
10302static int hclge_resume_vf_rate(struct hclge_dev *hdev)
10303{
10304 struct hnae3_handle *handle = &hdev->vport->nic;
10305 struct hclge_vport *vport;
10306 int ret;
10307 int vf;
10308
10309 /* resume the vf max_tx_rate after reset */
10310 for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
10311 vport = hclge_get_vf_vport(hdev, vf);
10312 if (!vport)
10313 return -EINVAL;
10314
 10315		/* zero means max rate; after reset, the firmware has already set it
 10316		 * to the max rate, so just continue.
10317 */
10318 if (!vport->vf_info.max_tx_rate)
10319 continue;
10320
10321 ret = hclge_set_vf_rate(handle, vf, 0,
10322 vport->vf_info.max_tx_rate, true);
10323 if (ret) {
10324 dev_err(&hdev->pdev->dev,
10325 "vf%d failed to resume tx_rate:%u, ret=%d\n",
10326 vf, vport->vf_info.max_tx_rate, ret);
10327 return ret;
10328 }
10329 }
10330
10331 return 0;
10332}
10333
a6d818e3
YL
10334static void hclge_reset_vport_state(struct hclge_dev *hdev)
10335{
10336 struct hclge_vport *vport = hdev->vport;
10337 int i;
10338
10339 for (i = 0; i < hdev->num_alloc_vport; i++) {
0f14c5b1 10340 hclge_vport_stop(vport);
a6d818e3
YL
10341 vport++;
10342 }
10343}
10344
4ed340ab
L
10345static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
10346{
10347 struct hclge_dev *hdev = ae_dev->priv;
10348 struct pci_dev *pdev = ae_dev->pdev;
10349 int ret;
10350
10351 set_bit(HCLGE_STATE_DOWN, &hdev->state);
10352
c6dc5213 10353 hclge_stats_clear(hdev);
ee4bcd3b
JS
 10354	/* NOTE: pf reset doesn't need to clear or restore the pf and vf table
 10355	 * entries, so the tables in memory should not be cleaned here.
10356 */
10357 if (hdev->reset_type == HNAE3_IMP_RESET ||
10358 hdev->reset_type == HNAE3_GLOBAL_RESET) {
039ba863
JS
10359 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
10360 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
ee4bcd3b
JS
10361 bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
10362 hclge_reset_umv_space(hdev);
10363 }
10364
4ed340ab
L
10365 ret = hclge_cmd_init(hdev);
10366 if (ret) {
10367 dev_err(&pdev->dev, "Cmd queue init failed\n");
10368 return ret;
10369 }
10370
4ed340ab
L
10371 ret = hclge_map_tqp(hdev);
10372 if (ret) {
10373 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
10374 return ret;
10375 }
10376
10377 ret = hclge_mac_init(hdev);
10378 if (ret) {
10379 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
10380 return ret;
10381 }
10382
4ed340ab
L
10383 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
10384 if (ret) {
10385 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
10386 return ret;
10387 }
10388
b26a6fea
PL
10389 ret = hclge_config_gro(hdev, true);
10390 if (ret)
10391 return ret;
10392
4ed340ab
L
10393 ret = hclge_init_vlan_config(hdev);
10394 if (ret) {
10395 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
10396 return ret;
10397 }
10398
44e59e37 10399 ret = hclge_tm_init_hw(hdev, true);
4ed340ab 10400 if (ret) {
f31c1ba6 10401 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
4ed340ab
L
10402 return ret;
10403 }
10404
10405 ret = hclge_rss_init_hw(hdev);
10406 if (ret) {
10407 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
10408 return ret;
10409 }
10410
d0db7ed3
YM
10411 ret = init_mgr_tbl(hdev);
10412 if (ret) {
10413 dev_err(&pdev->dev,
10414 "failed to reinit manager table, ret = %d\n", ret);
10415 return ret;
10416 }
10417
d695964d
JS
10418 ret = hclge_init_fd_config(hdev);
10419 if (ret) {
9b2f3477 10420 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
d695964d
JS
10421 return ret;
10422 }
10423
4fdd0bca
JS
10424 /* Log and clear the hw errors those already occurred */
10425 hclge_handle_all_hns_hw_errors(ae_dev);
10426
f3fa4a94 10427 /* Re-enable the hw error interrupts because
00ea6e5f 10428 * the interrupts get disabled on global reset.
01865a50 10429 */
00ea6e5f 10430 ret = hclge_config_nic_hw_error(hdev, true);
f3fa4a94
SJ
10431 if (ret) {
10432 dev_err(&pdev->dev,
00ea6e5f
WL
10433 "fail(%d) to re-enable NIC hw error interrupts\n",
10434 ret);
f3fa4a94
SJ
10435 return ret;
10436 }
01865a50 10437
00ea6e5f
WL
10438 if (hdev->roce_client) {
10439 ret = hclge_config_rocee_ras_interrupt(hdev, true);
10440 if (ret) {
10441 dev_err(&pdev->dev,
10442 "fail(%d) to re-enable roce ras interrupts\n",
10443 ret);
10444 return ret;
10445 }
10446 }
10447
a6d818e3 10448 hclge_reset_vport_state(hdev);
22044f95
JS
10449 ret = hclge_reset_vport_spoofchk(hdev);
10450 if (ret)
10451 return ret;
a6d818e3 10452
ee9e4424
YL
10453 ret = hclge_resume_vf_rate(hdev);
10454 if (ret)
10455 return ret;
10456
4ed340ab
L
10457 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
10458 HCLGE_DRIVER_NAME);
10459
10460 return 0;
10461}
10462
46a3df9f
S
10463static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
10464{
10465 struct hclge_dev *hdev = ae_dev->priv;
10466 struct hclge_mac *mac = &hdev->hw.mac;
10467
ee9e4424 10468 hclge_reset_vf_rate(hdev);
59359fc8 10469 hclge_clear_vf_vlan(hdev);
08125454 10470 hclge_misc_affinity_teardown(hdev);
48569cda 10471 hclge_state_uninit(hdev);
ee4bcd3b 10472 hclge_uninit_mac_table(hdev);
46a3df9f
S
10473
10474 if (mac->phydev)
10475 mdiobus_unregister(mac->mdio_bus);
10476
466b0c00
L
10477 /* Disable MISC vector(vector0) */
10478 hclge_enable_vector(&hdev->misc_vector, false);
8e52a602
XW
10479 synchronize_irq(hdev->misc_vector.vector_irq);
10480
00ea6e5f 10481 /* Disable all hw interrupts */
a6345787 10482 hclge_config_mac_tnl_int(hdev, false);
00ea6e5f
WL
10483 hclge_config_nic_hw_error(hdev, false);
10484 hclge_config_rocee_ras_interrupt(hdev, false);
10485
232d0d55 10486 hclge_cmd_uninit(hdev);
ca1d7669 10487 hclge_misc_irq_uninit(hdev);
46a3df9f 10488 hclge_pci_uninit(hdev);
818f1675 10489 mutex_destroy(&hdev->vport_lock);
c6075b19 10490 hclge_uninit_vport_vlan_table(hdev);
46a3df9f
S
10491 ae_dev->priv = NULL;
10492}
10493
482d2e9c
PL
10494static u32 hclge_get_max_channels(struct hnae3_handle *handle)
10495{
10496 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10497 struct hclge_vport *vport = hclge_get_vport(handle);
10498 struct hclge_dev *hdev = vport->back;
10499
c3b9c50d
HT
10500 return min_t(u32, hdev->rss_size_max,
10501 vport->alloc_tqps / kinfo->num_tc);
482d2e9c
PL
10502}
10503
10504static void hclge_get_channels(struct hnae3_handle *handle,
10505 struct ethtool_channels *ch)
10506{
482d2e9c
PL
10507 ch->max_combined = hclge_get_max_channels(handle);
10508 ch->other_count = 1;
10509 ch->max_other = 1;
c3b9c50d 10510 ch->combined_count = handle->kinfo.rss_size;
482d2e9c
PL
10511}
10512
09f2af64 10513static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
0d43bf45 10514 u16 *alloc_tqps, u16 *max_rss_size)
09f2af64
PL
10515{
10516 struct hclge_vport *vport = hclge_get_vport(handle);
10517 struct hclge_dev *hdev = vport->back;
09f2af64 10518
0d43bf45 10519 *alloc_tqps = vport->alloc_tqps;
09f2af64
PL
10520 *max_rss_size = hdev->rss_size_max;
10521}
10522
90c68a41
YL
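/* Change the number of channels (TQPs per TC): record the requested RSS
 * size, remap the vport in the TM module, reconfigure the RSS TC mode and,
 * unless the user has configured a custom indirection table, rebuild the
 * RSS indirection table for the new rss_size.
 */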
10523static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
10524 bool rxfh_configured)
09f2af64
PL
10525{
10526 struct hclge_vport *vport = hclge_get_vport(handle);
10527 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
354d0fab 10528 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
09f2af64 10529 struct hclge_dev *hdev = vport->back;
354d0fab 10530 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
adcf738b
GL
10531 u16 cur_rss_size = kinfo->rss_size;
10532 u16 cur_tqps = kinfo->num_tqps;
09f2af64 10533 u16 tc_valid[HCLGE_MAX_TC_NUM];
09f2af64
PL
10534 u16 roundup_size;
10535 u32 *rss_indir;
ebaf1908
WL
10536 unsigned int i;
10537 int ret;
09f2af64 10538
672ad0ed 10539 kinfo->req_rss_size = new_tqps_num;
09f2af64 10540
672ad0ed 10541 ret = hclge_tm_vport_map_update(hdev);
09f2af64 10542 if (ret) {
672ad0ed 10543 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
09f2af64
PL
10544 return ret;
10545 }
10546
10547 roundup_size = roundup_pow_of_two(kinfo->rss_size);
10548 roundup_size = ilog2(roundup_size);
10549 /* Set the RSS TC mode according to the new RSS size */
10550 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
10551 tc_valid[i] = 0;
10552
10553 if (!(hdev->hw_tc_map & BIT(i)))
10554 continue;
10555
10556 tc_valid[i] = 1;
10557 tc_size[i] = roundup_size;
10558 tc_offset[i] = kinfo->rss_size * i;
10559 }
10560 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
10561 if (ret)
10562 return ret;
10563
90c68a41
YL
10564 /* RSS indirection table has been configuared by user */
10565 if (rxfh_configured)
10566 goto out;
10567
09f2af64
PL
10568	/* Reinitialize the RSS indirection table according to the new RSS size */
10569 rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
10570 if (!rss_indir)
10571 return -ENOMEM;
10572
10573 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
10574 rss_indir[i] = i % kinfo->rss_size;
10575
10576 ret = hclge_set_rss(handle, rss_indir, NULL, 0);
10577 if (ret)
10578 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
10579 ret);
10580
10581 kfree(rss_indir);
10582
90c68a41 10583out:
09f2af64
PL
10584 if (!ret)
10585 dev_info(&hdev->pdev->dev,
adcf738b 10586 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
09f2af64
PL
10587 cur_rss_size, kinfo->rss_size,
10588 cur_tqps, kinfo->rss_size * kinfo->num_tc);
10589
10590 return ret;
10591}
10592
77b34110
FL
10593static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
10594 u32 *regs_num_64_bit)
10595{
10596 struct hclge_desc desc;
10597 u32 total_num;
10598 int ret;
10599
10600 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
10601 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10602 if (ret) {
10603 dev_err(&hdev->pdev->dev,
10604 "Query register number cmd failed, ret = %d.\n", ret);
10605 return ret;
10606 }
10607
10608 *regs_num_32_bit = le32_to_cpu(desc.data[0]);
10609 *regs_num_64_bit = le32_to_cpu(desc.data[1]);
10610
10611 total_num = *regs_num_32_bit + *regs_num_64_bit;
10612 if (!total_num)
10613 return -EINVAL;
10614
10615 return 0;
10616}
10617
10618static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10619 void *data)
10620{
10621#define HCLGE_32_BIT_REG_RTN_DATANUM 8
b37ce587 10622#define HCLGE_32_BIT_DESC_NODATA_LEN 2
77b34110
FL
10623
10624 struct hclge_desc *desc;
10625 u32 *reg_val = data;
10626 __le32 *desc_data;
b37ce587 10627 int nodata_num;
77b34110
FL
10628 int cmd_num;
10629 int i, k, n;
10630 int ret;
10631
10632 if (regs_num == 0)
10633 return 0;
10634
b37ce587
YM
10635 nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
10636 cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
10637 HCLGE_32_BIT_REG_RTN_DATANUM);
77b34110
FL
10638 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10639 if (!desc)
10640 return -ENOMEM;
10641
10642 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
10643 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10644 if (ret) {
10645 dev_err(&hdev->pdev->dev,
10646 "Query 32 bit register cmd failed, ret = %d.\n", ret);
10647 kfree(desc);
10648 return ret;
10649 }
10650
10651 for (i = 0; i < cmd_num; i++) {
10652 if (i == 0) {
10653 desc_data = (__le32 *)(&desc[i].data[0]);
b37ce587 10654 n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
77b34110
FL
10655 } else {
10656 desc_data = (__le32 *)(&desc[i]);
10657 n = HCLGE_32_BIT_REG_RTN_DATANUM;
10658 }
10659 for (k = 0; k < n; k++) {
10660 *reg_val++ = le32_to_cpu(*desc_data++);
10661
10662 regs_num--;
10663 if (!regs_num)
10664 break;
10665 }
10666 }
10667
10668 kfree(desc);
10669 return 0;
10670}
10671
10672static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10673 void *data)
10674{
10675#define HCLGE_64_BIT_REG_RTN_DATANUM 4
b37ce587 10676#define HCLGE_64_BIT_DESC_NODATA_LEN 1
77b34110
FL
10677
10678 struct hclge_desc *desc;
10679 u64 *reg_val = data;
10680 __le64 *desc_data;
b37ce587 10681 int nodata_len;
77b34110
FL
10682 int cmd_num;
10683 int i, k, n;
10684 int ret;
10685
10686 if (regs_num == 0)
10687 return 0;
10688
b37ce587
YM
10689 nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
10690 cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
10691 HCLGE_64_BIT_REG_RTN_DATANUM);
77b34110
FL
10692 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10693 if (!desc)
10694 return -ENOMEM;
10695
10696 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
10697 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10698 if (ret) {
10699 dev_err(&hdev->pdev->dev,
10700 "Query 64 bit register cmd failed, ret = %d.\n", ret);
10701 kfree(desc);
10702 return ret;
10703 }
10704
10705 for (i = 0; i < cmd_num; i++) {
10706 if (i == 0) {
10707 desc_data = (__le64 *)(&desc[i].data[0]);
b37ce587 10708 n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
77b34110
FL
10709 } else {
10710 desc_data = (__le64 *)(&desc[i]);
10711 n = HCLGE_64_BIT_REG_RTN_DATANUM;
10712 }
10713 for (k = 0; k < n; k++) {
10714 *reg_val++ = le64_to_cpu(*desc_data++);
10715
10716 regs_num--;
10717 if (!regs_num)
10718 break;
10719 }
10720 }
10721
10722 kfree(desc);
10723 return 0;
10724}
10725
ea4750ca 10726#define MAX_SEPARATE_NUM 4
ddb54554 10727#define SEPARATOR_VALUE 0xFDFCFBFA
ea4750ca
JS
10728#define REG_NUM_PER_LINE 4
10729#define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
ddb54554
GH
10730#define REG_SEPARATOR_LINE 1
10731#define REG_NUM_REMAIN_MASK 3
10732#define BD_LIST_MAX_NUM 30
ea4750ca 10733
ddb54554 10734int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
77b34110 10735{
ddb54554
GH
10736	/* prepare 4 commands to query DFX BD number */
10737 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true);
10738 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10739 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true);
10740 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10741 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true);
10742 desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10743 hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true);
10744
10745 return hclge_cmd_send(&hdev->hw, desc, 4);
10746}
10747
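/* Query how many BDs each DFX register type needs. The counts are spread
 * over the four query descriptors; hclge_dfx_bd_offset_list gives the
 * position of each type within the returned data.
 */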
10748static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
10749 int *bd_num_list,
10750 u32 type_num)
10751{
ddb54554 10752 u32 entries_per_desc, desc_index, index, offset, i;
9027d043 10753 struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
77b34110
FL
10754 int ret;
10755
ddb54554 10756 ret = hclge_query_bd_num_cmd_send(hdev, desc);
77b34110
FL
10757 if (ret) {
10758 dev_err(&hdev->pdev->dev,
ddb54554
GH
10759 "Get dfx bd num fail, status is %d.\n", ret);
10760 return ret;
77b34110
FL
10761 }
10762
ddb54554
GH
10763 entries_per_desc = ARRAY_SIZE(desc[0].data);
10764 for (i = 0; i < type_num; i++) {
10765 offset = hclge_dfx_bd_offset_list[i];
10766 index = offset % entries_per_desc;
10767 desc_index = offset / entries_per_desc;
10768 bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
10769 }
ea4750ca 10770
ddb54554 10771 return ret;
77b34110
FL
10772}
10773
ddb54554
GH
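/* Read one DFX register block: chain bd_num descriptors for the given
 * opcode and send them as a single command.
 */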
10774static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
10775 struct hclge_desc *desc_src, int bd_num,
10776 enum hclge_opcode_type cmd)
77b34110 10777{
ddb54554
GH
10778 struct hclge_desc *desc = desc_src;
10779 int i, ret;
10780
10781 hclge_cmd_setup_basic_desc(desc, cmd, true);
10782 for (i = 0; i < bd_num - 1; i++) {
10783 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10784 desc++;
10785 hclge_cmd_setup_basic_desc(desc, cmd, true);
10786 }
10787
10788 desc = desc_src;
10789 ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
10790 if (ret)
10791 dev_err(&hdev->pdev->dev,
10792 "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
10793 cmd, ret);
10794
10795 return ret;
10796}
10797
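/* Copy the register values from the chained descriptors into the output
 * buffer and pad the last line with separator words. Returns the number
 * of u32 words written.
 */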
10798static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
10799 void *data)
10800{
10801 int entries_per_desc, reg_num, separator_num, desc_index, index, i;
10802 struct hclge_desc *desc = desc_src;
ea4750ca 10803 u32 *reg = data;
ddb54554
GH
10804
10805 entries_per_desc = ARRAY_SIZE(desc->data);
10806 reg_num = entries_per_desc * bd_num;
10807 separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
10808 for (i = 0; i < reg_num; i++) {
10809 index = i % entries_per_desc;
10810 desc_index = i / entries_per_desc;
10811 *reg++ = le32_to_cpu(desc[desc_index].data[index]);
10812 }
10813 for (i = 0; i < separator_num; i++)
10814 *reg++ = SEPARATOR_VALUE;
10815
10816 return reg_num + separator_num;
10817}
10818
10819static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
10820{
10821 u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
72fa4904 10822 int data_len_per_desc, bd_num, i;
ddb54554 10823 int bd_num_list[BD_LIST_MAX_NUM];
72fa4904 10824 u32 data_len;
77b34110
FL
10825 int ret;
10826
ddb54554
GH
10827 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10828 if (ret) {
10829 dev_err(&hdev->pdev->dev,
10830 "Get dfx reg bd num fail, status is %d.\n", ret);
10831 return ret;
10832 }
77b34110 10833
c593642c 10834 data_len_per_desc = sizeof_field(struct hclge_desc, data);
ddb54554
GH
10835 *len = 0;
10836 for (i = 0; i < dfx_reg_type_num; i++) {
10837 bd_num = bd_num_list[i];
10838 data_len = data_len_per_desc * bd_num;
10839 *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
10840 }
10841
10842 return ret;
10843}
10844
10845static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
10846{
10847 u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10848 int bd_num, bd_num_max, buf_len, i;
10849 int bd_num_list[BD_LIST_MAX_NUM];
10850 struct hclge_desc *desc_src;
10851 u32 *reg = data;
10852 int ret;
10853
10854 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
77b34110
FL
10855 if (ret) {
10856 dev_err(&hdev->pdev->dev,
ddb54554
GH
10857 "Get dfx reg bd num fail, status is %d.\n", ret);
10858 return ret;
10859 }
10860
10861 bd_num_max = bd_num_list[0];
10862 for (i = 1; i < dfx_reg_type_num; i++)
10863 bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
10864
10865 buf_len = sizeof(*desc_src) * bd_num_max;
10866 desc_src = kzalloc(buf_len, GFP_KERNEL);
322cb97c 10867 if (!desc_src)
ddb54554 10868 return -ENOMEM;
77b34110 10869
ddb54554
GH
10870 for (i = 0; i < dfx_reg_type_num; i++) {
10871 bd_num = bd_num_list[i];
10872 ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
10873 hclge_dfx_reg_opcode_list[i]);
10874 if (ret) {
10875 dev_err(&hdev->pdev->dev,
10876 "Get dfx reg fail, status is %d.\n", ret);
10877 break;
10878 }
10879
10880 reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
10881 }
10882
10883 kfree(desc_src);
10884 return ret;
10885}
10886
10887static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
10888 struct hnae3_knic_private_info *kinfo)
10889{
10890#define HCLGE_RING_REG_OFFSET 0x200
10891#define HCLGE_RING_INT_REG_OFFSET 0x4
10892
10893 int i, j, reg_num, separator_num;
10894 int data_num_sum;
10895 u32 *reg = data;
10896
ea4750ca 10897	/* fetch per-PF register values from the PF PCIe register space */
ddb54554
GH
10898 reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
10899 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10900 for (i = 0; i < reg_num; i++)
ea4750ca
JS
10901 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
10902 for (i = 0; i < separator_num; i++)
10903 *reg++ = SEPARATOR_VALUE;
ddb54554 10904 data_num_sum = reg_num + separator_num;
ea4750ca 10905
ddb54554
GH
10906 reg_num = ARRAY_SIZE(common_reg_addr_list);
10907 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10908 for (i = 0; i < reg_num; i++)
ea4750ca
JS
10909 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
10910 for (i = 0; i < separator_num; i++)
10911 *reg++ = SEPARATOR_VALUE;
ddb54554 10912 data_num_sum += reg_num + separator_num;
ea4750ca 10913
ddb54554
GH
10914 reg_num = ARRAY_SIZE(ring_reg_addr_list);
10915 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
ea4750ca 10916 for (j = 0; j < kinfo->num_tqps; j++) {
ddb54554 10917 for (i = 0; i < reg_num; i++)
ea4750ca
JS
10918 *reg++ = hclge_read_dev(&hdev->hw,
10919 ring_reg_addr_list[i] +
ddb54554 10920 HCLGE_RING_REG_OFFSET * j);
ea4750ca
JS
10921 for (i = 0; i < separator_num; i++)
10922 *reg++ = SEPARATOR_VALUE;
10923 }
ddb54554 10924 data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
ea4750ca 10925
ddb54554
GH
10926 reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
10927 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
ea4750ca 10928 for (j = 0; j < hdev->num_msi_used - 1; j++) {
ddb54554 10929 for (i = 0; i < reg_num; i++)
ea4750ca
JS
10930 *reg++ = hclge_read_dev(&hdev->hw,
10931 tqp_intr_reg_addr_list[i] +
ddb54554 10932 HCLGE_RING_INT_REG_OFFSET * j);
ea4750ca
JS
10933 for (i = 0; i < separator_num; i++)
10934 *reg++ = SEPARATOR_VALUE;
10935 }
ddb54554
GH
10936 data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
10937
10938 return data_num_sum;
10939}
10940
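/* Total length in bytes of the register dump: the per-PF registers (cmdq,
 * common, per-ring and per-vector), the 32 bit and 64 bit registers queried
 * by command, and the DFX registers, each group padded to whole lines.
 */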
10941static int hclge_get_regs_len(struct hnae3_handle *handle)
10942{
10943 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
10944 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10945 struct hclge_vport *vport = hclge_get_vport(handle);
10946 struct hclge_dev *hdev = vport->back;
10947 int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
10948 int regs_lines_32_bit, regs_lines_64_bit;
10949 int ret;
10950
10951 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
10952 if (ret) {
10953 dev_err(&hdev->pdev->dev,
10954 "Get register number failed, ret = %d.\n", ret);
10955 return ret;
10956 }
10957
10958 ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
10959 if (ret) {
10960 dev_err(&hdev->pdev->dev,
10961 "Get dfx reg len failed, ret = %d.\n", ret);
10962 return ret;
10963 }
10964
10965 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
10966 REG_SEPARATOR_LINE;
10967 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
10968 REG_SEPARATOR_LINE;
10969 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
10970 REG_SEPARATOR_LINE;
10971 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
10972 REG_SEPARATOR_LINE;
10973 regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
10974 REG_SEPARATOR_LINE;
10975 regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
10976 REG_SEPARATOR_LINE;
10977
10978 return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
10979 tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
10980 regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
10981}
10982
10983static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
10984 void *data)
10985{
10986 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10987 struct hclge_vport *vport = hclge_get_vport(handle);
10988 struct hclge_dev *hdev = vport->back;
10989 u32 regs_num_32_bit, regs_num_64_bit;
10990 int i, reg_num, separator_num, ret;
10991 u32 *reg = data;
10992
10993 *version = hdev->fw_version;
10994
10995 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
10996 if (ret) {
10997 dev_err(&hdev->pdev->dev,
10998 "Get register number failed, ret = %d.\n", ret);
10999 return;
11000 }
11001
11002 reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
ea4750ca 11003
ea4750ca 11004 ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
77b34110
FL
11005 if (ret) {
11006 dev_err(&hdev->pdev->dev,
11007 "Get 32 bit register failed, ret = %d.\n", ret);
11008 return;
11009 }
ddb54554
GH
11010 reg_num = regs_num_32_bit;
11011 reg += reg_num;
11012 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11013 for (i = 0; i < separator_num; i++)
11014 *reg++ = SEPARATOR_VALUE;
77b34110 11015
ea4750ca 11016 ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
ddb54554 11017 if (ret) {
77b34110
FL
11018 dev_err(&hdev->pdev->dev,
11019 "Get 64 bit register failed, ret = %d.\n", ret);
ddb54554
GH
11020 return;
11021 }
11022 reg_num = regs_num_64_bit * 2;
11023 reg += reg_num;
11024 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11025 for (i = 0; i < separator_num; i++)
11026 *reg++ = SEPARATOR_VALUE;
11027
11028 ret = hclge_get_dfx_reg(hdev, reg);
11029 if (ret)
11030 dev_err(&hdev->pdev->dev,
11031 "Get dfx register failed, ret = %d.\n", ret);
77b34110
FL
11032}
11033
f6f75abc 11034static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
07f8e940
JS
11035{
11036 struct hclge_set_led_state_cmd *req;
11037 struct hclge_desc desc;
11038 int ret;
11039
11040 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
11041
11042 req = (struct hclge_set_led_state_cmd *)desc.data;
e4e87715
PL
11043 hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
11044 HCLGE_LED_LOCATE_STATE_S, locate_led_status);
07f8e940
JS
11045
11046 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11047 if (ret)
11048 dev_err(&hdev->pdev->dev,
11049 "Send set led state cmd error, ret =%d\n", ret);
11050
11051 return ret;
11052}
11053
11054enum hclge_led_status {
11055 HCLGE_LED_OFF,
11056 HCLGE_LED_ON,
11057 HCLGE_LED_NO_CHANGE = 0xFF,
11058};
11059
11060static int hclge_set_led_id(struct hnae3_handle *handle,
11061 enum ethtool_phys_id_state status)
11062{
07f8e940
JS
11063 struct hclge_vport *vport = hclge_get_vport(handle);
11064 struct hclge_dev *hdev = vport->back;
07f8e940
JS
11065
11066 switch (status) {
11067 case ETHTOOL_ID_ACTIVE:
f6f75abc 11068 return hclge_set_led_status(hdev, HCLGE_LED_ON);
07f8e940 11069 case ETHTOOL_ID_INACTIVE:
f6f75abc 11070 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
07f8e940 11071 default:
f6f75abc 11072 return -EINVAL;
07f8e940 11073 }
07f8e940
JS
11074}
11075
0979aa0b
FL
11076static void hclge_get_link_mode(struct hnae3_handle *handle,
11077 unsigned long *supported,
11078 unsigned long *advertising)
11079{
11080 unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
11081 struct hclge_vport *vport = hclge_get_vport(handle);
11082 struct hclge_dev *hdev = vport->back;
11083 unsigned int idx = 0;
11084
11085 for (; idx < size; idx++) {
11086 supported[idx] = hdev->hw.mac.supported[idx];
11087 advertising[idx] = hdev->hw.mac.advertising[idx];
11088 }
11089}
11090
1731be4c 11091static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
5c9f6b39
PL
11092{
11093 struct hclge_vport *vport = hclge_get_vport(handle);
11094 struct hclge_dev *hdev = vport->back;
11095
11096 return hclge_config_gro(hdev, enable);
11097}
11098
c631c696
JS
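/* Re-apply the PF promiscuous mode (and the VLAN filter state) when the
 * overflow promisc flags have changed or a previous update is still pending.
 */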
11099static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
11100{
11101 struct hclge_vport *vport = &hdev->vport[0];
11102 struct hnae3_handle *handle = &vport->nic;
11103 u8 tmp_flags = 0;
11104 int ret;
11105
11106 if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
11107 set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11108 vport->last_promisc_flags = vport->overflow_promisc_flags;
11109 }
11110
11111 if (test_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state)) {
11112 tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
11113 ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
11114 tmp_flags & HNAE3_MPE);
11115 if (!ret) {
11116 clear_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11117 hclge_enable_vlan_filter(handle,
11118 tmp_flags & HNAE3_VLAN_FLTR);
11119 }
11120 }
11121}
11122
cb10228d
YL
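/* Query whether an SFP module is currently present; returns false on
 * command failure or when no module is plugged in.
 */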
11123static bool hclge_module_existed(struct hclge_dev *hdev)
11124{
11125 struct hclge_desc desc;
11126 u32 existed;
11127 int ret;
11128
11129 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
11130 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11131 if (ret) {
11132 dev_err(&hdev->pdev->dev,
11133 "failed to get SFP exist state, ret = %d\n", ret);
11134 return false;
11135 }
11136
11137 existed = le32_to_cpu(desc.data[0]);
11138
11139 return existed != 0;
11140}
11141
11142/* need 6 BDs (total 140 bytes) in one reading;
11143 * return the number of bytes actually read, 0 means the read failed.
11144 */
11145static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
11146 u32 len, u8 *data)
11147{
11148 struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
11149 struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
11150 u16 read_len;
11151 u16 copy_len;
11152 int ret;
11153 int i;
11154
11155	/* set up all 6 BDs to read module eeprom info. */
11156 for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
11157 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
11158 true);
11159
11160 /* bd0~bd4 need next flag */
11161 if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
11162 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
11163 }
11164
11165	/* set up bd0; this bd contains the offset and read length. */
11166 sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
11167 sfp_info_bd0->offset = cpu_to_le16((u16)offset);
11168 read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
11169 sfp_info_bd0->read_len = cpu_to_le16(read_len);
11170
11171 ret = hclge_cmd_send(&hdev->hw, desc, i);
11172 if (ret) {
11173 dev_err(&hdev->pdev->dev,
11174 "failed to get SFP eeprom info, ret = %d\n", ret);
11175 return 0;
11176 }
11177
11178 /* copy sfp info from bd0 to out buffer. */
11179 copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
11180 memcpy(data, sfp_info_bd0->data, copy_len);
11181 read_len = copy_len;
11182
11183 /* copy sfp info from bd1~bd5 to out buffer if needed. */
11184 for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
11185 if (read_len >= len)
11186 return read_len;
11187
11188 copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
11189 memcpy(data + read_len, desc[i].data, copy_len);
11190 read_len += copy_len;
11191 }
11192
11193 return read_len;
11194}
11195
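/* Read the module EEPROM in chunks of at most HCLGE_SFP_INFO_MAX_LEN bytes
 * until the requested length has been copied; only supported on fiber ports.
 */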
11196static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
11197 u32 len, u8 *data)
11198{
11199 struct hclge_vport *vport = hclge_get_vport(handle);
11200 struct hclge_dev *hdev = vport->back;
11201 u32 read_len = 0;
11202 u16 data_len;
11203
11204 if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
11205 return -EOPNOTSUPP;
11206
11207 if (!hclge_module_existed(hdev))
11208 return -ENXIO;
11209
11210 while (read_len < len) {
11211 data_len = hclge_get_sfp_eeprom_info(hdev,
11212 offset + read_len,
11213 len - read_len,
11214 data + read_len);
11215 if (!data_len)
11216 return -EIO;
11217
11218 read_len += data_len;
11219 }
11220
11221 return 0;
11222}
11223
46a3df9f
S
11224static const struct hnae3_ae_ops hclge_ops = {
11225 .init_ae_dev = hclge_init_ae_dev,
11226 .uninit_ae_dev = hclge_uninit_ae_dev,
6b9a97ee
HT
11227 .flr_prepare = hclge_flr_prepare,
11228 .flr_done = hclge_flr_done,
46a3df9f
S
11229 .init_client_instance = hclge_init_client_instance,
11230 .uninit_client_instance = hclge_uninit_client_instance,
84e095d6
SM
11231 .map_ring_to_vector = hclge_map_ring_to_vector,
11232 .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
46a3df9f 11233 .get_vector = hclge_get_vector,
0d3e6631 11234 .put_vector = hclge_put_vector,
46a3df9f 11235 .set_promisc_mode = hclge_set_promisc_mode,
c631c696 11236 .request_update_promisc_mode = hclge_request_update_promisc_mode,
c39c4d98 11237 .set_loopback = hclge_set_loopback,
46a3df9f
S
11238 .start = hclge_ae_start,
11239 .stop = hclge_ae_stop,
a6d818e3
YL
11240 .client_start = hclge_client_start,
11241 .client_stop = hclge_client_stop,
46a3df9f
S
11242 .get_status = hclge_get_status,
11243 .get_ksettings_an_result = hclge_get_ksettings_an_result,
46a3df9f
S
11244 .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
11245 .get_media_type = hclge_get_media_type,
22f48e24 11246 .check_port_speed = hclge_check_port_speed,
7e6ec914
JS
11247 .get_fec = hclge_get_fec,
11248 .set_fec = hclge_set_fec,
46a3df9f
S
11249 .get_rss_key_size = hclge_get_rss_key_size,
11250 .get_rss_indir_size = hclge_get_rss_indir_size,
11251 .get_rss = hclge_get_rss,
11252 .set_rss = hclge_set_rss,
f7db940a 11253 .set_rss_tuple = hclge_set_rss_tuple,
07d29954 11254 .get_rss_tuple = hclge_get_rss_tuple,
46a3df9f
S
11255 .get_tc_size = hclge_get_tc_size,
11256 .get_mac_addr = hclge_get_mac_addr,
11257 .set_mac_addr = hclge_set_mac_addr,
26483246 11258 .do_ioctl = hclge_do_ioctl,
46a3df9f
S
11259 .add_uc_addr = hclge_add_uc_addr,
11260 .rm_uc_addr = hclge_rm_uc_addr,
11261 .add_mc_addr = hclge_add_mc_addr,
11262 .rm_mc_addr = hclge_rm_mc_addr,
11263 .set_autoneg = hclge_set_autoneg,
11264 .get_autoneg = hclge_get_autoneg,
22f48e24 11265 .restart_autoneg = hclge_restart_autoneg,
7786a996 11266 .halt_autoneg = hclge_halt_autoneg,
46a3df9f 11267 .get_pauseparam = hclge_get_pauseparam,
61387774 11268 .set_pauseparam = hclge_set_pauseparam,
46a3df9f
S
11269 .set_mtu = hclge_set_mtu,
11270 .reset_queue = hclge_reset_tqp,
11271 .get_stats = hclge_get_stats,
615466ce 11272 .get_mac_stats = hclge_get_mac_stat,
46a3df9f
S
11273 .update_stats = hclge_update_stats,
11274 .get_strings = hclge_get_strings,
11275 .get_sset_count = hclge_get_sset_count,
11276 .get_fw_version = hclge_get_fw_version,
11277 .get_mdix_mode = hclge_get_mdix_mode,
391b5e93 11278 .enable_vlan_filter = hclge_enable_vlan_filter,
dc8131d8 11279 .set_vlan_filter = hclge_set_vlan_filter,
46a3df9f 11280 .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
052ece6d 11281 .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
4ed340ab 11282 .reset_event = hclge_reset_event,
123297b7 11283 .get_reset_level = hclge_get_reset_level,
720bd583 11284 .set_default_reset_request = hclge_set_def_reset_request,
09f2af64
PL
11285 .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
11286 .set_channels = hclge_set_channels,
482d2e9c 11287 .get_channels = hclge_get_channels,
77b34110
FL
11288 .get_regs_len = hclge_get_regs_len,
11289 .get_regs = hclge_get_regs,
07f8e940 11290 .set_led_id = hclge_set_led_id,
0979aa0b 11291 .get_link_mode = hclge_get_link_mode,
dd74f815
JS
11292 .add_fd_entry = hclge_add_fd_entry,
11293 .del_fd_entry = hclge_del_fd_entry,
6871af29 11294 .del_all_fd_entries = hclge_del_all_fd_entries,
05c2314f
JS
11295 .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
11296 .get_fd_rule_info = hclge_get_fd_rule_info,
11297 .get_fd_all_rules = hclge_get_all_rules,
c17852a8 11298 .enable_fd = hclge_enable_fd,
d93ed94f 11299 .add_arfs_entry = hclge_add_fd_entry_by_arfs,
3c666b58 11300 .dbg_run_cmd = hclge_dbg_run_cmd,
381c356e 11301 .handle_hw_ras_error = hclge_handle_hw_ras_error,
4d60291b
HT
11302 .get_hw_reset_stat = hclge_get_hw_reset_stat,
11303 .ae_dev_resetting = hclge_ae_dev_resetting,
11304 .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
5c9f6b39 11305 .set_gro_en = hclge_gro_en,
0c29d191 11306 .get_global_queue_id = hclge_covert_handle_qid_global,
8cdb992f 11307 .set_timer_task = hclge_set_timer_task,
c8a8045b
HT
11308 .mac_connect_phy = hclge_mac_connect_phy,
11309 .mac_disconnect_phy = hclge_mac_disconnect_phy,
6430f744
YM
11310 .get_vf_config = hclge_get_vf_config,
11311 .set_vf_link_state = hclge_set_vf_link_state,
22044f95 11312 .set_vf_spoofchk = hclge_set_vf_spoofchk,
e196ec75 11313 .set_vf_trust = hclge_set_vf_trust,
ee9e4424 11314 .set_vf_rate = hclge_set_vf_rate,
8e6de441 11315 .set_vf_mac = hclge_set_vf_mac,
cb10228d 11316 .get_module_eeprom = hclge_get_module_eeprom,
a4de0228 11317 .get_cmdq_stat = hclge_get_cmdq_stat,
46a3df9f
S
11318};
11319
11320static struct hnae3_ae_algo ae_algo = {
11321 .ops = &hclge_ops,
46a3df9f
S
11322 .pdev_id_table = ae_algo_pci_tbl,
11323};
11324
11325static int hclge_init(void)
11326{
11327 pr_info("%s is initializing\n", HCLGE_NAME);
11328
16deaef2 11329 hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
0ea68902
YL
11330 if (!hclge_wq) {
11331 pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
11332 return -ENOMEM;
11333 }
11334
854cf33a
FL
11335 hnae3_register_ae_algo(&ae_algo);
11336
11337 return 0;
46a3df9f
S
11338}
11339
11340static void hclge_exit(void)
11341{
11342 hnae3_unregister_ae_algo(&ae_algo);
0ea68902 11343 destroy_workqueue(hclge_wq);
46a3df9f
S
11344}
11345module_init(hclge_init);
11346module_exit(hclge_exit);
11347
11348MODULE_LICENSE("GPL");
11349MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
11350MODULE_DESCRIPTION("HCLGE Driver");
11351MODULE_VERSION(HCLGE_MOD_VERSION);