// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
#include <net/ipv6.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"

#define HCLGE_NAME			"hclge"
#define HCLGE_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset)))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))

#define HCLGE_BUF_SIZE_UNIT	256U
#define HCLGE_BUF_MUL_BY	2
#define HCLGE_BUF_DIV_BY	2
#define NEED_RESERVE_TC_NUM	2
#define BUF_MAX_PERCENT		100
#define BUF_RESERVE_PERCENT	90

#define HCLGE_RESET_MAX_FAIL_CNT	5
#define HCLGE_RESET_SYNC_TIME		100
#define HCLGE_PF_RESET_SYNC_TIME	20
#define HCLGE_PF_RESET_SYNC_CNT		1500

/* Get DFX BD number offset */
#define HCLGE_DFX_BIOS_BD_OFFSET	1
#define HCLGE_DFX_SSU_0_BD_OFFSET	2
#define HCLGE_DFX_SSU_1_BD_OFFSET	3
#define HCLGE_DFX_IGU_BD_OFFSET		4
#define HCLGE_DFX_RPU_0_BD_OFFSET	5
#define HCLGE_DFX_RPU_1_BD_OFFSET	6
#define HCLGE_DFX_NCSI_BD_OFFSET	7
#define HCLGE_DFX_RTC_BD_OFFSET		8
#define HCLGE_DFX_PPP_BD_OFFSET		9
#define HCLGE_DFX_RCB_BD_OFFSET		10
#define HCLGE_DFX_TQP_BD_OFFSET		11
#define HCLGE_DFX_SSU_2_BD_OFFSET	12

#define HCLGE_LINK_STATUS_MS	10

static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
						   unsigned long *addr);
static int hclge_set_default_loopback(struct hclge_dev *hdev);

static void hclge_sync_mac_table(struct hclge_dev *hdev);
static void hclge_restore_hw_table(struct hclge_dev *hdev);
static void hclge_sync_promisc_mode(struct hclge_dev *hdev);

static struct hnae3_ae_algo ae_algo;

static struct workqueue_struct *hclge_wq;

static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);

static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
					 HCLGE_CMDQ_TX_ADDR_H_REG,
					 HCLGE_CMDQ_TX_DEPTH_REG,
					 HCLGE_CMDQ_TX_TAIL_REG,
					 HCLGE_CMDQ_TX_HEAD_REG,
					 HCLGE_CMDQ_RX_ADDR_L_REG,
					 HCLGE_CMDQ_RX_ADDR_H_REG,
					 HCLGE_CMDQ_RX_DEPTH_REG,
					 HCLGE_CMDQ_RX_TAIL_REG,
					 HCLGE_CMDQ_RX_HEAD_REG,
					 HCLGE_VECTOR0_CMDQ_SRC_REG,
					 HCLGE_CMDQ_INTR_STS_REG,
					 HCLGE_CMDQ_INTR_EN_REG,
					 HCLGE_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
					   HCLGE_VECTOR0_OTER_EN_REG,
					   HCLGE_MISC_RESET_STS_REG,
					   HCLGE_MISC_VECTOR_INT_STS,
					   HCLGE_GLOBAL_RESET_REG,
					   HCLGE_FUN_RST_ING,
					   HCLGE_GRO_EN_REG};

static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
					 HCLGE_RING_RX_ADDR_H_REG,
					 HCLGE_RING_RX_BD_NUM_REG,
					 HCLGE_RING_RX_BD_LENGTH_REG,
					 HCLGE_RING_RX_MERGE_EN_REG,
					 HCLGE_RING_RX_TAIL_REG,
					 HCLGE_RING_RX_HEAD_REG,
					 HCLGE_RING_RX_FBD_NUM_REG,
					 HCLGE_RING_RX_OFFSET_REG,
					 HCLGE_RING_RX_FBD_OFFSET_REG,
					 HCLGE_RING_RX_STASH_REG,
					 HCLGE_RING_RX_BD_ERR_REG,
					 HCLGE_RING_TX_ADDR_L_REG,
					 HCLGE_RING_TX_ADDR_H_REG,
					 HCLGE_RING_TX_BD_NUM_REG,
					 HCLGE_RING_TX_PRIORITY_REG,
					 HCLGE_RING_TX_TC_REG,
					 HCLGE_RING_TX_MERGE_EN_REG,
					 HCLGE_RING_TX_TAIL_REG,
					 HCLGE_RING_TX_HEAD_REG,
					 HCLGE_RING_TX_FBD_NUM_REG,
					 HCLGE_RING_TX_OFFSET_REG,
					 HCLGE_RING_TX_EBD_NUM_REG,
					 HCLGE_RING_TX_EBD_OFFSET_REG,
					 HCLGE_RING_TX_BD_ERR_REG,
					 HCLGE_RING_EN_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
					     HCLGE_TQP_INTR_GL0_REG,
					     HCLGE_TQP_INTR_GL1_REG,
					     HCLGE_TQP_INTR_GL2_REG,
					     HCLGE_TQP_INTR_RL_REG};

static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"App Loopback test",
	"Serdes serial Loopback test",
	"Serdes parallel Loopback test",
	"Phy Loopback test"
};

static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
	{"mac_rx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
	{"mac_tx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};

static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(ETH_P_LLDP),
		.mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
		.i_port_bitmap = 0x1,
	},
};

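/* Default RSS hash key below: the well-known 40-byte key shared by many NIC
 * drivers, programmed into hardware when RSS is initialised.
 */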
static const u8 hclge_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};

static const u32 hclge_dfx_bd_offset_list[] = {
	HCLGE_DFX_BIOS_BD_OFFSET,
	HCLGE_DFX_SSU_0_BD_OFFSET,
	HCLGE_DFX_SSU_1_BD_OFFSET,
	HCLGE_DFX_IGU_BD_OFFSET,
	HCLGE_DFX_RPU_0_BD_OFFSET,
	HCLGE_DFX_RPU_1_BD_OFFSET,
	HCLGE_DFX_NCSI_BD_OFFSET,
	HCLGE_DFX_RTC_BD_OFFSET,
	HCLGE_DFX_PPP_BD_OFFSET,
	HCLGE_DFX_RCB_BD_OFFSET,
	HCLGE_DFX_TQP_BD_OFFSET,
	HCLGE_DFX_SSU_2_BD_OFFSET
};

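/* Firmware query opcodes for the DFX register blocks; kept in the same order
 * as hclge_dfx_bd_offset_list above so that index i pairs a BD-number offset
 * with its query opcode.
 */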
static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
	HCLGE_OPC_DFX_BIOS_COMMON_REG,
	HCLGE_OPC_DFX_SSU_REG_0,
	HCLGE_OPC_DFX_SSU_REG_1,
	HCLGE_OPC_DFX_IGU_EGU_REG,
	HCLGE_OPC_DFX_RPU_REG_0,
	HCLGE_OPC_DFX_RPU_REG_1,
	HCLGE_OPC_DFX_NCSI_REG,
	HCLGE_OPC_DFX_RTC_REG,
	HCLGE_OPC_DFX_PPP_REG,
	HCLGE_OPC_DFX_RCB_REG,
	HCLGE_OPC_DFX_TQP_REG,
	HCLGE_OPC_DFX_SSU_REG_2
};

static const struct key_info meta_data_key_info[] = {
	{ PACKET_TYPE_ID, 6},
	{ IP_FRAGEMENT, 1},
	{ ROCE_TYPE, 1},
	{ NEXT_KEY, 5},
	{ VLAN_NUMBER, 2},
	{ SRC_VPORT, 12},
	{ DST_VPORT, 12},
	{ TUNNEL_PACKET, 1},
};

static const struct key_info tuple_key_info[] = {
	{ OUTER_DST_MAC, 48},
	{ OUTER_SRC_MAC, 48},
	{ OUTER_VLAN_TAG_FST, 16},
	{ OUTER_VLAN_TAG_SEC, 16},
	{ OUTER_ETH_TYPE, 16},
	{ OUTER_L2_RSV, 16},
	{ OUTER_IP_TOS, 8},
	{ OUTER_IP_PROTO, 8},
	{ OUTER_SRC_IP, 32},
	{ OUTER_DST_IP, 32},
	{ OUTER_L3_RSV, 16},
	{ OUTER_SRC_PORT, 16},
	{ OUTER_DST_PORT, 16},
	{ OUTER_L4_RSV, 32},
	{ OUTER_TUN_VNI, 24},
	{ OUTER_TUN_FLOW_ID, 8},
	{ INNER_DST_MAC, 48},
	{ INNER_SRC_MAC, 48},
	{ INNER_VLAN_TAG_FST, 16},
	{ INNER_VLAN_TAG_SEC, 16},
	{ INNER_ETH_TYPE, 16},
	{ INNER_L2_RSV, 16},
	{ INNER_IP_TOS, 8},
	{ INNER_IP_PROTO, 8},
	{ INNER_SRC_IP, 32},
	{ INNER_DST_IP, 32},
	{ INNER_L3_RSV, 16},
	{ INNER_SRC_PORT, 16},
	{ INNER_DST_PORT, 16},
	{ INNER_L4_RSV, 32},
};

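/* The second member of each key_info entry above is the field width in bits
 * (48-bit MAC addresses, 32-bit IPv4 addresses, 16-bit VLAN tags and L4
 * ports); these tables are consulted when flow director lookup keys are laid
 * out.
 */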
static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21

	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);

		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		/* for special opcode 0032, only the first desc has the head */
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	return 0;
}

static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
{
	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc *desc;
	__le64 *desc_data;
	u16 i, k, n;
	int ret;

	/* This may be called inside atomic sections,
	 * so GFP_ATOMIC is more suitable here
	 */
	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
	if (ret) {
		kfree(desc);
		return ret;
	}

	for (i = 0; i < desc_num; i++) {
		/* for special opcode 0034, only the first desc has the head */
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	kfree(desc);

	return 0;
}

static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
{
	struct hclge_desc desc;
	__le32 *desc_data;
	u32 reg_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	desc_data = (__le32 *)(&desc.data[0]);
	reg_num = le32_to_cpu(*desc_data);

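	/* Convert the register count into the number of command descriptors
	 * needed to read them all: the calculation below assumes the first
	 * descriptor carries three counters and each following descriptor
	 * carries four, rounding up.
	 */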
	*desc_num = 1 + ((reg_num - 3) >> 2) +
		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);

	return 0;
}

static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
	u32 desc_num;
	int ret;

	ret = hclge_mac_query_reg_num(hdev, &desc_num);

	/* The firmware supports the new statistics acquisition method */
	if (!ret)
		ret = hclge_mac_update_stats_complete(hdev, desc_num);
	else if (ret == -EOPNOTSUPP)
		ret = hclge_mac_update_stats_defective(hdev);
	else
		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");

	return ret;
}

static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
					   true);

		desc[0].data[0] = cpu_to_le32(tqp->index);
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATS,
					   true);

		desc[0].data[0] = cpu_to_le32(tqp->index);
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}

static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	/* each tqp has both a TX and an RX queue */
	return kinfo->num_tqps * (2);
}

static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}

static u64 *hclge_comm_get_stats(const void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}

static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
		}
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);
}

static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int status;

	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}

static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
				   HNAE3_SUPPORT_PHY_LOOPBACK |\
				   HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
				   HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: supported only in GE mode
	 * serdes: supported by all mac modes (GE/XGE/LGE/CGE)
	 * phy: supported only when a phy device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
		}

		count += 2;
		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;

		if (hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
		    hdev->hw.mac.phydev->drv->set_loopback) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
		}

	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}

static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
			      u8 *data)
{
	u8 *p = (char *)data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
					   size, p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}

static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string), data);
	p = hclge_tqps_get_stats(handle, p);
}

static void hclge_get_mac_stat(struct hnae3_handle *handle,
			       struct hns3_mac_stats *mac_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_update_stats(handle, NULL);

	mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
	mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
}

static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
#define HCLGE_MAC_ID_MASK	0xF

	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
	return 0;
}

static int hclge_query_function_status(struct hclge_dev *hdev)
{
#define HCLGE_QUERY_MAX_CNT	5

	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n", ret);
			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < HCLGE_QUERY_MAX_CNT);

	return hclge_parse_func_status(hdev, req);
}

static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = le16_to_cpu(req->tqp_num) +
			 le16_to_cpu(req->ext_tqp_num);
	hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (req->tx_buf_size)
		hdev->tx_buf_size =
			le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (req->dv_buf_size)
		hdev->dv_buf_size =
			le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->dv_buf_size = HCLGE_DEFAULT_DV;

	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

	hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
		dev_err(&hdev->pdev->dev,
			"only %u msi resources available, not enough for pf(min:2).\n",
			hdev->num_nic_msi);
		return -EINVAL;
	}

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->num_roce_msi =
			le16_to_cpu(req->pf_intr_vector_number_roce);

		/* PF should have NIC vectors and Roce vectors,
		 * NIC vectors are queued before Roce vectors.
		 */
		hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
	} else {
		hdev->num_msi = hdev->num_nic_msi;
	}

	return 0;
}

static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
{
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	case 8:
		*speed = HCLGE_MAC_SPEED_200G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 speed_ability = hdev->hw.mac.speed_ability;
	u32 speed_bit = 0;

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		speed_bit = HCLGE_SUPPORT_10M_BIT;
		break;
	case HCLGE_MAC_SPEED_100M:
		speed_bit = HCLGE_SUPPORT_100M_BIT;
		break;
	case HCLGE_MAC_SPEED_1G:
		speed_bit = HCLGE_SUPPORT_1G_BIT;
		break;
	case HCLGE_MAC_SPEED_10G:
		speed_bit = HCLGE_SUPPORT_10G_BIT;
		break;
	case HCLGE_MAC_SPEED_25G:
		speed_bit = HCLGE_SUPPORT_25G_BIT;
		break;
	case HCLGE_MAC_SPEED_40G:
		speed_bit = HCLGE_SUPPORT_40G_BIT;
		break;
	case HCLGE_MAC_SPEED_50G:
		speed_bit = HCLGE_SUPPORT_50G_BIT;
		break;
	case HCLGE_MAC_SPEED_100G:
		speed_bit = HCLGE_SUPPORT_100G_BIT;
		break;
	case HCLGE_MAC_SPEED_200G:
		speed_bit = HCLGE_SUPPORT_200G_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (speed_bit & speed_ability)
		return 0;

	return -EINVAL;
}

static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(
			ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
			mac->supported);
}

static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_fec(struct hclge_mac *mac)
{
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);

	switch (mac->speed) {
	case HCLGE_MAC_SPEED_10G:
	case HCLGE_MAC_SPEED_40G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_25G:
	case HCLGE_MAC_SPEED_50G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
			BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_100G:
	case HCLGE_MAC_SPEED_200G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
		break;
	default:
		mac->fec_ability = 0;
		break;
	}
}

static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u16 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
				 mac->supported);

	hclge_convert_setting_sr(mac, speed_ability);
	hclge_convert_setting_lr(mac, speed_ability);
	hclge_convert_setting_cr(mac, speed_ability);
	if (hnae3_dev_fec_supported(hdev))
		hclge_convert_setting_fec(mac);

	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
					    u16 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_convert_setting_kr(mac, speed_ability);
	if (hnae3_dev_fec_supported(hdev))
		hclge_convert_setting_fec(mac);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
					 u16 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	/* default to support all speed for GE port */
	if (!speed_ability)
		speed_ability = HCLGE_SUPPORT_GE;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
				 supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
				 supported);
	}

	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
	}

	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
}

static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
		hclge_parse_fiber_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
		hclge_parse_copper_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
		hclge_parse_backplane_link_mode(hdev, speed_ability);
}

static u32 hclge_get_max_speed(u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		return HCLGE_MAC_SPEED_200G;

	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		return HCLGE_MAC_SPEED_100G;

	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		return HCLGE_MAC_SPEED_50G;

	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		return HCLGE_MAC_SPEED_40G;

	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		return HCLGE_MAC_SPEED_25G;

	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		return HCLGE_MAC_SPEED_10G;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		return HCLGE_MAC_SPEED_1G;

	if (speed_ability & HCLGE_SUPPORT_100M_BIT)
		return HCLGE_MAC_SPEED_100M;

	if (speed_ability & HCLGE_SUPPORT_10M_BIT)
		return HCLGE_MAC_SPEED_10M;

	return HCLGE_MAC_SPEED_1G;
}

static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
#define SPEED_ABILITY_EXT_SHIFT	8

	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u16 speed_ability_ext;
	u64 mac_addr_tmp;
	unsigned int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					      HCLGE_CFG_VMDQ_M,
					      HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					       HCLGE_CFG_RSS_SIZE_M,
					       HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
	speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
					    HCLGE_CFG_SPEED_ABILITY_EXT_M,
					    HCLGE_CFG_SPEED_ABILITY_EXT_S);
	cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;

	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);
	if (!cfg->umv_space)
		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;

	cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
					       HCLGE_CFG_PF_RSS_SIZE_M,
					       HCLGE_CFG_PF_RSS_SIZE_S);

	/* HCLGE_CFG_PF_RSS_SIZE_M holds the PF max rss size as a power of 2
	 * rather than the value itself, which is more flexible for future
	 * changes and expansions.
	 * When the VF max rss size field is HCLGE_CFG_RSS_SIZE_S, a PF field
	 * of 0 does not make sense; in that case PF and VF share the same
	 * max rss size field: HCLGE_CFG_RSS_SIZE_S.
	 */
	cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
			       1U << cfg->pf_rss_size_max :
			       cfg->vf_rss_size_max;
}

/* hclge_get_cfg: query the static parameter from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	unsigned int i;
	int ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* Length should be in units of 4 bytes when sent to hardware */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}

static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
{
#define HCLGE_MAX_NON_TSO_BD_NUM	8U

	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);

	ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
	ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
	ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
	ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
	ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
	ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
	ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
}

static void hclge_parse_dev_specs(struct hclge_dev *hdev,
				  struct hclge_desc *desc)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclge_dev_specs_0_cmd *req0;
	struct hclge_dev_specs_1_cmd *req1;

	req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
	req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;

	ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
	ae_dev->dev_specs.rss_ind_tbl_size =
		le16_to_cpu(req0->rss_ind_tbl_size);
	ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
	ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
	ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
	ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
	ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
	ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
}

static void hclge_check_dev_specs(struct hclge_dev *hdev)
{
	struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;

	if (!dev_specs->max_non_tso_bd_num)
		dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
	if (!dev_specs->rss_ind_tbl_size)
		dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
	if (!dev_specs->rss_key_size)
		dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
	if (!dev_specs->max_tm_rate)
		dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
	if (!dev_specs->max_qset_num)
		dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
	if (!dev_specs->max_int_gl)
		dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
	if (!dev_specs->max_frm_size)
		dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
}

static int hclge_query_dev_specs(struct hclge_dev *hdev)
{
	struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
	int ret;
	int i;

	/* set default specifications as devices lower than version V3 do not
	 * support querying specifications from firmware.
	 */
	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
		hclge_set_default_dev_specs(hdev);
		return 0;
	}

	for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
					   true);
		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	}
	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
	if (ret)
		return ret;

	hclge_parse_dev_specs(hdev, desc);
	hclge_check_dev_specs(hdev);

	return 0;
}

static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	return hclge_query_pf_resource(hdev);
}

static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
{
#define HCLGE_MIN_TX_DESC	64
#define HCLGE_MIN_RX_DESC	64

	if (!is_kdump_kernel())
		return;

	dev_info(&hdev->pdev->dev,
		 "Running kdump kernel. Using minimal resources\n");

	/* minimal queue pairs equal to the number of vports */
	hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
}

static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	unsigned int i;
	int ret;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret)
		return ret;

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->vf_rss_size_max = cfg.vf_rss_size_max;
	hdev->pf_rss_size_max = cfg.pf_rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_tx_desc = cfg.tqp_desc_num;
	hdev->num_rx_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;
	hdev->wanted_umv_size = cfg.umv_space;

	if (hnae3_dev_fd_supported(hdev)) {
		hdev->fd_en = true;
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
	}

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
			cfg.default_speed, ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = 1;

	/* Currently non-contiguous TCs are not supported */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	hclge_init_kdump_kernel_config(hdev);

	/* Set the init affinity based on pci func number */
	i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
	i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
	cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
			&hdev->affinity_mask);

	return ret;
}

static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
			    u16 tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
	req->tso_mss_min = cpu_to_le16(tso_mss_min);
	req->tso_mss_max = cpu_to_le16(tso_mss_max);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_config_gro(struct hclge_dev *hdev, bool en)
{
	struct hclge_cfg_gro_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
	req = (struct hclge_cfg_gro_status_cmd *)desc.data;

	req->gro_en = en ? 1 : 0;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"GRO hardware config cmd failed, ret = %d\n", ret);

	return ret;
}

1595static int hclge_alloc_tqps(struct hclge_dev *hdev)
1596{
1597 struct hclge_tqp *tqp;
1598 int i;
1599
1600 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1601 sizeof(struct hclge_tqp), GFP_KERNEL);
1602 if (!hdev->htqp)
1603 return -ENOMEM;
1604
1605 tqp = hdev->htqp;
1606
1607 for (i = 0; i < hdev->num_tqps; i++) {
1608 tqp->dev = &hdev->pdev->dev;
1609 tqp->index = i;
1610
1611 tqp->q.ae_algo = &ae_algo;
1612 tqp->q.buf_size = hdev->rx_buf_len;
c0425944
PL
1613 tqp->q.tx_desc_num = hdev->num_tx_desc;
1614 tqp->q.rx_desc_num = hdev->num_rx_desc;
9a5ef4aa
YL
1615
1616 /* need an extended offset to configure queues >=
1617 * HCLGE_TQP_MAX_SIZE_DEV_V2
1618 */
1619 if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
1620 tqp->q.io_base = hdev->hw.io_base +
1621 HCLGE_TQP_REG_OFFSET +
1622 i * HCLGE_TQP_REG_SIZE;
1623 else
1624 tqp->q.io_base = hdev->hw.io_base +
1625 HCLGE_TQP_REG_OFFSET +
1626 HCLGE_TQP_EXT_REG_OFFSET +
1627 (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
1628 HCLGE_TQP_REG_SIZE;
46a3df9f
S
1629
1630 tqp++;
1631 }
1632
1633 return 0;
1634}
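/* Queue register layout implied by hclge_alloc_tqps() above: queues below
 * HCLGE_TQP_MAX_SIZE_DEV_V2 sit at
 *   io_base + HCLGE_TQP_REG_OFFSET + i * HCLGE_TQP_REG_SIZE,
 * while queue HCLGE_TQP_MAX_SIZE_DEV_V2 itself starts the extended region at
 *   io_base + HCLGE_TQP_REG_OFFSET + HCLGE_TQP_EXT_REG_OFFSET,
 * i.e. the extended region is indexed by (i - HCLGE_TQP_MAX_SIZE_DEV_V2).
 */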
1635
1636static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1637 u16 tqp_pid, u16 tqp_vid, bool is_pf)
1638{
d44f9b63 1639 struct hclge_tqp_map_cmd *req;
46a3df9f
S
1640 struct hclge_desc desc;
1641 int ret;
1642
1643 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1644
d44f9b63 1645 req = (struct hclge_tqp_map_cmd *)desc.data;
46a3df9f 1646 req->tqp_id = cpu_to_le16(tqp_pid);
a90bb9a5 1647 req->tqp_vf = func_id;
b9a8f883
YL
1648 req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1649 if (!is_pf)
1650 req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
46a3df9f
S
1651 req->tqp_vid = cpu_to_le16(tqp_vid);
1652
1653 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907
JS
1654 if (ret)
1655 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
46a3df9f 1656
3f639907 1657 return ret;
46a3df9f
S
1658}
1659
672ad0ed 1660static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
46a3df9f 1661{
128b900d 1662 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
46a3df9f 1663 struct hclge_dev *hdev = vport->back;
7df7dad6 1664 int i, alloced;
46a3df9f
S
1665
1666 for (i = 0, alloced = 0; i < hdev->num_tqps &&
672ad0ed 1667 alloced < num_tqps; i++) {
46a3df9f
S
1668 if (!hdev->htqp[i].alloced) {
1669 hdev->htqp[i].q.handle = &vport->nic;
1670 hdev->htqp[i].q.tqp_index = alloced;
c0425944
PL
1671 hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1672 hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
128b900d 1673 kinfo->tqp[alloced] = &hdev->htqp[i].q;
46a3df9f 1674 hdev->htqp[i].alloced = true;
46a3df9f
S
1675 alloced++;
1676 }
1677 }
672ad0ed 1678 vport->alloc_tqps = alloced;
f1c2e66d 1679 kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
672ad0ed 1680 vport->alloc_tqps / hdev->tm_info.num_tc);
46a3df9f 1681
580a05f9
YL
1682 /* ensure a one-to-one mapping between irq and queue by default */
1683 kinfo->rss_size = min_t(u16, kinfo->rss_size,
1684 (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
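	/* For example (values purely illustrative): with pf_rss_size_max 64,
	 * 16 allocated TQPs, 4 TCs and 9 NIC MSI vectors, rss_size is first
	 * min(64, 16 / 4) = 4 and is then capped at (9 - 1) / 4 = 2, so the
	 * 8 data vectors can cover all 4 * 2 = 8 RSS queues one-to-one.
	 */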
1685
46a3df9f
S
1686 return 0;
1687}
1688
c0425944
PL
1689static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1690 u16 num_tx_desc, u16 num_rx_desc)
1691
46a3df9f
S
1692{
1693 struct hnae3_handle *nic = &vport->nic;
1694 struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1695 struct hclge_dev *hdev = vport->back;
af958827 1696 int ret;
46a3df9f 1697
c0425944
PL
1698 kinfo->num_tx_desc = num_tx_desc;
1699 kinfo->num_rx_desc = num_rx_desc;
1700
46a3df9f 1701 kinfo->rx_buf_len = hdev->rx_buf_len;
46a3df9f 1702
672ad0ed 1703 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
46a3df9f
S
1704 sizeof(struct hnae3_queue *), GFP_KERNEL);
1705 if (!kinfo->tqp)
1706 return -ENOMEM;
1707
672ad0ed 1708 ret = hclge_assign_tqp(vport, num_tqps);
3f639907 1709 if (ret)
46a3df9f 1710 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
46a3df9f 1711
3f639907 1712 return ret;
46a3df9f
S
1713}
1714
7df7dad6
L
1715static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1716 struct hclge_vport *vport)
1717{
1718 struct hnae3_handle *nic = &vport->nic;
1719 struct hnae3_knic_private_info *kinfo;
1720 u16 i;
1721
1722 kinfo = &nic->kinfo;
205a24ca 1723 for (i = 0; i < vport->alloc_tqps; i++) {
7df7dad6
L
1724 struct hclge_tqp *q =
1725 container_of(kinfo->tqp[i], struct hclge_tqp, q);
1726 bool is_pf;
1727 int ret;
1728
1729 is_pf = !(vport->vport_id);
1730 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1731 i, is_pf);
1732 if (ret)
1733 return ret;
1734 }
1735
1736 return 0;
1737}
1738
1739static int hclge_map_tqp(struct hclge_dev *hdev)
1740{
1741 struct hclge_vport *vport = hdev->vport;
1742 u16 i, num_vport;
1743
1744 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1745 for (i = 0; i < num_vport; i++) {
1746 int ret;
1747
1748 ret = hclge_map_tqp_to_vport(hdev, vport);
1749 if (ret)
1750 return ret;
1751
1752 vport++;
1753 }
1754
1755 return 0;
1756}
1757
46a3df9f
S
1758static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1759{
1760 struct hnae3_handle *nic = &vport->nic;
1761 struct hclge_dev *hdev = vport->back;
1762 int ret;
1763
1764 nic->pdev = hdev->pdev;
1765 nic->ae_algo = &ae_algo;
1766 nic->numa_node_mask = hdev->numa_node_mask;
1767
b69c9737
YL
1768 ret = hclge_knic_setup(vport, num_tqps,
1769 hdev->num_tx_desc, hdev->num_rx_desc);
1770 if (ret)
1771 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
46a3df9f 1772
b69c9737 1773 return ret;
46a3df9f
S
1774}
1775
1776static int hclge_alloc_vport(struct hclge_dev *hdev)
1777{
1778 struct pci_dev *pdev = hdev->pdev;
1779 struct hclge_vport *vport;
1780 u32 tqp_main_vport;
1781 u32 tqp_per_vport;
1782 int num_vport, i;
1783 int ret;
1784
1785 /* We need to alloc a vport for the main NIC of the PF */
1786 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1787
38e62046 1788 if (hdev->num_tqps < num_vport) {
adcf738b 1789 dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
38e62046
HT
1790 hdev->num_tqps, num_vport);
1791 return -EINVAL;
1792 }
46a3df9f
S
1793
1794 /* Alloc the same number of TQPs for every vport */
1795 tqp_per_vport = hdev->num_tqps / num_vport;
1796 tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1797
1798 vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1799 GFP_KERNEL);
1800 if (!vport)
1801 return -ENOMEM;
1802
1803 hdev->vport = vport;
1804 hdev->num_alloc_vport = num_vport;
1805
2312e050
FL
1806 if (IS_ENABLED(CONFIG_PCI_IOV))
1807 hdev->num_alloc_vfs = hdev->num_req_vfs;
46a3df9f
S
1808
1809 for (i = 0; i < num_vport; i++) {
1810 vport->back = hdev;
1811 vport->vport_id = i;
6430f744 1812 vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
818f1675 1813 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
741fca16
JS
1814 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1815 vport->rxvlan_cfg.rx_vlan_offload_en = true;
c6075b19 1816 INIT_LIST_HEAD(&vport->vlan_list);
6dd86902 1817 INIT_LIST_HEAD(&vport->uc_mac_list);
1818 INIT_LIST_HEAD(&vport->mc_mac_list);
ee4bcd3b 1819 spin_lock_init(&vport->mac_list_lock);
46a3df9f
S
1820
1821 if (i == 0)
1822 ret = hclge_vport_setup(vport, tqp_main_vport);
1823 else
1824 ret = hclge_vport_setup(vport, tqp_per_vport);
1825 if (ret) {
1826 dev_err(&pdev->dev,
1827 "vport setup failed for vport %d, %d\n",
1828 i, ret);
1829 return ret;
1830 }
1831
1832 vport++;
1833 }
1834
1835 return 0;
1836}
1837
acf61ecd
YL
1838static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1839 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f
S
1840{
1841/* TX buffer size is allocated in units of 128 bytes */
1842#define HCLGE_BUF_SIZE_UNIT_SHIFT 7
1843#define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15)
d44f9b63 1844 struct hclge_tx_buff_alloc_cmd *req;
46a3df9f
S
1845 struct hclge_desc desc;
1846 int ret;
1847 u8 i;
1848
d44f9b63 1849 req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
46a3df9f
S
1850
1851 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
f9f07091 1852 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
acf61ecd 1853 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
9ffe79a9 1854
46a3df9f
S
1855 req->tx_pkt_buff[i] =
1856 cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1857 HCLGE_BUF_SIZE_UPDATE_EN_MSK);
9ffe79a9 1858 }
46a3df9f
S
1859
1860 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 1861 if (ret)
46a3df9f
S
1862 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1863 ret);
46a3df9f 1864
3f639907 1865 return ret;
46a3df9f
S
1866}
1867
acf61ecd
YL
1868static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1869 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f 1870{
acf61ecd 1871 int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
46a3df9f 1872
3f639907
JS
1873 if (ret)
1874 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
46a3df9f 1875
3f639907 1876 return ret;
46a3df9f
S
1877}
1878
1a49f3c6 1879static u32 hclge_get_tc_num(struct hclge_dev *hdev)
46a3df9f 1880{
ebaf1908
WL
1881 unsigned int i;
1882 u32 cnt = 0;
46a3df9f
S
1883
1884 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1885 if (hdev->hw_tc_map & BIT(i))
1886 cnt++;
1887 return cnt;
1888}
1889
46a3df9f 1890/* Get the number of PFC-enabled TCs, which have a private buffer */
acf61ecd
YL
1891static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1892 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f
S
1893{
1894 struct hclge_priv_buf *priv;
ebaf1908
WL
1895 unsigned int i;
1896 int cnt = 0;
46a3df9f
S
1897
1898 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
acf61ecd 1899 priv = &buf_alloc->priv_buf[i];
46a3df9f
S
1900 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1901 priv->enable)
1902 cnt++;
1903 }
1904
1905 return cnt;
1906}
1907
1908/* Get the number of PFC-disabled TCs, which have a private buffer */
acf61ecd
YL
1909static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1910 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f
S
1911{
1912 struct hclge_priv_buf *priv;
ebaf1908
WL
1913 unsigned int i;
1914 int cnt = 0;
46a3df9f
S
1915
1916 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
acf61ecd 1917 priv = &buf_alloc->priv_buf[i];
46a3df9f
S
1918 if (hdev->hw_tc_map & BIT(i) &&
1919 !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1920 priv->enable)
1921 cnt++;
1922 }
1923
1924 return cnt;
1925}
1926
acf61ecd 1927static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f
S
1928{
1929 struct hclge_priv_buf *priv;
1930 u32 rx_priv = 0;
1931 int i;
1932
1933 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
acf61ecd 1934 priv = &buf_alloc->priv_buf[i];
46a3df9f
S
1935 if (priv->enable)
1936 rx_priv += priv->buf_size;
1937 }
1938 return rx_priv;
1939}
1940
acf61ecd 1941static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
9ffe79a9
YL
1942{
1943 u32 i, total_tx_size = 0;
1944
1945 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
acf61ecd 1946 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
9ffe79a9
YL
1947
1948 return total_tx_size;
1949}
1950
acf61ecd
YL
1951static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1952 struct hclge_pkt_buf_alloc *buf_alloc,
1953 u32 rx_all)
46a3df9f 1954{
1a49f3c6
YL
1955 u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1956 u32 tc_num = hclge_get_tc_num(hdev);
b9a400ac 1957 u32 shared_buf, aligned_mps;
46a3df9f
S
1958 u32 rx_priv;
1959 int i;
1960
b9a400ac 1961 aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
46a3df9f 1962
d221df4e 1963 if (hnae3_dev_dcb_supported(hdev))
b37ce587
YM
1964 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1965 hdev->dv_buf_size;
d221df4e 1966 else
b9a400ac 1967 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
368686be 1968 + hdev->dv_buf_size;
d221df4e 1969
db5936db 1970 shared_buf_tc = tc_num * aligned_mps + aligned_mps;
af854724
YL
1971 shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1972 HCLGE_BUF_SIZE_UNIT);
46a3df9f 1973
acf61ecd 1974 rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
af854724 1975 if (rx_all < rx_priv + shared_std)
46a3df9f
S
1976 return false;
1977
b9a400ac 1978 shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
acf61ecd 1979 buf_alloc->s_buf.buf_size = shared_buf;
368686be
YL
1980 if (hnae3_dev_dcb_supported(hdev)) {
1981 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1982 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
b37ce587
YM
1983 - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1984 HCLGE_BUF_SIZE_UNIT);
368686be 1985 } else {
b9a400ac 1986 buf_alloc->s_buf.self.high = aligned_mps +
368686be 1987 HCLGE_NON_DCB_ADDITIONAL_BUF;
1a49f3c6
YL
1988 buf_alloc->s_buf.self.low = aligned_mps;
1989 }
1990
1991 if (hnae3_dev_dcb_supported(hdev)) {
9e15be90
YL
1992 hi_thrd = shared_buf - hdev->dv_buf_size;
1993
1994 if (tc_num <= NEED_RESERVE_TC_NUM)
1995 hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
1996 / BUF_MAX_PERCENT;
1997
1a49f3c6 1998 if (tc_num)
9e15be90 1999 hi_thrd = hi_thrd / tc_num;
1a49f3c6 2000
b37ce587 2001 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
1a49f3c6 2002 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
b37ce587 2003 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
1a49f3c6
YL
2004 } else {
2005 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
2006 lo_thrd = aligned_mps;
368686be 2007 }
46a3df9f
S
2008
2009 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1a49f3c6
YL
2010 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
2011 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
46a3df9f
S
2012 }
2013
2014 return true;
2015}
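/* Rough numeric sketch of the shared-buffer check above (illustrative values
 * only): with an aligned MPS of 1536 bytes, 4 TCs and a 2048-byte dv_buf_size
 * on a DCB-capable device, shared_buf_min is 2 * 1536 + 2048 = 5120 and
 * shared_buf_tc is 4 * 1536 + 1536 = 7680, so shared_std is
 * max(5120, 7680) = 7680 (already a multiple of HCLGE_BUF_SIZE_UNIT). The
 * remaining rx buffer must cover the private buffers plus this shared
 * standard, otherwise the caller has to shrink the private allocation
 * and retry.
 */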
2016
acf61ecd
YL
2017static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
2018 struct hclge_pkt_buf_alloc *buf_alloc)
9ffe79a9
YL
2019{
2020 u32 i, total_size;
2021
2022 total_size = hdev->pkt_buf_size;
2023
2024 /* alloc tx buffer for all enabled tc */
2025 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
acf61ecd 2026 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
9ffe79a9 2027
b6b4f987
HT
2028 if (hdev->hw_tc_map & BIT(i)) {
2029 if (total_size < hdev->tx_buf_size)
2030 return -ENOMEM;
9ffe79a9 2031
368686be 2032 priv->tx_buf_size = hdev->tx_buf_size;
b6b4f987 2033 } else {
9ffe79a9 2034 priv->tx_buf_size = 0;
b6b4f987 2035 }
9ffe79a9
YL
2036
2037 total_size -= priv->tx_buf_size;
2038 }
2039
2040 return 0;
2041}
2042
8ca754b1
YL
2043static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
2044 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f 2045{
8ca754b1
YL
2046 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2047 u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
ebaf1908 2048 unsigned int i;
46a3df9f 2049
46a3df9f 2050 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
8ca754b1 2051 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
46a3df9f 2052
bb1fe9ea
YL
2053 priv->enable = 0;
2054 priv->wl.low = 0;
2055 priv->wl.high = 0;
2056 priv->buf_size = 0;
2057
2058 if (!(hdev->hw_tc_map & BIT(i)))
2059 continue;
2060
2061 priv->enable = 1;
46a3df9f
S
2062
2063 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
b37ce587 2064 priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
8ca754b1
YL
2065 priv->wl.high = roundup(priv->wl.low + aligned_mps,
2066 HCLGE_BUF_SIZE_UNIT);
46a3df9f
S
2067 } else {
2068 priv->wl.low = 0;
b37ce587
YM
2069 priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
2070 aligned_mps;
46a3df9f 2071 }
8ca754b1
YL
2072
2073 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
46a3df9f
S
2074 }
2075
8ca754b1
YL
2076 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2077}
46a3df9f 2078
8ca754b1
YL
2079static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
2080 struct hclge_pkt_buf_alloc *buf_alloc)
2081{
2082 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2083 int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
2084 int i;
46a3df9f
S
2085
2086 /* let the last one be cleared first */
2087 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
8ca754b1 2088 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
ebaf1908 2089 unsigned int mask = BIT((unsigned int)i);
46a3df9f 2090
ebaf1908
WL
2091 if (hdev->hw_tc_map & mask &&
2092 !(hdev->tm_info.hw_pfc_map & mask)) {
46a3df9f
S
2093 /* Clear the private buffer of TCs without PFC */
2094 priv->wl.low = 0;
2095 priv->wl.high = 0;
2096 priv->buf_size = 0;
2097 priv->enable = 0;
2098 no_pfc_priv_num--;
2099 }
2100
acf61ecd 2101 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
46a3df9f
S
2102 no_pfc_priv_num == 0)
2103 break;
2104 }
2105
8ca754b1
YL
2106 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2107}
46a3df9f 2108
8ca754b1
YL
2109static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2110 struct hclge_pkt_buf_alloc *buf_alloc)
2111{
2112 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2113 int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2114 int i;
46a3df9f
S
2115
2116 /* let the last one be cleared first */
2117 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
8ca754b1 2118 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
ebaf1908 2119 unsigned int mask = BIT((unsigned int)i);
46a3df9f 2120
ebaf1908
WL
2121 if (hdev->hw_tc_map & mask &&
2122 hdev->tm_info.hw_pfc_map & mask) {
46a3df9f
S
2123 /* Reduce the number of PFC-enabled TCs with a private buffer */
2124 priv->wl.low = 0;
2125 priv->enable = 0;
2126 priv->wl.high = 0;
2127 priv->buf_size = 0;
2128 pfc_priv_num--;
2129 }
2130
acf61ecd 2131 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
46a3df9f
S
2132 pfc_priv_num == 0)
2133 break;
2134 }
8ca754b1
YL
2135
2136 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2137}
2138
9e15be90
YL
2139static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2140 struct hclge_pkt_buf_alloc *buf_alloc)
2141{
2142#define COMPENSATE_BUFFER 0x3C00
2143#define COMPENSATE_HALF_MPS_NUM 5
2144#define PRIV_WL_GAP 0x1800
2145
2146 u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2147 u32 tc_num = hclge_get_tc_num(hdev);
2148 u32 half_mps = hdev->mps >> 1;
2149 u32 min_rx_priv;
2150 unsigned int i;
2151
2152 if (tc_num)
2153 rx_priv = rx_priv / tc_num;
2154
2155 if (tc_num <= NEED_RESERVE_TC_NUM)
2156 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2157
2158 min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2159 COMPENSATE_HALF_MPS_NUM * half_mps;
2160 min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2161 rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2162
2163 if (rx_priv < min_rx_priv)
2164 return false;
2165
2166 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2167 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2168
2169 priv->enable = 0;
2170 priv->wl.low = 0;
2171 priv->wl.high = 0;
2172 priv->buf_size = 0;
2173
2174 if (!(hdev->hw_tc_map & BIT(i)))
2175 continue;
2176
2177 priv->enable = 1;
2178 priv->buf_size = rx_priv;
2179 priv->wl.high = rx_priv - hdev->dv_buf_size;
2180 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2181 }
2182
2183 buf_alloc->s_buf.buf_size = 0;
2184
2185 return true;
2186}
2187
8ca754b1
YL
2188/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2189 * @hdev: pointer to struct hclge_dev
2190 * @buf_alloc: pointer to buffer calculation data
2191 * @return: 0: calculation successful, negative: fail
2192 */
2193static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2194 struct hclge_pkt_buf_alloc *buf_alloc)
2195{
2196 /* When DCB is not supported, rx private buffer is not allocated. */
2197 if (!hnae3_dev_dcb_supported(hdev)) {
2198 u32 rx_all = hdev->pkt_buf_size;
2199
2200 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2201 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2202 return -ENOMEM;
2203
2204 return 0;
2205 }
2206
9e15be90
YL
2207 if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2208 return 0;
2209
8ca754b1
YL
2210 if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2211 return 0;
2212
2213 /* try to decrease the buffer size */
2214 if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2215 return 0;
2216
2217 if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2218 return 0;
2219
2220 if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
46a3df9f
S
2221 return 0;
2222
2223 return -ENOMEM;
2224}
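/* Summary of the fallback order used by hclge_rx_buffer_calc() above:
 * private-buffer-only layout first, then full private buffers with large
 * waterlines, then reduced waterlines, then dropping private buffers of
 * non-PFC TCs, and finally dropping private buffers of PFC TCs; if none
 * of these fits into the packet buffer, -ENOMEM is returned.
 */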
2225
acf61ecd
YL
2226static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2227 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f 2228{
d44f9b63 2229 struct hclge_rx_priv_buff_cmd *req;
46a3df9f
S
2230 struct hclge_desc desc;
2231 int ret;
2232 int i;
2233
2234 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
d44f9b63 2235 req = (struct hclge_rx_priv_buff_cmd *)desc.data;
46a3df9f
S
2236
2237 /* Alloc private buffer TCs */
2238 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
acf61ecd 2239 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
46a3df9f
S
2240
2241 req->buf_num[i] =
2242 cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2243 req->buf_num[i] |=
5bca3b94 2244 cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
46a3df9f
S
2245 }
2246
b8c8bf47 2247 req->shared_buf =
acf61ecd 2248 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
b8c8bf47
YL
2249 (1 << HCLGE_TC0_PRI_BUF_EN_B));
2250
46a3df9f 2251 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 2252 if (ret)
46a3df9f
S
2253 dev_err(&hdev->pdev->dev,
2254 "rx private buffer alloc cmd failed %d\n", ret);
46a3df9f 2255
3f639907 2256 return ret;
46a3df9f
S
2257}
2258
acf61ecd
YL
2259static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2260 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f
S
2261{
2262 struct hclge_rx_priv_wl_buf *req;
2263 struct hclge_priv_buf *priv;
2264 struct hclge_desc desc[2];
2265 int i, j;
2266 int ret;
2267
2268 for (i = 0; i < 2; i++) {
2269 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2270 false);
2271 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2272
2273 /* The first descriptor sets the NEXT bit to 1 */
2274 if (i == 0)
2275 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2276 else
2277 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2278
2279 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
acf61ecd
YL
2280 u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2281
2282 priv = &buf_alloc->priv_buf[idx];
46a3df9f
S
2283 req->tc_wl[j].high =
2284 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2285 req->tc_wl[j].high |=
3738287c 2286 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
46a3df9f
S
2287 req->tc_wl[j].low =
2288 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2289 req->tc_wl[j].low |=
3738287c 2290 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
46a3df9f
S
2291 }
2292 }
2293
2294 /* Send 2 descriptors at one time */
2295 ret = hclge_cmd_send(&hdev->hw, desc, 2);
3f639907 2296 if (ret)
46a3df9f
S
2297 dev_err(&hdev->pdev->dev,
2298 "rx private waterline config cmd failed %d\n",
2299 ret);
3f639907 2300 return ret;
46a3df9f
S
2301}
2302
acf61ecd
YL
2303static int hclge_common_thrd_config(struct hclge_dev *hdev,
2304 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f 2305{
acf61ecd 2306 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
46a3df9f
S
2307 struct hclge_rx_com_thrd *req;
2308 struct hclge_desc desc[2];
2309 struct hclge_tc_thrd *tc;
2310 int i, j;
2311 int ret;
2312
2313 for (i = 0; i < 2; i++) {
2314 hclge_cmd_setup_basic_desc(&desc[i],
2315 HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2316 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2317
2318 /* The first descriptor sets the NEXT bit to 1 */
2319 if (i == 0)
2320 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2321 else
2322 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2323
2324 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2325 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2326
2327 req->com_thrd[j].high =
2328 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2329 req->com_thrd[j].high |=
3738287c 2330 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
46a3df9f
S
2331 req->com_thrd[j].low =
2332 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2333 req->com_thrd[j].low |=
3738287c 2334 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
46a3df9f
S
2335 }
2336 }
2337
2338 /* Send 2 descriptors at one time */
2339 ret = hclge_cmd_send(&hdev->hw, desc, 2);
3f639907 2340 if (ret)
46a3df9f
S
2341 dev_err(&hdev->pdev->dev,
2342 "common threshold config cmd failed %d\n", ret);
3f639907 2343 return ret;
46a3df9f
S
2344}
2345
acf61ecd
YL
2346static int hclge_common_wl_config(struct hclge_dev *hdev,
2347 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f 2348{
acf61ecd 2349 struct hclge_shared_buf *buf = &buf_alloc->s_buf;
46a3df9f
S
2350 struct hclge_rx_com_wl *req;
2351 struct hclge_desc desc;
2352 int ret;
2353
2354 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2355
2356 req = (struct hclge_rx_com_wl *)desc.data;
2357 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
3738287c 2358 req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
46a3df9f
S
2359
2360 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
3738287c 2361 req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
46a3df9f
S
2362
2363 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 2364 if (ret)
46a3df9f
S
2365 dev_err(&hdev->pdev->dev,
2366 "common waterline config cmd failed %d\n", ret);
46a3df9f 2367
3f639907 2368 return ret;
46a3df9f
S
2369}
2370
2371int hclge_buffer_alloc(struct hclge_dev *hdev)
2372{
acf61ecd 2373 struct hclge_pkt_buf_alloc *pkt_buf;
46a3df9f
S
2374 int ret;
2375
acf61ecd
YL
2376 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2377 if (!pkt_buf)
46a3df9f
S
2378 return -ENOMEM;
2379
acf61ecd 2380 ret = hclge_tx_buffer_calc(hdev, pkt_buf);
9ffe79a9
YL
2381 if (ret) {
2382 dev_err(&hdev->pdev->dev,
2383 "could not calc tx buffer size for all TCs %d\n", ret);
acf61ecd 2384 goto out;
9ffe79a9
YL
2385 }
2386
acf61ecd 2387 ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
46a3df9f
S
2388 if (ret) {
2389 dev_err(&hdev->pdev->dev,
2390 "could not alloc tx buffers %d\n", ret);
acf61ecd 2391 goto out;
46a3df9f
S
2392 }
2393
acf61ecd 2394 ret = hclge_rx_buffer_calc(hdev, pkt_buf);
46a3df9f
S
2395 if (ret) {
2396 dev_err(&hdev->pdev->dev,
2397 "could not calc rx priv buffer size for all TCs %d\n",
2398 ret);
acf61ecd 2399 goto out;
46a3df9f
S
2400 }
2401
acf61ecd 2402 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
46a3df9f
S
2403 if (ret) {
2404 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2405 ret);
acf61ecd 2406 goto out;
46a3df9f
S
2407 }
2408
2daf4a65 2409 if (hnae3_dev_dcb_supported(hdev)) {
acf61ecd 2410 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2daf4a65
YL
2411 if (ret) {
2412 dev_err(&hdev->pdev->dev,
2413 "could not configure rx private waterline %d\n",
2414 ret);
acf61ecd 2415 goto out;
2daf4a65 2416 }
46a3df9f 2417
acf61ecd 2418 ret = hclge_common_thrd_config(hdev, pkt_buf);
2daf4a65
YL
2419 if (ret) {
2420 dev_err(&hdev->pdev->dev,
2421 "could not configure common threshold %d\n",
2422 ret);
acf61ecd 2423 goto out;
2daf4a65 2424 }
46a3df9f
S
2425 }
2426
acf61ecd
YL
2427 ret = hclge_common_wl_config(hdev, pkt_buf);
2428 if (ret)
46a3df9f
S
2429 dev_err(&hdev->pdev->dev,
2430 "could not configure common waterline %d\n", ret);
46a3df9f 2431
acf61ecd
YL
2432out:
2433 kfree(pkt_buf);
2434 return ret;
46a3df9f
S
2435}
2436
2437static int hclge_init_roce_base_info(struct hclge_vport *vport)
2438{
2439 struct hnae3_handle *roce = &vport->roce;
2440 struct hnae3_handle *nic = &vport->nic;
3a6863e4 2441 struct hclge_dev *hdev = vport->back;
46a3df9f 2442
887c3820 2443 roce->rinfo.num_vectors = vport->back->num_roce_msi;
46a3df9f 2444
3a6863e4 2445 if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
46a3df9f
S
2446 return -EINVAL;
2447
3a6863e4 2448 roce->rinfo.base_vector = hdev->roce_base_vector;
46a3df9f
S
2449
2450 roce->rinfo.netdev = nic->kinfo.netdev;
3a6863e4
YM
2451 roce->rinfo.roce_io_base = hdev->hw.io_base;
2452 roce->rinfo.roce_mem_base = hdev->hw.mem_base;
46a3df9f
S
2453
2454 roce->pdev = nic->pdev;
2455 roce->ae_algo = nic->ae_algo;
2456 roce->numa_node_mask = nic->numa_node_mask;
2457
2458 return 0;
2459}
2460
887c3820 2461static int hclge_init_msi(struct hclge_dev *hdev)
46a3df9f
S
2462{
2463 struct pci_dev *pdev = hdev->pdev;
887c3820
SM
2464 int vectors;
2465 int i;
46a3df9f 2466
580a05f9
YL
2467 vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2468 hdev->num_msi,
887c3820
SM
2469 PCI_IRQ_MSI | PCI_IRQ_MSIX);
2470 if (vectors < 0) {
2471 dev_err(&pdev->dev,
2472 "failed(%d) to allocate MSI/MSI-X vectors\n",
2473 vectors);
2474 return vectors;
46a3df9f 2475 }
887c3820
SM
2476 if (vectors < hdev->num_msi)
2477 dev_warn(&hdev->pdev->dev,
adcf738b 2478 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
887c3820 2479 hdev->num_msi, vectors);
46a3df9f 2480
887c3820
SM
2481 hdev->num_msi = vectors;
2482 hdev->num_msi_left = vectors;
580a05f9 2483
887c3820 2484 hdev->base_msi_vector = pdev->irq;
46a3df9f 2485 hdev->roce_base_vector = hdev->base_msi_vector +
3a6863e4 2486 hdev->num_nic_msi;
46a3df9f 2487
46a3df9f
S
2488 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2489 sizeof(u16), GFP_KERNEL);
887c3820
SM
2490 if (!hdev->vector_status) {
2491 pci_free_irq_vectors(pdev);
46a3df9f 2492 return -ENOMEM;
887c3820 2493 }
46a3df9f
S
2494
2495 for (i = 0; i < hdev->num_msi; i++)
2496 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2497
887c3820
SM
2498 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2499 sizeof(int), GFP_KERNEL);
2500 if (!hdev->vector_irq) {
2501 pci_free_irq_vectors(pdev);
2502 return -ENOMEM;
46a3df9f 2503 }
46a3df9f
S
2504
2505 return 0;
2506}
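/* Note on the vector layout set up above: the misc/NIC vectors start at
 * pdev->irq (base_msi_vector), and roce_base_vector is placed num_nic_msi
 * entries later, so NIC and RoCE interrupt vectors never overlap.
 */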
2507
2d03eacc 2508static u8 hclge_check_speed_dup(u8 duplex, int speed)
46a3df9f 2509{
2d03eacc
YL
2510 if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2511 duplex = HCLGE_MAC_FULL;
46a3df9f 2512
2d03eacc 2513 return duplex;
46a3df9f
S
2514}
2515
2d03eacc
YL
2516static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2517 u8 duplex)
46a3df9f 2518{
d44f9b63 2519 struct hclge_config_mac_speed_dup_cmd *req;
46a3df9f
S
2520 struct hclge_desc desc;
2521 int ret;
2522
d44f9b63 2523 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
46a3df9f
S
2524
2525 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2526
63cbf7a9
YM
2527 if (duplex)
2528 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
46a3df9f
S
2529
2530 switch (speed) {
2531 case HCLGE_MAC_SPEED_10M:
e4e87715
PL
2532 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2533 HCLGE_CFG_SPEED_S, 6);
46a3df9f
S
2534 break;
2535 case HCLGE_MAC_SPEED_100M:
e4e87715
PL
2536 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2537 HCLGE_CFG_SPEED_S, 7);
46a3df9f
S
2538 break;
2539 case HCLGE_MAC_SPEED_1G:
e4e87715
PL
2540 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2541 HCLGE_CFG_SPEED_S, 0);
46a3df9f
S
2542 break;
2543 case HCLGE_MAC_SPEED_10G:
e4e87715
PL
2544 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2545 HCLGE_CFG_SPEED_S, 1);
46a3df9f
S
2546 break;
2547 case HCLGE_MAC_SPEED_25G:
e4e87715
PL
2548 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2549 HCLGE_CFG_SPEED_S, 2);
46a3df9f
S
2550 break;
2551 case HCLGE_MAC_SPEED_40G:
e4e87715
PL
2552 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2553 HCLGE_CFG_SPEED_S, 3);
46a3df9f
S
2554 break;
2555 case HCLGE_MAC_SPEED_50G:
e4e87715
PL
2556 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2557 HCLGE_CFG_SPEED_S, 4);
46a3df9f
S
2558 break;
2559 case HCLGE_MAC_SPEED_100G:
e4e87715
PL
2560 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2561 HCLGE_CFG_SPEED_S, 5);
46a3df9f 2562 break;
ae6f010c
GH
2563 case HCLGE_MAC_SPEED_200G:
2564 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2565 HCLGE_CFG_SPEED_S, 8);
2566 break;
46a3df9f 2567 default:
d7629e74 2568 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
46a3df9f
S
2569 return -EINVAL;
2570 }
2571
e4e87715
PL
2572 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2573 1);
46a3df9f
S
2574
2575 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2576 if (ret) {
2577 dev_err(&hdev->pdev->dev,
2578 "mac speed/duplex config cmd failed %d.\n", ret);
2579 return ret;
2580 }
2581
2d03eacc
YL
2582 return 0;
2583}
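/* Speed encoding of the HCLGE_CFG_SPEED field as programmed by
 * hclge_cfg_mac_speed_dup_hw() above: 1G=0, 10G=1, 25G=2, 40G=3, 50G=4,
 * 100G=5, 10M=6, 100M=7, 200G=8.
 */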
2584
2585int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2586{
68e1006f 2587 struct hclge_mac *mac = &hdev->hw.mac;
2d03eacc
YL
2588 int ret;
2589
2590 duplex = hclge_check_speed_dup(duplex, speed);
68e1006f
JS
2591 if (!mac->support_autoneg && mac->speed == speed &&
2592 mac->duplex == duplex)
2d03eacc
YL
2593 return 0;
2594
2595 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2596 if (ret)
2597 return ret;
2598
2599 hdev->hw.mac.speed = speed;
2600 hdev->hw.mac.duplex = duplex;
46a3df9f
S
2601
2602 return 0;
2603}
2604
2605static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2606 u8 duplex)
2607{
2608 struct hclge_vport *vport = hclge_get_vport(handle);
2609 struct hclge_dev *hdev = vport->back;
2610
2611 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2612}
2613
46a3df9f
S
2614static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2615{
d44f9b63 2616 struct hclge_config_auto_neg_cmd *req;
46a3df9f 2617 struct hclge_desc desc;
a90bb9a5 2618 u32 flag = 0;
46a3df9f
S
2619 int ret;
2620
2621 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2622
d44f9b63 2623 req = (struct hclge_config_auto_neg_cmd *)desc.data;
b9a8f883
YL
2624 if (enable)
2625 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
a90bb9a5 2626 req->cfg_an_cmd_flag = cpu_to_le32(flag);
46a3df9f
S
2627
2628 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 2629 if (ret)
46a3df9f
S
2630 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2631 ret);
46a3df9f 2632
3f639907 2633 return ret;
46a3df9f
S
2634}
2635
2636static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2637{
2638 struct hclge_vport *vport = hclge_get_vport(handle);
2639 struct hclge_dev *hdev = vport->back;
2640
22f48e24
JS
2641 if (!hdev->hw.mac.support_autoneg) {
2642 if (enable) {
2643 dev_err(&hdev->pdev->dev,
2644 "autoneg is not supported by current port\n");
2645 return -EOPNOTSUPP;
2646 } else {
2647 return 0;
2648 }
2649 }
2650
46a3df9f
S
2651 return hclge_set_autoneg_en(hdev, enable);
2652}
2653
2654static int hclge_get_autoneg(struct hnae3_handle *handle)
2655{
2656 struct hclge_vport *vport = hclge_get_vport(handle);
2657 struct hclge_dev *hdev = vport->back;
27b5bf49
FL
2658 struct phy_device *phydev = hdev->hw.mac.phydev;
2659
2660 if (phydev)
2661 return phydev->autoneg;
46a3df9f
S
2662
2663 return hdev->hw.mac.autoneg;
2664}
2665
22f48e24
JS
2666static int hclge_restart_autoneg(struct hnae3_handle *handle)
2667{
2668 struct hclge_vport *vport = hclge_get_vport(handle);
2669 struct hclge_dev *hdev = vport->back;
2670 int ret;
2671
2672 dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2673
2674 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2675 if (ret)
2676 return ret;
2677 return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2678}
2679
7786a996
JS
2680static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2681{
2682 struct hclge_vport *vport = hclge_get_vport(handle);
2683 struct hclge_dev *hdev = vport->back;
2684
2685 if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2686 return hclge_set_autoneg_en(hdev, !halt);
2687
2688 return 0;
2689}
2690
7e6ec914
JS
2691static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2692{
2693 struct hclge_config_fec_cmd *req;
2694 struct hclge_desc desc;
2695 int ret;
2696
2697 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2698
2699 req = (struct hclge_config_fec_cmd *)desc.data;
2700 if (fec_mode & BIT(HNAE3_FEC_AUTO))
2701 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2702 if (fec_mode & BIT(HNAE3_FEC_RS))
2703 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2704 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2705 if (fec_mode & BIT(HNAE3_FEC_BASER))
2706 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2707 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2708
2709 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2710 if (ret)
2711 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2712
2713 return ret;
2714}
2715
2716static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2717{
2718 struct hclge_vport *vport = hclge_get_vport(handle);
2719 struct hclge_dev *hdev = vport->back;
2720 struct hclge_mac *mac = &hdev->hw.mac;
2721 int ret;
2722
2723 if (fec_mode && !(mac->fec_ability & fec_mode)) {
2724 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2725 return -EINVAL;
2726 }
2727
2728 ret = hclge_set_fec_hw(hdev, fec_mode);
2729 if (ret)
2730 return ret;
2731
2732 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2733 return 0;
2734}
2735
2736static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2737 u8 *fec_mode)
2738{
2739 struct hclge_vport *vport = hclge_get_vport(handle);
2740 struct hclge_dev *hdev = vport->back;
2741 struct hclge_mac *mac = &hdev->hw.mac;
2742
2743 if (fec_ability)
2744 *fec_ability = mac->fec_ability;
2745 if (fec_mode)
2746 *fec_mode = mac->fec_mode;
2747}
2748
46a3df9f
S
2749static int hclge_mac_init(struct hclge_dev *hdev)
2750{
2751 struct hclge_mac *mac = &hdev->hw.mac;
2752 int ret;
2753
5d497936 2754 hdev->support_sfp_query = true;
2d03eacc
YL
2755 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2756 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2757 hdev->hw.mac.duplex);
60df7e91 2758 if (ret)
46a3df9f 2759 return ret;
46a3df9f 2760
d736fc6c
JS
2761 if (hdev->hw.mac.support_autoneg) {
2762 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
60df7e91 2763 if (ret)
d736fc6c 2764 return ret;
d736fc6c
JS
2765 }
2766
46a3df9f
S
2767 mac->link = 0;
2768
7e6ec914
JS
2769 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2770 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
60df7e91 2771 if (ret)
7e6ec914 2772 return ret;
7e6ec914
JS
2773 }
2774
e6d7d79d
YL
2775 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2776 if (ret) {
2777 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2778 return ret;
2779 }
f9fd82a9 2780
1cbc662d
YM
2781 ret = hclge_set_default_loopback(hdev);
2782 if (ret)
2783 return ret;
2784
e6d7d79d 2785 ret = hclge_buffer_alloc(hdev);
3f639907 2786 if (ret)
f9fd82a9 2787 dev_err(&hdev->pdev->dev,
e6d7d79d 2788 "allocate buffer fail, ret=%d\n", ret);
f9fd82a9 2789
3f639907 2790 return ret;
46a3df9f
S
2791}
2792
c1a81619
SM
2793static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2794{
1c6dfe6f 2795 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
18e24888 2796 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
1c6dfe6f 2797 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
0ea68902 2798 hclge_wq, &hdev->service_task, 0);
c1a81619
SM
2799}
2800
cb1b9f77
SM
2801static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2802{
acfc3d55
HT
2803 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2804 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
1c6dfe6f 2805 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
0ea68902 2806 hclge_wq, &hdev->service_task, 0);
cb1b9f77
SM
2807}
2808
ed8fb4b2 2809void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
46a3df9f 2810{
d5432455
GL
2811 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2812 !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
08125454 2813 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
0ea68902 2814 hclge_wq, &hdev->service_task,
ed8fb4b2 2815 delay_time);
46a3df9f
S
2816}
2817
fac24df7 2818static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
46a3df9f 2819{
d44f9b63 2820 struct hclge_link_status_cmd *req;
46a3df9f 2821 struct hclge_desc desc;
46a3df9f
S
2822 int ret;
2823
2824 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2825 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2826 if (ret) {
2827 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2828 ret);
2829 return ret;
2830 }
2831
d44f9b63 2832 req = (struct hclge_link_status_cmd *)desc.data;
fac24df7
JS
2833 *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2834 HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
46a3df9f 2835
fac24df7 2836 return 0;
46a3df9f
S
2837}
2838
fac24df7 2839static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
46a3df9f 2840{
fac24df7
JS
2841 struct phy_device *phydev = hdev->hw.mac.phydev;
2842
2843 *link_status = HCLGE_LINK_STATUS_DOWN;
46a3df9f 2844
582d37bb
PL
2845 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2846 return 0;
2847
fac24df7
JS
2848 if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
2849 return 0;
46a3df9f 2850
fac24df7 2851 return hclge_get_mac_link_status(hdev, link_status);
46a3df9f
S
2852}
2853
2854static void hclge_update_link_status(struct hclge_dev *hdev)
2855{
45e92b7e 2856 struct hnae3_client *rclient = hdev->roce_client;
46a3df9f 2857 struct hnae3_client *client = hdev->nic_client;
45e92b7e 2858 struct hnae3_handle *rhandle;
46a3df9f
S
2859 struct hnae3_handle *handle;
2860 int state;
fac24df7 2861 int ret;
46a3df9f
S
2862 int i;
2863
2864 if (!client)
2865 return;
1c6dfe6f
YL
2866
2867 if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2868 return;
2869
fac24df7
JS
2870 ret = hclge_get_mac_phy_link(hdev, &state);
2871 if (ret) {
2872 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2873 return;
2874 }
2875
46a3df9f
S
2876 if (state != hdev->hw.mac.link) {
2877 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2878 handle = &hdev->vport[i].nic;
2879 client->ops->link_status_change(handle, state);
a6345787 2880 hclge_config_mac_tnl_int(hdev, state);
45e92b7e
PL
2881 rhandle = &hdev->vport[i].roce;
2882 if (rclient && rclient->ops->link_status_change)
2883 rclient->ops->link_status_change(rhandle,
2884 state);
46a3df9f
S
2885 }
2886 hdev->hw.mac.link = state;
2887 }
1c6dfe6f
YL
2888
2889 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
46a3df9f
S
2890}
2891
88d10bd6
JS
2892static void hclge_update_port_capability(struct hclge_mac *mac)
2893{
f438bfe9
JS
2894 /* update fec ability by speed */
2895 hclge_convert_setting_fec(mac);
2896
88d10bd6
JS
2897 /* firmware cannot identify the backplane type; the media type
2898 * read from the configuration helps to handle it
2899 */
2900 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2901 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2902 mac->module_type = HNAE3_MODULE_TYPE_KR;
2903 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2904 mac->module_type = HNAE3_MODULE_TYPE_TP;
2905
db4d3d55 2906 if (mac->support_autoneg) {
88d10bd6
JS
2907 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2908 linkmode_copy(mac->advertising, mac->supported);
2909 } else {
2910 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2911 mac->supported);
2912 linkmode_zero(mac->advertising);
2913 }
2914}
2915
5d497936
PL
2916static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2917{
63cbf7a9 2918 struct hclge_sfp_info_cmd *resp;
5d497936
PL
2919 struct hclge_desc desc;
2920 int ret;
2921
88d10bd6
JS
2922 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2923 resp = (struct hclge_sfp_info_cmd *)desc.data;
5d497936
PL
2924 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2925 if (ret == -EOPNOTSUPP) {
2926 dev_warn(&hdev->pdev->dev,
2927 "IMP do not support get SFP speed %d\n", ret);
2928 return ret;
2929 } else if (ret) {
2930 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2931 return ret;
2932 }
2933
88d10bd6 2934 *speed = le32_to_cpu(resp->speed);
5d497936
PL
2935
2936 return 0;
2937}
2938
88d10bd6 2939static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
46a3df9f 2940{
88d10bd6
JS
2941 struct hclge_sfp_info_cmd *resp;
2942 struct hclge_desc desc;
46a3df9f
S
2943 int ret;
2944
88d10bd6
JS
2945 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2946 resp = (struct hclge_sfp_info_cmd *)desc.data;
2947
2948 resp->query_type = QUERY_ACTIVE_SPEED;
2949
2950 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2951 if (ret == -EOPNOTSUPP) {
2952 dev_warn(&hdev->pdev->dev,
2953 "IMP does not support get SFP info %d\n", ret);
2954 return ret;
2955 } else if (ret) {
2956 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2957 return ret;
2958 }
2959
2af8cb61
GL
2960 /* In some cases, the mac speed got from IMP may be 0; it should not
2961 * be set to mac->speed.
2962 */
2963 if (!le32_to_cpu(resp->speed))
2964 return 0;
2965
88d10bd6
JS
2966 mac->speed = le32_to_cpu(resp->speed);
2967 /* if resp->speed_ability is 0, it means the firmware is an old
2968 * version, so do not update these params
46a3df9f 2969 */
88d10bd6
JS
2970 if (resp->speed_ability) {
2971 mac->module_type = le32_to_cpu(resp->module_type);
2972 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2973 mac->autoneg = resp->autoneg;
2974 mac->support_autoneg = resp->autoneg_ability;
49b12556 2975 mac->speed_type = QUERY_ACTIVE_SPEED;
f438bfe9
JS
2976 if (!resp->active_fec)
2977 mac->fec_mode = 0;
2978 else
2979 mac->fec_mode = BIT(resp->active_fec);
88d10bd6
JS
2980 } else {
2981 mac->speed_type = QUERY_SFP_SPEED;
2982 }
2983
2984 return 0;
2985}
2986
2987static int hclge_update_port_info(struct hclge_dev *hdev)
2988{
2989 struct hclge_mac *mac = &hdev->hw.mac;
2990 int speed = HCLGE_MAC_SPEED_UNKNOWN;
2991 int ret;
2992
2993 /* get the port info from SFP cmd if not copper port */
2994 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
46a3df9f
S
2995 return 0;
2996
88d10bd6 2997 /* if IMP does not support get SFP/qSFP info, return directly */
5d497936
PL
2998 if (!hdev->support_sfp_query)
2999 return 0;
46a3df9f 3000
295ba232 3001 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
88d10bd6
JS
3002 ret = hclge_get_sfp_info(hdev, mac);
3003 else
3004 ret = hclge_get_sfp_speed(hdev, &speed);
3005
5d497936
PL
3006 if (ret == -EOPNOTSUPP) {
3007 hdev->support_sfp_query = false;
3008 return ret;
3009 } else if (ret) {
2d03eacc 3010 return ret;
46a3df9f
S
3011 }
3012
295ba232 3013 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
88d10bd6
JS
3014 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
3015 hclge_update_port_capability(mac);
3016 return 0;
3017 }
3018 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
3019 HCLGE_MAC_FULL);
3020 } else {
3021 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
3022 return 0; /* do nothing if no SFP */
46a3df9f 3023
88d10bd6
JS
3024 /* must configure full duplex for SFP */
3025 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
3026 }
46a3df9f
S
3027}
3028
3029static int hclge_get_status(struct hnae3_handle *handle)
3030{
3031 struct hclge_vport *vport = hclge_get_vport(handle);
3032 struct hclge_dev *hdev = vport->back;
3033
3034 hclge_update_link_status(hdev);
3035
3036 return hdev->hw.mac.link;
3037}
3038
6430f744
YM
3039static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
3040{
60df7e91 3041 if (!pci_num_vf(hdev->pdev)) {
6430f744
YM
3042 dev_err(&hdev->pdev->dev,
3043 "SRIOV is disabled, can not get vport(%d) info.\n", vf);
3044 return NULL;
3045 }
3046
3047 if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3048 dev_err(&hdev->pdev->dev,
3049 "vf id(%d) is out of range(0 <= vfid < %d)\n",
3050 vf, pci_num_vf(hdev->pdev));
3051 return NULL;
3052 }
3053
3054 /* VFs start from 1 in vport */
3055 vf += HCLGE_VF_VPORT_START_NUM;
3056 return &hdev->vport[vf];
3057}
3058
3059static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3060 struct ifla_vf_info *ivf)
3061{
3062 struct hclge_vport *vport = hclge_get_vport(handle);
3063 struct hclge_dev *hdev = vport->back;
3064
3065 vport = hclge_get_vf_vport(hdev, vf);
3066 if (!vport)
3067 return -EINVAL;
3068
3069 ivf->vf = vf;
3070 ivf->linkstate = vport->vf_info.link_state;
22044f95 3071 ivf->spoofchk = vport->vf_info.spoofchk;
e196ec75 3072 ivf->trusted = vport->vf_info.trusted;
ee9e4424
YL
3073 ivf->min_tx_rate = 0;
3074 ivf->max_tx_rate = vport->vf_info.max_tx_rate;
89b40c7f
HT
3075 ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3076 ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3077 ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
6430f744
YM
3078 ether_addr_copy(ivf->mac, vport->vf_info.mac);
3079
3080 return 0;
3081}
3082
3083static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3084 int link_state)
3085{
3086 struct hclge_vport *vport = hclge_get_vport(handle);
3087 struct hclge_dev *hdev = vport->back;
3088
3089 vport = hclge_get_vf_vport(hdev, vf);
3090 if (!vport)
3091 return -EINVAL;
3092
3093 vport->vf_info.link_state = link_state;
3094
3095 return 0;
3096}
3097
ca1d7669
SM
3098static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
3099{
5705b451 3100 u32 cmdq_src_reg, msix_src_reg;
ca1d7669
SM
3101
3102 /* fetch the events from their corresponding regs */
c1a81619 3103 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
5705b451 3104 msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
c1a81619
SM
3105
3106 /* Assumption: if reset and mailbox events happen to be reported
3107 * together, only the reset event is processed in this pass and the
3108 * mailbox events are deferred. Since the RX CMDQ event is not cleared
3109 * this time, another interrupt will be received from H/W just for
3110 * the mailbox.
46ee7350
GL
3111 *
3112 * check for vector0 reset event sources
c1a81619 3113 */
5705b451 3114 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
6dd22bbc
HT
3115 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3116 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3117 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3118 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
f02eb82d 3119 hdev->rst_stats.imp_rst_cnt++;
6dd22bbc
HT
3120 return HCLGE_VECTOR0_EVENT_RST;
3121 }
3122
5705b451 3123 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
65e41e7e 3124 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
8d40854f 3125 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
ca1d7669
SM
3126 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3127 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
f02eb82d 3128 hdev->rst_stats.global_rst_cnt++;
ca1d7669
SM
3129 return HCLGE_VECTOR0_EVENT_RST;
3130 }
3131
f6162d44 3132 /* check for vector0 msix event source */
147175c9 3133 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
9bc6ac91 3134 *clearval = msix_src_reg;
f6162d44 3135 return HCLGE_VECTOR0_EVENT_ERR;
147175c9 3136 }
f6162d44 3137
c1a81619
SM
3138 /* check for vector0 mailbox(=CMDQ RX) event source */
3139 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3140 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3141 *clearval = cmdq_src_reg;
3142 return HCLGE_VECTOR0_EVENT_MBX;
3143 }
ca1d7669 3144
147175c9 3145 /* print other vector0 event source */
9bc6ac91
HT
3146 dev_info(&hdev->pdev->dev,
3147 "CMDQ INT status:0x%x, other INT status:0x%x\n",
3148 cmdq_src_reg, msix_src_reg);
3149 *clearval = msix_src_reg;
3150
ca1d7669
SM
3151 return HCLGE_VECTOR0_EVENT_OTHER;
3152}
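/* Priority implied by hclge_check_event_cause() above: IMP reset first,
 * then global reset, then MSI-X (RAS) errors, then mailbox (CMDQ RX),
 * and anything else is treated as an "other" vector0 event.
 */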
3153
3154static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3155 u32 regclr)
3156{
c1a81619
SM
3157 switch (event_type) {
3158 case HCLGE_VECTOR0_EVENT_RST:
ca1d7669 3159 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
c1a81619
SM
3160 break;
3161 case HCLGE_VECTOR0_EVENT_MBX:
3162 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3163 break;
fa7a4bd5
JS
3164 default:
3165 break;
c1a81619 3166 }
ca1d7669
SM
3167}
3168
8e52a602
XW
3169static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3170{
3171 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3172 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3173 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3174 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3175 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3176}
3177
466b0c00
L
3178static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3179{
3180 writel(enable ? 1 : 0, vector->addr);
3181}
3182
3183static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3184{
3185 struct hclge_dev *hdev = data;
ebaf1908 3186 u32 clearval = 0;
ca1d7669 3187 u32 event_cause;
466b0c00
L
3188
3189 hclge_enable_vector(&hdev->misc_vector, false);
ca1d7669
SM
3190 event_cause = hclge_check_event_cause(hdev, &clearval);
3191
c1a81619 3192 /* vector 0 interrupt is shared with reset and mailbox source events. */
ca1d7669 3193 switch (event_cause) {
f6162d44
SM
3194 case HCLGE_VECTOR0_EVENT_ERR:
3195 /* we do not know what type of reset is required now. This could
3196 * only be decided after we fetch the type of errors which
3197 * caused this event. Therefore, we do the following for now:
3198 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means the
3199 * type of reset to be used is deferred.
3200 * 2. Schedule the reset service task.
3201 * 3. When the service task receives HNAE3_UNKNOWN_RESET type, it
3202 * will fetch the correct type of reset. This is done by first
3203 * decoding the types of errors.
3204 */
3205 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
df561f66 3206 fallthrough;
ca1d7669 3207 case HCLGE_VECTOR0_EVENT_RST:
cb1b9f77 3208 hclge_reset_task_schedule(hdev);
ca1d7669 3209 break;
c1a81619
SM
3210 case HCLGE_VECTOR0_EVENT_MBX:
3211 /* If we are here then,
3212 * 1. either we are not handling any mbx task and we are not
3213 * scheduled either,
3214 * OR
3215 * 2. we could be handling a mbx task but nothing more is
3216 * scheduled.
3217 * In both cases, we should schedule the mbx task as there are
3218 * more mbx messages reported by this interrupt.
3219 */
3220 hclge_mbx_task_schedule(hdev);
f0ad97ac 3221 break;
ca1d7669 3222 default:
f0ad97ac
YL
3223 dev_warn(&hdev->pdev->dev,
3224 "received unknown or unhandled event of vector0\n");
ca1d7669
SM
3225 break;
3226 }
3227
72e2fb07
HT
3228 hclge_clear_event_cause(hdev, event_cause, clearval);
3229
3230 /* Enable the interrupt if it was not caused by reset. When
3231 * clearval equals 0, it means the interrupt status may have been
3232 * cleared by hardware before the driver read the status register;
3233 * in this case, the vector0 interrupt should also be enabled.
3234 */
9bc6ac91
HT
3235 if (!clearval ||
3236 event_cause == HCLGE_VECTOR0_EVENT_MBX) {
cd8c5c26
YL
3237 hclge_enable_vector(&hdev->misc_vector, true);
3238 }
466b0c00
L
3239
3240 return IRQ_HANDLED;
3241}
3242
3243static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3244{
36cbbdf6
PL
3245 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3246 dev_warn(&hdev->pdev->dev,
3247 "vector(vector_id %d) has been freed.\n", vector_id);
3248 return;
3249 }
3250
466b0c00
L
3251 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3252 hdev->num_msi_left += 1;
3253 hdev->num_msi_used -= 1;
3254}
3255
3256static void hclge_get_misc_vector(struct hclge_dev *hdev)
3257{
3258 struct hclge_misc_vector *vector = &hdev->misc_vector;
3259
3260 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3261
3262 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3263 hdev->vector_status[0] = 0;
3264
3265 hdev->num_msi_left -= 1;
3266 hdev->num_msi_used += 1;
3267}
3268
08125454
YL
3269static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3270 const cpumask_t *mask)
3271{
3272 struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3273 affinity_notify);
3274
3275 cpumask_copy(&hdev->affinity_mask, mask);
3276}
3277
3278static void hclge_irq_affinity_release(struct kref *ref)
3279{
3280}
3281
3282static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3283{
3284 irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3285 &hdev->affinity_mask);
3286
3287 hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3288 hdev->affinity_notify.release = hclge_irq_affinity_release;
3289 irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3290 &hdev->affinity_notify);
3291}
3292
3293static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3294{
3295 irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3296 irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3297}
3298
466b0c00
L
3299static int hclge_misc_irq_init(struct hclge_dev *hdev)
3300{
3301 int ret;
3302
3303 hclge_get_misc_vector(hdev);
3304
ca1d7669 3305 /* this would be explicitly freed in the end */
f97c4d82
YL
3306 snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3307 HCLGE_NAME, pci_name(hdev->pdev));
ca1d7669 3308 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
f97c4d82 3309 0, hdev->misc_vector.name, hdev);
466b0c00
L
3310 if (ret) {
3311 hclge_free_vector(hdev, 0);
3312 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3313 hdev->misc_vector.vector_irq);
3314 }
3315
3316 return ret;
3317}
3318
ca1d7669
SM
3319static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3320{
3321 free_irq(hdev->misc_vector.vector_irq, hdev);
3322 hclge_free_vector(hdev, 0);
3323}
3324
af013903
HT
3325int hclge_notify_client(struct hclge_dev *hdev,
3326 enum hnae3_reset_notify_type type)
4ed340ab
L
3327{
3328 struct hnae3_client *client = hdev->nic_client;
3329 u16 i;
3330
9b2f3477 3331 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
bd9109c9
HT
3332 return 0;
3333
4ed340ab
L
3334 if (!client->ops->reset_notify)
3335 return -EOPNOTSUPP;
3336
3337 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3338 struct hnae3_handle *handle = &hdev->vport[i].nic;
3339 int ret;
3340
3341 ret = client->ops->reset_notify(handle, type);
65e41e7e
HT
3342 if (ret) {
3343 dev_err(&hdev->pdev->dev,
3344 "notify nic client failed %d(%d)\n", type, ret);
4ed340ab 3345 return ret;
65e41e7e 3346 }
4ed340ab
L
3347 }
3348
3349 return 0;
3350}
3351
f403a84f
HT
3352static int hclge_notify_roce_client(struct hclge_dev *hdev,
3353 enum hnae3_reset_notify_type type)
3354{
3355 struct hnae3_client *client = hdev->roce_client;
9d8d5a36 3356 int ret;
f403a84f
HT
3357 u16 i;
3358
9b2f3477 3359 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
f403a84f
HT
3360 return 0;
3361
3362 if (!client->ops->reset_notify)
3363 return -EOPNOTSUPP;
3364
3365 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3366 struct hnae3_handle *handle = &hdev->vport[i].roce;
3367
3368 ret = client->ops->reset_notify(handle, type);
3369 if (ret) {
3370 dev_err(&hdev->pdev->dev,
3371 "notify roce client failed %d(%d)",
3372 type, ret);
3373 return ret;
3374 }
3375 }
3376
3377 return ret;
3378}
3379
3380static int hclge_reset_wait(struct hclge_dev *hdev)
3381{
3382#define HCLGE_RESET_WATI_MS 100
3383#define HCLGE_RESET_WAIT_CNT 350
3384
3385 u32 val, reg, reg_bit;
3386 u32 cnt = 0;
3387
3388 switch (hdev->reset_type) {
3389 case HNAE3_IMP_RESET:
3390 reg = HCLGE_GLOBAL_RESET_REG;
3391 reg_bit = HCLGE_IMP_RESET_BIT;
3392 break;
3393 case HNAE3_GLOBAL_RESET:
3394 reg = HCLGE_GLOBAL_RESET_REG;
3395 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3396 break;
3397 case HNAE3_FUNC_RESET:
3398 reg = HCLGE_FUN_RST_ING;
3399 reg_bit = HCLGE_FUN_RST_ING_B;
3400 break;
3401 default:
3402 dev_err(&hdev->pdev->dev,
3403 "Wait for unsupported reset type: %d\n",
3404 hdev->reset_type);
3405 return -EINVAL;
3406 }
3407
3408 val = hclge_read_dev(&hdev->hw, reg);
e4e87715 3409 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3410 msleep(HCLGE_RESET_WATI_MS);
3411 val = hclge_read_dev(&hdev->hw, reg);
3412 cnt++;
3413 }
3414
3415 if (cnt >= HCLGE_RESET_WAIT_CNT) {
3416 dev_warn(&hdev->pdev->dev,
3417 "Wait for reset timeout: %d\n", hdev->reset_type);
3418 return -EBUSY;
3419 }
3420
3421 return 0;
3422}
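/* A rough bound on the wait above: the loop polls the reset status
 * register every HCLGE_RESET_WATI_MS (100 ms) for at most
 * HCLGE_RESET_WAIT_CNT (350) iterations, so a reset that never
 * completes is reported as -EBUSY after roughly 35 seconds.
 */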
3423
3424static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3425{
3426 struct hclge_vf_rst_cmd *req;
3427 struct hclge_desc desc;
3428
3429 req = (struct hclge_vf_rst_cmd *)desc.data;
3430 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3431 req->dest_vfid = func_id;
3432
3433 if (reset)
3434 req->vf_rst = 0x1;
3435
3436 return hclge_cmd_send(&hdev->hw, &desc, 1);
3437}
3438
e511f17b 3439static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3440{
3441 int i;
3442
3443 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3444 struct hclge_vport *vport = &hdev->vport[i];
3445 int ret;
3446
3447 /* Send cmd to set/clear VF's FUNC_RST_ING */
3448 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3449 if (ret) {
3450 dev_err(&hdev->pdev->dev,
adcf738b 3451 "set vf(%u) rst failed %d!\n",
3452 vport->vport_id, ret);
3453 return ret;
3454 }
3455
cc645dfa 3456 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3457 continue;
3458
3459 /* Inform VF to process the reset.
3460 * hclge_inform_reset_assert_to_vf may fail if VF
3461 * driver is not loaded.
3462 */
3463 ret = hclge_inform_reset_assert_to_vf(vport);
3464 if (ret)
3465 dev_warn(&hdev->pdev->dev,
adcf738b 3466 "inform reset to vf(%u) failed %d!\n",
3467 vport->vport_id, ret);
3468 }
3469
3470 return 0;
3471}
3472
3473static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3474{
3475 if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3476 test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3477 test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3478 return;
3479
3480 hclge_mbx_handler(hdev);
3481
3482 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3483}
3484
c3106cac 3485static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3486{
3487 struct hclge_pf_rst_sync_cmd *req;
3488 struct hclge_desc desc;
3489 int cnt = 0;
3490 int ret;
3491
3492 req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3493 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3494
3495 do {
3496 /* VF needs to bring the netdev down via mbx during PF or FLR reset */
3497 hclge_mailbox_service_task(hdev);
3498
3499 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3500 /* to be compatible with old firmware, wait
3501 * 100 ms for the VF to stop IO
3502 */
3503 if (ret == -EOPNOTSUPP) {
3504 msleep(HCLGE_RESET_SYNC_TIME);
c3106cac 3505 return;
427a7bff 3506 } else if (ret) {
3507 dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3508 ret);
3509 return;
427a7bff 3510 } else if (req->all_vf_ready) {
c3106cac 3511 return;
3512 }
3513 msleep(HCLGE_PF_RESET_SYNC_TIME);
3514 hclge_cmd_reuse_desc(&desc, true);
3515 } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3516
c3106cac 3517 dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3518}
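/* The sync loop above re-sends the HCLGE_OPC_QUERY_VF_RST_RDY query and
 * sleeps HCLGE_PF_RESET_SYNC_TIME between tries, for at most
 * HCLGE_PF_RESET_SYNC_CNT iterations, giving the VFs a bounded window
 * to stop IO before the PF reset proceeds.
 */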
3519
3520void hclge_report_hw_error(struct hclge_dev *hdev,
3521 enum hnae3_hw_error_type type)
3522{
3523 struct hnae3_client *client = hdev->nic_client;
3524 u16 i;
3525
3526 if (!client || !client->ops->process_hw_error ||
3527 !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3528 return;
3529
3530 for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3531 client->ops->process_hw_error(&hdev->vport[i].nic, type);
3532}
3533
3534static void hclge_handle_imp_error(struct hclge_dev *hdev)
3535{
3536 u32 reg_val;
3537
3538 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3539 if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3540 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3541 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3542 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3543 }
3544
3545 if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3546 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3547 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3548 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3549 }
3550}
3551
2bfbd35d 3552int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3553{
3554 struct hclge_desc desc;
3555 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3556 int ret;
3557
3558 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
e4e87715 3559 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3560 req->fun_reset_vfid = func_id;
3561
3562 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3563 if (ret)
3564 dev_err(&hdev->pdev->dev,
3565 "send function reset cmd fail, status =%d\n", ret);
3566
3567 return ret;
3568}
3569
f2f432f2 3570static void hclge_do_reset(struct hclge_dev *hdev)
4ed340ab 3571{
4f765d3e 3572 struct hnae3_handle *handle = &hdev->vport[0].nic;
3573 struct pci_dev *pdev = hdev->pdev;
3574 u32 val;
3575
4f765d3e 3576 if (hclge_get_hw_reset_stat(handle)) {
8de91e92 3577 dev_info(&pdev->dev, "hardware reset not finish\n");
3578 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3579 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3580 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3581 return;
3582 }
3583
f2f432f2 3584 switch (hdev->reset_type) {
4ed340ab 3585 case HNAE3_GLOBAL_RESET:
8de91e92 3586 dev_info(&pdev->dev, "global reset requested\n");
4ed340ab 3587 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
e4e87715 3588 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
4ed340ab 3589 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
4ed340ab 3590 break;
4ed340ab 3591 case HNAE3_FUNC_RESET:
8de91e92 3592 dev_info(&pdev->dev, "PF reset requested\n");
3593 /* schedule again to check later */
3594 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3595 hclge_reset_task_schedule(hdev);
3596 break;
3597 default:
3598 dev_warn(&pdev->dev,
8de91e92 3599 "unsupported reset type: %d\n", hdev->reset_type);
3600 break;
3601 }
3602}
3603
123297b7 3604static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3605 unsigned long *addr)
3606{
3607 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
123297b7 3608 struct hclge_dev *hdev = ae_dev->priv;
f2f432f2 3609
3610 /* first, resolve any unknown reset type to the known type(s) */
3611 if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
d9b81c96 3612 u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
5705b451 3613 HCLGE_MISC_VECTOR_INT_STS);
3614 /* we will intentionally ignore any errors from this function
3615 * as we will end up in *some* reset request in any case
3616 */
3617 if (hclge_handle_hw_msix_error(hdev, addr))
3618 dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3619 msix_sts_reg);
3620
3621 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3622 /* We deferred the clearing of the error event which caused the
3623 * interrupt, since it was not possible to do that in interrupt
3624 * context (and this is the reason we introduced the new UNKNOWN
3625 * reset type). Now that the errors have been handled and cleared
3626 * in hardware, we can safely enable interrupts. This is an
3627 * exception to the norm.
3628 */
3629 hclge_enable_vector(&hdev->misc_vector, true);
3630 }
3631
f2f432f2 3632 /* return the highest priority reset level amongst all */
3633 if (test_bit(HNAE3_IMP_RESET, addr)) {
3634 rst_level = HNAE3_IMP_RESET;
3635 clear_bit(HNAE3_IMP_RESET, addr);
3636 clear_bit(HNAE3_GLOBAL_RESET, addr);
3637 clear_bit(HNAE3_FUNC_RESET, addr);
3638 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
f2f432f2 3639 rst_level = HNAE3_GLOBAL_RESET;
7cea834d 3640 clear_bit(HNAE3_GLOBAL_RESET, addr);
3641 clear_bit(HNAE3_FUNC_RESET, addr);
3642 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
f2f432f2 3643 rst_level = HNAE3_FUNC_RESET;
7cea834d 3644 clear_bit(HNAE3_FUNC_RESET, addr);
3645 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3646 rst_level = HNAE3_FLR_RESET;
3647 clear_bit(HNAE3_FLR_RESET, addr);
7cea834d 3648 }
f2f432f2 3649
3650 if (hdev->reset_type != HNAE3_NONE_RESET &&
3651 rst_level < hdev->reset_type)
3652 return HNAE3_NONE_RESET;
3653
3654 return rst_level;
3655}
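/* The priority order implemented above is IMP > GLOBAL > FUNC > FLR.
 * When a higher level reset is selected, the lower level pending bits
 * are cleared as well, since the wider reset already covers them.
 */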
3656
3657static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3658{
3659 u32 clearval = 0;
3660
3661 switch (hdev->reset_type) {
3662 case HNAE3_IMP_RESET:
3663 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3664 break;
3665 case HNAE3_GLOBAL_RESET:
3666 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3667 break;
cd8c5c26 3668 default:
3669 break;
3670 }
3671
3672 if (!clearval)
3673 return;
3674
3675 /* For revision 0x20, the reset interrupt source
3676 * can only be cleared after hardware reset done
3677 */
295ba232 3678 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
3679 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3680 clearval);
3681
3682 hclge_enable_vector(&hdev->misc_vector, true);
3683}
3684
3685static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3686{
3687 u32 reg_val;
3688
3689 reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3690 if (enable)
3691 reg_val |= HCLGE_NIC_SW_RST_RDY;
3692 else
3693 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3694
3695 hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3696}
3697
3698static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3699{
3700 int ret;
3701
3702 ret = hclge_set_all_vf_rst(hdev, true);
3703 if (ret)
3704 return ret;
3705
3706 hclge_func_reset_sync_vf(hdev);
3707
3708 return 0;
3709}
3710
3711static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3712{
6dd22bbc 3713 u32 reg_val;
3714 int ret = 0;
3715
3716 switch (hdev->reset_type) {
3717 case HNAE3_FUNC_RESET:
3718 ret = hclge_func_reset_notify_vf(hdev);
3719 if (ret)
3720 return ret;
427a7bff 3721
3722 ret = hclge_func_reset_cmd(hdev, 0);
3723 if (ret) {
3724 dev_err(&hdev->pdev->dev,
141b95d5 3725 "asserting function reset fail %d!\n", ret);
3726 return ret;
3727 }
3728
3729 /* After performing PF reset, it is not necessary to do the
3730 * mailbox handling or send any command to firmware, because
3731 * any mailbox handling or command to firmware is only valid
3732 * after hclge_cmd_init is called.
3733 */
3734 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
f02eb82d 3735 hdev->rst_stats.pf_rst_cnt++;
35d93a30 3736 break;
6b9a97ee 3737 case HNAE3_FLR_RESET:
3738 ret = hclge_func_reset_notify_vf(hdev);
3739 if (ret)
3740 return ret;
6b9a97ee 3741 break;
6dd22bbc 3742 case HNAE3_IMP_RESET:
a83d2961 3743 hclge_handle_imp_error(hdev);
3744 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3745 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3746 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3747 break;
3748 default:
3749 break;
3750 }
3751
3752 /* inform hardware that preparatory work is done */
3753 msleep(HCLGE_RESET_SYNC_TIME);
6b428b4f 3754 hclge_reset_handshake(hdev, true);
3755 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3756
3757 return ret;
3758}
3759
8e9eee78 3760static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3761{
3762#define MAX_RESET_FAIL_CNT 5
3763
3764 if (hdev->reset_pending) {
3765 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3766 hdev->reset_pending);
3767 return true;
3768 } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3769 HCLGE_RESET_INT_M) {
65e41e7e 3770 dev_info(&hdev->pdev->dev,
2336f19d 3771 "reset failed because new reset interrupt\n");
3772 hclge_clear_reset_cause(hdev);
3773 return false;
3774 } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3775 hdev->rst_stats.reset_fail_cnt++;
3776 set_bit(hdev->reset_type, &hdev->reset_pending);
3777 dev_info(&hdev->pdev->dev,
adcf738b 3778 "re-schedule reset task(%u)\n",
0ecf1f7b 3779 hdev->rst_stats.reset_fail_cnt);
8e9eee78 3780 return true;
3781 }
3782
3783 hclge_clear_reset_cause(hdev);
3784
3785 /* recover the handshake status when reset fail */
3786 hclge_reset_handshake(hdev, true);
3787
65e41e7e 3788 dev_err(&hdev->pdev->dev, "Reset fail!\n");
3789
3790 hclge_dbg_dump_rst_info(hdev);
3791
3792 set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3793
3794 return false;
3795}
3796
3797static int hclge_set_rst_done(struct hclge_dev *hdev)
3798{
3799 struct hclge_pf_rst_done_cmd *req;
3800 struct hclge_desc desc;
648db051 3801 int ret;
3802
3803 req = (struct hclge_pf_rst_done_cmd *)desc.data;
3804 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3805 req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3806
3807 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3808 /* To be compatible with the old firmware, which does not support
3809 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
3810 * return success
3811 */
3812 if (ret == -EOPNOTSUPP) {
3813 dev_warn(&hdev->pdev->dev,
3814 "current firmware does not support command(0x%x)!\n",
3815 HCLGE_OPC_PF_RST_DONE);
3816 return 0;
3817 } else if (ret) {
3818 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3819 ret);
3820 }
3821
3822 return ret;
3823}
3824
3825static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3826{
3827 int ret = 0;
3828
3829 switch (hdev->reset_type) {
3830 case HNAE3_FUNC_RESET:
6b9a97ee 3831 case HNAE3_FLR_RESET:
3832 ret = hclge_set_all_vf_rst(hdev, false);
3833 break;
72e2fb07 3834 case HNAE3_GLOBAL_RESET:
3835 case HNAE3_IMP_RESET:
3836 ret = hclge_set_rst_done(hdev);
3837 break;
3838 default:
3839 break;
3840 }
3841
3842 /* clear up the handshake status after re-initialize done */
3843 hclge_reset_handshake(hdev, false);
3844
3845 return ret;
3846}
3847
3848static int hclge_reset_stack(struct hclge_dev *hdev)
3849{
3850 int ret;
3851
3852 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3853 if (ret)
3854 return ret;
3855
3856 ret = hclge_reset_ae_dev(hdev->ae_dev);
3857 if (ret)
3858 return ret;
3859
039ba863 3860 return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3861}
3862
d4fa0656 3863static int hclge_reset_prepare(struct hclge_dev *hdev)
f2f432f2 3864{
65e41e7e 3865 int ret;
9de0b86f 3866
f02eb82d 3867 hdev->rst_stats.reset_cnt++;
f2f432f2 3868 /* perform reset of the stack & ae device for a client */
3869 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3870 if (ret)
d4fa0656 3871 return ret;
65e41e7e 3872
6d4fab39 3873 rtnl_lock();
65e41e7e 3874 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
65e41e7e 3875 rtnl_unlock();
65e41e7e 3876 if (ret)
d4fa0656 3877 return ret;
cd8c5c26 3878
3879 return hclge_reset_prepare_wait(hdev);
3880}
3881
3882static int hclge_reset_rebuild(struct hclge_dev *hdev)
3883{
3884 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3885 enum hnae3_reset_type reset_level;
3886 int ret;
f2f432f2 3887
3888 hdev->rst_stats.hw_reset_done_cnt++;
3889
3890 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3891 if (ret)
d4fa0656 3892 return ret;
3893
3894 rtnl_lock();
63cbf7a9 3895 ret = hclge_reset_stack(hdev);
d4fa0656 3896 rtnl_unlock();
1f609492 3897 if (ret)
d4fa0656 3898 return ret;
1f609492 3899
3900 hclge_clear_reset_cause(hdev);
3901
3902 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3903 /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
3904 * times
3905 */
3906 if (ret &&
3907 hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
d4fa0656 3908 return ret;
63cbf7a9 3909
3910 ret = hclge_reset_prepare_up(hdev);
3911 if (ret)
3912 return ret;
3913
63cbf7a9 3914 rtnl_lock();
65e41e7e 3915 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
6d4fab39 3916 rtnl_unlock();
3917 if (ret)
3918 return ret;
f403a84f 3919
3920 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3921 if (ret)
d4fa0656 3922 return ret;
65e41e7e 3923
b644a8d4 3924 hdev->last_reset_time = jiffies;
0ecf1f7b 3925 hdev->rst_stats.reset_fail_cnt = 0;
f02eb82d 3926 hdev->rst_stats.reset_done_cnt++;
d5432455 3927 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3928
3929 /* if default_reset_request has a higher level reset request,
3930 * it should be handled as soon as possible, since some errors
3931 * need this kind of reset to be fixed.
3932 */
3933 reset_level = hclge_get_reset_level(ae_dev,
3934 &hdev->default_reset_request);
3935 if (reset_level != HNAE3_NONE_RESET)
3936 set_bit(reset_level, &hdev->reset_request);
b644a8d4 3937
3938 return 0;
3939}
3940
3941static void hclge_reset(struct hclge_dev *hdev)
3942{
3943 if (hclge_reset_prepare(hdev))
3944 goto err_reset;
3945
3946 if (hclge_reset_wait(hdev))
3947 goto err_reset;
3948
3949 if (hclge_reset_rebuild(hdev))
3950 goto err_reset;
3951
3952 return;
3953
65e41e7e 3954err_reset:
8e9eee78 3955 if (hclge_reset_err_handle(hdev))
65e41e7e 3956 hclge_reset_task_schedule(hdev);
3957}
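/* The reset flow above is split into three stages: prepare (notify the
 * clients and assert the reset), wait (poll hardware until the reset
 * completes) and rebuild (re-init the ae device and bring the clients
 * back up). Any failure falls through to hclge_reset_err_handle(),
 * which decides whether the reset task should be re-scheduled.
 */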
3958
3959static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3960{
3961 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3962 struct hclge_dev *hdev = ae_dev->priv;
3963
3964 /* We might end up getting called broadly because of the two cases below:
3965 * 1. Recoverable error was conveyed through APEI and only way to bring
3966 * normalcy is to reset.
3967 * 2. A new reset request from the stack due to timeout
3968 *
3969 * For the first case, the error event might not have an ae handle available.
3970 * Check if this is a new reset request and we are not here just because
3971 * last reset attempt did not succeed and watchdog hit us again. We will
3972 * know this if last reset request did not occur very recently (watchdog
3973 * timer = 5*HZ, let us check after a sufficiently large time, say 4*5*HZ)
3974 * In case of new request we reset the "reset level" to PF reset.
3975 * And if it is a repeat reset request of the most recent one then we
3976 * want to make sure we throttle the reset request. Therefore, we will
3977 * not allow it again before 3*HZ times.
6d4c3981 3978 */
3979 if (!handle)
3980 handle = &hdev->vport[0].nic;
3981
b37ce587 3982 if (time_before(jiffies, (hdev->last_reset_time +
3983 HCLGE_RESET_INTERVAL))) {
3984 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
9de0b86f 3985 return;
db4d3d55 3986 } else if (hdev->default_reset_request) {
0742ed7c 3987 hdev->reset_level =
123297b7 3988 hclge_get_reset_level(ae_dev,
720bd583 3989 &hdev->default_reset_request);
db4d3d55 3990 } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
0742ed7c 3991 hdev->reset_level = HNAE3_FUNC_RESET;
db4d3d55 3992 }
4ed340ab 3993
96e65abb 3994 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
0742ed7c 3995 hdev->reset_level);
3996
3997 /* request reset & schedule reset task */
0742ed7c 3998 set_bit(hdev->reset_level, &hdev->reset_request);
3999 hclge_reset_task_schedule(hdev);
4000
4001 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
4002 hdev->reset_level++;
4003}
4004
4005static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
4006 enum hnae3_reset_type rst_type)
4007{
4008 struct hclge_dev *hdev = ae_dev->priv;
4009
4010 set_bit(rst_type, &hdev->default_reset_request);
4011}
4012
4013static void hclge_reset_timer(struct timer_list *t)
4014{
4015 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
4016
4017 /* if default_reset_request has no value, it means that this reset
4018 * request has already been handled, so just return here
4019 */
4020 if (!hdev->default_reset_request)
4021 return;
4022
65e41e7e 4023 dev_info(&hdev->pdev->dev,
e3b84ed2 4024 "triggering reset in reset timer\n");
4025 hclge_reset_event(hdev->pdev, NULL);
4026}
4027
4028static void hclge_reset_subtask(struct hclge_dev *hdev)
4029{
4030 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4031
4032 /* Check if there is any ongoing reset in the hardware. This status can
4033 * be checked from reset_pending. If there is one, we need to wait for
4034 * the hardware to complete the reset.
4035 * a. If we are able to figure out in reasonable time that the hardware
4036 * has fully reset, then we can proceed with the driver and client
4037 * reset.
4038 * b. Otherwise, we can come back later to check this status, so
4039 * re-schedule now.
4040 */
0742ed7c 4041 hdev->last_reset_time = jiffies;
123297b7 4042 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
4043 if (hdev->reset_type != HNAE3_NONE_RESET)
4044 hclge_reset(hdev);
4ed340ab 4045
f2f432f2 4046 /* check if we got any *new* reset requests to be honored */
123297b7 4047 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
4048 if (hdev->reset_type != HNAE3_NONE_RESET)
4049 hclge_do_reset(hdev);
4ed340ab 4050
4051 hdev->reset_type = HNAE3_NONE_RESET;
4052}
4053
1c6dfe6f 4054static void hclge_reset_service_task(struct hclge_dev *hdev)
466b0c00 4055{
4056 if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
4057 return;
cb1b9f77 4058
4059 down(&hdev->reset_sem);
4060 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
cb1b9f77 4061
4ed340ab 4062 hclge_reset_subtask(hdev);
4063
4064 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
8627bded 4065 up(&hdev->reset_sem);
4066}
4067
4068static void hclge_update_vport_alive(struct hclge_dev *hdev)
4069{
4070 int i;
4071
4072 /* start from vport 1, since the PF (vport 0) is always alive */
4073 for (i = 1; i < hdev->num_alloc_vport; i++) {
4074 struct hclge_vport *vport = &hdev->vport[i];
4075
4076 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
4077 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
4078
4079 /* If vf is not alive, set to default value */
4080 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4081 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
4082 }
4083}
4084
1c6dfe6f 4085static void hclge_periodic_service_task(struct hclge_dev *hdev)
46a3df9f 4086{
1c6dfe6f 4087 unsigned long delta = round_jiffies_relative(HZ);
7be1b9f3 4088
4089 if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
4090 return;
4091
4092 /* Always handle the link updating to make sure link state is
4093 * updated when it is triggered by mbx.
4094 */
4095 hclge_update_link_status(hdev);
ee4bcd3b 4096 hclge_sync_mac_table(hdev);
c631c696 4097 hclge_sync_promisc_mode(hdev);
46a3df9f 4098
4099 if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4100 delta = jiffies - hdev->last_serv_processed;
4101
4102 if (delta < round_jiffies_relative(HZ)) {
4103 delta = round_jiffies_relative(HZ) - delta;
4104 goto out;
4105 }
4106 }
4107
1c6dfe6f 4108 hdev->serv_processed_cnt++;
a6d818e3 4109 hclge_update_vport_alive(hdev);
4110
4111 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4112 hdev->last_serv_processed = jiffies;
4113 goto out;
4114 }
4115
4116 if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4117 hclge_update_stats_for_all(hdev);
4118
4119 hclge_update_port_info(hdev);
fe4144d4 4120 hclge_sync_vlan_filter(hdev);
db4d3d55 4121
1c6dfe6f 4122 if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
d93ed94f 4123 hclge_rfs_filter_expire(hdev);
7be1b9f3 4124
4125 hdev->last_serv_processed = jiffies;
4126
4127out:
4128 hclge_task_schedule(hdev, delta);
4129}
4130
4131static void hclge_service_task(struct work_struct *work)
4132{
4133 struct hclge_dev *hdev =
4134 container_of(work, struct hclge_dev, service_task.work);
4135
4136 hclge_reset_service_task(hdev);
4137 hclge_mailbox_service_task(hdev);
4138 hclge_periodic_service_task(hdev);
4139
4140 /* Handle reset and mbx again in case periodical task delays the
4141 * handling by calling hclge_task_schedule() in
4142 * hclge_periodic_service_task().
4143 */
4144 hclge_reset_service_task(hdev);
4145 hclge_mailbox_service_task(hdev);
4146}
4147
4148struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4149{
4150 /* VF handle has no client */
4151 if (!handle->client)
4152 return container_of(handle, struct hclge_vport, nic);
4153 else if (handle->client->type == HNAE3_CLIENT_ROCE)
4154 return container_of(handle, struct hclge_vport, roce);
4155 else
4156 return container_of(handle, struct hclge_vport, nic);
4157}
4158
4159static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
4160 struct hnae3_vector_info *vector_info)
4161{
4162#define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 64
4163
4164 vector_info->vector = pci_irq_vector(hdev->pdev, idx);
4165
4166 /* an extended offset is needed to config vectors >= 64 */
4167 if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
4168 vector_info->io_addr = hdev->hw.io_base +
4169 HCLGE_VECTOR_REG_BASE +
4170 (idx - 1) * HCLGE_VECTOR_REG_OFFSET;
4171 else
4172 vector_info->io_addr = hdev->hw.io_base +
4173 HCLGE_VECTOR_EXT_REG_BASE +
4174 (idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4175 HCLGE_VECTOR_REG_OFFSET_H +
4176 (idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4177 HCLGE_VECTOR_REG_OFFSET;
4178
4179 hdev->vector_status[idx] = hdev->vport[0].vport_id;
4180 hdev->vector_irq[idx] = vector_info->vector;
4181}
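/* Sketch of the register layout assumed above: vectors below
 * HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 use the base region with a fixed
 * per-vector stride, while higher vectors switch to the extended region
 * and split the index into a high part (which block of 64 vectors) and
 * a low part (the offset within that block).
 */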
4182
4183static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4184 struct hnae3_vector_info *vector_info)
4185{
4186 struct hclge_vport *vport = hclge_get_vport(handle);
4187 struct hnae3_vector_info *vector = vector_info;
4188 struct hclge_dev *hdev = vport->back;
4189 int alloc = 0;
4190 u16 i = 0;
4191 u16 j;
46a3df9f 4192
580a05f9 4193 vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4194 vector_num = min(hdev->num_msi_left, vector_num);
4195
4196 for (j = 0; j < vector_num; j++) {
3a6863e4 4197 while (++i < hdev->num_nic_msi) {
46a3df9f 4198 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
3a6863e4 4199 hclge_get_vector_info(hdev, i, vector);
4200 vector++;
4201 alloc++;
4202
4203 break;
4204 }
4205 }
4206 }
4207 hdev->num_msi_left -= alloc;
4208 hdev->num_msi_used += alloc;
4209
4210 return alloc;
4211}
4212
4213static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4214{
4215 int i;
4216
4217 for (i = 0; i < hdev->num_msi; i++)
4218 if (vector == hdev->vector_irq[i])
4219 return i;
4220
4221 return -EINVAL;
4222}
4223
4224static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4225{
4226 struct hclge_vport *vport = hclge_get_vport(handle);
4227 struct hclge_dev *hdev = vport->back;
4228 int vector_id;
4229
4230 vector_id = hclge_get_vector_index(hdev, vector);
4231 if (vector_id < 0) {
4232 dev_err(&hdev->pdev->dev,
6f8e330d 4233 "Get vector index fail. vector = %d\n", vector);
4234 return vector_id;
4235 }
4236
4237 hclge_free_vector(hdev, vector_id);
4238
4239 return 0;
4240}
4241
4242static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4243{
4244 return HCLGE_RSS_KEY_SIZE;
4245}
4246
4247static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4248 const u8 hfunc, const u8 *key)
4249{
d44f9b63 4250 struct hclge_rss_config_cmd *req;
ebaf1908 4251 unsigned int key_offset = 0;
46a3df9f 4252 struct hclge_desc desc;
3caf772b 4253 int key_counts;
4254 int key_size;
4255 int ret;
4256
3caf772b 4257 key_counts = HCLGE_RSS_KEY_SIZE;
d44f9b63 4258 req = (struct hclge_rss_config_cmd *)desc.data;
46a3df9f 4259
3caf772b 4260 while (key_counts) {
4261 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4262 false);
4263
4264 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4265 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4266
3caf772b 4267 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4268 memcpy(req->hash_key,
4269 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4270
4271 key_counts -= key_size;
4272 key_offset++;
4273 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4274 if (ret) {
4275 dev_err(&hdev->pdev->dev,
4276 "Configure RSS config fail, status = %d\n",
4277 ret);
4278 return ret;
4279 }
4280 }
4281 return 0;
4282}
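/* Example of the chunking done above: the hash key is written to
 * firmware in pieces of HCLGE_RSS_HASH_KEY_NUM bytes, one descriptor
 * per piece, with key_offset telling firmware where the piece starts.
 * For instance, a 40-byte key split into 16-byte pieces would be sent
 * as three descriptors (16 + 16 + 8 bytes).
 */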
4283
f1c2e66d 4284static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
46a3df9f 4285{
d44f9b63 4286 struct hclge_rss_indirection_table_cmd *req;
46a3df9f 4287 struct hclge_desc desc;
87ce161e 4288 int rss_cfg_tbl_num;
4289 u8 rss_msb_oft;
4290 u8 rss_msb_val;
46a3df9f 4291 int ret;
4292 u16 qid;
4293 int i;
4294 u32 j;
46a3df9f 4295
d44f9b63 4296 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4297 rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
4298 HCLGE_RSS_CFG_TBL_SIZE;
46a3df9f 4299
87ce161e 4300 for (i = 0; i < rss_cfg_tbl_num; i++) {
4301 hclge_cmd_setup_basic_desc
4302 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4303
4304 req->start_table_index =
4305 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4306 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4307 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) {
4308 qid = indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4309 req->rss_qid_l[j] = qid & 0xff;
4310 rss_msb_oft =
4311 j * HCLGE_RSS_CFG_TBL_BW_H / BITS_PER_BYTE;
4312 rss_msb_val = (qid >> HCLGE_RSS_CFG_TBL_BW_L & 0x1) <<
4313 (j * HCLGE_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);
4314 req->rss_qid_h[rss_msb_oft] |= rss_msb_val;
4315 }
4316 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4317 if (ret) {
4318 dev_err(&hdev->pdev->dev,
4319 "Configure rss indir table fail,status = %d\n",
4320 ret);
4321 return ret;
4322 }
4323 }
4324 return 0;
4325}
4326
4327static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4328 u16 *tc_size, u16 *tc_offset)
4329{
d44f9b63 4330 struct hclge_rss_tc_mode_cmd *req;
4331 struct hclge_desc desc;
4332 int ret;
4333 int i;
4334
4335 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
d44f9b63 4336 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4337
4338 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4339 u16 mode = 0;
4340
4341 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4342 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4343 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4344 hnae3_set_bit(mode, HCLGE_RSS_TC_SIZE_MSB_B,
4345 tc_size[i] >> HCLGE_RSS_TC_SIZE_MSB_OFFSET & 0x1);
4346 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4347 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4348
4349 req->rss_tc_mode[i] = cpu_to_le16(mode);
4350 }
4351
4352 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 4353 if (ret)
4354 dev_err(&hdev->pdev->dev,
4355 "Configure rss tc mode fail, status = %d\n", ret);
46a3df9f 4356
3f639907 4357 return ret;
4358}
4359
4360static void hclge_get_rss_type(struct hclge_vport *vport)
4361{
4362 if (vport->rss_tuple_sets.ipv4_tcp_en ||
4363 vport->rss_tuple_sets.ipv4_udp_en ||
4364 vport->rss_tuple_sets.ipv4_sctp_en ||
4365 vport->rss_tuple_sets.ipv6_tcp_en ||
4366 vport->rss_tuple_sets.ipv6_udp_en ||
4367 vport->rss_tuple_sets.ipv6_sctp_en)
4368 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4369 else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4370 vport->rss_tuple_sets.ipv6_fragment_en)
4371 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4372 else
4373 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4374}
4375
4376static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4377{
d44f9b63 4378 struct hclge_rss_input_tuple_cmd *req;
4379 struct hclge_desc desc;
4380 int ret;
4381
4382 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4383
d44f9b63 4384 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4385
4386 /* Get the tuple cfg from pf */
4387 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4388 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4389 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4390 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4391 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4392 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4393 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4394 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
232fc64b 4395 hclge_get_rss_type(&hdev->vport[0]);
46a3df9f 4396 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 4397 if (ret)
4398 dev_err(&hdev->pdev->dev,
4399 "Configure rss input fail, status = %d\n", ret);
3f639907 4400 return ret;
4401}
4402
4403static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4404 u8 *key, u8 *hfunc)
4405{
87ce161e 4406 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
46a3df9f 4407 struct hclge_vport *vport = hclge_get_vport(handle);
4408 int i;
4409
4410 /* Get hash algorithm */
4411 if (hfunc) {
4412 switch (vport->rss_algo) {
4413 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4414 *hfunc = ETH_RSS_HASH_TOP;
4415 break;
4416 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4417 *hfunc = ETH_RSS_HASH_XOR;
4418 break;
4419 default:
4420 *hfunc = ETH_RSS_HASH_UNKNOWN;
4421 break;
4422 }
4423 }
46a3df9f
S
4424
4425 /* Get the RSS Key required by the user */
4426 if (key)
4427 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4428
4429 /* Get indirect table */
4430 if (indir)
87ce161e 4431 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
46a3df9f
S
4432 indir[i] = vport->rss_indirection_tbl[i];
4433
4434 return 0;
4435}
4436
4437static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4438 const u8 *key, const u8 hfunc)
4439{
87ce161e 4440 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
46a3df9f
S
4441 struct hclge_vport *vport = hclge_get_vport(handle);
4442 struct hclge_dev *hdev = vport->back;
4443 u8 hash_algo;
4444 int ret, i;
4445
4446 /* Set the RSS Hash Key if specified by the user */
4447 if (key) {
775501a1
JS
4448 switch (hfunc) {
4449 case ETH_RSS_HASH_TOP:
46a3df9f 4450 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
775501a1
JS
4451 break;
4452 case ETH_RSS_HASH_XOR:
4453 hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4454 break;
4455 case ETH_RSS_HASH_NO_CHANGE:
4456 hash_algo = vport->rss_algo;
4457 break;
4458 default:
46a3df9f 4459 return -EINVAL;
775501a1
JS
4460 }
4461
46a3df9f
S
4462 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4463 if (ret)
4464 return ret;
89523cfa
YL
4465
4466 /* Update the shadow RSS key with the user specified key */
4467 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4468 vport->rss_algo = hash_algo;
46a3df9f
S
4469 }
4470
4471 /* Update the shadow RSS table with user specified qids */
87ce161e 4472 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
46a3df9f
S
4473 vport->rss_indirection_tbl[i] = indir[i];
4474
4475 /* Update the hardware */
89523cfa 4476 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
46a3df9f
S
4477}
4478
f7db940a
L
4479static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4480{
4481 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4482
4483 if (nfc->data & RXH_L4_B_2_3)
4484 hash_sets |= HCLGE_D_PORT_BIT;
4485 else
4486 hash_sets &= ~HCLGE_D_PORT_BIT;
4487
4488 if (nfc->data & RXH_IP_SRC)
4489 hash_sets |= HCLGE_S_IP_BIT;
4490 else
4491 hash_sets &= ~HCLGE_S_IP_BIT;
4492
4493 if (nfc->data & RXH_IP_DST)
4494 hash_sets |= HCLGE_D_IP_BIT;
4495 else
4496 hash_sets &= ~HCLGE_D_IP_BIT;
4497
4498 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4499 hash_sets |= HCLGE_V_TAG_BIT;
4500
4501 return hash_sets;
4502}
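/* For reference, the mapping above is: RXH_L4_B_0_1 -> HCLGE_S_PORT_BIT,
 * RXH_L4_B_2_3 -> HCLGE_D_PORT_BIT, RXH_IP_SRC -> HCLGE_S_IP_BIT and
 * RXH_IP_DST -> HCLGE_D_IP_BIT, with HCLGE_V_TAG_BIT always set for
 * SCTP flows.
 */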
4503
e291eff3
HT
4504static int hclge_init_rss_tuple_cmd(struct hclge_vport *vport,
4505 struct ethtool_rxnfc *nfc,
4506 struct hclge_rss_input_tuple_cmd *req)
f7db940a 4507{
f7db940a 4508 struct hclge_dev *hdev = vport->back;
f7db940a 4509 u8 tuple_sets;
f7db940a 4510
6f2af429
YL
4511 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4512 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4513 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4514 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4515 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4516 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4517 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4518 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
f7db940a
L
4519
4520 tuple_sets = hclge_get_rss_hash_bits(nfc);
4521 switch (nfc->flow_type) {
4522 case TCP_V4_FLOW:
4523 req->ipv4_tcp_en = tuple_sets;
4524 break;
4525 case TCP_V6_FLOW:
4526 req->ipv6_tcp_en = tuple_sets;
4527 break;
4528 case UDP_V4_FLOW:
4529 req->ipv4_udp_en = tuple_sets;
4530 break;
4531 case UDP_V6_FLOW:
4532 req->ipv6_udp_en = tuple_sets;
4533 break;
4534 case SCTP_V4_FLOW:
4535 req->ipv4_sctp_en = tuple_sets;
4536 break;
4537 case SCTP_V6_FLOW:
ab6e32d2
JS
4538 if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
4539 (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
f7db940a
L
4540 return -EINVAL;
4541
4542 req->ipv6_sctp_en = tuple_sets;
4543 break;
4544 case IPV4_FLOW:
4545 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4546 break;
4547 case IPV6_FLOW:
4548 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4549 break;
4550 default:
4551 return -EINVAL;
4552 }
4553
e291eff3
HT
4554 return 0;
4555}
4556
4557static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4558 struct ethtool_rxnfc *nfc)
4559{
4560 struct hclge_vport *vport = hclge_get_vport(handle);
4561 struct hclge_dev *hdev = vport->back;
4562 struct hclge_rss_input_tuple_cmd *req;
4563 struct hclge_desc desc;
4564 int ret;
4565
4566 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4567 RXH_L4_B_0_1 | RXH_L4_B_2_3))
4568 return -EINVAL;
4569
4570 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4571 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4572
4573 ret = hclge_init_rss_tuple_cmd(vport, nfc, req);
4574 if (ret) {
4575 dev_err(&hdev->pdev->dev,
4576 "failed to init rss tuple cmd, ret = %d\n", ret);
4577 return ret;
4578 }
4579
f7db940a 4580 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6f2af429 4581 if (ret) {
f7db940a
L
4582 dev_err(&hdev->pdev->dev,
4583 "Set rss tuple fail, status = %d\n", ret);
6f2af429
YL
4584 return ret;
4585 }
f7db940a 4586
6f2af429
YL
4587 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4588 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4589 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4590 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4591 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4592 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4593 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4594 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
232fc64b 4595 hclge_get_rss_type(vport);
6f2af429 4596 return 0;
f7db940a
L
4597}
4598
405642a1
JS
4599static int hclge_get_vport_rss_tuple(struct hclge_vport *vport, int flow_type,
4600 u8 *tuple_sets)
07d29954 4601{
405642a1 4602 switch (flow_type) {
07d29954 4603 case TCP_V4_FLOW:
405642a1 4604 *tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
07d29954
L
4605 break;
4606 case UDP_V4_FLOW:
405642a1 4607 *tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
07d29954
L
4608 break;
4609 case TCP_V6_FLOW:
405642a1 4610 *tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
07d29954
L
4611 break;
4612 case UDP_V6_FLOW:
405642a1 4613 *tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
07d29954
L
4614 break;
4615 case SCTP_V4_FLOW:
405642a1 4616 *tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
07d29954
L
4617 break;
4618 case SCTP_V6_FLOW:
405642a1 4619 *tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
07d29954
L
4620 break;
4621 case IPV4_FLOW:
4622 case IPV6_FLOW:
405642a1 4623 *tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
07d29954
L
4624 break;
4625 default:
4626 return -EINVAL;
4627 }
4628
405642a1
JS
4629 return 0;
4630}
4631
4632static u64 hclge_convert_rss_tuple(u8 tuple_sets)
4633{
4634 u64 tuple_data = 0;
07d29954
L
4635
4636 if (tuple_sets & HCLGE_D_PORT_BIT)
405642a1 4637 tuple_data |= RXH_L4_B_2_3;
07d29954 4638 if (tuple_sets & HCLGE_S_PORT_BIT)
405642a1 4639 tuple_data |= RXH_L4_B_0_1;
07d29954 4640 if (tuple_sets & HCLGE_D_IP_BIT)
405642a1 4641 tuple_data |= RXH_IP_DST;
07d29954 4642 if (tuple_sets & HCLGE_S_IP_BIT)
405642a1
JS
4643 tuple_data |= RXH_IP_SRC;
4644
4645 return tuple_data;
4646}
4647
4648static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4649 struct ethtool_rxnfc *nfc)
4650{
4651 struct hclge_vport *vport = hclge_get_vport(handle);
4652 u8 tuple_sets;
4653 int ret;
4654
4655 nfc->data = 0;
4656
4657 ret = hclge_get_vport_rss_tuple(vport, nfc->flow_type, &tuple_sets);
4658 if (ret || !tuple_sets)
4659 return ret;
4660
4661 nfc->data = hclge_convert_rss_tuple(tuple_sets);
07d29954
L
4662
4663 return 0;
4664}
4665
46a3df9f
S
4666static int hclge_get_tc_size(struct hnae3_handle *handle)
4667{
4668 struct hclge_vport *vport = hclge_get_vport(handle);
4669 struct hclge_dev *hdev = vport->back;
4670
f1c2e66d 4671 return hdev->pf_rss_size_max;
46a3df9f
S
4672}
4673
5a5c9091 4674static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
46a3df9f 4675{
5a5c9091 4676 struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
46a3df9f 4677 struct hclge_vport *vport = hdev->vport;
354d0fab 4678 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
5a5c9091 4679 u16 tc_valid[HCLGE_MAX_TC_NUM] = {0};
354d0fab 4680 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
5a5c9091
JS
4681 struct hnae3_tc_info *tc_info;
4682 u16 roundup_size;
4683 u16 rss_size;
4684 int i;
4685
4686 tc_info = &vport->nic.kinfo.tc_info;
4687 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4688 rss_size = tc_info->tqp_count[i];
4689 tc_valid[i] = 0;
4690
4691 if (!(hdev->hw_tc_map & BIT(i)))
4692 continue;
4693
4694 /* tc_size set to hardware is the log2 of the roundup power of two
4695 * of rss_size; the actual queue size is limited by the indirection
4696 * table.
4697 */
4698 if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size ||
4699 rss_size == 0) {
4700 dev_err(&hdev->pdev->dev,
4701 "Configure rss tc size failed, invalid TC_SIZE = %u\n",
4702 rss_size);
4703 return -EINVAL;
4704 }
4705
4706 roundup_size = roundup_pow_of_two(rss_size);
4707 roundup_size = ilog2(roundup_size);
4708
4709 tc_valid[i] = 1;
4710 tc_size[i] = roundup_size;
4711 tc_offset[i] = tc_info->tqp_offset[i];
4712 }
4713
4714 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4715}
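/* Worked example for the tc_size computation above: if a TC owns 6
 * queues, roundup_pow_of_two(6) is 8 and ilog2(8) is 3, so tc_size is
 * written as 3; the hardware then hashes into a power-of-two range and
 * the indirection table maps it back onto the real queues.
 */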
4716
4717int hclge_rss_init_hw(struct hclge_dev *hdev)
4718{
4719 struct hclge_vport *vport = hdev->vport;
f1c2e66d 4720 u16 *rss_indir = vport[0].rss_indirection_tbl;
268f5dfa
YL
4721 u8 *key = vport[0].rss_hash_key;
4722 u8 hfunc = vport[0].rss_algo;
ebaf1908 4723 int ret;
68ece54e 4724
46a3df9f
S
4725 ret = hclge_set_rss_indir_table(hdev, rss_indir);
4726 if (ret)
268f5dfa 4727 return ret;
46a3df9f 4728
46a3df9f
S
4729 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4730 if (ret)
268f5dfa 4731 return ret;
46a3df9f
S
4732
4733 ret = hclge_set_rss_input_tuple(hdev);
4734 if (ret)
268f5dfa 4735 return ret;
46a3df9f 4736
5a5c9091 4737 return hclge_init_rss_tc_mode(hdev);
268f5dfa 4738}
46a3df9f 4739
268f5dfa
YL
4740void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4741{
4742 struct hclge_vport *vport = hdev->vport;
4743 int i, j;
46a3df9f 4744
268f5dfa 4745 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
87ce161e 4746 for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
268f5dfa
YL
4747 vport[j].rss_indirection_tbl[i] =
4748 i % vport[j].alloc_rss_size;
4749 }
4750}
4751
87ce161e 4752static int hclge_rss_init_cfg(struct hclge_dev *hdev)
268f5dfa 4753{
87ce161e 4754 u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
472d7ece 4755 int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
268f5dfa 4756 struct hclge_vport *vport = hdev->vport;
472d7ece 4757
295ba232 4758 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
472d7ece 4759 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
268f5dfa 4760
268f5dfa 4761 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
87ce161e
GH
4762 u16 *rss_ind_tbl;
4763
268f5dfa
YL
4764 vport[i].rss_tuple_sets.ipv4_tcp_en =
4765 HCLGE_RSS_INPUT_TUPLE_OTHER;
4766 vport[i].rss_tuple_sets.ipv4_udp_en =
4767 HCLGE_RSS_INPUT_TUPLE_OTHER;
4768 vport[i].rss_tuple_sets.ipv4_sctp_en =
4769 HCLGE_RSS_INPUT_TUPLE_SCTP;
4770 vport[i].rss_tuple_sets.ipv4_fragment_en =
4771 HCLGE_RSS_INPUT_TUPLE_OTHER;
4772 vport[i].rss_tuple_sets.ipv6_tcp_en =
4773 HCLGE_RSS_INPUT_TUPLE_OTHER;
4774 vport[i].rss_tuple_sets.ipv6_udp_en =
4775 HCLGE_RSS_INPUT_TUPLE_OTHER;
4776 vport[i].rss_tuple_sets.ipv6_sctp_en =
ab6e32d2
JS
4777 hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
4778 HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
268f5dfa
YL
4779 HCLGE_RSS_INPUT_TUPLE_SCTP;
4780 vport[i].rss_tuple_sets.ipv6_fragment_en =
4781 HCLGE_RSS_INPUT_TUPLE_OTHER;
4782
472d7ece 4783 vport[i].rss_algo = rss_algo;
ea739c90 4784
87ce161e
GH
4785 rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
4786 sizeof(*rss_ind_tbl), GFP_KERNEL);
4787 if (!rss_ind_tbl)
4788 return -ENOMEM;
4789
4790 vport[i].rss_indirection_tbl = rss_ind_tbl;
472d7ece
JS
4791 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4792 HCLGE_RSS_KEY_SIZE);
268f5dfa
YL
4793 }
4794
4795 hclge_rss_indir_init_cfg(hdev);
87ce161e
GH
4796
4797 return 0;
46a3df9f
S
4798}
4799
84e095d6
SM
4800int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4801 int vector_id, bool en,
4802 struct hnae3_ring_chain_node *ring_chain)
46a3df9f
S
4803{
4804 struct hclge_dev *hdev = vport->back;
46a3df9f
S
4805 struct hnae3_ring_chain_node *node;
4806 struct hclge_desc desc;
37417c66
GL
4807 struct hclge_ctrl_vector_chain_cmd *req =
4808 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
84e095d6
SM
4809 enum hclge_cmd_status status;
4810 enum hclge_opcode_type op;
4811 u16 tqp_type_and_id;
46a3df9f
S
4812 int i;
4813
84e095d6
SM
4814 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4815 hclge_cmd_setup_basic_desc(&desc, op, false);
3a6863e4
YM
4816 req->int_vector_id_l = hnae3_get_field(vector_id,
4817 HCLGE_VECTOR_ID_L_M,
4818 HCLGE_VECTOR_ID_L_S);
4819 req->int_vector_id_h = hnae3_get_field(vector_id,
4820 HCLGE_VECTOR_ID_H_M,
4821 HCLGE_VECTOR_ID_H_S);
46a3df9f
S
4822
4823 i = 0;
4824 for (node = ring_chain; node; node = node->next) {
84e095d6 4825 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
e4e87715
PL
4826 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
4827 HCLGE_INT_TYPE_S,
4828 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4829 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4830 HCLGE_TQP_ID_S, node->tqp_index);
4831 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4832 HCLGE_INT_GL_IDX_S,
4833 hnae3_get_field(node->int_gl_idx,
4834 HNAE3_RING_GL_IDX_M,
4835 HNAE3_RING_GL_IDX_S));
84e095d6 4836 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
46a3df9f
S
4837 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4838 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
84e095d6 4839 req->vfid = vport->vport_id;
46a3df9f 4840
84e095d6
SM
4841 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4842 if (status) {
46a3df9f
S
4843 dev_err(&hdev->pdev->dev,
4844 "Map TQP fail, status is %d.\n",
84e095d6
SM
4845 status);
4846 return -EIO;
46a3df9f
S
4847 }
4848 i = 0;
4849
4850 hclge_cmd_setup_basic_desc(&desc,
84e095d6 4851 op,
46a3df9f 4852 false);
3a6863e4
YM
4853 req->int_vector_id_l =
4854 hnae3_get_field(vector_id,
4855 HCLGE_VECTOR_ID_L_M,
4856 HCLGE_VECTOR_ID_L_S);
4857 req->int_vector_id_h =
4858 hnae3_get_field(vector_id,
4859 HCLGE_VECTOR_ID_H_M,
4860 HCLGE_VECTOR_ID_H_S);
46a3df9f
S
4861 }
4862 }
4863
4864 if (i > 0) {
4865 req->int_cause_num = i;
84e095d6
SM
4866 req->vfid = vport->vport_id;
4867 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4868 if (status) {
46a3df9f 4869 dev_err(&hdev->pdev->dev,
84e095d6
SM
4870 "Map TQP fail, status is %d.\n", status);
4871 return -EIO;
46a3df9f
S
4872 }
4873 }
4874
4875 return 0;
4876}
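/* The loop above batches the ring chain into descriptors of at most
 * HCLGE_VECTOR_ELEMENTS_PER_CMD rings each; a final, partially filled
 * descriptor is flushed by the trailing "if (i > 0)" block.
 */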
4877
9b2f3477 4878static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
84e095d6 4879 struct hnae3_ring_chain_node *ring_chain)
46a3df9f
S
4880{
4881 struct hclge_vport *vport = hclge_get_vport(handle);
4882 struct hclge_dev *hdev = vport->back;
4883 int vector_id;
4884
4885 vector_id = hclge_get_vector_index(hdev, vector);
4886 if (vector_id < 0) {
4887 dev_err(&hdev->pdev->dev,
7ab2b53e 4888 "failed to get vector index. vector=%d\n", vector);
46a3df9f
S
4889 return vector_id;
4890 }
4891
84e095d6 4892 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
46a3df9f
S
4893}
4894
9b2f3477 4895static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
84e095d6 4896 struct hnae3_ring_chain_node *ring_chain)
46a3df9f
S
4897{
4898 struct hclge_vport *vport = hclge_get_vport(handle);
4899 struct hclge_dev *hdev = vport->back;
84e095d6 4900 int vector_id, ret;
46a3df9f 4901
b50ae26c
PL
4902 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4903 return 0;
4904
46a3df9f
S
4905 vector_id = hclge_get_vector_index(hdev, vector);
4906 if (vector_id < 0) {
4907 dev_err(&handle->pdev->dev,
4908 "Get vector index fail. ret =%d\n", vector_id);
4909 return vector_id;
4910 }
4911
84e095d6 4912 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
0d3e6631 4913 if (ret)
84e095d6
SM
4914 dev_err(&handle->pdev->dev,
4915 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
9b2f3477 4916 vector_id, ret);
46a3df9f 4917
0d3e6631 4918 return ret;
46a3df9f
S
4919}
4920
c43abe1a
GL
4921static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id,
4922 bool en_uc, bool en_mc, bool en_bc)
46a3df9f 4923{
5e7414cd
JS
4924 struct hclge_vport *vport = &hdev->vport[vf_id];
4925 struct hnae3_handle *handle = &vport->nic;
d44f9b63 4926 struct hclge_promisc_cfg_cmd *req;
46a3df9f 4927 struct hclge_desc desc;
5e7414cd 4928 bool uc_tx_en = en_uc;
c43abe1a 4929 u8 promisc_cfg = 0;
46a3df9f
S
4930 int ret;
4931
4932 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4933
d44f9b63 4934 req = (struct hclge_promisc_cfg_cmd *)desc.data;
c43abe1a
GL
4935 req->vf_id = vf_id;
4936
5e7414cd
JS
4937 if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags))
4938 uc_tx_en = false;
4939
c43abe1a
GL
4940 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0);
4941 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0);
4942 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0);
5e7414cd 4943 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0);
c43abe1a
GL
4944 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0);
4945 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0);
4946 req->extend_promisc = promisc_cfg;
4947
4948 /* to be compatible with DEVICE_VERSION_V1/2 */
4949 promisc_cfg = 0;
4950 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0);
4951 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0);
4952 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 1 : 0);
4953 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1);
4954 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1);
4955 req->promisc = promisc_cfg;
46a3df9f
S
4956
4957 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 4958 if (ret)
46a3df9f 4959 dev_err(&hdev->pdev->dev,
c43abe1a
GL
4960 "failed to set vport %u promisc mode, ret = %d.\n",
4961 vf_id, ret);
3f639907
JS
4962
4963 return ret;
46a3df9f
S
4964}
4965
e196ec75
JS
4966int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
4967 bool en_mc_pmc, bool en_bc_pmc)
4968{
c43abe1a
GL
4969 return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id,
4970 en_uc_pmc, en_mc_pmc, en_bc_pmc);
e196ec75
JS
4971}
4972
7fa6be4f
HT
4973static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4974 bool en_mc_pmc)
46a3df9f
S
4975{
4976 struct hclge_vport *vport = hclge_get_vport(handle);
295ba232 4977 struct hclge_dev *hdev = vport->back;
28673b33 4978 bool en_bc_pmc = true;
46a3df9f 4979
295ba232
GH
4980 /* For devices whose version is below V2, if broadcast promisc is enabled,
4981 * the vlan filter is always bypassed. So broadcast promisc should be
4982 * disabled until the user enables promisc mode
28673b33 4983 */
295ba232 4984 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
28673b33
JS
4985 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4986
e196ec75
JS
4987 return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
4988 en_bc_pmc);
46a3df9f
S
4989}
4990
c631c696
JS
4991static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
4992{
4993 struct hclge_vport *vport = hclge_get_vport(handle);
4994 struct hclge_dev *hdev = vport->back;
4995
4996 set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
4997}
4998
d695964d
JS
4999static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
5000{
5001 struct hclge_get_fd_mode_cmd *req;
5002 struct hclge_desc desc;
5003 int ret;
5004
5005 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
5006
5007 req = (struct hclge_get_fd_mode_cmd *)desc.data;
5008
5009 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5010 if (ret) {
5011 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
5012 return ret;
5013 }
5014
5015 *fd_mode = req->mode;
5016
5017 return ret;
5018}
5019
5020static int hclge_get_fd_allocation(struct hclge_dev *hdev,
5021 u32 *stage1_entry_num,
5022 u32 *stage2_entry_num,
5023 u16 *stage1_counter_num,
5024 u16 *stage2_counter_num)
5025{
5026 struct hclge_get_fd_allocation_cmd *req;
5027 struct hclge_desc desc;
5028 int ret;
5029
5030 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
5031
5032 req = (struct hclge_get_fd_allocation_cmd *)desc.data;
5033
5034 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5035 if (ret) {
5036 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
5037 ret);
5038 return ret;
5039 }
5040
5041 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
5042 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
5043 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
5044 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
5045
5046 return ret;
5047}
5048
84944d5c
GL
5049static int hclge_set_fd_key_config(struct hclge_dev *hdev,
5050 enum HCLGE_FD_STAGE stage_num)
d695964d
JS
5051{
5052 struct hclge_set_fd_key_config_cmd *req;
5053 struct hclge_fd_key_cfg *stage;
5054 struct hclge_desc desc;
5055 int ret;
5056
5057 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
5058
5059 req = (struct hclge_set_fd_key_config_cmd *)desc.data;
5060 stage = &hdev->fd_cfg.key_cfg[stage_num];
5061 req->stage = stage_num;
5062 req->key_select = stage->key_sel;
5063 req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
5064 req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
5065 req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
5066 req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
5067 req->tuple_mask = cpu_to_le32(~stage->tuple_active);
5068 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
5069
5070 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5071 if (ret)
5072 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
5073
5074 return ret;
5075}
5076
5077static int hclge_init_fd_config(struct hclge_dev *hdev)
5078{
5079#define LOW_2_WORDS 0x03
5080 struct hclge_fd_key_cfg *key_cfg;
5081 int ret;
5082
5083 if (!hnae3_dev_fd_supported(hdev))
5084 return 0;
5085
5086 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
5087 if (ret)
5088 return ret;
5089
5090 switch (hdev->fd_cfg.fd_mode) {
5091 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
5092 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
5093 break;
5094 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
5095 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
5096 break;
5097 default:
5098 dev_err(&hdev->pdev->dev,
adcf738b 5099 "Unsupported flow director mode %u\n",
d695964d
JS
5100 hdev->fd_cfg.fd_mode);
5101 return -EOPNOTSUPP;
5102 }
5103
d695964d 5104 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
474d8fef 5105 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
d695964d
JS
5106 key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
5107 key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
5108 key_cfg->outer_sipv6_word_en = 0;
5109 key_cfg->outer_dipv6_word_en = 0;
5110
5111 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
5112 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
5113 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5114 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5115
5116	/* If using the max 400-bit key, we can also support MAC address tuples */
16505f87 5117 if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1)
d695964d
JS
5118 key_cfg->tuple_active |=
5119 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
d695964d
JS
5120
5121 /* roce_type is used to filter roce frames
5122 * dst_vport is used to specify the rule
5123 */
5124 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
5125
5126 ret = hclge_get_fd_allocation(hdev,
5127 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
5128 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
5129 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
5130 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
5131 if (ret)
5132 return ret;
5133
5134 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
5135}
5136
11732868
JS
5137static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
5138 int loc, u8 *key, bool is_add)
5139{
5140 struct hclge_fd_tcam_config_1_cmd *req1;
5141 struct hclge_fd_tcam_config_2_cmd *req2;
5142 struct hclge_fd_tcam_config_3_cmd *req3;
5143 struct hclge_desc desc[3];
5144 int ret;
5145
5146 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
5147 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5148 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
5149 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5150 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
5151
5152 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
5153 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
5154 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
5155
5156 req1->stage = stage;
5157 req1->xy_sel = sel_x ? 1 : 0;
5158 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
5159 req1->index = cpu_to_le32(loc);
5160 req1->entry_vld = sel_x ? is_add : 0;
5161
5162 if (key) {
5163 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
5164 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
5165 sizeof(req2->tcam_data));
5166 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
5167 sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
5168 }
5169
5170 ret = hclge_cmd_send(&hdev->hw, desc, 3);
5171 if (ret)
5172 dev_err(&hdev->pdev->dev,
5173 "config tcam key fail, ret=%d\n",
5174 ret);
5175
5176 return ret;
5177}
5178
5179static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5180 struct hclge_fd_ad_data *action)
5181{
0f993fe2 5182 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
11732868
JS
5183 struct hclge_fd_ad_config_cmd *req;
5184 struct hclge_desc desc;
5185 u64 ad_data = 0;
5186 int ret;
5187
5188 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5189
5190 req = (struct hclge_fd_ad_config_cmd *)desc.data;
5191 req->index = cpu_to_le32(loc);
5192 req->stage = stage;
5193
5194 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5195 action->write_rule_id_to_bd);
5196 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5197 action->rule_id);
0f993fe2
JS
5198 if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
5199 hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B,
5200 action->override_tc);
5201 hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M,
5202 HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size);
5203 }
11732868
JS
5204 ad_data <<= 32;
5205 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5206 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5207 action->forward_to_direct_queue);
5208 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5209 action->queue_id);
5210 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5211 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5212 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5213 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5214 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5215 action->counter_id);
5216
5217 req->ad_data = cpu_to_le64(ad_data);
5218 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5219 if (ret)
5220 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5221
5222 return ret;
5223}
5224
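/* Convert one tuple of the rule into the key_x/key_y pair used by the flow
 * director TCAM (calc_x()/calc_y(), defined elsewhere in this driver, derive
 * the two halves from the value/mask pair). Returning true means the tuple
 * occupies key space; when the tuple is listed in rule->unused_tuple its key
 * bytes are simply left as zero.
 */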
5225static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5226 struct hclge_fd_rule *rule)
5227{
5228 u16 tmp_x_s, tmp_y_s;
5229 u32 tmp_x_l, tmp_y_l;
5230 int i;
5231
5232 if (rule->unused_tuple & tuple_bit)
5233 return true;
5234
5235 switch (tuple_bit) {
11732868 5236 case BIT(INNER_DST_MAC):
e91e388c
JS
5237 for (i = 0; i < ETH_ALEN; i++) {
5238 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
11732868 5239 rule->tuples_mask.dst_mac[i]);
e91e388c 5240 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
11732868
JS
5241 rule->tuples_mask.dst_mac[i]);
5242 }
5243
5244 return true;
5245 case BIT(INNER_SRC_MAC):
e91e388c
JS
5246 for (i = 0; i < ETH_ALEN; i++) {
5247 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
b36fc875 5248 rule->tuples_mask.src_mac[i]);
e91e388c 5249 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
b36fc875 5250 rule->tuples_mask.src_mac[i]);
11732868
JS
5251 }
5252
5253 return true;
5254 case BIT(INNER_VLAN_TAG_FST):
5255 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
5256 rule->tuples_mask.vlan_tag1);
5257 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
5258 rule->tuples_mask.vlan_tag1);
5259 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5260 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5261
5262 return true;
5263 case BIT(INNER_ETH_TYPE):
5264 calc_x(tmp_x_s, rule->tuples.ether_proto,
5265 rule->tuples_mask.ether_proto);
5266 calc_y(tmp_y_s, rule->tuples.ether_proto,
5267 rule->tuples_mask.ether_proto);
5268 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5269 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5270
5271 return true;
5272 case BIT(INNER_IP_TOS):
5273 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5274 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5275
5276 return true;
5277 case BIT(INNER_IP_PROTO):
5278 calc_x(*key_x, rule->tuples.ip_proto,
5279 rule->tuples_mask.ip_proto);
5280 calc_y(*key_y, rule->tuples.ip_proto,
5281 rule->tuples_mask.ip_proto);
5282
5283 return true;
5284 case BIT(INNER_SRC_IP):
e91e388c
JS
5285 calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
5286 rule->tuples_mask.src_ip[IPV4_INDEX]);
5287 calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
5288 rule->tuples_mask.src_ip[IPV4_INDEX]);
11732868
JS
5289 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5290 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5291
5292 return true;
5293 case BIT(INNER_DST_IP):
e91e388c
JS
5294 calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
5295 rule->tuples_mask.dst_ip[IPV4_INDEX]);
5296 calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
5297 rule->tuples_mask.dst_ip[IPV4_INDEX]);
11732868
JS
5298 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5299 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5300
5301 return true;
5302 case BIT(INNER_SRC_PORT):
5303 calc_x(tmp_x_s, rule->tuples.src_port,
5304 rule->tuples_mask.src_port);
5305 calc_y(tmp_y_s, rule->tuples.src_port,
5306 rule->tuples_mask.src_port);
5307 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5308 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5309
5310 return true;
5311 case BIT(INNER_DST_PORT):
5312 calc_x(tmp_x_s, rule->tuples.dst_port,
5313 rule->tuples_mask.dst_port);
5314 calc_y(tmp_y_s, rule->tuples.dst_port,
5315 rule->tuples_mask.dst_port);
5316 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5317 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5318
5319 return true;
5320 default:
5321 return false;
5322 }
5323}
5324
5325static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5326 u8 vf_id, u8 network_port_id)
5327{
5328 u32 port_number = 0;
5329
5330 if (port_type == HOST_PORT) {
5331 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5332 pf_id);
5333 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5334 vf_id);
5335 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5336 } else {
5337 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5338 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5339 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5340 }
5341
5342 return port_number;
5343}
5344
5345static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5346 __le32 *key_x, __le32 *key_y,
5347 struct hclge_fd_rule *rule)
5348{
5349 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5350 u8 cur_pos = 0, tuple_size, shift_bits;
ebaf1908 5351 unsigned int i;
11732868
JS
5352
5353 for (i = 0; i < MAX_META_DATA; i++) {
5354 tuple_size = meta_data_key_info[i].key_length;
5355 tuple_bit = key_cfg->meta_data_active & BIT(i);
5356
5357 switch (tuple_bit) {
5358 case BIT(ROCE_TYPE):
5359 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5360 cur_pos += tuple_size;
5361 break;
5362 case BIT(DST_VPORT):
5363 port_number = hclge_get_port_number(HOST_PORT, 0,
5364 rule->vf_id, 0);
5365 hnae3_set_field(meta_data,
5366 GENMASK(cur_pos + tuple_size, cur_pos),
5367 cur_pos, port_number);
5368 cur_pos += tuple_size;
5369 break;
5370 default:
5371 break;
5372 }
5373 }
5374
5375 calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5376 calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5377 shift_bits = sizeof(meta_data) * 8 - cur_pos;
5378
5379 *key_x = cpu_to_le32(tmp_x << shift_bits);
5380 *key_y = cpu_to_le32(tmp_y << shift_bits);
5381}
5382
5383/* A complete key consists of the meta data key and the tuple key.
5384 * The meta data key is stored in the MSB region, the tuple key is stored
5385 * in the LSB region, and unused bits are filled with 0.
5386 */
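/* As a rough sketch, for the 400-bit key mode: the key buffer is
 * MAX_KEY_BYTES long, the active tuples are packed from byte 0 upwards in
 * tuple_key_info order, and the last MAX_META_DATA_LENGTH / 8 bytes carry
 * the meta data (ROCE_TYPE and DST_VPORT) built by
 * hclge_fd_convert_meta_data().
 */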
5387static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5388 struct hclge_fd_rule *rule)
5389{
5390 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5391 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5392 u8 *cur_key_x, *cur_key_y;
11732868 5393 u8 meta_data_region;
84944d5c
GL
5394 u8 tuple_size;
5395 int ret;
5396 u32 i;
11732868
JS
5397
5398 memset(key_x, 0, sizeof(key_x));
5399 memset(key_y, 0, sizeof(key_y));
5400 cur_key_x = key_x;
5401 cur_key_y = key_y;
5402
5403	for (i = 0; i < MAX_TUPLE; i++) {
5404 bool tuple_valid;
5405 u32 check_tuple;
5406
5407 tuple_size = tuple_key_info[i].key_length / 8;
5408 check_tuple = key_cfg->tuple_active & BIT(i);
5409
5410 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5411 cur_key_y, rule);
5412 if (tuple_valid) {
5413 cur_key_x += tuple_size;
5414 cur_key_y += tuple_size;
5415 }
5416 }
5417
5418 meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5419 MAX_META_DATA_LENGTH / 8;
5420
5421 hclge_fd_convert_meta_data(key_cfg,
5422 (__le32 *)(key_x + meta_data_region),
5423 (__le32 *)(key_y + meta_data_region),
5424 rule);
5425
5426 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5427 true);
5428 if (ret) {
5429 dev_err(&hdev->pdev->dev,
adcf738b 5430 "fd key_y config fail, loc=%u, ret=%d\n",
11732868
JS
5431			rule->location, ret);
5432 return ret;
5433 }
5434
5435 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5436 true);
5437 if (ret)
5438 dev_err(&hdev->pdev->dev,
adcf738b 5439 "fd key_x config fail, loc=%u, ret=%d\n",
11732868
JS
5440			rule->location, ret);
5441 return ret;
5442}
5443
5444static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5445 struct hclge_fd_rule *rule)
5446{
0f993fe2
JS
5447 struct hclge_vport *vport = hdev->vport;
5448 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
11732868
JS
5449 struct hclge_fd_ad_data ad_data;
5450
0f993fe2 5451 memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data));
11732868
JS
5452 ad_data.ad_id = rule->location;
5453
5454 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5455 ad_data.drop_packet = true;
0f993fe2
JS
5456 } else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) {
5457 ad_data.override_tc = true;
5458 ad_data.queue_id =
0205ec04 5459 kinfo->tc_info.tqp_offset[rule->cls_flower.tc];
0f993fe2 5460 ad_data.tc_size =
0205ec04 5461 ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]);
11732868 5462 } else {
11732868
JS
5463 ad_data.forward_to_direct_queue = true;
5464 ad_data.queue_id = rule->queue_id;
5465 }
5466
5467 ad_data.use_counter = false;
5468 ad_data.counter_id = 0;
5469
5470 ad_data.use_next_stage = false;
5471 ad_data.next_input_key = 0;
5472
5473 ad_data.write_rule_id_to_bd = true;
5474 ad_data.rule_id = rule->location;
5475
5476 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5477}
5478
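/* The hclge_fd_check_*_tuple() helpers below validate an ethtool flow spec
 * and set a bit in *unused_tuple for every field the rule does not match on;
 * those bits end up in rule->unused_tuple and are honoured by
 * hclge_fd_convert_tuple() when the TCAM key is built.
 */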
736fc0e1
JS
5479static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
5480 u32 *unused_tuple)
dd74f815 5481{
736fc0e1 5482 if (!spec || !unused_tuple)
dd74f815
JS
5483 return -EINVAL;
5484
736fc0e1 5485 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
dd74f815 5486
736fc0e1
JS
5487 if (!spec->ip4src)
5488 *unused_tuple |= BIT(INNER_SRC_IP);
dd74f815 5489
736fc0e1
JS
5490 if (!spec->ip4dst)
5491 *unused_tuple |= BIT(INNER_DST_IP);
dd74f815 5492
736fc0e1
JS
5493 if (!spec->psrc)
5494 *unused_tuple |= BIT(INNER_SRC_PORT);
dd74f815 5495
736fc0e1
JS
5496 if (!spec->pdst)
5497 *unused_tuple |= BIT(INNER_DST_PORT);
dd74f815 5498
736fc0e1
JS
5499 if (!spec->tos)
5500 *unused_tuple |= BIT(INNER_IP_TOS);
dd74f815 5501
736fc0e1
JS
5502 return 0;
5503}
dd74f815 5504
736fc0e1
JS
5505static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
5506 u32 *unused_tuple)
5507{
5508 if (!spec || !unused_tuple)
5509 return -EINVAL;
dd74f815 5510
736fc0e1
JS
5511 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5512 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
dd74f815 5513
736fc0e1
JS
5514 if (!spec->ip4src)
5515 *unused_tuple |= BIT(INNER_SRC_IP);
dd74f815 5516
736fc0e1
JS
5517 if (!spec->ip4dst)
5518 *unused_tuple |= BIT(INNER_DST_IP);
dd74f815 5519
736fc0e1
JS
5520 if (!spec->tos)
5521 *unused_tuple |= BIT(INNER_IP_TOS);
dd74f815 5522
736fc0e1
JS
5523 if (!spec->proto)
5524 *unused_tuple |= BIT(INNER_IP_PROTO);
dd74f815 5525
736fc0e1
JS
5526 if (spec->l4_4_bytes)
5527 return -EOPNOTSUPP;
dd74f815 5528
736fc0e1
JS
5529 if (spec->ip_ver != ETH_RX_NFC_IP4)
5530 return -EOPNOTSUPP;
dd74f815 5531
736fc0e1
JS
5532 return 0;
5533}
dd74f815 5534
736fc0e1
JS
5535static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
5536 u32 *unused_tuple)
5537{
5538 if (!spec || !unused_tuple)
5539 return -EINVAL;
dd74f815 5540
736fc0e1
JS
5541 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5542 BIT(INNER_IP_TOS);
dd74f815 5543
736fc0e1 5544	/* check whether the src/dst ip address is used */
eaede835 5545 if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
736fc0e1 5546 *unused_tuple |= BIT(INNER_SRC_IP);
dd74f815 5547
eaede835 5548 if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
736fc0e1 5549 *unused_tuple |= BIT(INNER_DST_IP);
dd74f815 5550
736fc0e1
JS
5551 if (!spec->psrc)
5552 *unused_tuple |= BIT(INNER_SRC_PORT);
dd74f815 5553
736fc0e1
JS
5554 if (!spec->pdst)
5555 *unused_tuple |= BIT(INNER_DST_PORT);
dd74f815 5556
736fc0e1
JS
5557 if (spec->tclass)
5558 return -EOPNOTSUPP;
dd74f815 5559
736fc0e1
JS
5560 return 0;
5561}
dd74f815 5562
736fc0e1
JS
5563static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
5564 u32 *unused_tuple)
5565{
5566 if (!spec || !unused_tuple)
5567 return -EINVAL;
dd74f815 5568
736fc0e1
JS
5569 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5570 BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
dd74f815 5571
736fc0e1 5572	/* check whether the src/dst ip address is used */
eaede835 5573 if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
736fc0e1 5574 *unused_tuple |= BIT(INNER_SRC_IP);
dd74f815 5575
eaede835 5576 if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
736fc0e1 5577 *unused_tuple |= BIT(INNER_DST_IP);
dd74f815 5578
736fc0e1
JS
5579 if (!spec->l4_proto)
5580 *unused_tuple |= BIT(INNER_IP_PROTO);
dd74f815 5581
736fc0e1
JS
5582 if (spec->tclass)
5583 return -EOPNOTSUPP;
dd74f815 5584
736fc0e1 5585 if (spec->l4_4_bytes)
dd74f815 5586 return -EOPNOTSUPP;
dd74f815 5587
736fc0e1
JS
5588 return 0;
5589}
5590
5591static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
5592{
5593 if (!spec || !unused_tuple)
5594 return -EINVAL;
5595
5596 *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5597 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5598 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5599
5600 if (is_zero_ether_addr(spec->h_source))
5601 *unused_tuple |= BIT(INNER_SRC_MAC);
5602
5603 if (is_zero_ether_addr(spec->h_dest))
5604 *unused_tuple |= BIT(INNER_DST_MAC);
5605
5606 if (!spec->h_proto)
5607 *unused_tuple |= BIT(INNER_ETH_TYPE);
5608
5609 return 0;
5610}
5611
5612static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
5613 struct ethtool_rx_flow_spec *fs,
5614 u32 *unused_tuple)
5615{
0b4bdc55 5616 if (fs->flow_type & FLOW_EXT) {
a3ca5e90
GL
5617 if (fs->h_ext.vlan_etype) {
5618 dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
dd74f815 5619 return -EOPNOTSUPP;
a3ca5e90
GL
5620 }
5621
dd74f815 5622 if (!fs->h_ext.vlan_tci)
736fc0e1 5623 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
dd74f815 5624
736fc0e1 5625 if (fs->m_ext.vlan_tci &&
a3ca5e90
GL
5626 be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
5627 dev_err(&hdev->pdev->dev,
c5aaf176 5628 "failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n",
a3ca5e90 5629 ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
736fc0e1 5630 return -EINVAL;
a3ca5e90 5631 }
dd74f815 5632 } else {
736fc0e1 5633 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
dd74f815
JS
5634 }
5635
5636 if (fs->flow_type & FLOW_MAC_EXT) {
16505f87 5637 if (hdev->fd_cfg.fd_mode !=
a3ca5e90
GL
5638 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5639 dev_err(&hdev->pdev->dev,
5640 "FLOW_MAC_EXT is not supported in current fd mode!\n");
dd74f815 5641 return -EOPNOTSUPP;
a3ca5e90 5642 }
dd74f815
JS
5643
5644 if (is_zero_ether_addr(fs->h_ext.h_dest))
736fc0e1 5645 *unused_tuple |= BIT(INNER_DST_MAC);
dd74f815 5646 else
0b4bdc55 5647 *unused_tuple &= ~BIT(INNER_DST_MAC);
dd74f815
JS
5648 }
5649
5650 return 0;
5651}
5652
736fc0e1
JS
5653static int hclge_fd_check_spec(struct hclge_dev *hdev,
5654 struct ethtool_rx_flow_spec *fs,
5655 u32 *unused_tuple)
5656{
16505f87 5657 u32 flow_type;
736fc0e1
JS
5658 int ret;
5659
a3ca5e90
GL
5660 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5661 dev_err(&hdev->pdev->dev,
5662 "failed to config fd rules, invalid rule location: %u, max is %u\n.",
5663 fs->location,
5664 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
736fc0e1 5665 return -EINVAL;
a3ca5e90 5666 }
736fc0e1 5667
736fc0e1
JS
5668 if ((fs->flow_type & FLOW_EXT) &&
5669 (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5670 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5671 return -EOPNOTSUPP;
5672 }
5673
16505f87
GL
5674 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5675 switch (flow_type) {
736fc0e1
JS
5676 case SCTP_V4_FLOW:
5677 case TCP_V4_FLOW:
5678 case UDP_V4_FLOW:
5679 ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
5680 unused_tuple);
5681 break;
5682 case IP_USER_FLOW:
5683 ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
5684 unused_tuple);
5685 break;
5686 case SCTP_V6_FLOW:
5687 case TCP_V6_FLOW:
5688 case UDP_V6_FLOW:
5689 ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
5690 unused_tuple);
5691 break;
5692 case IPV6_USER_FLOW:
5693 ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
5694 unused_tuple);
5695 break;
5696 case ETHER_FLOW:
5697 if (hdev->fd_cfg.fd_mode !=
5698 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5699 dev_err(&hdev->pdev->dev,
5700 "ETHER_FLOW is not supported in current fd mode!\n");
5701 return -EOPNOTSUPP;
5702 }
5703
5704 ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
5705 unused_tuple);
5706 break;
5707 default:
a3ca5e90
GL
5708 dev_err(&hdev->pdev->dev,
5709 "unsupported protocol type, protocol type = %#x\n",
5710 flow_type);
736fc0e1
JS
5711 return -EOPNOTSUPP;
5712 }
5713
a3ca5e90
GL
5714 if (ret) {
5715 dev_err(&hdev->pdev->dev,
5716 "failed to check flow union tuple, ret = %d\n",
5717 ret);
736fc0e1 5718 return ret;
a3ca5e90 5719 }
736fc0e1
JS
5720
5721 return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
5722}
5723
dd74f815
JS
5724static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5725{
5726 struct hclge_fd_rule *rule = NULL;
5727 struct hlist_node *node2;
5728
44122887 5729 spin_lock_bh(&hdev->fd_rule_lock);
dd74f815
JS
5730 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5731 if (rule->location >= location)
5732 break;
5733 }
5734
44122887
JS
5735 spin_unlock_bh(&hdev->fd_rule_lock);
5736
dd74f815
JS
5737 return rule && rule->location == location;
5738}
5739
44122887 5740/* make sure this is called with fd_rule_lock held */
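/* Insert (is_add) or remove (!is_add) the rule at @location in the
 * location-sorted fd_rule_list, updating fd_bmap, hclge_fd_rule_num and
 * fd_active_type accordingly; when adding, an existing rule at the same
 * location is replaced.
 */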
dd74f815
JS
5741static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5742 struct hclge_fd_rule *new_rule,
5743 u16 location,
5744 bool is_add)
5745{
5746 struct hclge_fd_rule *rule = NULL, *parent = NULL;
5747 struct hlist_node *node2;
5748
5749 if (is_add && !new_rule)
5750 return -EINVAL;
5751
5752 hlist_for_each_entry_safe(rule, node2,
5753 &hdev->fd_rule_list, rule_node) {
5754 if (rule->location >= location)
5755 break;
5756 parent = rule;
5757 }
5758
5759 if (rule && rule->location == location) {
5760 hlist_del(&rule->rule_node);
5761 kfree(rule);
5762 hdev->hclge_fd_rule_num--;
5763
44122887
JS
5764 if (!is_add) {
5765 if (!hdev->hclge_fd_rule_num)
5766 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5767 clear_bit(location, hdev->fd_bmap);
dd74f815 5768
44122887
JS
5769 return 0;
5770 }
dd74f815
JS
5771 } else if (!is_add) {
5772 dev_err(&hdev->pdev->dev,
adcf738b 5773 "delete fail, rule %u is inexistent\n",
dd74f815
JS
5774 location);
5775 return -EINVAL;
5776 }
5777
5778 INIT_HLIST_NODE(&new_rule->rule_node);
5779
5780 if (parent)
5781 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5782 else
5783 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5784
44122887 5785 set_bit(location, hdev->fd_bmap);
dd74f815 5786 hdev->hclge_fd_rule_num++;
44122887 5787 hdev->fd_active_type = new_rule->rule_type;
dd74f815
JS
5788
5789 return 0;
5790}
5791
5792static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5793 struct ethtool_rx_flow_spec *fs,
5794 struct hclge_fd_rule *rule)
5795{
5796 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5797
5798 switch (flow_type) {
5799 case SCTP_V4_FLOW:
5800 case TCP_V4_FLOW:
5801 case UDP_V4_FLOW:
e91e388c 5802 rule->tuples.src_ip[IPV4_INDEX] =
dd74f815 5803 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
e91e388c 5804 rule->tuples_mask.src_ip[IPV4_INDEX] =
dd74f815
JS
5805 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5806
e91e388c 5807 rule->tuples.dst_ip[IPV4_INDEX] =
dd74f815 5808 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
e91e388c 5809 rule->tuples_mask.dst_ip[IPV4_INDEX] =
dd74f815
JS
5810 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5811
5812 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5813 rule->tuples_mask.src_port =
5814 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5815
5816 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5817 rule->tuples_mask.dst_port =
5818 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5819
5820 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5821 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5822
5823 rule->tuples.ether_proto = ETH_P_IP;
5824 rule->tuples_mask.ether_proto = 0xFFFF;
5825
5826 break;
5827 case IP_USER_FLOW:
e91e388c 5828 rule->tuples.src_ip[IPV4_INDEX] =
dd74f815 5829 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
e91e388c 5830 rule->tuples_mask.src_ip[IPV4_INDEX] =
dd74f815
JS
5831 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5832
e91e388c 5833 rule->tuples.dst_ip[IPV4_INDEX] =
dd74f815 5834 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
e91e388c 5835 rule->tuples_mask.dst_ip[IPV4_INDEX] =
dd74f815
JS
5836 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5837
5838 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5839 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5840
5841 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5842 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5843
5844 rule->tuples.ether_proto = ETH_P_IP;
5845 rule->tuples_mask.ether_proto = 0xFFFF;
5846
5847 break;
5848 case SCTP_V6_FLOW:
5849 case TCP_V6_FLOW:
5850 case UDP_V6_FLOW:
5851 be32_to_cpu_array(rule->tuples.src_ip,
e91e388c 5852 fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
dd74f815 5853 be32_to_cpu_array(rule->tuples_mask.src_ip,
e91e388c 5854 fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
dd74f815
JS
5855
5856 be32_to_cpu_array(rule->tuples.dst_ip,
e91e388c 5857 fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
dd74f815 5858 be32_to_cpu_array(rule->tuples_mask.dst_ip,
e91e388c 5859 fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
dd74f815
JS
5860
5861 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5862 rule->tuples_mask.src_port =
5863 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5864
5865 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5866 rule->tuples_mask.dst_port =
5867 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5868
5869 rule->tuples.ether_proto = ETH_P_IPV6;
5870 rule->tuples_mask.ether_proto = 0xFFFF;
5871
5872 break;
5873 case IPV6_USER_FLOW:
5874 be32_to_cpu_array(rule->tuples.src_ip,
e91e388c 5875 fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
dd74f815 5876 be32_to_cpu_array(rule->tuples_mask.src_ip,
e91e388c 5877 fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
dd74f815
JS
5878
5879 be32_to_cpu_array(rule->tuples.dst_ip,
e91e388c 5880 fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
dd74f815 5881 be32_to_cpu_array(rule->tuples_mask.dst_ip,
e91e388c 5882 fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
dd74f815
JS
5883
5884 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5885 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5886
5887 rule->tuples.ether_proto = ETH_P_IPV6;
5888 rule->tuples_mask.ether_proto = 0xFFFF;
5889
5890 break;
5891 case ETHER_FLOW:
5892 ether_addr_copy(rule->tuples.src_mac,
5893 fs->h_u.ether_spec.h_source);
5894 ether_addr_copy(rule->tuples_mask.src_mac,
5895 fs->m_u.ether_spec.h_source);
5896
5897 ether_addr_copy(rule->tuples.dst_mac,
5898 fs->h_u.ether_spec.h_dest);
5899 ether_addr_copy(rule->tuples_mask.dst_mac,
5900 fs->m_u.ether_spec.h_dest);
5901
5902 rule->tuples.ether_proto =
5903 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5904 rule->tuples_mask.ether_proto =
5905 be16_to_cpu(fs->m_u.ether_spec.h_proto);
5906
5907 break;
5908 default:
5909 return -EOPNOTSUPP;
5910 }
5911
5912 switch (flow_type) {
5913 case SCTP_V4_FLOW:
5914 case SCTP_V6_FLOW:
5915 rule->tuples.ip_proto = IPPROTO_SCTP;
5916 rule->tuples_mask.ip_proto = 0xFF;
5917 break;
5918 case TCP_V4_FLOW:
5919 case TCP_V6_FLOW:
5920 rule->tuples.ip_proto = IPPROTO_TCP;
5921 rule->tuples_mask.ip_proto = 0xFF;
5922 break;
5923 case UDP_V4_FLOW:
5924 case UDP_V6_FLOW:
5925 rule->tuples.ip_proto = IPPROTO_UDP;
5926 rule->tuples_mask.ip_proto = 0xFF;
5927 break;
5928 default:
5929 break;
5930 }
5931
0b4bdc55 5932 if (fs->flow_type & FLOW_EXT) {
dd74f815
JS
5933 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5934 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5935 }
5936
5937 if (fs->flow_type & FLOW_MAC_EXT) {
5938 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5939 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5940 }
5941
5942 return 0;
5943}
5944
44122887
JS
5945/* make sure this is called with fd_rule_lock held */
5946static int hclge_fd_config_rule(struct hclge_dev *hdev,
5947 struct hclge_fd_rule *rule)
5948{
5949 int ret;
5950
5951 if (!rule) {
5952 dev_err(&hdev->pdev->dev,
5953 "The flow director rule is NULL\n");
5954 return -EINVAL;
5955 }
5956
5957	/* it never fails here, so there is no need to check the return value */
5958 hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5959
5960 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5961 if (ret)
5962 goto clear_rule;
5963
5964 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5965 if (ret)
5966 goto clear_rule;
5967
5968 return 0;
5969
5970clear_rule:
5971 hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5972 return ret;
5973}
5974
0205ec04
JS
5975static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
5976{
5977 struct hclge_vport *vport = hclge_get_vport(handle);
5978 struct hclge_dev *hdev = vport->back;
5979
5980 return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
5981}
5982
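/* Roughly, adding an ethtool flow director rule goes through these steps:
 * validate the flow spec, resolve the destination vport/queue from the ring
 * cookie, convert the spec into a hclge_fd_rule, clear any aRFS rules to
 * avoid conflicts, then program the action and key into the stage-1 TCAM.
 */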
dd74f815
JS
5983static int hclge_add_fd_entry(struct hnae3_handle *handle,
5984 struct ethtool_rxnfc *cmd)
5985{
5986 struct hclge_vport *vport = hclge_get_vport(handle);
5987 struct hclge_dev *hdev = vport->back;
5988 u16 dst_vport_id = 0, q_index = 0;
5989 struct ethtool_rx_flow_spec *fs;
5990 struct hclge_fd_rule *rule;
5991 u32 unused = 0;
5992 u8 action;
5993 int ret;
5994
a3ca5e90
GL
5995 if (!hnae3_dev_fd_supported(hdev)) {
5996 dev_err(&hdev->pdev->dev,
5997 "flow table director is not supported\n");
dd74f815 5998 return -EOPNOTSUPP;
a3ca5e90 5999 }
dd74f815 6000
9abeb7d8 6001 if (!hdev->fd_en) {
a3ca5e90
GL
6002 dev_err(&hdev->pdev->dev,
6003 "please enable flow director first\n");
dd74f815
JS
6004 return -EOPNOTSUPP;
6005 }
6006
0205ec04
JS
6007 if (hclge_is_cls_flower_active(handle)) {
6008 dev_err(&hdev->pdev->dev,
6009 "please delete all exist cls flower rules first\n");
6010 return -EINVAL;
6011 }
6012
dd74f815
JS
6013 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6014
6015 ret = hclge_fd_check_spec(hdev, fs, &unused);
a3ca5e90 6016 if (ret)
dd74f815 6017 return ret;
dd74f815
JS
6018
6019 if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
6020 action = HCLGE_FD_ACTION_DROP_PACKET;
6021 } else {
6022 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
6023 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
6024 u16 tqps;
6025
0285dbae
JS
6026 if (vf > hdev->num_req_vfs) {
6027 dev_err(&hdev->pdev->dev,
adcf738b 6028 "Error: vf id (%u) > max vf num (%u)\n",
0285dbae
JS
6029 vf, hdev->num_req_vfs);
6030 return -EINVAL;
6031 }
6032
dd74f815
JS
6033 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
6034 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
6035
6036 if (ring >= tqps) {
6037 dev_err(&hdev->pdev->dev,
adcf738b 6038 "Error: queue id (%u) > max tqp num (%u)\n",
dd74f815
JS
6039 ring, tqps - 1);
6040 return -EINVAL;
6041 }
6042
0f993fe2 6043 action = HCLGE_FD_ACTION_SELECT_QUEUE;
dd74f815
JS
6044 q_index = ring;
6045 }
6046
6047 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
6048 if (!rule)
6049 return -ENOMEM;
6050
6051 ret = hclge_fd_get_tuple(hdev, fs, rule);
44122887
JS
6052 if (ret) {
6053 kfree(rule);
6054 return ret;
6055 }
dd74f815
JS
6056
6057 rule->flow_type = fs->flow_type;
dd74f815
JS
6058 rule->location = fs->location;
6059 rule->unused_tuple = unused;
6060 rule->vf_id = dst_vport_id;
6061 rule->queue_id = q_index;
6062 rule->action = action;
44122887 6063 rule->rule_type = HCLGE_FD_EP_ACTIVE;
dd74f815 6064
d93ed94f
JS
6065	/* to avoid rule conflicts, when the user configures a rule via
6066	 * ethtool, we need to clear all arfs rules first
6067 */
efe3fa45 6068 spin_lock_bh(&hdev->fd_rule_lock);
d93ed94f
JS
6069 hclge_clear_arfs_rules(handle);
6070
44122887 6071 ret = hclge_fd_config_rule(hdev, rule);
dd74f815 6072
44122887 6073 spin_unlock_bh(&hdev->fd_rule_lock);
dd74f815 6074
dd74f815
JS
6075 return ret;
6076}
6077
6078static int hclge_del_fd_entry(struct hnae3_handle *handle,
6079 struct ethtool_rxnfc *cmd)
6080{
6081 struct hclge_vport *vport = hclge_get_vport(handle);
6082 struct hclge_dev *hdev = vport->back;
6083 struct ethtool_rx_flow_spec *fs;
6084 int ret;
6085
6086 if (!hnae3_dev_fd_supported(hdev))
6087 return -EOPNOTSUPP;
6088
6089 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6090
6091 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6092 return -EINVAL;
6093
0205ec04
JS
6094 if (hclge_is_cls_flower_active(handle) || !hdev->hclge_fd_rule_num ||
6095 !hclge_fd_rule_exist(hdev, fs->location)) {
dd74f815 6096 dev_err(&hdev->pdev->dev,
39edaf24 6097 "Delete fail, rule %u is inexistent\n", fs->location);
dd74f815
JS
6098 return -ENOENT;
6099 }
6100
9b2f3477
WL
6101 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
6102 NULL, false);
dd74f815
JS
6103 if (ret)
6104 return ret;
6105
44122887
JS
6106 spin_lock_bh(&hdev->fd_rule_lock);
6107 ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
6108
6109 spin_unlock_bh(&hdev->fd_rule_lock);
6110
6111 return ret;
dd74f815
JS
6112}
6113
efe3fa45 6114/* make sure this is called with fd_rule_lock held */
6871af29
JS
6115static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
6116 bool clear_list)
6117{
6118 struct hclge_vport *vport = hclge_get_vport(handle);
6119 struct hclge_dev *hdev = vport->back;
6120 struct hclge_fd_rule *rule;
6121 struct hlist_node *node;
44122887 6122 u16 location;
6871af29
JS
6123
6124 if (!hnae3_dev_fd_supported(hdev))
6125 return;
6126
44122887
JS
6127 for_each_set_bit(location, hdev->fd_bmap,
6128 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6129 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
6130 NULL, false);
6131
6871af29
JS
6132 if (clear_list) {
6133 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
6134 rule_node) {
6871af29
JS
6135 hlist_del(&rule->rule_node);
6136 kfree(rule);
6871af29 6137 }
44122887
JS
6138 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
6139 hdev->hclge_fd_rule_num = 0;
6140 bitmap_zero(hdev->fd_bmap,
6141 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6871af29
JS
6142 }
6143}
6144
6145static int hclge_restore_fd_entries(struct hnae3_handle *handle)
6146{
6147 struct hclge_vport *vport = hclge_get_vport(handle);
6148 struct hclge_dev *hdev = vport->back;
6149 struct hclge_fd_rule *rule;
6150 struct hlist_node *node;
6151 int ret;
6152
65e41e7e
HT
6153 /* Return ok here, because reset error handling will check this
6154	 * return value. If an error is returned here, the reset process will
6155 * fail.
6156 */
6871af29 6157 if (!hnae3_dev_fd_supported(hdev))
65e41e7e 6158 return 0;
6871af29 6159
8edc2285 6160	/* if fd is disabled, the rules should not be restored during reset */
9abeb7d8 6161 if (!hdev->fd_en)
8edc2285
JS
6162 return 0;
6163
44122887 6164 spin_lock_bh(&hdev->fd_rule_lock);
6871af29
JS
6165 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6166 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6167 if (!ret)
6168 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6169
6170 if (ret) {
6171 dev_warn(&hdev->pdev->dev,
adcf738b 6172 "Restore rule %u failed, remove it\n",
6871af29 6173 rule->location);
44122887 6174 clear_bit(rule->location, hdev->fd_bmap);
6871af29
JS
6175 hlist_del(&rule->rule_node);
6176 kfree(rule);
6177 hdev->hclge_fd_rule_num--;
6178 }
6179 }
44122887
JS
6180
6181 if (hdev->hclge_fd_rule_num)
6182 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
6183
6184 spin_unlock_bh(&hdev->fd_rule_lock);
6185
6871af29
JS
6186 return 0;
6187}
6188
05c2314f
JS
6189static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
6190 struct ethtool_rxnfc *cmd)
6191{
6192 struct hclge_vport *vport = hclge_get_vport(handle);
6193 struct hclge_dev *hdev = vport->back;
6194
0205ec04 6195 if (!hnae3_dev_fd_supported(hdev) || hclge_is_cls_flower_active(handle))
05c2314f
JS
6196 return -EOPNOTSUPP;
6197
6198 cmd->rule_cnt = hdev->hclge_fd_rule_num;
6199 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6200
6201 return 0;
6202}
6203
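/* The hclge_fd_get_*_info() helpers below do the reverse of rule parsing:
 * they copy a stored hclge_fd_rule back into the ethtool spec and mask
 * structures, reporting a zero mask for every tuple recorded as unused.
 */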
fa663c09
JS
6204static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
6205 struct ethtool_tcpip4_spec *spec,
6206 struct ethtool_tcpip4_spec *spec_mask)
6207{
6208 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6209 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6210 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6211
6212 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6213 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6214 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6215
6216 spec->psrc = cpu_to_be16(rule->tuples.src_port);
6217 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6218 0 : cpu_to_be16(rule->tuples_mask.src_port);
6219
6220 spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6221 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6222 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6223
6224 spec->tos = rule->tuples.ip_tos;
6225 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6226 0 : rule->tuples_mask.ip_tos;
6227}
6228
6229static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
6230 struct ethtool_usrip4_spec *spec,
6231 struct ethtool_usrip4_spec *spec_mask)
6232{
6233 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6234 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6235 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6236
6237 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6238 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6239 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6240
6241 spec->tos = rule->tuples.ip_tos;
6242 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6243 0 : rule->tuples_mask.ip_tos;
6244
6245 spec->proto = rule->tuples.ip_proto;
6246 spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6247 0 : rule->tuples_mask.ip_proto;
6248
6249 spec->ip_ver = ETH_RX_NFC_IP4;
6250}
6251
6252static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6253 struct ethtool_tcpip6_spec *spec,
6254 struct ethtool_tcpip6_spec *spec_mask)
6255{
6256 cpu_to_be32_array(spec->ip6src,
6257 rule->tuples.src_ip, IPV6_SIZE);
6258 cpu_to_be32_array(spec->ip6dst,
6259 rule->tuples.dst_ip, IPV6_SIZE);
6260 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6261 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6262 else
6263 cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6264 IPV6_SIZE);
6265
6266 if (rule->unused_tuple & BIT(INNER_DST_IP))
6267 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6268 else
6269 cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6270 IPV6_SIZE);
6271
6272 spec->psrc = cpu_to_be16(rule->tuples.src_port);
6273 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6274 0 : cpu_to_be16(rule->tuples_mask.src_port);
6275
6276 spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6277 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6278 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6279}
6280
6281static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6282 struct ethtool_usrip6_spec *spec,
6283 struct ethtool_usrip6_spec *spec_mask)
6284{
6285 cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6286 cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6287 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6288 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6289 else
6290 cpu_to_be32_array(spec_mask->ip6src,
6291 rule->tuples_mask.src_ip, IPV6_SIZE);
6292
6293 if (rule->unused_tuple & BIT(INNER_DST_IP))
6294 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6295 else
6296 cpu_to_be32_array(spec_mask->ip6dst,
6297 rule->tuples_mask.dst_ip, IPV6_SIZE);
6298
6299 spec->l4_proto = rule->tuples.ip_proto;
6300 spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6301 0 : rule->tuples_mask.ip_proto;
6302}
6303
6304static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6305 struct ethhdr *spec,
6306 struct ethhdr *spec_mask)
6307{
6308 ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6309 ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6310
6311 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6312 eth_zero_addr(spec_mask->h_source);
6313 else
6314 ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6315
6316 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6317 eth_zero_addr(spec_mask->h_dest);
6318 else
6319 ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6320
6321 spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6322 spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6323 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6324}
6325
6326static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
6327 struct hclge_fd_rule *rule)
6328{
6329 if (fs->flow_type & FLOW_EXT) {
6330 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6331 fs->m_ext.vlan_tci =
6332 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
c75ec148 6333 0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
fa663c09
JS
6334 }
6335
6336 if (fs->flow_type & FLOW_MAC_EXT) {
6337 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6338 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6339 eth_zero_addr(fs->m_u.ether_spec.h_dest);
6340 else
6341 ether_addr_copy(fs->m_u.ether_spec.h_dest,
6342 rule->tuples_mask.dst_mac);
6343 }
6344}
6345
05c2314f
JS
6346static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
6347 struct ethtool_rxnfc *cmd)
6348{
6349 struct hclge_vport *vport = hclge_get_vport(handle);
6350 struct hclge_fd_rule *rule = NULL;
6351 struct hclge_dev *hdev = vport->back;
6352 struct ethtool_rx_flow_spec *fs;
6353 struct hlist_node *node2;
6354
6355 if (!hnae3_dev_fd_supported(hdev))
6356 return -EOPNOTSUPP;
6357
6358 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6359
44122887
JS
6360 spin_lock_bh(&hdev->fd_rule_lock);
6361
05c2314f
JS
6362 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
6363 if (rule->location >= fs->location)
6364 break;
6365 }
6366
44122887
JS
6367 if (!rule || fs->location != rule->location) {
6368 spin_unlock_bh(&hdev->fd_rule_lock);
6369
05c2314f 6370 return -ENOENT;
44122887 6371 }
05c2314f
JS
6372
6373 fs->flow_type = rule->flow_type;
6374 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
6375 case SCTP_V4_FLOW:
6376 case TCP_V4_FLOW:
6377 case UDP_V4_FLOW:
fa663c09
JS
6378 hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
6379 &fs->m_u.tcp_ip4_spec);
05c2314f
JS
6380 break;
6381 case IP_USER_FLOW:
fa663c09
JS
6382 hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
6383 &fs->m_u.usr_ip4_spec);
05c2314f
JS
6384 break;
6385 case SCTP_V6_FLOW:
6386 case TCP_V6_FLOW:
6387 case UDP_V6_FLOW:
fa663c09
JS
6388 hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
6389 &fs->m_u.tcp_ip6_spec);
05c2314f
JS
6390 break;
6391 case IPV6_USER_FLOW:
fa663c09
JS
6392 hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
6393 &fs->m_u.usr_ip6_spec);
05c2314f 6394 break;
fa663c09
JS
6395	/* The flow type of the fd rule has been checked before adding it to the
6396	 * rule list. As the other flow types have been handled above, it must be
6397	 * ETHER_FLOW for the default case
6398 */
05c2314f 6399 default:
fa663c09
JS
6400 hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
6401 &fs->m_u.ether_spec);
6402 break;
05c2314f
JS
6403 }
6404
fa663c09 6405 hclge_fd_get_ext_info(fs, rule);
05c2314f
JS
6406
6407 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6408 fs->ring_cookie = RX_CLS_FLOW_DISC;
6409 } else {
6410 u64 vf_id;
6411
6412 fs->ring_cookie = rule->queue_id;
6413 vf_id = rule->vf_id;
6414 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6415 fs->ring_cookie |= vf_id;
6416 }
6417
44122887
JS
6418 spin_unlock_bh(&hdev->fd_rule_lock);
6419
05c2314f
JS
6420 return 0;
6421}
6422
6423static int hclge_get_all_rules(struct hnae3_handle *handle,
6424 struct ethtool_rxnfc *cmd, u32 *rule_locs)
6425{
6426 struct hclge_vport *vport = hclge_get_vport(handle);
6427 struct hclge_dev *hdev = vport->back;
6428 struct hclge_fd_rule *rule;
6429 struct hlist_node *node2;
6430 int cnt = 0;
6431
6432 if (!hnae3_dev_fd_supported(hdev))
6433 return -EOPNOTSUPP;
6434
6435 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6436
44122887 6437 spin_lock_bh(&hdev->fd_rule_lock);
05c2314f
JS
6438 hlist_for_each_entry_safe(rule, node2,
6439 &hdev->fd_rule_list, rule_node) {
44122887
JS
6440 if (cnt == cmd->rule_cnt) {
6441 spin_unlock_bh(&hdev->fd_rule_lock);
05c2314f 6442 return -EMSGSIZE;
44122887 6443 }
05c2314f
JS
6444
6445 rule_locs[cnt] = rule->location;
6446 cnt++;
6447 }
6448
44122887
JS
6449 spin_unlock_bh(&hdev->fd_rule_lock);
6450
05c2314f
JS
6451 cmd->rule_cnt = cnt;
6452
6453 return 0;
6454}
6455
d93ed94f
JS
6456static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6457 struct hclge_fd_rule_tuples *tuples)
6458{
47327c93
GH
6459#define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
6460#define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
6461
d93ed94f
JS
6462 tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6463 tuples->ip_proto = fkeys->basic.ip_proto;
6464 tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6465
6466 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6467 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6468 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6469 } else {
47327c93
GH
6470 int i;
6471
6472 for (i = 0; i < IPV6_SIZE; i++) {
6473 tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
6474 tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
6475 }
d93ed94f
JS
6476 }
6477}
6478
6479/* traverse all rules, check whether an existing rule has the same tuples */
6480static struct hclge_fd_rule *
6481hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6482 const struct hclge_fd_rule_tuples *tuples)
6483{
6484 struct hclge_fd_rule *rule = NULL;
6485 struct hlist_node *node;
6486
6487 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6488 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6489 return rule;
6490 }
6491
6492 return NULL;
6493}
6494
6495static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6496 struct hclge_fd_rule *rule)
6497{
6498 rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6499 BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6500 BIT(INNER_SRC_PORT);
6501 rule->action = 0;
6502 rule->vf_id = 0;
6503 rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6504 if (tuples->ether_proto == ETH_P_IP) {
6505 if (tuples->ip_proto == IPPROTO_TCP)
6506 rule->flow_type = TCP_V4_FLOW;
6507 else
6508 rule->flow_type = UDP_V4_FLOW;
6509 } else {
6510 if (tuples->ip_proto == IPPROTO_TCP)
6511 rule->flow_type = TCP_V6_FLOW;
6512 else
6513 rule->flow_type = UDP_V6_FLOW;
6514 }
6515 memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6516 memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
6517}
6518
6519static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6520 u16 flow_id, struct flow_keys *fkeys)
6521{
d93ed94f 6522 struct hclge_vport *vport = hclge_get_vport(handle);
efe3fa45 6523 struct hclge_fd_rule_tuples new_tuples = {};
d93ed94f
JS
6524 struct hclge_dev *hdev = vport->back;
6525 struct hclge_fd_rule *rule;
6526 u16 tmp_queue_id;
6527 u16 bit_id;
6528 int ret;
6529
6530 if (!hnae3_dev_fd_supported(hdev))
6531 return -EOPNOTSUPP;
6532
d93ed94f
JS
6533	/* when there is already an fd rule added by the user,
6534	 * arfs should not work
6535 */
efe3fa45 6536 spin_lock_bh(&hdev->fd_rule_lock);
efd5a158 6537 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE &&
0205ec04 6538 hdev->fd_active_type != HCLGE_FD_RULE_NONE) {
d93ed94f 6539 spin_unlock_bh(&hdev->fd_rule_lock);
d93ed94f
JS
6540 return -EOPNOTSUPP;
6541 }
6542
efe3fa45
GL
6543 hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6544
d93ed94f
JS
6545	/* check whether a flow director filter already exists for this flow;
6546	 * if not, create a new filter for it;
6547	 * if a filter exists with a different queue id, modify the filter;
6548	 * if a filter exists with the same queue id, do nothing
6549 */
6550 rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6551 if (!rule) {
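		/* unlike ethtool rules, aRFS rules get their location from the
		 * first free bit in fd_bmap rather than from user input
		 */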
6552 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6553 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6554 spin_unlock_bh(&hdev->fd_rule_lock);
d93ed94f
JS
6555 return -ENOSPC;
6556 }
6557
d659f9f6 6558 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
d93ed94f
JS
6559 if (!rule) {
6560 spin_unlock_bh(&hdev->fd_rule_lock);
d93ed94f
JS
6561 return -ENOMEM;
6562 }
6563
6564 set_bit(bit_id, hdev->fd_bmap);
6565 rule->location = bit_id;
0205ec04 6566 rule->arfs.flow_id = flow_id;
d93ed94f
JS
6567 rule->queue_id = queue_id;
6568 hclge_fd_build_arfs_rule(&new_tuples, rule);
6569 ret = hclge_fd_config_rule(hdev, rule);
6570
6571 spin_unlock_bh(&hdev->fd_rule_lock);
6572
6573 if (ret)
6574 return ret;
6575
6576 return rule->location;
6577 }
6578
6579 spin_unlock_bh(&hdev->fd_rule_lock);
6580
6581 if (rule->queue_id == queue_id)
6582 return rule->location;
6583
6584 tmp_queue_id = rule->queue_id;
6585 rule->queue_id = queue_id;
6586 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6587 if (ret) {
6588 rule->queue_id = tmp_queue_id;
6589 return ret;
6590 }
6591
6592 return rule->location;
d93ed94f
JS
6593}
6594
6595static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6596{
6597#ifdef CONFIG_RFS_ACCEL
6598 struct hnae3_handle *handle = &hdev->vport[0].nic;
6599 struct hclge_fd_rule *rule;
6600 struct hlist_node *node;
6601 HLIST_HEAD(del_list);
6602
6603 spin_lock_bh(&hdev->fd_rule_lock);
6604 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6605 spin_unlock_bh(&hdev->fd_rule_lock);
6606 return;
6607 }
6608 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6609 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
0205ec04 6610 rule->arfs.flow_id, rule->location)) {
d93ed94f
JS
6611 hlist_del_init(&rule->rule_node);
6612 hlist_add_head(&rule->rule_node, &del_list);
6613 hdev->hclge_fd_rule_num--;
6614 clear_bit(rule->location, hdev->fd_bmap);
6615 }
6616 }
6617 spin_unlock_bh(&hdev->fd_rule_lock);
6618
6619 hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6620 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6621 rule->location, NULL, false);
6622 kfree(rule);
6623 }
6624#endif
6625}
6626
efe3fa45 6627/* make sure this is called with fd_rule_lock held */
d93ed94f
JS
6628static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6629{
6630#ifdef CONFIG_RFS_ACCEL
6631 struct hclge_vport *vport = hclge_get_vport(handle);
6632 struct hclge_dev *hdev = vport->back;
6633
6634 if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6635 hclge_del_all_fd_entries(handle, true);
6636#endif
6637}
6638
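/* The hclge_get_cls_key_*() helpers below translate a tc flower match into
 * the fd rule tuples: each one copies the corresponding dissector key/mask
 * into the rule, or marks the tuple as unused when the key is absent.
 */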
0205ec04
JS
6639static void hclge_get_cls_key_basic(const struct flow_rule *flow,
6640 struct hclge_fd_rule *rule)
6641{
6642 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) {
6643 struct flow_match_basic match;
6644 u16 ethtype_key, ethtype_mask;
6645
6646 flow_rule_match_basic(flow, &match);
6647 ethtype_key = ntohs(match.key->n_proto);
6648 ethtype_mask = ntohs(match.mask->n_proto);
6649
6650 if (ethtype_key == ETH_P_ALL) {
6651 ethtype_key = 0;
6652 ethtype_mask = 0;
6653 }
6654 rule->tuples.ether_proto = ethtype_key;
6655 rule->tuples_mask.ether_proto = ethtype_mask;
6656 rule->tuples.ip_proto = match.key->ip_proto;
6657 rule->tuples_mask.ip_proto = match.mask->ip_proto;
6658 } else {
6659 rule->unused_tuple |= BIT(INNER_IP_PROTO);
6660 rule->unused_tuple |= BIT(INNER_ETH_TYPE);
6661 }
6662}
6663
6664static void hclge_get_cls_key_mac(const struct flow_rule *flow,
6665 struct hclge_fd_rule *rule)
6666{
6667 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
6668 struct flow_match_eth_addrs match;
6669
6670 flow_rule_match_eth_addrs(flow, &match);
6671 ether_addr_copy(rule->tuples.dst_mac, match.key->dst);
6672 ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst);
6673 ether_addr_copy(rule->tuples.src_mac, match.key->src);
6674 ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src);
6675 } else {
6676 rule->unused_tuple |= BIT(INNER_DST_MAC);
6677 rule->unused_tuple |= BIT(INNER_SRC_MAC);
6678 }
6679}
6680
6681static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
6682 struct hclge_fd_rule *rule)
6683{
6684 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
6685 struct flow_match_vlan match;
6686
6687 flow_rule_match_vlan(flow, &match);
6688 rule->tuples.vlan_tag1 = match.key->vlan_id |
6689 (match.key->vlan_priority << VLAN_PRIO_SHIFT);
6690 rule->tuples_mask.vlan_tag1 = match.mask->vlan_id |
6691 (match.mask->vlan_priority << VLAN_PRIO_SHIFT);
6692 } else {
6693 rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6694 }
6695}
6696
6697static void hclge_get_cls_key_ip(const struct flow_rule *flow,
6698 struct hclge_fd_rule *rule)
6699{
6700 u16 addr_type = 0;
6701
6702 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) {
6703 struct flow_match_control match;
6704
6705 flow_rule_match_control(flow, &match);
6706 addr_type = match.key->addr_type;
6707 }
6708
6709 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
6710 struct flow_match_ipv4_addrs match;
6711
6712 flow_rule_match_ipv4_addrs(flow, &match);
6713 rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src);
6714 rule->tuples_mask.src_ip[IPV4_INDEX] =
6715 be32_to_cpu(match.mask->src);
6716 rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst);
6717 rule->tuples_mask.dst_ip[IPV4_INDEX] =
6718 be32_to_cpu(match.mask->dst);
6719 } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
6720 struct flow_match_ipv6_addrs match;
6721
6722 flow_rule_match_ipv6_addrs(flow, &match);
6723 be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
6724 IPV6_SIZE);
6725 be32_to_cpu_array(rule->tuples_mask.src_ip,
6726 match.mask->src.s6_addr32, IPV6_SIZE);
6727 be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
6728 IPV6_SIZE);
6729 be32_to_cpu_array(rule->tuples_mask.dst_ip,
6730 match.mask->dst.s6_addr32, IPV6_SIZE);
6731 } else {
6732 rule->unused_tuple |= BIT(INNER_SRC_IP);
6733 rule->unused_tuple |= BIT(INNER_DST_IP);
6734 }
6735}
6736
6737static void hclge_get_cls_key_port(const struct flow_rule *flow,
6738 struct hclge_fd_rule *rule)
6739{
6740 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
6741 struct flow_match_ports match;
6742
6743 flow_rule_match_ports(flow, &match);
6744
6745 rule->tuples.src_port = be16_to_cpu(match.key->src);
6746 rule->tuples_mask.src_port = be16_to_cpu(match.mask->src);
6747 rule->tuples.dst_port = be16_to_cpu(match.key->dst);
6748 rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst);
6749 } else {
6750 rule->unused_tuple |= BIT(INNER_SRC_PORT);
6751 rule->unused_tuple |= BIT(INNER_DST_PORT);
6752 }
6753}
6754
6755static int hclge_parse_cls_flower(struct hclge_dev *hdev,
6756 struct flow_cls_offload *cls_flower,
6757 struct hclge_fd_rule *rule)
6758{
6759 struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
6760 struct flow_dissector *dissector = flow->match.dissector;
6761
6762 if (dissector->used_keys &
6763 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
6764 BIT(FLOW_DISSECTOR_KEY_BASIC) |
6765 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
6766 BIT(FLOW_DISSECTOR_KEY_VLAN) |
6767 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
6768 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
6769 BIT(FLOW_DISSECTOR_KEY_PORTS))) {
6770 dev_err(&hdev->pdev->dev, "unsupported key set: %#x\n",
6771 dissector->used_keys);
6772 return -EOPNOTSUPP;
6773 }
6774
6775 hclge_get_cls_key_basic(flow, rule);
6776 hclge_get_cls_key_mac(flow, rule);
6777 hclge_get_cls_key_vlan(flow, rule);
6778 hclge_get_cls_key_ip(flow, rule);
6779 hclge_get_cls_key_port(flow, rule);
6780
6781 return 0;
6782}
6783
6784static int hclge_check_cls_flower(struct hclge_dev *hdev,
6785 struct flow_cls_offload *cls_flower, int tc)
6786{
6787 u32 prio = cls_flower->common.prio;
6788
6789 if (tc < 0 || tc > hdev->tc_max) {
6790 dev_err(&hdev->pdev->dev, "invalid traffic class\n");
6791 return -EINVAL;
6792 }
6793
6794 if (prio == 0 ||
6795 prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6796 dev_err(&hdev->pdev->dev,
6797 "prio %u should be in range[1, %u]\n",
6798 prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6799 return -EINVAL;
6800 }
6801
6802 if (test_bit(prio - 1, hdev->fd_bmap)) {
6803 dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio);
6804 return -EINVAL;
6805 }
6806 return 0;
6807}
6808
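/* A cls_flower rule offloaded here uses the tc filter priority directly
 * as the flow director TCAM location (prio - 1), which is why
 * hclge_check_cls_flower() rejects a priority of zero, one beyond the
 * stage-1 rule count, or one that is already in use. An illustrative
 * command (not from this file, qdisc/TC setup omitted) that would reach
 * this path:
 *   tc filter add dev <ethx> ingress protocol ip prio 5 flower \
 *           skip_sw dst_ip 192.168.1.1 hw_tc 2
 * i.e. TCAM location 4 with action HCLGE_FD_ACTION_SELECT_TC and tc = 2.
 */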
6809static int hclge_add_cls_flower(struct hnae3_handle *handle,
6810 struct flow_cls_offload *cls_flower,
6811 int tc)
6812{
6813 struct hclge_vport *vport = hclge_get_vport(handle);
6814 struct hclge_dev *hdev = vport->back;
6815 struct hclge_fd_rule *rule;
6816 int ret;
6817
6818 if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6819 dev_err(&hdev->pdev->dev,
6820 "please remove all exist fd rules via ethtool first\n");
6821 return -EINVAL;
6822 }
6823
6824 ret = hclge_check_cls_flower(hdev, cls_flower, tc);
6825 if (ret) {
6826 dev_err(&hdev->pdev->dev,
6827 "failed to check cls flower params, ret = %d\n", ret);
6828 return ret;
6829 }
6830
6831 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
6832 if (!rule)
6833 return -ENOMEM;
6834
6835 ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
6836 if (ret)
6837 goto err;
6838
6839 rule->action = HCLGE_FD_ACTION_SELECT_TC;
6840 rule->cls_flower.tc = tc;
6841 rule->location = cls_flower->common.prio - 1;
6842 rule->vf_id = 0;
6843 rule->cls_flower.cookie = cls_flower->cookie;
6844 rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE;
6845
6846 spin_lock_bh(&hdev->fd_rule_lock);
6847 hclge_clear_arfs_rules(handle);
6848
6849 ret = hclge_fd_config_rule(hdev, rule);
6850
6851 spin_unlock_bh(&hdev->fd_rule_lock);
6852
6853 if (ret) {
6854 dev_err(&hdev->pdev->dev,
6855 "failed to add cls flower rule, ret = %d\n", ret);
6856 goto err;
6857 }
6858
6859 return 0;
6860err:
6861 kfree(rule);
6862 return ret;
6863}
6864
6865static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev,
6866 unsigned long cookie)
6867{
6868 struct hclge_fd_rule *rule;
6869 struct hlist_node *node;
6870
6871 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6872 if (rule->cls_flower.cookie == cookie)
6873 return rule;
6874 }
6875
6876 return NULL;
6877}
6878
6879static int hclge_del_cls_flower(struct hnae3_handle *handle,
6880 struct flow_cls_offload *cls_flower)
6881{
6882 struct hclge_vport *vport = hclge_get_vport(handle);
6883 struct hclge_dev *hdev = vport->back;
6884 struct hclge_fd_rule *rule;
6885 int ret;
6886
6887 spin_lock_bh(&hdev->fd_rule_lock);
6888
6889 rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
6890 if (!rule) {
6891 spin_unlock_bh(&hdev->fd_rule_lock);
6892 return -EINVAL;
6893 }
6894
6895 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
6896 NULL, false);
6897 if (ret) {
6898 dev_err(&hdev->pdev->dev,
6899 "failed to delete cls flower rule %u, ret = %d\n",
6900 rule->location, ret);
6901 spin_unlock_bh(&hdev->fd_rule_lock);
6902 return ret;
6903 }
6904
6905 ret = hclge_fd_update_rule_list(hdev, NULL, rule->location, false);
6906 if (ret) {
6907 dev_err(&hdev->pdev->dev,
6908 "failed to delete cls flower rule %u in list, ret = %d\n",
6909 rule->location, ret);
6910 spin_unlock_bh(&hdev->fd_rule_lock);
6911 return ret;
6912 }
6913
6914 spin_unlock_bh(&hdev->fd_rule_lock);
6915
6916 return 0;
6917}
6918
4d60291b
HT
6919static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6920{
6921 struct hclge_vport *vport = hclge_get_vport(handle);
6922 struct hclge_dev *hdev = vport->back;
6923
6924 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6925 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6926}
6927
a4de0228
HT
6928static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
6929{
6930 struct hclge_vport *vport = hclge_get_vport(handle);
6931 struct hclge_dev *hdev = vport->back;
6932
6933 return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
6934}
6935
4d60291b
HT
6936static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6937{
6938 struct hclge_vport *vport = hclge_get_vport(handle);
6939 struct hclge_dev *hdev = vport->back;
6940
6941 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6942}
6943
6944static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6945{
6946 struct hclge_vport *vport = hclge_get_vport(handle);
6947 struct hclge_dev *hdev = vport->back;
6948
f02eb82d 6949 return hdev->rst_stats.hw_reset_done_cnt;
4d60291b
HT
6950}
6951
c17852a8
JS
6952static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
6953{
6954 struct hclge_vport *vport = hclge_get_vport(handle);
6955 struct hclge_dev *hdev = vport->back;
44122887 6956 bool clear;
c17852a8 6957
9abeb7d8 6958 hdev->fd_en = enable;
1483fa49 6959 clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
efe3fa45
GL
6960
6961 if (!enable) {
6962 spin_lock_bh(&hdev->fd_rule_lock);
44122887 6963 hclge_del_all_fd_entries(handle, clear);
efe3fa45
GL
6964 spin_unlock_bh(&hdev->fd_rule_lock);
6965 } else {
c17852a8 6966 hclge_restore_fd_entries(handle);
efe3fa45 6967 }
c17852a8
JS
6968}
6969
46a3df9f
S
6970static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
6971{
6972 struct hclge_desc desc;
d44f9b63
YL
6973 struct hclge_config_mac_mode_cmd *req =
6974 (struct hclge_config_mac_mode_cmd *)desc.data;
a90bb9a5 6975 u32 loop_en = 0;
46a3df9f
S
6976 int ret;
6977
6978 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
b9a8f883
YL
6979
6980 if (enable) {
6981 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
6982 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
6983 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
6984 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
6985 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
6986 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
6987 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
6988 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
6989 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
6990 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
6991 }
6992
a90bb9a5 6993 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
46a3df9f
S
6994
6995 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6996 if (ret)
6997 dev_err(&hdev->pdev->dev,
6998 "mac enable fail, ret =%d.\n", ret);
6999}
7000
dd2956ea
YM
7001static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
7002 u8 switch_param, u8 param_mask)
7003{
7004 struct hclge_mac_vlan_switch_cmd *req;
7005 struct hclge_desc desc;
7006 u32 func_id;
7007 int ret;
7008
7009 func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
7010 req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
71c5e83b
GH
7011
7012 /* read current config parameter */
dd2956ea 7013 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
71c5e83b 7014 true);
dd2956ea
YM
7015 req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
7016 req->func_id = cpu_to_le32(func_id);
71c5e83b
GH
7017
7018 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7019 if (ret) {
7020 dev_err(&hdev->pdev->dev,
7021 "read mac vlan switch parameter fail, ret = %d\n", ret);
7022 return ret;
7023 }
7024
7025 /* modify and write new config parameter */
7026 hclge_cmd_reuse_desc(&desc, false);
7027 req->switch_param = (req->switch_param & param_mask) | switch_param;
dd2956ea
YM
7028 req->param_mask = param_mask;
7029
7030 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7031 if (ret)
7032 dev_err(&hdev->pdev->dev,
7033 "set mac vlan switch parameter fail, ret = %d\n", ret);
7034 return ret;
7035}
7036
c9765a89
YM
7037static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
7038 int link_ret)
7039{
7040#define HCLGE_PHY_LINK_STATUS_NUM 200
7041
7042 struct phy_device *phydev = hdev->hw.mac.phydev;
7043 int i = 0;
7044 int ret;
7045
7046 do {
7047 ret = phy_read_status(phydev);
7048 if (ret) {
7049 dev_err(&hdev->pdev->dev,
7050 "phy update link status fail, ret = %d\n", ret);
7051 return;
7052 }
7053
7054 if (phydev->link == link_ret)
7055 break;
7056
7057 msleep(HCLGE_LINK_STATUS_MS);
7058 } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
7059}
7060
7061static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
7062{
7063#define HCLGE_MAC_LINK_STATUS_NUM 100
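/* poll up to HCLGE_MAC_LINK_STATUS_NUM times every HCLGE_LINK_STATUS_MS,
 * i.e. roughly 100 * 10 ms = 1 second, for the MAC link to reach link_ret
 */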
7064
fac24df7 7065 int link_status;
c9765a89
YM
7066 int i = 0;
7067 int ret;
7068
7069 do {
fac24df7
JS
7070 ret = hclge_get_mac_link_status(hdev, &link_status);
7071 if (ret)
c9765a89 7072 return ret;
fac24df7 7073 if (link_status == link_ret)
c9765a89
YM
7074 return 0;
7075
7076 msleep(HCLGE_LINK_STATUS_MS);
7077 } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
7078 return -EBUSY;
7079}
7080
7081static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
7082 bool is_phy)
7083{
c9765a89
YM
7084 int link_ret;
7085
7086 link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
7087
7088 if (is_phy)
7089 hclge_phy_link_status_wait(hdev, link_ret);
7090
7091 return hclge_mac_link_status_wait(hdev, link_ret);
7092}
7093
eb66d503 7094static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
c39c4d98 7095{
c39c4d98 7096 struct hclge_config_mac_mode_cmd *req;
c39c4d98
YL
7097 struct hclge_desc desc;
7098 u32 loop_en;
7099 int ret;
7100
e4d68dae
YL
7101 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
7102 /* 1 Read out the MAC mode config at first */
7103 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
7104 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7105 if (ret) {
7106 dev_err(&hdev->pdev->dev,
7107 "mac loopback get fail, ret =%d.\n", ret);
7108 return ret;
7109 }
c39c4d98 7110
e4d68dae
YL
7111 /* 2 Then setup the loopback flag */
7112 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
e4e87715 7113 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
e4d68dae
YL
7114
7115 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
c39c4d98 7116
e4d68dae
YL
7117 /* 3 Config mac work mode with loopback flag
7118 * and its original configure parameters
7119 */
7120 hclge_cmd_reuse_desc(&desc, false);
7121 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7122 if (ret)
7123 dev_err(&hdev->pdev->dev,
7124 "mac loopback set fail, ret =%d.\n", ret);
7125 return ret;
7126}
c39c4d98 7127
1cbc662d 7128static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
4dc13b96 7129 enum hnae3_loop loop_mode)
5fd50ac3
PL
7130{
7131#define HCLGE_SERDES_RETRY_MS 10
7132#define HCLGE_SERDES_RETRY_NUM 100
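/* the DONE bit is polled every HCLGE_SERDES_RETRY_MS, so the worst-case
 * wait is HCLGE_SERDES_RETRY_NUM * HCLGE_SERDES_RETRY_MS
 * (100 * 10 ms, roughly 1 second) before returning -EBUSY
 */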
350fda0a 7133
5fd50ac3
PL
7134 struct hclge_serdes_lb_cmd *req;
7135 struct hclge_desc desc;
7136 int ret, i = 0;
4dc13b96 7137 u8 loop_mode_b;
5fd50ac3 7138
d0d72bac 7139 req = (struct hclge_serdes_lb_cmd *)desc.data;
5fd50ac3
PL
7140 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
7141
4dc13b96
FL
7142 switch (loop_mode) {
7143 case HNAE3_LOOP_SERIAL_SERDES:
7144 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
7145 break;
7146 case HNAE3_LOOP_PARALLEL_SERDES:
7147 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
7148 break;
7149 default:
7150 dev_err(&hdev->pdev->dev,
7151 "unsupported serdes loopback mode %d\n", loop_mode);
7152 return -ENOTSUPP;
7153 }
7154
5fd50ac3 7155 if (en) {
4dc13b96
FL
7156 req->enable = loop_mode_b;
7157 req->mask = loop_mode_b;
5fd50ac3 7158 } else {
4dc13b96 7159 req->mask = loop_mode_b;
5fd50ac3
PL
7160 }
7161
7162 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7163 if (ret) {
7164 dev_err(&hdev->pdev->dev,
7165 "serdes loopback set fail, ret = %d\n", ret);
7166 return ret;
7167 }
7168
7169 do {
7170 msleep(HCLGE_SERDES_RETRY_MS);
7171 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
7172 true);
7173 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7174 if (ret) {
7175 dev_err(&hdev->pdev->dev,
7176 "serdes loopback get, ret = %d\n", ret);
7177 return ret;
7178 }
7179 } while (++i < HCLGE_SERDES_RETRY_NUM &&
7180 !(req->result & HCLGE_CMD_SERDES_DONE_B));
7181
7182 if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
7183 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
7184 return -EBUSY;
7185 } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
7186 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
7187 return -EIO;
7188 }
1cbc662d
YM
7189 return ret;
7190}
7191
7192static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
7193 enum hnae3_loop loop_mode)
7194{
7195 int ret;
7196
7197 ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
7198 if (ret)
7199 return ret;
5fd50ac3 7200
0f29fc23 7201 hclge_cfg_mac_mode(hdev, en);
350fda0a 7202
60df7e91 7203 ret = hclge_mac_phy_link_status_wait(hdev, en, false);
c9765a89
YM
7204 if (ret)
7205 dev_err(&hdev->pdev->dev,
7206 "serdes loopback config mac mode timeout\n");
7207
7208 return ret;
7209}
350fda0a 7210
c9765a89
YM
7211static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
7212 struct phy_device *phydev)
7213{
7214 int ret;
350fda0a 7215
c9765a89
YM
7216 if (!phydev->suspended) {
7217 ret = phy_suspend(phydev);
7218 if (ret)
7219 return ret;
7220 }
7221
7222 ret = phy_resume(phydev);
7223 if (ret)
7224 return ret;
7225
7226 return phy_loopback(phydev, true);
7227}
7228
7229static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
7230 struct phy_device *phydev)
7231{
7232 int ret;
7233
7234 ret = phy_loopback(phydev, false);
7235 if (ret)
7236 return ret;
7237
7238 return phy_suspend(phydev);
7239}
7240
7241static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
7242{
7243 struct phy_device *phydev = hdev->hw.mac.phydev;
7244 int ret;
7245
7246 if (!phydev)
7247 return -ENOTSUPP;
7248
7249 if (en)
7250 ret = hclge_enable_phy_loopback(hdev, phydev);
7251 else
7252 ret = hclge_disable_phy_loopback(hdev, phydev);
7253 if (ret) {
7254 dev_err(&hdev->pdev->dev,
7255 "set phy loopback fail, ret = %d\n", ret);
7256 return ret;
7257 }
7258
7259 hclge_cfg_mac_mode(hdev, en);
7260
60df7e91 7261 ret = hclge_mac_phy_link_status_wait(hdev, en, true);
c9765a89
YM
7262 if (ret)
7263 dev_err(&hdev->pdev->dev,
7264 "phy loopback config mac mode timeout\n");
7265
7266 return ret;
5fd50ac3
PL
7267}
7268
ebaf1908 7269static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
0f29fc23
YL
7270 int stream_id, bool enable)
7271{
7272 struct hclge_desc desc;
7273 struct hclge_cfg_com_tqp_queue_cmd *req =
7274 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
7275 int ret;
7276
7277 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
9a5ef4aa 7278 req->tqp_id = cpu_to_le16(tqp_id);
0f29fc23 7279 req->stream_id = cpu_to_le16(stream_id);
ebaf1908
WL
7280 if (enable)
7281 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
0f29fc23
YL
7282
7283 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7284 if (ret)
7285 dev_err(&hdev->pdev->dev,
7286 "Tqp enable fail, status =%d.\n", ret);
7287 return ret;
7288}
7289
e4d68dae
YL
7290static int hclge_set_loopback(struct hnae3_handle *handle,
7291 enum hnae3_loop loop_mode, bool en)
7292{
7293 struct hclge_vport *vport = hclge_get_vport(handle);
205a24ca 7294 struct hnae3_knic_private_info *kinfo;
e4d68dae 7295 struct hclge_dev *hdev = vport->back;
0f29fc23 7296 int i, ret;
e4d68dae 7297
dd2956ea
YM
7298 /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
7299 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
7300 * the same, the packets are looped back in the SSU. If SSU loopback
7301 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
7302 */
295ba232 7303 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
dd2956ea
YM
7304 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
7305
7306 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
7307 HCLGE_SWITCH_ALW_LPBK_MASK);
7308 if (ret)
7309 return ret;
7310 }
7311
e4d68dae 7312 switch (loop_mode) {
eb66d503
FL
7313 case HNAE3_LOOP_APP:
7314 ret = hclge_set_app_loopback(hdev, en);
c39c4d98 7315 break;
4dc13b96
FL
7316 case HNAE3_LOOP_SERIAL_SERDES:
7317 case HNAE3_LOOP_PARALLEL_SERDES:
7318 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
5fd50ac3 7319 break;
c9765a89
YM
7320 case HNAE3_LOOP_PHY:
7321 ret = hclge_set_phy_loopback(hdev, en);
7322 break;
c39c4d98
YL
7323 default:
7324 ret = -ENOTSUPP;
7325 dev_err(&hdev->pdev->dev,
7326 "loop_mode %d is not supported\n", loop_mode);
7327 break;
7328 }
7329
47ef6dec
JS
7330 if (ret)
7331 return ret;
7332
205a24ca
HT
7333 kinfo = &vport->nic.kinfo;
7334 for (i = 0; i < kinfo->num_tqps; i++) {
0f29fc23
YL
7335 ret = hclge_tqp_enable(hdev, i, 0, en);
7336 if (ret)
7337 return ret;
7338 }
46a3df9f 7339
0f29fc23 7340 return 0;
46a3df9f
S
7341}
7342
1cbc662d
YM
7343static int hclge_set_default_loopback(struct hclge_dev *hdev)
7344{
7345 int ret;
7346
7347 ret = hclge_set_app_loopback(hdev, false);
7348 if (ret)
7349 return ret;
7350
7351 ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
7352 if (ret)
7353 return ret;
7354
7355 return hclge_cfg_serdes_loopback(hdev, false,
7356 HNAE3_LOOP_PARALLEL_SERDES);
7357}
7358
46a3df9f
S
7359static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
7360{
7361 struct hclge_vport *vport = hclge_get_vport(handle);
205a24ca 7362 struct hnae3_knic_private_info *kinfo;
46a3df9f
S
7363 struct hnae3_queue *queue;
7364 struct hclge_tqp *tqp;
7365 int i;
7366
205a24ca
HT
7367 kinfo = &vport->nic.kinfo;
7368 for (i = 0; i < kinfo->num_tqps; i++) {
46a3df9f
S
7369 queue = handle->kinfo.tqp[i];
7370 tqp = container_of(queue, struct hclge_tqp, q);
7371 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
7372 }
7373}
7374
1c6dfe6f
YL
7375static void hclge_flush_link_update(struct hclge_dev *hdev)
7376{
7377#define HCLGE_FLUSH_LINK_TIMEOUT 100000
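/* Busy-wait (at most HCLGE_FLUSH_LINK_TIMEOUT polls of ~1us) until the
 * LINK_UPDATING flag clears or the service task makes progress, so that
 * an in-flight link update cannot race with the DOWN flag just set by
 * the caller.
 */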
7378
7379 unsigned long last = hdev->serv_processed_cnt;
7380 int i = 0;
7381
7382 while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
7383 i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
7384 last == hdev->serv_processed_cnt)
7385 usleep_range(1, 1);
7386}
7387
8cdb992f
JS
7388static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
7389{
7390 struct hclge_vport *vport = hclge_get_vport(handle);
7391 struct hclge_dev *hdev = vport->back;
7392
7393 if (enable) {
a9775bb6 7394 hclge_task_schedule(hdev, 0);
8cdb992f 7395 } else {
1c6dfe6f 7396 /* Set the DOWN flag here to disable link updating */
7be1b9f3 7397 set_bit(HCLGE_STATE_DOWN, &hdev->state);
1c6dfe6f
YL
7398
7399 /* flush memory to make sure DOWN is seen by service task */
7400 smp_mb__before_atomic();
7401 hclge_flush_link_update(hdev);
8cdb992f
JS
7402 }
7403}
7404
46a3df9f
S
7405static int hclge_ae_start(struct hnae3_handle *handle)
7406{
7407 struct hclge_vport *vport = hclge_get_vport(handle);
7408 struct hclge_dev *hdev = vport->back;
46a3df9f 7409
46a3df9f
S
7410 /* mac enable */
7411 hclge_cfg_mac_mode(hdev, true);
7412 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
be8d8cdb 7413 hdev->hw.mac.link = 0;
46a3df9f 7414
b50ae26c
PL
7415 /* reset tqp stats */
7416 hclge_reset_tqp_stats(handle);
7417
b01b7cf1 7418 hclge_mac_start_phy(hdev);
46a3df9f 7419
46a3df9f
S
7420 return 0;
7421}
7422
7423static void hclge_ae_stop(struct hnae3_handle *handle)
7424{
7425 struct hclge_vport *vport = hclge_get_vport(handle);
7426 struct hclge_dev *hdev = vport->back;
39cfbc9c 7427 int i;
46a3df9f 7428
2f7e4896 7429 set_bit(HCLGE_STATE_DOWN, &hdev->state);
efe3fa45 7430 spin_lock_bh(&hdev->fd_rule_lock);
d93ed94f 7431 hclge_clear_arfs_rules(handle);
efe3fa45 7432 spin_unlock_bh(&hdev->fd_rule_lock);
d93ed94f 7433
35d93a30
HT
7434 /* If it is not PF reset, the firmware will disable the MAC,
7435 * so it only needs to stop the phy here.
7436 */
7437 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
7438 hdev->reset_type != HNAE3_FUNC_RESET) {
9617f668 7439 hclge_mac_stop_phy(hdev);
ed8fb4b2 7440 hclge_update_link_status(hdev);
b50ae26c 7441 return;
9617f668 7442 }
b50ae26c 7443
39cfbc9c
HT
7444 for (i = 0; i < handle->kinfo.num_tqps; i++)
7445 hclge_reset_tqp(handle, i);
7446
20981a1e
HT
7447 hclge_config_mac_tnl_int(hdev, false);
7448
46a3df9f
S
7449 /* Mac disable */
7450 hclge_cfg_mac_mode(hdev, false);
7451
7452 hclge_mac_stop_phy(hdev);
7453
7454 /* reset tqp stats */
7455 hclge_reset_tqp_stats(handle);
f30dfddc 7456 hclge_update_link_status(hdev);
46a3df9f
S
7457}
7458
a6d818e3
YL
7459int hclge_vport_start(struct hclge_vport *vport)
7460{
ee4bcd3b
JS
7461 struct hclge_dev *hdev = vport->back;
7462
a6d818e3
YL
7463 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
7464 vport->last_active_jiffies = jiffies;
ee4bcd3b 7465
039ba863
JS
7466 if (test_bit(vport->vport_id, hdev->vport_config_block)) {
7467 if (vport->vport_id) {
7468 hclge_restore_mac_table_common(vport);
7469 hclge_restore_vport_vlan_table(vport);
7470 } else {
7471 hclge_restore_hw_table(hdev);
7472 }
7473 }
ee4bcd3b
JS
7474
7475 clear_bit(vport->vport_id, hdev->vport_config_block);
7476
a6d818e3
YL
7477 return 0;
7478}
7479
7480void hclge_vport_stop(struct hclge_vport *vport)
7481{
7482 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
7483}
7484
7485static int hclge_client_start(struct hnae3_handle *handle)
7486{
7487 struct hclge_vport *vport = hclge_get_vport(handle);
7488
7489 return hclge_vport_start(vport);
7490}
7491
7492static void hclge_client_stop(struct hnae3_handle *handle)
7493{
7494 struct hclge_vport *vport = hclge_get_vport(handle);
7495
7496 hclge_vport_stop(vport);
7497}
7498
46a3df9f
S
7499static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
7500 u16 cmdq_resp, u8 resp_code,
7501 enum hclge_mac_vlan_tbl_opcode op)
7502{
7503 struct hclge_dev *hdev = vport->back;
46a3df9f
S
7504
7505 if (cmdq_resp) {
7506 dev_err(&hdev->pdev->dev,
adcf738b 7507 "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
46a3df9f
S
7508 cmdq_resp);
7509 return -EIO;
7510 }
7511
7512 if (op == HCLGE_MAC_VLAN_ADD) {
c631c696 7513 if (!resp_code || resp_code == 1)
6e4139f6 7514 return 0;
c631c696
JS
7515 else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
7516 resp_code == HCLGE_ADD_MC_OVERFLOW)
6e4139f6 7517 return -ENOSPC;
6e4139f6
JS
7518
7519 dev_err(&hdev->pdev->dev,
7520 "add mac addr failed for undefined, code=%u.\n",
7521 resp_code);
7522 return -EIO;
46a3df9f
S
7523 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
7524 if (!resp_code) {
6e4139f6 7525 return 0;
46a3df9f 7526 } else if (resp_code == 1) {
46a3df9f
S
7527 dev_dbg(&hdev->pdev->dev,
7528 "remove mac addr failed for miss.\n");
6e4139f6 7529 return -ENOENT;
46a3df9f 7530 }
6e4139f6
JS
7531
7532 dev_err(&hdev->pdev->dev,
7533 "remove mac addr failed for undefined, code=%u.\n",
7534 resp_code);
7535 return -EIO;
46a3df9f
S
7536 } else if (op == HCLGE_MAC_VLAN_LKUP) {
7537 if (!resp_code) {
6e4139f6 7538 return 0;
46a3df9f 7539 } else if (resp_code == 1) {
46a3df9f
S
7540 dev_dbg(&hdev->pdev->dev,
7541 "lookup mac addr failed for miss.\n");
6e4139f6 7542 return -ENOENT;
46a3df9f 7543 }
6e4139f6 7544
46a3df9f 7545 dev_err(&hdev->pdev->dev,
6e4139f6
JS
7546 "lookup mac addr failed for undefined, code=%u.\n",
7547 resp_code);
7548 return -EIO;
46a3df9f
S
7549 }
7550
6e4139f6
JS
7551 dev_err(&hdev->pdev->dev,
7552 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
7553
7554 return -EINVAL;
46a3df9f
S
7555}
7556
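/* One bit per function id is spread across the extra command
 * descriptors: vfids 0-191 live in desc[1] and vfids 192-255 in
 * desc[2], 32 ids per 32-bit data word. For example, vfid 200 maps to
 * desc[2], word (200 - 192) / 32 = 0, bit 200 % 32 = 8.
 */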
7557static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
7558{
b37ce587
YM
7559#define HCLGE_VF_NUM_IN_FIRST_DESC 192
7560
b9a8f883
YL
7561 unsigned int word_num;
7562 unsigned int bit_num;
46a3df9f
S
7563
7564 if (vfid > 255 || vfid < 0)
7565 return -EIO;
7566
b37ce587 7567 if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
46a3df9f
S
7568 word_num = vfid / 32;
7569 bit_num = vfid % 32;
7570 if (clr)
a90bb9a5 7571 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
46a3df9f 7572 else
a90bb9a5 7573 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
46a3df9f 7574 } else {
b37ce587 7575 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
46a3df9f
S
7576 bit_num = vfid % 32;
7577 if (clr)
a90bb9a5 7578 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
46a3df9f 7579 else
a90bb9a5 7580 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
46a3df9f
S
7581 }
7582
7583 return 0;
7584}
7585
7586static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
7587{
7588#define HCLGE_DESC_NUMBER 3
7589#define HCLGE_FUNC_NUMBER_PER_DESC 6
7590 int i, j;
7591
6c39d527 7592 for (i = 1; i < HCLGE_DESC_NUMBER; i++)
46a3df9f
S
7593 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
7594 if (desc[i].data[j])
7595 return false;
7596
7597 return true;
7598}
7599
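/* The 6-byte MAC address is packed byte 0 first (little-endian) into
 * the table entry: bytes 0-3 form mac_addr_hi32 and bytes 4-5 form
 * mac_addr_lo16, so e.g. 00:11:22:33:44:55 becomes
 * hi32 = 0x33221100 and lo16 = 0x5544.
 */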
d44f9b63 7600static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
3a586422 7601 const u8 *addr, bool is_mc)
46a3df9f
S
7602{
7603 const unsigned char *mac_addr = addr;
7604 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
7605 (mac_addr[0]) | (mac_addr[1] << 8);
7606 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
7607
3a586422
WL
7608 hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7609 if (is_mc) {
7610 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
7611 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7612 }
7613
46a3df9f
S
7614 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
7615 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
7616}
7617
46a3df9f 7618static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
d44f9b63 7619 struct hclge_mac_vlan_tbl_entry_cmd *req)
46a3df9f
S
7620{
7621 struct hclge_dev *hdev = vport->back;
7622 struct hclge_desc desc;
7623 u8 resp_code;
a90bb9a5 7624 u16 retval;
46a3df9f
S
7625 int ret;
7626
7627 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
7628
d44f9b63 7629 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
46a3df9f
S
7630
7631 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7632 if (ret) {
7633 dev_err(&hdev->pdev->dev,
7634 "del mac addr failed for cmd_send, ret =%d.\n",
7635 ret);
7636 return ret;
7637 }
a90bb9a5
YL
7638 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7639 retval = le16_to_cpu(desc.retval);
46a3df9f 7640
a90bb9a5 7641 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
46a3df9f
S
7642 HCLGE_MAC_VLAN_REMOVE);
7643}
7644
7645static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
d44f9b63 7646 struct hclge_mac_vlan_tbl_entry_cmd *req,
46a3df9f
S
7647 struct hclge_desc *desc,
7648 bool is_mc)
7649{
7650 struct hclge_dev *hdev = vport->back;
7651 u8 resp_code;
a90bb9a5 7652 u16 retval;
46a3df9f
S
7653 int ret;
7654
7655 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
7656 if (is_mc) {
7657 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7658 memcpy(desc[0].data,
7659 req,
d44f9b63 7660 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
46a3df9f
S
7661 hclge_cmd_setup_basic_desc(&desc[1],
7662 HCLGE_OPC_MAC_VLAN_ADD,
7663 true);
7664 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7665 hclge_cmd_setup_basic_desc(&desc[2],
7666 HCLGE_OPC_MAC_VLAN_ADD,
7667 true);
7668 ret = hclge_cmd_send(&hdev->hw, desc, 3);
7669 } else {
7670 memcpy(desc[0].data,
7671 req,
d44f9b63 7672 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
46a3df9f
S
7673 ret = hclge_cmd_send(&hdev->hw, desc, 1);
7674 }
7675 if (ret) {
7676 dev_err(&hdev->pdev->dev,
7677 "lookup mac addr failed for cmd_send, ret =%d.\n",
7678 ret);
7679 return ret;
7680 }
a90bb9a5
YL
7681 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
7682 retval = le16_to_cpu(desc[0].retval);
46a3df9f 7683
a90bb9a5 7684 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
46a3df9f
S
7685 HCLGE_MAC_VLAN_LKUP);
7686}
7687
7688static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
d44f9b63 7689 struct hclge_mac_vlan_tbl_entry_cmd *req,
46a3df9f
S
7690 struct hclge_desc *mc_desc)
7691{
7692 struct hclge_dev *hdev = vport->back;
7693 int cfg_status;
7694 u8 resp_code;
a90bb9a5 7695 u16 retval;
46a3df9f
S
7696 int ret;
7697
7698 if (!mc_desc) {
7699 struct hclge_desc desc;
7700
7701 hclge_cmd_setup_basic_desc(&desc,
7702 HCLGE_OPC_MAC_VLAN_ADD,
7703 false);
d44f9b63
YL
7704 memcpy(desc.data, req,
7705 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
46a3df9f 7706 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
a90bb9a5
YL
7707 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7708 retval = le16_to_cpu(desc.retval);
7709
7710 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
46a3df9f
S
7711 resp_code,
7712 HCLGE_MAC_VLAN_ADD);
7713 } else {
c3b6f755 7714 hclge_cmd_reuse_desc(&mc_desc[0], false);
46a3df9f 7715 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
c3b6f755 7716 hclge_cmd_reuse_desc(&mc_desc[1], false);
46a3df9f 7717 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
c3b6f755 7718 hclge_cmd_reuse_desc(&mc_desc[2], false);
46a3df9f
S
7719 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
7720 memcpy(mc_desc[0].data, req,
d44f9b63 7721 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
46a3df9f 7722 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
a90bb9a5
YL
7723 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
7724 retval = le16_to_cpu(mc_desc[0].retval);
7725
7726 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
46a3df9f
S
7727 resp_code,
7728 HCLGE_MAC_VLAN_ADD);
7729 }
7730
7731 if (ret) {
7732 dev_err(&hdev->pdev->dev,
7733 "add mac addr failed for cmd_send, ret =%d.\n",
7734 ret);
7735 return ret;
7736 }
7737
7738 return cfg_status;
7739}
7740
39932473 7741static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
c1c5f66e 7742 u16 *allocated_size)
39932473
JS
7743{
7744 struct hclge_umv_spc_alc_cmd *req;
7745 struct hclge_desc desc;
7746 int ret;
7747
7748 req = (struct hclge_umv_spc_alc_cmd *)desc.data;
7749 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
63cbf7a9 7750
39932473
JS
7751 req->space_size = cpu_to_le32(space_size);
7752
7753 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7754 if (ret) {
c1c5f66e
JS
7755 dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
7756 ret);
39932473
JS
7757 return ret;
7758 }
7759
3fd8dc26 7760 *allocated_size = le32_to_cpu(desc.data[1]);
39932473
JS
7761
7762 return 0;
7763}
7764
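/* The umv (unicast mac vlan) space granted by firmware is carved into
 * (num_alloc_vport + 1) equal portions: each vport, PF included, gets
 * one portion as its private quota (priv_umv_size), and the remaining
 * portion plus the division remainder forms the pool shared by all
 * vports (share_umv_size).
 */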
1ac0e6c2
JS
7765static int hclge_init_umv_space(struct hclge_dev *hdev)
7766{
7767 u16 allocated_size = 0;
7768 int ret;
7769
c1c5f66e 7770 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
1ac0e6c2
JS
7771 if (ret)
7772 return ret;
7773
7774 if (allocated_size < hdev->wanted_umv_size)
7775 dev_warn(&hdev->pdev->dev,
7776 "failed to alloc umv space, want %u, get %u\n",
7777 hdev->wanted_umv_size, allocated_size);
7778
1ac0e6c2
JS
7779 hdev->max_umv_size = allocated_size;
7780 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
7781 hdev->share_umv_size = hdev->priv_umv_size +
7782 hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7783
7784 return 0;
7785}
7786
39932473
JS
7787static void hclge_reset_umv_space(struct hclge_dev *hdev)
7788{
7789 struct hclge_vport *vport;
7790 int i;
7791
7792 for (i = 0; i < hdev->num_alloc_vport; i++) {
7793 vport = &hdev->vport[i];
7794 vport->used_umv_num = 0;
7795 }
7796
7d0b3451 7797 mutex_lock(&hdev->vport_lock);
39932473 7798 hdev->share_umv_size = hdev->priv_umv_size +
4c58f592 7799 hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7d0b3451 7800 mutex_unlock(&hdev->vport_lock);
39932473
JS
7801}
7802
7d0b3451 7803static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
39932473
JS
7804{
7805 struct hclge_dev *hdev = vport->back;
7806 bool is_full;
7807
7d0b3451
JS
7808 if (need_lock)
7809 mutex_lock(&hdev->vport_lock);
7810
39932473
JS
7811 is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
7812 hdev->share_umv_size == 0);
7d0b3451
JS
7813
7814 if (need_lock)
7815 mutex_unlock(&hdev->vport_lock);
39932473
JS
7816
7817 return is_full;
7818}
7819
7820static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
7821{
7822 struct hclge_dev *hdev = vport->back;
7823
39932473
JS
7824 if (is_free) {
7825 if (vport->used_umv_num > hdev->priv_umv_size)
7826 hdev->share_umv_size++;
54a395b6 7827
7828 if (vport->used_umv_num > 0)
7829 vport->used_umv_num--;
39932473 7830 } else {
54a395b6 7831 if (vport->used_umv_num >= hdev->priv_umv_size &&
7832 hdev->share_umv_size > 0)
39932473
JS
7833 hdev->share_umv_size--;
7834 vport->used_umv_num++;
7835 }
39932473
JS
7836}
7837
ee4bcd3b
JS
7838static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
7839 const u8 *mac_addr)
7840{
7841 struct hclge_mac_node *mac_node, *tmp;
7842
7843 list_for_each_entry_safe(mac_node, tmp, list, node)
7844 if (ether_addr_equal(mac_addr, mac_node->mac_addr))
7845 return mac_node;
7846
7847 return NULL;
7848}
7849
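/* Each address in the per-vport uc/mc list carries one of three states:
 * TO_ADD (requested but not yet written to the MAC table), ACTIVE
 * (present in hardware) and TO_DEL (to be removed from hardware).
 * The transitions below merge a new request from set_rx_mode or from
 * tmp_add_list into whatever state an existing node already has.
 */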
7850static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
7851 enum HCLGE_MAC_NODE_STATE state)
7852{
7853 switch (state) {
7854 /* from set_rx_mode or tmp_add_list */
7855 case HCLGE_MAC_TO_ADD:
7856 if (mac_node->state == HCLGE_MAC_TO_DEL)
7857 mac_node->state = HCLGE_MAC_ACTIVE;
7858 break;
7859 /* only from set_rx_mode */
7860 case HCLGE_MAC_TO_DEL:
7861 if (mac_node->state == HCLGE_MAC_TO_ADD) {
7862 list_del(&mac_node->node);
7863 kfree(mac_node);
7864 } else {
7865 mac_node->state = HCLGE_MAC_TO_DEL;
7866 }
7867 break;
7868 /* only from tmp_add_list, the mac_node->state won't be
7869 * ACTIVE.
7870 */
7871 case HCLGE_MAC_ACTIVE:
7872 if (mac_node->state == HCLGE_MAC_TO_ADD)
7873 mac_node->state = HCLGE_MAC_ACTIVE;
7874
7875 break;
7876 }
7877}
7878
7879int hclge_update_mac_list(struct hclge_vport *vport,
7880 enum HCLGE_MAC_NODE_STATE state,
7881 enum HCLGE_MAC_ADDR_TYPE mac_type,
7882 const unsigned char *addr)
7883{
7884 struct hclge_dev *hdev = vport->back;
7885 struct hclge_mac_node *mac_node;
7886 struct list_head *list;
7887
7888 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7889 &vport->uc_mac_list : &vport->mc_mac_list;
7890
7891 spin_lock_bh(&vport->mac_list_lock);
7892
7893 /* if the mac addr is already in the mac list, no need to add a new
7894 * one into it, just check the mac addr state, convert it to a
7895 * new state, or just remove it, or do nothing.
7896 */
7897 mac_node = hclge_find_mac_node(list, addr);
7898 if (mac_node) {
7899 hclge_update_mac_node(mac_node, state);
7900 spin_unlock_bh(&vport->mac_list_lock);
7901 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
7902 return 0;
7903 }
7904
7905 /* if this address was never added, there is no need to delete it */
7906 if (state == HCLGE_MAC_TO_DEL) {
7907 spin_unlock_bh(&vport->mac_list_lock);
7908 dev_err(&hdev->pdev->dev,
7909 "failed to delete address %pM from mac list\n",
7910 addr);
7911 return -ENOENT;
7912 }
7913
7914 mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
7915 if (!mac_node) {
7916 spin_unlock_bh(&vport->mac_list_lock);
7917 return -ENOMEM;
7918 }
7919
7920 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
7921
7922 mac_node->state = state;
7923 ether_addr_copy(mac_node->mac_addr, addr);
7924 list_add_tail(&mac_node->node, list);
7925
7926 spin_unlock_bh(&vport->mac_list_lock);
7927
7928 return 0;
7929}
7930
46a3df9f
S
7931static int hclge_add_uc_addr(struct hnae3_handle *handle,
7932 const unsigned char *addr)
7933{
7934 struct hclge_vport *vport = hclge_get_vport(handle);
7935
ee4bcd3b
JS
7936 return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
7937 addr);
46a3df9f
S
7938}
7939
7940int hclge_add_uc_addr_common(struct hclge_vport *vport,
7941 const unsigned char *addr)
7942{
7943 struct hclge_dev *hdev = vport->back;
d44f9b63 7944 struct hclge_mac_vlan_tbl_entry_cmd req;
d07b6bb4 7945 struct hclge_desc desc;
a90bb9a5 7946 u16 egress_port = 0;
aa7a795e 7947 int ret;
46a3df9f
S
7948
7949 /* mac addr check */
7950 if (is_zero_ether_addr(addr) ||
7951 is_broadcast_ether_addr(addr) ||
7952 is_multicast_ether_addr(addr)) {
7953 dev_err(&hdev->pdev->dev,
7954 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
9b2f3477 7955 addr, is_zero_ether_addr(addr),
46a3df9f
S
7956 is_broadcast_ether_addr(addr),
7957 is_multicast_ether_addr(addr));
7958 return -EINVAL;
7959 }
7960
7961 memset(&req, 0, sizeof(req));
a90bb9a5 7962
e4e87715
PL
7963 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7964 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
a90bb9a5
YL
7965
7966 req.egress_port = cpu_to_le16(egress_port);
46a3df9f 7967
3a586422 7968 hclge_prepare_mac_addr(&req, addr, false);
46a3df9f 7969
d07b6bb4
JS
7970 /* Lookup the mac address in the mac_vlan table, and add
7971 * it if the entry is nonexistent. Duplicate unicast entries
7972 * are not allowed in the mac vlan table.
7973 */
7974 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
39932473 7975 if (ret == -ENOENT) {
7d0b3451
JS
7976 mutex_lock(&hdev->vport_lock);
7977 if (!hclge_is_umv_space_full(vport, false)) {
39932473
JS
7978 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
7979 if (!ret)
7980 hclge_update_umv_space(vport, false);
7d0b3451 7981 mutex_unlock(&hdev->vport_lock);
39932473
JS
7982 return ret;
7983 }
7d0b3451 7984 mutex_unlock(&hdev->vport_lock);
39932473 7985
c631c696
JS
7986 if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
7987 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
7988 hdev->priv_umv_size);
39932473
JS
7989
7990 return -ENOSPC;
7991 }
d07b6bb4
JS
7992
7993 /* check if we just hit the duplicate */
72110b56 7994 if (!ret) {
adcf738b 7995 dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
72110b56
PL
7996 vport->vport_id, addr);
7997 return 0;
7998 }
d07b6bb4
JS
7999
8000 dev_err(&hdev->pdev->dev,
8001 "PF failed to add unicast entry(%pM) in the MAC table\n",
8002 addr);
46a3df9f 8003
aa7a795e 8004 return ret;
46a3df9f
S
8005}
8006
8007static int hclge_rm_uc_addr(struct hnae3_handle *handle,
8008 const unsigned char *addr)
8009{
8010 struct hclge_vport *vport = hclge_get_vport(handle);
8011
ee4bcd3b
JS
8012 return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
8013 addr);
46a3df9f
S
8014}
8015
8016int hclge_rm_uc_addr_common(struct hclge_vport *vport,
8017 const unsigned char *addr)
8018{
8019 struct hclge_dev *hdev = vport->back;
d44f9b63 8020 struct hclge_mac_vlan_tbl_entry_cmd req;
aa7a795e 8021 int ret;
46a3df9f
S
8022
8023 /* mac addr check */
8024 if (is_zero_ether_addr(addr) ||
8025 is_broadcast_ether_addr(addr) ||
8026 is_multicast_ether_addr(addr)) {
9b2f3477
WL
8027 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
8028 addr);
46a3df9f
S
8029 return -EINVAL;
8030 }
8031
8032 memset(&req, 0, sizeof(req));
e4e87715 8033 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
3a586422 8034 hclge_prepare_mac_addr(&req, addr, false);
aa7a795e 8035 ret = hclge_remove_mac_vlan_tbl(vport, &req);
7d0b3451
JS
8036 if (!ret) {
8037 mutex_lock(&hdev->vport_lock);
39932473 8038 hclge_update_umv_space(vport, true);
7d0b3451
JS
8039 mutex_unlock(&hdev->vport_lock);
8040 } else if (ret == -ENOENT) {
ee4bcd3b 8041 ret = 0;
7d0b3451 8042 }
46a3df9f 8043
aa7a795e 8044 return ret;
46a3df9f
S
8045}
8046
8047static int hclge_add_mc_addr(struct hnae3_handle *handle,
8048 const unsigned char *addr)
8049{
8050 struct hclge_vport *vport = hclge_get_vport(handle);
8051
ee4bcd3b
JS
8052 return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
8053 addr);
46a3df9f
S
8054}
8055
8056int hclge_add_mc_addr_common(struct hclge_vport *vport,
8057 const unsigned char *addr)
8058{
8059 struct hclge_dev *hdev = vport->back;
d44f9b63 8060 struct hclge_mac_vlan_tbl_entry_cmd req;
46a3df9f 8061 struct hclge_desc desc[3];
46a3df9f
S
8062 int status;
8063
8064 /* mac addr check */
8065 if (!is_multicast_ether_addr(addr)) {
8066 dev_err(&hdev->pdev->dev,
8067 "Add mc mac err! invalid mac:%pM.\n",
8068 addr);
8069 return -EINVAL;
8070 }
8071 memset(&req, 0, sizeof(req));
3a586422 8072 hclge_prepare_mac_addr(&req, addr, true);
46a3df9f 8073 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
63cbf7a9 8074 if (status) {
46a3df9f
S
8075 /* This mac addr does not exist, add a new entry for it */
8076 memset(desc[0].data, 0, sizeof(desc[0].data));
8077 memset(desc[1].data, 0, sizeof(desc[0].data));
8078 memset(desc[2].data, 0, sizeof(desc[0].data));
46a3df9f 8079 }
63cbf7a9
YM
8080 status = hclge_update_desc_vfid(desc, vport->vport_id, false);
8081 if (status)
8082 return status;
8083 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
46a3df9f 8084
c631c696
JS
8085 /* if already overflowed, do not print every time */
8086 if (status == -ENOSPC &&
8087 !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
1f6db589 8088 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
46a3df9f
S
8089
8090 return status;
8091}
8092
8093static int hclge_rm_mc_addr(struct hnae3_handle *handle,
8094 const unsigned char *addr)
8095{
8096 struct hclge_vport *vport = hclge_get_vport(handle);
8097
ee4bcd3b
JS
8098 return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
8099 addr);
46a3df9f
S
8100}
8101
8102int hclge_rm_mc_addr_common(struct hclge_vport *vport,
8103 const unsigned char *addr)
8104{
8105 struct hclge_dev *hdev = vport->back;
d44f9b63 8106 struct hclge_mac_vlan_tbl_entry_cmd req;
46a3df9f
S
8107 enum hclge_cmd_status status;
8108 struct hclge_desc desc[3];
46a3df9f
S
8109
8110 /* mac addr check */
8111 if (!is_multicast_ether_addr(addr)) {
8112 dev_dbg(&hdev->pdev->dev,
8113 "Remove mc mac err! invalid mac:%pM.\n",
8114 addr);
8115 return -EINVAL;
8116 }
8117
8118 memset(&req, 0, sizeof(req));
3a586422 8119 hclge_prepare_mac_addr(&req, addr, true);
46a3df9f
S
8120 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8121 if (!status) {
8122 /* This mac addr exists, remove this handle's VFID for it */
63cbf7a9
YM
8123 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
8124 if (status)
8125 return status;
46a3df9f
S
8126
8127 if (hclge_is_all_function_id_zero(desc))
8128 /* All the vfids are zero, so this entry needs to be deleted */
8129 status = hclge_remove_mac_vlan_tbl(vport, &req);
8130 else
8131 /* Not all the vfids are zero, update the vfid */
8132 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8133
ee4bcd3b 8134 } else if (status == -ENOENT) {
40cca1c5 8135 status = 0;
46a3df9f
S
8136 }
8137
46a3df9f
S
8138 return status;
8139}
8140
ee4bcd3b
JS
8141static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
8142 struct list_head *list,
8143 int (*sync)(struct hclge_vport *,
8144 const unsigned char *))
6dd86902 8145{
ee4bcd3b
JS
8146 struct hclge_mac_node *mac_node, *tmp;
8147 int ret;
6dd86902 8148
ee4bcd3b
JS
8149 list_for_each_entry_safe(mac_node, tmp, list, node) {
8150 ret = sync(vport, mac_node->mac_addr);
8151 if (!ret) {
8152 mac_node->state = HCLGE_MAC_ACTIVE;
8153 } else {
8154 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8155 &vport->state);
8156 break;
8157 }
8158 }
8159}
6dd86902 8160
ee4bcd3b
JS
8161static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
8162 struct list_head *list,
8163 int (*unsync)(struct hclge_vport *,
8164 const unsigned char *))
8165{
8166 struct hclge_mac_node *mac_node, *tmp;
8167 int ret;
6dd86902 8168
ee4bcd3b
JS
8169 list_for_each_entry_safe(mac_node, tmp, list, node) {
8170 ret = unsync(vport, mac_node->mac_addr);
8171 if (!ret || ret == -ENOENT) {
8172 list_del(&mac_node->node);
8173 kfree(mac_node);
8174 } else {
8175 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8176 &vport->state);
8177 break;
8178 }
8179 }
8180}
6dd86902 8181
c631c696 8182static bool hclge_sync_from_add_list(struct list_head *add_list,
ee4bcd3b
JS
8183 struct list_head *mac_list)
8184{
8185 struct hclge_mac_node *mac_node, *tmp, *new_node;
c631c696 8186 bool all_added = true;
6dd86902 8187
ee4bcd3b 8188 list_for_each_entry_safe(mac_node, tmp, add_list, node) {
c631c696
JS
8189 if (mac_node->state == HCLGE_MAC_TO_ADD)
8190 all_added = false;
8191
ee4bcd3b
JS
8192 /* if the mac address from tmp_add_list is not in the
8193 * uc/mc_mac_list, it means a TO_DEL request was received
8194 * during the time window of adding the mac address into the mac
8195 * table. If the mac_node state is ACTIVE, change it to TO_DEL so
8196 * that it will be removed next time; otherwise it must be TO_ADD,
8197 * meaning this address hasn't been added into the mac table yet,
8198 * so just remove the mac node.
8199 */
8200 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8201 if (new_node) {
8202 hclge_update_mac_node(new_node, mac_node->state);
8203 list_del(&mac_node->node);
8204 kfree(mac_node);
8205 } else if (mac_node->state == HCLGE_MAC_ACTIVE) {
8206 mac_node->state = HCLGE_MAC_TO_DEL;
8207 list_del(&mac_node->node);
8208 list_add_tail(&mac_node->node, mac_list);
8209 } else {
8210 list_del(&mac_node->node);
8211 kfree(mac_node);
8212 }
8213 }
c631c696
JS
8214
8215 return all_added;
6dd86902 8216}
8217
ee4bcd3b
JS
8218static void hclge_sync_from_del_list(struct list_head *del_list,
8219 struct list_head *mac_list)
6dd86902 8220{
ee4bcd3b 8221 struct hclge_mac_node *mac_node, *tmp, *new_node;
6dd86902 8222
ee4bcd3b
JS
8223 list_for_each_entry_safe(mac_node, tmp, del_list, node) {
8224 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8225 if (new_node) {
8226 /* If the mac addr exists in the mac list, it means
8227 * received a new TO_ADD request during the time window
8228 * of configuring the mac address. For the mac node
8229 * state is TO_ADD, and the address is already in the
8230 * in the hardware(due to delete fail), so we just need
8231 * to change the mac node state to ACTIVE.
8232 */
8233 new_node->state = HCLGE_MAC_ACTIVE;
8234 list_del(&mac_node->node);
8235 kfree(mac_node);
8236 } else {
8237 list_del(&mac_node->node);
8238 list_add_tail(&mac_node->node, mac_list);
8239 }
8240 }
8241}
6dd86902 8242
c631c696
JS
8243static void hclge_update_overflow_flags(struct hclge_vport *vport,
8244 enum HCLGE_MAC_ADDR_TYPE mac_type,
8245 bool is_all_added)
8246{
8247 if (mac_type == HCLGE_MAC_ADDR_UC) {
8248 if (is_all_added)
8249 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
8250 else
8251 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
8252 } else {
8253 if (is_all_added)
8254 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
8255 else
8256 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
8257 }
8258}
8259
ee4bcd3b
JS
8260static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
8261 enum HCLGE_MAC_ADDR_TYPE mac_type)
8262{
8263 struct hclge_mac_node *mac_node, *tmp, *new_node;
8264 struct list_head tmp_add_list, tmp_del_list;
8265 struct list_head *list;
c631c696 8266 bool all_added;
6dd86902 8267
ee4bcd3b
JS
8268 INIT_LIST_HEAD(&tmp_add_list);
8269 INIT_LIST_HEAD(&tmp_del_list);
6dd86902 8270
ee4bcd3b
JS
8271 /* move the mac addrs to the tmp_add_list and tmp_del_list, so
8272 * we can add/delete these mac addrs outside the spin lock
8273 */
8274 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8275 &vport->uc_mac_list : &vport->mc_mac_list;
6dd86902 8276
ee4bcd3b
JS
8277 spin_lock_bh(&vport->mac_list_lock);
8278
8279 list_for_each_entry_safe(mac_node, tmp, list, node) {
8280 switch (mac_node->state) {
8281 case HCLGE_MAC_TO_DEL:
8282 list_del(&mac_node->node);
8283 list_add_tail(&mac_node->node, &tmp_del_list);
8284 break;
8285 case HCLGE_MAC_TO_ADD:
8286 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8287 if (!new_node)
8288 goto stop_traverse;
8289 ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
8290 new_node->state = mac_node->state;
8291 list_add_tail(&new_node->node, &tmp_add_list);
8292 break;
8293 default:
6dd86902 8294 break;
8295 }
8296 }
ee4bcd3b
JS
8297
8298stop_traverse:
8299 spin_unlock_bh(&vport->mac_list_lock);
8300
8301 /* delete first, in order to get max mac table space for adding */
8302 if (mac_type == HCLGE_MAC_ADDR_UC) {
8303 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8304 hclge_rm_uc_addr_common);
8305 hclge_sync_vport_mac_list(vport, &tmp_add_list,
8306 hclge_add_uc_addr_common);
8307 } else {
8308 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8309 hclge_rm_mc_addr_common);
8310 hclge_sync_vport_mac_list(vport, &tmp_add_list,
8311 hclge_add_mc_addr_common);
8312 }
8313
8314 /* if adding/deleting some mac addresses failed, move them back to
8315 * the mac_list and retry next time.
8316 */
8317 spin_lock_bh(&vport->mac_list_lock);
8318
8319 hclge_sync_from_del_list(&tmp_del_list, list);
c631c696 8320 all_added = hclge_sync_from_add_list(&tmp_add_list, list);
ee4bcd3b
JS
8321
8322 spin_unlock_bh(&vport->mac_list_lock);
c631c696
JS
8323
8324 hclge_update_overflow_flags(vport, mac_type, all_added);
ee4bcd3b
JS
8325}
8326
8327static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
8328{
8329 struct hclge_dev *hdev = vport->back;
8330
8331 if (test_bit(vport->vport_id, hdev->vport_config_block))
8332 return false;
8333
8334 if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
8335 return true;
8336
8337 return false;
8338}
8339
8340static void hclge_sync_mac_table(struct hclge_dev *hdev)
8341{
8342 int i;
8343
8344 for (i = 0; i < hdev->num_alloc_vport; i++) {
8345 struct hclge_vport *vport = &hdev->vport[i];
8346
8347 if (!hclge_need_sync_mac_table(vport))
8348 continue;
8349
8350 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
8351 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
8352 }
6dd86902 8353}
8354
80a9f3f1
HC
8355static void hclge_build_del_list(struct list_head *list,
8356 bool is_del_list,
8357 struct list_head *tmp_del_list)
6dd86902 8358{
ee4bcd3b 8359 struct hclge_mac_node *mac_cfg, *tmp;
ee4bcd3b
JS
8360
8361 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
8362 switch (mac_cfg->state) {
8363 case HCLGE_MAC_TO_DEL:
8364 case HCLGE_MAC_ACTIVE:
6dd86902 8365 list_del(&mac_cfg->node);
80a9f3f1 8366 list_add_tail(&mac_cfg->node, tmp_del_list);
ee4bcd3b
JS
8367 break;
8368 case HCLGE_MAC_TO_ADD:
8369 if (is_del_list) {
8370 list_del(&mac_cfg->node);
8371 kfree(mac_cfg);
8372 }
8373 break;
6dd86902 8374 }
8375 }
80a9f3f1 8376}
ee4bcd3b 8377
80a9f3f1
HC
8378static void hclge_unsync_del_list(struct hclge_vport *vport,
8379 int (*unsync)(struct hclge_vport *vport,
8380 const unsigned char *addr),
8381 bool is_del_list,
8382 struct list_head *tmp_del_list)
8383{
8384 struct hclge_mac_node *mac_cfg, *tmp;
8385 int ret;
ee4bcd3b 8386
80a9f3f1 8387 list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) {
ee4bcd3b
JS
8388 ret = unsync(vport, mac_cfg->mac_addr);
8389 if (!ret || ret == -ENOENT) {
8390 /* clear all mac addrs from hardware, but keep these
8391 * mac addrs in the mac list, and restore them after
8392 * vf reset is finished.
8393 */
8394 if (!is_del_list &&
8395 mac_cfg->state == HCLGE_MAC_ACTIVE) {
8396 mac_cfg->state = HCLGE_MAC_TO_ADD;
8397 } else {
8398 list_del(&mac_cfg->node);
8399 kfree(mac_cfg);
8400 }
8401 } else if (is_del_list) {
8402 mac_cfg->state = HCLGE_MAC_TO_DEL;
8403 }
8404 }
80a9f3f1
HC
8405}
8406
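/* Remove all addresses of one type for a vport: matching entries are
 * moved to a temporary delete list under mac_list_lock and then removed
 * from hardware outside the lock. When is_del_list is false (e.g. for a
 * VF reset), ACTIVE entries are kept in the list as TO_ADD so they can
 * be restored once the vport comes back up.
 */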
8407void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
8408 enum HCLGE_MAC_ADDR_TYPE mac_type)
8409{
8410 int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
8411 struct hclge_dev *hdev = vport->back;
8412 struct list_head tmp_del_list, *list;
8413
8414 if (mac_type == HCLGE_MAC_ADDR_UC) {
8415 list = &vport->uc_mac_list;
8416 unsync = hclge_rm_uc_addr_common;
8417 } else {
8418 list = &vport->mc_mac_list;
8419 unsync = hclge_rm_mc_addr_common;
8420 }
8421
8422 INIT_LIST_HEAD(&tmp_del_list);
8423
8424 if (!is_del_list)
8425 set_bit(vport->vport_id, hdev->vport_config_block);
8426
8427 spin_lock_bh(&vport->mac_list_lock);
8428
8429 hclge_build_del_list(list, is_del_list, &tmp_del_list);
8430
8431 spin_unlock_bh(&vport->mac_list_lock);
8432
8433 hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);
ee4bcd3b
JS
8434
8435 spin_lock_bh(&vport->mac_list_lock);
8436
8437 hclge_sync_from_del_list(&tmp_del_list, list);
8438
8439 spin_unlock_bh(&vport->mac_list_lock);
8440}
8441
8442/* remove all mac addresses when uninitializing */
8443static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
8444 enum HCLGE_MAC_ADDR_TYPE mac_type)
8445{
8446 struct hclge_mac_node *mac_node, *tmp;
8447 struct hclge_dev *hdev = vport->back;
8448 struct list_head tmp_del_list, *list;
8449
8450 INIT_LIST_HEAD(&tmp_del_list);
8451
8452 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8453 &vport->uc_mac_list : &vport->mc_mac_list;
8454
8455 spin_lock_bh(&vport->mac_list_lock);
8456
8457 list_for_each_entry_safe(mac_node, tmp, list, node) {
8458 switch (mac_node->state) {
8459 case HCLGE_MAC_TO_DEL:
8460 case HCLGE_MAC_ACTIVE:
8461 list_del(&mac_node->node);
8462 list_add_tail(&mac_node->node, &tmp_del_list);
8463 break;
8464 case HCLGE_MAC_TO_ADD:
8465 list_del(&mac_node->node);
8466 kfree(mac_node);
8467 break;
8468 }
8469 }
8470
8471 spin_unlock_bh(&vport->mac_list_lock);
8472
8473 if (mac_type == HCLGE_MAC_ADDR_UC)
8474 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8475 hclge_rm_uc_addr_common);
8476 else
8477 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8478 hclge_rm_mc_addr_common);
8479
8480 if (!list_empty(&tmp_del_list))
8481 dev_warn(&hdev->pdev->dev,
8482 "uninit %s mac list for vport %u not completely.\n",
8483 mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
8484 vport->vport_id);
8485
8486 list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
8487 list_del(&mac_node->node);
8488 kfree(mac_node);
8489 }
6dd86902 8490}
8491
ee4bcd3b 8492static void hclge_uninit_mac_table(struct hclge_dev *hdev)
6dd86902 8493{
6dd86902 8494 struct hclge_vport *vport;
8495 int i;
8496
6dd86902 8497 for (i = 0; i < hdev->num_alloc_vport; i++) {
8498 vport = &hdev->vport[i];
ee4bcd3b
JS
8499 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
8500 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
6dd86902 8501 }
6dd86902 8502}
8503
f5aac71c
FL
8504static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
8505 u16 cmdq_resp, u8 resp_code)
8506{
8507#define HCLGE_ETHERTYPE_SUCCESS_ADD 0
8508#define HCLGE_ETHERTYPE_ALREADY_ADD 1
8509#define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
8510#define HCLGE_ETHERTYPE_KEY_CONFLICT 3
8511
8512 int return_status;
8513
8514 if (cmdq_resp) {
8515 dev_err(&hdev->pdev->dev,
adcf738b 8516 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
f5aac71c
FL
8517 cmdq_resp);
8518 return -EIO;
8519 }
8520
8521 switch (resp_code) {
8522 case HCLGE_ETHERTYPE_SUCCESS_ADD:
8523 case HCLGE_ETHERTYPE_ALREADY_ADD:
8524 return_status = 0;
8525 break;
8526 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
8527 dev_err(&hdev->pdev->dev,
8528 "add mac ethertype failed for manager table overflow.\n");
8529 return_status = -EIO;
8530 break;
8531 case HCLGE_ETHERTYPE_KEY_CONFLICT:
8532 dev_err(&hdev->pdev->dev,
8533 "add mac ethertype failed for key conflict.\n");
8534 return_status = -EIO;
8535 break;
8536 default:
8537 dev_err(&hdev->pdev->dev,
adcf738b 8538 "add mac ethertype failed for undefined, code=%u.\n",
f5aac71c
FL
8539 resp_code);
8540 return_status = -EIO;
8541 }
8542
8543 return return_status;
8544}
8545
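/* A MAC address proposed for a VF is refused when it is already present
 * in the hardware MAC-VLAN table for this vport, or already recorded as
 * another VF's administratively configured address.
 */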
8e6de441
HT
8546static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
8547 u8 *mac_addr)
8548{
8549 struct hclge_mac_vlan_tbl_entry_cmd req;
8550 struct hclge_dev *hdev = vport->back;
8551 struct hclge_desc desc;
8552 u16 egress_port = 0;
8553 int i;
8554
8555 if (is_zero_ether_addr(mac_addr))
8556 return false;
8557
8558 memset(&req, 0, sizeof(req));
8559 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8560 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8561 req.egress_port = cpu_to_le16(egress_port);
8562 hclge_prepare_mac_addr(&req, mac_addr, false);
8563
8564 if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
8565 return true;
8566
8567 vf_idx += HCLGE_VF_VPORT_START_NUM;
8568 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
8569 if (i != vf_idx &&
8570 ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
8571 return true;
8572
8573 return false;
8574}
8575
8576static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
8577 u8 *mac_addr)
8578{
8579 struct hclge_vport *vport = hclge_get_vport(handle);
8580 struct hclge_dev *hdev = vport->back;
8581
8582 vport = hclge_get_vf_vport(hdev, vf);
8583 if (!vport)
8584 return -EINVAL;
8585
8586 if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
8587 dev_info(&hdev->pdev->dev,
8588 "Specified MAC(=%pM) is same as before, no change committed!\n",
8589 mac_addr);
8590 return 0;
8591 }
8592
8593 if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
8594 dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
8595 mac_addr);
8596 return -EEXIST;
8597 }
8598
8599 ether_addr_copy(vport->vf_info.mac, mac_addr);
8e6de441 8600
90913670
YL
8601 if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
8602 dev_info(&hdev->pdev->dev,
8603 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
8604 vf, mac_addr);
8605 return hclge_inform_reset_assert_to_vf(vport);
8606 }
8607
8608 dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
8609 vf, mac_addr);
8610 return 0;
8611}
8612
f5aac71c
FL
8613static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
8614 const struct hclge_mac_mgr_tbl_entry_cmd *req)
8615{
8616 struct hclge_desc desc;
8617 u8 resp_code;
8618 u16 retval;
8619 int ret;
8620
8621 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
8622 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
8623
8624 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8625 if (ret) {
8626 dev_err(&hdev->pdev->dev,
8627 "add mac ethertype failed for cmd_send, ret =%d.\n",
8628 ret);
8629 return ret;
8630 }
8631
8632 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8633 retval = le16_to_cpu(desc.retval);
8634
8635 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
8636}
8637
8638static int init_mgr_tbl(struct hclge_dev *hdev)
8639{
8640 int ret;
8641 int i;
8642
8643 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
8644 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
8645 if (ret) {
8646 dev_err(&hdev->pdev->dev,
8647 "add mac ethertype failed, ret =%d.\n",
8648 ret);
8649 return ret;
8650 }
8651 }
8652
8653 return 0;
8654}
8655
46a3df9f
S
8656static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
8657{
8658 struct hclge_vport *vport = hclge_get_vport(handle);
8659 struct hclge_dev *hdev = vport->back;
8660
8661 ether_addr_copy(p, hdev->hw.mac.mac_addr);
8662}
8663
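/* Update the unicast MAC list when the device address changes: the new
 * address is added (or revived from TO_DEL back to ACTIVE) and moved to
 * the list head, while the old address is either dropped, if it was never
 * written to hardware, or marked TO_DEL for the service task to remove.
 */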
8664int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
8665 const u8 *old_addr, const u8 *new_addr)
8666{
8667 struct list_head *list = &vport->uc_mac_list;
8668 struct hclge_mac_node *old_node, *new_node;
8669
8670 new_node = hclge_find_mac_node(list, new_addr);
8671 if (!new_node) {
8672 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8673 if (!new_node)
8674 return -ENOMEM;
8675
8676 new_node->state = HCLGE_MAC_TO_ADD;
8677 ether_addr_copy(new_node->mac_addr, new_addr);
8678 list_add(&new_node->node, list);
8679 } else {
8680 if (new_node->state == HCLGE_MAC_TO_DEL)
8681 new_node->state = HCLGE_MAC_ACTIVE;
8682
8683 /* make sure the new addr is at the list head, so that the dev
8684 * addr is not left out of the mac table due to the umv space
8685 * limitation after a global/imp reset, which clears the mac
8686 * table in hardware.
8687 */
8688 list_move(&new_node->node, list);
8689 }
8690
8691 if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
8692 old_node = hclge_find_mac_node(list, old_addr);
8693 if (old_node) {
8694 if (old_node->state == HCLGE_MAC_TO_ADD) {
8695 list_del(&old_node->node);
8696 kfree(old_node);
8697 } else {
8698 old_node->state = HCLGE_MAC_TO_DEL;
8699 }
8700 }
8701 }
8702
8703 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8704
8705 return 0;
8706}
8707
59098055
FL
8708static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
8709 bool is_first)
8710{
8711 const unsigned char *new_addr = (const unsigned char *)p;
8712 struct hclge_vport *vport = hclge_get_vport(handle);
8713 struct hclge_dev *hdev = vport->back;
ee4bcd3b 8714 unsigned char *old_addr = NULL;
18838d0c 8715 int ret;
46a3df9f
S
8716
8717 /* mac addr check */
8718 if (is_zero_ether_addr(new_addr) ||
8719 is_broadcast_ether_addr(new_addr) ||
8720 is_multicast_ether_addr(new_addr)) {
8721 dev_err(&hdev->pdev->dev,
ee4bcd3b 8722 "change uc mac err! invalid mac: %pM.\n",
8723 new_addr);
8724 return -EINVAL;
8725 }
8726
ee4bcd3b 8727 ret = hclge_pause_addr_cfg(hdev, new_addr);
8728 if (ret) {
8729 dev_err(&hdev->pdev->dev,
ee4bcd3b 8730 "failed to configure mac pause address, ret = %d\n",
18838d0c 8731 ret);
ee4bcd3b 8732 return ret;
46a3df9f
S
8733 }
8734
ee4bcd3b
JS
8735 if (!is_first)
8736 old_addr = hdev->hw.mac.mac_addr;
8737
8738 spin_lock_bh(&vport->mac_list_lock);
8739 ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
8740 if (ret) {
8741 dev_err(&hdev->pdev->dev,
8742 "failed to change the mac addr:%pM, ret = %d\n",
8743 new_addr, ret);
8744 spin_unlock_bh(&vport->mac_list_lock);
8745
8746 if (!is_first)
8747 hclge_pause_addr_cfg(hdev, old_addr);
18838d0c 8748
8749 return ret;
8750 }
8751 /* we must update the dev addr under spin lock protection, to prevent
8752 * it from being removed by the set_rx_mode path.
8753 */
18838d0c 8754 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
8755 spin_unlock_bh(&vport->mac_list_lock);
8756
8757 hclge_task_schedule(hdev, 0);
8758
8759 return 0;
46a3df9f
S
8760}
8761
26483246
XW
8762static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
8763 int cmd)
8764{
8765 struct hclge_vport *vport = hclge_get_vport(handle);
8766 struct hclge_dev *hdev = vport->back;
8767
8768 if (!hdev->hw.mac.phydev)
8769 return -EOPNOTSUPP;
8770
8771 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
8772}
8773
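/* Enable or disable a VLAN filter type with a read-modify-write of the
 * VLAN filter control command: read the current vlan_fe bits, then set or
 * clear the requested fe_type bits and write the result back.
 */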
46a3df9f 8774static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
30ebc576 8775 u8 fe_type, bool filter_en, u8 vf_id)
46a3df9f 8776{
d44f9b63 8777 struct hclge_vlan_filter_ctrl_cmd *req;
46a3df9f
S
8778 struct hclge_desc desc;
8779 int ret;
8780
903b85d3
JS
8781 /* read current vlan filter parameter */
8782 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
d44f9b63 8783 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
46a3df9f 8784 req->vlan_type = vlan_type;
30ebc576 8785 req->vf_id = vf_id;
46a3df9f 8786
903b85d3
JS
8787 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8788 if (ret) {
8789 dev_err(&hdev->pdev->dev,
8790 "failed to get vlan filter config, ret = %d.\n", ret);
8791 return ret;
8792 }
8793
8794 /* modify and write new config parameter */
8795 hclge_cmd_reuse_desc(&desc, false);
8796 req->vlan_fe = filter_en ?
8797 (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
8798
46a3df9f 8799 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3f639907 8800 if (ret)
903b85d3 8801 dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
46a3df9f 8802 ret);
46a3df9f 8803
3f639907 8804 return ret;
46a3df9f
S
8805}
8806
391b5e93
JS
8807#define HCLGE_FILTER_TYPE_VF 0
8808#define HCLGE_FILTER_TYPE_PORT 1
64d114f0
ZL
8809#define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0)
8810#define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0)
8811#define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1)
8812#define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
8813#define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3)
8814#define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \
8815 | HCLGE_FILTER_FE_ROCE_EGRESS_B)
8816#define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \
8817 | HCLGE_FILTER_FE_ROCE_INGRESS_B)
391b5e93
JS
8818
8819static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
8820{
8821 struct hclge_vport *vport = hclge_get_vport(handle);
8822 struct hclge_dev *hdev = vport->back;
8823
295ba232 8824 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
64d114f0 8825 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
30ebc576 8826 HCLGE_FILTER_FE_EGRESS, enable, 0);
64d114f0 8827 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
30ebc576 8828 HCLGE_FILTER_FE_INGRESS, enable, 0);
64d114f0
ZL
8829 } else {
8830 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8831 HCLGE_FILTER_FE_EGRESS_V1_B, enable,
8832 0);
64d114f0 8833 }
c60edc17
JS
8834 if (enable)
8835 handle->netdev_flags |= HNAE3_VLAN_FLTR;
8836 else
8837 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
391b5e93
JS
8838}
8839
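/* Build the VF VLAN filter command. The target function is selected by a
 * per-VF bitmap carried in two descriptors: vfid / 8 picks the byte and
 * vfid % 8 picks the bit, with bytes at or beyond HCLGE_MAX_VF_BYTES
 * spilling into the second descriptor. For example, vfid 10 sets bit 2 of
 * byte 1.
 */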
8840static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
8841 bool is_kill, u16 vlan,
8842 struct hclge_desc *desc)
46a3df9f 8843{
d44f9b63
YL
8844 struct hclge_vlan_filter_vf_cfg_cmd *req0;
8845 struct hclge_vlan_filter_vf_cfg_cmd *req1;
46a3df9f
S
8846 u8 vf_byte_val;
8847 u8 vf_byte_off;
8848 int ret;
8849
8850 hclge_cmd_setup_basic_desc(&desc[0],
8851 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
8852 hclge_cmd_setup_basic_desc(&desc[1],
8853 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
8854
8855 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8856
8857 vf_byte_off = vfid / 8;
8858 vf_byte_val = 1 << (vfid % 8);
8859
d44f9b63
YL
8860 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
8861 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
46a3df9f 8862
a90bb9a5 8863 req0->vlan_id = cpu_to_le16(vlan);
46a3df9f
S
8864 req0->vlan_cfg = is_kill;
8865
8866 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
8867 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
8868 else
8869 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
8870
8871 ret = hclge_cmd_send(&hdev->hw, desc, 2);
8872 if (ret) {
8873 dev_err(&hdev->pdev->dev,
8874 "Send vf vlan command fail, ret =%d.\n",
8875 ret);
8876 return ret;
8877 }
8878
88936e32
PL
8879 return 0;
8880}
8881
8882static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid,
8883 bool is_kill, struct hclge_desc *desc)
8884{
8885 struct hclge_vlan_filter_vf_cfg_cmd *req;
8886
8887 req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
8888
46a3df9f 8889 if (!is_kill) {
6c251711 8890#define HCLGE_VF_VLAN_NO_ENTRY 2
88936e32 8891 if (!req->resp_code || req->resp_code == 1)
46a3df9f
S
8892 return 0;
8893
88936e32 8894 if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
81a9255e 8895 set_bit(vfid, hdev->vf_vlan_full);
6c251711
YL
8896 dev_warn(&hdev->pdev->dev,
8897 "vf vlan table is full, vf vlan filter is disabled\n");
8898 return 0;
8899 }
8900
46a3df9f 8901 dev_err(&hdev->pdev->dev,
adcf738b 8902 "Add vf vlan filter fail, ret =%u.\n",
88936e32 8903 req->resp_code);
46a3df9f 8904 } else {
41dafea2 8905#define HCLGE_VF_VLAN_DEL_NO_FOUND 1
88936e32 8906 if (!req->resp_code)
46a3df9f
S
8907 return 0;
8908
d0c31df2
JS
8909 /* vf vlan filter is disabled when the vf vlan table is full,
8910 * so a new vlan id will not have been added to the vf vlan
8911 * table. Just return 0 without a warning, to avoid massive
8912 * verbose logs on unload.
8913 */
88936e32 8914 if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
41dafea2 8915 return 0;
41dafea2 8916
46a3df9f 8917 dev_err(&hdev->pdev->dev,
adcf738b 8918 "Kill vf vlan filter fail, ret =%u.\n",
88936e32 8919 req->resp_code);
46a3df9f
S
8920 }
8921
8922 return -EIO;
8923}
8924
88936e32
PL
8925static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
8926 bool is_kill, u16 vlan,
8927 __be16 proto)
8928{
8929 struct hclge_vport *vport = &hdev->vport[vfid];
8930 struct hclge_desc desc[2];
8931 int ret;
8932
8933 /* if the vf vlan table is full, firmware disables the vf vlan
8934 * filter, so it is neither possible nor necessary to add a new
8935 * vlan id. If spoof check is enabled and the table is full, refuse
8936 * the new vlan, since tx packets with that vlan id would be dropped.
8937 */
8938 if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
8939 if (vport->vf_info.spoofchk && vlan) {
8940 dev_err(&hdev->pdev->dev,
8941 "Can't add vlan due to spoof check is on and vf vlan table is full\n");
8942 return -EPERM;
8943 }
8944 return 0;
8945 }
8946
8947 ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc);
8948 if (ret)
8949 return ret;
8950
8951 return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc);
8952}
8953
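/* Program the port-wide VLAN filter: the vlan id is decomposed into a
 * block offset (vlan_id / HCLGE_VLAN_ID_OFFSET_STEP), a byte within the
 * block and a bit within the byte. Assuming the usual step of 160 and
 * byte size of 8, vlan id 100 maps to offset 0, byte 12, bit 4.
 */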
8954static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
8955 u16 vlan_id, bool is_kill)
46a3df9f 8956{
d44f9b63 8957 struct hclge_vlan_filter_pf_cfg_cmd *req;
46a3df9f
S
8958 struct hclge_desc desc;
8959 u8 vlan_offset_byte_val;
8960 u8 vlan_offset_byte;
8961 u8 vlan_offset_160;
8962 int ret;
8963
8964 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
8965
d6ad7c53
GL
8966 vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
8967 vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
8968 HCLGE_VLAN_BYTE_SIZE;
8969 vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
46a3df9f 8970
d44f9b63 8971 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
46a3df9f
S
8972 req->vlan_offset = vlan_offset_160;
8973 req->vlan_cfg = is_kill;
8974 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
8975
8976 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
dc8131d8
YL
8977 if (ret)
8978 dev_err(&hdev->pdev->dev,
8979 "port vlan command, send fail, ret =%d.\n", ret);
8980 return ret;
8981}
8982
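/* Apply a VLAN filter change for one vport. hdev->vlan_table[vlan_id]
 * tracks which vports use each vlan id; the VF table is always updated,
 * but the port-wide filter is only touched when the first vport adds the
 * vlan or the last vport removes it.
 */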
8983static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
70a21490 8984 u16 vport_id, u16 vlan_id,
8985 bool is_kill)
8986{
8987 u16 vport_idx, vport_num = 0;
8988 int ret;
8989
daaa8521
YL
8990 if (is_kill && !vlan_id)
8991 return 0;
8992
dc8131d8 8993 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
70a21490 8994 proto);
46a3df9f
S
8995 if (ret) {
8996 dev_err(&hdev->pdev->dev,
adcf738b 8997 "Set %u vport vlan filter config fail, ret =%d.\n",
dc8131d8 8998 vport_id, ret);
46a3df9f
S
8999 return ret;
9000 }
9001
dc8131d8
YL
9002 /* vlan 0 may be added twice when 8021q module is enabled */
9003 if (!is_kill && !vlan_id &&
9004 test_bit(vport_id, hdev->vlan_table[vlan_id]))
9005 return 0;
9006
9007 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
46a3df9f 9008 dev_err(&hdev->pdev->dev,
adcf738b 9009 "Add port vlan failed, vport %u is already in vlan %u\n",
dc8131d8
YL
9010 vport_id, vlan_id);
9011 return -EINVAL;
46a3df9f
S
9012 }
9013
dc8131d8
YL
9014 if (is_kill &&
9015 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
9016 dev_err(&hdev->pdev->dev,
adcf738b 9017 "Delete port vlan failed, vport %u is not in vlan %u\n",
dc8131d8
YL
9018 vport_id, vlan_id);
9019 return -EINVAL;
9020 }
9021
54e97d11 9022 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
dc8131d8
YL
9023 vport_num++;
9024
9025 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
9026 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
9027 is_kill);
9028
9029 return ret;
9030}
9031
5f6ea83f
PL
9032static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
9033{
9034 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
9035 struct hclge_vport_vtag_tx_cfg_cmd *req;
9036 struct hclge_dev *hdev = vport->back;
9037 struct hclge_desc desc;
d9c0f275 9038 u16 bmap_index;
5f6ea83f
PL
9039 int status;
9040
9041 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
9042
9043 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
9044 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
9045 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
e4e87715
PL
9046 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
9047 vcfg->accept_tag1 ? 1 : 0);
9048 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
9049 vcfg->accept_untag1 ? 1 : 0);
9050 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
9051 vcfg->accept_tag2 ? 1 : 0);
9052 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
9053 vcfg->accept_untag2 ? 1 : 0);
9054 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
9055 vcfg->insert_tag1_en ? 1 : 0);
9056 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
9057 vcfg->insert_tag2_en ? 1 : 0);
592b0179
GL
9058 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B,
9059 vcfg->tag_shift_mode_en ? 1 : 0);
e4e87715 9060 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
5f6ea83f
PL
9061
9062 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
d9c0f275
JS
9063 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9064 HCLGE_VF_NUM_PER_BYTE;
9065 req->vf_bitmap[bmap_index] =
9066 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
5f6ea83f
PL
9067
9068 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9069 if (status)
9070 dev_err(&hdev->pdev->dev,
9071 "Send port txvlan cfg command fail, ret =%d\n",
9072 status);
9073
9074 return status;
9075}
9076
9077static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
9078{
9079 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
9080 struct hclge_vport_vtag_rx_cfg_cmd *req;
9081 struct hclge_dev *hdev = vport->back;
9082 struct hclge_desc desc;
d9c0f275 9083 u16 bmap_index;
5f6ea83f
PL
9084 int status;
9085
9086 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
9087
9088 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
e4e87715
PL
9089 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
9090 vcfg->strip_tag1_en ? 1 : 0);
9091 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
9092 vcfg->strip_tag2_en ? 1 : 0);
9093 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
9094 vcfg->vlan1_vlan_prionly ? 1 : 0);
9095 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
9096 vcfg->vlan2_vlan_prionly ? 1 : 0);
592b0179
GL
9097 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B,
9098 vcfg->strip_tag1_discard_en ? 1 : 0);
9099 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B,
9100 vcfg->strip_tag2_discard_en ? 1 : 0);
5f6ea83f
PL
9101
9102 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
d9c0f275
JS
9103 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9104 HCLGE_VF_NUM_PER_BYTE;
9105 req->vf_bitmap[bmap_index] =
9106 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
5f6ea83f
PL
9107
9108 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9109 if (status)
9110 dev_err(&hdev->pdev->dev,
9111 "Send port rxvlan cfg command fail, ret =%d\n",
9112 status);
9113
9114 return status;
9115}
9116
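/* Configure the TX and RX VLAN offload settings of a vport according to
 * its port based VLAN state: when disabled, no default tag1 is inserted
 * on TX and RX stripping follows rx_vlan_offload_en; when enabled, the
 * port based VLAN tag is inserted as tag1 on TX and the RX strip/discard
 * bits are set accordingly.
 */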
9117static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
9118 u16 port_base_vlan_state,
9119 u16 vlan_tag)
9120{
9121 int ret;
9122
9123 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9124 vport->txvlan_cfg.accept_tag1 = true;
9125 vport->txvlan_cfg.insert_tag1_en = false;
9126 vport->txvlan_cfg.default_tag1 = 0;
9127 } else {
592b0179
GL
9128 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);
9129
9130 vport->txvlan_cfg.accept_tag1 =
9131 ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3;
741fca16
JS
9132 vport->txvlan_cfg.insert_tag1_en = true;
9133 vport->txvlan_cfg.default_tag1 = vlan_tag;
9134 }
9135
9136 vport->txvlan_cfg.accept_untag1 = true;
9137
9138 /* accept_tag2 and accept_untag2 are not supported on
9139 * pdev revision(0x20); newer revisions support them, but
9140 * these two fields cannot be configured by the user.
9141 */
9142 vport->txvlan_cfg.accept_tag2 = true;
9143 vport->txvlan_cfg.accept_untag2 = true;
9144 vport->txvlan_cfg.insert_tag2_en = false;
9145 vport->txvlan_cfg.default_tag2 = 0;
592b0179 9146 vport->txvlan_cfg.tag_shift_mode_en = true;
741fca16
JS
9147
9148 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9149 vport->rxvlan_cfg.strip_tag1_en = false;
9150 vport->rxvlan_cfg.strip_tag2_en =
9151 vport->rxvlan_cfg.rx_vlan_offload_en;
592b0179 9152 vport->rxvlan_cfg.strip_tag2_discard_en = false;
741fca16
JS
9153 } else {
9154 vport->rxvlan_cfg.strip_tag1_en =
9155 vport->rxvlan_cfg.rx_vlan_offload_en;
9156 vport->rxvlan_cfg.strip_tag2_en = true;
592b0179 9157 vport->rxvlan_cfg.strip_tag2_discard_en = true;
741fca16 9158 }
592b0179
GL
9159
9160 vport->rxvlan_cfg.strip_tag1_discard_en = false;
741fca16
JS
9161 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
9162 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
9163
9164 ret = hclge_set_vlan_tx_offload_cfg(vport);
9165 if (ret)
9166 return ret;
9167
9168 return hclge_set_vlan_rx_offload_cfg(vport);
9169}
9170
5f6ea83f
PL
9171static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
9172{
9173 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
9174 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
9175 struct hclge_desc desc;
9176 int status;
9177
9178 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
9179 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
9180 rx_req->ot_fst_vlan_type =
9181 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
9182 rx_req->ot_sec_vlan_type =
9183 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
9184 rx_req->in_fst_vlan_type =
9185 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
9186 rx_req->in_sec_vlan_type =
9187 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
9188
9189 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9190 if (status) {
9191 dev_err(&hdev->pdev->dev,
9192 "Send rxvlan protocol type command fail, ret =%d\n",
9193 status);
9194 return status;
9195 }
9196
9197 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
9198
d0d72bac 9199 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
5f6ea83f
PL
9200 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
9201 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
9202
9203 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9204 if (status)
9205 dev_err(&hdev->pdev->dev,
9206 "Send txvlan protocol type command fail, ret =%d\n",
9207 status);
9208
9209 return status;
9210}
9211
46a3df9f
S
9212static int hclge_init_vlan_config(struct hclge_dev *hdev)
9213{
5f6ea83f
PL
9214#define HCLGE_DEF_VLAN_TYPE 0x8100
9215
c60edc17 9216 struct hnae3_handle *handle = &hdev->vport[0].nic;
5f6ea83f 9217 struct hclge_vport *vport;
46a3df9f 9218 int ret;
5f6ea83f
PL
9219 int i;
9220
295ba232 9221 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
30ebc576
JS
9222 /* for revision 0x21, vf vlan filter is per function */
9223 for (i = 0; i < hdev->num_alloc_vport; i++) {
9224 vport = &hdev->vport[i];
9225 ret = hclge_set_vlan_filter_ctrl(hdev,
9226 HCLGE_FILTER_TYPE_VF,
9227 HCLGE_FILTER_FE_EGRESS,
9228 true,
9229 vport->vport_id);
9230 if (ret)
9231 return ret;
9232 }
46a3df9f 9233
64d114f0 9234 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
30ebc576
JS
9235 HCLGE_FILTER_FE_INGRESS, true,
9236 0);
64d114f0
ZL
9237 if (ret)
9238 return ret;
9239 } else {
9240 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9241 HCLGE_FILTER_FE_EGRESS_V1_B,
30ebc576 9242 true, 0);
64d114f0
ZL
9243 if (ret)
9244 return ret;
9245 }
46a3df9f 9246
c60edc17
JS
9247 handle->netdev_flags |= HNAE3_VLAN_FLTR;
9248
5f6ea83f
PL
9249 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
9250 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
9251 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
9252 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
9253 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
9254 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
9255
9256 ret = hclge_set_vlan_protocol_type(hdev);
5e43aef8
L
9257 if (ret)
9258 return ret;
46a3df9f 9259
5f6ea83f 9260 for (i = 0; i < hdev->num_alloc_vport; i++) {
741fca16 9261 u16 vlan_tag;
dcb35cce 9262
741fca16
JS
9263 vport = &hdev->vport[i];
9264 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
5f6ea83f 9265
741fca16
JS
9266 ret = hclge_vlan_offload_cfg(vport,
9267 vport->port_base_vlan_cfg.state,
9268 vlan_tag);
5f6ea83f
PL
9269 if (ret)
9270 return ret;
9271 }
9272
dc8131d8 9273 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
46a3df9f
S
9274}
9275
21e043cd
JS
9276static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
9277 bool writen_to_tbl)
c6075b19 9278{
9279 struct hclge_vport_vlan_cfg *vlan;
9280
c6075b19 9281 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
9282 if (!vlan)
9283 return;
9284
21e043cd 9285 vlan->hd_tbl_status = writen_to_tbl;
c6075b19 9286 vlan->vlan_id = vlan_id;
9287
9288 list_add_tail(&vlan->node, &vport->vlan_list);
9289}
9290
21e043cd
JS
9291static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
9292{
9293 struct hclge_vport_vlan_cfg *vlan, *tmp;
9294 struct hclge_dev *hdev = vport->back;
9295 int ret;
9296
9297 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9298 if (!vlan->hd_tbl_status) {
9299 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9300 vport->vport_id,
70a21490 9301 vlan->vlan_id, false);
21e043cd
JS
9302 if (ret) {
9303 dev_err(&hdev->pdev->dev,
9304 "restore vport vlan list failed, ret=%d\n",
9305 ret);
9306 return ret;
9307 }
9308 }
9309 vlan->hd_tbl_status = true;
9310 }
9311
9312 return 0;
9313}
9314
9315static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
9316 bool is_write_tbl)
c6075b19 9317{
9318 struct hclge_vport_vlan_cfg *vlan, *tmp;
9319 struct hclge_dev *hdev = vport->back;
9320
9321 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9322 if (vlan->vlan_id == vlan_id) {
9323 if (is_write_tbl && vlan->hd_tbl_status)
9324 hclge_set_vlan_filter_hw(hdev,
9325 htons(ETH_P_8021Q),
9326 vport->vport_id,
70a21490 9327 vlan_id,
c6075b19 9328 true);
9329
9330 list_del(&vlan->node);
9331 kfree(vlan);
9332 break;
9333 }
9334 }
9335}
9336
9337void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
9338{
9339 struct hclge_vport_vlan_cfg *vlan, *tmp;
9340 struct hclge_dev *hdev = vport->back;
9341
9342 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9343 if (vlan->hd_tbl_status)
9344 hclge_set_vlan_filter_hw(hdev,
9345 htons(ETH_P_8021Q),
9346 vport->vport_id,
70a21490 9347 vlan->vlan_id,
c6075b19 9348 true);
9349
9350 vlan->hd_tbl_status = false;
9351 if (is_del_list) {
9352 list_del(&vlan->node);
9353 kfree(vlan);
9354 }
9355 }
23b4201d 9356 clear_bit(vport->vport_id, hdev->vf_vlan_full);
c6075b19 9357}
9358
9359void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
9360{
9361 struct hclge_vport_vlan_cfg *vlan, *tmp;
9362 struct hclge_vport *vport;
9363 int i;
9364
c6075b19 9365 for (i = 0; i < hdev->num_alloc_vport; i++) {
9366 vport = &hdev->vport[i];
9367 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9368 list_del(&vlan->node);
9369 kfree(vlan);
9370 }
9371 }
c6075b19 9372}
9373
039ba863 9374void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
b524b38f 9375{
b524b38f
JS
9376 struct hclge_vport_vlan_cfg *vlan, *tmp;
9377 struct hclge_dev *hdev = vport->back;
b943e033 9378 u16 vlan_proto;
039ba863
JS
9379 u16 vlan_id;
9380 u16 state;
9381 int ret;
b524b38f 9382
039ba863
JS
9383 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
9384 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
9385 state = vport->port_base_vlan_cfg.state;
b524b38f 9386
039ba863
JS
9387 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
9388 clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
9389 hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
9390 vport->vport_id, vlan_id,
9391 false);
9392 return;
9393 }
22044f95 9394
039ba863
JS
9395 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9396 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9397 vport->vport_id,
9398 vlan->vlan_id, false);
9399 if (ret)
9400 break;
9401 vlan->hd_tbl_status = true;
b524b38f 9402 }
b524b38f
JS
9403}
9404
9405/* For global reset and imp reset, hardware will clear the mac table,
9406 * so we change the mac address state from ACTIVE to TO_ADD; these are
9407 * then restored by the service task after the reset completes.
9408 * Furthermore, mac addresses in state TO_DEL or DEL_FAIL need not be
9409 * restored after reset, so just remove those nodes from mac_list.
9410 */
9411static void hclge_mac_node_convert_for_reset(struct list_head *list)
9412{
9413 struct hclge_mac_node *mac_node, *tmp;
9414
9415 list_for_each_entry_safe(mac_node, tmp, list, node) {
9416 if (mac_node->state == HCLGE_MAC_ACTIVE) {
9417 mac_node->state = HCLGE_MAC_TO_ADD;
9418 } else if (mac_node->state == HCLGE_MAC_TO_DEL) {
9419 list_del(&mac_node->node);
9420 kfree(mac_node);
9421 }
9422 }
9423}
9424
9425void hclge_restore_mac_table_common(struct hclge_vport *vport)
9426{
9427 spin_lock_bh(&vport->mac_list_lock);
9428
9429 hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
9430 hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
9431 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
9432
9433 spin_unlock_bh(&vport->mac_list_lock);
9434}
9435
039ba863
JS
9436static void hclge_restore_hw_table(struct hclge_dev *hdev)
9437{
9438 struct hclge_vport *vport = &hdev->vport[0];
9439 struct hnae3_handle *handle = &vport->nic;
9440
9441 hclge_restore_mac_table_common(vport);
9442 hclge_restore_vport_vlan_table(vport);
9443 set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
9444
9445 hclge_restore_fd_entries(handle);
9446}
9447
b2641e2a 9448int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
9449{
9450 struct hclge_vport *vport = hclge_get_vport(handle);
9451
44e626f7
JS
9452 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9453 vport->rxvlan_cfg.strip_tag1_en = false;
9454 vport->rxvlan_cfg.strip_tag2_en = enable;
592b0179 9455 vport->rxvlan_cfg.strip_tag2_discard_en = false;
44e626f7
JS
9456 } else {
9457 vport->rxvlan_cfg.strip_tag1_en = enable;
9458 vport->rxvlan_cfg.strip_tag2_en = true;
592b0179 9459 vport->rxvlan_cfg.strip_tag2_discard_en = true;
44e626f7 9460 }
592b0179
GL
9461
9462 vport->rxvlan_cfg.strip_tag1_discard_en = false;
052ece6d
PL
9463 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
9464 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
44e626f7 9465 vport->rxvlan_cfg.rx_vlan_offload_en = enable;
052ece6d
PL
9466
9467 return hclge_set_vlan_rx_offload_cfg(vport);
9468}
9469
21e043cd
JS
9470static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
9471 u16 port_base_vlan_state,
9472 struct hclge_vlan_info *new_info,
9473 struct hclge_vlan_info *old_info)
9474{
9475 struct hclge_dev *hdev = vport->back;
9476 int ret;
9477
9478 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
9479 hclge_rm_vport_all_vlan_table(vport, false);
9480 return hclge_set_vlan_filter_hw(hdev,
9481 htons(new_info->vlan_proto),
9482 vport->vport_id,
9483 new_info->vlan_tag,
70a21490 9484 false);
21e043cd
JS
9485 }
9486
9487 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
9488 vport->vport_id, old_info->vlan_tag,
70a21490 9489 true);
21e043cd
JS
9490 if (ret)
9491 return ret;
9492
9493 return hclge_add_vport_all_vlan_table(vport);
9494}
9495
9496int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
9497 struct hclge_vlan_info *vlan_info)
9498{
9499 struct hnae3_handle *nic = &vport->nic;
9500 struct hclge_vlan_info *old_vlan_info;
9501 struct hclge_dev *hdev = vport->back;
9502 int ret;
9503
9504 old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
9505
9506 ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
9507 if (ret)
9508 return ret;
9509
9510 if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
9511 /* add new VLAN tag */
9512 ret = hclge_set_vlan_filter_hw(hdev,
9513 htons(vlan_info->vlan_proto),
21e043cd
JS
9514 vport->vport_id,
9515 vlan_info->vlan_tag,
70a21490 9516 false);
21e043cd
JS
9517 if (ret)
9518 return ret;
9519
9520 /* remove old VLAN tag */
9521 ret = hclge_set_vlan_filter_hw(hdev,
9522 htons(old_vlan_info->vlan_proto),
21e043cd
JS
9523 vport->vport_id,
9524 old_vlan_info->vlan_tag,
70a21490 9525 true);
21e043cd
JS
9526 if (ret)
9527 return ret;
9528
9529 goto update;
9530 }
9531
9532 ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
9533 old_vlan_info);
9534 if (ret)
9535 return ret;
9536
9537 /* update state only when disable/enable port based VLAN */
9538 vport->port_base_vlan_cfg.state = state;
9539 if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
9540 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
9541 else
9542 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
9543
9544update:
9545 vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
9546 vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
9547 vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
9548
9549 return 0;
9550}
9551
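/* Decide how a "set VF VLAN" request changes the port based VLAN state:
 * currently disabled: vlan == 0 -> NOCHANGE, vlan != 0 -> ENABLE;
 * currently enabled: vlan == 0 -> DISABLE, same vlan -> NOCHANGE,
 * a different vlan -> MODIFY.
 */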
9552static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
9553 enum hnae3_port_base_vlan_state state,
9554 u16 vlan)
9555{
9556 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9557 if (!vlan)
9558 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
9559 else
9560 return HNAE3_PORT_BASE_VLAN_ENABLE;
9561 } else {
9562 if (!vlan)
9563 return HNAE3_PORT_BASE_VLAN_DISABLE;
9564 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
9565 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
9566 else
9567 return HNAE3_PORT_BASE_VLAN_MODIFY;
9568 }
9569}
9570
9571static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
9572 u16 vlan, u8 qos, __be16 proto)
9573{
592b0179 9574 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
21e043cd
JS
9575 struct hclge_vport *vport = hclge_get_vport(handle);
9576 struct hclge_dev *hdev = vport->back;
9577 struct hclge_vlan_info vlan_info;
9578 u16 state;
9579 int ret;
9580
295ba232 9581 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
21e043cd
JS
9582 return -EOPNOTSUPP;
9583
1c985508
JS
9584 vport = hclge_get_vf_vport(hdev, vfid);
9585 if (!vport)
9586 return -EINVAL;
9587
21e043cd 9588 /* qos is a 3 bit value, so it cannot be bigger than 7 */
1c985508 9589 if (vlan > VLAN_N_VID - 1 || qos > 7)
21e043cd
JS
9590 return -EINVAL;
9591 if (proto != htons(ETH_P_8021Q))
9592 return -EPROTONOSUPPORT;
9593
21e043cd
JS
9594 state = hclge_get_port_base_vlan_state(vport,
9595 vport->port_base_vlan_cfg.state,
9596 vlan);
9597 if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
9598 return 0;
9599
9600 vlan_info.vlan_tag = vlan;
9601 vlan_info.qos = qos;
9602 vlan_info.vlan_proto = ntohs(proto);
9603
592b0179
GL
9604 ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
9605 if (ret) {
9606 dev_err(&hdev->pdev->dev,
9607 "failed to update port base vlan for vf %d, ret = %d\n",
9608 vfid, ret);
92f11ea1
JS
9609 return ret;
9610 }
592b0179
GL
9611
9612 /* for DEVICE_VERSION_V3, vf doesn't need to know about the port based
9613 * VLAN state.
9614 */
9615 if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
9616 test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
9617 hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
9618 vport->vport_id, state,
9619 vlan, qos,
9620 ntohs(proto));
9621
9622 return 0;
21e043cd
JS
9623}
9624
59359fc8
JS
9625static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
9626{
9627 struct hclge_vlan_info *vlan_info;
9628 struct hclge_vport *vport;
9629 int ret;
9630 int vf;
9631
9632 /* clear port base vlan for all vf */
9633 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
9634 vport = &hdev->vport[vf];
9635 vlan_info = &vport->port_base_vlan_cfg.vlan_info;
9636
9637 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9638 vport->vport_id,
9639 vlan_info->vlan_tag, true);
9640 if (ret)
9641 dev_err(&hdev->pdev->dev,
9642 "failed to clear vf vlan for vf%d, ret = %d\n",
9643 vf - HCLGE_VF_VPORT_START_NUM, ret);
9644 }
9645}
9646
21e043cd
JS
9647int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
9648 u16 vlan_id, bool is_kill)
9649{
9650 struct hclge_vport *vport = hclge_get_vport(handle);
9651 struct hclge_dev *hdev = vport->back;
9652 bool writen_to_tbl = false;
9653 int ret = 0;
9654
9655 /* When the device is resetting or a reset has failed, the firmware
9656 * cannot handle mailbox messages. Just record the vlan id and remove
9657 * it after the reset has finished.
9658 */
b7b5d25b
GL
9659 if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9660 test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
fe4144d4
JS
9661 set_bit(vlan_id, vport->vlan_del_fail_bmap);
9662 return -EBUSY;
9663 }
9664
46ee7350 9665 /* when port based vlan is enabled, it is used as the vlan filter
9666 * entry. In this case, the vlan filter table is not updated when the
9667 * user adds or removes a vlan; only the vport vlan list is updated.
9668 * The vlan ids in the vlan list are written into the vlan filter
9669 * table once port based vlan is disabled.
9670 */
9671 if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9672 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
70a21490 9673 vlan_id, is_kill);
21e043cd
JS
9674 writen_to_tbl = true;
9675 }
9676
fe4144d4
JS
9677 if (!ret) {
9678 if (is_kill)
9679 hclge_rm_vport_vlan_table(vport, vlan_id, false);
9680 else
9681 hclge_add_vport_vlan_table(vport, vlan_id,
9682 writen_to_tbl);
9683 } else if (is_kill) {
46ee7350 9684 /* when removing the hw vlan filter entry fails, record the
9685 * vlan id and try to remove it from hw later, to stay
9686 * consistent with the stack
9687 */
9688 set_bit(vlan_id, vport->vlan_del_fail_bmap);
9689 }
9690 return ret;
9691}
21e043cd 9692
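/* Retry VLAN deletions that could not be handled earlier, e.g. while a
 * reset was in progress: walk each vport's vlan_del_fail_bmap and remove
 * those vlan ids from hardware, bounded by HCLGE_MAX_SYNC_COUNT per run.
 */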
9693static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
9694{
9695#define HCLGE_MAX_SYNC_COUNT 60
21e043cd 9696
fe4144d4
JS
9697 int i, ret, sync_cnt = 0;
9698 u16 vlan_id;
9699
9700 /* start from vport 1 for PF is always alive */
9701 for (i = 0; i < hdev->num_alloc_vport; i++) {
9702 struct hclge_vport *vport = &hdev->vport[i];
9703
9704 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9705 VLAN_N_VID);
9706 while (vlan_id != VLAN_N_VID) {
9707 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9708 vport->vport_id, vlan_id,
70a21490 9709 true);
fe4144d4
JS
9710 if (ret && ret != -EINVAL)
9711 return;
9712
9713 clear_bit(vlan_id, vport->vlan_del_fail_bmap);
9714 hclge_rm_vport_vlan_table(vport, vlan_id, false);
9715
9716 sync_cnt++;
9717 if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
9718 return;
9719
9720 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9721 VLAN_N_VID);
9722 }
9723 }
21e043cd
JS
9724}
9725
e6d7d79d 9726static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
46a3df9f 9727{
d44f9b63 9728 struct hclge_config_max_frm_size_cmd *req;
46a3df9f 9729 struct hclge_desc desc;
46a3df9f 9730
46a3df9f
S
9731 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
9732
d44f9b63 9733 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
e6d7d79d 9734 req->max_frm_size = cpu_to_le16(new_mps);
8fc7346c 9735 req->min_frm_size = HCLGE_MAC_MIN_FRAME;
46a3df9f 9736
e6d7d79d 9737 return hclge_cmd_send(&hdev->hw, &desc, 1);
46a3df9f
S
9738}
9739
dd72140c
FL
9740static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
9741{
9742 struct hclge_vport *vport = hclge_get_vport(handle);
818f1675
YL
9743
9744 return hclge_set_vport_mtu(vport, new_mtu);
9745}
9746
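/* Convert the requested MTU into a max frame size: MTU plus the Ethernet
 * header (14), FCS (4) and room for two VLAN tags (2 * 4). For example,
 * an MTU of 1500 gives a max frame size of 1500 + 14 + 4 + 8 = 1526.
 */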
9747int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
9748{
dd72140c 9749 struct hclge_dev *hdev = vport->back;
63cbf7a9 9750 int i, max_frm_size, ret;
dd72140c 9751
9e690456 9752 /* HW supports 2 layers of vlan */
e6d7d79d
YL
9753 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
9754 if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
e070c8b9 9755 max_frm_size > hdev->ae_dev->dev_specs.max_frm_size)
e6d7d79d
YL
9756 return -EINVAL;
9757
818f1675
YL
9758 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
9759 mutex_lock(&hdev->vport_lock);
9760 /* VF's mps must fit within hdev->mps */
9761 if (vport->vport_id && max_frm_size > hdev->mps) {
9762 mutex_unlock(&hdev->vport_lock);
9763 return -EINVAL;
9764 } else if (vport->vport_id) {
9765 vport->mps = max_frm_size;
9766 mutex_unlock(&hdev->vport_lock);
9767 return 0;
9768 }
9769
9770 /* PF's mps must be greater than the VF's mps */
9771 for (i = 1; i < hdev->num_alloc_vport; i++)
9772 if (max_frm_size < hdev->vport[i].mps) {
9773 mutex_unlock(&hdev->vport_lock);
9774 return -EINVAL;
9775 }
9776
cdca4c48
YL
9777 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
9778
e6d7d79d 9779 ret = hclge_set_mac_mtu(hdev, max_frm_size);
dd72140c
FL
9780 if (ret) {
9781 dev_err(&hdev->pdev->dev,
9782 "Change mtu fail, ret =%d\n", ret);
818f1675 9783 goto out;
dd72140c
FL
9784 }
9785
e6d7d79d 9786 hdev->mps = max_frm_size;
818f1675 9787 vport->mps = max_frm_size;
e6d7d79d 9788
dd72140c
FL
9789 ret = hclge_buffer_alloc(hdev);
9790 if (ret)
9791 dev_err(&hdev->pdev->dev,
9792 "Allocate buffer fail, ret =%d\n", ret);
9793
818f1675 9794out:
cdca4c48 9795 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
818f1675 9796 mutex_unlock(&hdev->vport_lock);
dd72140c
FL
9797 return ret;
9798}
9799
46a3df9f
S
9800static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
9801 bool enable)
9802{
d44f9b63 9803 struct hclge_reset_tqp_queue_cmd *req;
46a3df9f
S
9804 struct hclge_desc desc;
9805 int ret;
9806
9807 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
9808
d44f9b63 9809 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
9a5ef4aa 9810 req->tqp_id = cpu_to_le16(queue_id);
b9a8f883
YL
9811 if (enable)
9812 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
46a3df9f
S
9813
9814 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9815 if (ret) {
9816 dev_err(&hdev->pdev->dev,
9817 "Send tqp reset cmd error, status =%d\n", ret);
9818 return ret;
9819 }
9820
9821 return 0;
9822}
9823
9824static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
9825{
d44f9b63 9826 struct hclge_reset_tqp_queue_cmd *req;
46a3df9f
S
9827 struct hclge_desc desc;
9828 int ret;
9829
9830 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
9831
d44f9b63 9832 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
9a5ef4aa 9833 req->tqp_id = cpu_to_le16(queue_id);
46a3df9f
S
9834
9835 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9836 if (ret) {
9837 dev_err(&hdev->pdev->dev,
9838 "Get reset status error, status =%d\n", ret);
9839 return ret;
9840 }
9841
e4e87715 9842 return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
46a3df9f
S
9843}
9844
0c29d191 9845u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
814e0274
PL
9846{
9847 struct hnae3_queue *queue;
9848 struct hclge_tqp *tqp;
9849
9850 queue = handle->kinfo.tqp[queue_id];
9851 tqp = container_of(queue, struct hclge_tqp, q);
9852
9853 return tqp->index;
9854}
9855
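/* Reset a single TQP from the PF: disable the queue, request the reset,
 * poll the ready_to_reset status up to HCLGE_TQP_RESET_TRY_TIMES with a
 * short sleep between polls, and finally deassert the soft reset.
 */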
7fa6be4f 9856int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
9857{
9858 struct hclge_vport *vport = hclge_get_vport(handle);
9859 struct hclge_dev *hdev = vport->back;
9860 int reset_try_times = 0;
9861 int reset_status;
814e0274 9862 u16 queue_gid;
63cbf7a9 9863 int ret;
46a3df9f 9864
814e0274
PL
9865 queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
9866
46a3df9f
S
9867 ret = hclge_tqp_enable(hdev, queue_id, 0, false);
9868 if (ret) {
7fa6be4f
HT
9869 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
9870 return ret;
46a3df9f
S
9871 }
9872
814e0274 9873 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
46a3df9f 9874 if (ret) {
7fa6be4f
HT
9875 dev_err(&hdev->pdev->dev,
9876 "Send reset tqp cmd fail, ret = %d\n", ret);
9877 return ret;
46a3df9f
S
9878 }
9879
46a3df9f 9880 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
814e0274 9881 reset_status = hclge_get_reset_status(hdev, queue_gid);
46a3df9f
S
9882 if (reset_status)
9883 break;
e8df45c2
ZL
9884
9885 /* Wait for tqp hw reset */
9886 usleep_range(1000, 1200);
46a3df9f
S
9887 }
9888
9889 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
7fa6be4f
HT
9890 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
9891 return ret;
46a3df9f
S
9892 }
9893
814e0274 9894 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7fa6be4f
HT
9895 if (ret)
9896 dev_err(&hdev->pdev->dev,
9897 "Deassert the soft reset fail, ret = %d\n", ret);
9898
9899 return ret;
46a3df9f
S
9900}
9901
1a426f8b
PL
9902void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
9903{
67a69f84 9904 struct hnae3_handle *handle = &vport->nic;
1a426f8b
PL
9905 struct hclge_dev *hdev = vport->back;
9906 int reset_try_times = 0;
9907 int reset_status;
9908 u16 queue_gid;
9909 int ret;
9910
67a69f84
YM
9911 if (queue_id >= handle->kinfo.num_tqps) {
9912 dev_warn(&hdev->pdev->dev, "Invalid vf queue id(%u)\n",
9913 queue_id);
9914 return;
9915 }
9916
1a426f8b
PL
9917 queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
9918
9919 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
9920 if (ret) {
9921 dev_warn(&hdev->pdev->dev,
9922 "Send reset tqp cmd fail, ret = %d\n", ret);
9923 return;
9924 }
9925
1a426f8b 9926 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
1a426f8b
PL
9927 reset_status = hclge_get_reset_status(hdev, queue_gid);
9928 if (reset_status)
9929 break;
e8df45c2
ZL
9930
9931 /* Wait for tqp hw reset */
9932 usleep_range(1000, 1200);
1a426f8b
PL
9933 }
9934
9935 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
9936 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
9937 return;
9938 }
9939
9940 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
9941 if (ret)
9942 dev_warn(&hdev->pdev->dev,
9943 "Deassert the soft reset fail, ret = %d\n", ret);
9944}
9945
46a3df9f
S
9946static u32 hclge_get_fw_version(struct hnae3_handle *handle)
9947{
9948 struct hclge_vport *vport = hclge_get_vport(handle);
9949 struct hclge_dev *hdev = vport->back;
9950
9951 return hdev->fw_version;
9952}
9953
61387774
PL
9954static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
9955{
9956 struct phy_device *phydev = hdev->hw.mac.phydev;
9957
9958 if (!phydev)
9959 return;
9960
70814e81 9961 phy_set_asym_pause(phydev, rx_en, tx_en);
61387774
PL
9962}
9963
9964static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
9965{
61387774
PL
9966 int ret;
9967
40173a2e 9968 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
61387774 9969 return 0;
61387774
PL
9970
9971 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
aacbe27e
YL
9972 if (ret)
9973 dev_err(&hdev->pdev->dev,
9974 "configure pauseparam error, ret = %d.\n", ret);
61387774 9975
aacbe27e 9976 return ret;
61387774
PL
9977}
9978
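/* Resolve the pause configuration negotiated by the PHY: translate the
 * local and link partner pause advertisements into TX/RX pause enables
 * with mii_resolve_flowctrl_fdx(), force both off for half duplex, and
 * apply the result through hclge_cfg_pauseparam().
 */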
9979int hclge_cfg_flowctrl(struct hclge_dev *hdev)
9980{
9981 struct phy_device *phydev = hdev->hw.mac.phydev;
9982 u16 remote_advertising = 0;
63cbf7a9 9983 u16 local_advertising;
1770a7a3
PL
9984 u32 rx_pause, tx_pause;
9985 u8 flowctl;
9986
9987 if (!phydev->link || !phydev->autoneg)
9988 return 0;
9989
3c1bcc86 9990 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
1770a7a3
PL
9991
9992 if (phydev->pause)
9993 remote_advertising = LPA_PAUSE_CAP;
9994
9995 if (phydev->asym_pause)
9996 remote_advertising |= LPA_PAUSE_ASYM;
9997
9998 flowctl = mii_resolve_flowctrl_fdx(local_advertising,
9999 remote_advertising);
10000 tx_pause = flowctl & FLOW_CTRL_TX;
10001 rx_pause = flowctl & FLOW_CTRL_RX;
10002
10003 if (phydev->duplex == HCLGE_MAC_HALF) {
10004 tx_pause = 0;
10005 rx_pause = 0;
10006 }
10007
10008 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
10009}
10010
46a3df9f
S
10011static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
10012 u32 *rx_en, u32 *tx_en)
10013{
10014 struct hclge_vport *vport = hclge_get_vport(handle);
10015 struct hclge_dev *hdev = vport->back;
fb89629f 10016 struct phy_device *phydev = hdev->hw.mac.phydev;
46a3df9f 10017
fb89629f 10018 *auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
46a3df9f
S
10019
10020 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10021 *rx_en = 0;
10022 *tx_en = 0;
10023 return;
10024 }
10025
10026 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
10027 *rx_en = 1;
10028 *tx_en = 0;
10029 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
10030 *tx_en = 1;
10031 *rx_en = 0;
10032 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
10033 *rx_en = 1;
10034 *tx_en = 1;
10035 } else {
10036 *rx_en = 0;
10037 *tx_en = 0;
10038 }
10039}
10040
aacbe27e
YL
10041static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
10042 u32 rx_en, u32 tx_en)
10043{
10044 if (rx_en && tx_en)
10045 hdev->fc_mode_last_time = HCLGE_FC_FULL;
10046 else if (rx_en && !tx_en)
10047 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
10048 else if (!rx_en && tx_en)
10049 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
10050 else
10051 hdev->fc_mode_last_time = HCLGE_FC_NONE;
10052
10053 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
10054}
10055
61387774
PL
10056static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
10057 u32 rx_en, u32 tx_en)
10058{
10059 struct hclge_vport *vport = hclge_get_vport(handle);
10060 struct hclge_dev *hdev = vport->back;
10061 struct phy_device *phydev = hdev->hw.mac.phydev;
10062 u32 fc_autoneg;
10063
fb89629f
JS
10064 if (phydev) {
10065 fc_autoneg = hclge_get_autoneg(handle);
10066 if (auto_neg != fc_autoneg) {
10067 dev_info(&hdev->pdev->dev,
10068 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
10069 return -EOPNOTSUPP;
10070 }
61387774
PL
10071 }
10072
10073 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10074 dev_info(&hdev->pdev->dev,
10075 "Priority flow control enabled. Cannot set link flow control.\n");
10076 return -EOPNOTSUPP;
10077 }
10078
10079 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
10080
aacbe27e
YL
10081 hclge_record_user_pauseparam(hdev, rx_en, tx_en);
10082
fb89629f 10083 if (!auto_neg)
61387774
PL
10084 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
10085
22f48e24
JS
10086 if (phydev)
10087 return phy_start_aneg(phydev);
10088
fb89629f 10089 return -EOPNOTSUPP;
61387774
PL
10090}
10091
46a3df9f
S
10092static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
10093 u8 *auto_neg, u32 *speed, u8 *duplex)
10094{
10095 struct hclge_vport *vport = hclge_get_vport(handle);
10096 struct hclge_dev *hdev = vport->back;
10097
10098 if (speed)
10099 *speed = hdev->hw.mac.speed;
10100 if (duplex)
10101 *duplex = hdev->hw.mac.duplex;
10102 if (auto_neg)
10103 *auto_neg = hdev->hw.mac.autoneg;
10104}
10105
88d10bd6
JS
10106static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
10107 u8 *module_type)
46a3df9f
S
10108{
10109 struct hclge_vport *vport = hclge_get_vport(handle);
10110 struct hclge_dev *hdev = vport->back;
10111
10112 /* When the nic is down, the service task is not running and does not
10113 * refresh the port information every second. Query it here before
10114 * returning the media type, so the media information is correct.
10115 */
10116 hclge_update_port_info(hdev);
10117
46a3df9f
S
10118 if (media_type)
10119 *media_type = hdev->hw.mac.media_type;
88d10bd6
JS
10120
10121 if (module_type)
10122 *module_type = hdev->hw.mac.module_type;
46a3df9f
S
10123}
10124
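/* Report the MDI/MDI-X state of a copper PHY: switch to the MDIX page,
 * read the control and status registers to get the configured mode and
 * the resolved state, then switch back to the copper page.
 */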
10125static void hclge_get_mdix_mode(struct hnae3_handle *handle,
10126 u8 *tp_mdix_ctrl, u8 *tp_mdix)
10127{
10128 struct hclge_vport *vport = hclge_get_vport(handle);
10129 struct hclge_dev *hdev = vport->back;
10130 struct phy_device *phydev = hdev->hw.mac.phydev;
ebaf1908
WL
10131 int mdix_ctrl, mdix, is_resolved;
10132 unsigned int retval;
46a3df9f
S
10133
10134 if (!phydev) {
10135 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
10136 *tp_mdix = ETH_TP_MDI_INVALID;
10137 return;
10138 }
10139
10140 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
10141
10142 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
e4e87715
PL
10143 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
10144 HCLGE_PHY_MDIX_CTRL_S);
46a3df9f
S
10145
10146 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
e4e87715
PL
10147 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
10148 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
46a3df9f
S
10149
10150 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
10151
10152 switch (mdix_ctrl) {
10153 case 0x0:
10154 *tp_mdix_ctrl = ETH_TP_MDI;
10155 break;
10156 case 0x1:
10157 *tp_mdix_ctrl = ETH_TP_MDI_X;
10158 break;
10159 case 0x3:
10160 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
10161 break;
10162 default:
10163 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
10164 break;
10165 }
10166
10167 if (!is_resolved)
10168 *tp_mdix = ETH_TP_MDI_INVALID;
10169 else if (mdix)
10170 *tp_mdix = ETH_TP_MDI_X;
10171 else
10172 *tp_mdix = ETH_TP_MDI;
10173}
10174
bb87be87
YL
10175static void hclge_info_show(struct hclge_dev *hdev)
10176{
10177 struct device *dev = &hdev->pdev->dev;
10178
10179 dev_info(dev, "PF info begin:\n");
10180
adcf738b
GL
10181 dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
10182 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
10183 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
10184 dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
10185 dev_info(dev, "Numbers of vmdp vports: %u\n", hdev->num_vmdq_vport);
10186 dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
10187 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
10188 dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
10189 dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
10190 dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
bb87be87
YL
10191 dev_info(dev, "This is %s PF\n",
10192 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
10193 dev_info(dev, "DCB %s\n",
10194 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
10195 dev_info(dev, "MQPRIO %s\n",
10196 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
10197
10198 dev_info(dev, "PF info end.\n");
10199}
10200
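/* Initialize the NIC client instance. The reset counter is sampled before
 * the client init call; if a reset is running or has happened by the time
 * init finishes, the instance is uninitialized again and -EBUSY returned.
 */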
10201static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
10202 struct hclge_vport *vport)
10203{
10204 struct hnae3_client *client = vport->nic.client;
10205 struct hclge_dev *hdev = ae_dev->priv;
0bfdf286 10206 int rst_cnt = hdev->rst_stats.reset_cnt;
994e04f1
HT
10207 int ret;
10208
10209 ret = client->ops->init_instance(&vport->nic);
10210 if (ret)
10211 return ret;
10212
10213 set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
7cf9c069
HT
10214 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10215 rst_cnt != hdev->rst_stats.reset_cnt) {
10216 ret = -EBUSY;
10217 goto init_nic_err;
10218 }
10219
00ea6e5f
WL
10220 /* Enable nic hw error interrupts */
10221 ret = hclge_config_nic_hw_error(hdev, true);
bcf643c5 10222 if (ret) {
00ea6e5f
WL
10223 dev_err(&ae_dev->pdev->dev,
10224 "fail(%d) to enable hw error interrupts\n", ret);
bcf643c5
WL
10225 goto init_nic_err;
10226 }
10227
10228 hnae3_set_client_init_flag(client, ae_dev, 1);
00ea6e5f 10229
994e04f1
HT
10230 if (netif_msg_drv(&hdev->vport->nic))
10231 hclge_info_show(hdev);
10232
00ea6e5f 10233 return ret;
7cf9c069
HT
10234
10235init_nic_err:
10236 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10237 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10238 msleep(HCLGE_WAIT_RESET_DONE);
10239
10240 client->ops->uninit_instance(&vport->nic, 0);
10241
10242 return ret;
994e04f1
HT
10243}
10244
10245static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
10246 struct hclge_vport *vport)
10247{
994e04f1 10248 struct hclge_dev *hdev = ae_dev->priv;
31a57fde 10249 struct hnae3_client *client;
7cf9c069 10250 int rst_cnt;
994e04f1
HT
10251 int ret;
10252
10253 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
10254 !hdev->nic_client)
10255 return 0;
10256
10257 client = hdev->roce_client;
10258 ret = hclge_init_roce_base_info(vport);
10259 if (ret)
10260 return ret;
10261
7cf9c069 10262 rst_cnt = hdev->rst_stats.reset_cnt;
994e04f1
HT
10263 ret = client->ops->init_instance(&vport->roce);
10264 if (ret)
10265 return ret;
10266
10267 set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
7cf9c069
HT
10268 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10269 rst_cnt != hdev->rst_stats.reset_cnt) {
10270 ret = -EBUSY;
10271 goto init_roce_err;
10272 }
10273
72fcd2be
HT
10274 /* Enable roce ras interrupts */
10275 ret = hclge_config_rocee_ras_interrupt(hdev, true);
10276 if (ret) {
10277 dev_err(&ae_dev->pdev->dev,
10278 "fail(%d) to enable roce ras interrupts\n", ret);
10279 goto init_roce_err;
10280 }
10281
994e04f1
HT
10282 hnae3_set_client_init_flag(client, ae_dev, 1);
10283
10284 return 0;
7cf9c069
HT
10285
10286init_roce_err:
10287 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10288 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10289 msleep(HCLGE_WAIT_RESET_DONE);
10290
10291 hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
10292
10293 return ret;
994e04f1
HT
10294}
10295
46a3df9f
S
10296static int hclge_init_client_instance(struct hnae3_client *client,
10297 struct hnae3_ae_dev *ae_dev)
10298{
10299 struct hclge_dev *hdev = ae_dev->priv;
10300 struct hclge_vport *vport;
10301 int i, ret;
10302
10303 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
10304 vport = &hdev->vport[i];
10305
10306 switch (client->type) {
10307 case HNAE3_CLIENT_KNIC:
46a3df9f
S
10308 hdev->nic_client = client;
10309 vport->nic.client = client;
994e04f1 10310 ret = hclge_init_nic_client_instance(ae_dev, vport);
46a3df9f 10311 if (ret)
49dd8054 10312 goto clear_nic;
46a3df9f 10313
994e04f1
HT
10314 ret = hclge_init_roce_client_instance(ae_dev, vport);
10315 if (ret)
10316 goto clear_roce;
46a3df9f 10317
46a3df9f
S
10318 break;
10319 case HNAE3_CLIENT_ROCE:
e92a0843 10320 if (hnae3_dev_roce_supported(hdev)) {
46a3df9f
S
10321 hdev->roce_client = client;
10322 vport->roce.client = client;
10323 }
10324
994e04f1
HT
10325 ret = hclge_init_roce_client_instance(ae_dev, vport);
10326 if (ret)
10327 goto clear_roce;
fa7a4bd5
JS
10328
10329 break;
10330 default:
10331 return -EINVAL;
46a3df9f
S
10332 }
10333 }
10334
37417c66 10335 return 0;
49dd8054
JS
10336
10337clear_nic:
10338 hdev->nic_client = NULL;
10339 vport->nic.client = NULL;
10340 return ret;
10341clear_roce:
10342 hdev->roce_client = NULL;
10343 vport->roce.client = NULL;
10344 return ret;
46a3df9f
S
10345}
10346
10347static void hclge_uninit_client_instance(struct hnae3_client *client,
10348 struct hnae3_ae_dev *ae_dev)
10349{
10350 struct hclge_dev *hdev = ae_dev->priv;
10351 struct hclge_vport *vport;
10352 int i;
10353
10354 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
10355 vport = &hdev->vport[i];
a17dcf3f 10356 if (hdev->roce_client) {
2a0bfc36 10357 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
7cf9c069
HT
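			/* wait for any in-flight reset to complete before uninitializing the roce client */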
10358 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10359 msleep(HCLGE_WAIT_RESET_DONE);
10360
46a3df9f
S
10361 hdev->roce_client->ops->uninit_instance(&vport->roce,
10362 0);
a17dcf3f
L
10363 hdev->roce_client = NULL;
10364 vport->roce.client = NULL;
10365 }
46a3df9f
S
10366 if (client->type == HNAE3_CLIENT_ROCE)
10367 return;
49dd8054 10368 if (hdev->nic_client && client->ops->uninit_instance) {
bd9109c9 10369 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
7cf9c069
HT
10370 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10371 msleep(HCLGE_WAIT_RESET_DONE);
10372
46a3df9f 10373 client->ops->uninit_instance(&vport->nic, 0);
a17dcf3f
L
10374 hdev->nic_client = NULL;
10375 vport->nic.client = NULL;
10376 }
46a3df9f
S
10377 }
10378}
10379
30ae7f8a
HT
10380static int hclge_dev_mem_map(struct hclge_dev *hdev)
10381{
10382#define HCLGE_MEM_BAR 4
10383
10384 struct pci_dev *pdev = hdev->pdev;
10385 struct hclge_hw *hw = &hdev->hw;
10386
10387	/* if the device does not have device memory, return directly */
10388 if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
10389 return 0;
10390
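	/* map the optional device memory BAR write-combined */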
10391 hw->mem_base = devm_ioremap_wc(&pdev->dev,
10392 pci_resource_start(pdev, HCLGE_MEM_BAR),
10393 pci_resource_len(pdev, HCLGE_MEM_BAR));
10394 if (!hw->mem_base) {
be419fca 10395 dev_err(&pdev->dev, "failed to map device memory\n");
30ae7f8a
HT
10396 return -EFAULT;
10397 }
10398
10399 return 0;
10400}
10401
46a3df9f
S
10402static int hclge_pci_init(struct hclge_dev *hdev)
10403{
10404 struct pci_dev *pdev = hdev->pdev;
10405 struct hclge_hw *hw;
10406 int ret;
10407
10408 ret = pci_enable_device(pdev);
10409 if (ret) {
10410 dev_err(&pdev->dev, "failed to enable PCI device\n");
3e249d3b 10411 return ret;
46a3df9f
S
10412 }
10413
10414 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10415 if (ret) {
10416 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10417 if (ret) {
10418 dev_err(&pdev->dev,
10419 "can't set consistent PCI DMA");
10420 goto err_disable_device;
10421 }
10422 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
10423 }
10424
10425 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
10426 if (ret) {
10427 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
10428 goto err_disable_device;
10429 }
10430
10431 pci_set_master(pdev);
10432 hw = &hdev->hw;
46a3df9f
S
10433 hw->io_base = pcim_iomap(pdev, 2, 0);
10434 if (!hw->io_base) {
10435 dev_err(&pdev->dev, "Can't map configuration register space\n");
10436 ret = -ENOMEM;
10437 goto err_clr_master;
10438 }
10439
30ae7f8a
HT
10440 ret = hclge_dev_mem_map(hdev);
10441 if (ret)
10442 goto err_unmap_io_base;
10443
709eb41a
L
10444 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
10445
46a3df9f 10446 return 0;
30ae7f8a
HT
10447
10448err_unmap_io_base:
10449 pcim_iounmap(pdev, hdev->hw.io_base);
46a3df9f
S
10450err_clr_master:
10451 pci_clear_master(pdev);
10452 pci_release_regions(pdev);
10453err_disable_device:
10454 pci_disable_device(pdev);
46a3df9f
S
10455
10456 return ret;
10457}
10458
10459static void hclge_pci_uninit(struct hclge_dev *hdev)
10460{
10461 struct pci_dev *pdev = hdev->pdev;
10462
30ae7f8a
HT
10463 if (hdev->hw.mem_base)
10464 devm_iounmap(&pdev->dev, hdev->hw.mem_base);
10465
6a814413 10466 pcim_iounmap(pdev, hdev->hw.io_base);
887c3820 10467 pci_free_irq_vectors(pdev);
46a3df9f
S
10468 pci_clear_master(pdev);
10469 pci_release_mem_regions(pdev);
10470 pci_disable_device(pdev);
10471}
10472
48569cda
PL
10473static void hclge_state_init(struct hclge_dev *hdev)
10474{
10475 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
10476 set_bit(HCLGE_STATE_DOWN, &hdev->state);
10477 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
10478 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
d5432455 10479 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
48569cda
PL
10480 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
10481 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
10482}
10483
10484static void hclge_state_uninit(struct hclge_dev *hdev)
10485{
10486 set_bit(HCLGE_STATE_DOWN, &hdev->state);
acfc3d55 10487 set_bit(HCLGE_STATE_REMOVING, &hdev->state);
48569cda 10488
65e41e7e
HT
10489 if (hdev->reset_timer.function)
10490 del_timer_sync(&hdev->reset_timer);
7be1b9f3
YL
10491 if (hdev->service_task.work.func)
10492 cancel_delayed_work_sync(&hdev->service_task);
48569cda
PL
10493}
10494
6b9a97ee
HT
10495static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
10496{
8627bded
HT
10497#define HCLGE_FLR_RETRY_WAIT_MS 500
10498#define HCLGE_FLR_RETRY_CNT 5
6b9a97ee 10499
8627bded
HT
10500 struct hclge_dev *hdev = ae_dev->priv;
10501 int retry_cnt = 0;
10502 int ret;
6b9a97ee 10503
8627bded
HT
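/* take the reset semaphore and prepare for FLR, retrying if preparation fails or a reset is still pending */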
10504retry:
10505 down(&hdev->reset_sem);
10506 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10507 hdev->reset_type = HNAE3_FLR_RESET;
10508 ret = hclge_reset_prepare(hdev);
bb3d8668 10509 if (ret || hdev->reset_pending) {
8627bded
HT
10510 dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
10511 ret);
10512 if (hdev->reset_pending ||
10513 retry_cnt++ < HCLGE_FLR_RETRY_CNT) {
10514 dev_err(&hdev->pdev->dev,
10515 "reset_pending:0x%lx, retry_cnt:%d\n",
10516 hdev->reset_pending, retry_cnt);
10517 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10518 up(&hdev->reset_sem);
10519 msleep(HCLGE_FLR_RETRY_WAIT_MS);
10520 goto retry;
10521 }
10522 }
6b9a97ee 10523
8627bded
HT
10524 /* disable misc vector before FLR done */
10525 hclge_enable_vector(&hdev->misc_vector, false);
10526 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
10527 hdev->rst_stats.flr_rst_cnt++;
6b9a97ee
HT
10528}
10529
10530static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
10531{
10532 struct hclge_dev *hdev = ae_dev->priv;
8627bded
HT
10533 int ret;
10534
10535 hclge_enable_vector(&hdev->misc_vector, true);
6b9a97ee 10536
8627bded
HT
10537 ret = hclge_reset_rebuild(hdev);
10538 if (ret)
10539 dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
10540
10541 hdev->reset_type = HNAE3_NONE_RESET;
10542 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10543 up(&hdev->reset_sem);
6b9a97ee
HT
10544}
10545
31bb229d
PL
10546static void hclge_clear_resetting_state(struct hclge_dev *hdev)
10547{
10548 u16 i;
10549
10550 for (i = 0; i < hdev->num_alloc_vport; i++) {
10551 struct hclge_vport *vport = &hdev->vport[i];
10552 int ret;
10553
10554 /* Send cmd to clear VF's FUNC_RST_ING */
10555 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
10556 if (ret)
10557 dev_warn(&hdev->pdev->dev,
adcf738b 10558 "clear vf(%u) rst failed %d!\n",
31bb229d
PL
10559 vport->vport_id, ret);
10560 }
10561}
10562
46a3df9f
S
10563static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
10564{
10565 struct pci_dev *pdev = ae_dev->pdev;
46a3df9f
S
10566 struct hclge_dev *hdev;
10567 int ret;
10568
10569 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
2421ee24
HT
10570 if (!hdev)
10571 return -ENOMEM;
46a3df9f 10572
46a3df9f
S
10573 hdev->pdev = pdev;
10574 hdev->ae_dev = ae_dev;
4ed340ab 10575 hdev->reset_type = HNAE3_NONE_RESET;
0742ed7c 10576 hdev->reset_level = HNAE3_FUNC_RESET;
46a3df9f 10577 ae_dev->priv = hdev;
9e690456
GH
10578
10579	/* HW supports 2 layers of vlan */
e6d7d79d 10580 hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
46a3df9f 10581
818f1675 10582 mutex_init(&hdev->vport_lock);
44122887 10583 spin_lock_init(&hdev->fd_rule_lock);
8627bded 10584 sema_init(&hdev->reset_sem, 1);
818f1675 10585
46a3df9f 10586 ret = hclge_pci_init(hdev);
60df7e91 10587 if (ret)
ffd5656e 10588 goto out;
46a3df9f 10589
3efb960f
L
10590	/* Firmware command queue initialization */
10591 ret = hclge_cmd_queue_init(hdev);
60df7e91 10592 if (ret)
ffd5656e 10593 goto err_pci_uninit;
3efb960f
L
10594
10595	/* Firmware command initialization */
46a3df9f
S
10596 ret = hclge_cmd_init(hdev);
10597 if (ret)
ffd5656e 10598 goto err_cmd_uninit;
46a3df9f
S
10599
10600 ret = hclge_get_cap(hdev);
60df7e91 10601 if (ret)
ffd5656e 10602 goto err_cmd_uninit;
46a3df9f 10603
af2aedc5
GH
10604 ret = hclge_query_dev_specs(hdev);
10605 if (ret) {
10606 dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
10607 ret);
10608 goto err_cmd_uninit;
10609 }
10610
46a3df9f
S
10611 ret = hclge_configure(hdev);
10612 if (ret) {
10613 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
ffd5656e 10614 goto err_cmd_uninit;
46a3df9f
S
10615 }
10616
887c3820 10617 ret = hclge_init_msi(hdev);
46a3df9f 10618 if (ret) {
887c3820 10619 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
ffd5656e 10620 goto err_cmd_uninit;
46a3df9f
S
10621 }
10622
466b0c00 10623 ret = hclge_misc_irq_init(hdev);
60df7e91 10624 if (ret)
ffd5656e 10625 goto err_msi_uninit;
466b0c00 10626
46a3df9f
S
10627 ret = hclge_alloc_tqps(hdev);
10628 if (ret) {
10629 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
ffd5656e 10630 goto err_msi_irq_uninit;
46a3df9f
S
10631 }
10632
10633 ret = hclge_alloc_vport(hdev);
60df7e91 10634 if (ret)
ffd5656e 10635 goto err_msi_irq_uninit;
46a3df9f 10636
7df7dad6 10637 ret = hclge_map_tqp(hdev);
60df7e91 10638 if (ret)
2312e050 10639 goto err_msi_irq_uninit;
7df7dad6 10640
c5ef83cb
HT
10641 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
10642 ret = hclge_mac_mdio_config(hdev);
60df7e91 10643 if (ret)
2312e050 10644 goto err_msi_irq_uninit;
cf9cca2d 10645 }
10646
39932473 10647 ret = hclge_init_umv_space(hdev);
60df7e91 10648 if (ret)
9fc55413 10649 goto err_mdiobus_unreg;
39932473 10650
46a3df9f
S
10651 ret = hclge_mac_init(hdev);
10652 if (ret) {
10653 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
ffd5656e 10654 goto err_mdiobus_unreg;
46a3df9f 10655 }
46a3df9f
S
10656
10657 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
10658 if (ret) {
10659 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
ffd5656e 10660 goto err_mdiobus_unreg;
46a3df9f
S
10661 }
10662
b26a6fea
PL
10663 ret = hclge_config_gro(hdev, true);
10664 if (ret)
10665 goto err_mdiobus_unreg;
10666
46a3df9f
S
10667 ret = hclge_init_vlan_config(hdev);
10668 if (ret) {
10669 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
ffd5656e 10670 goto err_mdiobus_unreg;
46a3df9f
S
10671 }
10672
10673 ret = hclge_tm_schd_init(hdev);
10674 if (ret) {
10675 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
ffd5656e 10676 goto err_mdiobus_unreg;
68ece54e
YL
10677 }
10678
87ce161e
GH
10679 ret = hclge_rss_init_cfg(hdev);
10680 if (ret) {
10681 dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
10682 goto err_mdiobus_unreg;
10683 }
10684
68ece54e
YL
10685 ret = hclge_rss_init_hw(hdev);
10686 if (ret) {
10687 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
ffd5656e 10688 goto err_mdiobus_unreg;
46a3df9f
S
10689 }
10690
f5aac71c
FL
10691 ret = init_mgr_tbl(hdev);
10692 if (ret) {
10693 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
ffd5656e 10694 goto err_mdiobus_unreg;
f5aac71c
FL
10695 }
10696
d695964d
JS
10697 ret = hclge_init_fd_config(hdev);
10698 if (ret) {
10699 dev_err(&pdev->dev,
10700 "fd table init fail, ret=%d\n", ret);
10701 goto err_mdiobus_unreg;
10702 }
10703
a6345787
WL
10704 INIT_KFIFO(hdev->mac_tnl_log);
10705
cacde272
YL
10706 hclge_dcb_ops_set(hdev);
10707
65e41e7e 10708 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
7be1b9f3 10709 INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
46a3df9f 10710
08125454
YL
10711 /* Setup affinity after service timer setup because add_timer_on
10712 * is called in affinity notify.
10713 */
10714 hclge_misc_affinity_setup(hdev);
10715
8e52a602 10716 hclge_clear_all_event_cause(hdev);
31bb229d 10717 hclge_clear_resetting_state(hdev);
8e52a602 10718
e4193e24
SJ
10719	/* Log and clear the hw errors that have already occurred */
10720 hclge_handle_all_hns_hw_errors(ae_dev);
10721
e3b84ed2
SJ
10722	/* request a delayed reset for error recovery, because an immediate
10723	 * global reset on a PF may affect the pending initialization of other PFs
10724 */
10725 if (ae_dev->hw_err_reset_req) {
10726 enum hnae3_reset_type reset_level;
10727
10728 reset_level = hclge_get_reset_level(ae_dev,
10729 &ae_dev->hw_err_reset_req);
10730 hclge_set_def_reset_request(ae_dev, reset_level);
10731 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
10732 }
10733
466b0c00
L
10734 /* Enable MISC vector(vector0) */
10735 hclge_enable_vector(&hdev->misc_vector, true);
10736
48569cda 10737 hclge_state_init(hdev);
0742ed7c 10738 hdev->last_reset_time = jiffies;
46a3df9f 10739
08d80a4c
HT
10740 dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
10741 HCLGE_DRIVER_NAME);
10742
1c6dfe6f
YL
10743 hclge_task_schedule(hdev, round_jiffies_relative(HZ));
10744
46a3df9f
S
10745 return 0;
10746
ffd5656e
HT
10747err_mdiobus_unreg:
10748 if (hdev->hw.mac.phydev)
10749 mdiobus_unregister(hdev->hw.mac.mdio_bus);
ffd5656e
HT
10750err_msi_irq_uninit:
10751 hclge_misc_irq_uninit(hdev);
10752err_msi_uninit:
10753 pci_free_irq_vectors(pdev);
10754err_cmd_uninit:
232d0d55 10755 hclge_cmd_uninit(hdev);
ffd5656e 10756err_pci_uninit:
6a814413 10757 pcim_iounmap(pdev, hdev->hw.io_base);
ffd5656e 10758 pci_clear_master(pdev);
46a3df9f 10759 pci_release_regions(pdev);
ffd5656e 10760 pci_disable_device(pdev);
ffd5656e 10761out:
95163521 10762 mutex_destroy(&hdev->vport_lock);
46a3df9f
S
10763 return ret;
10764}
10765
c6dc5213 10766static void hclge_stats_clear(struct hclge_dev *hdev)
10767{
1c6dfe6f 10768 memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
c6dc5213 10769}
10770
22044f95
JS
10771static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10772{
10773 return hclge_config_switch_param(hdev, vf, enable,
10774 HCLGE_SWITCH_ANTI_SPOOF_MASK);
10775}
10776
10777static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10778{
10779 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
10780 HCLGE_FILTER_FE_NIC_INGRESS_B,
10781 enable, vf);
10782}
10783
10784static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
10785{
10786 int ret;
10787
10788 ret = hclge_set_mac_spoofchk(hdev, vf, enable);
10789 if (ret) {
10790 dev_err(&hdev->pdev->dev,
10791 "Set vf %d mac spoof check %s failed, ret=%d\n",
10792 vf, enable ? "on" : "off", ret);
10793 return ret;
10794 }
10795
10796 ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
10797 if (ret)
10798 dev_err(&hdev->pdev->dev,
10799 "Set vf %d vlan spoof check %s failed, ret=%d\n",
10800 vf, enable ? "on" : "off", ret);
10801
10802 return ret;
10803}
10804
10805static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
10806 bool enable)
10807{
10808 struct hclge_vport *vport = hclge_get_vport(handle);
10809 struct hclge_dev *hdev = vport->back;
10810 u32 new_spoofchk = enable ? 1 : 0;
10811 int ret;
10812
295ba232 10813 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
22044f95
JS
10814 return -EOPNOTSUPP;
10815
10816 vport = hclge_get_vf_vport(hdev, vf);
10817 if (!vport)
10818 return -EINVAL;
10819
10820 if (vport->vf_info.spoofchk == new_spoofchk)
10821 return 0;
10822
10823 if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
10824 dev_warn(&hdev->pdev->dev,
10825			 "vf %d vlan table is full, enabling spoof check may cause its packets to fail to send\n",
10826 vf);
7d0b3451 10827 else if (enable && hclge_is_umv_space_full(vport, true))
22044f95
JS
10828 dev_warn(&hdev->pdev->dev,
10829			 "vf %d mac table is full, enabling spoof check may cause its packets to fail to send\n",
10830 vf);
10831
10832 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
10833 if (ret)
10834 return ret;
10835
10836 vport->vf_info.spoofchk = new_spoofchk;
10837 return 0;
10838}
10839
10840static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
10841{
10842 struct hclge_vport *vport = hdev->vport;
10843 int ret;
10844 int i;
10845
295ba232 10846 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
22044f95
JS
10847 return 0;
10848
10849 /* resume the vf spoof check state after reset */
10850 for (i = 0; i < hdev->num_alloc_vport; i++) {
10851 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
10852 vport->vf_info.spoofchk);
10853 if (ret)
10854 return ret;
10855
10856 vport++;
10857 }
10858
10859 return 0;
10860}
10861
e196ec75
JS
10862static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
10863{
10864 struct hclge_vport *vport = hclge_get_vport(handle);
10865 struct hclge_dev *hdev = vport->back;
295ba232 10866 struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
e196ec75
JS
10867 u32 new_trusted = enable ? 1 : 0;
10868 bool en_bc_pmc;
10869 int ret;
10870
10871 vport = hclge_get_vf_vport(hdev, vf);
10872 if (!vport)
10873 return -EINVAL;
10874
10875 if (vport->vf_info.trusted == new_trusted)
10876 return 0;
10877
10878 /* Disable promisc mode for VF if it is not trusted any more. */
10879 if (!enable && vport->vf_info.promisc_enable) {
295ba232 10880 en_bc_pmc = ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2;
e196ec75
JS
10881 ret = hclge_set_vport_promisc_mode(vport, false, false,
10882 en_bc_pmc);
10883 if (ret)
10884 return ret;
10885 vport->vf_info.promisc_enable = 0;
10886 hclge_inform_vf_promisc_info(vport);
10887 }
10888
10889 vport->vf_info.trusted = new_trusted;
10890
10891 return 0;
10892}
10893
ee9e4424
YL
10894static void hclge_reset_vf_rate(struct hclge_dev *hdev)
10895{
10896 int ret;
10897 int vf;
10898
10899 /* reset vf rate to default value */
10900 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10901 struct hclge_vport *vport = &hdev->vport[vf];
10902
10903 vport->vf_info.max_tx_rate = 0;
10904 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
10905 if (ret)
10906 dev_err(&hdev->pdev->dev,
10907 "vf%d failed to reset to default, ret=%d\n",
10908 vf - HCLGE_VF_VPORT_START_NUM, ret);
10909 }
10910}
10911
11ef971f 10912static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
ee9e4424
YL
10913 int min_tx_rate, int max_tx_rate)
10914{
10915 if (min_tx_rate != 0 ||
10916 max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
10917 dev_err(&hdev->pdev->dev,
10918 "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
10919 min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
10920 return -EINVAL;
10921 }
10922
10923 return 0;
10924}
10925
10926static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
10927 int min_tx_rate, int max_tx_rate, bool force)
10928{
10929 struct hclge_vport *vport = hclge_get_vport(handle);
10930 struct hclge_dev *hdev = vport->back;
10931 int ret;
10932
11ef971f 10933 ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate);
ee9e4424
YL
10934 if (ret)
10935 return ret;
10936
10937 vport = hclge_get_vf_vport(hdev, vf);
10938 if (!vport)
10939 return -EINVAL;
10940
10941 if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
10942 return 0;
10943
10944 ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
10945 if (ret)
10946 return ret;
10947
10948 vport->vf_info.max_tx_rate = max_tx_rate;
10949
10950 return 0;
10951}
10952
10953static int hclge_resume_vf_rate(struct hclge_dev *hdev)
10954{
10955 struct hnae3_handle *handle = &hdev->vport->nic;
10956 struct hclge_vport *vport;
10957 int ret;
10958 int vf;
10959
10960 /* resume the vf max_tx_rate after reset */
10961 for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
10962 vport = hclge_get_vf_vport(hdev, vf);
10963 if (!vport)
10964 return -EINVAL;
10965
10966		/* zero means max rate; after reset, the firmware has already set it
10967		 * to max rate, so just continue.
10968 */
10969 if (!vport->vf_info.max_tx_rate)
10970 continue;
10971
10972 ret = hclge_set_vf_rate(handle, vf, 0,
10973 vport->vf_info.max_tx_rate, true);
10974 if (ret) {
10975 dev_err(&hdev->pdev->dev,
10976 "vf%d failed to resume tx_rate:%u, ret=%d\n",
10977 vf, vport->vf_info.max_tx_rate, ret);
10978 return ret;
10979 }
10980 }
10981
10982 return 0;
10983}
10984
a6d818e3
YL
10985static void hclge_reset_vport_state(struct hclge_dev *hdev)
10986{
10987 struct hclge_vport *vport = hdev->vport;
10988 int i;
10989
10990 for (i = 0; i < hdev->num_alloc_vport; i++) {
0f14c5b1 10991 hclge_vport_stop(vport);
a6d818e3
YL
10992 vport++;
10993 }
10994}
10995
4ed340ab
L
10996static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
10997{
10998 struct hclge_dev *hdev = ae_dev->priv;
10999 struct pci_dev *pdev = ae_dev->pdev;
11000 int ret;
11001
11002 set_bit(HCLGE_STATE_DOWN, &hdev->state);
11003
c6dc5213 11004 hclge_stats_clear(hdev);
ee4bcd3b
JS
11005	/* NOTE: PF reset does not need to clear or restore the pf and vf table entries,
11006	 * so the tables in memory should not be cleaned here.
11007 */
11008 if (hdev->reset_type == HNAE3_IMP_RESET ||
11009 hdev->reset_type == HNAE3_GLOBAL_RESET) {
039ba863
JS
11010 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
11011 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
ee4bcd3b
JS
11012 bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
11013 hclge_reset_umv_space(hdev);
11014 }
11015
4ed340ab
L
11016 ret = hclge_cmd_init(hdev);
11017 if (ret) {
11018 dev_err(&pdev->dev, "Cmd queue init failed\n");
11019 return ret;
11020 }
11021
4ed340ab
L
11022 ret = hclge_map_tqp(hdev);
11023 if (ret) {
11024 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
11025 return ret;
11026 }
11027
11028 ret = hclge_mac_init(hdev);
11029 if (ret) {
11030 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11031 return ret;
11032 }
11033
4ed340ab
L
11034 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11035 if (ret) {
11036 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11037 return ret;
11038 }
11039
b26a6fea
PL
11040 ret = hclge_config_gro(hdev, true);
11041 if (ret)
11042 return ret;
11043
4ed340ab
L
11044 ret = hclge_init_vlan_config(hdev);
11045 if (ret) {
11046 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
11047 return ret;
11048 }
11049
44e59e37 11050 ret = hclge_tm_init_hw(hdev, true);
4ed340ab 11051 if (ret) {
f31c1ba6 11052 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
4ed340ab
L
11053 return ret;
11054 }
11055
11056 ret = hclge_rss_init_hw(hdev);
11057 if (ret) {
11058 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
11059 return ret;
11060 }
11061
d0db7ed3
YM
11062 ret = init_mgr_tbl(hdev);
11063 if (ret) {
11064 dev_err(&pdev->dev,
11065 "failed to reinit manager table, ret = %d\n", ret);
11066 return ret;
11067 }
11068
d695964d
JS
11069 ret = hclge_init_fd_config(hdev);
11070 if (ret) {
9b2f3477 11071 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
d695964d
JS
11072 return ret;
11073 }
11074
4fdd0bca
JS
11075	/* Log and clear the hw errors that have already occurred */
11076 hclge_handle_all_hns_hw_errors(ae_dev);
11077
f3fa4a94 11078 /* Re-enable the hw error interrupts because
00ea6e5f 11079 * the interrupts get disabled on global reset.
01865a50 11080 */
00ea6e5f 11081 ret = hclge_config_nic_hw_error(hdev, true);
f3fa4a94
SJ
11082 if (ret) {
11083 dev_err(&pdev->dev,
00ea6e5f
WL
11084 "fail(%d) to re-enable NIC hw error interrupts\n",
11085 ret);
f3fa4a94
SJ
11086 return ret;
11087 }
01865a50 11088
00ea6e5f
WL
11089 if (hdev->roce_client) {
11090 ret = hclge_config_rocee_ras_interrupt(hdev, true);
11091 if (ret) {
11092 dev_err(&pdev->dev,
11093 "fail(%d) to re-enable roce ras interrupts\n",
11094 ret);
11095 return ret;
11096 }
11097 }
11098
a6d818e3 11099 hclge_reset_vport_state(hdev);
22044f95
JS
11100 ret = hclge_reset_vport_spoofchk(hdev);
11101 if (ret)
11102 return ret;
a6d818e3 11103
ee9e4424
YL
11104 ret = hclge_resume_vf_rate(hdev);
11105 if (ret)
11106 return ret;
11107
4ed340ab
L
11108 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
11109 HCLGE_DRIVER_NAME);
11110
11111 return 0;
11112}
11113
46a3df9f
S
11114static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
11115{
11116 struct hclge_dev *hdev = ae_dev->priv;
11117 struct hclge_mac *mac = &hdev->hw.mac;
11118
ee9e4424 11119 hclge_reset_vf_rate(hdev);
59359fc8 11120 hclge_clear_vf_vlan(hdev);
08125454 11121 hclge_misc_affinity_teardown(hdev);
48569cda 11122 hclge_state_uninit(hdev);
ee4bcd3b 11123 hclge_uninit_mac_table(hdev);
46a3df9f
S
11124
11125 if (mac->phydev)
11126 mdiobus_unregister(mac->mdio_bus);
11127
466b0c00
L
11128 /* Disable MISC vector(vector0) */
11129 hclge_enable_vector(&hdev->misc_vector, false);
8e52a602
XW
11130 synchronize_irq(hdev->misc_vector.vector_irq);
11131
00ea6e5f 11132 /* Disable all hw interrupts */
a6345787 11133 hclge_config_mac_tnl_int(hdev, false);
00ea6e5f
WL
11134 hclge_config_nic_hw_error(hdev, false);
11135 hclge_config_rocee_ras_interrupt(hdev, false);
11136
232d0d55 11137 hclge_cmd_uninit(hdev);
ca1d7669 11138 hclge_misc_irq_uninit(hdev);
46a3df9f 11139 hclge_pci_uninit(hdev);
818f1675 11140 mutex_destroy(&hdev->vport_lock);
c6075b19 11141 hclge_uninit_vport_vlan_table(hdev);
46a3df9f
S
11142 ae_dev->priv = NULL;
11143}
11144
482d2e9c
PL
11145static u32 hclge_get_max_channels(struct hnae3_handle *handle)
11146{
482d2e9c
PL
11147 struct hclge_vport *vport = hclge_get_vport(handle);
11148 struct hclge_dev *hdev = vport->back;
11149
f1c2e66d 11150 return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps);
482d2e9c
PL
11151}
11152
11153static void hclge_get_channels(struct hnae3_handle *handle,
11154 struct ethtool_channels *ch)
11155{
482d2e9c
PL
11156 ch->max_combined = hclge_get_max_channels(handle);
11157 ch->other_count = 1;
11158 ch->max_other = 1;
c3b9c50d 11159 ch->combined_count = handle->kinfo.rss_size;
482d2e9c
PL
11160}
11161
09f2af64 11162static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
0d43bf45 11163 u16 *alloc_tqps, u16 *max_rss_size)
09f2af64
PL
11164{
11165 struct hclge_vport *vport = hclge_get_vport(handle);
11166 struct hclge_dev *hdev = vport->back;
09f2af64 11167
0d43bf45 11168 *alloc_tqps = vport->alloc_tqps;
f1c2e66d 11169 *max_rss_size = hdev->pf_rss_size_max;
09f2af64
PL
11170}
11171
90c68a41
YL
11172static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
11173 bool rxfh_configured)
09f2af64 11174{
87ce161e 11175 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
09f2af64
PL
11176 struct hclge_vport *vport = hclge_get_vport(handle);
11177 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
354d0fab 11178 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
09f2af64 11179 struct hclge_dev *hdev = vport->back;
354d0fab 11180 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
adcf738b
GL
11181 u16 cur_rss_size = kinfo->rss_size;
11182 u16 cur_tqps = kinfo->num_tqps;
09f2af64 11183 u16 tc_valid[HCLGE_MAX_TC_NUM];
09f2af64
PL
11184 u16 roundup_size;
11185 u32 *rss_indir;
ebaf1908
WL
11186 unsigned int i;
11187 int ret;
09f2af64 11188
672ad0ed 11189 kinfo->req_rss_size = new_tqps_num;
09f2af64 11190
672ad0ed 11191 ret = hclge_tm_vport_map_update(hdev);
09f2af64 11192 if (ret) {
672ad0ed 11193 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
09f2af64
PL
11194 return ret;
11195 }
11196
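	/* round rss_size up to a power of two and use its log2 as the per-TC size */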
11197 roundup_size = roundup_pow_of_two(kinfo->rss_size);
11198 roundup_size = ilog2(roundup_size);
11199 /* Set the RSS TC mode according to the new RSS size */
11200 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
11201 tc_valid[i] = 0;
11202
11203 if (!(hdev->hw_tc_map & BIT(i)))
11204 continue;
11205
11206 tc_valid[i] = 1;
11207 tc_size[i] = roundup_size;
11208 tc_offset[i] = kinfo->rss_size * i;
11209 }
11210 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
11211 if (ret)
11212 return ret;
11213
90c68a41
YL
11214	/* RSS indirection table has been configured by user */
11215 if (rxfh_configured)
11216 goto out;
11217
09f2af64 11218	/* Reinitialize the RSS indirection table according to the new RSS size */
87ce161e
GH
11219 rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32),
11220 GFP_KERNEL);
09f2af64
PL
11221 if (!rss_indir)
11222 return -ENOMEM;
11223
87ce161e 11224 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
09f2af64
PL
11225 rss_indir[i] = i % kinfo->rss_size;
11226
11227 ret = hclge_set_rss(handle, rss_indir, NULL, 0);
11228 if (ret)
11229 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
11230 ret);
11231
11232 kfree(rss_indir);
11233
90c68a41 11234out:
09f2af64
PL
11235 if (!ret)
11236 dev_info(&hdev->pdev->dev,
adcf738b 11237 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
09f2af64 11238 cur_rss_size, kinfo->rss_size,
35244430 11239 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);
09f2af64
PL
11240
11241 return ret;
11242}
11243
77b34110
FL
11244static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
11245 u32 *regs_num_64_bit)
11246{
11247 struct hclge_desc desc;
11248 u32 total_num;
11249 int ret;
11250
11251 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
11252 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11253 if (ret) {
11254 dev_err(&hdev->pdev->dev,
11255 "Query register number cmd failed, ret = %d.\n", ret);
11256 return ret;
11257 }
11258
11259 *regs_num_32_bit = le32_to_cpu(desc.data[0]);
11260 *regs_num_64_bit = le32_to_cpu(desc.data[1]);
11261
11262 total_num = *regs_num_32_bit + *regs_num_64_bit;
11263 if (!total_num)
11264 return -EINVAL;
11265
11266 return 0;
11267}
11268
11269static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
11270 void *data)
11271{
11272#define HCLGE_32_BIT_REG_RTN_DATANUM 8
b37ce587 11273#define HCLGE_32_BIT_DESC_NODATA_LEN 2
77b34110
FL
11274
11275 struct hclge_desc *desc;
11276 u32 *reg_val = data;
11277 __le32 *desc_data;
b37ce587 11278 int nodata_num;
77b34110
FL
11279 int cmd_num;
11280 int i, k, n;
11281 int ret;
11282
11283 if (regs_num == 0)
11284 return 0;
11285
b37ce587
YM
11286 nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
11287 cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
11288 HCLGE_32_BIT_REG_RTN_DATANUM);
77b34110
FL
11289 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
11290 if (!desc)
11291 return -ENOMEM;
11292
11293 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
11294 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
11295 if (ret) {
11296 dev_err(&hdev->pdev->dev,
11297 "Query 32 bit register cmd failed, ret = %d.\n", ret);
11298 kfree(desc);
11299 return ret;
11300 }
11301
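	/* the first BD carries register values after the command header; the remaining BDs are reused entirely as data */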
11302 for (i = 0; i < cmd_num; i++) {
11303 if (i == 0) {
11304 desc_data = (__le32 *)(&desc[i].data[0]);
b37ce587 11305 n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
77b34110
FL
11306 } else {
11307 desc_data = (__le32 *)(&desc[i]);
11308 n = HCLGE_32_BIT_REG_RTN_DATANUM;
11309 }
11310 for (k = 0; k < n; k++) {
11311 *reg_val++ = le32_to_cpu(*desc_data++);
11312
11313 regs_num--;
11314 if (!regs_num)
11315 break;
11316 }
11317 }
11318
11319 kfree(desc);
11320 return 0;
11321}
11322
11323static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
11324 void *data)
11325{
11326#define HCLGE_64_BIT_REG_RTN_DATANUM 4
b37ce587 11327#define HCLGE_64_BIT_DESC_NODATA_LEN 1
77b34110
FL
11328
11329 struct hclge_desc *desc;
11330 u64 *reg_val = data;
11331 __le64 *desc_data;
b37ce587 11332 int nodata_len;
77b34110
FL
11333 int cmd_num;
11334 int i, k, n;
11335 int ret;
11336
11337 if (regs_num == 0)
11338 return 0;
11339
b37ce587
YM
11340 nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
11341 cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
11342 HCLGE_64_BIT_REG_RTN_DATANUM);
77b34110
FL
11343 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
11344 if (!desc)
11345 return -ENOMEM;
11346
11347 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
11348 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
11349 if (ret) {
11350 dev_err(&hdev->pdev->dev,
11351 "Query 64 bit register cmd failed, ret = %d.\n", ret);
11352 kfree(desc);
11353 return ret;
11354 }
11355
11356 for (i = 0; i < cmd_num; i++) {
11357 if (i == 0) {
11358 desc_data = (__le64 *)(&desc[i].data[0]);
b37ce587 11359 n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
77b34110
FL
11360 } else {
11361 desc_data = (__le64 *)(&desc[i]);
11362 n = HCLGE_64_BIT_REG_RTN_DATANUM;
11363 }
11364 for (k = 0; k < n; k++) {
11365 *reg_val++ = le64_to_cpu(*desc_data++);
11366
11367 regs_num--;
11368 if (!regs_num)
11369 break;
11370 }
11371 }
11372
11373 kfree(desc);
11374 return 0;
11375}
11376
ea4750ca 11377#define MAX_SEPARATE_NUM 4
ddb54554 11378#define SEPARATOR_VALUE 0xFDFCFBFA
ea4750ca
JS
11379#define REG_NUM_PER_LINE 4
11380#define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
ddb54554
GH
11381#define REG_SEPARATOR_LINE 1
11382#define REG_NUM_REMAIN_MASK 3
11383#define BD_LIST_MAX_NUM 30
ea4750ca 11384
ddb54554 11385int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
77b34110 11386{
5caa039f
HT
11387 int i;
11388
11389	/* initialize all command BDs except the last one */
11390 for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
11391 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
11392 true);
11393 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
11394 }
11395
11396 /* initialize the last command BD */
11397 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
ddb54554 11398
5caa039f 11399 return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
ddb54554
GH
11400}
11401
11402static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
11403 int *bd_num_list,
11404 u32 type_num)
11405{
ddb54554 11406 u32 entries_per_desc, desc_index, index, offset, i;
9027d043 11407 struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
77b34110
FL
11408 int ret;
11409
ddb54554 11410 ret = hclge_query_bd_num_cmd_send(hdev, desc);
77b34110
FL
11411 if (ret) {
11412 dev_err(&hdev->pdev->dev,
ddb54554
GH
11413 "Get dfx bd num fail, status is %d.\n", ret);
11414 return ret;
77b34110
FL
11415 }
11416
ddb54554
GH
11417 entries_per_desc = ARRAY_SIZE(desc[0].data);
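	/* each DFX register type stores its BD count at a fixed offset spread across the query descriptors */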
11418 for (i = 0; i < type_num; i++) {
11419 offset = hclge_dfx_bd_offset_list[i];
11420 index = offset % entries_per_desc;
11421 desc_index = offset / entries_per_desc;
11422 bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
11423 }
ea4750ca 11424
ddb54554 11425 return ret;
77b34110
FL
11426}
11427
ddb54554
GH
11428static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
11429 struct hclge_desc *desc_src, int bd_num,
11430 enum hclge_opcode_type cmd)
77b34110 11431{
ddb54554
GH
11432 struct hclge_desc *desc = desc_src;
11433 int i, ret;
11434
11435 hclge_cmd_setup_basic_desc(desc, cmd, true);
11436 for (i = 0; i < bd_num - 1; i++) {
11437 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
11438 desc++;
11439 hclge_cmd_setup_basic_desc(desc, cmd, true);
11440 }
11441
11442 desc = desc_src;
11443 ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
11444 if (ret)
11445 dev_err(&hdev->pdev->dev,
11446 "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
11447 cmd, ret);
11448
11449 return ret;
11450}
11451
11452static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
11453 void *data)
11454{
11455 int entries_per_desc, reg_num, separator_num, desc_index, index, i;
11456 struct hclge_desc *desc = desc_src;
ea4750ca 11457 u32 *reg = data;
ddb54554
GH
11458
11459 entries_per_desc = ARRAY_SIZE(desc->data);
11460 reg_num = entries_per_desc * bd_num;
11461 separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
11462 for (i = 0; i < reg_num; i++) {
11463 index = i % entries_per_desc;
11464 desc_index = i / entries_per_desc;
11465 *reg++ = le32_to_cpu(desc[desc_index].data[index]);
11466 }
11467 for (i = 0; i < separator_num; i++)
11468 *reg++ = SEPARATOR_VALUE;
11469
11470 return reg_num + separator_num;
11471}
11472
11473static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
11474{
11475 u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
72fa4904 11476 int data_len_per_desc, bd_num, i;
ddb54554 11477 int bd_num_list[BD_LIST_MAX_NUM];
72fa4904 11478 u32 data_len;
77b34110
FL
11479 int ret;
11480
ddb54554
GH
11481 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
11482 if (ret) {
11483 dev_err(&hdev->pdev->dev,
11484 "Get dfx reg bd num fail, status is %d.\n", ret);
11485 return ret;
11486 }
77b34110 11487
c593642c 11488 data_len_per_desc = sizeof_field(struct hclge_desc, data);
ddb54554
GH
11489 *len = 0;
11490 for (i = 0; i < dfx_reg_type_num; i++) {
11491 bd_num = bd_num_list[i];
11492 data_len = data_len_per_desc * bd_num;
11493 *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
11494 }
11495
11496 return ret;
11497}
11498
11499static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
11500{
11501 u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
11502 int bd_num, bd_num_max, buf_len, i;
11503 int bd_num_list[BD_LIST_MAX_NUM];
11504 struct hclge_desc *desc_src;
11505 u32 *reg = data;
11506 int ret;
11507
11508 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
77b34110
FL
11509 if (ret) {
11510 dev_err(&hdev->pdev->dev,
ddb54554
GH
11511 "Get dfx reg bd num fail, status is %d.\n", ret);
11512 return ret;
11513 }
11514
11515 bd_num_max = bd_num_list[0];
11516 for (i = 1; i < dfx_reg_type_num; i++)
11517 bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
11518
11519 buf_len = sizeof(*desc_src) * bd_num_max;
11520 desc_src = kzalloc(buf_len, GFP_KERNEL);
322cb97c 11521 if (!desc_src)
ddb54554 11522 return -ENOMEM;
77b34110 11523
ddb54554
GH
11524 for (i = 0; i < dfx_reg_type_num; i++) {
11525 bd_num = bd_num_list[i];
11526 ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
11527 hclge_dfx_reg_opcode_list[i]);
11528 if (ret) {
11529 dev_err(&hdev->pdev->dev,
11530 "Get dfx reg fail, status is %d.\n", ret);
11531 break;
11532 }
11533
11534 reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
11535 }
11536
11537 kfree(desc_src);
11538 return ret;
11539}
11540
11541static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
11542 struct hnae3_knic_private_info *kinfo)
11543{
11544#define HCLGE_RING_REG_OFFSET 0x200
11545#define HCLGE_RING_INT_REG_OFFSET 0x4
11546
11547 int i, j, reg_num, separator_num;
11548 int data_num_sum;
11549 u32 *reg = data;
11550
ea4750ca 11551	/* fetch per-PF register values from the PF PCIe register space */
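	/* each group is padded with one to four SEPARATOR_VALUE words so its length is a multiple of REG_NUM_PER_LINE */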
ddb54554
GH
11552 reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
11553 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11554 for (i = 0; i < reg_num; i++)
ea4750ca
JS
11555 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
11556 for (i = 0; i < separator_num; i++)
11557 *reg++ = SEPARATOR_VALUE;
ddb54554 11558 data_num_sum = reg_num + separator_num;
ea4750ca 11559
ddb54554
GH
11560 reg_num = ARRAY_SIZE(common_reg_addr_list);
11561 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11562 for (i = 0; i < reg_num; i++)
ea4750ca
JS
11563 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
11564 for (i = 0; i < separator_num; i++)
11565 *reg++ = SEPARATOR_VALUE;
ddb54554 11566 data_num_sum += reg_num + separator_num;
ea4750ca 11567
ddb54554
GH
11568 reg_num = ARRAY_SIZE(ring_reg_addr_list);
11569 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
ea4750ca 11570 for (j = 0; j < kinfo->num_tqps; j++) {
ddb54554 11571 for (i = 0; i < reg_num; i++)
ea4750ca
JS
11572 *reg++ = hclge_read_dev(&hdev->hw,
11573 ring_reg_addr_list[i] +
ddb54554 11574 HCLGE_RING_REG_OFFSET * j);
ea4750ca
JS
11575 for (i = 0; i < separator_num; i++)
11576 *reg++ = SEPARATOR_VALUE;
11577 }
ddb54554 11578 data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
ea4750ca 11579
ddb54554
GH
11580 reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
11581 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
ea4750ca 11582 for (j = 0; j < hdev->num_msi_used - 1; j++) {
ddb54554 11583 for (i = 0; i < reg_num; i++)
ea4750ca
JS
11584 *reg++ = hclge_read_dev(&hdev->hw,
11585 tqp_intr_reg_addr_list[i] +
ddb54554 11586 HCLGE_RING_INT_REG_OFFSET * j);
ea4750ca
JS
11587 for (i = 0; i < separator_num; i++)
11588 *reg++ = SEPARATOR_VALUE;
11589 }
ddb54554
GH
11590 data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
11591
11592 return data_num_sum;
11593}
11594
11595static int hclge_get_regs_len(struct hnae3_handle *handle)
11596{
11597 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
11598 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
11599 struct hclge_vport *vport = hclge_get_vport(handle);
11600 struct hclge_dev *hdev = vport->back;
11601 int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
11602 int regs_lines_32_bit, regs_lines_64_bit;
11603 int ret;
11604
11605 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
11606 if (ret) {
11607 dev_err(&hdev->pdev->dev,
11608 "Get register number failed, ret = %d.\n", ret);
11609 return ret;
11610 }
11611
11612 ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
11613 if (ret) {
11614 dev_err(&hdev->pdev->dev,
11615 "Get dfx reg len failed, ret = %d.\n", ret);
11616 return ret;
11617 }
11618
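	/* each register group occupies a whole number of REG_LEN_PER_LINE-byte lines plus one separator line */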
11619 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
11620 REG_SEPARATOR_LINE;
11621 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
11622 REG_SEPARATOR_LINE;
11623 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
11624 REG_SEPARATOR_LINE;
11625 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
11626 REG_SEPARATOR_LINE;
11627 regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
11628 REG_SEPARATOR_LINE;
11629 regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
11630 REG_SEPARATOR_LINE;
11631
11632 return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
11633 tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
11634 regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
11635}
11636
11637static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
11638 void *data)
11639{
11640 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
11641 struct hclge_vport *vport = hclge_get_vport(handle);
11642 struct hclge_dev *hdev = vport->back;
11643 u32 regs_num_32_bit, regs_num_64_bit;
11644 int i, reg_num, separator_num, ret;
11645 u32 *reg = data;
11646
11647 *version = hdev->fw_version;
11648
11649 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
11650 if (ret) {
11651 dev_err(&hdev->pdev->dev,
11652 "Get register number failed, ret = %d.\n", ret);
11653 return;
11654 }
11655
11656 reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
ea4750ca 11657
ea4750ca 11658 ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
77b34110
FL
11659 if (ret) {
11660 dev_err(&hdev->pdev->dev,
11661 "Get 32 bit register failed, ret = %d.\n", ret);
11662 return;
11663 }
ddb54554
GH
11664 reg_num = regs_num_32_bit;
11665 reg += reg_num;
11666 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11667 for (i = 0; i < separator_num; i++)
11668 *reg++ = SEPARATOR_VALUE;
77b34110 11669
ea4750ca 11670 ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
ddb54554 11671 if (ret) {
77b34110
FL
11672 dev_err(&hdev->pdev->dev,
11673 "Get 64 bit register failed, ret = %d.\n", ret);
ddb54554
GH
11674 return;
11675 }
11676 reg_num = regs_num_64_bit * 2;
11677 reg += reg_num;
11678 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11679 for (i = 0; i < separator_num; i++)
11680 *reg++ = SEPARATOR_VALUE;
11681
11682 ret = hclge_get_dfx_reg(hdev, reg);
11683 if (ret)
11684 dev_err(&hdev->pdev->dev,
11685 "Get dfx register failed, ret = %d.\n", ret);
77b34110
FL
11686}
11687
f6f75abc 11688static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
07f8e940
JS
11689{
11690 struct hclge_set_led_state_cmd *req;
11691 struct hclge_desc desc;
11692 int ret;
11693
11694 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
11695
11696 req = (struct hclge_set_led_state_cmd *)desc.data;
e4e87715
PL
11697 hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
11698 HCLGE_LED_LOCATE_STATE_S, locate_led_status);
07f8e940
JS
11699
11700 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11701 if (ret)
11702 dev_err(&hdev->pdev->dev,
11703 "Send set led state cmd error, ret =%d\n", ret);
11704
11705 return ret;
11706}
11707
11708enum hclge_led_status {
11709 HCLGE_LED_OFF,
11710 HCLGE_LED_ON,
11711 HCLGE_LED_NO_CHANGE = 0xFF,
11712};
11713
11714static int hclge_set_led_id(struct hnae3_handle *handle,
11715 enum ethtool_phys_id_state status)
11716{
07f8e940
JS
11717 struct hclge_vport *vport = hclge_get_vport(handle);
11718 struct hclge_dev *hdev = vport->back;
07f8e940
JS
11719
11720 switch (status) {
11721 case ETHTOOL_ID_ACTIVE:
f6f75abc 11722 return hclge_set_led_status(hdev, HCLGE_LED_ON);
07f8e940 11723 case ETHTOOL_ID_INACTIVE:
f6f75abc 11724 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
07f8e940 11725 default:
f6f75abc 11726 return -EINVAL;
07f8e940 11727 }
07f8e940
JS
11728}
11729
0979aa0b
FL
11730static void hclge_get_link_mode(struct hnae3_handle *handle,
11731 unsigned long *supported,
11732 unsigned long *advertising)
11733{
11734 unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
11735 struct hclge_vport *vport = hclge_get_vport(handle);
11736 struct hclge_dev *hdev = vport->back;
11737 unsigned int idx = 0;
11738
11739 for (; idx < size; idx++) {
11740 supported[idx] = hdev->hw.mac.supported[idx];
11741 advertising[idx] = hdev->hw.mac.advertising[idx];
11742 }
11743}
11744
1731be4c 11745static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
5c9f6b39
PL
11746{
11747 struct hclge_vport *vport = hclge_get_vport(handle);
11748 struct hclge_dev *hdev = vport->back;
11749
11750 return hclge_config_gro(hdev, enable);
11751}
11752
c631c696
JS
11753static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
11754{
11755 struct hclge_vport *vport = &hdev->vport[0];
11756 struct hnae3_handle *handle = &vport->nic;
9d8d5a36 11757 u8 tmp_flags;
c631c696
JS
11758 int ret;
11759
11760 if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
11761 set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11762 vport->last_promisc_flags = vport->overflow_promisc_flags;
11763 }
11764
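	/* apply the merged promisc flags; on success clear the changed bit and refresh the vlan filter state */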
11765 if (test_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state)) {
11766 tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
11767 ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
11768 tmp_flags & HNAE3_MPE);
11769 if (!ret) {
11770 clear_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11771 hclge_enable_vlan_filter(handle,
11772 tmp_flags & HNAE3_VLAN_FLTR);
11773 }
11774 }
11775}
11776
cb10228d
YL
11777static bool hclge_module_existed(struct hclge_dev *hdev)
11778{
11779 struct hclge_desc desc;
11780 u32 existed;
11781 int ret;
11782
11783 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
11784 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11785 if (ret) {
11786 dev_err(&hdev->pdev->dev,
11787 "failed to get SFP exist state, ret = %d\n", ret);
11788 return false;
11789 }
11790
11791 existed = le32_to_cpu(desc.data[0]);
11792
11793 return existed != 0;
11794}
11795
11796/* need 6 BDs (140 bytes in total) in one reading;
11797 * return the number of bytes actually read, 0 means the read failed.
11798 */
11799static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
11800 u32 len, u8 *data)
11801{
11802 struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
11803 struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
11804 u16 read_len;
11805 u16 copy_len;
11806 int ret;
11807 int i;
11808
11809 /* setup all 6 bds to read module eeprom info. */
11810 for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
11811 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
11812 true);
11813
11814 /* bd0~bd4 need next flag */
11815 if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
11816 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
11817 }
11818
11819	/* setup bd0; this bd contains the offset and read length. */
11820 sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
11821 sfp_info_bd0->offset = cpu_to_le16((u16)offset);
11822 read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
11823 sfp_info_bd0->read_len = cpu_to_le16(read_len);
11824
11825 ret = hclge_cmd_send(&hdev->hw, desc, i);
11826 if (ret) {
11827 dev_err(&hdev->pdev->dev,
11828 "failed to get SFP eeprom info, ret = %d\n", ret);
11829 return 0;
11830 }
11831
11832 /* copy sfp info from bd0 to out buffer. */
11833 copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
11834 memcpy(data, sfp_info_bd0->data, copy_len);
11835 read_len = copy_len;
11836
11837 /* copy sfp info from bd1~bd5 to out buffer if needed. */
11838 for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
11839 if (read_len >= len)
11840 return read_len;
11841
11842 copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
11843 memcpy(data + read_len, desc[i].data, copy_len);
11844 read_len += copy_len;
11845 }
11846
11847 return read_len;
11848}
11849
11850static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
11851 u32 len, u8 *data)
11852{
11853 struct hclge_vport *vport = hclge_get_vport(handle);
11854 struct hclge_dev *hdev = vport->back;
11855 u32 read_len = 0;
11856 u16 data_len;
11857
11858 if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
11859 return -EOPNOTSUPP;
11860
11861 if (!hclge_module_existed(hdev))
11862 return -ENXIO;
11863
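	/* read the eeprom in chunks; each firmware command returns at most HCLGE_SFP_INFO_MAX_LEN bytes */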
11864 while (read_len < len) {
11865 data_len = hclge_get_sfp_eeprom_info(hdev,
11866 offset + read_len,
11867 len - read_len,
11868 data + read_len);
11869 if (!data_len)
11870 return -EIO;
11871
11872 read_len += data_len;
11873 }
11874
11875 return 0;
11876}
11877
46a3df9f
S
11878static const struct hnae3_ae_ops hclge_ops = {
11879 .init_ae_dev = hclge_init_ae_dev,
11880 .uninit_ae_dev = hclge_uninit_ae_dev,
6b9a97ee
HT
11881 .flr_prepare = hclge_flr_prepare,
11882 .flr_done = hclge_flr_done,
46a3df9f
S
11883 .init_client_instance = hclge_init_client_instance,
11884 .uninit_client_instance = hclge_uninit_client_instance,
84e095d6
SM
11885 .map_ring_to_vector = hclge_map_ring_to_vector,
11886 .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
46a3df9f 11887 .get_vector = hclge_get_vector,
0d3e6631 11888 .put_vector = hclge_put_vector,
46a3df9f 11889 .set_promisc_mode = hclge_set_promisc_mode,
c631c696 11890 .request_update_promisc_mode = hclge_request_update_promisc_mode,
c39c4d98 11891 .set_loopback = hclge_set_loopback,
46a3df9f
S
11892 .start = hclge_ae_start,
11893 .stop = hclge_ae_stop,
a6d818e3
YL
11894 .client_start = hclge_client_start,
11895 .client_stop = hclge_client_stop,
46a3df9f
S
11896 .get_status = hclge_get_status,
11897 .get_ksettings_an_result = hclge_get_ksettings_an_result,
46a3df9f
S
11898 .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
11899 .get_media_type = hclge_get_media_type,
22f48e24 11900 .check_port_speed = hclge_check_port_speed,
7e6ec914
JS
11901 .get_fec = hclge_get_fec,
11902 .set_fec = hclge_set_fec,
46a3df9f 11903 .get_rss_key_size = hclge_get_rss_key_size,
46a3df9f
S
11904 .get_rss = hclge_get_rss,
11905 .set_rss = hclge_set_rss,
f7db940a 11906 .set_rss_tuple = hclge_set_rss_tuple,
07d29954 11907 .get_rss_tuple = hclge_get_rss_tuple,
46a3df9f
S
11908 .get_tc_size = hclge_get_tc_size,
11909 .get_mac_addr = hclge_get_mac_addr,
11910 .set_mac_addr = hclge_set_mac_addr,
26483246 11911 .do_ioctl = hclge_do_ioctl,
46a3df9f
S
11912 .add_uc_addr = hclge_add_uc_addr,
11913 .rm_uc_addr = hclge_rm_uc_addr,
11914 .add_mc_addr = hclge_add_mc_addr,
11915 .rm_mc_addr = hclge_rm_mc_addr,
11916 .set_autoneg = hclge_set_autoneg,
11917 .get_autoneg = hclge_get_autoneg,
22f48e24 11918 .restart_autoneg = hclge_restart_autoneg,
7786a996 11919 .halt_autoneg = hclge_halt_autoneg,
46a3df9f 11920 .get_pauseparam = hclge_get_pauseparam,
61387774 11921 .set_pauseparam = hclge_set_pauseparam,
46a3df9f
S
11922 .set_mtu = hclge_set_mtu,
11923 .reset_queue = hclge_reset_tqp,
11924 .get_stats = hclge_get_stats,
615466ce 11925 .get_mac_stats = hclge_get_mac_stat,
46a3df9f
S
11926 .update_stats = hclge_update_stats,
11927 .get_strings = hclge_get_strings,
11928 .get_sset_count = hclge_get_sset_count,
11929 .get_fw_version = hclge_get_fw_version,
11930 .get_mdix_mode = hclge_get_mdix_mode,
391b5e93 11931 .enable_vlan_filter = hclge_enable_vlan_filter,
dc8131d8 11932 .set_vlan_filter = hclge_set_vlan_filter,
46a3df9f 11933 .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
052ece6d 11934 .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
4ed340ab 11935 .reset_event = hclge_reset_event,
123297b7 11936 .get_reset_level = hclge_get_reset_level,
720bd583 11937 .set_default_reset_request = hclge_set_def_reset_request,
09f2af64
PL
11938 .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
11939 .set_channels = hclge_set_channels,
482d2e9c 11940 .get_channels = hclge_get_channels,
77b34110
FL
11941 .get_regs_len = hclge_get_regs_len,
11942 .get_regs = hclge_get_regs,
07f8e940 11943 .set_led_id = hclge_set_led_id,
0979aa0b 11944 .get_link_mode = hclge_get_link_mode,
dd74f815
JS
11945 .add_fd_entry = hclge_add_fd_entry,
11946 .del_fd_entry = hclge_del_fd_entry,
6871af29 11947 .del_all_fd_entries = hclge_del_all_fd_entries,
05c2314f
JS
11948 .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
11949 .get_fd_rule_info = hclge_get_fd_rule_info,
11950 .get_fd_all_rules = hclge_get_all_rules,
c17852a8 11951 .enable_fd = hclge_enable_fd,
d93ed94f 11952 .add_arfs_entry = hclge_add_fd_entry_by_arfs,
3c666b58 11953 .dbg_run_cmd = hclge_dbg_run_cmd,
04987ca1 11954 .dbg_read_cmd = hclge_dbg_read_cmd,
381c356e 11955 .handle_hw_ras_error = hclge_handle_hw_ras_error,
4d60291b
HT
11956 .get_hw_reset_stat = hclge_get_hw_reset_stat,
11957 .ae_dev_resetting = hclge_ae_dev_resetting,
11958 .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
5c9f6b39 11959 .set_gro_en = hclge_gro_en,
0c29d191 11960 .get_global_queue_id = hclge_covert_handle_qid_global,
8cdb992f 11961 .set_timer_task = hclge_set_timer_task,
c8a8045b
HT
11962 .mac_connect_phy = hclge_mac_connect_phy,
11963 .mac_disconnect_phy = hclge_mac_disconnect_phy,
6430f744
YM
11964 .get_vf_config = hclge_get_vf_config,
11965 .set_vf_link_state = hclge_set_vf_link_state,
22044f95 11966 .set_vf_spoofchk = hclge_set_vf_spoofchk,
e196ec75 11967 .set_vf_trust = hclge_set_vf_trust,
ee9e4424 11968 .set_vf_rate = hclge_set_vf_rate,
8e6de441 11969 .set_vf_mac = hclge_set_vf_mac,
cb10228d 11970 .get_module_eeprom = hclge_get_module_eeprom,
a4de0228 11971 .get_cmdq_stat = hclge_get_cmdq_stat,
0205ec04
JS
11972 .add_cls_flower = hclge_add_cls_flower,
11973 .del_cls_flower = hclge_del_cls_flower,
11974 .cls_flower_active = hclge_is_cls_flower_active,
46a3df9f
S
11975};
11976
11977static struct hnae3_ae_algo ae_algo = {
11978 .ops = &hclge_ops,
46a3df9f
S
11979 .pdev_id_table = ae_algo_pci_tbl,
11980};
11981
11982static int hclge_init(void)
11983{
11984 pr_info("%s is initializing\n", HCLGE_NAME);
11985
16deaef2 11986 hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
0ea68902
YL
11987 if (!hclge_wq) {
11988 pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
11989 return -ENOMEM;
11990 }
11991
854cf33a
FL
11992 hnae3_register_ae_algo(&ae_algo);
11993
11994 return 0;
46a3df9f
S
11995}
11996
11997static void hclge_exit(void)
11998{
11999 hnae3_unregister_ae_algo(&ae_algo);
0ea68902 12000 destroy_workqueue(hclge_wq);
46a3df9f
S
12001}
12002module_init(hclge_init);
12003module_exit(hclge_exit);
12004
12005MODULE_LICENSE("GPL");
12006MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
12007MODULE_DESCRIPTION("HCLGE Driver");
12008MODULE_VERSION(HCLGE_MOD_VERSION);