drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/rtnetlink.h>
17 #include "hclge_cmd.h"
18 #include "hclge_dcb.h"
19 #include "hclge_main.h"
20 #include "hclge_mbx.h"
21 #include "hclge_mdio.h"
22 #include "hclge_tm.h"
23 #include "hclge_err.h"
24 #include "hnae3.h"
25
26 #define HCLGE_NAME                      "hclge"
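/* HCLGE_STATS_READ reads a u64 counter at a byte offset inside a stats
 * struct; HCLGE_MAC_STATS_FIELD_OFF yields the byte offset of a field in
 * struct hclge_mac_stats. Together they let the statistics tables below
 * map string names to counters generically.
 */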
27 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
29
30 #define HCLGE_BUF_SIZE_UNIT     256U
31 #define HCLGE_BUF_MUL_BY        2
32 #define HCLGE_BUF_DIV_BY        2
33 #define NEED_RESERVE_TC_NUM     2
34 #define BUF_MAX_PERCENT         100
35 #define BUF_RESERVE_PERCENT     90
36
37 #define HCLGE_RESET_MAX_FAIL_CNT        5
38 #define HCLGE_RESET_SYNC_TIME           100
39 #define HCLGE_PF_RESET_SYNC_TIME        20
40 #define HCLGE_PF_RESET_SYNC_CNT         1500
41
42 /* Get DFX BD number offset */
43 #define HCLGE_DFX_BIOS_BD_OFFSET        1
44 #define HCLGE_DFX_SSU_0_BD_OFFSET       2
45 #define HCLGE_DFX_SSU_1_BD_OFFSET       3
46 #define HCLGE_DFX_IGU_BD_OFFSET         4
47 #define HCLGE_DFX_RPU_0_BD_OFFSET       5
48 #define HCLGE_DFX_RPU_1_BD_OFFSET       6
49 #define HCLGE_DFX_NCSI_BD_OFFSET        7
50 #define HCLGE_DFX_RTC_BD_OFFSET         8
51 #define HCLGE_DFX_PPP_BD_OFFSET         9
52 #define HCLGE_DFX_RCB_BD_OFFSET         10
53 #define HCLGE_DFX_TQP_BD_OFFSET         11
54 #define HCLGE_DFX_SSU_2_BD_OFFSET       12
55
56 #define HCLGE_LINK_STATUS_MS    10
57
58 #define HCLGE_VF_VPORT_START_NUM        1
59
60 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
61 static int hclge_init_vlan_config(struct hclge_dev *hdev);
62 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
63 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
64 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
65 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
66 static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
67 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
68                                                    unsigned long *addr);
69 static int hclge_set_default_loopback(struct hclge_dev *hdev);
70
71 static void hclge_sync_mac_table(struct hclge_dev *hdev);
72 static void hclge_restore_hw_table(struct hclge_dev *hdev);
73 static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
74
75 static struct hnae3_ae_algo ae_algo;
76
77 static struct workqueue_struct *hclge_wq;
78
79 static const struct pci_device_id ae_algo_pci_tbl[] = {
80         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
81         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
82         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
83         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
84         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
85         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
86         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
87         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
88         /* required last entry */
89         {0, }
90 };
91
92 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
93
94 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
95                                          HCLGE_CMDQ_TX_ADDR_H_REG,
96                                          HCLGE_CMDQ_TX_DEPTH_REG,
97                                          HCLGE_CMDQ_TX_TAIL_REG,
98                                          HCLGE_CMDQ_TX_HEAD_REG,
99                                          HCLGE_CMDQ_RX_ADDR_L_REG,
100                                          HCLGE_CMDQ_RX_ADDR_H_REG,
101                                          HCLGE_CMDQ_RX_DEPTH_REG,
102                                          HCLGE_CMDQ_RX_TAIL_REG,
103                                          HCLGE_CMDQ_RX_HEAD_REG,
104                                          HCLGE_VECTOR0_CMDQ_SRC_REG,
105                                          HCLGE_CMDQ_INTR_STS_REG,
106                                          HCLGE_CMDQ_INTR_EN_REG,
107                                          HCLGE_CMDQ_INTR_GEN_REG};
108
109 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
110                                            HCLGE_VECTOR0_OTER_EN_REG,
111                                            HCLGE_MISC_RESET_STS_REG,
112                                            HCLGE_MISC_VECTOR_INT_STS,
113                                            HCLGE_GLOBAL_RESET_REG,
114                                            HCLGE_FUN_RST_ING,
115                                            HCLGE_GRO_EN_REG};
116
117 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
118                                          HCLGE_RING_RX_ADDR_H_REG,
119                                          HCLGE_RING_RX_BD_NUM_REG,
120                                          HCLGE_RING_RX_BD_LENGTH_REG,
121                                          HCLGE_RING_RX_MERGE_EN_REG,
122                                          HCLGE_RING_RX_TAIL_REG,
123                                          HCLGE_RING_RX_HEAD_REG,
124                                          HCLGE_RING_RX_FBD_NUM_REG,
125                                          HCLGE_RING_RX_OFFSET_REG,
126                                          HCLGE_RING_RX_FBD_OFFSET_REG,
127                                          HCLGE_RING_RX_STASH_REG,
128                                          HCLGE_RING_RX_BD_ERR_REG,
129                                          HCLGE_RING_TX_ADDR_L_REG,
130                                          HCLGE_RING_TX_ADDR_H_REG,
131                                          HCLGE_RING_TX_BD_NUM_REG,
132                                          HCLGE_RING_TX_PRIORITY_REG,
133                                          HCLGE_RING_TX_TC_REG,
134                                          HCLGE_RING_TX_MERGE_EN_REG,
135                                          HCLGE_RING_TX_TAIL_REG,
136                                          HCLGE_RING_TX_HEAD_REG,
137                                          HCLGE_RING_TX_FBD_NUM_REG,
138                                          HCLGE_RING_TX_OFFSET_REG,
139                                          HCLGE_RING_TX_EBD_NUM_REG,
140                                          HCLGE_RING_TX_EBD_OFFSET_REG,
141                                          HCLGE_RING_TX_BD_ERR_REG,
142                                          HCLGE_RING_EN_REG};
143
144 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
145                                              HCLGE_TQP_INTR_GL0_REG,
146                                              HCLGE_TQP_INTR_GL1_REG,
147                                              HCLGE_TQP_INTR_GL2_REG,
148                                              HCLGE_TQP_INTR_RL_REG};
149
150 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
151         "App    Loopback test",
152         "Serdes serial Loopback test",
153         "Serdes parallel Loopback test",
154         "Phy    Loopback test"
155 };
156
157 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
158         {"mac_tx_mac_pause_num",
159                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
160         {"mac_rx_mac_pause_num",
161                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
162         {"mac_tx_control_pkt_num",
163                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
164         {"mac_rx_control_pkt_num",
165                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
166         {"mac_tx_pfc_pkt_num",
167                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
168         {"mac_tx_pfc_pri0_pkt_num",
169                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
170         {"mac_tx_pfc_pri1_pkt_num",
171                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
172         {"mac_tx_pfc_pri2_pkt_num",
173                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
174         {"mac_tx_pfc_pri3_pkt_num",
175                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
176         {"mac_tx_pfc_pri4_pkt_num",
177                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
178         {"mac_tx_pfc_pri5_pkt_num",
179                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
180         {"mac_tx_pfc_pri6_pkt_num",
181                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
182         {"mac_tx_pfc_pri7_pkt_num",
183                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
184         {"mac_rx_pfc_pkt_num",
185                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
186         {"mac_rx_pfc_pri0_pkt_num",
187                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
188         {"mac_rx_pfc_pri1_pkt_num",
189                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
190         {"mac_rx_pfc_pri2_pkt_num",
191                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
192         {"mac_rx_pfc_pri3_pkt_num",
193                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
194         {"mac_rx_pfc_pri4_pkt_num",
195                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
196         {"mac_rx_pfc_pri5_pkt_num",
197                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
198         {"mac_rx_pfc_pri6_pkt_num",
199                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
200         {"mac_rx_pfc_pri7_pkt_num",
201                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
202         {"mac_tx_total_pkt_num",
203                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
204         {"mac_tx_total_oct_num",
205                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
206         {"mac_tx_good_pkt_num",
207                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
208         {"mac_tx_bad_pkt_num",
209                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
210         {"mac_tx_good_oct_num",
211                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
212         {"mac_tx_bad_oct_num",
213                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
214         {"mac_tx_uni_pkt_num",
215                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
216         {"mac_tx_multi_pkt_num",
217                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
218         {"mac_tx_broad_pkt_num",
219                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
220         {"mac_tx_undersize_pkt_num",
221                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
222         {"mac_tx_oversize_pkt_num",
223                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
224         {"mac_tx_64_oct_pkt_num",
225                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
226         {"mac_tx_65_127_oct_pkt_num",
227                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
228         {"mac_tx_128_255_oct_pkt_num",
229                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
230         {"mac_tx_256_511_oct_pkt_num",
231                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
232         {"mac_tx_512_1023_oct_pkt_num",
233                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
234         {"mac_tx_1024_1518_oct_pkt_num",
235                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
236         {"mac_tx_1519_2047_oct_pkt_num",
237                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
238         {"mac_tx_2048_4095_oct_pkt_num",
239                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
240         {"mac_tx_4096_8191_oct_pkt_num",
241                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
242         {"mac_tx_8192_9216_oct_pkt_num",
243                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
244         {"mac_tx_9217_12287_oct_pkt_num",
245                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
246         {"mac_tx_12288_16383_oct_pkt_num",
247                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
248         {"mac_tx_1519_max_good_pkt_num",
249                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
250         {"mac_tx_1519_max_bad_pkt_num",
251                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
252         {"mac_rx_total_pkt_num",
253                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
254         {"mac_rx_total_oct_num",
255                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
256         {"mac_rx_good_pkt_num",
257                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
258         {"mac_rx_bad_pkt_num",
259                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
260         {"mac_rx_good_oct_num",
261                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
262         {"mac_rx_bad_oct_num",
263                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
264         {"mac_rx_uni_pkt_num",
265                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
266         {"mac_rx_multi_pkt_num",
267                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
268         {"mac_rx_broad_pkt_num",
269                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
270         {"mac_rx_undersize_pkt_num",
271                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
272         {"mac_rx_oversize_pkt_num",
273                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
274         {"mac_rx_64_oct_pkt_num",
275                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
276         {"mac_rx_65_127_oct_pkt_num",
277                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
278         {"mac_rx_128_255_oct_pkt_num",
279                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
280         {"mac_rx_256_511_oct_pkt_num",
281                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
282         {"mac_rx_512_1023_oct_pkt_num",
283                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
284         {"mac_rx_1024_1518_oct_pkt_num",
285                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
286         {"mac_rx_1519_2047_oct_pkt_num",
287                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
288         {"mac_rx_2048_4095_oct_pkt_num",
289                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
290         {"mac_rx_4096_8191_oct_pkt_num",
291                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
292         {"mac_rx_8192_9216_oct_pkt_num",
293                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
294         {"mac_rx_9217_12287_oct_pkt_num",
295                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
296         {"mac_rx_12288_16383_oct_pkt_num",
297                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
298         {"mac_rx_1519_max_good_pkt_num",
299                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
300         {"mac_rx_1519_max_bad_pkt_num",
301                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
302
303         {"mac_tx_fragment_pkt_num",
304                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
305         {"mac_tx_undermin_pkt_num",
306                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
307         {"mac_tx_jabber_pkt_num",
308                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
309         {"mac_tx_err_all_pkt_num",
310                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
311         {"mac_tx_from_app_good_pkt_num",
312                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
313         {"mac_tx_from_app_bad_pkt_num",
314                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
315         {"mac_rx_fragment_pkt_num",
316                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
317         {"mac_rx_undermin_pkt_num",
318                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
319         {"mac_rx_jabber_pkt_num",
320                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
321         {"mac_rx_fcs_err_pkt_num",
322                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
323         {"mac_rx_send_app_good_pkt_num",
324                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
325         {"mac_rx_send_app_bad_pkt_num",
326                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
327 };
328
329 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
330         {
331                 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
332                 .ethter_type = cpu_to_le16(ETH_P_LLDP),
333                 .mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
334                 .i_port_bitmap = 0x1,
335         },
336 };
337
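/* default 40-byte RSS hash key */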
338 static const u8 hclge_hash_key[] = {
339         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
340         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
341         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
342         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
343         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
344 };
345
346 static const u32 hclge_dfx_bd_offset_list[] = {
347         HCLGE_DFX_BIOS_BD_OFFSET,
348         HCLGE_DFX_SSU_0_BD_OFFSET,
349         HCLGE_DFX_SSU_1_BD_OFFSET,
350         HCLGE_DFX_IGU_BD_OFFSET,
351         HCLGE_DFX_RPU_0_BD_OFFSET,
352         HCLGE_DFX_RPU_1_BD_OFFSET,
353         HCLGE_DFX_NCSI_BD_OFFSET,
354         HCLGE_DFX_RTC_BD_OFFSET,
355         HCLGE_DFX_PPP_BD_OFFSET,
356         HCLGE_DFX_RCB_BD_OFFSET,
357         HCLGE_DFX_TQP_BD_OFFSET,
358         HCLGE_DFX_SSU_2_BD_OFFSET
359 };
360
361 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
362         HCLGE_OPC_DFX_BIOS_COMMON_REG,
363         HCLGE_OPC_DFX_SSU_REG_0,
364         HCLGE_OPC_DFX_SSU_REG_1,
365         HCLGE_OPC_DFX_IGU_EGU_REG,
366         HCLGE_OPC_DFX_RPU_REG_0,
367         HCLGE_OPC_DFX_RPU_REG_1,
368         HCLGE_OPC_DFX_NCSI_REG,
369         HCLGE_OPC_DFX_RTC_REG,
370         HCLGE_OPC_DFX_PPP_REG,
371         HCLGE_OPC_DFX_RCB_REG,
372         HCLGE_OPC_DFX_TQP_REG,
373         HCLGE_OPC_DFX_SSU_REG_2
374 };
375
376 static const struct key_info meta_data_key_info[] = {
377         { PACKET_TYPE_ID, 6},
378         { IP_FRAGEMENT, 1},
379         { ROCE_TYPE, 1},
380         { NEXT_KEY, 5},
381         { VLAN_NUMBER, 2},
382         { SRC_VPORT, 12},
383         { DST_VPORT, 12},
384         { TUNNEL_PACKET, 1},
385 };
386
387 static const struct key_info tuple_key_info[] = {
388         { OUTER_DST_MAC, 48},
389         { OUTER_SRC_MAC, 48},
390         { OUTER_VLAN_TAG_FST, 16},
391         { OUTER_VLAN_TAG_SEC, 16},
392         { OUTER_ETH_TYPE, 16},
393         { OUTER_L2_RSV, 16},
394         { OUTER_IP_TOS, 8},
395         { OUTER_IP_PROTO, 8},
396         { OUTER_SRC_IP, 32},
397         { OUTER_DST_IP, 32},
398         { OUTER_L3_RSV, 16},
399         { OUTER_SRC_PORT, 16},
400         { OUTER_DST_PORT, 16},
401         { OUTER_L4_RSV, 32},
402         { OUTER_TUN_VNI, 24},
403         { OUTER_TUN_FLOW_ID, 8},
404         { INNER_DST_MAC, 48},
405         { INNER_SRC_MAC, 48},
406         { INNER_VLAN_TAG_FST, 16},
407         { INNER_VLAN_TAG_SEC, 16},
408         { INNER_ETH_TYPE, 16},
409         { INNER_L2_RSV, 16},
410         { INNER_IP_TOS, 8},
411         { INNER_IP_PROTO, 8},
412         { INNER_SRC_IP, 32},
413         { INNER_DST_IP, 32},
414         { INNER_L3_RSV, 16},
415         { INNER_SRC_PORT, 16},
416         { INNER_DST_PORT, 16},
417         { INNER_L4_RSV, 32},
418 };
419
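/* Legacy statistics path: read the MAC counters with the fixed-length
 * HCLGE_OPC_STATS_MAC command (HCLGE_MAC_CMD_NUM descriptors) and
 * accumulate the returned values into hdev->mac_stats.
 */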
420 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
421 {
422 #define HCLGE_MAC_CMD_NUM 21
423
424         u64 *data = (u64 *)(&hdev->mac_stats);
425         struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
426         __le64 *desc_data;
427         int i, k, n;
428         int ret;
429
430         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
431         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
432         if (ret) {
433                 dev_err(&hdev->pdev->dev,
434                         "Get MAC pkt stats fail, status = %d.\n", ret);
435
436                 return ret;
437         }
438
439         for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
440                 /* for special opcode 0032, only the first desc has the head */
441                 if (unlikely(i == 0)) {
442                         desc_data = (__le64 *)(&desc[i].data[0]);
443                         n = HCLGE_RD_FIRST_STATS_NUM;
444                 } else {
445                         desc_data = (__le64 *)(&desc[i]);
446                         n = HCLGE_RD_OTHER_STATS_NUM;
447                 }
448
449                 for (k = 0; k < n; k++) {
450                         *data += le64_to_cpu(*desc_data);
451                         data++;
452                         desc_data++;
453                 }
454         }
455
456         return 0;
457 }
458
459 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
460 {
461         u64 *data = (u64 *)(&hdev->mac_stats);
462         struct hclge_desc *desc;
463         __le64 *desc_data;
464         u16 i, k, n;
465         int ret;
466
467         /* This may be called inside atomic sections,
468          * so GFP_ATOMIC is more suitable here
469          */
470         desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
471         if (!desc)
472                 return -ENOMEM;
473
474         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
475         ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
476         if (ret) {
477                 kfree(desc);
478                 return ret;
479         }
480
481         for (i = 0; i < desc_num; i++) {
482                 /* for special opcode 0034, only the first desc has the head */
483                 if (i == 0) {
484                         desc_data = (__le64 *)(&desc[i].data[0]);
485                         n = HCLGE_RD_FIRST_STATS_NUM;
486                 } else {
487                         desc_data = (__le64 *)(&desc[i]);
488                         n = HCLGE_RD_OTHER_STATS_NUM;
489                 }
490
491                 for (k = 0; k < n; k++) {
492                         *data += le64_to_cpu(*desc_data);
493                         data++;
494                         desc_data++;
495                 }
496         }
497
498         kfree(desc);
499
500         return 0;
501 }
502
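/* Ask the firmware how many MAC statistics registers exist and work out
 * how many command descriptors are needed to read them all: the first
 * descriptor also carries the command header, every following descriptor
 * holds four 64-bit counters, so desc_num = 1 + DIV_ROUND_UP(reg_num - 3, 4).
 */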
503 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
504 {
505         struct hclge_desc desc;
506         __le32 *desc_data;
507         u32 reg_num;
508         int ret;
509
510         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
511         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
512         if (ret)
513                 return ret;
514
515         desc_data = (__le32 *)(&desc.data[0]);
516         reg_num = le32_to_cpu(*desc_data);
517
518         *desc_num = 1 + ((reg_num - 3) >> 2) +
519                     (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
520
521         return 0;
522 }
523
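/* Prefer the complete statistics command when the firmware can report the
 * register count; fall back to the legacy fixed-length command when the
 * query opcode is not supported (-EOPNOTSUPP).
 */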
524 static int hclge_mac_update_stats(struct hclge_dev *hdev)
525 {
526         u32 desc_num;
527         int ret;
528
529         ret = hclge_mac_query_reg_num(hdev, &desc_num);
530
531         /* The firmware supports the new statistics acquisition method */
532         if (!ret)
533                 ret = hclge_mac_update_stats_complete(hdev, desc_num);
534         else if (ret == -EOPNOTSUPP)
535                 ret = hclge_mac_update_stats_defective(hdev);
536         else
537                 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
538
539         return ret;
540 }
541
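/* Read the RX and TX packet counters of every TQP owned by this handle
 * and accumulate them into the per-TQP software statistics.
 */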
542 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
543 {
544         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
545         struct hclge_vport *vport = hclge_get_vport(handle);
546         struct hclge_dev *hdev = vport->back;
547         struct hnae3_queue *queue;
548         struct hclge_desc desc[1];
549         struct hclge_tqp *tqp;
550         int ret, i;
551
552         for (i = 0; i < kinfo->num_tqps; i++) {
553                 queue = handle->kinfo.tqp[i];
554                 tqp = container_of(queue, struct hclge_tqp, q);
555                 /* command : HCLGE_OPC_QUERY_RX_STATS */
556                 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
557                                            true);
558
559                 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
560                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
561                 if (ret) {
562                         dev_err(&hdev->pdev->dev,
563                                 "Query tqp stat fail, status = %d,queue = %d\n",
564                                 ret, i);
565                         return ret;
566                 }
567                 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
568                         le32_to_cpu(desc[0].data[1]);
569         }
570
571         for (i = 0; i < kinfo->num_tqps; i++) {
572                 queue = handle->kinfo.tqp[i];
573                 tqp = container_of(queue, struct hclge_tqp, q);
574                 /* command : HCLGE_OPC_QUERY_TX_STATS */
575                 hclge_cmd_setup_basic_desc(&desc[0],
576                                            HCLGE_OPC_QUERY_TX_STATS,
577                                            true);
578
579                 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
580                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
581                 if (ret) {
582                         dev_err(&hdev->pdev->dev,
583                                 "Query tqp stat fail, status = %d,queue = %d\n",
584                                 ret, i);
585                         return ret;
586                 }
587                 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
588                         le32_to_cpu(desc[0].data[1]);
589         }
590
591         return 0;
592 }
593
594 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
595 {
596         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
597         struct hclge_tqp *tqp;
598         u64 *buff = data;
599         int i;
600
601         for (i = 0; i < kinfo->num_tqps; i++) {
602                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
603                 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
604         }
605
606         for (i = 0; i < kinfo->num_tqps; i++) {
607                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
608                 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
609         }
610
611         return buff;
612 }
613
614 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
615 {
616         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
617
618         /* each tqp has one TX queue and one RX queue */
619         return kinfo->num_tqps * (2);
620 }
621
622 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
623 {
624         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
625         u8 *buff = data;
626         int i;
627
628         for (i = 0; i < kinfo->num_tqps; i++) {
629                 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
630                         struct hclge_tqp, q);
631                 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
632                          tqp->index);
633                 buff = buff + ETH_GSTRING_LEN;
634         }
635
636         for (i = 0; i < kinfo->num_tqps; i++) {
637                 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
638                         struct hclge_tqp, q);
639                 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
640                          tqp->index);
641                 buff = buff + ETH_GSTRING_LEN;
642         }
643
644         return buff;
645 }
646
647 static u64 *hclge_comm_get_stats(const void *comm_stats,
648                                  const struct hclge_comm_stats_str strs[],
649                                  int size, u64 *data)
650 {
651         u64 *buf = data;
652         u32 i;
653
654         for (i = 0; i < size; i++)
655                 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
656
657         return buf + size;
658 }
659
660 static u8 *hclge_comm_get_strings(u32 stringset,
661                                   const struct hclge_comm_stats_str strs[],
662                                   int size, u8 *data)
663 {
664         char *buff = (char *)data;
665         u32 i;
666
667         if (stringset != ETH_SS_STATS)
668                 return buff;
669
670         for (i = 0; i < size; i++) {
671                 snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
672                 buff = buff + ETH_GSTRING_LEN;
673         }
674
675         return (u8 *)buff;
676 }
677
678 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
679 {
680         struct hnae3_handle *handle;
681         int status;
682
683         handle = &hdev->vport[0].nic;
684         if (handle->client) {
685                 status = hclge_tqps_update_stats(handle);
686                 if (status) {
687                         dev_err(&hdev->pdev->dev,
688                                 "Update TQPS stats fail, status = %d.\n",
689                                 status);
690                 }
691         }
692
693         status = hclge_mac_update_stats(hdev);
694         if (status)
695                 dev_err(&hdev->pdev->dev,
696                         "Update MAC stats fail, status = %d.\n", status);
697 }
698
699 static void hclge_update_stats(struct hnae3_handle *handle,
700                                struct net_device_stats *net_stats)
701 {
702         struct hclge_vport *vport = hclge_get_vport(handle);
703         struct hclge_dev *hdev = vport->back;
704         int status;
705
706         if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
707                 return;
708
709         status = hclge_mac_update_stats(hdev);
710         if (status)
711                 dev_err(&hdev->pdev->dev,
712                         "Update MAC stats fail, status = %d.\n",
713                         status);
714
715         status = hclge_tqps_update_stats(handle);
716         if (status)
717                 dev_err(&hdev->pdev->dev,
718                         "Update TQPS stats fail, status = %d.\n",
719                         status);
720
721         clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
722 }
723
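/* Return the number of ethtool self-test items or statistics strings for
 * this handle, and record in handle->flags which loopback tests the
 * device supports.
 */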
724 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
725 {
726 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
727                 HNAE3_SUPPORT_PHY_LOOPBACK |\
728                 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
729                 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
730
731         struct hclge_vport *vport = hclge_get_vport(handle);
732         struct hclge_dev *hdev = vport->back;
733         int count = 0;
734
735         /* Loopback test support rules:
736          * mac: only supported in GE mode
737          * serdes: supported in all mac modes, including GE/XGE/LGE/CGE
738          * phy: only supported when a phy device exists on the board
739          */
740         if (stringset == ETH_SS_TEST) {
741                 /* clear loopback bit flags at first */
742                 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
743                 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
744                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
745                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
746                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
747                         count += 1;
748                         handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
749                 }
750
751                 count += 2;
752                 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
753                 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
754
755                 if (hdev->hw.mac.phydev) {
756                         count += 1;
757                         handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
758                 }
759
760         } else if (stringset == ETH_SS_STATS) {
761                 count = ARRAY_SIZE(g_mac_stats_string) +
762                         hclge_tqps_get_sset_count(handle, stringset);
763         }
764
765         return count;
766 }
767
768 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
769                               u8 *data)
770 {
771         u8 *p = (char *)data;
772         int size;
773
774         if (stringset == ETH_SS_STATS) {
775                 size = ARRAY_SIZE(g_mac_stats_string);
776                 p = hclge_comm_get_strings(stringset, g_mac_stats_string,
777                                            size, p);
778                 p = hclge_tqps_get_strings(handle, p);
779         } else if (stringset == ETH_SS_TEST) {
780                 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
781                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
782                                ETH_GSTRING_LEN);
783                         p += ETH_GSTRING_LEN;
784                 }
785                 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
786                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
787                                ETH_GSTRING_LEN);
788                         p += ETH_GSTRING_LEN;
789                 }
790                 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
791                         memcpy(p,
792                                hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
793                                ETH_GSTRING_LEN);
794                         p += ETH_GSTRING_LEN;
795                 }
796                 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
797                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
798                                ETH_GSTRING_LEN);
799                         p += ETH_GSTRING_LEN;
800                 }
801         }
802 }
803
804 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
805 {
806         struct hclge_vport *vport = hclge_get_vport(handle);
807         struct hclge_dev *hdev = vport->back;
808         u64 *p;
809
810         p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
811                                  ARRAY_SIZE(g_mac_stats_string), data);
812         p = hclge_tqps_get_stats(handle, p);
813 }
814
815 static void hclge_get_mac_stat(struct hnae3_handle *handle,
816                                struct hns3_mac_stats *mac_stats)
817 {
818         struct hclge_vport *vport = hclge_get_vport(handle);
819         struct hclge_dev *hdev = vport->back;
820
821         hclge_update_stats(handle, NULL);
822
823         mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
824         mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
825 }
826
827 static int hclge_parse_func_status(struct hclge_dev *hdev,
828                                    struct hclge_func_status_cmd *status)
829 {
830 #define HCLGE_MAC_ID_MASK       0xF
831
832         if (!(status->pf_state & HCLGE_PF_STATE_DONE))
833                 return -EINVAL;
834
835         /* Set the pf to main pf */
836         if (status->pf_state & HCLGE_PF_STATE_MAIN)
837                 hdev->flag |= HCLGE_FLAG_MAIN;
838         else
839                 hdev->flag &= ~HCLGE_FLAG_MAIN;
840
841         hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
842         return 0;
843 }
844
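/* Poll the function status until the firmware reports a PF state (at most
 * HCLGE_QUERY_MAX_CNT tries), then record whether this PF is the main PF
 * and which MAC it uses.
 */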
845 static int hclge_query_function_status(struct hclge_dev *hdev)
846 {
847 #define HCLGE_QUERY_MAX_CNT     5
848
849         struct hclge_func_status_cmd *req;
850         struct hclge_desc desc;
851         int timeout = 0;
852         int ret;
853
854         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
855         req = (struct hclge_func_status_cmd *)desc.data;
856
857         do {
858                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
859                 if (ret) {
860                         dev_err(&hdev->pdev->dev,
861                                 "query function status failed %d.\n", ret);
862                         return ret;
863                 }
864
865                 /* Check pf reset is done */
866                 if (req->pf_state)
867                         break;
868                 usleep_range(1000, 2000);
869         } while (timeout++ < HCLGE_QUERY_MAX_CNT);
870
871         return hclge_parse_func_status(hdev, req);
872 }
873
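/* Query the resources assigned to this PF: TQP count, packet/TX/DV buffer
 * sizes and MSI-X vectors. When RoCE is supported, the NIC vector count
 * equals the RoCE vector count and the NIC vectors are placed first.
 */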
874 static int hclge_query_pf_resource(struct hclge_dev *hdev)
875 {
876         struct hclge_pf_res_cmd *req;
877         struct hclge_desc desc;
878         int ret;
879
880         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
881         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
882         if (ret) {
883                 dev_err(&hdev->pdev->dev,
884                         "query pf resource failed %d.\n", ret);
885                 return ret;
886         }
887
888         req = (struct hclge_pf_res_cmd *)desc.data;
889         hdev->num_tqps = le16_to_cpu(req->tqp_num);
890         hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
891
892         if (req->tx_buf_size)
893                 hdev->tx_buf_size =
894                         le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
895         else
896                 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
897
898         hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
899
900         if (req->dv_buf_size)
901                 hdev->dv_buf_size =
902                         le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
903         else
904                 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
905
906         hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
907
908         if (hnae3_dev_roce_supported(hdev)) {
909                 hdev->roce_base_msix_offset =
910                 hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
911                                 HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
912                 hdev->num_roce_msi =
913                 hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
914                                 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
915
916                 /* nic's msix number is always equal to the roce's. */
917                 hdev->num_nic_msi = hdev->num_roce_msi;
918
919                 /* PF should have NIC vectors and RoCE vectors;
920                  * NIC vectors are queued before RoCE vectors.
921                  */
922                 hdev->num_msi = hdev->num_roce_msi +
923                                 hdev->roce_base_msix_offset;
924         } else {
925                 hdev->num_msi =
926                 hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
927                                 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
928
929                 hdev->num_nic_msi = hdev->num_msi;
930         }
931
932         if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
933                 dev_err(&hdev->pdev->dev,
934                         "Just %u msi resources, not enough for pf(min:2).\n",
935                         hdev->num_nic_msi);
936                 return -EINVAL;
937         }
938
939         return 0;
940 }
941
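/* Translate the speed code reported by the firmware into an
 * HCLGE_MAC_SPEED_* value; unknown codes are rejected with -EINVAL.
 */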
942 static int hclge_parse_speed(int speed_cmd, int *speed)
943 {
944         switch (speed_cmd) {
945         case 6:
946                 *speed = HCLGE_MAC_SPEED_10M;
947                 break;
948         case 7:
949                 *speed = HCLGE_MAC_SPEED_100M;
950                 break;
951         case 0:
952                 *speed = HCLGE_MAC_SPEED_1G;
953                 break;
954         case 1:
955                 *speed = HCLGE_MAC_SPEED_10G;
956                 break;
957         case 2:
958                 *speed = HCLGE_MAC_SPEED_25G;
959                 break;
960         case 3:
961                 *speed = HCLGE_MAC_SPEED_40G;
962                 break;
963         case 4:
964                 *speed = HCLGE_MAC_SPEED_50G;
965                 break;
966         case 5:
967                 *speed = HCLGE_MAC_SPEED_100G;
968                 break;
969         case 8:
970                 *speed = HCLGE_MAC_SPEED_200G;
971                 break;
972         default:
973                 return -EINVAL;
974         }
975
976         return 0;
977 }
978
979 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
980 {
981         struct hclge_vport *vport = hclge_get_vport(handle);
982         struct hclge_dev *hdev = vport->back;
983         u32 speed_ability = hdev->hw.mac.speed_ability;
984         u32 speed_bit = 0;
985
986         switch (speed) {
987         case HCLGE_MAC_SPEED_10M:
988                 speed_bit = HCLGE_SUPPORT_10M_BIT;
989                 break;
990         case HCLGE_MAC_SPEED_100M:
991                 speed_bit = HCLGE_SUPPORT_100M_BIT;
992                 break;
993         case HCLGE_MAC_SPEED_1G:
994                 speed_bit = HCLGE_SUPPORT_1G_BIT;
995                 break;
996         case HCLGE_MAC_SPEED_10G:
997                 speed_bit = HCLGE_SUPPORT_10G_BIT;
998                 break;
999         case HCLGE_MAC_SPEED_25G:
1000                 speed_bit = HCLGE_SUPPORT_25G_BIT;
1001                 break;
1002         case HCLGE_MAC_SPEED_40G:
1003                 speed_bit = HCLGE_SUPPORT_40G_BIT;
1004                 break;
1005         case HCLGE_MAC_SPEED_50G:
1006                 speed_bit = HCLGE_SUPPORT_50G_BIT;
1007                 break;
1008         case HCLGE_MAC_SPEED_100G:
1009                 speed_bit = HCLGE_SUPPORT_100G_BIT;
1010                 break;
1011         case HCLGE_MAC_SPEED_200G:
1012                 speed_bit = HCLGE_SUPPORT_200G_BIT;
1013                 break;
1014         default:
1015                 return -EINVAL;
1016         }
1017
1018         if (speed_bit & speed_ability)
1019                 return 0;
1020
1021         return -EINVAL;
1022 }
1023
1024 static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)
1025 {
1026         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1027                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1028                                  mac->supported);
1029         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1030                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1031                                  mac->supported);
1032         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1033                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1034                                  mac->supported);
1035         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1036                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1037                                  mac->supported);
1038         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1039                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1040                                  mac->supported);
1041         if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1042                 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
1043                                  mac->supported);
1044 }
1045
1046 static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)
1047 {
1048         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1049                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1050                                  mac->supported);
1051         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1052                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1053                                  mac->supported);
1054         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1055                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1056                                  mac->supported);
1057         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1058                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1059                                  mac->supported);
1060         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1061                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1062                                  mac->supported);
1063         if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1064                 linkmode_set_bit(
1065                         ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
1066                         mac->supported);
1067 }
1068
1069 static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)
1070 {
1071         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1072                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1073                                  mac->supported);
1074         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1075                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1076                                  mac->supported);
1077         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1078                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1079                                  mac->supported);
1080         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1081                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1082                                  mac->supported);
1083         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1084                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1085                                  mac->supported);
1086         if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1087                 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
1088                                  mac->supported);
1089 }
1090
1091 static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)
1092 {
1093         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1094                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1095                                  mac->supported);
1096         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1097                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1098                                  mac->supported);
1099         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1100                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1101                                  mac->supported);
1102         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1103                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1104                                  mac->supported);
1105         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1106                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1107                                  mac->supported);
1108         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1109                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1110                                  mac->supported);
1111         if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1112                 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
1113                                  mac->supported);
1114 }
1115
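/* Set the supported FEC link modes and fec_ability according to the
 * current MAC speed: BaseR for 10G/40G, RS (with BaseR ability) for
 * 25G/50G, RS only for 100G/200G, none otherwise.
 */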
1116 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1117 {
1118         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1119         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1120
1121         switch (mac->speed) {
1122         case HCLGE_MAC_SPEED_10G:
1123         case HCLGE_MAC_SPEED_40G:
1124                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1125                                  mac->supported);
1126                 mac->fec_ability =
1127                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1128                 break;
1129         case HCLGE_MAC_SPEED_25G:
1130         case HCLGE_MAC_SPEED_50G:
1131                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1132                                  mac->supported);
1133                 mac->fec_ability =
1134                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1135                         BIT(HNAE3_FEC_AUTO);
1136                 break;
1137         case HCLGE_MAC_SPEED_100G:
1138         case HCLGE_MAC_SPEED_200G:
1139                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1140                 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1141                 break;
1142         default:
1143                 mac->fec_ability = 0;
1144                 break;
1145         }
1146 }
1147
1148 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1149                                         u16 speed_ability)
1150 {
1151         struct hclge_mac *mac = &hdev->hw.mac;
1152
1153         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1154                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1155                                  mac->supported);
1156
1157         hclge_convert_setting_sr(mac, speed_ability);
1158         hclge_convert_setting_lr(mac, speed_ability);
1159         hclge_convert_setting_cr(mac, speed_ability);
1160         if (hnae3_dev_fec_supported(hdev))
1161                 hclge_convert_setting_fec(mac);
1162
1163         linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1164         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1165         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1166 }
1167
1168 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1169                                             u16 speed_ability)
1170 {
1171         struct hclge_mac *mac = &hdev->hw.mac;
1172
1173         hclge_convert_setting_kr(mac, speed_ability);
1174         if (hnae3_dev_fec_supported(hdev))
1175                 hclge_convert_setting_fec(mac);
1176         linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1177         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1178         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1179 }
1180
1181 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1182                                          u16 speed_ability)
1183 {
1184         unsigned long *supported = hdev->hw.mac.supported;
1185
1186         /* default to support all speeds for GE port */
1187         if (!speed_ability)
1188                 speed_ability = HCLGE_SUPPORT_GE;
1189
1190         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1191                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1192                                  supported);
1193
1194         if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1195                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1196                                  supported);
1197                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1198                                  supported);
1199         }
1200
1201         if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1202                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1203                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1204         }
1205
1206         linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1207         linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1208         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1209         linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1210 }
1211
1212 static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
1213 {
1214         u8 media_type = hdev->hw.mac.media_type;
1215
1216         if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1217                 hclge_parse_fiber_link_mode(hdev, speed_ability);
1218         else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1219                 hclge_parse_copper_link_mode(hdev, speed_ability);
1220         else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1221                 hclge_parse_backplane_link_mode(hdev, speed_ability);
1222 }
1223
1224 static u32 hclge_get_max_speed(u16 speed_ability)
1225 {
1226         if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1227                 return HCLGE_MAC_SPEED_200G;
1228
1229         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1230                 return HCLGE_MAC_SPEED_100G;
1231
1232         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1233                 return HCLGE_MAC_SPEED_50G;
1234
1235         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1236                 return HCLGE_MAC_SPEED_40G;
1237
1238         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1239                 return HCLGE_MAC_SPEED_25G;
1240
1241         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1242                 return HCLGE_MAC_SPEED_10G;
1243
1244         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1245                 return HCLGE_MAC_SPEED_1G;
1246
1247         if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1248                 return HCLGE_MAC_SPEED_100M;
1249
1250         if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1251                 return HCLGE_MAC_SPEED_10M;
1252
1253         return HCLGE_MAC_SPEED_1G;
1254 }
1255
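/* Unpack the configuration read from flash (two descriptors) into struct
 * hclge_cfg: VMDq/TC/TQP parameters, media type, RX buffer length, MAC
 * address, default speed, RSS size, speed ability and UMV table space.
 */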
1256 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1257 {
1258 #define SPEED_ABILITY_EXT_SHIFT                 8
1259
1260         struct hclge_cfg_param_cmd *req;
1261         u64 mac_addr_tmp_high;
1262         u16 speed_ability_ext;
1263         u64 mac_addr_tmp;
1264         unsigned int i;
1265
1266         req = (struct hclge_cfg_param_cmd *)desc[0].data;
1267
1268         /* get the configuration */
1269         cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1270                                               HCLGE_CFG_VMDQ_M,
1271                                               HCLGE_CFG_VMDQ_S);
1272         cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1273                                       HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1274         cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1275                                             HCLGE_CFG_TQP_DESC_N_M,
1276                                             HCLGE_CFG_TQP_DESC_N_S);
1277
1278         cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1279                                         HCLGE_CFG_PHY_ADDR_M,
1280                                         HCLGE_CFG_PHY_ADDR_S);
1281         cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1282                                           HCLGE_CFG_MEDIA_TP_M,
1283                                           HCLGE_CFG_MEDIA_TP_S);
1284         cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1285                                           HCLGE_CFG_RX_BUF_LEN_M,
1286                                           HCLGE_CFG_RX_BUF_LEN_S);
1287         /* get mac_address */
1288         mac_addr_tmp = __le32_to_cpu(req->param[2]);
1289         mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1290                                             HCLGE_CFG_MAC_ADDR_H_M,
1291                                             HCLGE_CFG_MAC_ADDR_H_S);
1292
1293         mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1294
1295         cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1296                                              HCLGE_CFG_DEFAULT_SPEED_M,
1297                                              HCLGE_CFG_DEFAULT_SPEED_S);
1298         cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1299                                             HCLGE_CFG_RSS_SIZE_M,
1300                                             HCLGE_CFG_RSS_SIZE_S);
1301
1302         for (i = 0; i < ETH_ALEN; i++)
1303                 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1304
1305         req = (struct hclge_cfg_param_cmd *)desc[1].data;
1306         cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1307
1308         cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1309                                              HCLGE_CFG_SPEED_ABILITY_M,
1310                                              HCLGE_CFG_SPEED_ABILITY_S);
1311         speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
1312                                             HCLGE_CFG_SPEED_ABILITY_EXT_M,
1313                                             HCLGE_CFG_SPEED_ABILITY_EXT_S);
1314         cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;
1315
1316         cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1317                                          HCLGE_CFG_UMV_TBL_SPACE_M,
1318                                          HCLGE_CFG_UMV_TBL_SPACE_S);
1319         if (!cfg->umv_space)
1320                 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1321 }
1322
1323 /* hclge_get_cfg: query the static parameters from flash
1324  * @hdev: pointer to struct hclge_dev
1325  * @hcfg: the config structure to be filled in
1326  */
1327 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1328 {
1329         struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1330         struct hclge_cfg_param_cmd *req;
1331         unsigned int i;
1332         int ret;
1333
1334         for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1335                 u32 offset = 0;
1336
1337                 req = (struct hclge_cfg_param_cmd *)desc[i].data;
1338                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1339                                            true);
1340                 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1341                                 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1342                 /* Len is expressed in units of 4 bytes when sent to hardware */
1343                 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1344                                 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1345                 req->offset = cpu_to_le32(offset);
1346         }
1347
1348         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1349         if (ret) {
1350                 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1351                 return ret;
1352         }
1353
1354         hclge_parse_cfg(hcfg, desc);
1355
1356         return 0;
1357 }
1358
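/* Fall back to driver defaults when device specifications cannot be
 * queried from firmware.
 */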
1359 static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
1360 {
1361 #define HCLGE_MAX_NON_TSO_BD_NUM                        8U
1362
1363         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1364
1365         ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1366         ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1367         ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
1368         ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
1369         ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
1370 }
1371
1372 static void hclge_parse_dev_specs(struct hclge_dev *hdev,
1373                                   struct hclge_desc *desc)
1374 {
1375         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1376         struct hclge_dev_specs_0_cmd *req0;
1377         struct hclge_dev_specs_1_cmd *req1;
1378
1379         req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
1380         req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;
1381
1382         ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
1383         ae_dev->dev_specs.rss_ind_tbl_size =
1384                 le16_to_cpu(req0->rss_ind_tbl_size);
1385         ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
1386         ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
1387         ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
1388         ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
1389 }
1390
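/* Replace any specification the firmware left at zero with the driver default. */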
1391 static void hclge_check_dev_specs(struct hclge_dev *hdev)
1392 {
1393         struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
1394
1395         if (!dev_specs->max_non_tso_bd_num)
1396                 dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1397         if (!dev_specs->rss_ind_tbl_size)
1398                 dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1399         if (!dev_specs->rss_key_size)
1400                 dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
1401         if (!dev_specs->max_tm_rate)
1402                 dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
1403         if (!dev_specs->max_int_gl)
1404                 dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
1405 }
1406
1407 static int hclge_query_dev_specs(struct hclge_dev *hdev)
1408 {
1409         struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
1410         int ret;
1411         int i;
1412
1413         /* set default specifications as devices with a version lower than
1414          * V3 do not support querying specifications from firmware.
1415          */
1416         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
1417                 hclge_set_default_dev_specs(hdev);
1418                 return 0;
1419         }
1420
1421         for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
1422                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
1423                                            true);
1424                 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1425         }
1426         hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
1427
1428         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
1429         if (ret)
1430                 return ret;
1431
1432         hclge_parse_dev_specs(hdev, desc);
1433         hclge_check_dev_specs(hdev);
1434
1435         return 0;
1436 }
1437
1438 static int hclge_get_cap(struct hclge_dev *hdev)
1439 {
1440         int ret;
1441
1442         ret = hclge_query_function_status(hdev);
1443         if (ret) {
1444                 dev_err(&hdev->pdev->dev,
1445                         "query function status error %d.\n", ret);
1446                 return ret;
1447         }
1448
1449         /* get pf resource */
1450         return hclge_query_pf_resource(hdev);
1451 }
1452
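/* When running in a kdump (crash capture) kernel, shrink the queue pair
 * and descriptor counts to the minimum.
 */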
1453 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1454 {
1455 #define HCLGE_MIN_TX_DESC       64
1456 #define HCLGE_MIN_RX_DESC       64
1457
1458         if (!is_kdump_kernel())
1459                 return;
1460
1461         dev_info(&hdev->pdev->dev,
1462                  "Running kdump kernel. Using minimal resources\n");
1463
1464         /* minimum number of queue pairs equals the number of vports */
1465         hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1466         hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1467         hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1468 }
1469
1470 static int hclge_configure(struct hclge_dev *hdev)
1471 {
1472         struct hclge_cfg cfg;
1473         unsigned int i;
1474         int ret;
1475
1476         ret = hclge_get_cfg(hdev, &cfg);
1477         if (ret)
1478                 return ret;
1479
1480         hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1481         hdev->base_tqp_pid = 0;
1482         hdev->rss_size_max = cfg.rss_size_max;
1483         hdev->rx_buf_len = cfg.rx_buf_len;
1484         ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1485         hdev->hw.mac.media_type = cfg.media_type;
1486         hdev->hw.mac.phy_addr = cfg.phy_addr;
1487         hdev->num_tx_desc = cfg.tqp_desc_num;
1488         hdev->num_rx_desc = cfg.tqp_desc_num;
1489         hdev->tm_info.num_pg = 1;
1490         hdev->tc_max = cfg.tc_num;
1491         hdev->tm_info.hw_pfc_map = 0;
1492         hdev->wanted_umv_size = cfg.umv_space;
1493
1494         if (hnae3_dev_fd_supported(hdev)) {
1495                 hdev->fd_en = true;
1496                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1497         }
1498
1499         ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1500         if (ret) {
1501                 dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
1502                         cfg.default_speed, ret);
1503                 return ret;
1504         }
1505
1506         hclge_parse_link_mode(hdev, cfg.speed_ability);
1507
1508         hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1509
1510         if ((hdev->tc_max > HNAE3_MAX_TC) ||
1511             (hdev->tc_max < 1)) {
1512                 dev_warn(&hdev->pdev->dev, "invalid TC num %u, set to 1.\n",
1513                          hdev->tc_max);
1514                 hdev->tc_max = 1;
1515         }
1516
1517         /* Dev does not support DCB */
1518         if (!hnae3_dev_dcb_supported(hdev)) {
1519                 hdev->tc_max = 1;
1520                 hdev->pfc_max = 0;
1521         } else {
1522                 hdev->pfc_max = hdev->tc_max;
1523         }
1524
1525         hdev->tm_info.num_tc = 1;
1526
1527         /* Non-contiguous TCs are currently not supported */
1528         for (i = 0; i < hdev->tm_info.num_tc; i++)
1529                 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1530
1531         hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1532
1533         hclge_init_kdump_kernel_config(hdev);
1534
1535         /* Set the initial affinity based on the PCI function number */
1536         i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
1537         i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
1538         cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
1539                         &hdev->affinity_mask);
1540
1541         return ret;
1542 }
1543
1544 static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
1545                             u16 tso_mss_max)
1546 {
1547         struct hclge_cfg_tso_status_cmd *req;
1548         struct hclge_desc desc;
1549
1550         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1551
1552         req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1553         req->tso_mss_min = cpu_to_le16(tso_mss_min);
1554         req->tso_mss_max = cpu_to_le16(tso_mss_max);
1555
1556         return hclge_cmd_send(&hdev->hw, &desc, 1);
1557 }
1558
1559 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1560 {
1561         struct hclge_cfg_gro_status_cmd *req;
1562         struct hclge_desc desc;
1563         int ret;
1564
1565         if (!hnae3_dev_gro_supported(hdev))
1566                 return 0;
1567
1568         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1569         req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1570
1571         req->gro_en = en ? 1 : 0;
1572
1573         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1574         if (ret)
1575                 dev_err(&hdev->pdev->dev,
1576                         "GRO hardware config cmd failed, ret = %d\n", ret);
1577
1578         return ret;
1579 }
1580
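/* Allocate the hclge_tqp array for all queue pairs owned by this PF and
 * initialize each queue's defaults and register base.
 */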
1581 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1582 {
1583         struct hclge_tqp *tqp;
1584         int i;
1585
1586         hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1587                                   sizeof(struct hclge_tqp), GFP_KERNEL);
1588         if (!hdev->htqp)
1589                 return -ENOMEM;
1590
1591         tqp = hdev->htqp;
1592
1593         for (i = 0; i < hdev->num_tqps; i++) {
1594                 tqp->dev = &hdev->pdev->dev;
1595                 tqp->index = i;
1596
1597                 tqp->q.ae_algo = &ae_algo;
1598                 tqp->q.buf_size = hdev->rx_buf_len;
1599                 tqp->q.tx_desc_num = hdev->num_tx_desc;
1600                 tqp->q.rx_desc_num = hdev->num_rx_desc;
1601                 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1602                         i * HCLGE_TQP_REG_SIZE;
1603
1604                 tqp++;
1605         }
1606
1607         return 0;
1608 }
1609
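/* Bind physical queue @tqp_pid to virtual queue @tqp_vid of function
 * @func_id (PF or VF) through the TQP map command.
 */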
1610 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1611                                   u16 tqp_pid, u16 tqp_vid, bool is_pf)
1612 {
1613         struct hclge_tqp_map_cmd *req;
1614         struct hclge_desc desc;
1615         int ret;
1616
1617         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1618
1619         req = (struct hclge_tqp_map_cmd *)desc.data;
1620         req->tqp_id = cpu_to_le16(tqp_pid);
1621         req->tqp_vf = func_id;
1622         req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1623         if (!is_pf)
1624                 req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1625         req->tqp_vid = cpu_to_le16(tqp_vid);
1626
1627         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1628         if (ret)
1629                 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1630
1631         return ret;
1632 }
1633
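/* Hand out up to @num_tqps unallocated queue pairs to the vport and
 * derive its rss_size from the result.
 */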
1634 static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1635 {
1636         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1637         struct hclge_dev *hdev = vport->back;
1638         int i, alloced;
1639
1640         for (i = 0, alloced = 0; i < hdev->num_tqps &&
1641              alloced < num_tqps; i++) {
1642                 if (!hdev->htqp[i].alloced) {
1643                         hdev->htqp[i].q.handle = &vport->nic;
1644                         hdev->htqp[i].q.tqp_index = alloced;
1645                         hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1646                         hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1647                         kinfo->tqp[alloced] = &hdev->htqp[i].q;
1648                         hdev->htqp[i].alloced = true;
1649                         alloced++;
1650                 }
1651         }
1652         vport->alloc_tqps = alloced;
1653         kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1654                                 vport->alloc_tqps / hdev->tm_info.num_tc);
1655
1656         /* ensure a one-to-one mapping between irq and queue by default */
1657         kinfo->rss_size = min_t(u16, kinfo->rss_size,
1658                                 (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1659
1660         return 0;
1661 }
1662
1663 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1664                             u16 num_tx_desc, u16 num_rx_desc)
1665
1666 {
1667         struct hnae3_handle *nic = &vport->nic;
1668         struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1669         struct hclge_dev *hdev = vport->back;
1670         int ret;
1671
1672         kinfo->num_tx_desc = num_tx_desc;
1673         kinfo->num_rx_desc = num_rx_desc;
1674
1675         kinfo->rx_buf_len = hdev->rx_buf_len;
1676
1677         kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1678                                   sizeof(struct hnae3_queue *), GFP_KERNEL);
1679         if (!kinfo->tqp)
1680                 return -ENOMEM;
1681
1682         ret = hclge_assign_tqp(vport, num_tqps);
1683         if (ret)
1684                 dev_err(&hdev->pdev->dev, "failed to assign TQPs %d.\n", ret);
1685
1686         return ret;
1687 }
1688
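/* Program the hardware mapping for every queue pair already assigned to the vport. */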
1689 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1690                                   struct hclge_vport *vport)
1691 {
1692         struct hnae3_handle *nic = &vport->nic;
1693         struct hnae3_knic_private_info *kinfo;
1694         u16 i;
1695
1696         kinfo = &nic->kinfo;
1697         for (i = 0; i < vport->alloc_tqps; i++) {
1698                 struct hclge_tqp *q =
1699                         container_of(kinfo->tqp[i], struct hclge_tqp, q);
1700                 bool is_pf;
1701                 int ret;
1702
1703                 is_pf = !(vport->vport_id);
1704                 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1705                                              i, is_pf);
1706                 if (ret)
1707                         return ret;
1708         }
1709
1710         return 0;
1711 }
1712
1713 static int hclge_map_tqp(struct hclge_dev *hdev)
1714 {
1715         struct hclge_vport *vport = hdev->vport;
1716         u16 i, num_vport;
1717
1718         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1719         for (i = 0; i < num_vport; i++) {
1720                 int ret;
1721
1722                 ret = hclge_map_tqp_to_vport(hdev, vport);
1723                 if (ret)
1724                         return ret;
1725
1726                 vport++;
1727         }
1728
1729         return 0;
1730 }
1731
1732 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1733 {
1734         struct hnae3_handle *nic = &vport->nic;
1735         struct hclge_dev *hdev = vport->back;
1736         int ret;
1737
1738         nic->pdev = hdev->pdev;
1739         nic->ae_algo = &ae_algo;
1740         nic->numa_node_mask = hdev->numa_node_mask;
1741
1742         ret = hclge_knic_setup(vport, num_tqps,
1743                                hdev->num_tx_desc, hdev->num_rx_desc);
1744         if (ret)
1745                 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1746
1747         return ret;
1748 }
1749
1750 static int hclge_alloc_vport(struct hclge_dev *hdev)
1751 {
1752         struct pci_dev *pdev = hdev->pdev;
1753         struct hclge_vport *vport;
1754         u32 tqp_main_vport;
1755         u32 tqp_per_vport;
1756         int num_vport, i;
1757         int ret;
1758
1759         /* We need to alloc a vport for the main NIC of the PF */
1760         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1761
1762         if (hdev->num_tqps < num_vport) {
1763                 dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
1764                         hdev->num_tqps, num_vport);
1765                 return -EINVAL;
1766         }
1767
1768         /* Alloc the same number of TQPs for every vport */
1769         tqp_per_vport = hdev->num_tqps / num_vport;
1770         tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1771
1772         vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1773                              GFP_KERNEL);
1774         if (!vport)
1775                 return -ENOMEM;
1776
1777         hdev->vport = vport;
1778         hdev->num_alloc_vport = num_vport;
1779
1780         if (IS_ENABLED(CONFIG_PCI_IOV))
1781                 hdev->num_alloc_vfs = hdev->num_req_vfs;
1782
1783         for (i = 0; i < num_vport; i++) {
1784                 vport->back = hdev;
1785                 vport->vport_id = i;
1786                 vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1787                 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1788                 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1789                 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1790                 INIT_LIST_HEAD(&vport->vlan_list);
1791                 INIT_LIST_HEAD(&vport->uc_mac_list);
1792                 INIT_LIST_HEAD(&vport->mc_mac_list);
1793                 spin_lock_init(&vport->mac_list_lock);
1794
1795                 if (i == 0)
1796                         ret = hclge_vport_setup(vport, tqp_main_vport);
1797                 else
1798                         ret = hclge_vport_setup(vport, tqp_per_vport);
1799                 if (ret) {
1800                         dev_err(&pdev->dev,
1801                                 "vport setup failed for vport %d, %d\n",
1802                                 i, ret);
1803                         return ret;
1804                 }
1805
1806                 vport++;
1807         }
1808
1809         return 0;
1810 }
1811
1812 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1813                                     struct hclge_pkt_buf_alloc *buf_alloc)
1814 {
1815 /* TX buffer size is in units of 128 bytes */
1816 #define HCLGE_BUF_SIZE_UNIT_SHIFT       7
1817 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK    BIT(15)
1818         struct hclge_tx_buff_alloc_cmd *req;
1819         struct hclge_desc desc;
1820         int ret;
1821         u8 i;
1822
1823         req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1824
1825         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1826         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1827                 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1828
1829                 req->tx_pkt_buff[i] =
1830                         cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1831                                      HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1832         }
1833
1834         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1835         if (ret)
1836                 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1837                         ret);
1838
1839         return ret;
1840 }
1841
1842 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1843                                  struct hclge_pkt_buf_alloc *buf_alloc)
1844 {
1845         int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1846
1847         if (ret)
1848                 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1849
1850         return ret;
1851 }
1852
1853 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1854 {
1855         unsigned int i;
1856         u32 cnt = 0;
1857
1858         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1859                 if (hdev->hw_tc_map & BIT(i))
1860                         cnt++;
1861         return cnt;
1862 }
1863
1864 /* Get the number of PFC-enabled TCs that have a private buffer */
1865 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1866                                   struct hclge_pkt_buf_alloc *buf_alloc)
1867 {
1868         struct hclge_priv_buf *priv;
1869         unsigned int i;
1870         int cnt = 0;
1871
1872         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1873                 priv = &buf_alloc->priv_buf[i];
1874                 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1875                     priv->enable)
1876                         cnt++;
1877         }
1878
1879         return cnt;
1880 }
1881
1882 /* Get the number of PFC-disabled TCs that have a private buffer */
1883 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1884                                      struct hclge_pkt_buf_alloc *buf_alloc)
1885 {
1886         struct hclge_priv_buf *priv;
1887         unsigned int i;
1888         int cnt = 0;
1889
1890         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1891                 priv = &buf_alloc->priv_buf[i];
1892                 if (hdev->hw_tc_map & BIT(i) &&
1893                     !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1894                     priv->enable)
1895                         cnt++;
1896         }
1897
1898         return cnt;
1899 }
1900
1901 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1902 {
1903         struct hclge_priv_buf *priv;
1904         u32 rx_priv = 0;
1905         int i;
1906
1907         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1908                 priv = &buf_alloc->priv_buf[i];
1909                 if (priv->enable)
1910                         rx_priv += priv->buf_size;
1911         }
1912         return rx_priv;
1913 }
1914
1915 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1916 {
1917         u32 i, total_tx_size = 0;
1918
1919         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1920                 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1921
1922         return total_tx_size;
1923 }
1924
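/* Check whether @rx_all can hold the private buffers plus a big enough
 * shared buffer; on success fill in the shared buffer size, waterlines
 * and per-TC thresholds.
 */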
1925 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1926                                 struct hclge_pkt_buf_alloc *buf_alloc,
1927                                 u32 rx_all)
1928 {
1929         u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1930         u32 tc_num = hclge_get_tc_num(hdev);
1931         u32 shared_buf, aligned_mps;
1932         u32 rx_priv;
1933         int i;
1934
1935         aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1936
1937         if (hnae3_dev_dcb_supported(hdev))
1938                 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1939                                         hdev->dv_buf_size;
1940         else
1941                 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1942                                         + hdev->dv_buf_size;
1943
1944         shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1945         shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1946                              HCLGE_BUF_SIZE_UNIT);
1947
1948         rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1949         if (rx_all < rx_priv + shared_std)
1950                 return false;
1951
1952         shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1953         buf_alloc->s_buf.buf_size = shared_buf;
1954         if (hnae3_dev_dcb_supported(hdev)) {
1955                 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1956                 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1957                         - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1958                                   HCLGE_BUF_SIZE_UNIT);
1959         } else {
1960                 buf_alloc->s_buf.self.high = aligned_mps +
1961                                                 HCLGE_NON_DCB_ADDITIONAL_BUF;
1962                 buf_alloc->s_buf.self.low = aligned_mps;
1963         }
1964
1965         if (hnae3_dev_dcb_supported(hdev)) {
1966                 hi_thrd = shared_buf - hdev->dv_buf_size;
1967
1968                 if (tc_num <= NEED_RESERVE_TC_NUM)
1969                         hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
1970                                         / BUF_MAX_PERCENT;
1971
1972                 if (tc_num)
1973                         hi_thrd = hi_thrd / tc_num;
1974
1975                 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
1976                 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1977                 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
1978         } else {
1979                 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1980                 lo_thrd = aligned_mps;
1981         }
1982
1983         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1984                 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1985                 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
1986         }
1987
1988         return true;
1989 }
1990
1991 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1992                                 struct hclge_pkt_buf_alloc *buf_alloc)
1993 {
1994         u32 i, total_size;
1995
1996         total_size = hdev->pkt_buf_size;
1997
1998         /* alloc tx buffer for all enabled tc */
1999         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2000                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2001
2002                 if (hdev->hw_tc_map & BIT(i)) {
2003                         if (total_size < hdev->tx_buf_size)
2004                                 return -ENOMEM;
2005
2006                         priv->tx_buf_size = hdev->tx_buf_size;
2007                 } else {
2008                         priv->tx_buf_size = 0;
2009                 }
2010
2011                 total_size -= priv->tx_buf_size;
2012         }
2013
2014         return 0;
2015 }
2016
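/* Compute the private rx buffer size and waterlines for every enabled TC
 * (larger waterlines when @max is true), then check whether the remaining
 * space still fits the shared buffer.
 */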
2017 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
2018                                   struct hclge_pkt_buf_alloc *buf_alloc)
2019 {
2020         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2021         u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2022         unsigned int i;
2023
2024         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2025                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2026
2027                 priv->enable = 0;
2028                 priv->wl.low = 0;
2029                 priv->wl.high = 0;
2030                 priv->buf_size = 0;
2031
2032                 if (!(hdev->hw_tc_map & BIT(i)))
2033                         continue;
2034
2035                 priv->enable = 1;
2036
2037                 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
2038                         priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
2039                         priv->wl.high = roundup(priv->wl.low + aligned_mps,
2040                                                 HCLGE_BUF_SIZE_UNIT);
2041                 } else {
2042                         priv->wl.low = 0;
2043                         priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
2044                                         aligned_mps;
2045                 }
2046
2047                 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
2048         }
2049
2050         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2051 }
2052
2053 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
2054                                           struct hclge_pkt_buf_alloc *buf_alloc)
2055 {
2056         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2057         int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
2058         int i;
2059
2060         /* clear TCs starting from the last one */
2061         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2062                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2063                 unsigned int mask = BIT((unsigned int)i);
2064
2065                 if (hdev->hw_tc_map & mask &&
2066                     !(hdev->tm_info.hw_pfc_map & mask)) {
2067                         /* Clear the private buffer of a TC without PFC */
2068                         priv->wl.low = 0;
2069                         priv->wl.high = 0;
2070                         priv->buf_size = 0;
2071                         priv->enable = 0;
2072                         no_pfc_priv_num--;
2073                 }
2074
2075                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2076                     no_pfc_priv_num == 0)
2077                         break;
2078         }
2079
2080         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2081 }
2082
2083 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2084                                         struct hclge_pkt_buf_alloc *buf_alloc)
2085 {
2086         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2087         int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2088         int i;
2089
2090         /* clear TCs starting from the last one */
2091         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2092                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2093                 unsigned int mask = BIT((unsigned int)i);
2094
2095                 if (hdev->hw_tc_map & mask &&
2096                     hdev->tm_info.hw_pfc_map & mask) {
2097                         /* Reduce the number of PFC TCs with a private buffer */
2098                         priv->wl.low = 0;
2099                         priv->enable = 0;
2100                         priv->wl.high = 0;
2101                         priv->buf_size = 0;
2102                         pfc_priv_num--;
2103                 }
2104
2105                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2106                     pfc_priv_num == 0)
2107                         break;
2108         }
2109
2110         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2111 }
2112
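/* Try to split the whole rx buffer into per-TC private buffers with no
 * shared buffer; return false when the per-TC share would drop below the
 * minimum.
 */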
2113 static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2114                                       struct hclge_pkt_buf_alloc *buf_alloc)
2115 {
2116 #define COMPENSATE_BUFFER       0x3C00
2117 #define COMPENSATE_HALF_MPS_NUM 5
2118 #define PRIV_WL_GAP             0x1800
2119
2120         u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2121         u32 tc_num = hclge_get_tc_num(hdev);
2122         u32 half_mps = hdev->mps >> 1;
2123         u32 min_rx_priv;
2124         unsigned int i;
2125
2126         if (tc_num)
2127                 rx_priv = rx_priv / tc_num;
2128
2129         if (tc_num <= NEED_RESERVE_TC_NUM)
2130                 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2131
2132         min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2133                         COMPENSATE_HALF_MPS_NUM * half_mps;
2134         min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2135         rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2136
2137         if (rx_priv < min_rx_priv)
2138                 return false;
2139
2140         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2141                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2142
2143                 priv->enable = 0;
2144                 priv->wl.low = 0;
2145                 priv->wl.high = 0;
2146                 priv->buf_size = 0;
2147
2148                 if (!(hdev->hw_tc_map & BIT(i)))
2149                         continue;
2150
2151                 priv->enable = 1;
2152                 priv->buf_size = rx_priv;
2153                 priv->wl.high = rx_priv - hdev->dv_buf_size;
2154                 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2155         }
2156
2157         buf_alloc->s_buf.buf_size = 0;
2158
2159         return true;
2160 }
2161
2162 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2163  * @hdev: pointer to struct hclge_dev
2164  * @buf_alloc: pointer to buffer calculation data
2165  * @return: 0: calculation successful, negative: failed
2166  */
2167 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2168                                 struct hclge_pkt_buf_alloc *buf_alloc)
2169 {
2170         /* When DCB is not supported, rx private buffer is not allocated. */
2171         if (!hnae3_dev_dcb_supported(hdev)) {
2172                 u32 rx_all = hdev->pkt_buf_size;
2173
2174                 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2175                 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2176                         return -ENOMEM;
2177
2178                 return 0;
2179         }
2180
2181         if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2182                 return 0;
2183
2184         if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2185                 return 0;
2186
2187         /* try to decrease the buffer size */
2188         if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2189                 return 0;
2190
2191         if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2192                 return 0;
2193
2194         if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2195                 return 0;
2196
2197         return -ENOMEM;
2198 }
2199
2200 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2201                                    struct hclge_pkt_buf_alloc *buf_alloc)
2202 {
2203         struct hclge_rx_priv_buff_cmd *req;
2204         struct hclge_desc desc;
2205         int ret;
2206         int i;
2207
2208         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2209         req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2210
2211         /* Alloc private buffer for each TC */
2212         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2213                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2214
2215                 req->buf_num[i] =
2216                         cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2217                 req->buf_num[i] |=
2218                         cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2219         }
2220
2221         req->shared_buf =
2222                 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2223                             (1 << HCLGE_TC0_PRI_BUF_EN_B));
2224
2225         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2226         if (ret)
2227                 dev_err(&hdev->pdev->dev,
2228                         "rx private buffer alloc cmd failed %d\n", ret);
2229
2230         return ret;
2231 }
2232
2233 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2234                                    struct hclge_pkt_buf_alloc *buf_alloc)
2235 {
2236         struct hclge_rx_priv_wl_buf *req;
2237         struct hclge_priv_buf *priv;
2238         struct hclge_desc desc[2];
2239         int i, j;
2240         int ret;
2241
2242         for (i = 0; i < 2; i++) {
2243                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2244                                            false);
2245                 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2246
2247                 /* The first descriptor sets the NEXT bit to 1 */
2248                 if (i == 0)
2249                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2250                 else
2251                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2252
2253                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2254                         u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2255
2256                         priv = &buf_alloc->priv_buf[idx];
2257                         req->tc_wl[j].high =
2258                                 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2259                         req->tc_wl[j].high |=
2260                                 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2261                         req->tc_wl[j].low =
2262                                 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2263                         req->tc_wl[j].low |=
2264                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2265                 }
2266         }
2267
2268         /* Send 2 descriptors at one time */
2269         ret = hclge_cmd_send(&hdev->hw, desc, 2);
2270         if (ret)
2271                 dev_err(&hdev->pdev->dev,
2272                         "rx private waterline config cmd failed %d\n",
2273                         ret);
2274         return ret;
2275 }
2276
2277 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2278                                     struct hclge_pkt_buf_alloc *buf_alloc)
2279 {
2280         struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2281         struct hclge_rx_com_thrd *req;
2282         struct hclge_desc desc[2];
2283         struct hclge_tc_thrd *tc;
2284         int i, j;
2285         int ret;
2286
2287         for (i = 0; i < 2; i++) {
2288                 hclge_cmd_setup_basic_desc(&desc[i],
2289                                            HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2290                 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2291
2292                 /* The first descriptor sets the NEXT bit to 1 */
2293                 if (i == 0)
2294                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2295                 else
2296                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2297
2298                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2299                         tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2300
2301                         req->com_thrd[j].high =
2302                                 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2303                         req->com_thrd[j].high |=
2304                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2305                         req->com_thrd[j].low =
2306                                 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2307                         req->com_thrd[j].low |=
2308                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2309                 }
2310         }
2311
2312         /* Send 2 descriptors at one time */
2313         ret = hclge_cmd_send(&hdev->hw, desc, 2);
2314         if (ret)
2315                 dev_err(&hdev->pdev->dev,
2316                         "common threshold config cmd failed %d\n", ret);
2317         return ret;
2318 }
2319
2320 static int hclge_common_wl_config(struct hclge_dev *hdev,
2321                                   struct hclge_pkt_buf_alloc *buf_alloc)
2322 {
2323         struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2324         struct hclge_rx_com_wl *req;
2325         struct hclge_desc desc;
2326         int ret;
2327
2328         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2329
2330         req = (struct hclge_rx_com_wl *)desc.data;
2331         req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2332         req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2333
2334         req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2335         req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2336
2337         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2338         if (ret)
2339                 dev_err(&hdev->pdev->dev,
2340                         "common waterline config cmd failed %d\n", ret);
2341
2342         return ret;
2343 }
2344
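/* Calculate and program the tx/rx packet buffer split, waterlines and
 * thresholds in hardware.
 */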
2345 int hclge_buffer_alloc(struct hclge_dev *hdev)
2346 {
2347         struct hclge_pkt_buf_alloc *pkt_buf;
2348         int ret;
2349
2350         pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2351         if (!pkt_buf)
2352                 return -ENOMEM;
2353
2354         ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2355         if (ret) {
2356                 dev_err(&hdev->pdev->dev,
2357                         "could not calc tx buffer size for all TCs %d\n", ret);
2358                 goto out;
2359         }
2360
2361         ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2362         if (ret) {
2363                 dev_err(&hdev->pdev->dev,
2364                         "could not alloc tx buffers %d\n", ret);
2365                 goto out;
2366         }
2367
2368         ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2369         if (ret) {
2370                 dev_err(&hdev->pdev->dev,
2371                         "could not calc rx priv buffer size for all TCs %d\n",
2372                         ret);
2373                 goto out;
2374         }
2375
2376         ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2377         if (ret) {
2378                 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2379                         ret);
2380                 goto out;
2381         }
2382
2383         if (hnae3_dev_dcb_supported(hdev)) {
2384                 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2385                 if (ret) {
2386                         dev_err(&hdev->pdev->dev,
2387                                 "could not configure rx private waterline %d\n",
2388                                 ret);
2389                         goto out;
2390                 }
2391
2392                 ret = hclge_common_thrd_config(hdev, pkt_buf);
2393                 if (ret) {
2394                         dev_err(&hdev->pdev->dev,
2395                                 "could not configure common threshold %d\n",
2396                                 ret);
2397                         goto out;
2398                 }
2399         }
2400
2401         ret = hclge_common_wl_config(hdev, pkt_buf);
2402         if (ret)
2403                 dev_err(&hdev->pdev->dev,
2404                         "could not configure common waterline %d\n", ret);
2405
2406 out:
2407         kfree(pkt_buf);
2408         return ret;
2409 }
2410
2411 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2412 {
2413         struct hnae3_handle *roce = &vport->roce;
2414         struct hnae3_handle *nic = &vport->nic;
2415
2416         roce->rinfo.num_vectors = vport->back->num_roce_msi;
2417
2418         if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2419             vport->back->num_msi_left == 0)
2420                 return -EINVAL;
2421
2422         roce->rinfo.base_vector = vport->back->roce_base_vector;
2423
2424         roce->rinfo.netdev = nic->kinfo.netdev;
2425         roce->rinfo.roce_io_base = vport->back->hw.io_base;
2426
2427         roce->pdev = nic->pdev;
2428         roce->ae_algo = nic->ae_algo;
2429         roce->numa_node_mask = nic->numa_node_mask;
2430
2431         return 0;
2432 }
2433
2434 static int hclge_init_msi(struct hclge_dev *hdev)
2435 {
2436         struct pci_dev *pdev = hdev->pdev;
2437         int vectors;
2438         int i;
2439
2440         vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2441                                         hdev->num_msi,
2442                                         PCI_IRQ_MSI | PCI_IRQ_MSIX);
2443         if (vectors < 0) {
2444                 dev_err(&pdev->dev,
2445                         "failed(%d) to allocate MSI/MSI-X vectors\n",
2446                         vectors);
2447                 return vectors;
2448         }
2449         if (vectors < hdev->num_msi)
2450                 dev_warn(&hdev->pdev->dev,
2451                          "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2452                          hdev->num_msi, vectors);
2453
2454         hdev->num_msi = vectors;
2455         hdev->num_msi_left = vectors;
2456
2457         hdev->base_msi_vector = pdev->irq;
2458         hdev->roce_base_vector = hdev->base_msi_vector +
2459                                 hdev->roce_base_msix_offset;
2460
2461         hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2462                                            sizeof(u16), GFP_KERNEL);
2463         if (!hdev->vector_status) {
2464                 pci_free_irq_vectors(pdev);
2465                 return -ENOMEM;
2466         }
2467
2468         for (i = 0; i < hdev->num_msi; i++)
2469                 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2470
2471         hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2472                                         sizeof(int), GFP_KERNEL);
2473         if (!hdev->vector_irq) {
2474                 pci_free_irq_vectors(pdev);
2475                 return -ENOMEM;
2476         }
2477
2478         return 0;
2479 }
2480
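/* Half duplex is only meaningful for 10M/100M; force full duplex for all
 * other speeds.
 */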
2481 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2482 {
2483         if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2484                 duplex = HCLGE_MAC_FULL;
2485
2486         return duplex;
2487 }
2488
2489 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2490                                       u8 duplex)
2491 {
2492         struct hclge_config_mac_speed_dup_cmd *req;
2493         struct hclge_desc desc;
2494         int ret;
2495
2496         req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2497
2498         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2499
2500         if (duplex)
2501                 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2502
2503         switch (speed) {
2504         case HCLGE_MAC_SPEED_10M:
2505                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2506                                 HCLGE_CFG_SPEED_S, 6);
2507                 break;
2508         case HCLGE_MAC_SPEED_100M:
2509                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2510                                 HCLGE_CFG_SPEED_S, 7);
2511                 break;
2512         case HCLGE_MAC_SPEED_1G:
2513                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2514                                 HCLGE_CFG_SPEED_S, 0);
2515                 break;
2516         case HCLGE_MAC_SPEED_10G:
2517                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2518                                 HCLGE_CFG_SPEED_S, 1);
2519                 break;
2520         case HCLGE_MAC_SPEED_25G:
2521                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2522                                 HCLGE_CFG_SPEED_S, 2);
2523                 break;
2524         case HCLGE_MAC_SPEED_40G:
2525                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2526                                 HCLGE_CFG_SPEED_S, 3);
2527                 break;
2528         case HCLGE_MAC_SPEED_50G:
2529                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2530                                 HCLGE_CFG_SPEED_S, 4);
2531                 break;
2532         case HCLGE_MAC_SPEED_100G:
2533                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2534                                 HCLGE_CFG_SPEED_S, 5);
2535                 break;
2536         case HCLGE_MAC_SPEED_200G:
2537                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2538                                 HCLGE_CFG_SPEED_S, 8);
2539                 break;
2540         default:
2541                 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2542                 return -EINVAL;
2543         }
2544
2545         hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2546                       1);
2547
2548         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2549         if (ret) {
2550                 dev_err(&hdev->pdev->dev,
2551                         "mac speed/duplex config cmd failed %d.\n", ret);
2552                 return ret;
2553         }
2554
2555         return 0;
2556 }
2557
2558 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2559 {
2560         struct hclge_mac *mac = &hdev->hw.mac;
2561         int ret;
2562
2563         duplex = hclge_check_speed_dup(duplex, speed);
2564         if (!mac->support_autoneg && mac->speed == speed &&
2565             mac->duplex == duplex)
2566                 return 0;
2567
2568         ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2569         if (ret)
2570                 return ret;
2571
2572         hdev->hw.mac.speed = speed;
2573         hdev->hw.mac.duplex = duplex;
2574
2575         return 0;
2576 }
2577
2578 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2579                                      u8 duplex)
2580 {
2581         struct hclge_vport *vport = hclge_get_vport(handle);
2582         struct hclge_dev *hdev = vport->back;
2583
2584         return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2585 }
2586
2587 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2588 {
2589         struct hclge_config_auto_neg_cmd *req;
2590         struct hclge_desc desc;
2591         u32 flag = 0;
2592         int ret;
2593
2594         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2595
2596         req = (struct hclge_config_auto_neg_cmd *)desc.data;
2597         if (enable)
2598                 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2599         req->cfg_an_cmd_flag = cpu_to_le32(flag);
2600
2601         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2602         if (ret)
2603                 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2604                         ret);
2605
2606         return ret;
2607 }
2608
2609 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2610 {
2611         struct hclge_vport *vport = hclge_get_vport(handle);
2612         struct hclge_dev *hdev = vport->back;
2613
2614         if (!hdev->hw.mac.support_autoneg) {
2615                 if (enable) {
2616                         dev_err(&hdev->pdev->dev,
2617                                 "autoneg is not supported by current port\n");
2618                         return -EOPNOTSUPP;
2619                 } else {
2620                         return 0;
2621                 }
2622         }
2623
2624         return hclge_set_autoneg_en(hdev, enable);
2625 }
2626
2627 static int hclge_get_autoneg(struct hnae3_handle *handle)
2628 {
2629         struct hclge_vport *vport = hclge_get_vport(handle);
2630         struct hclge_dev *hdev = vport->back;
2631         struct phy_device *phydev = hdev->hw.mac.phydev;
2632
2633         if (phydev)
2634                 return phydev->autoneg;
2635
2636         return hdev->hw.mac.autoneg;
2637 }
2638
2639 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2640 {
2641         struct hclge_vport *vport = hclge_get_vport(handle);
2642         struct hclge_dev *hdev = vport->back;
2643         int ret;
2644
2645         dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2646
2647         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2648         if (ret)
2649                 return ret;
2650         return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2651 }
2652
2653 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2654 {
2655         struct hclge_vport *vport = hclge_get_vport(handle);
2656         struct hclge_dev *hdev = vport->back;
2657
2658         if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2659                 return hclge_set_autoneg_en(hdev, !halt);
2660
2661         return 0;
2662 }
2663
2664 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2665 {
2666         struct hclge_config_fec_cmd *req;
2667         struct hclge_desc desc;
2668         int ret;
2669
2670         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2671
2672         req = (struct hclge_config_fec_cmd *)desc.data;
2673         if (fec_mode & BIT(HNAE3_FEC_AUTO))
2674                 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2675         if (fec_mode & BIT(HNAE3_FEC_RS))
2676                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2677                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2678         if (fec_mode & BIT(HNAE3_FEC_BASER))
2679                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2680                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2681
2682         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2683         if (ret)
2684                 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2685
2686         return ret;
2687 }
2688
2689 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2690 {
2691         struct hclge_vport *vport = hclge_get_vport(handle);
2692         struct hclge_dev *hdev = vport->back;
2693         struct hclge_mac *mac = &hdev->hw.mac;
2694         int ret;
2695
2696         if (fec_mode && !(mac->fec_ability & fec_mode)) {
2697                 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2698                 return -EINVAL;
2699         }
2700
2701         ret = hclge_set_fec_hw(hdev, fec_mode);
2702         if (ret)
2703                 return ret;
2704
2705         mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2706         return 0;
2707 }
2708
2709 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2710                           u8 *fec_mode)
2711 {
2712         struct hclge_vport *vport = hclge_get_vport(handle);
2713         struct hclge_dev *hdev = vport->back;
2714         struct hclge_mac *mac = &hdev->hw.mac;
2715
2716         if (fec_ability)
2717                 *fec_ability = mac->fec_ability;
2718         if (fec_mode)
2719                 *fec_mode = mac->fec_mode;
2720 }
2721
2722 static int hclge_mac_init(struct hclge_dev *hdev)
2723 {
2724         struct hclge_mac *mac = &hdev->hw.mac;
2725         int ret;
2726
2727         hdev->support_sfp_query = true;
2728         hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2729         ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2730                                          hdev->hw.mac.duplex);
2731         if (ret)
2732                 return ret;
2733
2734         if (hdev->hw.mac.support_autoneg) {
2735                 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2736                 if (ret)
2737                         return ret;
2738         }
2739
2740         mac->link = 0;
2741
2742         if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2743                 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2744                 if (ret)
2745                         return ret;
2746         }
2747
2748         ret = hclge_set_mac_mtu(hdev, hdev->mps);
2749         if (ret) {
2750                 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2751                 return ret;
2752         }
2753
2754         ret = hclge_set_default_loopback(hdev);
2755         if (ret)
2756                 return ret;
2757
2758         ret = hclge_buffer_alloc(hdev);
2759         if (ret)
2760                 dev_err(&hdev->pdev->dev,
2761                         "allocate buffer fail, ret=%d\n", ret);
2762
2763         return ret;
2764 }
2765
2766 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2767 {
2768         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2769             !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2770                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2771                                     hclge_wq, &hdev->service_task, 0);
2772 }
2773
2774 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2775 {
2776         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2777             !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2778                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2779                                     hclge_wq, &hdev->service_task, 0);
2780 }
2781
2782 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2783 {
2784         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2785             !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2786                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2787                                     hclge_wq, &hdev->service_task,
2788                                     delay_time);
2789 }
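
/* Illustrative note, not part of the driver: the three helpers above all kick
 * the same delayed work (hdev->service_task) on the first CPU of
 * hdev->affinity_mask; the HCLGE_STATE_*_SCHED bits only record *why* the
 * work was scheduled. A hypothetical caller that wants the periodic work to
 * run about one second from now would simply do:
 *
 *      hclge_task_schedule(hdev, round_jiffies_relative(HZ));
 *
 * whereas the mailbox and reset variants always pass a delay of 0 so the
 * work runs as soon as possible.
 */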
2790
2791 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
2792 {
2793         struct hclge_link_status_cmd *req;
2794         struct hclge_desc desc;
2795         int ret;
2796
2797         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2798         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2799         if (ret) {
2800                 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2801                         ret);
2802                 return ret;
2803         }
2804
2805         req = (struct hclge_link_status_cmd *)desc.data;
2806         *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2807                 HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
2808
2809         return 0;
2810 }
2811
2812 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
2813 {
2814         struct phy_device *phydev = hdev->hw.mac.phydev;
2815
2816         *link_status = HCLGE_LINK_STATUS_DOWN;
2817
2818         if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2819                 return 0;
2820
2821         if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
2822                 return 0;
2823
2824         return hclge_get_mac_link_status(hdev, link_status);
2825 }
2826
2827 static void hclge_update_link_status(struct hclge_dev *hdev)
2828 {
2829         struct hnae3_client *rclient = hdev->roce_client;
2830         struct hnae3_client *client = hdev->nic_client;
2831         struct hnae3_handle *rhandle;
2832         struct hnae3_handle *handle;
2833         int state;
2834         int ret;
2835         int i;
2836
2837         if (!client)
2838                 return;
2839
2840         if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2841                 return;
2842
2843         ret = hclge_get_mac_phy_link(hdev, &state);
2844         if (ret) {
2845                 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2846                 return;
2847         }
2848
2849         if (state != hdev->hw.mac.link) {
2850                 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2851                         handle = &hdev->vport[i].nic;
2852                         client->ops->link_status_change(handle, state);
2853                         hclge_config_mac_tnl_int(hdev, state);
2854                         rhandle = &hdev->vport[i].roce;
2855                         if (rclient && rclient->ops->link_status_change)
2856                                 rclient->ops->link_status_change(rhandle,
2857                                                                  state);
2858                 }
2859                 hdev->hw.mac.link = state;
2860         }
2861
2862         clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2863 }
2864
2865 static void hclge_update_port_capability(struct hclge_mac *mac)
2866 {
2867         /* update fec ability by speed */
2868         hclge_convert_setting_fec(mac);
2869
2870         /* firmware cannot identify backplane type, so the media type
2871          * read from the configuration can help deal with it
2872          */
2873         if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2874             mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2875                 mac->module_type = HNAE3_MODULE_TYPE_KR;
2876         else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2877                 mac->module_type = HNAE3_MODULE_TYPE_TP;
2878
2879         if (mac->support_autoneg) {
2880                 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2881                 linkmode_copy(mac->advertising, mac->supported);
2882         } else {
2883                 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2884                                    mac->supported);
2885                 linkmode_zero(mac->advertising);
2886         }
2887 }
2888
2889 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2890 {
2891         struct hclge_sfp_info_cmd *resp;
2892         struct hclge_desc desc;
2893         int ret;
2894
2895         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2896         resp = (struct hclge_sfp_info_cmd *)desc.data;
2897         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2898         if (ret == -EOPNOTSUPP) {
2899                 dev_warn(&hdev->pdev->dev,
2900                          "IMP does not support getting SFP speed %d\n", ret);
2901                 return ret;
2902         } else if (ret) {
2903                 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2904                 return ret;
2905         }
2906
2907         *speed = le32_to_cpu(resp->speed);
2908
2909         return 0;
2910 }
2911
2912 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2913 {
2914         struct hclge_sfp_info_cmd *resp;
2915         struct hclge_desc desc;
2916         int ret;
2917
2918         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2919         resp = (struct hclge_sfp_info_cmd *)desc.data;
2920
2921         resp->query_type = QUERY_ACTIVE_SPEED;
2922
2923         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2924         if (ret == -EOPNOTSUPP) {
2925                 dev_warn(&hdev->pdev->dev,
2926                          "IMP does not support getting SFP info %d\n", ret);
2927                 return ret;
2928         } else if (ret) {
2929                 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2930                 return ret;
2931         }
2932
2933         /* In some cases, the MAC speed obtained from the IMP may be 0; it
2934          * shouldn't be assigned to mac->speed.
2935          */
2936         if (!le32_to_cpu(resp->speed))
2937                 return 0;
2938
2939         mac->speed = le32_to_cpu(resp->speed);
2940         /* if resp->speed_ability is 0, it means the firmware is an old
2941          * version, so do not update these parameters
2942          */
2943         if (resp->speed_ability) {
2944                 mac->module_type = le32_to_cpu(resp->module_type);
2945                 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2946                 mac->autoneg = resp->autoneg;
2947                 mac->support_autoneg = resp->autoneg_ability;
2948                 mac->speed_type = QUERY_ACTIVE_SPEED;
2949                 if (!resp->active_fec)
2950                         mac->fec_mode = 0;
2951                 else
2952                         mac->fec_mode = BIT(resp->active_fec);
2953         } else {
2954                 mac->speed_type = QUERY_SFP_SPEED;
2955         }
2956
2957         return 0;
2958 }
2959
2960 static int hclge_update_port_info(struct hclge_dev *hdev)
2961 {
2962         struct hclge_mac *mac = &hdev->hw.mac;
2963         int speed = HCLGE_MAC_SPEED_UNKNOWN;
2964         int ret;
2965
2966         /* get the port info from SFP cmd if not copper port */
2967         if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2968                 return 0;
2969
2970         /* if IMP does not support getting SFP/qSFP info, return directly */
2971         if (!hdev->support_sfp_query)
2972                 return 0;
2973
2974         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
2975                 ret = hclge_get_sfp_info(hdev, mac);
2976         else
2977                 ret = hclge_get_sfp_speed(hdev, &speed);
2978
2979         if (ret == -EOPNOTSUPP) {
2980                 hdev->support_sfp_query = false;
2981                 return ret;
2982         } else if (ret) {
2983                 return ret;
2984         }
2985
2986         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
2987                 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2988                         hclge_update_port_capability(mac);
2989                         return 0;
2990                 }
2991                 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2992                                                HCLGE_MAC_FULL);
2993         } else {
2994                 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2995                         return 0; /* do nothing if no SFP */
2996
2997                 /* must configure full duplex for SFP */
2998                 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2999         }
3000 }
3001
3002 static int hclge_get_status(struct hnae3_handle *handle)
3003 {
3004         struct hclge_vport *vport = hclge_get_vport(handle);
3005         struct hclge_dev *hdev = vport->back;
3006
3007         hclge_update_link_status(hdev);
3008
3009         return hdev->hw.mac.link;
3010 }
3011
3012 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
3013 {
3014         if (!pci_num_vf(hdev->pdev)) {
3015                 dev_err(&hdev->pdev->dev,
3016                         "SRIOV is disabled, cannot get vport(%d) info.\n", vf);
3017                 return NULL;
3018         }
3019
3020         if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3021                 dev_err(&hdev->pdev->dev,
3022                         "vf id(%d) is out of range(0 <= vfid < %d)\n",
3023                         vf, pci_num_vf(hdev->pdev));
3024                 return NULL;
3025         }
3026
3027         /* VF index starts from 1 in the vport array */
3028         vf += HCLGE_VF_VPORT_START_NUM;
3029         return &hdev->vport[vf];
3030 }
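
/* Illustrative note, not part of the driver: the user-visible VF id is
 * zero-based while hdev->vport[] reserves index 0 for the PF, so the lookup
 * above is a simple offset by HCLGE_VF_VPORT_START_NUM. For example,
 * assuming at least four VFs are enabled via SR-IOV:
 *
 *      vf = 0  ->  &hdev->vport[1]     (first VF)
 *      vf = 3  ->  &hdev->vport[4]
 */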
3031
3032 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3033                                struct ifla_vf_info *ivf)
3034 {
3035         struct hclge_vport *vport = hclge_get_vport(handle);
3036         struct hclge_dev *hdev = vport->back;
3037
3038         vport = hclge_get_vf_vport(hdev, vf);
3039         if (!vport)
3040                 return -EINVAL;
3041
3042         ivf->vf = vf;
3043         ivf->linkstate = vport->vf_info.link_state;
3044         ivf->spoofchk = vport->vf_info.spoofchk;
3045         ivf->trusted = vport->vf_info.trusted;
3046         ivf->min_tx_rate = 0;
3047         ivf->max_tx_rate = vport->vf_info.max_tx_rate;
3048         ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3049         ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3050         ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
3051         ether_addr_copy(ivf->mac, vport->vf_info.mac);
3052
3053         return 0;
3054 }
3055
3056 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3057                                    int link_state)
3058 {
3059         struct hclge_vport *vport = hclge_get_vport(handle);
3060         struct hclge_dev *hdev = vport->back;
3061
3062         vport = hclge_get_vf_vport(hdev, vf);
3063         if (!vport)
3064                 return -EINVAL;
3065
3066         vport->vf_info.link_state = link_state;
3067
3068         return 0;
3069 }
3070
3071 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
3072 {
3073         u32 cmdq_src_reg, msix_src_reg;
3074
3075         /* fetch the events from their corresponding regs */
3076         cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
3077         msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
3078
3079         /* Assumption: if by any chance reset and mailbox events are reported
3080          * together, then we only process the reset event in this pass and
3081          * defer the processing of the mailbox events. Since we will not have
3082          * cleared the RX CMDQ event this time, we will receive another
3083          * interrupt from H/W just for the mailbox.
3084          *
3085          * check for vector0 reset event sources
3086          */
3087         if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
3088                 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3089                 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3090                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3091                 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3092                 hdev->rst_stats.imp_rst_cnt++;
3093                 return HCLGE_VECTOR0_EVENT_RST;
3094         }
3095
3096         if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
3097                 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3098                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3099                 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3100                 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3101                 hdev->rst_stats.global_rst_cnt++;
3102                 return HCLGE_VECTOR0_EVENT_RST;
3103         }
3104
3105         /* check for vector0 msix event source */
3106         if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
3107                 *clearval = msix_src_reg;
3108                 return HCLGE_VECTOR0_EVENT_ERR;
3109         }
3110
3111         /* check for vector0 mailbox(=CMDQ RX) event source */
3112         if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3113                 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3114                 *clearval = cmdq_src_reg;
3115                 return HCLGE_VECTOR0_EVENT_MBX;
3116         }
3117
3118         /* print other vector0 event source */
3119         dev_info(&hdev->pdev->dev,
3120                  "CMDQ INT status:0x%x, other INT status:0x%x\n",
3121                  cmdq_src_reg, msix_src_reg);
3122         *clearval = msix_src_reg;
3123
3124         return HCLGE_VECTOR0_EVENT_OTHER;
3125 }
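
/* Illustrative note, not part of the driver: the checks above give vector 0
 * sources a fixed priority, roughly:
 *
 *      IMP reset > global reset > MSI-X error > mailbox (CMDQ RX) > other
 *
 * so if, say, an IMP reset and a mailbox interrupt are raised together, only
 * the reset is reported in this pass; the uncleared CMDQ RX bit re-triggers
 * the interrupt and the mailbox is handled on the next invocation.
 */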
3126
3127 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3128                                     u32 regclr)
3129 {
3130         switch (event_type) {
3131         case HCLGE_VECTOR0_EVENT_RST:
3132                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3133                 break;
3134         case HCLGE_VECTOR0_EVENT_MBX:
3135                 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3136                 break;
3137         default:
3138                 break;
3139         }
3140 }
3141
3142 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3143 {
3144         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3145                                 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3146                                 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3147                                 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3148         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3149 }
3150
3151 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3152 {
3153         writel(enable ? 1 : 0, vector->addr);
3154 }
3155
3156 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3157 {
3158         struct hclge_dev *hdev = data;
3159         u32 clearval = 0;
3160         u32 event_cause;
3161
3162         hclge_enable_vector(&hdev->misc_vector, false);
3163         event_cause = hclge_check_event_cause(hdev, &clearval);
3164
3165         /* vector 0 interrupt is shared with reset and mailbox source events. */
3166         switch (event_cause) {
3167         case HCLGE_VECTOR0_EVENT_ERR:
3168                 /* we do not know what type of reset is required now. This can
3169                  * only be decided after we fetch the type of errors which
3170                  * caused this event. Therefore, we do the following for now:
3171                  * 1. Assert the HNAE3_UNKNOWN_RESET type of reset. This means
3172                  *    we have deferred the choice of reset type.
3173                  * 2. Schedule the reset service task.
3174                  * 3. When the service task receives HNAE3_UNKNOWN_RESET, it
3175                  *    will fetch the correct type of reset. This is done by
3176                  *    first decoding the types of errors.
3177                  */
3178                 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3179                 fallthrough;
3180         case HCLGE_VECTOR0_EVENT_RST:
3181                 hclge_reset_task_schedule(hdev);
3182                 break;
3183         case HCLGE_VECTOR0_EVENT_MBX:
3184                 /* If we are here, then either:
3185                  * 1. We are not handling any mbx task and none is
3186                  *    scheduled either,
3187                  *                        OR
3188                  * 2. We could be handling an mbx task but nothing more is
3189                  *    scheduled.
3190                  * In both cases, we should schedule the mbx task, as there
3191                  * are more mbx messages reported by this interrupt.
3192                  */
3193                 hclge_mbx_task_schedule(hdev);
3194                 break;
3195         default:
3196                 dev_warn(&hdev->pdev->dev,
3197                          "received unknown or unhandled event of vector0\n");
3198                 break;
3199         }
3200
3201         hclge_clear_event_cause(hdev, event_cause, clearval);
3202
3203         /* Enable the interrupt if it is not caused by reset. When clearval
3204          * is equal to 0, it means the interrupt status may have been cleared
3205          * by hardware before the driver read the status register. In this
3206          * case, the vector0 interrupt should also be enabled.
3207          */
3208         if (!clearval ||
3209             event_cause == HCLGE_VECTOR0_EVENT_MBX) {
3210                 hclge_enable_vector(&hdev->misc_vector, true);
3211         }
3212
3213         return IRQ_HANDLED;
3214 }
3215
3216 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3217 {
3218         if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3219                 dev_warn(&hdev->pdev->dev,
3220                          "vector(vector_id %d) has been freed.\n", vector_id);
3221                 return;
3222         }
3223
3224         hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3225         hdev->num_msi_left += 1;
3226         hdev->num_msi_used -= 1;
3227 }
3228
3229 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3230 {
3231         struct hclge_misc_vector *vector = &hdev->misc_vector;
3232
3233         vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3234
3235         vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3236         hdev->vector_status[0] = 0;
3237
3238         hdev->num_msi_left -= 1;
3239         hdev->num_msi_used += 1;
3240 }
3241
3242 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3243                                       const cpumask_t *mask)
3244 {
3245         struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3246                                               affinity_notify);
3247
3248         cpumask_copy(&hdev->affinity_mask, mask);
3249 }
3250
3251 static void hclge_irq_affinity_release(struct kref *ref)
3252 {
3253 }
3254
3255 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3256 {
3257         irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3258                               &hdev->affinity_mask);
3259
3260         hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3261         hdev->affinity_notify.release = hclge_irq_affinity_release;
3262         irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3263                                   &hdev->affinity_notify);
3264 }
3265
3266 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3267 {
3268         irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3269         irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3270 }
3271
3272 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3273 {
3274         int ret;
3275
3276         hclge_get_misc_vector(hdev);
3277
3278         /* this irq is freed explicitly in hclge_misc_irq_uninit() */
3279         snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3280                  HCLGE_NAME, pci_name(hdev->pdev));
3281         ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3282                           0, hdev->misc_vector.name, hdev);
3283         if (ret) {
3284                 hclge_free_vector(hdev, 0);
3285                 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3286                         hdev->misc_vector.vector_irq);
3287         }
3288
3289         return ret;
3290 }
3291
3292 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3293 {
3294         free_irq(hdev->misc_vector.vector_irq, hdev);
3295         hclge_free_vector(hdev, 0);
3296 }
3297
3298 int hclge_notify_client(struct hclge_dev *hdev,
3299                         enum hnae3_reset_notify_type type)
3300 {
3301         struct hnae3_client *client = hdev->nic_client;
3302         u16 i;
3303
3304         if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3305                 return 0;
3306
3307         if (!client->ops->reset_notify)
3308                 return -EOPNOTSUPP;
3309
3310         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3311                 struct hnae3_handle *handle = &hdev->vport[i].nic;
3312                 int ret;
3313
3314                 ret = client->ops->reset_notify(handle, type);
3315                 if (ret) {
3316                         dev_err(&hdev->pdev->dev,
3317                                 "notify nic client failed %d(%d)\n", type, ret);
3318                         return ret;
3319                 }
3320         }
3321
3322         return 0;
3323 }
3324
3325 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3326                                     enum hnae3_reset_notify_type type)
3327 {
3328         struct hnae3_client *client = hdev->roce_client;
3329         int ret;
3330         u16 i;
3331
3332         if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3333                 return 0;
3334
3335         if (!client->ops->reset_notify)
3336                 return -EOPNOTSUPP;
3337
3338         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3339                 struct hnae3_handle *handle = &hdev->vport[i].roce;
3340
3341                 ret = client->ops->reset_notify(handle, type);
3342                 if (ret) {
3343                         dev_err(&hdev->pdev->dev,
3344                                 "notify roce client failed %d(%d)\n",
3345                                 type, ret);
3346                         return ret;
3347                 }
3348         }
3349
3350         return ret;
3351 }
3352
3353 static int hclge_reset_wait(struct hclge_dev *hdev)
3354 {
3355 #define HCLGE_RESET_WAIT_MS     100
3356 #define HCLGE_RESET_WAIT_CNT    350
3357
3358         u32 val, reg, reg_bit;
3359         u32 cnt = 0;
3360
3361         switch (hdev->reset_type) {
3362         case HNAE3_IMP_RESET:
3363                 reg = HCLGE_GLOBAL_RESET_REG;
3364                 reg_bit = HCLGE_IMP_RESET_BIT;
3365                 break;
3366         case HNAE3_GLOBAL_RESET:
3367                 reg = HCLGE_GLOBAL_RESET_REG;
3368                 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3369                 break;
3370         case HNAE3_FUNC_RESET:
3371                 reg = HCLGE_FUN_RST_ING;
3372                 reg_bit = HCLGE_FUN_RST_ING_B;
3373                 break;
3374         default:
3375                 dev_err(&hdev->pdev->dev,
3376                         "Wait for unsupported reset type: %d\n",
3377                         hdev->reset_type);
3378                 return -EINVAL;
3379         }
3380
3381         val = hclge_read_dev(&hdev->hw, reg);
3382         while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3383                 msleep(HCLGE_RESET_WAIT_MS);
3384                 val = hclge_read_dev(&hdev->hw, reg);
3385                 cnt++;
3386         }
3387
3388         if (cnt >= HCLGE_RESET_WAIT_CNT) {
3389                 dev_warn(&hdev->pdev->dev,
3390                          "Wait for reset timed out: %d\n", hdev->reset_type);
3391                 return -EBUSY;
3392         }
3393
3394         return 0;
3395 }
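
/* Illustrative note, not part of the driver: with the constants above the
 * poll loop waits at most
 *
 *      HCLGE_RESET_WAIT_CNT * 100 ms = 350 * 100 ms = 35 s
 *
 * for the hardware to clear the relevant reset bit before giving up with
 * -EBUSY.
 */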
3396
3397 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3398 {
3399         struct hclge_vf_rst_cmd *req;
3400         struct hclge_desc desc;
3401
3402         req = (struct hclge_vf_rst_cmd *)desc.data;
3403         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3404         req->dest_vfid = func_id;
3405
3406         if (reset)
3407                 req->vf_rst = 0x1;
3408
3409         return hclge_cmd_send(&hdev->hw, &desc, 1);
3410 }
3411
3412 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3413 {
3414         int i;
3415
3416         for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3417                 struct hclge_vport *vport = &hdev->vport[i];
3418                 int ret;
3419
3420                 /* Send cmd to set/clear VF's FUNC_RST_ING */
3421                 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3422                 if (ret) {
3423                         dev_err(&hdev->pdev->dev,
3424                                 "set vf(%u) rst failed %d!\n",
3425                                 vport->vport_id, ret);
3426                         return ret;
3427                 }
3428
3429                 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3430                         continue;
3431
3432                 /* Inform VF to process the reset.
3433                  * hclge_inform_reset_assert_to_vf may fail if VF
3434                  * driver is not loaded.
3435                  */
3436                 ret = hclge_inform_reset_assert_to_vf(vport);
3437                 if (ret)
3438                         dev_warn(&hdev->pdev->dev,
3439                                  "inform reset to vf(%u) failed %d!\n",
3440                                  vport->vport_id, ret);
3441         }
3442
3443         return 0;
3444 }
3445
3446 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3447 {
3448         if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3449             test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3450             test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3451                 return;
3452
3453         hclge_mbx_handler(hdev);
3454
3455         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3456 }
3457
3458 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3459 {
3460         struct hclge_pf_rst_sync_cmd *req;
3461         struct hclge_desc desc;
3462         int cnt = 0;
3463         int ret;
3464
3465         req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3466         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3467
3468         do {
3469                 /* the VF needs to down its netdev by mbx during PF or FLR reset */
3470                 hclge_mailbox_service_task(hdev);
3471
3472                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3473                 /* for compatibility with old firmware, wait
3474                  * 100 ms for the VF to stop IO
3475                  */
3476                 if (ret == -EOPNOTSUPP) {
3477                         msleep(HCLGE_RESET_SYNC_TIME);
3478                         return;
3479                 } else if (ret) {
3480                         dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3481                                  ret);
3482                         return;
3483                 } else if (req->all_vf_ready) {
3484                         return;
3485                 }
3486                 msleep(HCLGE_PF_RESET_SYNC_TIME);
3487                 hclge_cmd_reuse_desc(&desc, true);
3488         } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3489
3490         dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3491 }
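
/* Illustrative note, not part of the driver: the sync loop above polls the
 * firmware until every VF reports that it has stopped IO. Assuming the
 * default sync constants defined for this driver (HCLGE_PF_RESET_SYNC_CNT
 * retries of HCLGE_PF_RESET_SYNC_TIME ms, i.e. 1500 * 20 ms), the PF waits
 * roughly 30 seconds before warning about a timeout, and each iteration also
 * services the mailbox so the VFs can actually receive the reset notification.
 */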
3492
3493 void hclge_report_hw_error(struct hclge_dev *hdev,
3494                            enum hnae3_hw_error_type type)
3495 {
3496         struct hnae3_client *client = hdev->nic_client;
3497         u16 i;
3498
3499         if (!client || !client->ops->process_hw_error ||
3500             !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3501                 return;
3502
3503         for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3504                 client->ops->process_hw_error(&hdev->vport[i].nic, type);
3505 }
3506
3507 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3508 {
3509         u32 reg_val;
3510
3511         reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3512         if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3513                 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3514                 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3515                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3516         }
3517
3518         if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3519                 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3520                 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3521                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3522         }
3523 }
3524
3525 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3526 {
3527         struct hclge_desc desc;
3528         struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3529         int ret;
3530
3531         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3532         hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3533         req->fun_reset_vfid = func_id;
3534
3535         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3536         if (ret)
3537                 dev_err(&hdev->pdev->dev,
3538                         "send function reset cmd fail, status = %d\n", ret);
3539
3540         return ret;
3541 }
3542
3543 static void hclge_do_reset(struct hclge_dev *hdev)
3544 {
3545         struct hnae3_handle *handle = &hdev->vport[0].nic;
3546         struct pci_dev *pdev = hdev->pdev;
3547         u32 val;
3548
3549         if (hclge_get_hw_reset_stat(handle)) {
3550                 dev_info(&pdev->dev, "hardware reset not finished\n");
3551                 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3552                          hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3553                          hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3554                 return;
3555         }
3556
3557         switch (hdev->reset_type) {
3558         case HNAE3_GLOBAL_RESET:
3559                 dev_info(&pdev->dev, "global reset requested\n");
3560                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3561                 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3562                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3563                 break;
3564         case HNAE3_FUNC_RESET:
3565                 dev_info(&pdev->dev, "PF reset requested\n");
3566                 /* schedule again to check later */
3567                 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3568                 hclge_reset_task_schedule(hdev);
3569                 break;
3570         default:
3571                 dev_warn(&pdev->dev,
3572                          "unsupported reset type: %d\n", hdev->reset_type);
3573                 break;
3574         }
3575 }
3576
3577 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3578                                                    unsigned long *addr)
3579 {
3580         enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3581         struct hclge_dev *hdev = ae_dev->priv;
3582
3583         /* first, resolve any unknown reset type to the known type(s) */
3584         if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3585                 u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
3586                                         HCLGE_MISC_VECTOR_INT_STS);
3587                 /* we will intentionally ignore any errors from this function
3588                  * as we will end up in *some* reset request in any case
3589                  */
3590                 if (hclge_handle_hw_msix_error(hdev, addr))
3591                         dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3592                                  msix_sts_reg);
3593
3594                 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3595                 /* We deferred the clearing of the error event which caused
3596                  * the interrupt, since it was not possible to do that in
3597                  * interrupt context (and this is the reason we introduced the
3598                  * new UNKNOWN reset type). Now that the errors have been
3599                  * handled and cleared in hardware, we can safely enable
3600                  * interrupts. This is an exception to the norm.
3601                  */
3602                 hclge_enable_vector(&hdev->misc_vector, true);
3603         }
3604
3605         /* return the highest priority reset level amongst all */
3606         if (test_bit(HNAE3_IMP_RESET, addr)) {
3607                 rst_level = HNAE3_IMP_RESET;
3608                 clear_bit(HNAE3_IMP_RESET, addr);
3609                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3610                 clear_bit(HNAE3_FUNC_RESET, addr);
3611         } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3612                 rst_level = HNAE3_GLOBAL_RESET;
3613                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3614                 clear_bit(HNAE3_FUNC_RESET, addr);
3615         } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3616                 rst_level = HNAE3_FUNC_RESET;
3617                 clear_bit(HNAE3_FUNC_RESET, addr);
3618         } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3619                 rst_level = HNAE3_FLR_RESET;
3620                 clear_bit(HNAE3_FLR_RESET, addr);
3621         }
3622
3623         if (hdev->reset_type != HNAE3_NONE_RESET &&
3624             rst_level < hdev->reset_type)
3625                 return HNAE3_NONE_RESET;
3626
3627         return rst_level;
3628 }
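
/* Illustrative note, not part of the driver: hclge_get_reset_level() always
 * returns the most severe pending level and clears every level it supersedes.
 * For example, if both HNAE3_GLOBAL_RESET and HNAE3_FUNC_RESET are set in
 * *addr, the caller gets HNAE3_GLOBAL_RESET and both bits are cleared, so the
 * function reset is folded into the global reset rather than run separately.
 */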
3629
3630 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3631 {
3632         u32 clearval = 0;
3633
3634         switch (hdev->reset_type) {
3635         case HNAE3_IMP_RESET:
3636                 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3637                 break;
3638         case HNAE3_GLOBAL_RESET:
3639                 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3640                 break;
3641         default:
3642                 break;
3643         }
3644
3645         if (!clearval)
3646                 return;
3647
3648         /* For revision 0x20, the reset interrupt source
3649          * can only be cleared after the hardware reset is done
3650          */
3651         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
3652                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3653                                 clearval);
3654
3655         hclge_enable_vector(&hdev->misc_vector, true);
3656 }
3657
3658 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3659 {
3660         u32 reg_val;
3661
3662         reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3663         if (enable)
3664                 reg_val |= HCLGE_NIC_SW_RST_RDY;
3665         else
3666                 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3667
3668         hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3669 }
3670
3671 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3672 {
3673         int ret;
3674
3675         ret = hclge_set_all_vf_rst(hdev, true);
3676         if (ret)
3677                 return ret;
3678
3679         hclge_func_reset_sync_vf(hdev);
3680
3681         return 0;
3682 }
3683
3684 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3685 {
3686         u32 reg_val;
3687         int ret = 0;
3688
3689         switch (hdev->reset_type) {
3690         case HNAE3_FUNC_RESET:
3691                 ret = hclge_func_reset_notify_vf(hdev);
3692                 if (ret)
3693                         return ret;
3694
3695                 ret = hclge_func_reset_cmd(hdev, 0);
3696                 if (ret) {
3697                         dev_err(&hdev->pdev->dev,
3698                                 "asserting function reset fail %d!\n", ret);
3699                         return ret;
3700                 }
3701
3702                 /* After performing a PF reset, it is not necessary to do any
3703                  * mailbox handling or send any command to firmware, because
3704                  * any mailbox handling or command to firmware is only valid
3705                  * after hclge_cmd_init is called.
3706                  */
3707                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3708                 hdev->rst_stats.pf_rst_cnt++;
3709                 break;
3710         case HNAE3_FLR_RESET:
3711                 ret = hclge_func_reset_notify_vf(hdev);
3712                 if (ret)
3713                         return ret;
3714                 break;
3715         case HNAE3_IMP_RESET:
3716                 hclge_handle_imp_error(hdev);
3717                 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3718                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3719                                 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3720                 break;
3721         default:
3722                 break;
3723         }
3724
3725         /* inform hardware that preparatory work is done */
3726         msleep(HCLGE_RESET_SYNC_TIME);
3727         hclge_reset_handshake(hdev, true);
3728         dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3729
3730         return ret;
3731 }
3732
3733 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3734 {
3735 #define MAX_RESET_FAIL_CNT 5
3736
3737         if (hdev->reset_pending) {
3738                 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3739                          hdev->reset_pending);
3740                 return true;
3741         } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3742                    HCLGE_RESET_INT_M) {
3743                 dev_info(&hdev->pdev->dev,
3744                          "reset failed because of a new reset interrupt\n");
3745                 hclge_clear_reset_cause(hdev);
3746                 return false;
3747         } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3748                 hdev->rst_stats.reset_fail_cnt++;
3749                 set_bit(hdev->reset_type, &hdev->reset_pending);
3750                 dev_info(&hdev->pdev->dev,
3751                          "re-schedule reset task(%u)\n",
3752                          hdev->rst_stats.reset_fail_cnt);
3753                 return true;
3754         }
3755
3756         hclge_clear_reset_cause(hdev);
3757
3758         /* recover the handshake status when reset fails */
3759         hclge_reset_handshake(hdev, true);
3760
3761         dev_err(&hdev->pdev->dev, "Reset fail!\n");
3762
3763         hclge_dbg_dump_rst_info(hdev);
3764
3765         set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3766
3767         return false;
3768 }
3769
3770 static int hclge_set_rst_done(struct hclge_dev *hdev)
3771 {
3772         struct hclge_pf_rst_done_cmd *req;
3773         struct hclge_desc desc;
3774         int ret;
3775
3776         req = (struct hclge_pf_rst_done_cmd *)desc.data;
3777         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3778         req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3779
3780         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3781         /* To be compatible with the old firmware, which does not support
3782          * command HCLGE_OPC_PF_RST_DONE, just print a warning and
3783          * return success
3784          */
3785         if (ret == -EOPNOTSUPP) {
3786                 dev_warn(&hdev->pdev->dev,
3787                          "current firmware does not support command(0x%x)!\n",
3788                          HCLGE_OPC_PF_RST_DONE);
3789                 return 0;
3790         } else if (ret) {
3791                 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3792                         ret);
3793         }
3794
3795         return ret;
3796 }
3797
3798 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3799 {
3800         int ret = 0;
3801
3802         switch (hdev->reset_type) {
3803         case HNAE3_FUNC_RESET:
3804         case HNAE3_FLR_RESET:
3805                 ret = hclge_set_all_vf_rst(hdev, false);
3806                 break;
3807         case HNAE3_GLOBAL_RESET:
3808         case HNAE3_IMP_RESET:
3809                 ret = hclge_set_rst_done(hdev);
3810                 break;
3811         default:
3812                 break;
3813         }
3814
3815         /* clear the handshake status after re-initialization is done */
3816         hclge_reset_handshake(hdev, false);
3817
3818         return ret;
3819 }
3820
3821 static int hclge_reset_stack(struct hclge_dev *hdev)
3822 {
3823         int ret;
3824
3825         ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3826         if (ret)
3827                 return ret;
3828
3829         ret = hclge_reset_ae_dev(hdev->ae_dev);
3830         if (ret)
3831                 return ret;
3832
3833         return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3834 }
3835
3836 static int hclge_reset_prepare(struct hclge_dev *hdev)
3837 {
3838         int ret;
3839
3840         hdev->rst_stats.reset_cnt++;
3841         /* perform reset of the stack & ae device for a client */
3842         ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3843         if (ret)
3844                 return ret;
3845
3846         rtnl_lock();
3847         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3848         rtnl_unlock();
3849         if (ret)
3850                 return ret;
3851
3852         return hclge_reset_prepare_wait(hdev);
3853 }
3854
3855 static int hclge_reset_rebuild(struct hclge_dev *hdev)
3856 {
3857         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3858         enum hnae3_reset_type reset_level;
3859         int ret;
3860
3861         hdev->rst_stats.hw_reset_done_cnt++;
3862
3863         ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3864         if (ret)
3865                 return ret;
3866
3867         rtnl_lock();
3868         ret = hclge_reset_stack(hdev);
3869         rtnl_unlock();
3870         if (ret)
3871                 return ret;
3872
3873         hclge_clear_reset_cause(hdev);
3874
3875         ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3876         /* ignore the RoCE notify error if the reset has already failed
3877          * HCLGE_RESET_MAX_FAIL_CNT - 1 times
3878          */
3879         if (ret &&
3880             hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3881                 return ret;
3882
3883         ret = hclge_reset_prepare_up(hdev);
3884         if (ret)
3885                 return ret;
3886
3887         rtnl_lock();
3888         ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3889         rtnl_unlock();
3890         if (ret)
3891                 return ret;
3892
3893         ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3894         if (ret)
3895                 return ret;
3896
3897         hdev->last_reset_time = jiffies;
3898         hdev->rst_stats.reset_fail_cnt = 0;
3899         hdev->rst_stats.reset_done_cnt++;
3900         clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3901
3902         /* if default_reset_request has a higher level reset request,
3903          * it should be handled as soon as possible, since some errors
3904          * need this kind of reset to be fixed.
3905          */
3906         reset_level = hclge_get_reset_level(ae_dev,
3907                                             &hdev->default_reset_request);
3908         if (reset_level != HNAE3_NONE_RESET)
3909                 set_bit(reset_level, &hdev->reset_request);
3910
3911         return 0;
3912 }
3913
3914 static void hclge_reset(struct hclge_dev *hdev)
3915 {
3916         if (hclge_reset_prepare(hdev))
3917                 goto err_reset;
3918
3919         if (hclge_reset_wait(hdev))
3920                 goto err_reset;
3921
3922         if (hclge_reset_rebuild(hdev))
3923                 goto err_reset;
3924
3925         return;
3926
3927 err_reset:
3928         if (hclge_reset_err_handle(hdev))
3929                 hclge_reset_task_schedule(hdev);
3930 }
3931
3932 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3933 {
3934         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3935         struct hclge_dev *hdev = ae_dev->priv;
3936
3937         /* We might end up getting called broadly because of the 2 cases below:
3938          * 1. A recoverable error was conveyed through APEI and the only way to
3939          *    bring back normalcy is to reset.
3940          * 2. A new reset request from the stack due to a timeout.
3941          *
3942          * For the first case, the error event might not have an ae handle
3943          * available. Check whether this is a new reset request and we are not
3944          * here just because the last reset attempt did not succeed and the
3945          * watchdog hit us again. We know this if the last reset request did not
3946          * occur very recently (watchdog timer = 5*HZ, so check after a
3947          * sufficiently large time, say 4*5*HZ). For a new request we reset the
3948          * "reset level" to PF reset. And if it is a repeat of the most recent
3949          * reset request, we want to make sure we throttle it, so we will not
3950          * allow it again before 3*HZ has passed.
3951          */
3952         if (!handle)
3953                 handle = &hdev->vport[0].nic;
3954
3955         if (time_before(jiffies, (hdev->last_reset_time +
3956                                   HCLGE_RESET_INTERVAL))) {
3957                 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
3958                 return;
3959         } else if (hdev->default_reset_request) {
3960                 hdev->reset_level =
3961                         hclge_get_reset_level(ae_dev,
3962                                               &hdev->default_reset_request);
3963         } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
3964                 hdev->reset_level = HNAE3_FUNC_RESET;
3965         }
3966
3967         dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3968                  hdev->reset_level);
3969
3970         /* request reset & schedule reset task */
3971         set_bit(hdev->reset_level, &hdev->reset_request);
3972         hclge_reset_task_schedule(hdev);
3973
3974         if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3975                 hdev->reset_level++;
3976 }
3977
3978 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3979                                         enum hnae3_reset_type rst_type)
3980 {
3981         struct hclge_dev *hdev = ae_dev->priv;
3982
3983         set_bit(rst_type, &hdev->default_reset_request);
3984 }
3985
3986 static void hclge_reset_timer(struct timer_list *t)
3987 {
3988         struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3989
3990         /* if default_reset_request has no value, it means that this reset
3991          * request has already been handled, so just return here
3992          */
3993         if (!hdev->default_reset_request)
3994                 return;
3995
3996         dev_info(&hdev->pdev->dev,
3997                  "triggering reset in reset timer\n");
3998         hclge_reset_event(hdev->pdev, NULL);
3999 }
4000
4001 static void hclge_reset_subtask(struct hclge_dev *hdev)
4002 {
4003         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4004
4005         /* check if there is any ongoing reset in the hardware. This status can
4006          * be checked from reset_pending. If there is, then we need to wait for
4007          * hardware to complete the reset.
4008          *    a. If we are able to figure out in reasonable time that the hardware
4009          *       has fully reset, then we can proceed with the driver and client
4010          *       reset.
4011          *    b. Else, we can come back later to check this status, so reschedule
4012          *       now.
4013          */
4014         hdev->last_reset_time = jiffies;
4015         hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
4016         if (hdev->reset_type != HNAE3_NONE_RESET)
4017                 hclge_reset(hdev);
4018
4019         /* check if we got any *new* reset requests to be honored */
4020         hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
4021         if (hdev->reset_type != HNAE3_NONE_RESET)
4022                 hclge_do_reset(hdev);
4023
4024         hdev->reset_type = HNAE3_NONE_RESET;
4025 }
4026
4027 static void hclge_reset_service_task(struct hclge_dev *hdev)
4028 {
4029         if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
4030                 return;
4031
4032         down(&hdev->reset_sem);
4033         set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4034
4035         hclge_reset_subtask(hdev);
4036
4037         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4038         up(&hdev->reset_sem);
4039 }
4040
4041 static void hclge_update_vport_alive(struct hclge_dev *hdev)
4042 {
4043         int i;
4044
4045         /* start from vport 1 because the PF is always alive */
4046         for (i = 1; i < hdev->num_alloc_vport; i++) {
4047                 struct hclge_vport *vport = &hdev->vport[i];
4048
4049                 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
4050                         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
4051
4052                 /* If the VF is not alive, set mps to the default value */
4053                 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4054                         vport->mps = HCLGE_MAC_DEFAULT_FRAME;
4055         }
4056 }
4057
4058 static void hclge_periodic_service_task(struct hclge_dev *hdev)
4059 {
4060         unsigned long delta = round_jiffies_relative(HZ);
4061
4062         if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
4063                 return;
4064
4065         /* Always handle the link updating to make sure link state is
4066          * updated when it is triggered by mbx.
4067          */
4068         hclge_update_link_status(hdev);
4069         hclge_sync_mac_table(hdev);
4070         hclge_sync_promisc_mode(hdev);
4071
4072         if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4073                 delta = jiffies - hdev->last_serv_processed;
4074
4075                 if (delta < round_jiffies_relative(HZ)) {
4076                         delta = round_jiffies_relative(HZ) - delta;
4077                         goto out;
4078                 }
4079         }
4080
4081         hdev->serv_processed_cnt++;
4082         hclge_update_vport_alive(hdev);
4083
4084         if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4085                 hdev->last_serv_processed = jiffies;
4086                 goto out;
4087         }
4088
4089         if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4090                 hclge_update_stats_for_all(hdev);
4091
4092         hclge_update_port_info(hdev);
4093         hclge_sync_vlan_filter(hdev);
4094
4095         if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4096                 hclge_rfs_filter_expire(hdev);
4097
4098         hdev->last_serv_processed = jiffies;
4099
4100 out:
4101         hclge_task_schedule(hdev, delta);
4102 }
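
/* Illustrative note, not part of the driver: the periodic task above throttles
 * itself to roughly one full pass per second. For example, if the previous
 * pass finished about 300 ms ago, the check against last_serv_processed + HZ
 * reschedules the work for the remaining ~700 ms and skips the heavier
 * statistics, port-info and VLAN sync work in this invocation.
 */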
4103
4104 static void hclge_service_task(struct work_struct *work)
4105 {
4106         struct hclge_dev *hdev =
4107                 container_of(work, struct hclge_dev, service_task.work);
4108
4109         hclge_reset_service_task(hdev);
4110         hclge_mailbox_service_task(hdev);
4111         hclge_periodic_service_task(hdev);
4112
4113         /* Handle reset and mbx again in case periodical task delays the
4114          * handling by calling hclge_task_schedule() in
4115          * hclge_periodic_service_task().
4116          */
4117         hclge_reset_service_task(hdev);
4118         hclge_mailbox_service_task(hdev);
4119 }
4120
4121 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4122 {
4123         /* VF handle has no client */
4124         if (!handle->client)
4125                 return container_of(handle, struct hclge_vport, nic);
4126         else if (handle->client->type == HNAE3_CLIENT_ROCE)
4127                 return container_of(handle, struct hclge_vport, roce);
4128         else
4129                 return container_of(handle, struct hclge_vport, nic);
4130 }
4131
4132 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4133                             struct hnae3_vector_info *vector_info)
4134 {
4135         struct hclge_vport *vport = hclge_get_vport(handle);
4136         struct hnae3_vector_info *vector = vector_info;
4137         struct hclge_dev *hdev = vport->back;
4138         int alloc = 0;
4139         int i, j;
4140
4141         vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4142         vector_num = min(hdev->num_msi_left, vector_num);
4143
4144         for (j = 0; j < vector_num; j++) {
4145                 for (i = 1; i < hdev->num_msi; i++) {
4146                         if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4147                                 vector->vector = pci_irq_vector(hdev->pdev, i);
4148                                 vector->io_addr = hdev->hw.io_base +
4149                                         HCLGE_VECTOR_REG_BASE +
4150                                         (i - 1) * HCLGE_VECTOR_REG_OFFSET +
4151                                         vport->vport_id *
4152                                         HCLGE_VECTOR_VF_OFFSET;
4153                                 hdev->vector_status[i] = vport->vport_id;
4154                                 hdev->vector_irq[i] = vector->vector;
4155
4156                                 vector++;
4157                                 alloc++;
4158
4159                                 break;
4160                         }
4161                 }
4162         }
4163         hdev->num_msi_left -= alloc;
4164         hdev->num_msi_used += alloc;
4165
4166         return alloc;
4167 }
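
/* Illustrative note, not part of the driver: the io_addr computed above points
 * at the per-vector interrupt control register. For example, assuming vector
 * index i = 2 on vport 0, the address would be
 *
 *      io_base + HCLGE_VECTOR_REG_BASE + 1 * HCLGE_VECTOR_REG_OFFSET
 *
 * since vector 0 is reserved for the misc interrupt and the ring vectors are
 * allocated starting from i = 1.
 */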
4168
4169 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4170 {
4171         int i;
4172
4173         for (i = 0; i < hdev->num_msi; i++)
4174                 if (vector == hdev->vector_irq[i])
4175                         return i;
4176
4177         return -EINVAL;
4178 }
4179
4180 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4181 {
4182         struct hclge_vport *vport = hclge_get_vport(handle);
4183         struct hclge_dev *hdev = vport->back;
4184         int vector_id;
4185
4186         vector_id = hclge_get_vector_index(hdev, vector);
4187         if (vector_id < 0) {
4188                 dev_err(&hdev->pdev->dev,
4189                         "Get vector index fail. vector = %d\n", vector);
4190                 return vector_id;
4191         }
4192
4193         hclge_free_vector(hdev, vector_id);
4194
4195         return 0;
4196 }
4197
4198 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4199 {
4200         return HCLGE_RSS_KEY_SIZE;
4201 }
4202
4203 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
4204 {
4205         return HCLGE_RSS_IND_TBL_SIZE;
4206 }
4207
4208 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4209                                   const u8 hfunc, const u8 *key)
4210 {
4211         struct hclge_rss_config_cmd *req;
4212         unsigned int key_offset = 0;
4213         struct hclge_desc desc;
4214         int key_counts;
4215         int key_size;
4216         int ret;
4217
4218         key_counts = HCLGE_RSS_KEY_SIZE;
4219         req = (struct hclge_rss_config_cmd *)desc.data;
4220
4221         while (key_counts) {
4222                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4223                                            false);
4224
4225                 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4226                 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4227
4228                 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4229                 memcpy(req->hash_key,
4230                        key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4231
4232                 key_counts -= key_size;
4233                 key_offset++;
4234                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4235                 if (ret) {
4236                         dev_err(&hdev->pdev->dev,
4237                                 "Configure RSS config fail, status = %d\n",
4238                                 ret);
4239                         return ret;
4240                 }
4241         }
4242         return 0;
4243 }
4244
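/* Write the RSS indirection table (HCLGE_RSS_IND_TBL_SIZE entries) to
 * hardware, HCLGE_RSS_CFG_TBL_SIZE entries per command descriptor.
 */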
4245 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
4246 {
4247         struct hclge_rss_indirection_table_cmd *req;
4248         struct hclge_desc desc;
4249         int i, j;
4250         int ret;
4251
4252         req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4253
4254         for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
4255                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INDIR_TABLE,
4256                                            false);
4257
4258                 req->start_table_index =
4259                         cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4260                 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4261
4262                 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
4263                         req->rss_result[j] =
4264                                 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4265
4266                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4267                 if (ret) {
4268                         dev_err(&hdev->pdev->dev,
4269                                 "Configure rss indir table fail, status = %d\n",
4270                                 ret);
4271                         return ret;
4272                 }
4273         }
4274         return 0;
4275 }
4276
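/* Configure the per-TC RSS mode: for each TC a valid bit, a queue-count
 * exponent (tc_size, log2 of the TC's queue number) and a queue offset.
 */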
4277 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4278                                  u16 *tc_size, u16 *tc_offset)
4279 {
4280         struct hclge_rss_tc_mode_cmd *req;
4281         struct hclge_desc desc;
4282         int ret;
4283         int i;
4284
4285         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4286         req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4287
4288         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4289                 u16 mode = 0;
4290
4291                 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4292                 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4293                                 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4294                 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4295                                 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4296
4297                 req->rss_tc_mode[i] = cpu_to_le16(mode);
4298         }
4299
4300         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4301         if (ret)
4302                 dev_err(&hdev->pdev->dev,
4303                         "Configure rss tc mode fail, status = %d\n", ret);
4304
4305         return ret;
4306 }
4307
4308 static void hclge_get_rss_type(struct hclge_vport *vport)
4309 {
4310         if (vport->rss_tuple_sets.ipv4_tcp_en ||
4311             vport->rss_tuple_sets.ipv4_udp_en ||
4312             vport->rss_tuple_sets.ipv4_sctp_en ||
4313             vport->rss_tuple_sets.ipv6_tcp_en ||
4314             vport->rss_tuple_sets.ipv6_udp_en ||
4315             vport->rss_tuple_sets.ipv6_sctp_en)
4316                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4317         else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4318                  vport->rss_tuple_sets.ipv6_fragment_en)
4319                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4320         else
4321                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4322 }
4323
4324 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4325 {
4326         struct hclge_rss_input_tuple_cmd *req;
4327         struct hclge_desc desc;
4328         int ret;
4329
4330         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4331
4332         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4333
4334         /* Get the tuple cfg from the PF */
4335         req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4336         req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4337         req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4338         req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4339         req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4340         req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4341         req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4342         req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4343         hclge_get_rss_type(&hdev->vport[0]);
4344         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4345         if (ret)
4346                 dev_err(&hdev->pdev->dev,
4347                         "Configure rss input fail, status = %d\n", ret);
4348         return ret;
4349 }
4350
4351 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4352                          u8 *key, u8 *hfunc)
4353 {
4354         struct hclge_vport *vport = hclge_get_vport(handle);
4355         int i;
4356
4357         /* Get hash algorithm */
4358         if (hfunc) {
4359                 switch (vport->rss_algo) {
4360                 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4361                         *hfunc = ETH_RSS_HASH_TOP;
4362                         break;
4363                 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4364                         *hfunc = ETH_RSS_HASH_XOR;
4365                         break;
4366                 default:
4367                         *hfunc = ETH_RSS_HASH_UNKNOWN;
4368                         break;
4369                 }
4370         }
4371
4372         /* Get the RSS Key required by the user */
4373         if (key)
4374                 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4375
4376         /* Get the indirection table */
4377         if (indir)
4378                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4379                         indir[i] = vport->rss_indirection_tbl[i];
4380
4381         return 0;
4382 }
4383
4384 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4385                          const u8 *key, const u8 hfunc)
4386 {
4387         struct hclge_vport *vport = hclge_get_vport(handle);
4388         struct hclge_dev *hdev = vport->back;
4389         u8 hash_algo;
4390         int ret, i;
4391
4392         /* Set the RSS Hash Key if specified by the user */
4393         if (key) {
4394                 switch (hfunc) {
4395                 case ETH_RSS_HASH_TOP:
4396                         hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4397                         break;
4398                 case ETH_RSS_HASH_XOR:
4399                         hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4400                         break;
4401                 case ETH_RSS_HASH_NO_CHANGE:
4402                         hash_algo = vport->rss_algo;
4403                         break;
4404                 default:
4405                         return -EINVAL;
4406                 }
4407
4408                 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4409                 if (ret)
4410                         return ret;
4411
4412                 /* Update the shadow RSS key with the user specified key */
4413                 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4414                 vport->rss_algo = hash_algo;
4415         }
4416
4417         /* Update the shadow RSS table with user specified qids */
4418         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4419                 vport->rss_indirection_tbl[i] = indir[i];
4420
4421         /* Update the hardware */
4422         return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4423 }
4424
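/* Translate the ethtool RXH_* flags in nfc->data into the driver's tuple
 * bits; SCTP flows additionally hash on HCLGE_V_TAG_BIT.
 */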
4425 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4426 {
4427         u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4428
4429         if (nfc->data & RXH_L4_B_2_3)
4430                 hash_sets |= HCLGE_D_PORT_BIT;
4431         else
4432                 hash_sets &= ~HCLGE_D_PORT_BIT;
4433
4434         if (nfc->data & RXH_IP_SRC)
4435                 hash_sets |= HCLGE_S_IP_BIT;
4436         else
4437                 hash_sets &= ~HCLGE_S_IP_BIT;
4438
4439         if (nfc->data & RXH_IP_DST)
4440                 hash_sets |= HCLGE_D_IP_BIT;
4441         else
4442                 hash_sets &= ~HCLGE_D_IP_BIT;
4443
4444         if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4445                 hash_sets |= HCLGE_V_TAG_BIT;
4446
4447         return hash_sets;
4448 }
4449
4450 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4451                                struct ethtool_rxnfc *nfc)
4452 {
4453         struct hclge_vport *vport = hclge_get_vport(handle);
4454         struct hclge_dev *hdev = vport->back;
4455         struct hclge_rss_input_tuple_cmd *req;
4456         struct hclge_desc desc;
4457         u8 tuple_sets;
4458         int ret;
4459
4460         if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4461                           RXH_L4_B_0_1 | RXH_L4_B_2_3))
4462                 return -EINVAL;
4463
4464         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4465         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4466
4467         req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4468         req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4469         req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4470         req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4471         req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4472         req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4473         req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4474         req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4475
4476         tuple_sets = hclge_get_rss_hash_bits(nfc);
4477         switch (nfc->flow_type) {
4478         case TCP_V4_FLOW:
4479                 req->ipv4_tcp_en = tuple_sets;
4480                 break;
4481         case TCP_V6_FLOW:
4482                 req->ipv6_tcp_en = tuple_sets;
4483                 break;
4484         case UDP_V4_FLOW:
4485                 req->ipv4_udp_en = tuple_sets;
4486                 break;
4487         case UDP_V6_FLOW:
4488                 req->ipv6_udp_en = tuple_sets;
4489                 break;
4490         case SCTP_V4_FLOW:
4491                 req->ipv4_sctp_en = tuple_sets;
4492                 break;
4493         case SCTP_V6_FLOW:
4494                 if ((nfc->data & RXH_L4_B_0_1) ||
4495                     (nfc->data & RXH_L4_B_2_3))
4496                         return -EINVAL;
4497
4498                 req->ipv6_sctp_en = tuple_sets;
4499                 break;
4500         case IPV4_FLOW:
4501                 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4502                 break;
4503         case IPV6_FLOW:
4504                 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4505                 break;
4506         default:
4507                 return -EINVAL;
4508         }
4509
4510         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4511         if (ret) {
4512                 dev_err(&hdev->pdev->dev,
4513                         "Set rss tuple fail, status = %d\n", ret);
4514                 return ret;
4515         }
4516
4517         vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4518         vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4519         vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4520         vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4521         vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4522         vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4523         vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4524         vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4525         hclge_get_rss_type(vport);
4526         return 0;
4527 }
4528
4529 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4530                                struct ethtool_rxnfc *nfc)
4531 {
4532         struct hclge_vport *vport = hclge_get_vport(handle);
4533         u8 tuple_sets;
4534
4535         nfc->data = 0;
4536
4537         switch (nfc->flow_type) {
4538         case TCP_V4_FLOW:
4539                 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4540                 break;
4541         case UDP_V4_FLOW:
4542                 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4543                 break;
4544         case TCP_V6_FLOW:
4545                 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4546                 break;
4547         case UDP_V6_FLOW:
4548                 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4549                 break;
4550         case SCTP_V4_FLOW:
4551                 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4552                 break;
4553         case SCTP_V6_FLOW:
4554                 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4555                 break;
4556         case IPV4_FLOW:
4557         case IPV6_FLOW:
4558                 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4559                 break;
4560         default:
4561                 return -EINVAL;
4562         }
4563
4564         if (!tuple_sets)
4565                 return 0;
4566
4567         if (tuple_sets & HCLGE_D_PORT_BIT)
4568                 nfc->data |= RXH_L4_B_2_3;
4569         if (tuple_sets & HCLGE_S_PORT_BIT)
4570                 nfc->data |= RXH_L4_B_0_1;
4571         if (tuple_sets & HCLGE_D_IP_BIT)
4572                 nfc->data |= RXH_IP_DST;
4573         if (tuple_sets & HCLGE_S_IP_BIT)
4574                 nfc->data |= RXH_IP_SRC;
4575
4576         return 0;
4577 }
4578
4579 static int hclge_get_tc_size(struct hnae3_handle *handle)
4580 {
4581         struct hclge_vport *vport = hclge_get_vport(handle);
4582         struct hclge_dev *hdev = vport->back;
4583
4584         return hdev->rss_size_max;
4585 }
4586
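/* Initialize the RSS hardware state from the PF vport's cached settings:
 * indirection table, hash key and algorithm, input tuples, and the per-TC
 * RSS mode derived from alloc_rss_size and hw_tc_map.
 */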
4587 int hclge_rss_init_hw(struct hclge_dev *hdev)
4588 {
4589         struct hclge_vport *vport = hdev->vport;
4590         u8 *rss_indir = vport[0].rss_indirection_tbl;
4591         u16 rss_size = vport[0].alloc_rss_size;
4592         u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4593         u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4594         u8 *key = vport[0].rss_hash_key;
4595         u8 hfunc = vport[0].rss_algo;
4596         u16 tc_valid[HCLGE_MAX_TC_NUM];
4597         u16 roundup_size;
4598         unsigned int i;
4599         int ret;
4600
4601         ret = hclge_set_rss_indir_table(hdev, rss_indir);
4602         if (ret)
4603                 return ret;
4604
4605         ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4606         if (ret)
4607                 return ret;
4608
4609         ret = hclge_set_rss_input_tuple(hdev);
4610         if (ret)
4611                 return ret;
4612
4613         /* Each TC has the same queue size, and the tc_size set to hardware is
4614          * the log2 of the roundup power of two of rss_size; the actual queue
4615          * size is limited by the indirection table.
4616          */
4617         if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4618                 dev_err(&hdev->pdev->dev,
4619                         "Configure rss tc size failed, invalid TC_SIZE = %u\n",
4620                         rss_size);
4621                 return -EINVAL;
4622         }
4623
4624         roundup_size = roundup_pow_of_two(rss_size);
4625         roundup_size = ilog2(roundup_size);
4626
4627         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4628                 tc_valid[i] = 0;
4629
4630                 if (!(hdev->hw_tc_map & BIT(i)))
4631                         continue;
4632
4633                 tc_valid[i] = 1;
4634                 tc_size[i] = roundup_size;
4635                 tc_offset[i] = rss_size * i;
4636         }
4637
4638         return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4639 }
4640
4641 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4642 {
4643         struct hclge_vport *vport = hdev->vport;
4644         int i, j;
4645
4646         for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4647                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4648                         vport[j].rss_indirection_tbl[i] =
4649                                 i % vport[j].alloc_rss_size;
4650         }
4651 }
4652
4653 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4654 {
4655         int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4656         struct hclge_vport *vport = hdev->vport;
4657
4658         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
4659                 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4660
4661         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4662                 vport[i].rss_tuple_sets.ipv4_tcp_en =
4663                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4664                 vport[i].rss_tuple_sets.ipv4_udp_en =
4665                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4666                 vport[i].rss_tuple_sets.ipv4_sctp_en =
4667                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4668                 vport[i].rss_tuple_sets.ipv4_fragment_en =
4669                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4670                 vport[i].rss_tuple_sets.ipv6_tcp_en =
4671                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4672                 vport[i].rss_tuple_sets.ipv6_udp_en =
4673                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4674                 vport[i].rss_tuple_sets.ipv6_sctp_en =
4675                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4676                 vport[i].rss_tuple_sets.ipv6_fragment_en =
4677                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4678
4679                 vport[i].rss_algo = rss_algo;
4680
4681                 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4682                        HCLGE_RSS_KEY_SIZE);
4683         }
4684
4685         hclge_rss_indir_init_cfg(hdev);
4686 }
4687
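/* Map (en == true) or unmap (en == false) the TQP rings in ring_chain to the
 * given vector, batching up to HCLGE_VECTOR_ELEMENTS_PER_CMD ring entries per
 * command descriptor.
 */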
4688 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4689                                 int vector_id, bool en,
4690                                 struct hnae3_ring_chain_node *ring_chain)
4691 {
4692         struct hclge_dev *hdev = vport->back;
4693         struct hnae3_ring_chain_node *node;
4694         struct hclge_desc desc;
4695         struct hclge_ctrl_vector_chain_cmd *req =
4696                 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4697         enum hclge_cmd_status status;
4698         enum hclge_opcode_type op;
4699         u16 tqp_type_and_id;
4700         int i;
4701
4702         op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4703         hclge_cmd_setup_basic_desc(&desc, op, false);
4704         req->int_vector_id = vector_id;
4705
4706         i = 0;
4707         for (node = ring_chain; node; node = node->next) {
4708                 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4709                 hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
4710                                 HCLGE_INT_TYPE_S,
4711                                 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4712                 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4713                                 HCLGE_TQP_ID_S, node->tqp_index);
4714                 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4715                                 HCLGE_INT_GL_IDX_S,
4716                                 hnae3_get_field(node->int_gl_idx,
4717                                                 HNAE3_RING_GL_IDX_M,
4718                                                 HNAE3_RING_GL_IDX_S));
4719                 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4720                 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4721                         req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4722                         req->vfid = vport->vport_id;
4723
4724                         status = hclge_cmd_send(&hdev->hw, &desc, 1);
4725                         if (status) {
4726                                 dev_err(&hdev->pdev->dev,
4727                                         "Map TQP fail, status is %d.\n",
4728                                         status);
4729                                 return -EIO;
4730                         }
4731                         i = 0;
4732
4733                         hclge_cmd_setup_basic_desc(&desc,
4734                                                    op,
4735                                                    false);
4736                         req->int_vector_id = vector_id;
4737                 }
4738         }
4739
4740         if (i > 0) {
4741                 req->int_cause_num = i;
4742                 req->vfid = vport->vport_id;
4743                 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4744                 if (status) {
4745                         dev_err(&hdev->pdev->dev,
4746                                 "Map TQP fail, status is %d.\n", status);
4747                         return -EIO;
4748                 }
4749         }
4750
4751         return 0;
4752 }
4753
4754 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4755                                     struct hnae3_ring_chain_node *ring_chain)
4756 {
4757         struct hclge_vport *vport = hclge_get_vport(handle);
4758         struct hclge_dev *hdev = vport->back;
4759         int vector_id;
4760
4761         vector_id = hclge_get_vector_index(hdev, vector);
4762         if (vector_id < 0) {
4763                 dev_err(&hdev->pdev->dev,
4764                         "failed to get vector index. vector=%d\n", vector);
4765                 return vector_id;
4766         }
4767
4768         return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4769 }
4770
4771 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4772                                        struct hnae3_ring_chain_node *ring_chain)
4773 {
4774         struct hclge_vport *vport = hclge_get_vport(handle);
4775         struct hclge_dev *hdev = vport->back;
4776         int vector_id, ret;
4777
4778         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4779                 return 0;
4780
4781         vector_id = hclge_get_vector_index(hdev, vector);
4782         if (vector_id < 0) {
4783                 dev_err(&handle->pdev->dev,
4784                         "Get vector index fail. ret = %d\n", vector_id);
4785                 return vector_id;
4786         }
4787
4788         ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4789         if (ret)
4790                 dev_err(&handle->pdev->dev,
4791                         "Unmap ring from vector fail. vectorid=%d, ret = %d\n",
4792                         vector_id, ret);
4793
4794         return ret;
4795 }
4796
4797 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4798                                       struct hclge_promisc_param *param)
4799 {
4800         struct hclge_promisc_cfg_cmd *req;
4801         struct hclge_desc desc;
4802         int ret;
4803
4804         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4805
4806         req = (struct hclge_promisc_cfg_cmd *)desc.data;
4807         req->vf_id = param->vf_id;
4808
4809         /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4810          * pdev revision 0x20; newer revisions support them. Setting these
4811          * two fields does not cause an error when the driver sends the
4812          * command to the firmware on revision 0x20.
4813          */
4814         req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4815                 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4816
4817         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4818         if (ret)
4819                 dev_err(&hdev->pdev->dev,
4820                         "failed to set vport %d promisc mode, ret = %d.\n",
4821                         param->vf_id, ret);
4822
4823         return ret;
4824 }
4825
4826 static void hclge_promisc_param_init(struct hclge_promisc_param *param,
4827                                      bool en_uc, bool en_mc, bool en_bc,
4828                                      int vport_id)
4829 {
4830         if (!param)
4831                 return;
4832
4833         memset(param, 0, sizeof(struct hclge_promisc_param));
4834         if (en_uc)
4835                 param->enable = HCLGE_PROMISC_EN_UC;
4836         if (en_mc)
4837                 param->enable |= HCLGE_PROMISC_EN_MC;
4838         if (en_bc)
4839                 param->enable |= HCLGE_PROMISC_EN_BC;
4840         param->vf_id = vport_id;
4841 }
4842
4843 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
4844                                  bool en_mc_pmc, bool en_bc_pmc)
4845 {
4846         struct hclge_dev *hdev = vport->back;
4847         struct hclge_promisc_param param;
4848
4849         hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4850                                  vport->vport_id);
4851         return hclge_cmd_set_promisc_mode(hdev, &param);
4852 }
4853
4854 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4855                                   bool en_mc_pmc)
4856 {
4857         struct hclge_vport *vport = hclge_get_vport(handle);
4858         struct hclge_dev *hdev = vport->back;
4859         bool en_bc_pmc = true;
4860
4861         /* For device whose version below V2, if broadcast promisc enabled,
4862         /* For devices whose version is below V2, if broadcast promisc is
4863          * enabled, the vlan filter is always bypassed. So broadcast promisc
4864          * should be disabled until the user enables promisc mode.
4865         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
4866                 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4867
4868         return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
4869                                             en_bc_pmc);
4870 }
4871
4872 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
4873 {
4874         struct hclge_vport *vport = hclge_get_vport(handle);
4875         struct hclge_dev *hdev = vport->back;
4876
4877         set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
4878 }
4879
4880 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4881 {
4882         struct hclge_get_fd_mode_cmd *req;
4883         struct hclge_desc desc;
4884         int ret;
4885
4886         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4887
4888         req = (struct hclge_get_fd_mode_cmd *)desc.data;
4889
4890         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4891         if (ret) {
4892                 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4893                 return ret;
4894         }
4895
4896         *fd_mode = req->mode;
4897
4898         return ret;
4899 }
4900
4901 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4902                                    u32 *stage1_entry_num,
4903                                    u32 *stage2_entry_num,
4904                                    u16 *stage1_counter_num,
4905                                    u16 *stage2_counter_num)
4906 {
4907         struct hclge_get_fd_allocation_cmd *req;
4908         struct hclge_desc desc;
4909         int ret;
4910
4911         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4912
4913         req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4914
4915         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4916         if (ret) {
4917                 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4918                         ret);
4919                 return ret;
4920         }
4921
4922         *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4923         *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4924         *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4925         *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4926
4927         return ret;
4928 }
4929
4930 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
4931                                    enum HCLGE_FD_STAGE stage_num)
4932 {
4933         struct hclge_set_fd_key_config_cmd *req;
4934         struct hclge_fd_key_cfg *stage;
4935         struct hclge_desc desc;
4936         int ret;
4937
4938         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4939
4940         req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4941         stage = &hdev->fd_cfg.key_cfg[stage_num];
4942         req->stage = stage_num;
4943         req->key_select = stage->key_sel;
4944         req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4945         req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4946         req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4947         req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4948         req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4949         req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4950
4951         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4952         if (ret)
4953                 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4954
4955         return ret;
4956 }
4957
4958 static int hclge_init_fd_config(struct hclge_dev *hdev)
4959 {
4960 #define LOW_2_WORDS             0x03
4961         struct hclge_fd_key_cfg *key_cfg;
4962         int ret;
4963
4964         if (!hnae3_dev_fd_supported(hdev))
4965                 return 0;
4966
4967         ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4968         if (ret)
4969                 return ret;
4970
4971         switch (hdev->fd_cfg.fd_mode) {
4972         case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4973                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4974                 break;
4975         case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4976                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4977                 break;
4978         default:
4979                 dev_err(&hdev->pdev->dev,
4980                         "Unsupported flow director mode %u\n",
4981                         hdev->fd_cfg.fd_mode);
4982                 return -EOPNOTSUPP;
4983         }
4984
4985         key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4986         key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4987         key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4988         key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4989         key_cfg->outer_sipv6_word_en = 0;
4990         key_cfg->outer_dipv6_word_en = 0;
4991
4992         key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4993                                 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4994                                 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4995                                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4996
4997         /* If the max 400-bit key is used, src/dst MAC tuples are also supported */
4998         if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1)
4999                 key_cfg->tuple_active |=
5000                                 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
5001
5002         /* roce_type is used to filter RoCE frames,
5003          * dst_vport is used to bind the rule to a specific vport
5004          */
5005         key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
5006
5007         ret = hclge_get_fd_allocation(hdev,
5008                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
5009                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
5010                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
5011                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
5012         if (ret)
5013                 return ret;
5014
5015         return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
5016 }
5017
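/* Write one half (x or y, selected by sel_x) of a flow director TCAM entry at
 * index loc. The key is spread across three chained command descriptors;
 * is_add controls whether the entry is marked valid.
 */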
5018 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
5019                                 int loc, u8 *key, bool is_add)
5020 {
5021         struct hclge_fd_tcam_config_1_cmd *req1;
5022         struct hclge_fd_tcam_config_2_cmd *req2;
5023         struct hclge_fd_tcam_config_3_cmd *req3;
5024         struct hclge_desc desc[3];
5025         int ret;
5026
5027         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
5028         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5029         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
5030         desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5031         hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
5032
5033         req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
5034         req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
5035         req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
5036
5037         req1->stage = stage;
5038         req1->xy_sel = sel_x ? 1 : 0;
5039         hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
5040         req1->index = cpu_to_le32(loc);
5041         req1->entry_vld = sel_x ? is_add : 0;
5042
5043         if (key) {
5044                 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
5045                 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
5046                        sizeof(req2->tcam_data));
5047                 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
5048                        sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
5049         }
5050
5051         ret = hclge_cmd_send(&hdev->hw, desc, 3);
5052         if (ret)
5053                 dev_err(&hdev->pdev->dev,
5054                         "config tcam key fail, ret=%d\n",
5055                         ret);
5056
5057         return ret;
5058 }
5059
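/* Program the action data for the flow director rule at index loc: drop or
 * forward to a direct queue, an optional counter, and whether the rule id is
 * written back to the BD (write_rule_id_to_bd).
 */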
5060 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5061                               struct hclge_fd_ad_data *action)
5062 {
5063         struct hclge_fd_ad_config_cmd *req;
5064         struct hclge_desc desc;
5065         u64 ad_data = 0;
5066         int ret;
5067
5068         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5069
5070         req = (struct hclge_fd_ad_config_cmd *)desc.data;
5071         req->index = cpu_to_le32(loc);
5072         req->stage = stage;
5073
5074         hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5075                       action->write_rule_id_to_bd);
5076         hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5077                         action->rule_id);
5078         ad_data <<= 32;
5079         hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5080         hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5081                       action->forward_to_direct_queue);
5082         hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5083                         action->queue_id);
5084         hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5085         hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5086                         HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5087         hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5088         hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5089                         action->counter_id);
5090
5091         req->ad_data = cpu_to_le64(ad_data);
5092         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5093         if (ret)
5094                 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5095
5096         return ret;
5097 }
5098
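/* Convert one tuple of the rule into its TCAM x/y key representation using
 * calc_x()/calc_y(). Returns true if the tuple occupies space in the key
 * (unused tuples are left as zero), false for tuple bits this function does
 * not handle, so the caller knows whether to advance the key cursor.
 */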
5099 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5100                                    struct hclge_fd_rule *rule)
5101 {
5102         u16 tmp_x_s, tmp_y_s;
5103         u32 tmp_x_l, tmp_y_l;
5104         int i;
5105
5106         if (rule->unused_tuple & tuple_bit)
5107                 return true;
5108
5109         switch (tuple_bit) {
5110         case BIT(INNER_DST_MAC):
5111                 for (i = 0; i < ETH_ALEN; i++) {
5112                         calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5113                                rule->tuples_mask.dst_mac[i]);
5114                         calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5115                                rule->tuples_mask.dst_mac[i]);
5116                 }
5117
5118                 return true;
5119         case BIT(INNER_SRC_MAC):
5120                 for (i = 0; i < ETH_ALEN; i++) {
5121                         calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5122                                rule->tuples_mask.src_mac[i]);
5123                         calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5124                                rule->tuples_mask.src_mac[i]);
5125                 }
5126
5127                 return true;
5128         case BIT(INNER_VLAN_TAG_FST):
5129                 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
5130                        rule->tuples_mask.vlan_tag1);
5131                 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
5132                        rule->tuples_mask.vlan_tag1);
5133                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5134                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5135
5136                 return true;
5137         case BIT(INNER_ETH_TYPE):
5138                 calc_x(tmp_x_s, rule->tuples.ether_proto,
5139                        rule->tuples_mask.ether_proto);
5140                 calc_y(tmp_y_s, rule->tuples.ether_proto,
5141                        rule->tuples_mask.ether_proto);
5142                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5143                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5144
5145                 return true;
5146         case BIT(INNER_IP_TOS):
5147                 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5148                 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5149
5150                 return true;
5151         case BIT(INNER_IP_PROTO):
5152                 calc_x(*key_x, rule->tuples.ip_proto,
5153                        rule->tuples_mask.ip_proto);
5154                 calc_y(*key_y, rule->tuples.ip_proto,
5155                        rule->tuples_mask.ip_proto);
5156
5157                 return true;
5158         case BIT(INNER_SRC_IP):
5159                 calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
5160                        rule->tuples_mask.src_ip[IPV4_INDEX]);
5161                 calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
5162                        rule->tuples_mask.src_ip[IPV4_INDEX]);
5163                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5164                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5165
5166                 return true;
5167         case BIT(INNER_DST_IP):
5168                 calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
5169                        rule->tuples_mask.dst_ip[IPV4_INDEX]);
5170                 calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
5171                        rule->tuples_mask.dst_ip[IPV4_INDEX]);
5172                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5173                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5174
5175                 return true;
5176         case BIT(INNER_SRC_PORT):
5177                 calc_x(tmp_x_s, rule->tuples.src_port,
5178                        rule->tuples_mask.src_port);
5179                 calc_y(tmp_y_s, rule->tuples.src_port,
5180                        rule->tuples_mask.src_port);
5181                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5182                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5183
5184                 return true;
5185         case BIT(INNER_DST_PORT):
5186                 calc_x(tmp_x_s, rule->tuples.dst_port,
5187                        rule->tuples_mask.dst_port);
5188                 calc_y(tmp_y_s, rule->tuples.dst_port,
5189                        rule->tuples_mask.dst_port);
5190                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5191                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5192
5193                 return true;
5194         default:
5195                 return false;
5196         }
5197 }
5198
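/* Encode a port number for the meta data key: for HOST_PORT the pf_id/vf_id
 * pair plus the port type bit, otherwise the physical network port id.
 */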
5199 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5200                                  u8 vf_id, u8 network_port_id)
5201 {
5202         u32 port_number = 0;
5203
5204         if (port_type == HOST_PORT) {
5205                 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5206                                 pf_id);
5207                 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5208                                 vf_id);
5209                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5210         } else {
5211                 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5212                                 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5213                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5214         }
5215
5216         return port_number;
5217 }
5218
5219 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5220                                        __le32 *key_x, __le32 *key_y,
5221                                        struct hclge_fd_rule *rule)
5222 {
5223         u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5224         u8 cur_pos = 0, tuple_size, shift_bits;
5225         unsigned int i;
5226
5227         for (i = 0; i < MAX_META_DATA; i++) {
5228                 tuple_size = meta_data_key_info[i].key_length;
5229                 tuple_bit = key_cfg->meta_data_active & BIT(i);
5230
5231                 switch (tuple_bit) {
5232                 case BIT(ROCE_TYPE):
5233                         hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5234                         cur_pos += tuple_size;
5235                         break;
5236                 case BIT(DST_VPORT):
5237                         port_number = hclge_get_port_number(HOST_PORT, 0,
5238                                                             rule->vf_id, 0);
5239                         hnae3_set_field(meta_data,
5240                                         GENMASK(cur_pos + tuple_size, cur_pos),
5241                                         cur_pos, port_number);
5242                         cur_pos += tuple_size;
5243                         break;
5244                 default:
5245                         break;
5246                 }
5247         }
5248
5249         calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5250         calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5251         shift_bits = sizeof(meta_data) * 8 - cur_pos;
5252
5253         *key_x = cpu_to_le32(tmp_x << shift_bits);
5254         *key_y = cpu_to_le32(tmp_y << shift_bits);
5255 }
5256
5257 /* A complete key consists of a meta data key and a tuple key.
5258  * The meta data key is stored in the MSB region, the tuple key is stored in
5259  * the LSB region, and unused bits are filled with 0.
5260  */
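/* As an illustrative sketch (the exact sizes depend on MAX_META_DATA_LENGTH
 * and the negotiated fd_mode): with a 400-bit (50-byte) key and a 4-byte meta
 * data region, the tuple key would occupy bytes 0..45 and the meta data key
 * bytes 46..49 of key_x/key_y below.
 */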
5261 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5262                             struct hclge_fd_rule *rule)
5263 {
5264         struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5265         u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5266         u8 *cur_key_x, *cur_key_y;
5267         u8 meta_data_region;
5268         u8 tuple_size;
5269         int ret;
5270         u32 i;
5271
5272         memset(key_x, 0, sizeof(key_x));
5273         memset(key_y, 0, sizeof(key_y));
5274         cur_key_x = key_x;
5275         cur_key_y = key_y;
5276
5277         for (i = 0; i < MAX_TUPLE; i++) {
5278                 bool tuple_valid;
5279                 u32 check_tuple;
5280
5281                 tuple_size = tuple_key_info[i].key_length / 8;
5282                 check_tuple = key_cfg->tuple_active & BIT(i);
5283
5284                 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5285                                                      cur_key_y, rule);
5286                 if (tuple_valid) {
5287                         cur_key_x += tuple_size;
5288                         cur_key_y += tuple_size;
5289                 }
5290         }
5291
5292         meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5293                         MAX_META_DATA_LENGTH / 8;
5294
5295         hclge_fd_convert_meta_data(key_cfg,
5296                                    (__le32 *)(key_x + meta_data_region),
5297                                    (__le32 *)(key_y + meta_data_region),
5298                                    rule);
5299
5300         ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5301                                    true);
5302         if (ret) {
5303                 dev_err(&hdev->pdev->dev,
5304                         "fd key_y config fail, loc=%u, ret=%d\n",
5305                         rule->location, ret);
5306                 return ret;
5307         }
5308
5309         ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5310                                    true);
5311         if (ret)
5312                 dev_err(&hdev->pdev->dev,
5313                         "fd key_x config fail, loc=%u, ret=%d\n",
5314                         rule->location, ret);
5315         return ret;
5316 }
5317
5318 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5319                                struct hclge_fd_rule *rule)
5320 {
5321         struct hclge_fd_ad_data ad_data;
5322
5323         ad_data.ad_id = rule->location;
5324
5325         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5326                 ad_data.drop_packet = true;
5327                 ad_data.forward_to_direct_queue = false;
5328                 ad_data.queue_id = 0;
5329         } else {
5330                 ad_data.drop_packet = false;
5331                 ad_data.forward_to_direct_queue = true;
5332                 ad_data.queue_id = rule->queue_id;
5333         }
5334
5335         ad_data.use_counter = false;
5336         ad_data.counter_id = 0;
5337
5338         ad_data.use_next_stage = false;
5339         ad_data.next_input_key = 0;
5340
5341         ad_data.write_rule_id_to_bd = true;
5342         ad_data.rule_id = rule->location;
5343
5344         return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5345 }
5346
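/* The hclge_fd_check_*_tuple() helpers below validate an ethtool flow spec
 * and set bits in *unused_tuple for fields the user left zero; unused tuples
 * are later skipped when the rule key is built (their key bytes stay zero).
 */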
5347 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
5348                                        u32 *unused_tuple)
5349 {
5350         if (!spec || !unused_tuple)
5351                 return -EINVAL;
5352
5353         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5354
5355         if (!spec->ip4src)
5356                 *unused_tuple |= BIT(INNER_SRC_IP);
5357
5358         if (!spec->ip4dst)
5359                 *unused_tuple |= BIT(INNER_DST_IP);
5360
5361         if (!spec->psrc)
5362                 *unused_tuple |= BIT(INNER_SRC_PORT);
5363
5364         if (!spec->pdst)
5365                 *unused_tuple |= BIT(INNER_DST_PORT);
5366
5367         if (!spec->tos)
5368                 *unused_tuple |= BIT(INNER_IP_TOS);
5369
5370         return 0;
5371 }
5372
5373 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
5374                                     u32 *unused_tuple)
5375 {
5376         if (!spec || !unused_tuple)
5377                 return -EINVAL;
5378
5379         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5380                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5381
5382         if (!spec->ip4src)
5383                 *unused_tuple |= BIT(INNER_SRC_IP);
5384
5385         if (!spec->ip4dst)
5386                 *unused_tuple |= BIT(INNER_DST_IP);
5387
5388         if (!spec->tos)
5389                 *unused_tuple |= BIT(INNER_IP_TOS);
5390
5391         if (!spec->proto)
5392                 *unused_tuple |= BIT(INNER_IP_PROTO);
5393
5394         if (spec->l4_4_bytes)
5395                 return -EOPNOTSUPP;
5396
5397         if (spec->ip_ver != ETH_RX_NFC_IP4)
5398                 return -EOPNOTSUPP;
5399
5400         return 0;
5401 }
5402
5403 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
5404                                        u32 *unused_tuple)
5405 {
5406         if (!spec || !unused_tuple)
5407                 return -EINVAL;
5408
5409         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5410                 BIT(INNER_IP_TOS);
5411
5412         /* check whether the src/dst ip addresses are used */
5413         if (!spec->ip6src[0] && !spec->ip6src[1] &&
5414             !spec->ip6src[2] && !spec->ip6src[3])
5415                 *unused_tuple |= BIT(INNER_SRC_IP);
5416
5417         if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
5418             !spec->ip6dst[2] && !spec->ip6dst[3])
5419                 *unused_tuple |= BIT(INNER_DST_IP);
5420
5421         if (!spec->psrc)
5422                 *unused_tuple |= BIT(INNER_SRC_PORT);
5423
5424         if (!spec->pdst)
5425                 *unused_tuple |= BIT(INNER_DST_PORT);
5426
5427         if (spec->tclass)
5428                 return -EOPNOTSUPP;
5429
5430         return 0;
5431 }
5432
5433 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
5434                                     u32 *unused_tuple)
5435 {
5436         if (!spec || !unused_tuple)
5437                 return -EINVAL;
5438
5439         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5440                 BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5441
5442         /* check whether the src/dst ip addresses are used */
5443         if (!spec->ip6src[0] && !spec->ip6src[1] &&
5444             !spec->ip6src[2] && !spec->ip6src[3])
5445                 *unused_tuple |= BIT(INNER_SRC_IP);
5446
5447         if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
5448             !spec->ip6dst[2] && !spec->ip6dst[3])
5449                 *unused_tuple |= BIT(INNER_DST_IP);
5450
5451         if (!spec->l4_proto)
5452                 *unused_tuple |= BIT(INNER_IP_PROTO);
5453
5454         if (spec->tclass)
5455                 return -EOPNOTSUPP;
5456
5457         if (spec->l4_4_bytes)
5458                 return -EOPNOTSUPP;
5459
5460         return 0;
5461 }
5462
5463 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
5464 {
5465         if (!spec || !unused_tuple)
5466                 return -EINVAL;
5467
5468         *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5469                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5470                 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5471
5472         if (is_zero_ether_addr(spec->h_source))
5473                 *unused_tuple |= BIT(INNER_SRC_MAC);
5474
5475         if (is_zero_ether_addr(spec->h_dest))
5476                 *unused_tuple |= BIT(INNER_DST_MAC);
5477
5478         if (!spec->h_proto)
5479                 *unused_tuple |= BIT(INNER_ETH_TYPE);
5480
5481         return 0;
5482 }
5483
5484 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
5485                                     struct ethtool_rx_flow_spec *fs,
5486                                     u32 *unused_tuple)
5487 {
5488         if (fs->flow_type & FLOW_EXT) {
5489                 if (fs->h_ext.vlan_etype) {
5490                         dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
5491                         return -EOPNOTSUPP;
5492                 }
5493
5494                 if (!fs->h_ext.vlan_tci)
5495                         *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5496
5497                 if (fs->m_ext.vlan_tci &&
5498                     be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
5499                         dev_err(&hdev->pdev->dev,
5500                                 "failed to config vlan_tci, invalid vlan_tci: %u, max is %u.\n",
5501                                 ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
5502                         return -EINVAL;
5503                 }
5504         } else {
5505                 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5506         }
5507
5508         if (fs->flow_type & FLOW_MAC_EXT) {
5509                 if (hdev->fd_cfg.fd_mode !=
5510                     HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5511                         dev_err(&hdev->pdev->dev,
5512                                 "FLOW_MAC_EXT is not supported in current fd mode!\n");
5513                         return -EOPNOTSUPP;
5514                 }
5515
5516                 if (is_zero_ether_addr(fs->h_ext.h_dest))
5517                         *unused_tuple |= BIT(INNER_DST_MAC);
5518                 else
5519                         *unused_tuple &= ~BIT(INNER_DST_MAC);
5520         }
5521
5522         return 0;
5523 }
5524
5525 static int hclge_fd_check_spec(struct hclge_dev *hdev,
5526                                struct ethtool_rx_flow_spec *fs,
5527                                u32 *unused_tuple)
5528 {
5529         u32 flow_type;
5530         int ret;
5531
5532         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5533                 dev_err(&hdev->pdev->dev,
5534                         "failed to config fd rules, invalid rule location: %u, max is %u.\n",
5535                         fs->location,
5536                         hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
5537                 return -EINVAL;
5538         }
5539
5540         if ((fs->flow_type & FLOW_EXT) &&
5541             (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5542                 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5543                 return -EOPNOTSUPP;
5544         }
5545
5546         flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5547         switch (flow_type) {
5548         case SCTP_V4_FLOW:
5549         case TCP_V4_FLOW:
5550         case UDP_V4_FLOW:
5551                 ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
5552                                                   unused_tuple);
5553                 break;
5554         case IP_USER_FLOW:
5555                 ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
5556                                                unused_tuple);
5557                 break;
5558         case SCTP_V6_FLOW:
5559         case TCP_V6_FLOW:
5560         case UDP_V6_FLOW:
5561                 ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
5562                                                   unused_tuple);
5563                 break;
5564         case IPV6_USER_FLOW:
5565                 ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
5566                                                unused_tuple);
5567                 break;
5568         case ETHER_FLOW:
5569                 if (hdev->fd_cfg.fd_mode !=
5570                         HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5571                         dev_err(&hdev->pdev->dev,
5572                                 "ETHER_FLOW is not supported in current fd mode!\n");
5573                         return -EOPNOTSUPP;
5574                 }
5575
5576                 ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
5577                                                  unused_tuple);
5578                 break;
5579         default:
5580                 dev_err(&hdev->pdev->dev,
5581                         "unsupported protocol type, protocol type = %#x\n",
5582                         flow_type);
5583                 return -EOPNOTSUPP;
5584         }
5585
5586         if (ret) {
5587                 dev_err(&hdev->pdev->dev,
5588                         "failed to check flow union tuple, ret = %d\n",
5589                         ret);
5590                 return ret;
5591         }
5592
5593         return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
5594 }
5595
5596 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5597 {
5598         struct hclge_fd_rule *rule = NULL;
5599         struct hlist_node *node2;
5600
5601         spin_lock_bh(&hdev->fd_rule_lock);
5602         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5603                 if (rule->location >= location)
5604                         break;
5605         }
5606
5607         spin_unlock_bh(&hdev->fd_rule_lock);
5608
5609         return rule && rule->location == location;
5610 }
5611
5612 /* must be called with fd_rule_lock held */
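/* The rule list is kept sorted by rule location. On add, an existing rule
 * at the same location is replaced; on delete, the rule at @location is
 * removed and the fd bitmap and rule counters are updated accordingly.
 */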
5613 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5614                                      struct hclge_fd_rule *new_rule,
5615                                      u16 location,
5616                                      bool is_add)
5617 {
5618         struct hclge_fd_rule *rule = NULL, *parent = NULL;
5619         struct hlist_node *node2;
5620
5621         if (is_add && !new_rule)
5622                 return -EINVAL;
5623
5624         hlist_for_each_entry_safe(rule, node2,
5625                                   &hdev->fd_rule_list, rule_node) {
5626                 if (rule->location >= location)
5627                         break;
5628                 parent = rule;
5629         }
5630
5631         if (rule && rule->location == location) {
5632                 hlist_del(&rule->rule_node);
5633                 kfree(rule);
5634                 hdev->hclge_fd_rule_num--;
5635
5636                 if (!is_add) {
5637                         if (!hdev->hclge_fd_rule_num)
5638                                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5639                         clear_bit(location, hdev->fd_bmap);
5640
5641                         return 0;
5642                 }
5643         } else if (!is_add) {
5644                 dev_err(&hdev->pdev->dev,
5645                         "delete fail, rule %u does not exist\n",
5646                         location);
5647                 return -EINVAL;
5648         }
5649
5650         INIT_HLIST_NODE(&new_rule->rule_node);
5651
5652         if (parent)
5653                 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5654         else
5655                 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5656
5657         set_bit(location, hdev->fd_bmap);
5658         hdev->hclge_fd_rule_num++;
5659         hdev->fd_active_type = new_rule->rule_type;
5660
5661         return 0;
5662 }
5663
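/* hclge_fd_get_tuple - translate the ethtool flow spec into the driver's
 * hclge_fd_rule tuple representation: network-byte-order fields are
 * converted to host order, and the L4 protocol is filled in for the
 * SCTP/TCP/UDP flow types.
 */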
5664 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5665                               struct ethtool_rx_flow_spec *fs,
5666                               struct hclge_fd_rule *rule)
5667 {
5668         u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5669
5670         switch (flow_type) {
5671         case SCTP_V4_FLOW:
5672         case TCP_V4_FLOW:
5673         case UDP_V4_FLOW:
5674                 rule->tuples.src_ip[IPV4_INDEX] =
5675                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5676                 rule->tuples_mask.src_ip[IPV4_INDEX] =
5677                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5678
5679                 rule->tuples.dst_ip[IPV4_INDEX] =
5680                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5681                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5682                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5683
5684                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5685                 rule->tuples_mask.src_port =
5686                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5687
5688                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5689                 rule->tuples_mask.dst_port =
5690                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5691
5692                 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5693                 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5694
5695                 rule->tuples.ether_proto = ETH_P_IP;
5696                 rule->tuples_mask.ether_proto = 0xFFFF;
5697
5698                 break;
5699         case IP_USER_FLOW:
5700                 rule->tuples.src_ip[IPV4_INDEX] =
5701                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5702                 rule->tuples_mask.src_ip[IPV4_INDEX] =
5703                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5704
5705                 rule->tuples.dst_ip[IPV4_INDEX] =
5706                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5707                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5708                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5709
5710                 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5711                 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5712
5713                 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5714                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5715
5716                 rule->tuples.ether_proto = ETH_P_IP;
5717                 rule->tuples_mask.ether_proto = 0xFFFF;
5718
5719                 break;
5720         case SCTP_V6_FLOW:
5721         case TCP_V6_FLOW:
5722         case UDP_V6_FLOW:
5723                 be32_to_cpu_array(rule->tuples.src_ip,
5724                                   fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5725                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5726                                   fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5727
5728                 be32_to_cpu_array(rule->tuples.dst_ip,
5729                                   fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5730                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5731                                   fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5732
5733                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5734                 rule->tuples_mask.src_port =
5735                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5736
5737                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5738                 rule->tuples_mask.dst_port =
5739                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5740
5741                 rule->tuples.ether_proto = ETH_P_IPV6;
5742                 rule->tuples_mask.ether_proto = 0xFFFF;
5743
5744                 break;
5745         case IPV6_USER_FLOW:
5746                 be32_to_cpu_array(rule->tuples.src_ip,
5747                                   fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5748                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5749                                   fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5750
5751                 be32_to_cpu_array(rule->tuples.dst_ip,
5752                                   fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5753                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5754                                   fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5755
5756                 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5757                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5758
5759                 rule->tuples.ether_proto = ETH_P_IPV6;
5760                 rule->tuples_mask.ether_proto = 0xFFFF;
5761
5762                 break;
5763         case ETHER_FLOW:
5764                 ether_addr_copy(rule->tuples.src_mac,
5765                                 fs->h_u.ether_spec.h_source);
5766                 ether_addr_copy(rule->tuples_mask.src_mac,
5767                                 fs->m_u.ether_spec.h_source);
5768
5769                 ether_addr_copy(rule->tuples.dst_mac,
5770                                 fs->h_u.ether_spec.h_dest);
5771                 ether_addr_copy(rule->tuples_mask.dst_mac,
5772                                 fs->m_u.ether_spec.h_dest);
5773
5774                 rule->tuples.ether_proto =
5775                                 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5776                 rule->tuples_mask.ether_proto =
5777                                 be16_to_cpu(fs->m_u.ether_spec.h_proto);
5778
5779                 break;
5780         default:
5781                 return -EOPNOTSUPP;
5782         }
5783
5784         switch (flow_type) {
5785         case SCTP_V4_FLOW:
5786         case SCTP_V6_FLOW:
5787                 rule->tuples.ip_proto = IPPROTO_SCTP;
5788                 rule->tuples_mask.ip_proto = 0xFF;
5789                 break;
5790         case TCP_V4_FLOW:
5791         case TCP_V6_FLOW:
5792                 rule->tuples.ip_proto = IPPROTO_TCP;
5793                 rule->tuples_mask.ip_proto = 0xFF;
5794                 break;
5795         case UDP_V4_FLOW:
5796         case UDP_V6_FLOW:
5797                 rule->tuples.ip_proto = IPPROTO_UDP;
5798                 rule->tuples_mask.ip_proto = 0xFF;
5799                 break;
5800         default:
5801                 break;
5802         }
5803
5804         if (fs->flow_type & FLOW_EXT) {
5805                 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5806                 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5807         }
5808
5809         if (fs->flow_type & FLOW_MAC_EXT) {
5810                 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5811                 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5812         }
5813
5814         return 0;
5815 }
5816
5817 /* must be called with fd_rule_lock held */
5818 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5819                                 struct hclge_fd_rule *rule)
5820 {
5821         int ret;
5822
5823         if (!rule) {
5824                 dev_err(&hdev->pdev->dev,
5825                         "The flow director rule is NULL\n");
5826                 return -EINVAL;
5827         }
5828
5829         /* it never fails here, so there is no need to check the return value */
5830         hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5831
5832         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5833         if (ret)
5834                 goto clear_rule;
5835
5836         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5837         if (ret)
5838                 goto clear_rule;
5839
5840         return 0;
5841
5842 clear_rule:
5843         hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5844         return ret;
5845 }
5846
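/* Called when a flow director rule is added through ethtool
 * (ETHTOOL_SRXCLSRLINS). For example (illustrative values only), a rule
 * installed with
 *   ethtool -N <dev> flow-type tcp4 dst-ip 192.168.1.10 dst-port 80 action 3 loc 5
 * arrives here with fs->location == 5 and fs->ring_cookie == 3, steering
 * matching packets to queue 3, while "action -1" maps to RX_CLS_FLOW_DISC
 * and drops them instead.
 */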
5847 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5848                               struct ethtool_rxnfc *cmd)
5849 {
5850         struct hclge_vport *vport = hclge_get_vport(handle);
5851         struct hclge_dev *hdev = vport->back;
5852         u16 dst_vport_id = 0, q_index = 0;
5853         struct ethtool_rx_flow_spec *fs;
5854         struct hclge_fd_rule *rule;
5855         u32 unused = 0;
5856         u8 action;
5857         int ret;
5858
5859         if (!hnae3_dev_fd_supported(hdev)) {
5860                 dev_err(&hdev->pdev->dev,
5861                         "flow director is not supported\n");
5862                 return -EOPNOTSUPP;
5863         }
5864
5865         if (!hdev->fd_en) {
5866                 dev_err(&hdev->pdev->dev,
5867                         "please enable flow director first\n");
5868                 return -EOPNOTSUPP;
5869         }
5870
5871         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5872
5873         ret = hclge_fd_check_spec(hdev, fs, &unused);
5874         if (ret)
5875                 return ret;
5876
5877         if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5878                 action = HCLGE_FD_ACTION_DROP_PACKET;
5879         } else {
5880                 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5881                 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5882                 u16 tqps;
5883
5884                 if (vf > hdev->num_req_vfs) {
5885                         dev_err(&hdev->pdev->dev,
5886                                 "Error: vf id (%u) > max vf num (%u)\n",
5887                                 vf, hdev->num_req_vfs);
5888                         return -EINVAL;
5889                 }
5890
5891                 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5892                 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5893
5894                 if (ring >= tqps) {
5895                         dev_err(&hdev->pdev->dev,
5896                                 "Error: queue id (%u) > max tqp num (%u)\n",
5897                                 ring, tqps - 1);
5898                         return -EINVAL;
5899                 }
5900
5901                 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5902                 q_index = ring;
5903         }
5904
5905         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5906         if (!rule)
5907                 return -ENOMEM;
5908
5909         ret = hclge_fd_get_tuple(hdev, fs, rule);
5910         if (ret) {
5911                 kfree(rule);
5912                 return ret;
5913         }
5914
5915         rule->flow_type = fs->flow_type;
5916         rule->location = fs->location;
5917         rule->unused_tuple = unused;
5918         rule->vf_id = dst_vport_id;
5919         rule->queue_id = q_index;
5920         rule->action = action;
5921         rule->rule_type = HCLGE_FD_EP_ACTIVE;
5922
5923         /* to avoid rule conflicts, when the user configures a rule via
5924          * ethtool, we need to clear all arfs rules
5925          */
5926         spin_lock_bh(&hdev->fd_rule_lock);
5927         hclge_clear_arfs_rules(handle);
5928
5929         ret = hclge_fd_config_rule(hdev, rule);
5930
5931         spin_unlock_bh(&hdev->fd_rule_lock);
5932
5933         return ret;
5934 }
5935
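/* Called when a rule is deleted through ethtool (ETHTOOL_SRXCLSRLDEL),
 * e.g. "ethtool -N <dev> delete 5" removes the rule at location 5 from
 * both the hardware TCAM and the software rule list.
 */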
5936 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5937                               struct ethtool_rxnfc *cmd)
5938 {
5939         struct hclge_vport *vport = hclge_get_vport(handle);
5940         struct hclge_dev *hdev = vport->back;
5941         struct ethtool_rx_flow_spec *fs;
5942         int ret;
5943
5944         if (!hnae3_dev_fd_supported(hdev))
5945                 return -EOPNOTSUPP;
5946
5947         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5948
5949         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5950                 return -EINVAL;
5951
5952         if (!hclge_fd_rule_exist(hdev, fs->location)) {
5953                 dev_err(&hdev->pdev->dev,
5954                         "Delete fail, rule %u does not exist\n", fs->location);
5955                 return -ENOENT;
5956         }
5957
5958         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
5959                                    NULL, false);
5960         if (ret)
5961                 return ret;
5962
5963         spin_lock_bh(&hdev->fd_rule_lock);
5964         ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5965
5966         spin_unlock_bh(&hdev->fd_rule_lock);
5967
5968         return ret;
5969 }
5970
5971 /* must be called with fd_rule_lock held */
5972 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5973                                      bool clear_list)
5974 {
5975         struct hclge_vport *vport = hclge_get_vport(handle);
5976         struct hclge_dev *hdev = vport->back;
5977         struct hclge_fd_rule *rule;
5978         struct hlist_node *node;
5979         u16 location;
5980
5981         if (!hnae3_dev_fd_supported(hdev))
5982                 return;
5983
5984         for_each_set_bit(location, hdev->fd_bmap,
5985                          hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5986                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5987                                      NULL, false);
5988
5989         if (clear_list) {
5990                 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5991                                           rule_node) {
5992                         hlist_del(&rule->rule_node);
5993                         kfree(rule);
5994                 }
5995                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5996                 hdev->hclge_fd_rule_num = 0;
5997                 bitmap_zero(hdev->fd_bmap,
5998                             hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5999         }
6000 }
6001
6002 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
6003 {
6004         struct hclge_vport *vport = hclge_get_vport(handle);
6005         struct hclge_dev *hdev = vport->back;
6006         struct hclge_fd_rule *rule;
6007         struct hlist_node *node;
6008         int ret;
6009
6010         /* Return ok here, because reset error handling will check this
6011          * return value. If error is returned here, the reset process will
6012          * fail.
6013          */
6014         if (!hnae3_dev_fd_supported(hdev))
6015                 return 0;
6016
6017         /* if fd is disabled, it should not be restored during reset */
6018         if (!hdev->fd_en)
6019                 return 0;
6020
6021         spin_lock_bh(&hdev->fd_rule_lock);
6022         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6023                 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6024                 if (!ret)
6025                         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6026
6027                 if (ret) {
6028                         dev_warn(&hdev->pdev->dev,
6029                                  "Restore rule %u failed, remove it\n",
6030                                  rule->location);
6031                         clear_bit(rule->location, hdev->fd_bmap);
6032                         hlist_del(&rule->rule_node);
6033                         kfree(rule);
6034                         hdev->hclge_fd_rule_num--;
6035                 }
6036         }
6037
6038         if (hdev->hclge_fd_rule_num)
6039                 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
6040
6041         spin_unlock_bh(&hdev->fd_rule_lock);
6042
6043         return 0;
6044 }
6045
6046 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
6047                                  struct ethtool_rxnfc *cmd)
6048 {
6049         struct hclge_vport *vport = hclge_get_vport(handle);
6050         struct hclge_dev *hdev = vport->back;
6051
6052         if (!hnae3_dev_fd_supported(hdev))
6053                 return -EOPNOTSUPP;
6054
6055         cmd->rule_cnt = hdev->hclge_fd_rule_num;
6056         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6057
6058         return 0;
6059 }
6060
6061 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
6062                                      struct ethtool_tcpip4_spec *spec,
6063                                      struct ethtool_tcpip4_spec *spec_mask)
6064 {
6065         spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6066         spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6067                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6068
6069         spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6070         spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6071                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6072
6073         spec->psrc = cpu_to_be16(rule->tuples.src_port);
6074         spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6075                         0 : cpu_to_be16(rule->tuples_mask.src_port);
6076
6077         spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6078         spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6079                         0 : cpu_to_be16(rule->tuples_mask.dst_port);
6080
6081         spec->tos = rule->tuples.ip_tos;
6082         spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6083                         0 : rule->tuples_mask.ip_tos;
6084 }
6085
6086 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
6087                                   struct ethtool_usrip4_spec *spec,
6088                                   struct ethtool_usrip4_spec *spec_mask)
6089 {
6090         spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6091         spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6092                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6093
6094         spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6095         spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6096                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6097
6098         spec->tos = rule->tuples.ip_tos;
6099         spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6100                         0 : rule->tuples_mask.ip_tos;
6101
6102         spec->proto = rule->tuples.ip_proto;
6103         spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6104                         0 : rule->tuples_mask.ip_proto;
6105
6106         spec->ip_ver = ETH_RX_NFC_IP4;
6107 }
6108
6109 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6110                                      struct ethtool_tcpip6_spec *spec,
6111                                      struct ethtool_tcpip6_spec *spec_mask)
6112 {
6113         cpu_to_be32_array(spec->ip6src,
6114                           rule->tuples.src_ip, IPV6_SIZE);
6115         cpu_to_be32_array(spec->ip6dst,
6116                           rule->tuples.dst_ip, IPV6_SIZE);
6117         if (rule->unused_tuple & BIT(INNER_SRC_IP))
6118                 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6119         else
6120                 cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6121                                   IPV6_SIZE);
6122
6123         if (rule->unused_tuple & BIT(INNER_DST_IP))
6124                 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6125         else
6126                 cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6127                                   IPV6_SIZE);
6128
6129         spec->psrc = cpu_to_be16(rule->tuples.src_port);
6130         spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6131                         0 : cpu_to_be16(rule->tuples_mask.src_port);
6132
6133         spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6134         spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6135                         0 : cpu_to_be16(rule->tuples_mask.dst_port);
6136 }
6137
6138 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6139                                   struct ethtool_usrip6_spec *spec,
6140                                   struct ethtool_usrip6_spec *spec_mask)
6141 {
6142         cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6143         cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6144         if (rule->unused_tuple & BIT(INNER_SRC_IP))
6145                 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6146         else
6147                 cpu_to_be32_array(spec_mask->ip6src,
6148                                   rule->tuples_mask.src_ip, IPV6_SIZE);
6149
6150         if (rule->unused_tuple & BIT(INNER_DST_IP))
6151                 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6152         else
6153                 cpu_to_be32_array(spec_mask->ip6dst,
6154                                   rule->tuples_mask.dst_ip, IPV6_SIZE);
6155
6156         spec->l4_proto = rule->tuples.ip_proto;
6157         spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6158                         0 : rule->tuples_mask.ip_proto;
6159 }
6160
6161 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6162                                     struct ethhdr *spec,
6163                                     struct ethhdr *spec_mask)
6164 {
6165         ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6166         ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6167
6168         if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6169                 eth_zero_addr(spec_mask->h_source);
6170         else
6171                 ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6172
6173         if (rule->unused_tuple & BIT(INNER_DST_MAC))
6174                 eth_zero_addr(spec_mask->h_dest);
6175         else
6176                 ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6177
6178         spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6179         spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6180                         0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6181 }
6182
6183 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
6184                                   struct hclge_fd_rule *rule)
6185 {
6186         if (fs->flow_type & FLOW_EXT) {
6187                 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6188                 fs->m_ext.vlan_tci =
6189                                 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6190                                 cpu_to_be16(VLAN_VID_MASK) :
6191                                 cpu_to_be16(rule->tuples_mask.vlan_tag1);
6192         }
6193
6194         if (fs->flow_type & FLOW_MAC_EXT) {
6195                 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6196                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6197                         eth_zero_addr(fs->m_u.ether_spec.h_dest);
6198                 else
6199                         ether_addr_copy(fs->m_u.ether_spec.h_dest,
6200                                         rule->tuples_mask.dst_mac);
6201         }
6202 }
6203
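/* Rebuild an ethtool flow spec from a stored rule for ETHTOOL_GRXCLSRULE:
 * look up the rule by fs->location and fill in the match fields, the masks
 * and the ring cookie (queue id plus VF id, or RX_CLS_FLOW_DISC for drop
 * rules).
 */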
6204 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
6205                                   struct ethtool_rxnfc *cmd)
6206 {
6207         struct hclge_vport *vport = hclge_get_vport(handle);
6208         struct hclge_fd_rule *rule = NULL;
6209         struct hclge_dev *hdev = vport->back;
6210         struct ethtool_rx_flow_spec *fs;
6211         struct hlist_node *node2;
6212
6213         if (!hnae3_dev_fd_supported(hdev))
6214                 return -EOPNOTSUPP;
6215
6216         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6217
6218         spin_lock_bh(&hdev->fd_rule_lock);
6219
6220         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
6221                 if (rule->location >= fs->location)
6222                         break;
6223         }
6224
6225         if (!rule || fs->location != rule->location) {
6226                 spin_unlock_bh(&hdev->fd_rule_lock);
6227
6228                 return -ENOENT;
6229         }
6230
6231         fs->flow_type = rule->flow_type;
6232         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
6233         case SCTP_V4_FLOW:
6234         case TCP_V4_FLOW:
6235         case UDP_V4_FLOW:
6236                 hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
6237                                          &fs->m_u.tcp_ip4_spec);
6238                 break;
6239         case IP_USER_FLOW:
6240                 hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
6241                                       &fs->m_u.usr_ip4_spec);
6242                 break;
6243         case SCTP_V6_FLOW:
6244         case TCP_V6_FLOW:
6245         case UDP_V6_FLOW:
6246                 hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
6247                                          &fs->m_u.tcp_ip6_spec);
6248                 break;
6249         case IPV6_USER_FLOW:
6250                 hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
6251                                       &fs->m_u.usr_ip6_spec);
6252                 break;
6253         /* The flow type of the fd rule has been checked before it was added
6254          * to the rule list. As the other flow types have been handled above,
6255          * it must be ETHER_FLOW in the default case.
6256          */
6257         default:
6258                 hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
6259                                         &fs->m_u.ether_spec);
6260                 break;
6261         }
6262
6263         hclge_fd_get_ext_info(fs, rule);
6264
6265         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6266                 fs->ring_cookie = RX_CLS_FLOW_DISC;
6267         } else {
6268                 u64 vf_id;
6269
6270                 fs->ring_cookie = rule->queue_id;
6271                 vf_id = rule->vf_id;
6272                 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6273                 fs->ring_cookie |= vf_id;
6274         }
6275
6276         spin_unlock_bh(&hdev->fd_rule_lock);
6277
6278         return 0;
6279 }
6280
6281 static int hclge_get_all_rules(struct hnae3_handle *handle,
6282                                struct ethtool_rxnfc *cmd, u32 *rule_locs)
6283 {
6284         struct hclge_vport *vport = hclge_get_vport(handle);
6285         struct hclge_dev *hdev = vport->back;
6286         struct hclge_fd_rule *rule;
6287         struct hlist_node *node2;
6288         int cnt = 0;
6289
6290         if (!hnae3_dev_fd_supported(hdev))
6291                 return -EOPNOTSUPP;
6292
6293         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6294
6295         spin_lock_bh(&hdev->fd_rule_lock);
6296         hlist_for_each_entry_safe(rule, node2,
6297                                   &hdev->fd_rule_list, rule_node) {
6298                 if (cnt == cmd->rule_cnt) {
6299                         spin_unlock_bh(&hdev->fd_rule_lock);
6300                         return -EMSGSIZE;
6301                 }
6302
6303                 rule_locs[cnt] = rule->location;
6304                 cnt++;
6305         }
6306
6307         spin_unlock_bh(&hdev->fd_rule_lock);
6308
6309         cmd->rule_cnt = cnt;
6310
6311         return 0;
6312 }
6313
6314 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6315                                      struct hclge_fd_rule_tuples *tuples)
6316 {
6317 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
6318 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
6319
6320         tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6321         tuples->ip_proto = fkeys->basic.ip_proto;
6322         tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6323
6324         if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6325                 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6326                 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6327         } else {
6328                 int i;
6329
6330                 for (i = 0; i < IPV6_SIZE; i++) {
6331                         tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
6332                         tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
6333                 }
6334         }
6335 }
6336
6337 /* traverse all rules, checking whether an existing rule has the same tuples */
6338 static struct hclge_fd_rule *
6339 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6340                           const struct hclge_fd_rule_tuples *tuples)
6341 {
6342         struct hclge_fd_rule *rule = NULL;
6343         struct hlist_node *node;
6344
6345         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6346                 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6347                         return rule;
6348         }
6349
6350         return NULL;
6351 }
6352
6353 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6354                                      struct hclge_fd_rule *rule)
6355 {
6356         rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6357                              BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6358                              BIT(INNER_SRC_PORT);
6359         rule->action = 0;
6360         rule->vf_id = 0;
6361         rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6362         if (tuples->ether_proto == ETH_P_IP) {
6363                 if (tuples->ip_proto == IPPROTO_TCP)
6364                         rule->flow_type = TCP_V4_FLOW;
6365                 else
6366                         rule->flow_type = UDP_V4_FLOW;
6367         } else {
6368                 if (tuples->ip_proto == IPPROTO_TCP)
6369                         rule->flow_type = TCP_V6_FLOW;
6370                 else
6371                         rule->flow_type = UDP_V6_FLOW;
6372         }
6373         memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6374         memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
6375 }
6376
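/* aRFS entry point: steer an incoming flow to the queue of its consumer.
 * If no rule matches the flow tuples, a free location is allocated and a
 * new rule is programmed; if a rule exists but points at another queue,
 * only its action is rewritten. The returned location doubles as the
 * filter id that hclge_rfs_filter_expire() later passes to
 * rps_may_expire_flow().
 */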
6377 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6378                                       u16 flow_id, struct flow_keys *fkeys)
6379 {
6380         struct hclge_vport *vport = hclge_get_vport(handle);
6381         struct hclge_fd_rule_tuples new_tuples = {};
6382         struct hclge_dev *hdev = vport->back;
6383         struct hclge_fd_rule *rule;
6384         u16 tmp_queue_id;
6385         u16 bit_id;
6386         int ret;
6387
6388         if (!hnae3_dev_fd_supported(hdev))
6389                 return -EOPNOTSUPP;
6390
6391         /* when an fd rule added by the user already exists,
6392          * arfs should not work
6393          */
6394         spin_lock_bh(&hdev->fd_rule_lock);
6395         if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6396                 spin_unlock_bh(&hdev->fd_rule_lock);
6397                 return -EOPNOTSUPP;
6398         }
6399
6400         hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6401
6402         /* check whether a flow director filter exists for this flow;
6403          * if not, create a new filter for it;
6404          * if a filter exists with a different queue id, modify the filter;
6405          * if a filter exists with the same queue id, do nothing
6406          */
6407         rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6408         if (!rule) {
6409                 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6410                 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6411                         spin_unlock_bh(&hdev->fd_rule_lock);
6412                         return -ENOSPC;
6413                 }
6414
6415                 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
6416                 if (!rule) {
6417                         spin_unlock_bh(&hdev->fd_rule_lock);
6418                         return -ENOMEM;
6419                 }
6420
6421                 set_bit(bit_id, hdev->fd_bmap);
6422                 rule->location = bit_id;
6423                 rule->flow_id = flow_id;
6424                 rule->queue_id = queue_id;
6425                 hclge_fd_build_arfs_rule(&new_tuples, rule);
6426                 ret = hclge_fd_config_rule(hdev, rule);
6427
6428                 spin_unlock_bh(&hdev->fd_rule_lock);
6429
6430                 if (ret)
6431                         return ret;
6432
6433                 return rule->location;
6434         }
6435
6436         spin_unlock_bh(&hdev->fd_rule_lock);
6437
6438         if (rule->queue_id == queue_id)
6439                 return rule->location;
6440
6441         tmp_queue_id = rule->queue_id;
6442         rule->queue_id = queue_id;
6443         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6444         if (ret) {
6445                 rule->queue_id = tmp_queue_id;
6446                 return ret;
6447         }
6448
6449         return rule->location;
6450 }
6451
6452 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6453 {
6454 #ifdef CONFIG_RFS_ACCEL
6455         struct hnae3_handle *handle = &hdev->vport[0].nic;
6456         struct hclge_fd_rule *rule;
6457         struct hlist_node *node;
6458         HLIST_HEAD(del_list);
6459
6460         spin_lock_bh(&hdev->fd_rule_lock);
6461         if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6462                 spin_unlock_bh(&hdev->fd_rule_lock);
6463                 return;
6464         }
6465         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6466                 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6467                                         rule->flow_id, rule->location)) {
6468                         hlist_del_init(&rule->rule_node);
6469                         hlist_add_head(&rule->rule_node, &del_list);
6470                         hdev->hclge_fd_rule_num--;
6471                         clear_bit(rule->location, hdev->fd_bmap);
6472                 }
6473         }
6474         spin_unlock_bh(&hdev->fd_rule_lock);
6475
6476         hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6477                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6478                                      rule->location, NULL, false);
6479                 kfree(rule);
6480         }
6481 #endif
6482 }
6483
6484 /* must be called with fd_rule_lock held */
6485 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6486 {
6487 #ifdef CONFIG_RFS_ACCEL
6488         struct hclge_vport *vport = hclge_get_vport(handle);
6489         struct hclge_dev *hdev = vport->back;
6490
6491         if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6492                 hclge_del_all_fd_entries(handle, true);
6493 #endif
6494 }
6495
6496 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6497 {
6498         struct hclge_vport *vport = hclge_get_vport(handle);
6499         struct hclge_dev *hdev = vport->back;
6500
6501         return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6502                hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6503 }
6504
6505 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
6506 {
6507         struct hclge_vport *vport = hclge_get_vport(handle);
6508         struct hclge_dev *hdev = vport->back;
6509
6510         return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
6511 }
6512
6513 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6514 {
6515         struct hclge_vport *vport = hclge_get_vport(handle);
6516         struct hclge_dev *hdev = vport->back;
6517
6518         return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6519 }
6520
6521 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6522 {
6523         struct hclge_vport *vport = hclge_get_vport(handle);
6524         struct hclge_dev *hdev = vport->back;
6525
6526         return hdev->rst_stats.hw_reset_done_cnt;
6527 }
6528
6529 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
6530 {
6531         struct hclge_vport *vport = hclge_get_vport(handle);
6532         struct hclge_dev *hdev = vport->back;
6533         bool clear;
6534
6535         hdev->fd_en = enable;
6536         clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
6537
6538         if (!enable) {
6539                 spin_lock_bh(&hdev->fd_rule_lock);
6540                 hclge_del_all_fd_entries(handle, clear);
6541                 spin_unlock_bh(&hdev->fd_rule_lock);
6542         } else {
6543                 hclge_restore_fd_entries(handle);
6544         }
6545 }
6546
6547 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
6548 {
6549         struct hclge_desc desc;
6550         struct hclge_config_mac_mode_cmd *req =
6551                 (struct hclge_config_mac_mode_cmd *)desc.data;
6552         u32 loop_en = 0;
6553         int ret;
6554
6555         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
6556
6557         if (enable) {
6558                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
6559                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
6560                 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
6561                 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
6562                 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
6563                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
6564                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
6565                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
6566                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
6567                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
6568         }
6569
6570         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6571
6572         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6573         if (ret)
6574                 dev_err(&hdev->pdev->dev,
6575                         "mac enable fail, ret = %d.\n", ret);
6576 }
6577
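/* Read the current MAC VLAN switch parameter of the function selected by
 * @vfid, merge in @switch_param under @param_mask and write it back.
 */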
6578 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
6579                                      u8 switch_param, u8 param_mask)
6580 {
6581         struct hclge_mac_vlan_switch_cmd *req;
6582         struct hclge_desc desc;
6583         u32 func_id;
6584         int ret;
6585
6586         func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
6587         req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
6588
6589         /* read current config parameter */
6590         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
6591                                    true);
6592         req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
6593         req->func_id = cpu_to_le32(func_id);
6594
6595         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6596         if (ret) {
6597                 dev_err(&hdev->pdev->dev,
6598                         "read mac vlan switch parameter fail, ret = %d\n", ret);
6599                 return ret;
6600         }
6601
6602         /* modify and write new config parameter */
6603         hclge_cmd_reuse_desc(&desc, false);
6604         req->switch_param = (req->switch_param & param_mask) | switch_param;
6605         req->param_mask = param_mask;
6606
6607         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6608         if (ret)
6609                 dev_err(&hdev->pdev->dev,
6610                         "set mac vlan switch parameter fail, ret = %d\n", ret);
6611         return ret;
6612 }
6613
6614 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
6615                                        int link_ret)
6616 {
6617 #define HCLGE_PHY_LINK_STATUS_NUM  200
6618
6619         struct phy_device *phydev = hdev->hw.mac.phydev;
6620         int i = 0;
6621         int ret;
6622
6623         do {
6624                 ret = phy_read_status(phydev);
6625                 if (ret) {
6626                         dev_err(&hdev->pdev->dev,
6627                                 "phy update link status fail, ret = %d\n", ret);
6628                         return;
6629                 }
6630
6631                 if (phydev->link == link_ret)
6632                         break;
6633
6634                 msleep(HCLGE_LINK_STATUS_MS);
6635         } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
6636 }
6637
6638 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
6639 {
6640 #define HCLGE_MAC_LINK_STATUS_NUM  100
6641
6642         int link_status;
6643         int i = 0;
6644         int ret;
6645
6646         do {
6647                 ret = hclge_get_mac_link_status(hdev, &link_status);
6648                 if (ret)
6649                         return ret;
6650                 if (link_status == link_ret)
6651                         return 0;
6652
6653                 msleep(HCLGE_LINK_STATUS_MS);
6654         } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
6655         return -EBUSY;
6656 }
6657
6658 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
6659                                           bool is_phy)
6660 {
6661         int link_ret;
6662
6663         link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
6664
6665         if (is_phy)
6666                 hclge_phy_link_status_wait(hdev, link_ret);
6667
6668         return hclge_mac_link_status_wait(hdev, link_ret);
6669 }
6670
6671 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
6672 {
6673         struct hclge_config_mac_mode_cmd *req;
6674         struct hclge_desc desc;
6675         u32 loop_en;
6676         int ret;
6677
6678         req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
6679         /* 1 Read out the MAC mode config at first */
6680         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
6681         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6682         if (ret) {
6683                 dev_err(&hdev->pdev->dev,
6684                         "mac loopback get fail, ret = %d.\n", ret);
6685                 return ret;
6686         }
6687
6688         /* 2 Then setup the loopback flag */
6689         loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
6690         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
6691
6692         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6693
6694         /* 3 Config mac work mode with loopback flag
6695          * and its original configure parameters
6696          */
6697         hclge_cmd_reuse_desc(&desc, false);
6698         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6699         if (ret)
6700                 dev_err(&hdev->pdev->dev,
6701                         "mac loopback set fail, ret = %d.\n", ret);
6702         return ret;
6703 }
6704
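/* Configure serdes (serial or parallel) loopback through the firmware
 * command queue, then poll the result field until the DONE bit is set or
 * the retry budget is exhausted; the SUCCESS bit reports whether the
 * firmware applied the change.
 */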
6705 static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
6706                                      enum hnae3_loop loop_mode)
6707 {
6708 #define HCLGE_SERDES_RETRY_MS   10
6709 #define HCLGE_SERDES_RETRY_NUM  100
6710
6711         struct hclge_serdes_lb_cmd *req;
6712         struct hclge_desc desc;
6713         int ret, i = 0;
6714         u8 loop_mode_b;
6715
6716         req = (struct hclge_serdes_lb_cmd *)desc.data;
6717         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
6718
6719         switch (loop_mode) {
6720         case HNAE3_LOOP_SERIAL_SERDES:
6721                 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6722                 break;
6723         case HNAE3_LOOP_PARALLEL_SERDES:
6724                 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
6725                 break;
6726         default:
6727                 dev_err(&hdev->pdev->dev,
6728                         "unsupported serdes loopback mode %d\n", loop_mode);
6729                 return -ENOTSUPP;
6730         }
6731
6732         if (en) {
6733                 req->enable = loop_mode_b;
6734                 req->mask = loop_mode_b;
6735         } else {
6736                 req->mask = loop_mode_b;
6737         }
6738
6739         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6740         if (ret) {
6741                 dev_err(&hdev->pdev->dev,
6742                         "serdes loopback set fail, ret = %d\n", ret);
6743                 return ret;
6744         }
6745
6746         do {
6747                 msleep(HCLGE_SERDES_RETRY_MS);
6748                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
6749                                            true);
6750                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6751                 if (ret) {
6752                         dev_err(&hdev->pdev->dev,
6753                                 "serdes loopback get fail, ret = %d\n", ret);
6754                         return ret;
6755                 }
6756         } while (++i < HCLGE_SERDES_RETRY_NUM &&
6757                  !(req->result & HCLGE_CMD_SERDES_DONE_B));
6758
6759         if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
6760                 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
6761                 return -EBUSY;
6762         } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
6763                 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
6764                 return -EIO;
6765         }
6766         return ret;
6767 }
6768
6769 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
6770                                      enum hnae3_loop loop_mode)
6771 {
6772         int ret;
6773
6774         ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
6775         if (ret)
6776                 return ret;
6777
6778         hclge_cfg_mac_mode(hdev, en);
6779
6780         ret = hclge_mac_phy_link_status_wait(hdev, en, false);
6781         if (ret)
6782                 dev_err(&hdev->pdev->dev,
6783                         "serdes loopback config mac mode timeout\n");
6784
6785         return ret;
6786 }
6787
6788 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
6789                                      struct phy_device *phydev)
6790 {
6791         int ret;
6792
6793         if (!phydev->suspended) {
6794                 ret = phy_suspend(phydev);
6795                 if (ret)
6796                         return ret;
6797         }
6798
6799         ret = phy_resume(phydev);
6800         if (ret)
6801                 return ret;
6802
6803         return phy_loopback(phydev, true);
6804 }
6805
6806 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
6807                                       struct phy_device *phydev)
6808 {
6809         int ret;
6810
6811         ret = phy_loopback(phydev, false);
6812         if (ret)
6813                 return ret;
6814
6815         return phy_suspend(phydev);
6816 }
6817
6818 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
6819 {
6820         struct phy_device *phydev = hdev->hw.mac.phydev;
6821         int ret;
6822
6823         if (!phydev)
6824                 return -ENOTSUPP;
6825
6826         if (en)
6827                 ret = hclge_enable_phy_loopback(hdev, phydev);
6828         else
6829                 ret = hclge_disable_phy_loopback(hdev, phydev);
6830         if (ret) {
6831                 dev_err(&hdev->pdev->dev,
6832                         "set phy loopback fail, ret = %d\n", ret);
6833                 return ret;
6834         }
6835
6836         hclge_cfg_mac_mode(hdev, en);
6837
6838         ret = hclge_mac_phy_link_status_wait(hdev, en, true);
6839         if (ret)
6840                 dev_err(&hdev->pdev->dev,
6841                         "phy loopback config mac mode timeout\n");
6842
6843         return ret;
6844 }
6845
6846 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
6847                             int stream_id, bool enable)
6848 {
6849         struct hclge_desc desc;
6850         struct hclge_cfg_com_tqp_queue_cmd *req =
6851                 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6852         int ret;
6853
6854         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6855         req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6856         req->stream_id = cpu_to_le16(stream_id);
6857         if (enable)
6858                 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
6859
6860         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6861         if (ret)
6862                 dev_err(&hdev->pdev->dev,
6863                         "Tqp enable fail, status = %d.\n", ret);
6864         return ret;
6865 }
6866
6867 static int hclge_set_loopback(struct hnae3_handle *handle,
6868                               enum hnae3_loop loop_mode, bool en)
6869 {
6870         struct hclge_vport *vport = hclge_get_vport(handle);
6871         struct hnae3_knic_private_info *kinfo;
6872         struct hclge_dev *hdev = vport->back;
6873         int i, ret;
6874
6875         /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
6876          * default, SSU loopback is enabled, so if the SMAC and the DMAC are
6877          * the same, the packets are looped back in the SSU. If SSU loopback
6878          * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
6879          */
6880         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
6881                 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
6882
6883                 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
6884                                                 HCLGE_SWITCH_ALW_LPBK_MASK);
6885                 if (ret)
6886                         return ret;
6887         }
6888
6889         switch (loop_mode) {
6890         case HNAE3_LOOP_APP:
6891                 ret = hclge_set_app_loopback(hdev, en);
6892                 break;
6893         case HNAE3_LOOP_SERIAL_SERDES:
6894         case HNAE3_LOOP_PARALLEL_SERDES:
6895                 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6896                 break;
6897         case HNAE3_LOOP_PHY:
6898                 ret = hclge_set_phy_loopback(hdev, en);
6899                 break;
6900         default:
6901                 ret = -ENOTSUPP;
6902                 dev_err(&hdev->pdev->dev,
6903                         "loop_mode %d is not supported\n", loop_mode);
6904                 break;
6905         }
6906
6907         if (ret)
6908                 return ret;
6909
6910         kinfo = &vport->nic.kinfo;
6911         for (i = 0; i < kinfo->num_tqps; i++) {
6912                 ret = hclge_tqp_enable(hdev, i, 0, en);
6913                 if (ret)
6914                         return ret;
6915         }
6916
6917         return 0;
6918 }
6919
6920 static int hclge_set_default_loopback(struct hclge_dev *hdev)
6921 {
6922         int ret;
6923
6924         ret = hclge_set_app_loopback(hdev, false);
6925         if (ret)
6926                 return ret;
6927
6928         ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
6929         if (ret)
6930                 return ret;
6931
6932         return hclge_cfg_serdes_loopback(hdev, false,
6933                                          HNAE3_LOOP_PARALLEL_SERDES);
6934 }
6935
6936 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6937 {
6938         struct hclge_vport *vport = hclge_get_vport(handle);
6939         struct hnae3_knic_private_info *kinfo;
6940         struct hnae3_queue *queue;
6941         struct hclge_tqp *tqp;
6942         int i;
6943
6944         kinfo = &vport->nic.kinfo;
6945         for (i = 0; i < kinfo->num_tqps; i++) {
6946                 queue = handle->kinfo.tqp[i];
6947                 tqp = container_of(queue, struct hclge_tqp, q);
6948                 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6949         }
6950 }
6951
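/* Busy-wait until an in-flight link status update in the service task has
 * finished, bounded by HCLGE_FLUSH_LINK_TIMEOUT iterations; the wait also
 * ends early once the service task has made progress since the flush
 * started.
 */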
6952 static void hclge_flush_link_update(struct hclge_dev *hdev)
6953 {
6954 #define HCLGE_FLUSH_LINK_TIMEOUT        100000
6955
6956         unsigned long last = hdev->serv_processed_cnt;
6957         int i = 0;
6958
6959         while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
6960                i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
6961                last == hdev->serv_processed_cnt)
6962                 usleep_range(1, 1);
6963 }
6964
6965 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6966 {
6967         struct hclge_vport *vport = hclge_get_vport(handle);
6968         struct hclge_dev *hdev = vport->back;
6969
6970         if (enable) {
6971                 hclge_task_schedule(hdev, 0);
6972         } else {
6973                 /* Set the DOWN flag here to disable link updating */
6974                 set_bit(HCLGE_STATE_DOWN, &hdev->state);
6975
6976                 /* flush memory to make sure DOWN is seen by service task */
6977                 smp_mb__before_atomic();
6978                 hclge_flush_link_update(hdev);
6979         }
6980 }
6981
6982 static int hclge_ae_start(struct hnae3_handle *handle)
6983 {
6984         struct hclge_vport *vport = hclge_get_vport(handle);
6985         struct hclge_dev *hdev = vport->back;
6986
6987         /* mac enable */
6988         hclge_cfg_mac_mode(hdev, true);
6989         clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6990         hdev->hw.mac.link = 0;
6991
6992         /* reset tqp stats */
6993         hclge_reset_tqp_stats(handle);
6994
6995         hclge_mac_start_phy(hdev);
6996
6997         return 0;
6998 }
6999
7000 static void hclge_ae_stop(struct hnae3_handle *handle)
7001 {
7002         struct hclge_vport *vport = hclge_get_vport(handle);
7003         struct hclge_dev *hdev = vport->back;
7004         int i;
7005
7006         set_bit(HCLGE_STATE_DOWN, &hdev->state);
7007         spin_lock_bh(&hdev->fd_rule_lock);
7008         hclge_clear_arfs_rules(handle);
7009         spin_unlock_bh(&hdev->fd_rule_lock);
7010
7011         /* If it is not PF reset, the firmware will disable the MAC,

7012          * so it only needs to stop the phy here.
7013          */
7014         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
7015             hdev->reset_type != HNAE3_FUNC_RESET) {
7016                 hclge_mac_stop_phy(hdev);
7017                 hclge_update_link_status(hdev);
7018                 return;
7019         }
7020
7021         for (i = 0; i < handle->kinfo.num_tqps; i++)
7022                 hclge_reset_tqp(handle, i);
7023
7024         hclge_config_mac_tnl_int(hdev, false);
7025
7026         /* Mac disable */
7027         hclge_cfg_mac_mode(hdev, false);
7028
7029         hclge_mac_stop_phy(hdev);
7030
7031         /* reset tqp stats */
7032         hclge_reset_tqp_stats(handle);
7033         hclge_update_link_status(hdev);
7034 }
7035
7036 int hclge_vport_start(struct hclge_vport *vport)
7037 {
7038         struct hclge_dev *hdev = vport->back;
7039
7040         set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
7041         vport->last_active_jiffies = jiffies;
7042
7043         if (test_bit(vport->vport_id, hdev->vport_config_block)) {
7044                 if (vport->vport_id) {
7045                         hclge_restore_mac_table_common(vport);
7046                         hclge_restore_vport_vlan_table(vport);
7047                 } else {
7048                         hclge_restore_hw_table(hdev);
7049                 }
7050         }
7051
7052         clear_bit(vport->vport_id, hdev->vport_config_block);
7053
7054         return 0;
7055 }
7056
7057 void hclge_vport_stop(struct hclge_vport *vport)
7058 {
7059         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
7060 }
7061
7062 static int hclge_client_start(struct hnae3_handle *handle)
7063 {
7064         struct hclge_vport *vport = hclge_get_vport(handle);
7065
7066         return hclge_vport_start(vport);
7067 }
7068
7069 static void hclge_client_stop(struct hnae3_handle *handle)
7070 {
7071         struct hclge_vport *vport = hclge_get_vport(handle);
7072
7073         hclge_vport_stop(vport);
7074 }
7075
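     /* Convert the cmdq status and the MAC_VLAN table response code into an
      * errno for the given table operation: 0 on success, -ENOSPC when an add
      * overflows the table, -ENOENT when a remove/lookup misses the entry,
      * -EIO/-EINVAL for command or opcode errors.
      */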
7076 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
7077                                          u16 cmdq_resp, u8  resp_code,
7078                                          enum hclge_mac_vlan_tbl_opcode op)
7079 {
7080         struct hclge_dev *hdev = vport->back;
7081
7082         if (cmdq_resp) {
7083                 dev_err(&hdev->pdev->dev,
7084                         "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
7085                         cmdq_resp);
7086                 return -EIO;
7087         }
7088
7089         if (op == HCLGE_MAC_VLAN_ADD) {
7090                 if (!resp_code || resp_code == 1)
7091                         return 0;
7092                 else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
7093                          resp_code == HCLGE_ADD_MC_OVERFLOW)
7094                         return -ENOSPC;
7095
7096                 dev_err(&hdev->pdev->dev,
7097                         "add mac addr failed for undefined, code=%u.\n",
7098                         resp_code);
7099                 return -EIO;
7100         } else if (op == HCLGE_MAC_VLAN_REMOVE) {
7101                 if (!resp_code) {
7102                         return 0;
7103                 } else if (resp_code == 1) {
7104                         dev_dbg(&hdev->pdev->dev,
7105                                 "remove mac addr failed for miss.\n");
7106                         return -ENOENT;
7107                 }
7108
7109                 dev_err(&hdev->pdev->dev,
7110                         "remove mac addr failed for undefined, code=%u.\n",
7111                         resp_code);
7112                 return -EIO;
7113         } else if (op == HCLGE_MAC_VLAN_LKUP) {
7114                 if (!resp_code) {
7115                         return 0;
7116                 } else if (resp_code == 1) {
7117                         dev_dbg(&hdev->pdev->dev,
7118                                 "lookup mac addr failed for miss.\n");
7119                         return -ENOENT;
7120                 }
7121
7122                 dev_err(&hdev->pdev->dev,
7123                         "lookup mac addr failed for undefined, code=%u.\n",
7124                         resp_code);
7125                 return -EIO;
7126         }
7127
7128         dev_err(&hdev->pdev->dev,
7129                 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
7130
7131         return -EINVAL;
7132 }
7133
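     /* Set or clear the bit for vfid in the function bitmap carried by a
      * MAC_VLAN table entry. The bitmap spans two descriptors: the first 192
      * function bits live in desc[1], the remaining ones in desc[2].
      */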
7134 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
7135 {
7136 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
7137
7138         unsigned int word_num;
7139         unsigned int bit_num;
7140
7141         if (vfid > 255 || vfid < 0)
7142                 return -EIO;
7143
7144         if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
7145                 word_num = vfid / 32;
7146                 bit_num  = vfid % 32;
7147                 if (clr)
7148                         desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
7149                 else
7150                         desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
7151         } else {
7152                 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
7153                 bit_num  = vfid % 32;
7154                 if (clr)
7155                         desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
7156                 else
7157                         desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
7158         }
7159
7160         return 0;
7161 }
7162
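     /* Return true when every word of the function bitmap in desc[1] and
      * desc[2] is zero, i.e. no function references the entry any more.
      */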
7163 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
7164 {
7165 #define HCLGE_DESC_NUMBER 3
7166 #define HCLGE_FUNC_NUMBER_PER_DESC 6
7167         int i, j;
7168
7169         for (i = 1; i < HCLGE_DESC_NUMBER; i++)
7170                 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
7171                         if (desc[i].data[j])
7172                                 return false;
7173
7174         return true;
7175 }
7176
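     /* Fill a MAC_VLAN table entry from a MAC address: mark the entry valid,
      * flag it as multicast when requested, and pack the six address bytes
      * into the 32-bit high / 16-bit low fields in the layout the hardware
      * expects.
      */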
7177 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
7178                                    const u8 *addr, bool is_mc)
7179 {
7180         const unsigned char *mac_addr = addr;
7181         u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
7182                        (mac_addr[0]) | (mac_addr[1] << 8);
7183         u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
7184
7185         hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7186         if (is_mc) {
7187                 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
7188                 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7189         }
7190
7191         new_req->mac_addr_hi32 = cpu_to_le32(high_val);
7192         new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
7193 }
7194
7195 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
7196                                      struct hclge_mac_vlan_tbl_entry_cmd *req)
7197 {
7198         struct hclge_dev *hdev = vport->back;
7199         struct hclge_desc desc;
7200         u8 resp_code;
7201         u16 retval;
7202         int ret;
7203
7204         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
7205
7206         memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7207
7208         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7209         if (ret) {
7210                 dev_err(&hdev->pdev->dev,
7211                         "del mac addr failed for cmd_send, ret =%d.\n",
7212                         ret);
7213                 return ret;
7214         }
7215         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7216         retval = le16_to_cpu(desc.retval);
7217
7218         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7219                                              HCLGE_MAC_VLAN_REMOVE);
7220 }
7221
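     /* Look up an entry in the MAC_VLAN table. A multicast lookup uses three
      * chained descriptors (which also return the entry's function bitmap),
      * while a unicast lookup uses a single descriptor.
      */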
7222 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
7223                                      struct hclge_mac_vlan_tbl_entry_cmd *req,
7224                                      struct hclge_desc *desc,
7225                                      bool is_mc)
7226 {
7227         struct hclge_dev *hdev = vport->back;
7228         u8 resp_code;
7229         u16 retval;
7230         int ret;
7231
7232         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
7233         if (is_mc) {
7234                 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7235                 memcpy(desc[0].data,
7236                        req,
7237                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7238                 hclge_cmd_setup_basic_desc(&desc[1],
7239                                            HCLGE_OPC_MAC_VLAN_ADD,
7240                                            true);
7241                 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7242                 hclge_cmd_setup_basic_desc(&desc[2],
7243                                            HCLGE_OPC_MAC_VLAN_ADD,
7244                                            true);
7245                 ret = hclge_cmd_send(&hdev->hw, desc, 3);
7246         } else {
7247                 memcpy(desc[0].data,
7248                        req,
7249                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7250                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
7251         }
7252         if (ret) {
7253                 dev_err(&hdev->pdev->dev,
7254                         "lookup mac addr failed for cmd_send, ret =%d.\n",
7255                         ret);
7256                 return ret;
7257         }
7258         resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
7259         retval = le16_to_cpu(desc[0].retval);
7260
7261         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7262                                              HCLGE_MAC_VLAN_LKUP);
7263 }
7264
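     /* Write an entry into the MAC_VLAN table. Unicast entries are added with
      * a single fresh descriptor built from req; multicast entries reuse the
      * three descriptors from the preceding lookup so the existing function
      * bitmap is preserved.
      */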
7265 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
7266                                   struct hclge_mac_vlan_tbl_entry_cmd *req,
7267                                   struct hclge_desc *mc_desc)
7268 {
7269         struct hclge_dev *hdev = vport->back;
7270         int cfg_status;
7271         u8 resp_code;
7272         u16 retval;
7273         int ret;
7274
7275         if (!mc_desc) {
7276                 struct hclge_desc desc;
7277
7278                 hclge_cmd_setup_basic_desc(&desc,
7279                                            HCLGE_OPC_MAC_VLAN_ADD,
7280                                            false);
7281                 memcpy(desc.data, req,
7282                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7283                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7284                 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7285                 retval = le16_to_cpu(desc.retval);
7286
7287                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7288                                                            resp_code,
7289                                                            HCLGE_MAC_VLAN_ADD);
7290         } else {
7291                 hclge_cmd_reuse_desc(&mc_desc[0], false);
7292                 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7293                 hclge_cmd_reuse_desc(&mc_desc[1], false);
7294                 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7295                 hclge_cmd_reuse_desc(&mc_desc[2], false);
7296                 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
7297                 memcpy(mc_desc[0].data, req,
7298                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7299                 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
7300                 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
7301                 retval = le16_to_cpu(mc_desc[0].retval);
7302
7303                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7304                                                            resp_code,
7305                                                            HCLGE_MAC_VLAN_ADD);
7306         }
7307
7308         if (ret) {
7309                 dev_err(&hdev->pdev->dev,
7310                         "add mac addr failed for cmd_send, ret =%d.\n",
7311                         ret);
7312                 return ret;
7313         }
7314
7315         return cfg_status;
7316 }
7317
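     /* Ask the firmware for space_size unicast MAC (UMV) table entries and
      * report back how many entries were actually granted.
      */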
7318 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
7319                                u16 *allocated_size)
7320 {
7321         struct hclge_umv_spc_alc_cmd *req;
7322         struct hclge_desc desc;
7323         int ret;
7324
7325         req = (struct hclge_umv_spc_alc_cmd *)desc.data;
7326         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
7327
7328         req->space_size = cpu_to_le32(space_size);
7329
7330         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7331         if (ret) {
7332                 dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
7333                         ret);
7334                 return ret;
7335         }
7336
7337         *allocated_size = le32_to_cpu(desc.data[1]);
7338
7339         return 0;
7340 }
7341
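     /* Request the wanted UMV space from firmware and split the granted space
      * into a private quota for each vport plus a shared pool for overflow.
      */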
7342 static int hclge_init_umv_space(struct hclge_dev *hdev)
7343 {
7344         u16 allocated_size = 0;
7345         int ret;
7346
7347         ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
7348         if (ret)
7349                 return ret;
7350
7351         if (allocated_size < hdev->wanted_umv_size)
7352                 dev_warn(&hdev->pdev->dev,
7353                          "failed to alloc umv space, want %u, get %u\n",
7354                          hdev->wanted_umv_size, allocated_size);
7355
7356         hdev->max_umv_size = allocated_size;
7357         hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
7358         hdev->share_umv_size = hdev->priv_umv_size +
7359                         hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7360
7361         return 0;
7362 }
7363
7364 static void hclge_reset_umv_space(struct hclge_dev *hdev)
7365 {
7366         struct hclge_vport *vport;
7367         int i;
7368
7369         for (i = 0; i < hdev->num_alloc_vport; i++) {
7370                 vport = &hdev->vport[i];
7371                 vport->used_umv_num = 0;
7372         }
7373
7374         mutex_lock(&hdev->vport_lock);
7375         hdev->share_umv_size = hdev->priv_umv_size +
7376                         hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7377         mutex_unlock(&hdev->vport_lock);
7378 }
7379
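     /* A vport's UMV space is full once it has used up its private quota and
      * the shared pool is empty.
      */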
7380 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
7381 {
7382         struct hclge_dev *hdev = vport->back;
7383         bool is_full;
7384
7385         if (need_lock)
7386                 mutex_lock(&hdev->vport_lock);
7387
7388         is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
7389                    hdev->share_umv_size == 0);
7390
7391         if (need_lock)
7392                 mutex_unlock(&hdev->vport_lock);
7393
7394         return is_full;
7395 }
7396
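     /* Account for one UMV entry being allocated or freed: entries beyond the
      * vport's private quota are charged to, or returned to, the shared pool.
      */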
7397 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
7398 {
7399         struct hclge_dev *hdev = vport->back;
7400
7401         if (is_free) {
7402                 if (vport->used_umv_num > hdev->priv_umv_size)
7403                         hdev->share_umv_size++;
7404
7405                 if (vport->used_umv_num > 0)
7406                         vport->used_umv_num--;
7407         } else {
7408                 if (vport->used_umv_num >= hdev->priv_umv_size &&
7409                     hdev->share_umv_size > 0)
7410                         hdev->share_umv_size--;
7411                 vport->used_umv_num++;
7412         }
7413 }
7414
7415 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
7416                                                   const u8 *mac_addr)
7417 {
7418         struct hclge_mac_node *mac_node, *tmp;
7419
7420         list_for_each_entry_safe(mac_node, tmp, list, node)
7421                 if (ether_addr_equal(mac_addr, mac_node->mac_addr))
7422                         return mac_node;
7423
7424         return NULL;
7425 }
7426
7427 static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
7428                                   enum HCLGE_MAC_NODE_STATE state)
7429 {
7430         switch (state) {
7431         /* from set_rx_mode or tmp_add_list */
7432         case HCLGE_MAC_TO_ADD:
7433                 if (mac_node->state == HCLGE_MAC_TO_DEL)
7434                         mac_node->state = HCLGE_MAC_ACTIVE;
7435                 break;
7436         /* only from set_rx_mode */
7437         case HCLGE_MAC_TO_DEL:
7438                 if (mac_node->state == HCLGE_MAC_TO_ADD) {
7439                         list_del(&mac_node->node);
7440                         kfree(mac_node);
7441                 } else {
7442                         mac_node->state = HCLGE_MAC_TO_DEL;
7443                 }
7444                 break;
7445         /* only from tmp_add_list, the mac_node->state won't be
7446          * ACTIVE.
7447          */
7448         case HCLGE_MAC_ACTIVE:
7449                 if (mac_node->state == HCLGE_MAC_TO_ADD)
7450                         mac_node->state = HCLGE_MAC_ACTIVE;
7451
7452                 break;
7453         }
7454 }
7455
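     /* Record a MAC address add/remove request in the vport's software MAC
      * list; the change is pushed to the hardware table later by
      * hclge_sync_mac_table().
      */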
7456 int hclge_update_mac_list(struct hclge_vport *vport,
7457                           enum HCLGE_MAC_NODE_STATE state,
7458                           enum HCLGE_MAC_ADDR_TYPE mac_type,
7459                           const unsigned char *addr)
7460 {
7461         struct hclge_dev *hdev = vport->back;
7462         struct hclge_mac_node *mac_node;
7463         struct list_head *list;
7464
7465         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7466                 &vport->uc_mac_list : &vport->mc_mac_list;
7467
7468         spin_lock_bh(&vport->mac_list_lock);
7469
7470         /* if the mac addr is already in the mac list, there is no need to
7471          * add a new one; just check the mac addr state and convert it to a
7472          * new state, remove it, or do nothing.
7473          */
7474         mac_node = hclge_find_mac_node(list, addr);
7475         if (mac_node) {
7476                 hclge_update_mac_node(mac_node, state);
7477                 spin_unlock_bh(&vport->mac_list_lock);
7478                 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
7479                 return 0;
7480         }
7481
7482         /* if this address was never added, there is nothing to delete */
7483         if (state == HCLGE_MAC_TO_DEL) {
7484                 spin_unlock_bh(&vport->mac_list_lock);
7485                 dev_err(&hdev->pdev->dev,
7486                         "failed to delete address %pM from mac list\n",
7487                         addr);
7488                 return -ENOENT;
7489         }
7490
7491         mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
7492         if (!mac_node) {
7493                 spin_unlock_bh(&vport->mac_list_lock);
7494                 return -ENOMEM;
7495         }
7496
7497         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
7498
7499         mac_node->state = state;
7500         ether_addr_copy(mac_node->mac_addr, addr);
7501         list_add_tail(&mac_node->node, list);
7502
7503         spin_unlock_bh(&vport->mac_list_lock);
7504
7505         return 0;
7506 }
7507
7508 static int hclge_add_uc_addr(struct hnae3_handle *handle,
7509                              const unsigned char *addr)
7510 {
7511         struct hclge_vport *vport = hclge_get_vport(handle);
7512
7513         return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
7514                                      addr);
7515 }
7516
7517 int hclge_add_uc_addr_common(struct hclge_vport *vport,
7518                              const unsigned char *addr)
7519 {
7520         struct hclge_dev *hdev = vport->back;
7521         struct hclge_mac_vlan_tbl_entry_cmd req;
7522         struct hclge_desc desc;
7523         u16 egress_port = 0;
7524         int ret;
7525
7526         /* mac addr check */
7527         if (is_zero_ether_addr(addr) ||
7528             is_broadcast_ether_addr(addr) ||
7529             is_multicast_ether_addr(addr)) {
7530                 dev_err(&hdev->pdev->dev,
7531                         "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
7532                          addr, is_zero_ether_addr(addr),
7533                          is_broadcast_ether_addr(addr),
7534                          is_multicast_ether_addr(addr));
7535                 return -EINVAL;
7536         }
7537
7538         memset(&req, 0, sizeof(req));
7539
7540         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7541                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7542
7543         req.egress_port = cpu_to_le16(egress_port);
7544
7545         hclge_prepare_mac_addr(&req, addr, false);
7546
7547         /* Look up the mac address in the mac_vlan table, and add
7548          * it if the entry does not exist. Duplicate unicast entries
7549          * are not allowed in the mac vlan table.
7550          */
7551         ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
7552         if (ret == -ENOENT) {
7553                 mutex_lock(&hdev->vport_lock);
7554                 if (!hclge_is_umv_space_full(vport, false)) {
7555                         ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
7556                         if (!ret)
7557                                 hclge_update_umv_space(vport, false);
7558                         mutex_unlock(&hdev->vport_lock);
7559                         return ret;
7560                 }
7561                 mutex_unlock(&hdev->vport_lock);
7562
7563                 if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
7564                         dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
7565                                 hdev->priv_umv_size);
7566
7567                 return -ENOSPC;
7568         }
7569
7570         /* check if we just hit the duplicate */
7571         if (!ret) {
7572                 dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
7573                          vport->vport_id, addr);
7574                 return 0;
7575         }
7576
7577         dev_err(&hdev->pdev->dev,
7578                 "PF failed to add unicast entry(%pM) in the MAC table\n",
7579                 addr);
7580
7581         return ret;
7582 }
7583
7584 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
7585                             const unsigned char *addr)
7586 {
7587         struct hclge_vport *vport = hclge_get_vport(handle);
7588
7589         return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
7590                                      addr);
7591 }
7592
7593 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
7594                             const unsigned char *addr)
7595 {
7596         struct hclge_dev *hdev = vport->back;
7597         struct hclge_mac_vlan_tbl_entry_cmd req;
7598         int ret;
7599
7600         /* mac addr check */
7601         if (is_zero_ether_addr(addr) ||
7602             is_broadcast_ether_addr(addr) ||
7603             is_multicast_ether_addr(addr)) {
7604                 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
7605                         addr);
7606                 return -EINVAL;
7607         }
7608
7609         memset(&req, 0, sizeof(req));
7610         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7611         hclge_prepare_mac_addr(&req, addr, false);
7612         ret = hclge_remove_mac_vlan_tbl(vport, &req);
7613         if (!ret) {
7614                 mutex_lock(&hdev->vport_lock);
7615                 hclge_update_umv_space(vport, true);
7616                 mutex_unlock(&hdev->vport_lock);
7617         } else if (ret == -ENOENT) {
7618                 ret = 0;
7619         }
7620
7621         return ret;
7622 }
7623
7624 static int hclge_add_mc_addr(struct hnae3_handle *handle,
7625                              const unsigned char *addr)
7626 {
7627         struct hclge_vport *vport = hclge_get_vport(handle);
7628
7629         return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
7630                                      addr);
7631 }
7632
7633 int hclge_add_mc_addr_common(struct hclge_vport *vport,
7634                              const unsigned char *addr)
7635 {
7636         struct hclge_dev *hdev = vport->back;
7637         struct hclge_mac_vlan_tbl_entry_cmd req;
7638         struct hclge_desc desc[3];
7639         int status;
7640
7641         /* mac addr check */
7642         if (!is_multicast_ether_addr(addr)) {
7643                 dev_err(&hdev->pdev->dev,
7644                         "Add mc mac err! invalid mac:%pM.\n",
7645                          addr);
7646                 return -EINVAL;
7647         }
7648         memset(&req, 0, sizeof(req));
7649         hclge_prepare_mac_addr(&req, addr, true);
7650         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7651         if (status) {
7652                 /* This mac addr does not exist, add a new entry for it */
7653                 memset(desc[0].data, 0, sizeof(desc[0].data));
7654                 memset(desc[1].data, 0, sizeof(desc[1].data));
7655                 memset(desc[2].data, 0, sizeof(desc[2].data));
7656         }
7657         status = hclge_update_desc_vfid(desc, vport->vport_id, false);
7658         if (status)
7659                 return status;
7660         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7661
7662         /* if already overflowed, do not print it every time */
7663         if (status == -ENOSPC &&
7664             !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
7665                 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
7666
7667         return status;
7668 }
7669
7670 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
7671                             const unsigned char *addr)
7672 {
7673         struct hclge_vport *vport = hclge_get_vport(handle);
7674
7675         return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
7676                                      addr);
7677 }
7678
7679 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
7680                             const unsigned char *addr)
7681 {
7682         struct hclge_dev *hdev = vport->back;
7683         struct hclge_mac_vlan_tbl_entry_cmd req;
7684         enum hclge_cmd_status status;
7685         struct hclge_desc desc[3];
7686
7687         /* mac addr check */
7688         if (!is_multicast_ether_addr(addr)) {
7689                 dev_dbg(&hdev->pdev->dev,
7690                         "Remove mc mac err! invalid mac:%pM.\n",
7691                          addr);
7692                 return -EINVAL;
7693         }
7694
7695         memset(&req, 0, sizeof(req));
7696         hclge_prepare_mac_addr(&req, addr, true);
7697         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7698         if (!status) {
7699                 /* This mac addr exists, remove this handle's VFID from it */
7700                 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
7701                 if (status)
7702                         return status;
7703
7704                 if (hclge_is_all_function_id_zero(desc))
7705                         /* All the vfids are zero, so delete this entry */
7706                         status = hclge_remove_mac_vlan_tbl(vport, &req);
7707                 else
7708                         /* Not all the vfids are zero, so update the vfids */
7709                         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7710
7711         } else if (status == -ENOENT) {
7712                 status = 0;
7713         }
7714
7715         return status;
7716 }
7717
7718 static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
7719                                       struct list_head *list,
7720                                       int (*sync)(struct hclge_vport *,
7721                                                   const unsigned char *))
7722 {
7723         struct hclge_mac_node *mac_node, *tmp;
7724         int ret;
7725
7726         list_for_each_entry_safe(mac_node, tmp, list, node) {
7727                 ret = sync(vport, mac_node->mac_addr);
7728                 if (!ret) {
7729                         mac_node->state = HCLGE_MAC_ACTIVE;
7730                 } else {
7731                         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
7732                                 &vport->state);
7733                         break;
7734                 }
7735         }
7736 }
7737
7738 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
7739                                         struct list_head *list,
7740                                         int (*unsync)(struct hclge_vport *,
7741                                                       const unsigned char *))
7742 {
7743         struct hclge_mac_node *mac_node, *tmp;
7744         int ret;
7745
7746         list_for_each_entry_safe(mac_node, tmp, list, node) {
7747                 ret = unsync(vport, mac_node->mac_addr);
7748                 if (!ret || ret == -ENOENT) {
7749                         list_del(&mac_node->node);
7750                         kfree(mac_node);
7751                 } else {
7752                         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
7753                                 &vport->state);
7754                         break;
7755                 }
7756         }
7757 }
7758
7759 static bool hclge_sync_from_add_list(struct list_head *add_list,
7760                                      struct list_head *mac_list)
7761 {
7762         struct hclge_mac_node *mac_node, *tmp, *new_node;
7763         bool all_added = true;
7764
7765         list_for_each_entry_safe(mac_node, tmp, add_list, node) {
7766                 if (mac_node->state == HCLGE_MAC_TO_ADD)
7767                         all_added = false;
7768
7769                 /* if the mac address from tmp_add_list is not in the
7770                  * uc/mc_mac_list, a TO_DEL request was received during the
7771                  * time window of adding the mac address into the mac
7772                  * table. If the mac_node state is ACTIVE, change it to
7773                  * TO_DEL and it will be removed next time; otherwise it
7774                  * must be TO_ADD, meaning the address has not been added
7775                  * into the mac table yet, so just remove the mac node.
7776                  */
7777                 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
7778                 if (new_node) {
7779                         hclge_update_mac_node(new_node, mac_node->state);
7780                         list_del(&mac_node->node);
7781                         kfree(mac_node);
7782                 } else if (mac_node->state == HCLGE_MAC_ACTIVE) {
7783                         mac_node->state = HCLGE_MAC_TO_DEL;
7784                         list_del(&mac_node->node);
7785                         list_add_tail(&mac_node->node, mac_list);
7786                 } else {
7787                         list_del(&mac_node->node);
7788                         kfree(mac_node);
7789                 }
7790         }
7791
7792         return all_added;
7793 }
7794
7795 static void hclge_sync_from_del_list(struct list_head *del_list,
7796                                      struct list_head *mac_list)
7797 {
7798         struct hclge_mac_node *mac_node, *tmp, *new_node;
7799
7800         list_for_each_entry_safe(mac_node, tmp, del_list, node) {
7801                 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
7802                 if (new_node) {
7803                         /* If the mac addr exists in the mac list, a new
7804                          * TO_ADD request was received during the time window
7805                          * of configuring the mac address. The mac node
7806                          * state is TO_ADD and the address is still present
7807                          * in the hardware (because the delete failed), so we
7808                          * just need to change the mac node state to ACTIVE.
7809                          */
7810                         new_node->state = HCLGE_MAC_ACTIVE;
7811                         list_del(&mac_node->node);
7812                         kfree(mac_node);
7813                 } else {
7814                         list_del(&mac_node->node);
7815                         list_add_tail(&mac_node->node, mac_list);
7816                 }
7817         }
7818 }
7819
7820 static void hclge_update_overflow_flags(struct hclge_vport *vport,
7821                                         enum HCLGE_MAC_ADDR_TYPE mac_type,
7822                                         bool is_all_added)
7823 {
7824         if (mac_type == HCLGE_MAC_ADDR_UC) {
7825                 if (is_all_added)
7826                         vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
7827                 else
7828                         vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
7829         } else {
7830                 if (is_all_added)
7831                         vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
7832                 else
7833                         vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
7834         }
7835 }
7836
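     /* Push the pending changes of one address list to hardware: TO_DEL nodes
      * are moved to a temporary delete list and removed first (freeing table
      * space), TO_ADD nodes are copied to a temporary add list and programmed,
      * and any node that failed is merged back into the list so it is retried
      * later.
      */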
7837 static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
7838                                        enum HCLGE_MAC_ADDR_TYPE mac_type)
7839 {
7840         struct hclge_mac_node *mac_node, *tmp, *new_node;
7841         struct list_head tmp_add_list, tmp_del_list;
7842         struct list_head *list;
7843         bool all_added;
7844
7845         INIT_LIST_HEAD(&tmp_add_list);
7846         INIT_LIST_HEAD(&tmp_del_list);
7847
7848         /* move the mac addrs to the tmp_add_list and tmp_del_list, then
7849          * we can add/delete these mac addrs outside the spin lock
7850          */
7851         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7852                 &vport->uc_mac_list : &vport->mc_mac_list;
7853
7854         spin_lock_bh(&vport->mac_list_lock);
7855
7856         list_for_each_entry_safe(mac_node, tmp, list, node) {
7857                 switch (mac_node->state) {
7858                 case HCLGE_MAC_TO_DEL:
7859                         list_del(&mac_node->node);
7860                         list_add_tail(&mac_node->node, &tmp_del_list);
7861                         break;
7862                 case HCLGE_MAC_TO_ADD:
7863                         new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
7864                         if (!new_node)
7865                                 goto stop_traverse;
7866                         ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
7867                         new_node->state = mac_node->state;
7868                         list_add_tail(&new_node->node, &tmp_add_list);
7869                         break;
7870                 default:
7871                         break;
7872                 }
7873         }
7874
7875 stop_traverse:
7876         spin_unlock_bh(&vport->mac_list_lock);
7877
7878         /* delete first, to free as much mac table space as possible for adding */
7879         if (mac_type == HCLGE_MAC_ADDR_UC) {
7880                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
7881                                             hclge_rm_uc_addr_common);
7882                 hclge_sync_vport_mac_list(vport, &tmp_add_list,
7883                                           hclge_add_uc_addr_common);
7884         } else {
7885                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
7886                                             hclge_rm_mc_addr_common);
7887                 hclge_sync_vport_mac_list(vport, &tmp_add_list,
7888                                           hclge_add_mc_addr_common);
7889         }
7890
7891         /* if adding/deleting some mac addresses failed, move them back to
7892          * the mac_list and retry next time.
7893          */
7894         spin_lock_bh(&vport->mac_list_lock);
7895
7896         hclge_sync_from_del_list(&tmp_del_list, list);
7897         all_added = hclge_sync_from_add_list(&tmp_add_list, list);
7898
7899         spin_unlock_bh(&vport->mac_list_lock);
7900
7901         hclge_update_overflow_flags(vport, mac_type, all_added);
7902 }
7903
7904 static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
7905 {
7906         struct hclge_dev *hdev = vport->back;
7907
7908         if (test_bit(vport->vport_id, hdev->vport_config_block))
7909                 return false;
7910
7911         if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
7912                 return true;
7913
7914         return false;
7915 }
7916
7917 static void hclge_sync_mac_table(struct hclge_dev *hdev)
7918 {
7919         int i;
7920
7921         for (i = 0; i < hdev->num_alloc_vport; i++) {
7922                 struct hclge_vport *vport = &hdev->vport[i];
7923
7924                 if (!hclge_need_sync_mac_table(vport))
7925                         continue;
7926
7927                 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
7928                 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
7929         }
7930 }
7931
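     /* Remove all of a vport's unicast or multicast addresses from the
      * hardware table. When is_del_list is false (e.g. across a VF reset) the
      * active addresses stay in the software list marked TO_ADD so they can be
      * restored afterwards; otherwise entries whose removal failed stay marked
      * TO_DEL and are retried later.
      */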
7932 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
7933                                   enum HCLGE_MAC_ADDR_TYPE mac_type)
7934 {
7935         int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
7936         struct hclge_mac_node *mac_cfg, *tmp;
7937         struct hclge_dev *hdev = vport->back;
7938         struct list_head tmp_del_list, *list;
7939         int ret;
7940
7941         if (mac_type == HCLGE_MAC_ADDR_UC) {
7942                 list = &vport->uc_mac_list;
7943                 unsync = hclge_rm_uc_addr_common;
7944         } else {
7945                 list = &vport->mc_mac_list;
7946                 unsync = hclge_rm_mc_addr_common;
7947         }
7948
7949         INIT_LIST_HEAD(&tmp_del_list);
7950
7951         if (!is_del_list)
7952                 set_bit(vport->vport_id, hdev->vport_config_block);
7953
7954         spin_lock_bh(&vport->mac_list_lock);
7955
7956         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7957                 switch (mac_cfg->state) {
7958                 case HCLGE_MAC_TO_DEL:
7959                 case HCLGE_MAC_ACTIVE:
7960                         list_del(&mac_cfg->node);
7961                         list_add_tail(&mac_cfg->node, &tmp_del_list);
7962                         break;
7963                 case HCLGE_MAC_TO_ADD:
7964                         if (is_del_list) {
7965                                 list_del(&mac_cfg->node);
7966                                 kfree(mac_cfg);
7967                         }
7968                         break;
7969                 }
7970         }
7971
7972         spin_unlock_bh(&vport->mac_list_lock);
7973
7974         list_for_each_entry_safe(mac_cfg, tmp, &tmp_del_list, node) {
7975                 ret = unsync(vport, mac_cfg->mac_addr);
7976                 if (!ret || ret == -ENOENT) {
7977                         /* clear all mac addrs from hardware, but keep these
7978                          * mac addrs in the mac list, and restore them after
7979                          * the vf reset finishes.
7980                          */
7981                         if (!is_del_list &&
7982                             mac_cfg->state == HCLGE_MAC_ACTIVE) {
7983                                 mac_cfg->state = HCLGE_MAC_TO_ADD;
7984                         } else {
7985                                 list_del(&mac_cfg->node);
7986                                 kfree(mac_cfg);
7987                         }
7988                 } else if (is_del_list) {
7989                         mac_cfg->state = HCLGE_MAC_TO_DEL;
7990                 }
7991         }
7992
7993         spin_lock_bh(&vport->mac_list_lock);
7994
7995         hclge_sync_from_del_list(&tmp_del_list, list);
7996
7997         spin_unlock_bh(&vport->mac_list_lock);
7998 }
7999
8000 /* remove all mac addresses when uninitializing */
8001 static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
8002                                         enum HCLGE_MAC_ADDR_TYPE mac_type)
8003 {
8004         struct hclge_mac_node *mac_node, *tmp;
8005         struct hclge_dev *hdev = vport->back;
8006         struct list_head tmp_del_list, *list;
8007
8008         INIT_LIST_HEAD(&tmp_del_list);
8009
8010         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8011                 &vport->uc_mac_list : &vport->mc_mac_list;
8012
8013         spin_lock_bh(&vport->mac_list_lock);
8014
8015         list_for_each_entry_safe(mac_node, tmp, list, node) {
8016                 switch (mac_node->state) {
8017                 case HCLGE_MAC_TO_DEL:
8018                 case HCLGE_MAC_ACTIVE:
8019                         list_del(&mac_node->node);
8020                         list_add_tail(&mac_node->node, &tmp_del_list);
8021                         break;
8022                 case HCLGE_MAC_TO_ADD:
8023                         list_del(&mac_node->node);
8024                         kfree(mac_node);
8025                         break;
8026                 }
8027         }
8028
8029         spin_unlock_bh(&vport->mac_list_lock);
8030
8031         if (mac_type == HCLGE_MAC_ADDR_UC)
8032                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8033                                             hclge_rm_uc_addr_common);
8034         else
8035                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8036                                             hclge_rm_mc_addr_common);
8037
8038         if (!list_empty(&tmp_del_list))
8039                 dev_warn(&hdev->pdev->dev,
8040                          "failed to completely uninit %s mac list for vport %u\n",
8041                          mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
8042                          vport->vport_id);
8043
8044         list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
8045                 list_del(&mac_node->node);
8046                 kfree(mac_node);
8047         }
8048 }
8049
8050 static void hclge_uninit_mac_table(struct hclge_dev *hdev)
8051 {
8052         struct hclge_vport *vport;
8053         int i;
8054
8055         for (i = 0; i < hdev->num_alloc_vport; i++) {
8056                 vport = &hdev->vport[i];
8057                 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
8058                 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
8059         }
8060 }
8061
8062 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
8063                                               u16 cmdq_resp, u8 resp_code)
8064 {
8065 #define HCLGE_ETHERTYPE_SUCCESS_ADD             0
8066 #define HCLGE_ETHERTYPE_ALREADY_ADD             1
8067 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW        2
8068 #define HCLGE_ETHERTYPE_KEY_CONFLICT            3
8069
8070         int return_status;
8071
8072         if (cmdq_resp) {
8073                 dev_err(&hdev->pdev->dev,
8074                         "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
8075                         cmdq_resp);
8076                 return -EIO;
8077         }
8078
8079         switch (resp_code) {
8080         case HCLGE_ETHERTYPE_SUCCESS_ADD:
8081         case HCLGE_ETHERTYPE_ALREADY_ADD:
8082                 return_status = 0;
8083                 break;
8084         case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
8085                 dev_err(&hdev->pdev->dev,
8086                         "add mac ethertype failed for manager table overflow.\n");
8087                 return_status = -EIO;
8088                 break;
8089         case HCLGE_ETHERTYPE_KEY_CONFLICT:
8090                 dev_err(&hdev->pdev->dev,
8091                         "add mac ethertype failed for key conflict.\n");
8092                 return_status = -EIO;
8093                 break;
8094         default:
8095                 dev_err(&hdev->pdev->dev,
8096                         "add mac ethertype failed for undefined, code=%u.\n",
8097                         resp_code);
8098                 return_status = -EIO;
8099         }
8100
8101         return return_status;
8102 }
8103
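     /* Return true if mac_addr is already programmed in the MAC_VLAN table or
      * is already assigned to another VF.
      */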
8104 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
8105                                      u8 *mac_addr)
8106 {
8107         struct hclge_mac_vlan_tbl_entry_cmd req;
8108         struct hclge_dev *hdev = vport->back;
8109         struct hclge_desc desc;
8110         u16 egress_port = 0;
8111         int i;
8112
8113         if (is_zero_ether_addr(mac_addr))
8114                 return false;
8115
8116         memset(&req, 0, sizeof(req));
8117         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8118                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8119         req.egress_port = cpu_to_le16(egress_port);
8120         hclge_prepare_mac_addr(&req, mac_addr, false);
8121
8122         if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
8123                 return true;
8124
8125         vf_idx += HCLGE_VF_VPORT_START_NUM;
8126         for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
8127                 if (i != vf_idx &&
8128                     ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
8129                         return true;
8130
8131         return false;
8132 }
8133
8134 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
8135                             u8 *mac_addr)
8136 {
8137         struct hclge_vport *vport = hclge_get_vport(handle);
8138         struct hclge_dev *hdev = vport->back;
8139
8140         vport = hclge_get_vf_vport(hdev, vf);
8141         if (!vport)
8142                 return -EINVAL;
8143
8144         if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
8145                 dev_info(&hdev->pdev->dev,
8146                          "Specified MAC(=%pM) is same as before, no change committed!\n",
8147                          mac_addr);
8148                 return 0;
8149         }
8150
8151         if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
8152                 dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
8153                         mac_addr);
8154                 return -EEXIST;
8155         }
8156
8157         ether_addr_copy(vport->vf_info.mac, mac_addr);
8158
8159         if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
8160                 dev_info(&hdev->pdev->dev,
8161                          "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
8162                          vf, mac_addr);
8163                 return hclge_inform_reset_assert_to_vf(vport);
8164         }
8165
8166         dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
8167                  vf, mac_addr);
8168         return 0;
8169 }
8170
8171 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
8172                              const struct hclge_mac_mgr_tbl_entry_cmd *req)
8173 {
8174         struct hclge_desc desc;
8175         u8 resp_code;
8176         u16 retval;
8177         int ret;
8178
8179         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
8180         memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
8181
8182         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8183         if (ret) {
8184                 dev_err(&hdev->pdev->dev,
8185                         "add mac ethertype failed for cmd_send, ret =%d.\n",
8186                         ret);
8187                 return ret;
8188         }
8189
8190         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8191         retval = le16_to_cpu(desc.retval);
8192
8193         return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
8194 }
8195
8196 static int init_mgr_tbl(struct hclge_dev *hdev)
8197 {
8198         int ret;
8199         int i;
8200
8201         for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
8202                 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
8203                 if (ret) {
8204                         dev_err(&hdev->pdev->dev,
8205                                 "add mac ethertype failed, ret =%d.\n",
8206                                 ret);
8207                         return ret;
8208                 }
8209         }
8210
8211         return 0;
8212 }
8213
8214 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
8215 {
8216         struct hclge_vport *vport = hclge_get_vport(handle);
8217         struct hclge_dev *hdev = vport->back;
8218
8219         ether_addr_copy(p, hdev->hw.mac.mac_addr);
8220 }
8221
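     /* Update the unicast MAC list when the device's own address changes: add
      * or revive the new address at the head of the list and mark the old
      * address for deletion.
      */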
8222 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
8223                                        const u8 *old_addr, const u8 *new_addr)
8224 {
8225         struct list_head *list = &vport->uc_mac_list;
8226         struct hclge_mac_node *old_node, *new_node;
8227
8228         new_node = hclge_find_mac_node(list, new_addr);
8229         if (!new_node) {
8230                 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8231                 if (!new_node)
8232                         return -ENOMEM;
8233
8234                 new_node->state = HCLGE_MAC_TO_ADD;
8235                 ether_addr_copy(new_node->mac_addr, new_addr);
8236                 list_add(&new_node->node, list);
8237         } else {
8238                 if (new_node->state == HCLGE_MAC_TO_DEL)
8239                         new_node->state = HCLGE_MAC_ACTIVE;
8240
8241                 /* make sure the new addr is at the list head, so the dev
8242                  * addr is not left out of the mac table due to the umv space
8243                  * limitation after a global/imp reset, which clears the mac
8244                  * table in hardware.
8245                  */
8246                 list_move(&new_node->node, list);
8247         }
8248
8249         if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
8250                 old_node = hclge_find_mac_node(list, old_addr);
8251                 if (old_node) {
8252                         if (old_node->state == HCLGE_MAC_TO_ADD) {
8253                                 list_del(&old_node->node);
8254                                 kfree(old_node);
8255                         } else {
8256                                 old_node->state = HCLGE_MAC_TO_DEL;
8257                         }
8258                 }
8259         }
8260
8261         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8262
8263         return 0;
8264 }
8265
8266 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
8267                               bool is_first)
8268 {
8269         const unsigned char *new_addr = (const unsigned char *)p;
8270         struct hclge_vport *vport = hclge_get_vport(handle);
8271         struct hclge_dev *hdev = vport->back;
8272         unsigned char *old_addr = NULL;
8273         int ret;
8274
8275         /* mac addr check */
8276         if (is_zero_ether_addr(new_addr) ||
8277             is_broadcast_ether_addr(new_addr) ||
8278             is_multicast_ether_addr(new_addr)) {
8279                 dev_err(&hdev->pdev->dev,
8280                         "change uc mac err! invalid mac: %pM.\n",
8281                          new_addr);
8282                 return -EINVAL;
8283         }
8284
8285         ret = hclge_pause_addr_cfg(hdev, new_addr);
8286         if (ret) {
8287                 dev_err(&hdev->pdev->dev,
8288                         "failed to configure mac pause address, ret = %d\n",
8289                         ret);
8290                 return ret;
8291         }
8292
8293         if (!is_first)
8294                 old_addr = hdev->hw.mac.mac_addr;
8295
8296         spin_lock_bh(&vport->mac_list_lock);
8297         ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
8298         if (ret) {
8299                 dev_err(&hdev->pdev->dev,
8300                         "failed to change the mac addr:%pM, ret = %d\n",
8301                         new_addr, ret);
8302                 spin_unlock_bh(&vport->mac_list_lock);
8303
8304                 if (!is_first)
8305                         hclge_pause_addr_cfg(hdev, old_addr);
8306
8307                 return ret;
8308         }
8309         /* we must update the dev addr under the spin lock, to prevent the
8310          * dev addr from being removed by the set_rx_mode path.
8311          */
8312         ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
8313         spin_unlock_bh(&vport->mac_list_lock);
8314
8315         hclge_task_schedule(hdev, 0);
8316
8317         return 0;
8318 }
8319
8320 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
8321                           int cmd)
8322 {
8323         struct hclge_vport *vport = hclge_get_vport(handle);
8324         struct hclge_dev *hdev = vport->back;
8325
8326         if (!hdev->hw.mac.phydev)
8327                 return -EOPNOTSUPP;
8328
8329         return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
8330 }
8331
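     /* Read-modify-write of the VLAN filter control config: read the current
      * setting for the given vlan_type/vf_id, then set or clear the requested
      * filter-enable bits and write it back.
      */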
8332 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
8333                                       u8 fe_type, bool filter_en, u8 vf_id)
8334 {
8335         struct hclge_vlan_filter_ctrl_cmd *req;
8336         struct hclge_desc desc;
8337         int ret;
8338
8339         /* read current vlan filter parameter */
8340         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
8341         req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
8342         req->vlan_type = vlan_type;
8343         req->vf_id = vf_id;
8344
8345         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8346         if (ret) {
8347                 dev_err(&hdev->pdev->dev,
8348                         "failed to get vlan filter config, ret = %d.\n", ret);
8349                 return ret;
8350         }
8351
8352         /* modify and write new config parameter */
8353         hclge_cmd_reuse_desc(&desc, false);
8354         req->vlan_fe = filter_en ?
8355                         (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
8356
8357         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8358         if (ret)
8359                 dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
8360                         ret);
8361
8362         return ret;
8363 }
8364
8365 #define HCLGE_FILTER_TYPE_VF            0
8366 #define HCLGE_FILTER_TYPE_PORT          1
8367 #define HCLGE_FILTER_FE_EGRESS_V1_B     BIT(0)
8368 #define HCLGE_FILTER_FE_NIC_INGRESS_B   BIT(0)
8369 #define HCLGE_FILTER_FE_NIC_EGRESS_B    BIT(1)
8370 #define HCLGE_FILTER_FE_ROCE_INGRESS_B  BIT(2)
8371 #define HCLGE_FILTER_FE_ROCE_EGRESS_B   BIT(3)
8372 #define HCLGE_FILTER_FE_EGRESS          (HCLGE_FILTER_FE_NIC_EGRESS_B \
8373                                         | HCLGE_FILTER_FE_ROCE_EGRESS_B)
8374 #define HCLGE_FILTER_FE_INGRESS         (HCLGE_FILTER_FE_NIC_INGRESS_B \
8375                                         | HCLGE_FILTER_FE_ROCE_INGRESS_B)
8376
8377 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
8378 {
8379         struct hclge_vport *vport = hclge_get_vport(handle);
8380         struct hclge_dev *hdev = vport->back;
8381
8382         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
8383                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8384                                            HCLGE_FILTER_FE_EGRESS, enable, 0);
8385                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8386                                            HCLGE_FILTER_FE_INGRESS, enable, 0);
8387         } else {
8388                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8389                                            HCLGE_FILTER_FE_EGRESS_V1_B, enable,
8390                                            0);
8391         }
8392         if (enable)
8393                 handle->netdev_flags |= HNAE3_VLAN_FLTR;
8394         else
8395                 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
8396 }
8397
8398 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
8399                                     bool is_kill, u16 vlan,
8400                                     __be16 proto)
8401 {
8402         struct hclge_vport *vport = &hdev->vport[vfid];
8403         struct hclge_vlan_filter_vf_cfg_cmd *req0;
8404         struct hclge_vlan_filter_vf_cfg_cmd *req1;
8405         struct hclge_desc desc[2];
8406         u8 vf_byte_val;
8407         u8 vf_byte_off;
8408         int ret;
8409
8410         /* If the vf vlan table is full, firmware disables the vf vlan
8411          * filter, so adding a new vlan id is neither possible nor needed.
8412          * If spoof check is enabled while the table is full, adding a new
8413          * vlan must be rejected: tx packets with that vlan id would be dropped.
8414          */
8415         if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
8416                 if (vport->vf_info.spoofchk && vlan) {
8417                         dev_err(&hdev->pdev->dev,
8418                                 "Can't add vlan due to spoof check is on and vf vlan table is full\n");
8419                         return -EPERM;
8420                 }
8421                 return 0;
8422         }
8423
8424         hclge_cmd_setup_basic_desc(&desc[0],
8425                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
8426         hclge_cmd_setup_basic_desc(&desc[1],
8427                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
8428
8429         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8430
8431         vf_byte_off = vfid / 8;
8432         vf_byte_val = 1 << (vfid % 8);
8433
8434         req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
8435         req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
8436
8437         req0->vlan_id  = cpu_to_le16(vlan);
8438         req0->vlan_cfg = is_kill;
8439
8440         if (vf_byte_off < HCLGE_MAX_VF_BYTES)
8441                 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
8442         else
8443                 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
8444
8445         ret = hclge_cmd_send(&hdev->hw, desc, 2);
8446         if (ret) {
8447                 dev_err(&hdev->pdev->dev,
8448                         "Send vf vlan command fail, ret =%d.\n",
8449                         ret);
8450                 return ret;
8451         }
8452
8453         if (!is_kill) {
8454 #define HCLGE_VF_VLAN_NO_ENTRY  2
8455                 if (!req0->resp_code || req0->resp_code == 1)
8456                         return 0;
8457
8458                 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
8459                         set_bit(vfid, hdev->vf_vlan_full);
8460                         dev_warn(&hdev->pdev->dev,
8461                                  "vf vlan table is full, vf vlan filter is disabled\n");
8462                         return 0;
8463                 }
8464
8465                 dev_err(&hdev->pdev->dev,
8466                         "Add vf vlan filter fail, ret =%u.\n",
8467                         req0->resp_code);
8468         } else {
8469 #define HCLGE_VF_VLAN_DEL_NO_FOUND      1
8470                 if (!req0->resp_code)
8471                         return 0;
8472
8473                 /* vf vlan filter is disabled when vf vlan table is full,
8474                  * then new vlan id will not be added into vf vlan table.
8475                  * Just return 0 without warning, avoid massive verbose
8476                  * print logs when unload.
8477                  */
8478                 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
8479                         return 0;
8480
8481                 dev_err(&hdev->pdev->dev,
8482                         "Kill vf vlan filter fail, ret =%u.\n",
8483                         req0->resp_code);
8484         }
8485
8486         return -EIO;
8487 }
8488
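/* Program the PF (port) vlan filter for @vlan_id. The id is decomposed
 * into an offset block, a byte within the block and a bit within the
 * byte of the request bitmap; req->vlan_cfg tells firmware whether the
 * entry is being added or removed.
 */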
8489 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
8490                                       u16 vlan_id, bool is_kill)
8491 {
8492         struct hclge_vlan_filter_pf_cfg_cmd *req;
8493         struct hclge_desc desc;
8494         u8 vlan_offset_byte_val;
8495         u8 vlan_offset_byte;
8496         u8 vlan_offset_160;
8497         int ret;
8498
8499         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
8500
8501         vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
8502         vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
8503                            HCLGE_VLAN_BYTE_SIZE;
8504         vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
8505
8506         req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
8507         req->vlan_offset = vlan_offset_160;
8508         req->vlan_cfg = is_kill;
8509         req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
8510
8511         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8512         if (ret)
8513                 dev_err(&hdev->pdev->dev,
8514                         "port vlan command, send fail, ret =%d.\n", ret);
8515         return ret;
8516 }
8517
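/* Update the vlan filter hardware for one vport. The per-vlan bitmap in
 * hdev->vlan_table tracks which vports use each vlan id, so the shared
 * port filter is only touched when the first vport joins or the last
 * vport leaves a vlan. Killing vlan 0 is treated as a no-op.
 */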
8518 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
8519                                     u16 vport_id, u16 vlan_id,
8520                                     bool is_kill)
8521 {
8522         u16 vport_idx, vport_num = 0;
8523         int ret;
8524
8525         if (is_kill && !vlan_id)
8526                 return 0;
8527
8528         ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
8529                                        proto);
8530         if (ret) {
8531                 dev_err(&hdev->pdev->dev,
8532                         "Set %u vport vlan filter config fail, ret =%d.\n",
8533                         vport_id, ret);
8534                 return ret;
8535         }
8536
8537         /* vlan 0 may be added twice when 8021q module is enabled */
8538         if (!is_kill && !vlan_id &&
8539             test_bit(vport_id, hdev->vlan_table[vlan_id]))
8540                 return 0;
8541
8542         if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
8543                 dev_err(&hdev->pdev->dev,
8544                         "Add port vlan failed, vport %u is already in vlan %u\n",
8545                         vport_id, vlan_id);
8546                 return -EINVAL;
8547         }
8548
8549         if (is_kill &&
8550             !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
8551                 dev_err(&hdev->pdev->dev,
8552                         "Delete port vlan failed, vport %u is not in vlan %u\n",
8553                         vport_id, vlan_id);
8554                 return -EINVAL;
8555         }
8556
8557         for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
8558                 vport_num++;
8559
8560         if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
8561                 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
8562                                                  is_kill);
8563
8564         return ret;
8565 }
8566
8567 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
8568 {
8569         struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
8570         struct hclge_vport_vtag_tx_cfg_cmd *req;
8571         struct hclge_dev *hdev = vport->back;
8572         struct hclge_desc desc;
8573         u16 bmap_index;
8574         int status;
8575
8576         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
8577
8578         req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
8579         req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
8580         req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
8581         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
8582                       vcfg->accept_tag1 ? 1 : 0);
8583         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
8584                       vcfg->accept_untag1 ? 1 : 0);
8585         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
8586                       vcfg->accept_tag2 ? 1 : 0);
8587         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
8588                       vcfg->accept_untag2 ? 1 : 0);
8589         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
8590                       vcfg->insert_tag1_en ? 1 : 0);
8591         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
8592                       vcfg->insert_tag2_en ? 1 : 0);
8593         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
8594
8595         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8596         bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8597                         HCLGE_VF_NUM_PER_BYTE;
8598         req->vf_bitmap[bmap_index] =
8599                 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8600
8601         status = hclge_cmd_send(&hdev->hw, &desc, 1);
8602         if (status)
8603                 dev_err(&hdev->pdev->dev,
8604                         "Send port txvlan cfg command fail, ret =%d\n",
8605                         status);
8606
8607         return status;
8608 }
8609
8610 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
8611 {
8612         struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
8613         struct hclge_vport_vtag_rx_cfg_cmd *req;
8614         struct hclge_dev *hdev = vport->back;
8615         struct hclge_desc desc;
8616         u16 bmap_index;
8617         int status;
8618
8619         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
8620
8621         req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
8622         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
8623                       vcfg->strip_tag1_en ? 1 : 0);
8624         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
8625                       vcfg->strip_tag2_en ? 1 : 0);
8626         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
8627                       vcfg->vlan1_vlan_prionly ? 1 : 0);
8628         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
8629                       vcfg->vlan2_vlan_prionly ? 1 : 0);
8630
8631         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8632         bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8633                         HCLGE_VF_NUM_PER_BYTE;
8634         req->vf_bitmap[bmap_index] =
8635                 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8636
8637         status = hclge_cmd_send(&hdev->hw, &desc, 1);
8638         if (status)
8639                 dev_err(&hdev->pdev->dev,
8640                         "Send port rxvlan cfg command fail, ret =%d\n",
8641                         status);
8642
8643         return status;
8644 }
8645
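/* Derive the tx/rx vlan tag offload settings for @vport from the port
 * based vlan state: with it disabled, tag1 from the stack is accepted on
 * tx and tag2 stripping follows rx_vlan_offload_en; with it enabled, the
 * default tag1 is inserted on tx, tag1 stripping follows
 * rx_vlan_offload_en and tag2 is always stripped.
 */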
8646 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
8647                                   u16 port_base_vlan_state,
8648                                   u16 vlan_tag)
8649 {
8650         int ret;
8651
8652         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8653                 vport->txvlan_cfg.accept_tag1 = true;
8654                 vport->txvlan_cfg.insert_tag1_en = false;
8655                 vport->txvlan_cfg.default_tag1 = 0;
8656         } else {
8657                 vport->txvlan_cfg.accept_tag1 = false;
8658                 vport->txvlan_cfg.insert_tag1_en = true;
8659                 vport->txvlan_cfg.default_tag1 = vlan_tag;
8660         }
8661
8662         vport->txvlan_cfg.accept_untag1 = true;
8663
8664         /* accept_tag2 and accept_untag2 are not supported on
8665          * pdev revision(0x20); newer revisions support them, but
8666          * these two fields cannot be configured by the user.
8667          */
8668         vport->txvlan_cfg.accept_tag2 = true;
8669         vport->txvlan_cfg.accept_untag2 = true;
8670         vport->txvlan_cfg.insert_tag2_en = false;
8671         vport->txvlan_cfg.default_tag2 = 0;
8672
8673         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8674                 vport->rxvlan_cfg.strip_tag1_en = false;
8675                 vport->rxvlan_cfg.strip_tag2_en =
8676                                 vport->rxvlan_cfg.rx_vlan_offload_en;
8677         } else {
8678                 vport->rxvlan_cfg.strip_tag1_en =
8679                                 vport->rxvlan_cfg.rx_vlan_offload_en;
8680                 vport->rxvlan_cfg.strip_tag2_en = true;
8681         }
8682         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8683         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8684
8685         ret = hclge_set_vlan_tx_offload_cfg(vport);
8686         if (ret)
8687                 return ret;
8688
8689         return hclge_set_vlan_rx_offload_cfg(vport);
8690 }
8691
8692 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
8693 {
8694         struct hclge_rx_vlan_type_cfg_cmd *rx_req;
8695         struct hclge_tx_vlan_type_cfg_cmd *tx_req;
8696         struct hclge_desc desc;
8697         int status;
8698
8699         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
8700         rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
8701         rx_req->ot_fst_vlan_type =
8702                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
8703         rx_req->ot_sec_vlan_type =
8704                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
8705         rx_req->in_fst_vlan_type =
8706                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
8707         rx_req->in_sec_vlan_type =
8708                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
8709
8710         status = hclge_cmd_send(&hdev->hw, &desc, 1);
8711         if (status) {
8712                 dev_err(&hdev->pdev->dev,
8713                         "Send rxvlan protocol type command fail, ret =%d\n",
8714                         status);
8715                 return status;
8716         }
8717
8718         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
8719
8720         tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
8721         tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
8722         tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
8723
8724         status = hclge_cmd_send(&hdev->hw, &desc, 1);
8725         if (status)
8726                 dev_err(&hdev->pdev->dev,
8727                         "Send txvlan protocol type command fail, ret =%d\n",
8728                         status);
8729
8730         return status;
8731 }
8732
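/* Initialize vlan handling at (re)init time: enable the hw vlan filters
 * (per VF function on V2+ devices, a single egress filter on older
 * revisions), program the default 0x8100 protocol types, apply each
 * vport's port based vlan offload config and finally add vlan 0.
 */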
8733 static int hclge_init_vlan_config(struct hclge_dev *hdev)
8734 {
8735 #define HCLGE_DEF_VLAN_TYPE             0x8100
8736
8737         struct hnae3_handle *handle = &hdev->vport[0].nic;
8738         struct hclge_vport *vport;
8739         int ret;
8740         int i;
8741
8742         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
8743                 /* for revision 0x21, vf vlan filter is per function */
8744                 for (i = 0; i < hdev->num_alloc_vport; i++) {
8745                         vport = &hdev->vport[i];
8746                         ret = hclge_set_vlan_filter_ctrl(hdev,
8747                                                          HCLGE_FILTER_TYPE_VF,
8748                                                          HCLGE_FILTER_FE_EGRESS,
8749                                                          true,
8750                                                          vport->vport_id);
8751                         if (ret)
8752                                 return ret;
8753                 }
8754
8755                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8756                                                  HCLGE_FILTER_FE_INGRESS, true,
8757                                                  0);
8758                 if (ret)
8759                         return ret;
8760         } else {
8761                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8762                                                  HCLGE_FILTER_FE_EGRESS_V1_B,
8763                                                  true, 0);
8764                 if (ret)
8765                         return ret;
8766         }
8767
8768         handle->netdev_flags |= HNAE3_VLAN_FLTR;
8769
8770         hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8771         hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8772         hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8773         hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8774         hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
8775         hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
8776
8777         ret = hclge_set_vlan_protocol_type(hdev);
8778         if (ret)
8779                 return ret;
8780
8781         for (i = 0; i < hdev->num_alloc_vport; i++) {
8782                 u16 vlan_tag;
8783
8784                 vport = &hdev->vport[i];
8785                 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8786
8787                 ret = hclge_vlan_offload_cfg(vport,
8788                                              vport->port_base_vlan_cfg.state,
8789                                              vlan_tag);
8790                 if (ret)
8791                         return ret;
8792         }
8793
8794         return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
8795 }
8796
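/* The helpers below maintain the per-vport software vlan list. It mirrors
 * the vlans requested by the stack, and hd_tbl_status records whether an
 * entry has actually been written to the hw vlan filter table yet.
 */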
8797 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8798                                        bool writen_to_tbl)
8799 {
8800         struct hclge_vport_vlan_cfg *vlan;
8801
8802         vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
8803         if (!vlan)
8804                 return;
8805
8806         vlan->hd_tbl_status = writen_to_tbl;
8807         vlan->vlan_id = vlan_id;
8808
8809         list_add_tail(&vlan->node, &vport->vlan_list);
8810 }
8811
8812 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
8813 {
8814         struct hclge_vport_vlan_cfg *vlan, *tmp;
8815         struct hclge_dev *hdev = vport->back;
8816         int ret;
8817
8818         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8819                 if (!vlan->hd_tbl_status) {
8820                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8821                                                        vport->vport_id,
8822                                                        vlan->vlan_id, false);
8823                         if (ret) {
8824                                 dev_err(&hdev->pdev->dev,
8825                                         "restore vport vlan list failed, ret=%d\n",
8826                                         ret);
8827                                 return ret;
8828                         }
8829                 }
8830                 vlan->hd_tbl_status = true;
8831         }
8832
8833         return 0;
8834 }
8835
8836 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8837                                       bool is_write_tbl)
8838 {
8839         struct hclge_vport_vlan_cfg *vlan, *tmp;
8840         struct hclge_dev *hdev = vport->back;
8841
8842         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8843                 if (vlan->vlan_id == vlan_id) {
8844                         if (is_write_tbl && vlan->hd_tbl_status)
8845                                 hclge_set_vlan_filter_hw(hdev,
8846                                                          htons(ETH_P_8021Q),
8847                                                          vport->vport_id,
8848                                                          vlan_id,
8849                                                          true);
8850
8851                         list_del(&vlan->node);
8852                         kfree(vlan);
8853                         break;
8854                 }
8855         }
8856 }
8857
8858 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
8859 {
8860         struct hclge_vport_vlan_cfg *vlan, *tmp;
8861         struct hclge_dev *hdev = vport->back;
8862
8863         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8864                 if (vlan->hd_tbl_status)
8865                         hclge_set_vlan_filter_hw(hdev,
8866                                                  htons(ETH_P_8021Q),
8867                                                  vport->vport_id,
8868                                                  vlan->vlan_id,
8869                                                  true);
8870
8871                 vlan->hd_tbl_status = false;
8872                 if (is_del_list) {
8873                         list_del(&vlan->node);
8874                         kfree(vlan);
8875                 }
8876         }
8877         clear_bit(vport->vport_id, hdev->vf_vlan_full);
8878 }
8879
8880 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
8881 {
8882         struct hclge_vport_vlan_cfg *vlan, *tmp;
8883         struct hclge_vport *vport;
8884         int i;
8885
8886         for (i = 0; i < hdev->num_alloc_vport; i++) {
8887                 vport = &hdev->vport[i];
8888                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8889                         list_del(&vlan->node);
8890                         kfree(vlan);
8891                 }
8892         }
8893 }
8894
8895 void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
8896 {
8897         struct hclge_vport_vlan_cfg *vlan, *tmp;
8898         struct hclge_dev *hdev = vport->back;
8899         u16 vlan_proto;
8900         u16 vlan_id;
8901         u16 state;
8902         int ret;
8903
8904         vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
8905         vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8906         state = vport->port_base_vlan_cfg.state;
8907
8908         if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
8909                 clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
8910                 hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
8911                                          vport->vport_id, vlan_id,
8912                                          false);
8913                 return;
8914         }
8915
8916         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8917                 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8918                                                vport->vport_id,
8919                                                vlan->vlan_id, false);
8920                 if (ret)
8921                         break;
8922                 vlan->hd_tbl_status = true;
8923         }
8924 }
8925
8926 /* For global reset and imp reset, hardware will clear the mac table,
8927  * so we change the mac address state from ACTIVE to TO_ADD; then they
8928  * can be restored in the service task after the reset completes.
8929  * Furthermore, mac addresses in state TO_DEL or DEL_FAIL do not need to
8930  * be restored after reset, so just remove these mac nodes from mac_list.
8931  */
8932 static void hclge_mac_node_convert_for_reset(struct list_head *list)
8933 {
8934         struct hclge_mac_node *mac_node, *tmp;
8935
8936         list_for_each_entry_safe(mac_node, tmp, list, node) {
8937                 if (mac_node->state == HCLGE_MAC_ACTIVE) {
8938                         mac_node->state = HCLGE_MAC_TO_ADD;
8939                 } else if (mac_node->state == HCLGE_MAC_TO_DEL) {
8940                         list_del(&mac_node->node);
8941                         kfree(mac_node);
8942                 }
8943         }
8944 }
8945
8946 void hclge_restore_mac_table_common(struct hclge_vport *vport)
8947 {
8948         spin_lock_bh(&vport->mac_list_lock);
8949
8950         hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
8951         hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
8952         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8953
8954         spin_unlock_bh(&vport->mac_list_lock);
8955 }
8956
8957 static void hclge_restore_hw_table(struct hclge_dev *hdev)
8958 {
8959         struct hclge_vport *vport = &hdev->vport[0];
8960         struct hnae3_handle *handle = &vport->nic;
8961
8962         hclge_restore_mac_table_common(vport);
8963         hclge_restore_vport_vlan_table(vport);
8964         set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
8965
8966         hclge_restore_fd_entries(handle);
8967 }
8968
8969 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
8970 {
8971         struct hclge_vport *vport = hclge_get_vport(handle);
8972
8973         if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8974                 vport->rxvlan_cfg.strip_tag1_en = false;
8975                 vport->rxvlan_cfg.strip_tag2_en = enable;
8976         } else {
8977                 vport->rxvlan_cfg.strip_tag1_en = enable;
8978                 vport->rxvlan_cfg.strip_tag2_en = true;
8979         }
8980         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8981         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8982         vport->rxvlan_cfg.rx_vlan_offload_en = enable;
8983
8984         return hclge_set_vlan_rx_offload_cfg(vport);
8985 }
8986
8987 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
8988                                             u16 port_base_vlan_state,
8989                                             struct hclge_vlan_info *new_info,
8990                                             struct hclge_vlan_info *old_info)
8991 {
8992         struct hclge_dev *hdev = vport->back;
8993         int ret;
8994
8995         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
8996                 hclge_rm_vport_all_vlan_table(vport, false);
8997                 return hclge_set_vlan_filter_hw(hdev,
8998                                                  htons(new_info->vlan_proto),
8999                                                  vport->vport_id,
9000                                                  new_info->vlan_tag,
9001                                                  false);
9002         }
9003
9004         ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
9005                                        vport->vport_id, old_info->vlan_tag,
9006                                        true);
9007         if (ret)
9008                 return ret;
9009
9010         return hclge_add_vport_all_vlan_table(vport);
9011 }
9012
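/* Apply a new port based vlan @state to @vport: reprogram the vlan tag
 * offload config, update the hw filter entries (swapping the old tag for
 * the new one on modify, or moving between the port based vlan and the
 * vport vlan list on enable/disable), and record the new vlan info.
 */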
9013 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
9014                                     struct hclge_vlan_info *vlan_info)
9015 {
9016         struct hnae3_handle *nic = &vport->nic;
9017         struct hclge_vlan_info *old_vlan_info;
9018         struct hclge_dev *hdev = vport->back;
9019         int ret;
9020
9021         old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
9022
9023         ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
9024         if (ret)
9025                 return ret;
9026
9027         if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
9028                 /* add new VLAN tag */
9029                 ret = hclge_set_vlan_filter_hw(hdev,
9030                                                htons(vlan_info->vlan_proto),
9031                                                vport->vport_id,
9032                                                vlan_info->vlan_tag,
9033                                                false);
9034                 if (ret)
9035                         return ret;
9036
9037                 /* remove old VLAN tag */
9038                 ret = hclge_set_vlan_filter_hw(hdev,
9039                                                htons(old_vlan_info->vlan_proto),
9040                                                vport->vport_id,
9041                                                old_vlan_info->vlan_tag,
9042                                                true);
9043                 if (ret)
9044                         return ret;
9045
9046                 goto update;
9047         }
9048
9049         ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
9050                                                old_vlan_info);
9051         if (ret)
9052                 return ret;
9053
9054         /* update state only when disable/enable port based VLAN */
9055         vport->port_base_vlan_cfg.state = state;
9056         if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
9057                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
9058         else
9059                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
9060
9061 update:
9062         vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
9063         vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
9064         vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
9065
9066         return 0;
9067 }
9068
9069 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
9070                                           enum hnae3_port_base_vlan_state state,
9071                                           u16 vlan)
9072 {
9073         if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9074                 if (!vlan)
9075                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
9076                 else
9077                         return HNAE3_PORT_BASE_VLAN_ENABLE;
9078         } else {
9079                 if (!vlan)
9080                         return HNAE3_PORT_BASE_VLAN_DISABLE;
9081                 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
9082                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
9083                 else
9084                         return HNAE3_PORT_BASE_VLAN_MODIFY;
9085         }
9086 }
9087
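/* Set a VF's port based vlan from the PF side. Only supported from
 * HNAE3_DEVICE_VERSION_V2. The request is validated, translated into an
 * enable/disable/modify/no-change state, and then either applied directly
 * when the VF is not alive or forwarded to the VF via
 * hclge_push_vf_port_base_vlan_info().
 */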
9088 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
9089                                     u16 vlan, u8 qos, __be16 proto)
9090 {
9091         struct hclge_vport *vport = hclge_get_vport(handle);
9092         struct hclge_dev *hdev = vport->back;
9093         struct hclge_vlan_info vlan_info;
9094         u16 state;
9095         int ret;
9096
9097         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
9098                 return -EOPNOTSUPP;
9099
9100         vport = hclge_get_vf_vport(hdev, vfid);
9101         if (!vport)
9102                 return -EINVAL;
9103
9104         /* qos is a 3-bit value, so it cannot be bigger than 7 */
9105         if (vlan > VLAN_N_VID - 1 || qos > 7)
9106                 return -EINVAL;
9107         if (proto != htons(ETH_P_8021Q))
9108                 return -EPROTONOSUPPORT;
9109
9110         state = hclge_get_port_base_vlan_state(vport,
9111                                                vport->port_base_vlan_cfg.state,
9112                                                vlan);
9113         if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
9114                 return 0;
9115
9116         vlan_info.vlan_tag = vlan;
9117         vlan_info.qos = qos;
9118         vlan_info.vlan_proto = ntohs(proto);
9119
9120         if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
9121                 return hclge_update_port_base_vlan_cfg(vport, state,
9122                                                        &vlan_info);
9123         } else {
9124                 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
9125                                                         vport->vport_id, state,
9126                                                         vlan, qos,
9127                                                         ntohs(proto));
9128                 return ret;
9129         }
9130 }
9131
9132 static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
9133 {
9134         struct hclge_vlan_info *vlan_info;
9135         struct hclge_vport *vport;
9136         int ret;
9137         int vf;
9138
9139         /* clear the port based vlan for all VFs */
9140         for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
9141                 vport = &hdev->vport[vf];
9142                 vlan_info = &vport->port_base_vlan_cfg.vlan_info;
9143
9144                 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9145                                                vport->vport_id,
9146                                                vlan_info->vlan_tag, true);
9147                 if (ret)
9148                         dev_err(&hdev->pdev->dev,
9149                                 "failed to clear vf vlan for vf%d, ret = %d\n",
9150                                 vf - HCLGE_VF_VPORT_START_NUM, ret);
9151         }
9152 }
9153
9154 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
9155                           u16 vlan_id, bool is_kill)
9156 {
9157         struct hclge_vport *vport = hclge_get_vport(handle);
9158         struct hclge_dev *hdev = vport->back;
9159         bool writen_to_tbl = false;
9160         int ret = 0;
9161
9162         /* When the device is resetting or reset has failed, firmware cannot
9163          * handle mailbox messages. Just record the vlan id, and remove it
9164          * after the reset has finished.
9165          */
9166         if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9167              test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
9168                 set_bit(vlan_id, vport->vlan_del_fail_bmap);
9169                 return -EBUSY;
9170         }
9171
9172         /* When port based vlan is enabled, we use the port based vlan as the
9173          * vlan filter entry. In this case we don't update the vlan filter
9174          * table when the user adds or removes a vlan; we only update the
9175          * vport vlan list. The vlan ids in the vlan list are written to the
9176          * vlan filter table only once port based vlan is disabled.
9177          */
9178         if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9179                 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
9180                                                vlan_id, is_kill);
9181                 writen_to_tbl = true;
9182         }
9183
9184         if (!ret) {
9185                 if (is_kill)
9186                         hclge_rm_vport_vlan_table(vport, vlan_id, false);
9187                 else
9188                         hclge_add_vport_vlan_table(vport, vlan_id,
9189                                                    writen_to_tbl);
9190         } else if (is_kill) {
9191                 /* When removing the hw vlan filter failed, record the vlan
9192                  * id and try to remove it from hw later, to stay consistent
9193                  * with the stack.
9194                  */
9195                 set_bit(vlan_id, vport->vlan_del_fail_bmap);
9196         }
9197         return ret;
9198 }
9199
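/* Service task helper: retry the vlan deletions recorded in
 * vlan_del_fail_bmap while the device was resetting. The amount of work
 * per invocation is bounded by HCLGE_MAX_SYNC_COUNT.
 */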
9200 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
9201 {
9202 #define HCLGE_MAX_SYNC_COUNT    60
9203
9204         int i, ret, sync_cnt = 0;
9205         u16 vlan_id;
9206
9207         /* sync failed vlan deletions for every vport, including the PF */
9208         for (i = 0; i < hdev->num_alloc_vport; i++) {
9209                 struct hclge_vport *vport = &hdev->vport[i];
9210
9211                 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9212                                          VLAN_N_VID);
9213                 while (vlan_id != VLAN_N_VID) {
9214                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9215                                                        vport->vport_id, vlan_id,
9216                                                        true);
9217                         if (ret && ret != -EINVAL)
9218                                 return;
9219
9220                         clear_bit(vlan_id, vport->vlan_del_fail_bmap);
9221                         hclge_rm_vport_vlan_table(vport, vlan_id, false);
9222
9223                         sync_cnt++;
9224                         if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
9225                                 return;
9226
9227                         vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9228                                                  VLAN_N_VID);
9229                 }
9230         }
9231 }
9232
9233 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
9234 {
9235         struct hclge_config_max_frm_size_cmd *req;
9236         struct hclge_desc desc;
9237
9238         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
9239
9240         req = (struct hclge_config_max_frm_size_cmd *)desc.data;
9241         req->max_frm_size = cpu_to_le16(new_mps);
9242         req->min_frm_size = HCLGE_MAC_MIN_FRAME;
9243
9244         return hclge_cmd_send(&hdev->hw, &desc, 1);
9245 }
9246
9247 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
9248 {
9249         struct hclge_vport *vport = hclge_get_vport(handle);
9250
9251         return hclge_set_vport_mtu(vport, new_mtu);
9252 }
9253
9254 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
9255 {
9256         struct hclge_dev *hdev = vport->back;
9257         int i, max_frm_size, ret;
9258
9259         /* HW supports two layers of vlan tags */
9260         max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
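        /* e.g. a 1500 byte MTU gives a max frame size of
         * 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 2 * 4 (VLAN_HLEN) = 1526
         */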
9261         if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
9262             max_frm_size > HCLGE_MAC_MAX_FRAME)
9263                 return -EINVAL;
9264
9265         max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
9266         mutex_lock(&hdev->vport_lock);
9267         /* VF's mps must fit within hdev->mps */
9268         if (vport->vport_id && max_frm_size > hdev->mps) {
9269                 mutex_unlock(&hdev->vport_lock);
9270                 return -EINVAL;
9271         } else if (vport->vport_id) {
9272                 vport->mps = max_frm_size;
9273                 mutex_unlock(&hdev->vport_lock);
9274                 return 0;
9275         }
9276
9277         /* PF's mps must be no less than any VF's mps */
9278         for (i = 1; i < hdev->num_alloc_vport; i++)
9279                 if (max_frm_size < hdev->vport[i].mps) {
9280                         mutex_unlock(&hdev->vport_lock);
9281                         return -EINVAL;
9282                 }
9283
9284         hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
9285
9286         ret = hclge_set_mac_mtu(hdev, max_frm_size);
9287         if (ret) {
9288                 dev_err(&hdev->pdev->dev,
9289                         "Change mtu fail, ret =%d\n", ret);
9290                 goto out;
9291         }
9292
9293         hdev->mps = max_frm_size;
9294         vport->mps = max_frm_size;
9295
9296         ret = hclge_buffer_alloc(hdev);
9297         if (ret)
9298                 dev_err(&hdev->pdev->dev,
9299                         "Allocate buffer fail, ret =%d\n", ret);
9300
9301 out:
9302         hclge_notify_client(hdev, HNAE3_UP_CLIENT);
9303         mutex_unlock(&hdev->vport_lock);
9304         return ret;
9305 }
9306
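/* Per-queue (TQP) reset is a two step handshake with firmware: assert the
 * reset request for the queue, poll the ready_to_reset status until the
 * hw reset has completed, then deassert the request.
 */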
9307 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
9308                                     bool enable)
9309 {
9310         struct hclge_reset_tqp_queue_cmd *req;
9311         struct hclge_desc desc;
9312         int ret;
9313
9314         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
9315
9316         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
9317         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
9318         if (enable)
9319                 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
9320
9321         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9322         if (ret) {
9323                 dev_err(&hdev->pdev->dev,
9324                         "Send tqp reset cmd error, status =%d\n", ret);
9325                 return ret;
9326         }
9327
9328         return 0;
9329 }
9330
9331 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
9332 {
9333         struct hclge_reset_tqp_queue_cmd *req;
9334         struct hclge_desc desc;
9335         int ret;
9336
9337         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
9338
9339         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
9340         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
9341
9342         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9343         if (ret) {
9344                 dev_err(&hdev->pdev->dev,
9345                         "Get reset status error, status =%d\n", ret);
9346                 return ret;
9347         }
9348
9349         return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
9350 }
9351
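/* Convert a queue id that is local to @handle into the global TQP index
 * used by the queue reset commands.
 */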
9352 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
9353 {
9354         struct hnae3_queue *queue;
9355         struct hclge_tqp *tqp;
9356
9357         queue = handle->kinfo.tqp[queue_id];
9358         tqp = container_of(queue, struct hclge_tqp, q);
9359
9360         return tqp->index;
9361 }
9362
9363 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
9364 {
9365         struct hclge_vport *vport = hclge_get_vport(handle);
9366         struct hclge_dev *hdev = vport->back;
9367         int reset_try_times = 0;
9368         int reset_status;
9369         u16 queue_gid;
9370         int ret;
9371
9372         queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
9373
9374         ret = hclge_tqp_enable(hdev, queue_id, 0, false);
9375         if (ret) {
9376                 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
9377                 return ret;
9378         }
9379
9380         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
9381         if (ret) {
9382                 dev_err(&hdev->pdev->dev,
9383                         "Send reset tqp cmd fail, ret = %d\n", ret);
9384                 return ret;
9385         }
9386
9387         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
9388                 reset_status = hclge_get_reset_status(hdev, queue_gid);
9389                 if (reset_status)
9390                         break;
9391
9392                 /* Wait for tqp hw reset */
9393                 usleep_range(1000, 1200);
9394         }
9395
9396         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
9397                 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
9398                 return -ETIME;
9399         }
9400
9401         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
9402         if (ret)
9403                 dev_err(&hdev->pdev->dev,
9404                         "Deassert the soft reset fail, ret = %d\n", ret);
9405
9406         return ret;
9407 }
9408
9409 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
9410 {
9411         struct hclge_dev *hdev = vport->back;
9412         int reset_try_times = 0;
9413         int reset_status;
9414         u16 queue_gid;
9415         int ret;
9416
9417         queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
9418
9419         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
9420         if (ret) {
9421                 dev_warn(&hdev->pdev->dev,
9422                          "Send reset tqp cmd fail, ret = %d\n", ret);
9423                 return;
9424         }
9425
9426         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
9427                 reset_status = hclge_get_reset_status(hdev, queue_gid);
9428                 if (reset_status)
9429                         break;
9430
9431                 /* Wait for tqp hw reset */
9432                 usleep_range(1000, 1200);
9433         }
9434
9435         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
9436                 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
9437                 return;
9438         }
9439
9440         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
9441         if (ret)
9442                 dev_warn(&hdev->pdev->dev,
9443                          "Deassert the soft reset fail, ret = %d\n", ret);
9444 }
9445
9446 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
9447 {
9448         struct hclge_vport *vport = hclge_get_vport(handle);
9449         struct hclge_dev *hdev = vport->back;
9450
9451         return hdev->fw_version;
9452 }
9453
9454 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
9455 {
9456         struct phy_device *phydev = hdev->hw.mac.phydev;
9457
9458         if (!phydev)
9459                 return;
9460
9461         phy_set_asym_pause(phydev, rx_en, tx_en);
9462 }
9463
9464 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
9465 {
9466         int ret;
9467
9468         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
9469                 return 0;
9470
9471         ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
9472         if (ret)
9473                 dev_err(&hdev->pdev->dev,
9474                         "configure pauseparam error, ret = %d.\n", ret);
9475
9476         return ret;
9477 }
9478
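/* Re-resolve the MAC pause configuration after autoneg on a PHY-attached
 * port: combine the local and link partner pause advertisements with
 * mii_resolve_flowctrl_fdx() and program the MAC accordingly. Pause is
 * not used in half duplex.
 */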
9479 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
9480 {
9481         struct phy_device *phydev = hdev->hw.mac.phydev;
9482         u16 remote_advertising = 0;
9483         u16 local_advertising;
9484         u32 rx_pause, tx_pause;
9485         u8 flowctl;
9486
9487         if (!phydev->link || !phydev->autoneg)
9488                 return 0;
9489
9490         local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
9491
9492         if (phydev->pause)
9493                 remote_advertising = LPA_PAUSE_CAP;
9494
9495         if (phydev->asym_pause)
9496                 remote_advertising |= LPA_PAUSE_ASYM;
9497
9498         flowctl = mii_resolve_flowctrl_fdx(local_advertising,
9499                                            remote_advertising);
9500         tx_pause = flowctl & FLOW_CTRL_TX;
9501         rx_pause = flowctl & FLOW_CTRL_RX;
9502
9503         if (phydev->duplex == HCLGE_MAC_HALF) {
9504                 tx_pause = 0;
9505                 rx_pause = 0;
9506         }
9507
9508         return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
9509 }
9510
9511 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
9512                                  u32 *rx_en, u32 *tx_en)
9513 {
9514         struct hclge_vport *vport = hclge_get_vport(handle);
9515         struct hclge_dev *hdev = vport->back;
9516         struct phy_device *phydev = hdev->hw.mac.phydev;
9517
9518         *auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
9519
9520         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
9521                 *rx_en = 0;
9522                 *tx_en = 0;
9523                 return;
9524         }
9525
9526         if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
9527                 *rx_en = 1;
9528                 *tx_en = 0;
9529         } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
9530                 *tx_en = 1;
9531                 *rx_en = 0;
9532         } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
9533                 *rx_en = 1;
9534                 *tx_en = 1;
9535         } else {
9536                 *rx_en = 0;
9537                 *tx_en = 0;
9538         }
9539 }
9540
9541 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
9542                                          u32 rx_en, u32 tx_en)
9543 {
9544         if (rx_en && tx_en)
9545                 hdev->fc_mode_last_time = HCLGE_FC_FULL;
9546         else if (rx_en && !tx_en)
9547                 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
9548         else if (!rx_en && tx_en)
9549                 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
9550         else
9551                 hdev->fc_mode_last_time = HCLGE_FC_NONE;
9552
9553         hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
9554 }
9555
9556 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
9557                                 u32 rx_en, u32 tx_en)
9558 {
9559         struct hclge_vport *vport = hclge_get_vport(handle);
9560         struct hclge_dev *hdev = vport->back;
9561         struct phy_device *phydev = hdev->hw.mac.phydev;
9562         u32 fc_autoneg;
9563
9564         if (phydev) {
9565                 fc_autoneg = hclge_get_autoneg(handle);
9566                 if (auto_neg != fc_autoneg) {
9567                         dev_info(&hdev->pdev->dev,
9568                                  "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
9569                         return -EOPNOTSUPP;
9570                 }
9571         }
9572
9573         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
9574                 dev_info(&hdev->pdev->dev,
9575                          "Priority flow control enabled. Cannot set link flow control.\n");
9576                 return -EOPNOTSUPP;
9577         }
9578
9579         hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
9580
9581         hclge_record_user_pauseparam(hdev, rx_en, tx_en);
9582
9583         if (!auto_neg)
9584                 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
9585
9586         if (phydev)
9587                 return phy_start_aneg(phydev);
9588
9589         return -EOPNOTSUPP;
9590 }
9591
9592 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
9593                                           u8 *auto_neg, u32 *speed, u8 *duplex)
9594 {
9595         struct hclge_vport *vport = hclge_get_vport(handle);
9596         struct hclge_dev *hdev = vport->back;
9597
9598         if (speed)
9599                 *speed = hdev->hw.mac.speed;
9600         if (duplex)
9601                 *duplex = hdev->hw.mac.duplex;
9602         if (auto_neg)
9603                 *auto_neg = hdev->hw.mac.autoneg;
9604 }
9605
9606 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
9607                                  u8 *module_type)
9608 {
9609         struct hclge_vport *vport = hclge_get_vport(handle);
9610         struct hclge_dev *hdev = vport->back;
9611
9612         /* When the nic is down, the service task is not running and does not
9613          * update the port information every second. Query the port information
9614          * before returning the media type, to make sure it is correct.
9615          */
9616         hclge_update_port_info(hdev);
9617
9618         if (media_type)
9619                 *media_type = hdev->hw.mac.media_type;
9620
9621         if (module_type)
9622                 *module_type = hdev->hw.mac.module_type;
9623 }
9624
9625 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
9626                                 u8 *tp_mdix_ctrl, u8 *tp_mdix)
9627 {
9628         struct hclge_vport *vport = hclge_get_vport(handle);
9629         struct hclge_dev *hdev = vport->back;
9630         struct phy_device *phydev = hdev->hw.mac.phydev;
9631         int mdix_ctrl, mdix, is_resolved;
9632         unsigned int retval;
9633
9634         if (!phydev) {
9635                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
9636                 *tp_mdix = ETH_TP_MDI_INVALID;
9637                 return;
9638         }
9639
9640         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
9641
9642         retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
9643         mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
9644                                     HCLGE_PHY_MDIX_CTRL_S);
9645
9646         retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
9647         mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
9648         is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
9649
9650         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
9651
9652         switch (mdix_ctrl) {
9653         case 0x0:
9654                 *tp_mdix_ctrl = ETH_TP_MDI;
9655                 break;
9656         case 0x1:
9657                 *tp_mdix_ctrl = ETH_TP_MDI_X;
9658                 break;
9659         case 0x3:
9660                 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
9661                 break;
9662         default:
9663                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
9664                 break;
9665         }
9666
9667         if (!is_resolved)
9668                 *tp_mdix = ETH_TP_MDI_INVALID;
9669         else if (mdix)
9670                 *tp_mdix = ETH_TP_MDI_X;
9671         else
9672                 *tp_mdix = ETH_TP_MDI;
9673 }
9674
9675 static void hclge_info_show(struct hclge_dev *hdev)
9676 {
9677         struct device *dev = &hdev->pdev->dev;
9678
9679         dev_info(dev, "PF info begin:\n");
9680
9681         dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
9682         dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
9683         dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
9684         dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
9685         dev_info(dev, "Numbers of vmdp vports: %u\n", hdev->num_vmdq_vport);
9686         dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
9687         dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
9688         dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
9689         dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
9690         dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
9691         dev_info(dev, "This is %s PF\n",
9692                  hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
9693         dev_info(dev, "DCB %s\n",
9694                  hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
9695         dev_info(dev, "MQPRIO %s\n",
9696                  hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
9697
9698         dev_info(dev, "PF info end.\n");
9699 }
9700
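/* Bring up the NIC client instance on @vport. If a reset started while
 * the client was initializing, the instance is torn down again and
 * -EBUSY is returned.
 */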
9701 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
9702                                           struct hclge_vport *vport)
9703 {
9704         struct hnae3_client *client = vport->nic.client;
9705         struct hclge_dev *hdev = ae_dev->priv;
9706         int rst_cnt = hdev->rst_stats.reset_cnt;
9707         int ret;
9708
9709         ret = client->ops->init_instance(&vport->nic);
9710         if (ret)
9711                 return ret;
9712
9713         set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9714         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9715             rst_cnt != hdev->rst_stats.reset_cnt) {
9716                 ret = -EBUSY;
9717                 goto init_nic_err;
9718         }
9719
9720         /* Enable nic hw error interrupts */
9721         ret = hclge_config_nic_hw_error(hdev, true);
9722         if (ret) {
9723                 dev_err(&ae_dev->pdev->dev,
9724                         "fail(%d) to enable hw error interrupts\n", ret);
9725                 goto init_nic_err;
9726         }
9727
9728         hnae3_set_client_init_flag(client, ae_dev, 1);
9729
9730         if (netif_msg_drv(&hdev->vport->nic))
9731                 hclge_info_show(hdev);
9732
9733         return ret;
9734
9735 init_nic_err:
9736         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9737         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9738                 msleep(HCLGE_WAIT_RESET_DONE);
9739
9740         client->ops->uninit_instance(&vport->nic, 0);
9741
9742         return ret;
9743 }
9744
9745 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
9746                                            struct hclge_vport *vport)
9747 {
9748         struct hclge_dev *hdev = ae_dev->priv;
9749         struct hnae3_client *client;
9750         int rst_cnt;
9751         int ret;
9752
9753         if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
9754             !hdev->nic_client)
9755                 return 0;
9756
9757         client = hdev->roce_client;
9758         ret = hclge_init_roce_base_info(vport);
9759         if (ret)
9760                 return ret;
9761
9762         rst_cnt = hdev->rst_stats.reset_cnt;
9763         ret = client->ops->init_instance(&vport->roce);
9764         if (ret)
9765                 return ret;
9766
9767         set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9768         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9769             rst_cnt != hdev->rst_stats.reset_cnt) {
9770                 ret = -EBUSY;
9771                 goto init_roce_err;
9772         }
9773
9774         /* Enable roce ras interrupts */
9775         ret = hclge_config_rocee_ras_interrupt(hdev, true);
9776         if (ret) {
9777                 dev_err(&ae_dev->pdev->dev,
9778                         "fail(%d) to enable roce ras interrupts\n", ret);
9779                 goto init_roce_err;
9780         }
9781
9782         hnae3_set_client_init_flag(client, ae_dev, 1);
9783
9784         return 0;
9785
9786 init_roce_err:
9787         clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9788         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9789                 msleep(HCLGE_WAIT_RESET_DONE);
9790
9791         hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
9792
9793         return ret;
9794 }
9795
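/* Bind a newly registered client (KNIC or RoCE) to each vport and
 * initialize the corresponding client instances.
 */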
9796 static int hclge_init_client_instance(struct hnae3_client *client,
9797                                       struct hnae3_ae_dev *ae_dev)
9798 {
9799         struct hclge_dev *hdev = ae_dev->priv;
9800         struct hclge_vport *vport;
9801         int i, ret;
9802
9803         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9804                 vport = &hdev->vport[i];
9805
9806                 switch (client->type) {
9807                 case HNAE3_CLIENT_KNIC:
9808                         hdev->nic_client = client;
9809                         vport->nic.client = client;
9810                         ret = hclge_init_nic_client_instance(ae_dev, vport);
9811                         if (ret)
9812                                 goto clear_nic;
9813
9814                         ret = hclge_init_roce_client_instance(ae_dev, vport);
9815                         if (ret)
9816                                 goto clear_roce;
9817
9818                         break;
9819                 case HNAE3_CLIENT_ROCE:
9820                         if (hnae3_dev_roce_supported(hdev)) {
9821                                 hdev->roce_client = client;
9822                                 vport->roce.client = client;
9823                         }
9824
9825                         ret = hclge_init_roce_client_instance(ae_dev, vport);
9826                         if (ret)
9827                                 goto clear_roce;
9828
9829                         break;
9830                 default:
9831                         return -EINVAL;
9832                 }
9833         }
9834
9835         return 0;
9836
9837 clear_nic:
9838         hdev->nic_client = NULL;
9839         vport->nic.client = NULL;
9840         return ret;
9841 clear_roce:
9842         hdev->roce_client = NULL;
9843         vport->roce.client = NULL;
9844         return ret;
9845 }
9846
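/* Tear down the client instances on each vport, waiting for any reset in
 * progress to complete before calling the client's uninit hook.
 */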
9847 static void hclge_uninit_client_instance(struct hnae3_client *client,
9848                                          struct hnae3_ae_dev *ae_dev)
9849 {
9850         struct hclge_dev *hdev = ae_dev->priv;
9851         struct hclge_vport *vport;
9852         int i;
9853
9854         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9855                 vport = &hdev->vport[i];
9856                 if (hdev->roce_client) {
9857                         clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9858                         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9859                                 msleep(HCLGE_WAIT_RESET_DONE);
9860
9861                         hdev->roce_client->ops->uninit_instance(&vport->roce,
9862                                                                 0);
9863                         hdev->roce_client = NULL;
9864                         vport->roce.client = NULL;
9865                 }
9866                 if (client->type == HNAE3_CLIENT_ROCE)
9867                         return;
9868                 if (hdev->nic_client && client->ops->uninit_instance) {
9869                         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9870                         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9871                                 msleep(HCLGE_WAIT_RESET_DONE);
9872
9873                         client->ops->uninit_instance(&vport->nic, 0);
9874                         hdev->nic_client = NULL;
9875                         vport->nic.client = NULL;
9876                 }
9877         }
9878 }
9879
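/* Enable the PCI device, set the DMA mask, request the BAR regions and
 * map the configuration register space (BAR 2).
 */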
9880 static int hclge_pci_init(struct hclge_dev *hdev)
9881 {
9882         struct pci_dev *pdev = hdev->pdev;
9883         struct hclge_hw *hw;
9884         int ret;
9885
9886         ret = pci_enable_device(pdev);
9887         if (ret) {
9888                 dev_err(&pdev->dev, "failed to enable PCI device\n");
9889                 return ret;
9890         }
9891
9892         ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9893         if (ret) {
9894                 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9895                 if (ret) {
9896                         dev_err(&pdev->dev,
9897                                 "can't set consistent PCI DMA\n");
9898                         goto err_disable_device;
9899                 }
9900                 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
9901         }
9902
9903         ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
9904         if (ret) {
9905                 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
9906                 goto err_disable_device;
9907         }
9908
9909         pci_set_master(pdev);
9910         hw = &hdev->hw;
9911         hw->io_base = pcim_iomap(pdev, 2, 0);
9912         if (!hw->io_base) {
9913                 dev_err(&pdev->dev, "Can't map configuration register space\n");
9914                 ret = -ENOMEM;
9915                 goto err_clr_master;
9916         }
9917
9918         hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
9919
9920         return 0;
9921 err_clr_master:
9922         pci_clear_master(pdev);
9923         pci_release_regions(pdev);
9924 err_disable_device:
9925         pci_disable_device(pdev);
9926
9927         return ret;
9928 }
9929
9930 static void hclge_pci_uninit(struct hclge_dev *hdev)
9931 {
9932         struct pci_dev *pdev = hdev->pdev;
9933
9934         pcim_iounmap(pdev, hdev->hw.io_base);
9935         pci_free_irq_vectors(pdev);
9936         pci_clear_master(pdev);
9937         pci_release_mem_regions(pdev);
9938         pci_disable_device(pdev);
9939 }
9940
9941 static void hclge_state_init(struct hclge_dev *hdev)
9942 {
9943         set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
9944         set_bit(HCLGE_STATE_DOWN, &hdev->state);
9945         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
9946         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9947         clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
9948         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
9949         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
9950 }
9951
9952 static void hclge_state_uninit(struct hclge_dev *hdev)
9953 {
9954         set_bit(HCLGE_STATE_DOWN, &hdev->state);
9955         set_bit(HCLGE_STATE_REMOVING, &hdev->state);
9956
9957         if (hdev->reset_timer.function)
9958                 del_timer_sync(&hdev->reset_timer);
9959         if (hdev->service_task.work.func)
9960                 cancel_delayed_work_sync(&hdev->service_task);
9961 }
9962
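/* Quiesce the PF before an FLR: take the reset semaphore and run the reset
 * preparation, retrying up to HCLGE_FLR_RETRY_CNT times, then disable the
 * misc vector and command queue handling until the FLR completes.
 */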
9963 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
9964 {
9965 #define HCLGE_FLR_RETRY_WAIT_MS 500
9966 #define HCLGE_FLR_RETRY_CNT     5
9967
9968         struct hclge_dev *hdev = ae_dev->priv;
9969         int retry_cnt = 0;
9970         int ret;
9971
9972 retry:
9973         down(&hdev->reset_sem);
9974         set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9975         hdev->reset_type = HNAE3_FLR_RESET;
9976         ret = hclge_reset_prepare(hdev);
9977         if (ret || hdev->reset_pending) {
9978                 dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
9979                         ret);
9980                 if (hdev->reset_pending ||
9981                     retry_cnt++ < HCLGE_FLR_RETRY_CNT) {
9982                         dev_err(&hdev->pdev->dev,
9983                                 "reset_pending:0x%lx, retry_cnt:%d\n",
9984                                 hdev->reset_pending, retry_cnt);
9985                         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9986                         up(&hdev->reset_sem);
9987                         msleep(HCLGE_FLR_RETRY_WAIT_MS);
9988                         goto retry;
9989                 }
9990         }
9991
9992         /* disable the misc vector until the FLR is done */
9993         hclge_enable_vector(&hdev->misc_vector, false);
9994         set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
9995         hdev->rst_stats.flr_rst_cnt++;
9996 }
9997
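/* Rebuild the hardware state after the FLR completes: re-enable the misc
 * vector, rebuild the device and release the reset semaphore.
 */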
9998 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
9999 {
10000         struct hclge_dev *hdev = ae_dev->priv;
10001         int ret;
10002
10003         hclge_enable_vector(&hdev->misc_vector, true);
10004
10005         ret = hclge_reset_rebuild(hdev);
10006         if (ret)
10007                 dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
10008
10009         hdev->reset_type = HNAE3_NONE_RESET;
10010         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10011         up(&hdev->reset_sem);
10012 }
10013
10014 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
10015 {
10016         u16 i;
10017
10018         for (i = 0; i < hdev->num_alloc_vport; i++) {
10019                 struct hclge_vport *vport = &hdev->vport[i];
10020                 int ret;
10021
10022                 /* Send cmd to clear VF's FUNC_RST_ING */
10023                 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
10024                 if (ret)
10025                         dev_warn(&hdev->pdev->dev,
10026                                  "clear vf(%u) rst failed %d!\n",
10027                                  vport->vport_id, ret);
10028         }
10029 }
10030
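/* Probe-time initialization of the PF: map the PCI resources, bring up the
 * command queue, configure MAC, VLAN, TM, RSS and flow director, and
 * schedule the service task.
 */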
10031 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
10032 {
10033         struct pci_dev *pdev = ae_dev->pdev;
10034         struct hclge_dev *hdev;
10035         int ret;
10036
10037         hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
10038         if (!hdev)
10039                 return -ENOMEM;
10040
10041         hdev->pdev = pdev;
10042         hdev->ae_dev = ae_dev;
10043         hdev->reset_type = HNAE3_NONE_RESET;
10044         hdev->reset_level = HNAE3_FUNC_RESET;
10045         ae_dev->priv = hdev;
10046
10047         /* HW supports 2 layers of VLAN tags */
10048         hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
10049
10050         mutex_init(&hdev->vport_lock);
10051         spin_lock_init(&hdev->fd_rule_lock);
10052         sema_init(&hdev->reset_sem, 1);
10053
10054         ret = hclge_pci_init(hdev);
10055         if (ret)
10056                 goto out;
10057
10058         /* Initialize the firmware command queue */
10059         ret = hclge_cmd_queue_init(hdev);
10060         if (ret)
10061                 goto err_pci_uninit;
10062
10063         /* Initialize the firmware command interface */
10064         ret = hclge_cmd_init(hdev);
10065         if (ret)
10066                 goto err_cmd_uninit;
10067
10068         ret = hclge_get_cap(hdev);
10069         if (ret)
10070                 goto err_cmd_uninit;
10071
10072         ret = hclge_query_dev_specs(hdev);
10073         if (ret) {
10074                 dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
10075                         ret);
10076                 goto err_cmd_uninit;
10077         }
10078
10079         ret = hclge_configure(hdev);
10080         if (ret) {
10081                 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
10082                 goto err_cmd_uninit;
10083         }
10084
10085         ret = hclge_init_msi(hdev);
10086         if (ret) {
10087                 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
10088                 goto err_cmd_uninit;
10089         }
10090
10091         ret = hclge_misc_irq_init(hdev);
10092         if (ret)
10093                 goto err_msi_uninit;
10094
10095         ret = hclge_alloc_tqps(hdev);
10096         if (ret) {
10097                 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
10098                 goto err_msi_irq_uninit;
10099         }
10100
10101         ret = hclge_alloc_vport(hdev);
10102         if (ret)
10103                 goto err_msi_irq_uninit;
10104
10105         ret = hclge_map_tqp(hdev);
10106         if (ret)
10107                 goto err_msi_irq_uninit;
10108
10109         if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
10110                 ret = hclge_mac_mdio_config(hdev);
10111                 if (ret)
10112                         goto err_msi_irq_uninit;
10113         }
10114
10115         ret = hclge_init_umv_space(hdev);
10116         if (ret)
10117                 goto err_mdiobus_unreg;
10118
10119         ret = hclge_mac_init(hdev);
10120         if (ret) {
10121                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
10122                 goto err_mdiobus_unreg;
10123         }
10124
10125         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
10126         if (ret) {
10127                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
10128                 goto err_mdiobus_unreg;
10129         }
10130
10131         ret = hclge_config_gro(hdev, true);
10132         if (ret)
10133                 goto err_mdiobus_unreg;
10134
10135         ret = hclge_init_vlan_config(hdev);
10136         if (ret) {
10137                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
10138                 goto err_mdiobus_unreg;
10139         }
10140
10141         ret = hclge_tm_schd_init(hdev);
10142         if (ret) {
10143                 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
10144                 goto err_mdiobus_unreg;
10145         }
10146
10147         hclge_rss_init_cfg(hdev);
10148         ret = hclge_rss_init_hw(hdev);
10149         if (ret) {
10150                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
10151                 goto err_mdiobus_unreg;
10152         }
10153
10154         ret = init_mgr_tbl(hdev);
10155         if (ret) {
10156                 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
10157                 goto err_mdiobus_unreg;
10158         }
10159
10160         ret = hclge_init_fd_config(hdev);
10161         if (ret) {
10162                 dev_err(&pdev->dev,
10163                         "fd table init fail, ret=%d\n", ret);
10164                 goto err_mdiobus_unreg;
10165         }
10166
10167         INIT_KFIFO(hdev->mac_tnl_log);
10168
10169         hclge_dcb_ops_set(hdev);
10170
10171         timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
10172         INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
10173
10174         /* Set up affinity after the service timer is set up because
10175          * add_timer_on is called from the affinity notify callback.
10176          */
10177         hclge_misc_affinity_setup(hdev);
10178
10179         hclge_clear_all_event_cause(hdev);
10180         hclge_clear_resetting_state(hdev);
10181
10182         /* Log and clear the hw errors that have already occurred */
10183         hclge_handle_all_hns_hw_errors(ae_dev);
10184
10185         /* Request a delayed reset for error recovery, because an immediate
10186          * global reset on one PF would affect other PFs' pending initialization.
10187          */
10188         if (ae_dev->hw_err_reset_req) {
10189                 enum hnae3_reset_type reset_level;
10190
10191                 reset_level = hclge_get_reset_level(ae_dev,
10192                                                     &ae_dev->hw_err_reset_req);
10193                 hclge_set_def_reset_request(ae_dev, reset_level);
10194                 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
10195         }
10196
10197         /* Enable MISC vector(vector0) */
10198         hclge_enable_vector(&hdev->misc_vector, true);
10199
10200         hclge_state_init(hdev);
10201         hdev->last_reset_time = jiffies;
10202
10203         dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
10204                  HCLGE_DRIVER_NAME);
10205
10206         hclge_task_schedule(hdev, round_jiffies_relative(HZ));
10207
10208         return 0;
10209
10210 err_mdiobus_unreg:
10211         if (hdev->hw.mac.phydev)
10212                 mdiobus_unregister(hdev->hw.mac.mdio_bus);
10213 err_msi_irq_uninit:
10214         hclge_misc_irq_uninit(hdev);
10215 err_msi_uninit:
10216         pci_free_irq_vectors(pdev);
10217 err_cmd_uninit:
10218         hclge_cmd_uninit(hdev);
10219 err_pci_uninit:
10220         pcim_iounmap(pdev, hdev->hw.io_base);
10221         pci_clear_master(pdev);
10222         pci_release_regions(pdev);
10223         pci_disable_device(pdev);
10224 out:
10225         mutex_destroy(&hdev->vport_lock);
10226         return ret;
10227 }
10228
10229 static void hclge_stats_clear(struct hclge_dev *hdev)
10230 {
10231         memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
10232 }
10233
10234 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10235 {
10236         return hclge_config_switch_param(hdev, vf, enable,
10237                                          HCLGE_SWITCH_ANTI_SPOOF_MASK);
10238 }
10239
10240 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10241 {
10242         return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
10243                                           HCLGE_FILTER_FE_NIC_INGRESS_B,
10244                                           enable, vf);
10245 }
10246
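/* Program both the MAC anti-spoof switch parameter and the VF VLAN ingress
 * filter for the given function.
 */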
10247 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
10248 {
10249         int ret;
10250
10251         ret = hclge_set_mac_spoofchk(hdev, vf, enable);
10252         if (ret) {
10253                 dev_err(&hdev->pdev->dev,
10254                         "Set vf %d mac spoof check %s failed, ret=%d\n",
10255                         vf, enable ? "on" : "off", ret);
10256                 return ret;
10257         }
10258
10259         ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
10260         if (ret)
10261                 dev_err(&hdev->pdev->dev,
10262                         "Set vf %d vlan spoof check %s failed, ret=%d\n",
10263                         vf, enable ? "on" : "off", ret);
10264
10265         return ret;
10266 }
10267
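/* Enable or disable spoof checking for a VF. Warn if the VF's VLAN or MAC
 * table is full, since enabling spoof checking may then cause its packets
 * to be dropped.
 */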
10268 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
10269                                  bool enable)
10270 {
10271         struct hclge_vport *vport = hclge_get_vport(handle);
10272         struct hclge_dev *hdev = vport->back;
10273         u32 new_spoofchk = enable ? 1 : 0;
10274         int ret;
10275
10276         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10277                 return -EOPNOTSUPP;
10278
10279         vport = hclge_get_vf_vport(hdev, vf);
10280         if (!vport)
10281                 return -EINVAL;
10282
10283         if (vport->vf_info.spoofchk == new_spoofchk)
10284                 return 0;
10285
10286         if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
10287                 dev_warn(&hdev->pdev->dev,
10288                          "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
10289                          vf);
10290         else if (enable && hclge_is_umv_space_full(vport, true))
10291                 dev_warn(&hdev->pdev->dev,
10292                          "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
10293                          vf);
10294
10295         ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
10296         if (ret)
10297                 return ret;
10298
10299         vport->vf_info.spoofchk = new_spoofchk;
10300         return 0;
10301 }
10302
10303 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
10304 {
10305         struct hclge_vport *vport = hdev->vport;
10306         int ret;
10307         int i;
10308
10309         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10310                 return 0;
10311
10312         /* resume the vf spoof check state after reset */
10313         for (i = 0; i < hdev->num_alloc_vport; i++) {
10314                 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
10315                                                vport->vf_info.spoofchk);
10316                 if (ret)
10317                         return ret;
10318
10319                 vport++;
10320         }
10321
10322         return 0;
10323 }
10324
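/* Mark a VF as trusted or untrusted. When a VF loses its trusted status,
 * its promiscuous mode is disabled and the VF is informed of the change.
 */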
10325 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
10326 {
10327         struct hclge_vport *vport = hclge_get_vport(handle);
10328         struct hclge_dev *hdev = vport->back;
10329         struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
10330         u32 new_trusted = enable ? 1 : 0;
10331         bool en_bc_pmc;
10332         int ret;
10333
10334         vport = hclge_get_vf_vport(hdev, vf);
10335         if (!vport)
10336                 return -EINVAL;
10337
10338         if (vport->vf_info.trusted == new_trusted)
10339                 return 0;
10340
10341         /* Disable promisc mode for VF if it is not trusted any more. */
10342         if (!enable && vport->vf_info.promisc_enable) {
10343                 en_bc_pmc = ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2;
10344                 ret = hclge_set_vport_promisc_mode(vport, false, false,
10345                                                    en_bc_pmc);
10346                 if (ret)
10347                         return ret;
10348                 vport->vf_info.promisc_enable = 0;
10349                 hclge_inform_vf_promisc_info(vport);
10350         }
10351
10352         vport->vf_info.trusted = new_trusted;
10353
10354         return 0;
10355 }
10356
10357 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
10358 {
10359         int ret;
10360         int vf;
10361
10362         /* reset vf rate to default value */
10363         for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10364                 struct hclge_vport *vport = &hdev->vport[vf];
10365
10366                 vport->vf_info.max_tx_rate = 0;
10367                 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
10368                 if (ret)
10369                         dev_err(&hdev->pdev->dev,
10370                                 "vf%d failed to reset to default, ret=%d\n",
10371                                 vf - HCLGE_VF_VPORT_START_NUM, ret);
10372         }
10373 }
10374
10375 static int hclge_vf_rate_param_check(struct hclge_dev *hdev, int vf,
10376                                      int min_tx_rate, int max_tx_rate)
10377 {
10378         if (min_tx_rate != 0 ||
10379             max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
10380                 dev_err(&hdev->pdev->dev,
10381                         "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
10382                         min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
10383                 return -EINVAL;
10384         }
10385
10386         return 0;
10387 }
10388
10389 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
10390                              int min_tx_rate, int max_tx_rate, bool force)
10391 {
10392         struct hclge_vport *vport = hclge_get_vport(handle);
10393         struct hclge_dev *hdev = vport->back;
10394         int ret;
10395
10396         ret = hclge_vf_rate_param_check(hdev, vf, min_tx_rate, max_tx_rate);
10397         if (ret)
10398                 return ret;
10399
10400         vport = hclge_get_vf_vport(hdev, vf);
10401         if (!vport)
10402                 return -EINVAL;
10403
10404         if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
10405                 return 0;
10406
10407         ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
10408         if (ret)
10409                 return ret;
10410
10411         vport->vf_info.max_tx_rate = max_tx_rate;
10412
10413         return 0;
10414 }
10415
10416 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
10417 {
10418         struct hnae3_handle *handle = &hdev->vport->nic;
10419         struct hclge_vport *vport;
10420         int ret;
10421         int vf;
10422
10423         /* resume the vf max_tx_rate after reset */
10424         for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
10425                 vport = hclge_get_vf_vport(hdev, vf);
10426                 if (!vport)
10427                         return -EINVAL;
10428
10429                 /* Zero means max rate; after a reset the firmware has already
10430                  * set it to max rate, so just continue.
10431                  */
10432                 if (!vport->vf_info.max_tx_rate)
10433                         continue;
10434
10435                 ret = hclge_set_vf_rate(handle, vf, 0,
10436                                         vport->vf_info.max_tx_rate, true);
10437                 if (ret) {
10438                         dev_err(&hdev->pdev->dev,
10439                                 "vf%d failed to resume tx_rate:%u, ret=%d\n",
10440                                 vf, vport->vf_info.max_tx_rate, ret);
10441                         return ret;
10442                 }
10443         }
10444
10445         return 0;
10446 }
10447
10448 static void hclge_reset_vport_state(struct hclge_dev *hdev)
10449 {
10450         struct hclge_vport *vport = hdev->vport;
10451         int i;
10452
10453         for (i = 0; i < hdev->num_alloc_vport; i++) {
10454                 hclge_vport_stop(vport);
10455                 vport++;
10456         }
10457 }
10458
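/* Re-initialize the hardware after a reset. IMP and global resets
 * invalidate the MAC/VLAN tables, so they are cleared in software and
 * marked for restore; other reset types keep the existing entries.
 */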
10459 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
10460 {
10461         struct hclge_dev *hdev = ae_dev->priv;
10462         struct pci_dev *pdev = ae_dev->pdev;
10463         int ret;
10464
10465         set_bit(HCLGE_STATE_DOWN, &hdev->state);
10466
10467         hclge_stats_clear(hdev);
10468         /* NOTE: a PF reset does not need to clear or restore the PF and VF
10469          * table entries, so the tables in memory should not be cleaned here.
10470          */
10471         if (hdev->reset_type == HNAE3_IMP_RESET ||
10472             hdev->reset_type == HNAE3_GLOBAL_RESET) {
10473                 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
10474                 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
10475                 bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
10476                 hclge_reset_umv_space(hdev);
10477         }
10478
10479         ret = hclge_cmd_init(hdev);
10480         if (ret) {
10481                 dev_err(&pdev->dev, "Cmd queue init failed\n");
10482                 return ret;
10483         }
10484
10485         ret = hclge_map_tqp(hdev);
10486         if (ret) {
10487                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
10488                 return ret;
10489         }
10490
10491         ret = hclge_mac_init(hdev);
10492         if (ret) {
10493                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
10494                 return ret;
10495         }
10496
10497         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
10498         if (ret) {
10499                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
10500                 return ret;
10501         }
10502
10503         ret = hclge_config_gro(hdev, true);
10504         if (ret)
10505                 return ret;
10506
10507         ret = hclge_init_vlan_config(hdev);
10508         if (ret) {
10509                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
10510                 return ret;
10511         }
10512
10513         ret = hclge_tm_init_hw(hdev, true);
10514         if (ret) {
10515                 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
10516                 return ret;
10517         }
10518
10519         ret = hclge_rss_init_hw(hdev);
10520         if (ret) {
10521                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
10522                 return ret;
10523         }
10524
10525         ret = init_mgr_tbl(hdev);
10526         if (ret) {
10527                 dev_err(&pdev->dev,
10528                         "failed to reinit manager table, ret = %d\n", ret);
10529                 return ret;
10530         }
10531
10532         ret = hclge_init_fd_config(hdev);
10533         if (ret) {
10534                 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
10535                 return ret;
10536         }
10537
10538         /* Log and clear the hw errors that have already occurred */
10539         hclge_handle_all_hns_hw_errors(ae_dev);
10540
10541         /* Re-enable the hw error interrupts because they are
10542          * disabled during a global reset.
10543          */
10544         ret = hclge_config_nic_hw_error(hdev, true);
10545         if (ret) {
10546                 dev_err(&pdev->dev,
10547                         "fail(%d) to re-enable NIC hw error interrupts\n",
10548                         ret);
10549                 return ret;
10550         }
10551
10552         if (hdev->roce_client) {
10553                 ret = hclge_config_rocee_ras_interrupt(hdev, true);
10554                 if (ret) {
10555                         dev_err(&pdev->dev,
10556                                 "fail(%d) to re-enable roce ras interrupts\n",
10557                                 ret);
10558                         return ret;
10559                 }
10560         }
10561
10562         hclge_reset_vport_state(hdev);
10563         ret = hclge_reset_vport_spoofchk(hdev);
10564         if (ret)
10565                 return ret;
10566
10567         ret = hclge_resume_vf_rate(hdev);
10568         if (ret)
10569                 return ret;
10570
10571         dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
10572                  HCLGE_DRIVER_NAME);
10573
10574         return 0;
10575 }
10576
10577 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
10578 {
10579         struct hclge_dev *hdev = ae_dev->priv;
10580         struct hclge_mac *mac = &hdev->hw.mac;
10581
10582         hclge_reset_vf_rate(hdev);
10583         hclge_clear_vf_vlan(hdev);
10584         hclge_misc_affinity_teardown(hdev);
10585         hclge_state_uninit(hdev);
10586         hclge_uninit_mac_table(hdev);
10587
10588         if (mac->phydev)
10589                 mdiobus_unregister(mac->mdio_bus);
10590
10591         /* Disable MISC vector(vector0) */
10592         hclge_enable_vector(&hdev->misc_vector, false);
10593         synchronize_irq(hdev->misc_vector.vector_irq);
10594
10595         /* Disable all hw interrupts */
10596         hclge_config_mac_tnl_int(hdev, false);
10597         hclge_config_nic_hw_error(hdev, false);
10598         hclge_config_rocee_ras_interrupt(hdev, false);
10599
10600         hclge_cmd_uninit(hdev);
10601         hclge_misc_irq_uninit(hdev);
10602         hclge_pci_uninit(hdev);
10603         mutex_destroy(&hdev->vport_lock);
10604         hclge_uninit_vport_vlan_table(hdev);
10605         ae_dev->priv = NULL;
10606 }
10607
10608 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
10609 {
10610         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10611         struct hclge_vport *vport = hclge_get_vport(handle);
10612         struct hclge_dev *hdev = vport->back;
10613
10614         return min_t(u32, hdev->rss_size_max,
10615                      vport->alloc_tqps / kinfo->num_tc);
10616 }
10617
10618 static void hclge_get_channels(struct hnae3_handle *handle,
10619                                struct ethtool_channels *ch)
10620 {
10621         ch->max_combined = hclge_get_max_channels(handle);
10622         ch->other_count = 1;
10623         ch->max_other = 1;
10624         ch->combined_count = handle->kinfo.rss_size;
10625 }
10626
10627 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
10628                                         u16 *alloc_tqps, u16 *max_rss_size)
10629 {
10630         struct hclge_vport *vport = hclge_get_vport(handle);
10631         struct hclge_dev *hdev = vport->back;
10632
10633         *alloc_tqps = vport->alloc_tqps;
10634         *max_rss_size = hdev->rss_size_max;
10635 }
10636
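/* Change the number of enabled queue pairs: update the TM vport mapping,
 * reprogram the RSS TC mode, and rebuild the RSS indirection table unless
 * the user has configured it explicitly.
 */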
10637 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
10638                               bool rxfh_configured)
10639 {
10640         struct hclge_vport *vport = hclge_get_vport(handle);
10641         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
10642         u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
10643         struct hclge_dev *hdev = vport->back;
10644         u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
10645         u16 cur_rss_size = kinfo->rss_size;
10646         u16 cur_tqps = kinfo->num_tqps;
10647         u16 tc_valid[HCLGE_MAX_TC_NUM];
10648         u16 roundup_size;
10649         u32 *rss_indir;
10650         unsigned int i;
10651         int ret;
10652
10653         kinfo->req_rss_size = new_tqps_num;
10654
10655         ret = hclge_tm_vport_map_update(hdev);
10656         if (ret) {
10657                 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
10658                 return ret;
10659         }
10660
10661         roundup_size = roundup_pow_of_two(kinfo->rss_size);
10662         roundup_size = ilog2(roundup_size);
10663         /* Set the RSS TC mode according to the new RSS size */
10664         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
10665                 tc_valid[i] = 0;
10666
10667                 if (!(hdev->hw_tc_map & BIT(i)))
10668                         continue;
10669
10670                 tc_valid[i] = 1;
10671                 tc_size[i] = roundup_size;
10672                 tc_offset[i] = kinfo->rss_size * i;
10673         }
10674         ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
10675         if (ret)
10676                 return ret;
10677
10678         /* RSS indirection table has been configured by the user */
10679         if (rxfh_configured)
10680                 goto out;
10681
10682         /* Reinitialize the RSS indirection table according to the new RSS size */
10683         rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
10684         if (!rss_indir)
10685                 return -ENOMEM;
10686
10687         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
10688                 rss_indir[i] = i % kinfo->rss_size;
10689
10690         ret = hclge_set_rss(handle, rss_indir, NULL, 0);
10691         if (ret)
10692                 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
10693                         ret);
10694
10695         kfree(rss_indir);
10696
10697 out:
10698         if (!ret)
10699                 dev_info(&hdev->pdev->dev,
10700                          "Channels changed, rss_size from %u to %u, tqps from %u to %u\n",
10701                          cur_rss_size, kinfo->rss_size,
10702                          cur_tqps, kinfo->rss_size * kinfo->num_tc);
10703
10704         return ret;
10705 }
10706
10707 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
10708                               u32 *regs_num_64_bit)
10709 {
10710         struct hclge_desc desc;
10711         u32 total_num;
10712         int ret;
10713
10714         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
10715         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10716         if (ret) {
10717                 dev_err(&hdev->pdev->dev,
10718                         "Query register number cmd failed, ret = %d.\n", ret);
10719                 return ret;
10720         }
10721
10722         *regs_num_32_bit = le32_to_cpu(desc.data[0]);
10723         *regs_num_64_bit = le32_to_cpu(desc.data[1]);
10724
10725         total_num = *regs_num_32_bit + *regs_num_64_bit;
10726         if (!total_num)
10727                 return -EINVAL;
10728
10729         return 0;
10730 }
10731
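/* Read the 32-bit register block through the command queue. The first BD
 * carries HCLGE_32_BIT_DESC_NODATA_LEN words of header, so it holds fewer
 * register values than the following BDs.
 */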
10732 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10733                                  void *data)
10734 {
10735 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
10736 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
10737
10738         struct hclge_desc *desc;
10739         u32 *reg_val = data;
10740         __le32 *desc_data;
10741         int nodata_num;
10742         int cmd_num;
10743         int i, k, n;
10744         int ret;
10745
10746         if (regs_num == 0)
10747                 return 0;
10748
10749         nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
10750         cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
10751                                HCLGE_32_BIT_REG_RTN_DATANUM);
10752         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10753         if (!desc)
10754                 return -ENOMEM;
10755
10756         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
10757         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10758         if (ret) {
10759                 dev_err(&hdev->pdev->dev,
10760                         "Query 32 bit register cmd failed, ret = %d.\n", ret);
10761                 kfree(desc);
10762                 return ret;
10763         }
10764
10765         for (i = 0; i < cmd_num; i++) {
10766                 if (i == 0) {
10767                         desc_data = (__le32 *)(&desc[i].data[0]);
10768                         n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
10769                 } else {
10770                         desc_data = (__le32 *)(&desc[i]);
10771                         n = HCLGE_32_BIT_REG_RTN_DATANUM;
10772                 }
10773                 for (k = 0; k < n; k++) {
10774                         *reg_val++ = le32_to_cpu(*desc_data++);
10775
10776                         regs_num--;
10777                         if (!regs_num)
10778                                 break;
10779                 }
10780         }
10781
10782         kfree(desc);
10783         return 0;
10784 }
10785
10786 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10787                                  void *data)
10788 {
10789 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
10790 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
10791
10792         struct hclge_desc *desc;
10793         u64 *reg_val = data;
10794         __le64 *desc_data;
10795         int nodata_len;
10796         int cmd_num;
10797         int i, k, n;
10798         int ret;
10799
10800         if (regs_num == 0)
10801                 return 0;
10802
10803         nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
10804         cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
10805                                HCLGE_64_BIT_REG_RTN_DATANUM);
10806         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10807         if (!desc)
10808                 return -ENOMEM;
10809
10810         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
10811         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10812         if (ret) {
10813                 dev_err(&hdev->pdev->dev,
10814                         "Query 64 bit register cmd failed, ret = %d.\n", ret);
10815                 kfree(desc);
10816                 return ret;
10817         }
10818
10819         for (i = 0; i < cmd_num; i++) {
10820                 if (i == 0) {
10821                         desc_data = (__le64 *)(&desc[i].data[0]);
10822                         n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
10823                 } else {
10824                         desc_data = (__le64 *)(&desc[i]);
10825                         n = HCLGE_64_BIT_REG_RTN_DATANUM;
10826                 }
10827                 for (k = 0; k < n; k++) {
10828                         *reg_val++ = le64_to_cpu(*desc_data++);
10829
10830                         regs_num--;
10831                         if (!regs_num)
10832                                 break;
10833                 }
10834         }
10835
10836         kfree(desc);
10837         return 0;
10838 }
10839
10840 #define MAX_SEPARATE_NUM        4
10841 #define SEPARATOR_VALUE         0xFDFCFBFA
10842 #define REG_NUM_PER_LINE        4
10843 #define REG_LEN_PER_LINE        (REG_NUM_PER_LINE * sizeof(u32))
10844 #define REG_SEPARATOR_LINE      1
10845 #define REG_NUM_REMAIN_MASK     3
10846 #define BD_LIST_MAX_NUM         30
10847
10848 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
10849 {
10850         int i;
10851
10852         /* initialize all command BDs except the last one */
10853         for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
10854                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
10855                                            true);
10856                 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10857         }
10858
10859         /* initialize the last command BD */
10860         hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
10861
10862         return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
10863 }
10864
10865 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
10866                                     int *bd_num_list,
10867                                     u32 type_num)
10868 {
10869         u32 entries_per_desc, desc_index, index, offset, i;
10870         struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
10871         int ret;
10872
10873         ret = hclge_query_bd_num_cmd_send(hdev, desc);
10874         if (ret) {
10875                 dev_err(&hdev->pdev->dev,
10876                         "Get dfx bd num fail, status is %d.\n", ret);
10877                 return ret;
10878         }
10879
10880         entries_per_desc = ARRAY_SIZE(desc[0].data);
10881         for (i = 0; i < type_num; i++) {
10882                 offset = hclge_dfx_bd_offset_list[i];
10883                 index = offset % entries_per_desc;
10884                 desc_index = offset / entries_per_desc;
10885                 bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
10886         }
10887
10888         return ret;
10889 }
10890
10891 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
10892                                   struct hclge_desc *desc_src, int bd_num,
10893                                   enum hclge_opcode_type cmd)
10894 {
10895         struct hclge_desc *desc = desc_src;
10896         int i, ret;
10897
10898         hclge_cmd_setup_basic_desc(desc, cmd, true);
10899         for (i = 0; i < bd_num - 1; i++) {
10900                 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10901                 desc++;
10902                 hclge_cmd_setup_basic_desc(desc, cmd, true);
10903         }
10904
10905         desc = desc_src;
10906         ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
10907         if (ret)
10908                 dev_err(&hdev->pdev->dev,
10909                         "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
10910                         cmd, ret);
10911
10912         return ret;
10913 }
10914
10915 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
10916                                     void *data)
10917 {
10918         int entries_per_desc, reg_num, separator_num, desc_index, index, i;
10919         struct hclge_desc *desc = desc_src;
10920         u32 *reg = data;
10921
10922         entries_per_desc = ARRAY_SIZE(desc->data);
10923         reg_num = entries_per_desc * bd_num;
10924         separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
10925         for (i = 0; i < reg_num; i++) {
10926                 index = i % entries_per_desc;
10927                 desc_index = i / entries_per_desc;
10928                 *reg++ = le32_to_cpu(desc[desc_index].data[index]);
10929         }
10930         for (i = 0; i < separator_num; i++)
10931                 *reg++ = SEPARATOR_VALUE;
10932
10933         return reg_num + separator_num;
10934 }
10935
10936 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
10937 {
10938         u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10939         int data_len_per_desc, bd_num, i;
10940         int bd_num_list[BD_LIST_MAX_NUM];
10941         u32 data_len;
10942         int ret;
10943
10944         ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10945         if (ret) {
10946                 dev_err(&hdev->pdev->dev,
10947                         "Get dfx reg bd num fail, status is %d.\n", ret);
10948                 return ret;
10949         }
10950
10951         data_len_per_desc = sizeof_field(struct hclge_desc, data);
10952         *len = 0;
10953         for (i = 0; i < dfx_reg_type_num; i++) {
10954                 bd_num = bd_num_list[i];
10955                 data_len = data_len_per_desc * bd_num;
10956                 *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
10957         }
10958
10959         return ret;
10960 }
10961
10962 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
10963 {
10964         u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10965         int bd_num, bd_num_max, buf_len, i;
10966         int bd_num_list[BD_LIST_MAX_NUM];
10967         struct hclge_desc *desc_src;
10968         u32 *reg = data;
10969         int ret;
10970
10971         ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10972         if (ret) {
10973                 dev_err(&hdev->pdev->dev,
10974                         "Get dfx reg bd num fail, status is %d.\n", ret);
10975                 return ret;
10976         }
10977
10978         bd_num_max = bd_num_list[0];
10979         for (i = 1; i < dfx_reg_type_num; i++)
10980                 bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
10981
10982         buf_len = sizeof(*desc_src) * bd_num_max;
10983         desc_src = kzalloc(buf_len, GFP_KERNEL);
10984         if (!desc_src)
10985                 return -ENOMEM;
10986
10987         for (i = 0; i < dfx_reg_type_num; i++) {
10988                 bd_num = bd_num_list[i];
10989                 ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
10990                                              hclge_dfx_reg_opcode_list[i]);
10991                 if (ret) {
10992                         dev_err(&hdev->pdev->dev,
10993                                 "Get dfx reg fail, status is %d.\n", ret);
10994                         break;
10995                 }
10996
10997                 reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
10998         }
10999
11000         kfree(desc_src);
11001         return ret;
11002 }
11003
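/* Dump the per-PF registers (command queue, common, per-ring and per-vector
 * registers) directly from the PCIe register space, padding each group with
 * separator values. Returns the number of u32 words written.
 */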
11004 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
11005                               struct hnae3_knic_private_info *kinfo)
11006 {
11007 #define HCLGE_RING_REG_OFFSET           0x200
11008 #define HCLGE_RING_INT_REG_OFFSET       0x4
11009
11010         int i, j, reg_num, separator_num;
11011         int data_num_sum;
11012         u32 *reg = data;
11013
11014         /* fetch per-PF register values from the PF PCIe register space */
11015         reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
11016         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11017         for (i = 0; i < reg_num; i++)
11018                 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
11019         for (i = 0; i < separator_num; i++)
11020                 *reg++ = SEPARATOR_VALUE;
11021         data_num_sum = reg_num + separator_num;
11022
11023         reg_num = ARRAY_SIZE(common_reg_addr_list);
11024         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11025         for (i = 0; i < reg_num; i++)
11026                 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
11027         for (i = 0; i < separator_num; i++)
11028                 *reg++ = SEPARATOR_VALUE;
11029         data_num_sum += reg_num + separator_num;
11030
11031         reg_num = ARRAY_SIZE(ring_reg_addr_list);
11032         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11033         for (j = 0; j < kinfo->num_tqps; j++) {
11034                 for (i = 0; i < reg_num; i++)
11035                         *reg++ = hclge_read_dev(&hdev->hw,
11036                                                 ring_reg_addr_list[i] +
11037                                                 HCLGE_RING_REG_OFFSET * j);
11038                 for (i = 0; i < separator_num; i++)
11039                         *reg++ = SEPARATOR_VALUE;
11040         }
11041         data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
11042
11043         reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
11044         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11045         for (j = 0; j < hdev->num_msi_used - 1; j++) {
11046                 for (i = 0; i < reg_num; i++)
11047                         *reg++ = hclge_read_dev(&hdev->hw,
11048                                                 tqp_intr_reg_addr_list[i] +
11049                                                 HCLGE_RING_INT_REG_OFFSET * j);
11050                 for (i = 0; i < separator_num; i++)
11051                         *reg++ = SEPARATOR_VALUE;
11052         }
11053         data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
11054
11055         return data_num_sum;
11056 }
11057
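/* Return the length in bytes of the ethtool register dump: the directly
 * read register groups, the 32-bit and 64-bit command-queried registers and
 * the DFX registers, each group padded to whole separator lines.
 */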
11058 static int hclge_get_regs_len(struct hnae3_handle *handle)
11059 {
11060         int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
11061         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
11062         struct hclge_vport *vport = hclge_get_vport(handle);
11063         struct hclge_dev *hdev = vport->back;
11064         int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
11065         int regs_lines_32_bit, regs_lines_64_bit;
11066         int ret;
11067
11068         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
11069         if (ret) {
11070                 dev_err(&hdev->pdev->dev,
11071                         "Get register number failed, ret = %d.\n", ret);
11072                 return ret;
11073         }
11074
11075         ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
11076         if (ret) {
11077                 dev_err(&hdev->pdev->dev,
11078                         "Get dfx reg len failed, ret = %d.\n", ret);
11079                 return ret;
11080         }
11081
11082         cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
11083                 REG_SEPARATOR_LINE;
11084         common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
11085                 REG_SEPARATOR_LINE;
11086         ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
11087                 REG_SEPARATOR_LINE;
11088         tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
11089                 REG_SEPARATOR_LINE;
11090         regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
11091                 REG_SEPARATOR_LINE;
11092         regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
11093                 REG_SEPARATOR_LINE;
11094
11095         return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
11096                 tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
11097                 regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
11098 }
11099
11100 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
11101                            void *data)
11102 {
11103         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
11104         struct hclge_vport *vport = hclge_get_vport(handle);
11105         struct hclge_dev *hdev = vport->back;
11106         u32 regs_num_32_bit, regs_num_64_bit;
11107         int i, reg_num, separator_num, ret;
11108         u32 *reg = data;
11109
11110         *version = hdev->fw_version;
11111
11112         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
11113         if (ret) {
11114                 dev_err(&hdev->pdev->dev,
11115                         "Get register number failed, ret = %d.\n", ret);
11116                 return;
11117         }
11118
11119         reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
11120
11121         ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
11122         if (ret) {
11123                 dev_err(&hdev->pdev->dev,
11124                         "Get 32 bit register failed, ret = %d.\n", ret);
11125                 return;
11126         }
11127         reg_num = regs_num_32_bit;
11128         reg += reg_num;
11129         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11130         for (i = 0; i < separator_num; i++)
11131                 *reg++ = SEPARATOR_VALUE;
11132
11133         ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
11134         if (ret) {
11135                 dev_err(&hdev->pdev->dev,
11136                         "Get 64 bit register failed, ret = %d.\n", ret);
11137                 return;
11138         }
11139         reg_num = regs_num_64_bit * 2;
11140         reg += reg_num;
11141         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11142         for (i = 0; i < separator_num; i++)
11143                 *reg++ = SEPARATOR_VALUE;
11144
11145         ret = hclge_get_dfx_reg(hdev, reg);
11146         if (ret)
11147                 dev_err(&hdev->pdev->dev,
11148                         "Get dfx register failed, ret = %d.\n", ret);
11149 }
11150
11151 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
11152 {
11153         struct hclge_set_led_state_cmd *req;
11154         struct hclge_desc desc;
11155         int ret;
11156
11157         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
11158
11159         req = (struct hclge_set_led_state_cmd *)desc.data;
11160         hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
11161                         HCLGE_LED_LOCATE_STATE_S, locate_led_status);
11162
11163         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11164         if (ret)
11165                 dev_err(&hdev->pdev->dev,
11166                         "Send set led state cmd error, ret =%d\n", ret);
11167
11168         return ret;
11169 }
11170
11171 enum hclge_led_status {
11172         HCLGE_LED_OFF,
11173         HCLGE_LED_ON,
11174         HCLGE_LED_NO_CHANGE = 0xFF,
11175 };
11176
11177 static int hclge_set_led_id(struct hnae3_handle *handle,
11178                             enum ethtool_phys_id_state status)
11179 {
11180         struct hclge_vport *vport = hclge_get_vport(handle);
11181         struct hclge_dev *hdev = vport->back;
11182
11183         switch (status) {
11184         case ETHTOOL_ID_ACTIVE:
11185                 return hclge_set_led_status(hdev, HCLGE_LED_ON);
11186         case ETHTOOL_ID_INACTIVE:
11187                 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
11188         default:
11189                 return -EINVAL;
11190         }
11191 }
11192
11193 static void hclge_get_link_mode(struct hnae3_handle *handle,
11194                                 unsigned long *supported,
11195                                 unsigned long *advertising)
11196 {
11197         unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
11198         struct hclge_vport *vport = hclge_get_vport(handle);
11199         struct hclge_dev *hdev = vport->back;
11200         unsigned int idx = 0;
11201
11202         for (; idx < size; idx++) {
11203                 supported[idx] = hdev->hw.mac.supported[idx];
11204                 advertising[idx] = hdev->hw.mac.advertising[idx];
11205         }
11206 }
11207
11208 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
11209 {
11210         struct hclge_vport *vport = hclge_get_vport(handle);
11211         struct hclge_dev *hdev = vport->back;
11212
11213         return hclge_config_gro(hdev, enable);
11214 }
11215
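/* Re-apply the PF promiscuous mode and VLAN filter state when the overflow
 * promiscuous flags have changed or a previous update failed.
 */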
11216 static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
11217 {
11218         struct hclge_vport *vport = &hdev->vport[0];
11219         struct hnae3_handle *handle = &vport->nic;
11220         u8 tmp_flags;
11221         int ret;
11222
11223         if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
11224                 set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11225                 vport->last_promisc_flags = vport->overflow_promisc_flags;
11226         }
11227
11228         if (test_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state)) {
11229                 tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
11230                 ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
11231                                              tmp_flags & HNAE3_MPE);
11232                 if (!ret) {
11233                         clear_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11234                         hclge_enable_vlan_filter(handle,
11235                                                  tmp_flags & HNAE3_VLAN_FLTR);
11236                 }
11237         }
11238 }
11239
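/* Ask the firmware whether an SFP module is currently present. */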
11240 static bool hclge_module_existed(struct hclge_dev *hdev)
11241 {
11242         struct hclge_desc desc;
11243         u32 existed;
11244         int ret;
11245
11246         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
11247         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11248         if (ret) {
11249                 dev_err(&hdev->pdev->dev,
11250                         "failed to get SFP exist state, ret = %d\n", ret);
11251                 return false;
11252         }
11253
11254         existed = le32_to_cpu(desc.data[0]);
11255
11256         return existed != 0;
11257 }
11258
11259 /* need 6 BDs (total 140 bytes) in one reading (worked example below);
11260  * return the number of bytes actually read, 0 means the read failed.
11261  */
11262 static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
11263                                      u32 len, u8 *data)
11264 {
11265         struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
11266         struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
11267         u16 read_len;
11268         u16 copy_len;
11269         int ret;
11270         int i;
11271
11272         /* setup all 6 bds to read module eeprom info. */
11273         for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
11274                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
11275                                            true);
11276
11277                 /* bd0~bd4 need next flag */
11278                 if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
11279                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
11280         }
11281
11282         /* setup bd0, this bd contains offset and read length. */
11283         sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
11284         sfp_info_bd0->offset = cpu_to_le16((u16)offset);
11285         read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
11286         sfp_info_bd0->read_len = cpu_to_le16(read_len);
11287
11288         ret = hclge_cmd_send(&hdev->hw, desc, i);
11289         if (ret) {
11290                 dev_err(&hdev->pdev->dev,
11291                         "failed to get SFP eeprom info, ret = %d\n", ret);
11292                 return 0;
11293         }
11294
11295         /* copy sfp info from bd0 to out buffer. */
11296         copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
11297         memcpy(data, sfp_info_bd0->data, copy_len);
11298         read_len = copy_len;
11299
11300         /* copy sfp info from bd1~bd5 to out buffer if needed. */
11301         for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
11302                 if (read_len >= len)
11303                         return read_len;
11304
11305                 copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
11306                 memcpy(data + read_len, desc[i].data, copy_len);
11307                 read_len += copy_len;
11308         }
11309
11310         return read_len;
11311 }
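
/* Illustrative only (not built): worked example of the chunking above. One
 * command round uses 6 BDs; bd0 carries HCLGE_SFP_INFO_BD0_LEN data bytes and
 * bd1~bd5 carry HCLGE_SFP_INFO_BDX_LEN each, which together make up the
 * HCLGE_SFP_INFO_MAX_LEN (140 bytes, per the comment above) limit per read.
 * Reading a full 256-byte module EEPROM page therefore takes two rounds
 * (140 bytes, then the remaining 116), which is exactly the loop that
 * hclge_get_module_eeprom() below runs.
 */
#if 0
	u32 total = 256, done = 0;

	while (done < total) {
		u16 chunk = hclge_get_sfp_eeprom_info(hdev, done,
						      total - done,
						      data + done);
		if (!chunk)
			break;		/* read failed */
		done += chunk;		/* typically 140, then 256 */
	}
#endif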
11312
11313 static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
11314                                    u32 len, u8 *data)
11315 {
11316         struct hclge_vport *vport = hclge_get_vport(handle);
11317         struct hclge_dev *hdev = vport->back;
11318         u32 read_len = 0;
11319         u16 data_len;
11320
11321         if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
11322                 return -EOPNOTSUPP;
11323
11324         if (!hclge_module_existed(hdev))
11325                 return -ENXIO;
11326
11327         while (read_len < len) {
11328                 data_len = hclge_get_sfp_eeprom_info(hdev,
11329                                                      offset + read_len,
11330                                                      len - read_len,
11331                                                      data + read_len);
11332                 if (!data_len)
11333                         return -EIO;
11334
11335                 read_len += data_len;
11336         }
11337
11338         return 0;
11339 }
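
/* Illustrative only (not built): a sketch of how an ethtool
 * .get_module_eeprom callback could reach the helper above via the
 * get_module_eeprom hnae3 op registered below. ee->offset and ee->len come
 * from the standard struct ethtool_eeprom request; example_get_handle() is
 * hypothetical.
 */
#if 0
static int example_get_module_eeprom(struct net_device *netdev,
				     struct ethtool_eeprom *ee, u8 *data)
{
	struct hnae3_handle *h = example_get_handle(netdev); /* hypothetical */

	if (!h->ae_algo->ops->get_module_eeprom)
		return -EOPNOTSUPP;

	if (!ee->len)
		return -EINVAL;

	return h->ae_algo->ops->get_module_eeprom(h, ee->offset, ee->len,
						  data);
}
#endif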
11340
11341 static const struct hnae3_ae_ops hclge_ops = {
11342         .init_ae_dev = hclge_init_ae_dev,
11343         .uninit_ae_dev = hclge_uninit_ae_dev,
11344         .flr_prepare = hclge_flr_prepare,
11345         .flr_done = hclge_flr_done,
11346         .init_client_instance = hclge_init_client_instance,
11347         .uninit_client_instance = hclge_uninit_client_instance,
11348         .map_ring_to_vector = hclge_map_ring_to_vector,
11349         .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
11350         .get_vector = hclge_get_vector,
11351         .put_vector = hclge_put_vector,
11352         .set_promisc_mode = hclge_set_promisc_mode,
11353         .request_update_promisc_mode = hclge_request_update_promisc_mode,
11354         .set_loopback = hclge_set_loopback,
11355         .start = hclge_ae_start,
11356         .stop = hclge_ae_stop,
11357         .client_start = hclge_client_start,
11358         .client_stop = hclge_client_stop,
11359         .get_status = hclge_get_status,
11360         .get_ksettings_an_result = hclge_get_ksettings_an_result,
11361         .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
11362         .get_media_type = hclge_get_media_type,
11363         .check_port_speed = hclge_check_port_speed,
11364         .get_fec = hclge_get_fec,
11365         .set_fec = hclge_set_fec,
11366         .get_rss_key_size = hclge_get_rss_key_size,
11367         .get_rss_indir_size = hclge_get_rss_indir_size,
11368         .get_rss = hclge_get_rss,
11369         .set_rss = hclge_set_rss,
11370         .set_rss_tuple = hclge_set_rss_tuple,
11371         .get_rss_tuple = hclge_get_rss_tuple,
11372         .get_tc_size = hclge_get_tc_size,
11373         .get_mac_addr = hclge_get_mac_addr,
11374         .set_mac_addr = hclge_set_mac_addr,
11375         .do_ioctl = hclge_do_ioctl,
11376         .add_uc_addr = hclge_add_uc_addr,
11377         .rm_uc_addr = hclge_rm_uc_addr,
11378         .add_mc_addr = hclge_add_mc_addr,
11379         .rm_mc_addr = hclge_rm_mc_addr,
11380         .set_autoneg = hclge_set_autoneg,
11381         .get_autoneg = hclge_get_autoneg,
11382         .restart_autoneg = hclge_restart_autoneg,
11383         .halt_autoneg = hclge_halt_autoneg,
11384         .get_pauseparam = hclge_get_pauseparam,
11385         .set_pauseparam = hclge_set_pauseparam,
11386         .set_mtu = hclge_set_mtu,
11387         .reset_queue = hclge_reset_tqp,
11388         .get_stats = hclge_get_stats,
11389         .get_mac_stats = hclge_get_mac_stat,
11390         .update_stats = hclge_update_stats,
11391         .get_strings = hclge_get_strings,
11392         .get_sset_count = hclge_get_sset_count,
11393         .get_fw_version = hclge_get_fw_version,
11394         .get_mdix_mode = hclge_get_mdix_mode,
11395         .enable_vlan_filter = hclge_enable_vlan_filter,
11396         .set_vlan_filter = hclge_set_vlan_filter,
11397         .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
11398         .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
11399         .reset_event = hclge_reset_event,
11400         .get_reset_level = hclge_get_reset_level,
11401         .set_default_reset_request = hclge_set_def_reset_request,
11402         .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
11403         .set_channels = hclge_set_channels,
11404         .get_channels = hclge_get_channels,
11405         .get_regs_len = hclge_get_regs_len,
11406         .get_regs = hclge_get_regs,
11407         .set_led_id = hclge_set_led_id,
11408         .get_link_mode = hclge_get_link_mode,
11409         .add_fd_entry = hclge_add_fd_entry,
11410         .del_fd_entry = hclge_del_fd_entry,
11411         .del_all_fd_entries = hclge_del_all_fd_entries,
11412         .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
11413         .get_fd_rule_info = hclge_get_fd_rule_info,
11414         .get_fd_all_rules = hclge_get_all_rules,
11415         .enable_fd = hclge_enable_fd,
11416         .add_arfs_entry = hclge_add_fd_entry_by_arfs,
11417         .dbg_run_cmd = hclge_dbg_run_cmd,
11418         .handle_hw_ras_error = hclge_handle_hw_ras_error,
11419         .get_hw_reset_stat = hclge_get_hw_reset_stat,
11420         .ae_dev_resetting = hclge_ae_dev_resetting,
11421         .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
11422         .set_gro_en = hclge_gro_en,
11423         .get_global_queue_id = hclge_covert_handle_qid_global,
11424         .set_timer_task = hclge_set_timer_task,
11425         .mac_connect_phy = hclge_mac_connect_phy,
11426         .mac_disconnect_phy = hclge_mac_disconnect_phy,
11427         .get_vf_config = hclge_get_vf_config,
11428         .set_vf_link_state = hclge_set_vf_link_state,
11429         .set_vf_spoofchk = hclge_set_vf_spoofchk,
11430         .set_vf_trust = hclge_set_vf_trust,
11431         .set_vf_rate = hclge_set_vf_rate,
11432         .set_vf_mac = hclge_set_vf_mac,
11433         .get_module_eeprom = hclge_get_module_eeprom,
11434         .get_cmdq_stat = hclge_get_cmdq_stat,
11435 };
11436
11437 static struct hnae3_ae_algo ae_algo = {
11438         .ops = &hclge_ops,
11439         .pdev_id_table = ae_algo_pci_tbl,
11440 };
11441
11442 static int hclge_init(void)
11443 {
11444         pr_info("%s is initializing\n", HCLGE_NAME);
11445
11446         hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
11447         if (!hclge_wq) {
11448                 pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
11449                 return -ENOMEM;
11450         }
11451
11452         hnae3_register_ae_algo(&ae_algo);
11453
11454         return 0;
11455 }
11456
11457 static void hclge_exit(void)
11458 {
11459         hnae3_unregister_ae_algo(&ae_algo);
11460         destroy_workqueue(hclge_wq);
11461 }
11462 module_init(hclge_init);
11463 module_exit(hclge_exit);
11464
11465 MODULE_LICENSE("GPL");
11466 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
11467 MODULE_DESCRIPTION("HCLGE Driver");
11468 MODULE_VERSION(HCLGE_MOD_VERSION);