drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/ipv6.h>
17 #include <net/rtnetlink.h>
18 #include "hclge_cmd.h"
19 #include "hclge_dcb.h"
20 #include "hclge_main.h"
21 #include "hclge_mbx.h"
22 #include "hclge_mdio.h"
23 #include "hclge_tm.h"
24 #include "hclge_err.h"
25 #include "hnae3.h"
26 #include "hclge_devlink.h"
27
28 #define HCLGE_NAME                      "hclge"
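/* HCLGE_MAC_STATS_FIELD_OFF() yields the byte offset of a counter inside
 * struct hclge_mac_stats; HCLGE_STATS_READ() reads the u64 counter stored at
 * such an offset. The g_mac_stats_string table below pairs stat names with
 * these offsets so the counters can be exported generically.
 */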
29 #define HCLGE_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset)))
30 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
31
32 #define HCLGE_BUF_SIZE_UNIT     256U
33 #define HCLGE_BUF_MUL_BY        2
34 #define HCLGE_BUF_DIV_BY        2
35 #define NEED_RESERVE_TC_NUM     2
36 #define BUF_MAX_PERCENT         100
37 #define BUF_RESERVE_PERCENT     90
38
39 #define HCLGE_RESET_MAX_FAIL_CNT        5
40 #define HCLGE_RESET_SYNC_TIME           100
41 #define HCLGE_PF_RESET_SYNC_TIME        20
42 #define HCLGE_PF_RESET_SYNC_CNT         1500
43
44 /* Get DFX BD number offset */
45 #define HCLGE_DFX_BIOS_BD_OFFSET        1
46 #define HCLGE_DFX_SSU_0_BD_OFFSET       2
47 #define HCLGE_DFX_SSU_1_BD_OFFSET       3
48 #define HCLGE_DFX_IGU_BD_OFFSET         4
49 #define HCLGE_DFX_RPU_0_BD_OFFSET       5
50 #define HCLGE_DFX_RPU_1_BD_OFFSET       6
51 #define HCLGE_DFX_NCSI_BD_OFFSET        7
52 #define HCLGE_DFX_RTC_BD_OFFSET         8
53 #define HCLGE_DFX_PPP_BD_OFFSET         9
54 #define HCLGE_DFX_RCB_BD_OFFSET         10
55 #define HCLGE_DFX_TQP_BD_OFFSET         11
56 #define HCLGE_DFX_SSU_2_BD_OFFSET       12
57
58 #define HCLGE_LINK_STATUS_MS    10
59
60 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
61 static int hclge_init_vlan_config(struct hclge_dev *hdev);
62 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
63 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
64 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
65 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
66 static int hclge_clear_arfs_rules(struct hclge_dev *hdev);
67 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
68                                                    unsigned long *addr);
69 static int hclge_set_default_loopback(struct hclge_dev *hdev);
70
71 static void hclge_sync_mac_table(struct hclge_dev *hdev);
72 static void hclge_restore_hw_table(struct hclge_dev *hdev);
73 static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
74 static void hclge_sync_fd_table(struct hclge_dev *hdev);
75
76 static struct hnae3_ae_algo ae_algo;
77
78 static struct workqueue_struct *hclge_wq;
79
80 static const struct pci_device_id ae_algo_pci_tbl[] = {
81         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
82         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
83         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
84         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
85         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
86         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
87         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
88         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
89         /* required last entry */
90         {0, }
91 };
92
93 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
94
95 static const u32 cmdq_reg_addr_list[] = {HCLGE_NIC_CSQ_BASEADDR_L_REG,
96                                          HCLGE_NIC_CSQ_BASEADDR_H_REG,
97                                          HCLGE_NIC_CSQ_DEPTH_REG,
98                                          HCLGE_NIC_CSQ_TAIL_REG,
99                                          HCLGE_NIC_CSQ_HEAD_REG,
100                                          HCLGE_NIC_CRQ_BASEADDR_L_REG,
101                                          HCLGE_NIC_CRQ_BASEADDR_H_REG,
102                                          HCLGE_NIC_CRQ_DEPTH_REG,
103                                          HCLGE_NIC_CRQ_TAIL_REG,
104                                          HCLGE_NIC_CRQ_HEAD_REG,
105                                          HCLGE_VECTOR0_CMDQ_SRC_REG,
106                                          HCLGE_CMDQ_INTR_STS_REG,
107                                          HCLGE_CMDQ_INTR_EN_REG,
108                                          HCLGE_CMDQ_INTR_GEN_REG};
109
110 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
111                                            HCLGE_PF_OTHER_INT_REG,
112                                            HCLGE_MISC_RESET_STS_REG,
113                                            HCLGE_MISC_VECTOR_INT_STS,
114                                            HCLGE_GLOBAL_RESET_REG,
115                                            HCLGE_FUN_RST_ING,
116                                            HCLGE_GRO_EN_REG};
117
118 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
119                                          HCLGE_RING_RX_ADDR_H_REG,
120                                          HCLGE_RING_RX_BD_NUM_REG,
121                                          HCLGE_RING_RX_BD_LENGTH_REG,
122                                          HCLGE_RING_RX_MERGE_EN_REG,
123                                          HCLGE_RING_RX_TAIL_REG,
124                                          HCLGE_RING_RX_HEAD_REG,
125                                          HCLGE_RING_RX_FBD_NUM_REG,
126                                          HCLGE_RING_RX_OFFSET_REG,
127                                          HCLGE_RING_RX_FBD_OFFSET_REG,
128                                          HCLGE_RING_RX_STASH_REG,
129                                          HCLGE_RING_RX_BD_ERR_REG,
130                                          HCLGE_RING_TX_ADDR_L_REG,
131                                          HCLGE_RING_TX_ADDR_H_REG,
132                                          HCLGE_RING_TX_BD_NUM_REG,
133                                          HCLGE_RING_TX_PRIORITY_REG,
134                                          HCLGE_RING_TX_TC_REG,
135                                          HCLGE_RING_TX_MERGE_EN_REG,
136                                          HCLGE_RING_TX_TAIL_REG,
137                                          HCLGE_RING_TX_HEAD_REG,
138                                          HCLGE_RING_TX_FBD_NUM_REG,
139                                          HCLGE_RING_TX_OFFSET_REG,
140                                          HCLGE_RING_TX_EBD_NUM_REG,
141                                          HCLGE_RING_TX_EBD_OFFSET_REG,
142                                          HCLGE_RING_TX_BD_ERR_REG,
143                                          HCLGE_RING_EN_REG};
144
145 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
146                                              HCLGE_TQP_INTR_GL0_REG,
147                                              HCLGE_TQP_INTR_GL1_REG,
148                                              HCLGE_TQP_INTR_GL2_REG,
149                                              HCLGE_TQP_INTR_RL_REG};
150
151 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
152         "App    Loopback test",
153         "Serdes serial Loopback test",
154         "Serdes parallel Loopback test",
155         "Phy    Loopback test"
156 };
157
158 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
159         {"mac_tx_mac_pause_num",
160                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
161         {"mac_rx_mac_pause_num",
162                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
163         {"mac_tx_control_pkt_num",
164                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
165         {"mac_rx_control_pkt_num",
166                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
167         {"mac_tx_pfc_pkt_num",
168                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
169         {"mac_tx_pfc_pri0_pkt_num",
170                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
171         {"mac_tx_pfc_pri1_pkt_num",
172                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
173         {"mac_tx_pfc_pri2_pkt_num",
174                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
175         {"mac_tx_pfc_pri3_pkt_num",
176                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
177         {"mac_tx_pfc_pri4_pkt_num",
178                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
179         {"mac_tx_pfc_pri5_pkt_num",
180                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
181         {"mac_tx_pfc_pri6_pkt_num",
182                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
183         {"mac_tx_pfc_pri7_pkt_num",
184                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
185         {"mac_rx_pfc_pkt_num",
186                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
187         {"mac_rx_pfc_pri0_pkt_num",
188                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
189         {"mac_rx_pfc_pri1_pkt_num",
190                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
191         {"mac_rx_pfc_pri2_pkt_num",
192                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
193         {"mac_rx_pfc_pri3_pkt_num",
194                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
195         {"mac_rx_pfc_pri4_pkt_num",
196                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
197         {"mac_rx_pfc_pri5_pkt_num",
198                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
199         {"mac_rx_pfc_pri6_pkt_num",
200                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
201         {"mac_rx_pfc_pri7_pkt_num",
202                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
203         {"mac_tx_total_pkt_num",
204                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
205         {"mac_tx_total_oct_num",
206                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
207         {"mac_tx_good_pkt_num",
208                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
209         {"mac_tx_bad_pkt_num",
210                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
211         {"mac_tx_good_oct_num",
212                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
213         {"mac_tx_bad_oct_num",
214                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
215         {"mac_tx_uni_pkt_num",
216                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
217         {"mac_tx_multi_pkt_num",
218                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
219         {"mac_tx_broad_pkt_num",
220                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
221         {"mac_tx_undersize_pkt_num",
222                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
223         {"mac_tx_oversize_pkt_num",
224                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
225         {"mac_tx_64_oct_pkt_num",
226                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
227         {"mac_tx_65_127_oct_pkt_num",
228                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
229         {"mac_tx_128_255_oct_pkt_num",
230                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
231         {"mac_tx_256_511_oct_pkt_num",
232                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
233         {"mac_tx_512_1023_oct_pkt_num",
234                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
235         {"mac_tx_1024_1518_oct_pkt_num",
236                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
237         {"mac_tx_1519_2047_oct_pkt_num",
238                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
239         {"mac_tx_2048_4095_oct_pkt_num",
240                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
241         {"mac_tx_4096_8191_oct_pkt_num",
242                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
243         {"mac_tx_8192_9216_oct_pkt_num",
244                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
245         {"mac_tx_9217_12287_oct_pkt_num",
246                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
247         {"mac_tx_12288_16383_oct_pkt_num",
248                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
249         {"mac_tx_1519_max_good_pkt_num",
250                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
251         {"mac_tx_1519_max_bad_pkt_num",
252                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
253         {"mac_rx_total_pkt_num",
254                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
255         {"mac_rx_total_oct_num",
256                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
257         {"mac_rx_good_pkt_num",
258                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
259         {"mac_rx_bad_pkt_num",
260                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
261         {"mac_rx_good_oct_num",
262                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
263         {"mac_rx_bad_oct_num",
264                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
265         {"mac_rx_uni_pkt_num",
266                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
267         {"mac_rx_multi_pkt_num",
268                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
269         {"mac_rx_broad_pkt_num",
270                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
271         {"mac_rx_undersize_pkt_num",
272                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
273         {"mac_rx_oversize_pkt_num",
274                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
275         {"mac_rx_64_oct_pkt_num",
276                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
277         {"mac_rx_65_127_oct_pkt_num",
278                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
279         {"mac_rx_128_255_oct_pkt_num",
280                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
281         {"mac_rx_256_511_oct_pkt_num",
282                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
283         {"mac_rx_512_1023_oct_pkt_num",
284                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
285         {"mac_rx_1024_1518_oct_pkt_num",
286                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
287         {"mac_rx_1519_2047_oct_pkt_num",
288                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
289         {"mac_rx_2048_4095_oct_pkt_num",
290                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
291         {"mac_rx_4096_8191_oct_pkt_num",
292                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
293         {"mac_rx_8192_9216_oct_pkt_num",
294                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
295         {"mac_rx_9217_12287_oct_pkt_num",
296                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
297         {"mac_rx_12288_16383_oct_pkt_num",
298                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
299         {"mac_rx_1519_max_good_pkt_num",
300                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
301         {"mac_rx_1519_max_bad_pkt_num",
302                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
303
304         {"mac_tx_fragment_pkt_num",
305                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
306         {"mac_tx_undermin_pkt_num",
307                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
308         {"mac_tx_jabber_pkt_num",
309                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
310         {"mac_tx_err_all_pkt_num",
311                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
312         {"mac_tx_from_app_good_pkt_num",
313                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
314         {"mac_tx_from_app_bad_pkt_num",
315                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
316         {"mac_rx_fragment_pkt_num",
317                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
318         {"mac_rx_undermin_pkt_num",
319                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
320         {"mac_rx_jabber_pkt_num",
321                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
322         {"mac_rx_fcs_err_pkt_num",
323                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
324         {"mac_rx_send_app_good_pkt_num",
325                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
326         {"mac_rx_send_app_bad_pkt_num",
327                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
328 };
329
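/* Static MAC manager table: a single entry matching LLDP frames
 * (ethertype ETH_P_LLDP, destination MAC 01:80:c2:00:00:0e) on port bitmap 0x1.
 */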
330 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
331         {
332                 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
333                 .ethter_type = cpu_to_le16(ETH_P_LLDP),
334                 .mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
335                 .i_port_bitmap = 0x1,
336         },
337 };
338
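/* Default 40-byte RSS hash key */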
339 static const u8 hclge_hash_key[] = {
340         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
341         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
342         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
343         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
344         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
345 };
346
347 static const u32 hclge_dfx_bd_offset_list[] = {
348         HCLGE_DFX_BIOS_BD_OFFSET,
349         HCLGE_DFX_SSU_0_BD_OFFSET,
350         HCLGE_DFX_SSU_1_BD_OFFSET,
351         HCLGE_DFX_IGU_BD_OFFSET,
352         HCLGE_DFX_RPU_0_BD_OFFSET,
353         HCLGE_DFX_RPU_1_BD_OFFSET,
354         HCLGE_DFX_NCSI_BD_OFFSET,
355         HCLGE_DFX_RTC_BD_OFFSET,
356         HCLGE_DFX_PPP_BD_OFFSET,
357         HCLGE_DFX_RCB_BD_OFFSET,
358         HCLGE_DFX_TQP_BD_OFFSET,
359         HCLGE_DFX_SSU_2_BD_OFFSET
360 };
361
362 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
363         HCLGE_OPC_DFX_BIOS_COMMON_REG,
364         HCLGE_OPC_DFX_SSU_REG_0,
365         HCLGE_OPC_DFX_SSU_REG_1,
366         HCLGE_OPC_DFX_IGU_EGU_REG,
367         HCLGE_OPC_DFX_RPU_REG_0,
368         HCLGE_OPC_DFX_RPU_REG_1,
369         HCLGE_OPC_DFX_NCSI_REG,
370         HCLGE_OPC_DFX_RTC_REG,
371         HCLGE_OPC_DFX_PPP_REG,
372         HCLGE_OPC_DFX_RCB_REG,
373         HCLGE_OPC_DFX_TQP_REG,
374         HCLGE_OPC_DFX_SSU_REG_2
375 };
376
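/* Flow director meta-data key fields and their widths in bits */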
377 static const struct key_info meta_data_key_info[] = {
378         { PACKET_TYPE_ID, 6 },
379         { IP_FRAGEMENT, 1 },
380         { ROCE_TYPE, 1 },
381         { NEXT_KEY, 5 },
382         { VLAN_NUMBER, 2 },
383         { SRC_VPORT, 12 },
384         { DST_VPORT, 12 },
385         { TUNNEL_PACKET, 1 },
386 };
387
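/* Flow director tuple key table: each entry lists the field, its width in
 * bits, the key option type, and the offsets of its value and mask inside
 * struct hclge_fd_rule (-1 when the field is not supported).
 */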
388 static const struct key_info tuple_key_info[] = {
389         { OUTER_DST_MAC, 48, KEY_OPT_MAC, -1, -1 },
390         { OUTER_SRC_MAC, 48, KEY_OPT_MAC, -1, -1 },
391         { OUTER_VLAN_TAG_FST, 16, KEY_OPT_LE16, -1, -1 },
392         { OUTER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
393         { OUTER_ETH_TYPE, 16, KEY_OPT_LE16, -1, -1 },
394         { OUTER_L2_RSV, 16, KEY_OPT_LE16, -1, -1 },
395         { OUTER_IP_TOS, 8, KEY_OPT_U8, -1, -1 },
396         { OUTER_IP_PROTO, 8, KEY_OPT_U8, -1, -1 },
397         { OUTER_SRC_IP, 32, KEY_OPT_IP, -1, -1 },
398         { OUTER_DST_IP, 32, KEY_OPT_IP, -1, -1 },
399         { OUTER_L3_RSV, 16, KEY_OPT_LE16, -1, -1 },
400         { OUTER_SRC_PORT, 16, KEY_OPT_LE16, -1, -1 },
401         { OUTER_DST_PORT, 16, KEY_OPT_LE16, -1, -1 },
402         { OUTER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 },
403         { OUTER_TUN_VNI, 24, KEY_OPT_VNI, -1, -1 },
404         { OUTER_TUN_FLOW_ID, 8, KEY_OPT_U8, -1, -1 },
405         { INNER_DST_MAC, 48, KEY_OPT_MAC,
406           offsetof(struct hclge_fd_rule, tuples.dst_mac),
407           offsetof(struct hclge_fd_rule, tuples_mask.dst_mac) },
408         { INNER_SRC_MAC, 48, KEY_OPT_MAC,
409           offsetof(struct hclge_fd_rule, tuples.src_mac),
410           offsetof(struct hclge_fd_rule, tuples_mask.src_mac) },
411         { INNER_VLAN_TAG_FST, 16, KEY_OPT_LE16,
412           offsetof(struct hclge_fd_rule, tuples.vlan_tag1),
413           offsetof(struct hclge_fd_rule, tuples_mask.vlan_tag1) },
414         { INNER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
415         { INNER_ETH_TYPE, 16, KEY_OPT_LE16,
416           offsetof(struct hclge_fd_rule, tuples.ether_proto),
417           offsetof(struct hclge_fd_rule, tuples_mask.ether_proto) },
418         { INNER_L2_RSV, 16, KEY_OPT_LE16,
419           offsetof(struct hclge_fd_rule, tuples.l2_user_def),
420           offsetof(struct hclge_fd_rule, tuples_mask.l2_user_def) },
421         { INNER_IP_TOS, 8, KEY_OPT_U8,
422           offsetof(struct hclge_fd_rule, tuples.ip_tos),
423           offsetof(struct hclge_fd_rule, tuples_mask.ip_tos) },
424         { INNER_IP_PROTO, 8, KEY_OPT_U8,
425           offsetof(struct hclge_fd_rule, tuples.ip_proto),
426           offsetof(struct hclge_fd_rule, tuples_mask.ip_proto) },
427         { INNER_SRC_IP, 32, KEY_OPT_IP,
428           offsetof(struct hclge_fd_rule, tuples.src_ip),
429           offsetof(struct hclge_fd_rule, tuples_mask.src_ip) },
430         { INNER_DST_IP, 32, KEY_OPT_IP,
431           offsetof(struct hclge_fd_rule, tuples.dst_ip),
432           offsetof(struct hclge_fd_rule, tuples_mask.dst_ip) },
433         { INNER_L3_RSV, 16, KEY_OPT_LE16,
434           offsetof(struct hclge_fd_rule, tuples.l3_user_def),
435           offsetof(struct hclge_fd_rule, tuples_mask.l3_user_def) },
436         { INNER_SRC_PORT, 16, KEY_OPT_LE16,
437           offsetof(struct hclge_fd_rule, tuples.src_port),
438           offsetof(struct hclge_fd_rule, tuples_mask.src_port) },
439         { INNER_DST_PORT, 16, KEY_OPT_LE16,
440           offsetof(struct hclge_fd_rule, tuples.dst_port),
441           offsetof(struct hclge_fd_rule, tuples_mask.dst_port) },
442         { INNER_L4_RSV, 32, KEY_OPT_LE32,
443           offsetof(struct hclge_fd_rule, tuples.l4_user_def),
444           offsetof(struct hclge_fd_rule, tuples_mask.l4_user_def) },
445 };
446
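/* Read the MAC statistics with the legacy fixed-size HCLGE_OPC_STATS_MAC
 * command (21 descriptors) and accumulate the returned counters into
 * hdev->mac_stats.
 */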
447 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
448 {
449 #define HCLGE_MAC_CMD_NUM 21
450
451         u64 *data = (u64 *)(&hdev->mac_stats);
452         struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
453         __le64 *desc_data;
454         int i, k, n;
455         int ret;
456
457         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
458         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
459         if (ret) {
460                 dev_err(&hdev->pdev->dev,
461                         "Get MAC pkt stats fail, status = %d.\n", ret);
462
463                 return ret;
464         }
465
466         for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
467                 /* for special opcode 0032, only the first desc has the head */
468                 if (unlikely(i == 0)) {
469                         desc_data = (__le64 *)(&desc[i].data[0]);
470                         n = HCLGE_RD_FIRST_STATS_NUM;
471                 } else {
472                         desc_data = (__le64 *)(&desc[i]);
473                         n = HCLGE_RD_OTHER_STATS_NUM;
474                 }
475
476                 for (k = 0; k < n; k++) {
477                         *data += le64_to_cpu(*desc_data);
478                         data++;
479                         desc_data++;
480                 }
481         }
482
483         return 0;
484 }
485
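/* Read all MAC statistics with HCLGE_OPC_STATS_MAC_ALL, using the descriptor
 * count reported by firmware, and accumulate the counters into hdev->mac_stats.
 */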
486 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
487 {
488         u64 *data = (u64 *)(&hdev->mac_stats);
489         struct hclge_desc *desc;
490         __le64 *desc_data;
491         u16 i, k, n;
492         int ret;
493
494         /* This may be called inside atomic sections,
495          * so GFP_ATOMIC is more suitable here
496          */
497         desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
498         if (!desc)
499                 return -ENOMEM;
500
501         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
502         ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
503         if (ret) {
504                 kfree(desc);
505                 return ret;
506         }
507
508         for (i = 0; i < desc_num; i++) {
509                 /* for special opcode 0034, only the first desc has the head */
510                 if (i == 0) {
511                         desc_data = (__le64 *)(&desc[i].data[0]);
512                         n = HCLGE_RD_FIRST_STATS_NUM;
513                 } else {
514                         desc_data = (__le64 *)(&desc[i]);
515                         n = HCLGE_RD_OTHER_STATS_NUM;
516                 }
517
518                 for (k = 0; k < n; k++) {
519                         *data += le64_to_cpu(*desc_data);
520                         data++;
521                         desc_data++;
522                 }
523         }
524
525         kfree(desc);
526
527         return 0;
528 }
529
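/* Ask the firmware how many MAC statistics registers it exposes and derive
 * the number of command descriptors needed to read them all:
 * desc_num = 1 + DIV_ROUND_UP(reg_num - 3, 4)
 * (e.g. reg_num = 12 gives 4 descriptors).
 */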
530 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
531 {
532         struct hclge_desc desc;
533         __le32 *desc_data;
534         u32 reg_num;
535         int ret;
536
537         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
538         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
539         if (ret)
540                 return ret;
541
542         desc_data = (__le32 *)(&desc.data[0]);
543         reg_num = le32_to_cpu(*desc_data);
544
545         *desc_num = 1 + ((reg_num - 3) >> 2) +
546                     (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
547
548         return 0;
549 }
550
551 static int hclge_mac_update_stats(struct hclge_dev *hdev)
552 {
553         u32 desc_num;
554         int ret;
555
556         ret = hclge_mac_query_reg_num(hdev, &desc_num);
557         /* The firmware supports the new statistics acquisition method */
558         if (!ret)
559                 ret = hclge_mac_update_stats_complete(hdev, desc_num);
560         else if (ret == -EOPNOTSUPP)
561                 ret = hclge_mac_update_stats_defective(hdev);
562         else
563                 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
564
565         return ret;
566 }
567
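/* Query the per-queue RX and TX packet counters (HCLGE_OPC_QUERY_RX_STATS /
 * HCLGE_OPC_QUERY_TX_STATS) for every TQP of this handle and accumulate them
 * into the per-queue software stats.
 */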
568 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
569 {
570         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
571         struct hclge_vport *vport = hclge_get_vport(handle);
572         struct hclge_dev *hdev = vport->back;
573         struct hnae3_queue *queue;
574         struct hclge_desc desc[1];
575         struct hclge_tqp *tqp;
576         int ret, i;
577
578         for (i = 0; i < kinfo->num_tqps; i++) {
579                 queue = handle->kinfo.tqp[i];
580                 tqp = container_of(queue, struct hclge_tqp, q);
581                 /* command : HCLGE_OPC_QUERY_RX_STATS */
582                 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
583                                            true);
584
585                 desc[0].data[0] = cpu_to_le32(tqp->index);
586                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
587                 if (ret) {
588                         dev_err(&hdev->pdev->dev,
589                                 "Query tqp stat fail, status = %d, queue = %d\n",
590                                 ret, i);
591                         return ret;
592                 }
593                 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
594                         le32_to_cpu(desc[0].data[1]);
595         }
596
597         for (i = 0; i < kinfo->num_tqps; i++) {
598                 queue = handle->kinfo.tqp[i];
599                 tqp = container_of(queue, struct hclge_tqp, q);
600                 /* command : HCLGE_OPC_QUERY_TX_STATS */
601                 hclge_cmd_setup_basic_desc(&desc[0],
602                                            HCLGE_OPC_QUERY_TX_STATS,
603                                            true);
604
605                 desc[0].data[0] = cpu_to_le32(tqp->index);
606                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
607                 if (ret) {
608                         dev_err(&hdev->pdev->dev,
609                                 "Query tqp stat fail, status = %d, queue = %d\n",
610                                 ret, i);
611                         return ret;
612                 }
613                 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
614                         le32_to_cpu(desc[0].data[1]);
615         }
616
617         return 0;
618 }
619
620 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
621 {
622         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
623         struct hclge_tqp *tqp;
624         u64 *buff = data;
625         int i;
626
627         for (i = 0; i < kinfo->num_tqps; i++) {
628                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
629                 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
630         }
631
632         for (i = 0; i < kinfo->num_tqps; i++) {
633                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
634                 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
635         }
636
637         return buff;
638 }
639
640 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
641 {
642         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
643
644         /* each tqp has both a TX queue and an RX queue */
645         return kinfo->num_tqps * 2;
646 }
647
648 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
649 {
650         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
651         u8 *buff = data;
652         int i;
653
654         for (i = 0; i < kinfo->num_tqps; i++) {
655                 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
656                         struct hclge_tqp, q);
657                 snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
658                          tqp->index);
659                 buff = buff + ETH_GSTRING_LEN;
660         }
661
662         for (i = 0; i < kinfo->num_tqps; i++) {
663                 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
664                         struct hclge_tqp, q);
665                 snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
666                          tqp->index);
667                 buff = buff + ETH_GSTRING_LEN;
668         }
669
670         return buff;
671 }
672
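/* Copy each counter described by strs[] (name + byte offset) out of
 * comm_stats into the ethtool data buffer and return the position just past
 * the copied stats.
 */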
673 static u64 *hclge_comm_get_stats(const void *comm_stats,
674                                  const struct hclge_comm_stats_str strs[],
675                                  int size, u64 *data)
676 {
677         u64 *buf = data;
678         u32 i;
679
680         for (i = 0; i < size; i++)
681                 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
682
683         return buf + size;
684 }
685
686 static u8 *hclge_comm_get_strings(u32 stringset,
687                                   const struct hclge_comm_stats_str strs[],
688                                   int size, u8 *data)
689 {
690         char *buff = (char *)data;
691         u32 i;
692
693         if (stringset != ETH_SS_STATS)
694                 return buff;
695
696         for (i = 0; i < size; i++) {
697                 snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
698                 buff = buff + ETH_GSTRING_LEN;
699         }
700
701         return (u8 *)buff;
702 }
703
704 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
705 {
706         struct hnae3_handle *handle;
707         int status;
708
709         handle = &hdev->vport[0].nic;
710         if (handle->client) {
711                 status = hclge_tqps_update_stats(handle);
712                 if (status) {
713                         dev_err(&hdev->pdev->dev,
714                                 "Update TQPS stats fail, status = %d.\n",
715                                 status);
716                 }
717         }
718
719         status = hclge_mac_update_stats(hdev);
720         if (status)
721                 dev_err(&hdev->pdev->dev,
722                         "Update MAC stats fail, status = %d.\n", status);
723 }
724
725 static void hclge_update_stats(struct hnae3_handle *handle,
726                                struct net_device_stats *net_stats)
727 {
728         struct hclge_vport *vport = hclge_get_vport(handle);
729         struct hclge_dev *hdev = vport->back;
730         int status;
731
732         if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
733                 return;
734
735         status = hclge_mac_update_stats(hdev);
736         if (status)
737                 dev_err(&hdev->pdev->dev,
738                         "Update MAC stats fail, status = %d.\n",
739                         status);
740
741         status = hclge_tqps_update_stats(handle);
742         if (status)
743                 dev_err(&hdev->pdev->dev,
744                         "Update TQPS stats fail, status = %d.\n",
745                         status);
746
747         clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
748 }
749
750 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
751 {
752 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK | \
753                 HNAE3_SUPPORT_PHY_LOOPBACK | \
754                 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK | \
755                 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
756
757         struct hclge_vport *vport = hclge_get_vport(handle);
758         struct hclge_dev *hdev = vport->back;
759         int count = 0;
760
761         /* Loopback test support rules:
762          * mac: only GE mode is supported
763          * serdes: supported in all mac modes (GE/XGE/LGE/CGE)
764          * phy: only supported when a phy device exists on the board
765          */
766         if (stringset == ETH_SS_TEST) {
767                 /* clear loopback bit flags at first */
768                 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
769                 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
770                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
771                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
772                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
773                         count += 1;
774                         handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
775                 }
776
777                 count += 2;
778                 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
779                 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
780
781                 if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
782                      hdev->hw.mac.phydev->drv->set_loopback) ||
783                     hnae3_dev_phy_imp_supported(hdev)) {
784                         count += 1;
785                         handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
786                 }
787         } else if (stringset == ETH_SS_STATS) {
788                 count = ARRAY_SIZE(g_mac_stats_string) +
789                         hclge_tqps_get_sset_count(handle, stringset);
790         }
791
792         return count;
793 }
794
795 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
796                               u8 *data)
797 {
798         u8 *p = data;
799         int size;
800
801         if (stringset == ETH_SS_STATS) {
802                 size = ARRAY_SIZE(g_mac_stats_string);
803                 p = hclge_comm_get_strings(stringset, g_mac_stats_string,
804                                            size, p);
805                 p = hclge_tqps_get_strings(handle, p);
806         } else if (stringset == ETH_SS_TEST) {
807                 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
808                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
809                                ETH_GSTRING_LEN);
810                         p += ETH_GSTRING_LEN;
811                 }
812                 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
813                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
814                                ETH_GSTRING_LEN);
815                         p += ETH_GSTRING_LEN;
816                 }
817                 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
818                         memcpy(p,
819                                hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
820                                ETH_GSTRING_LEN);
821                         p += ETH_GSTRING_LEN;
822                 }
823                 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
824                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
825                                ETH_GSTRING_LEN);
826                         p += ETH_GSTRING_LEN;
827                 }
828         }
829 }
830
831 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
832 {
833         struct hclge_vport *vport = hclge_get_vport(handle);
834         struct hclge_dev *hdev = vport->back;
835         u64 *p;
836
837         p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
838                                  ARRAY_SIZE(g_mac_stats_string), data);
839         p = hclge_tqps_get_stats(handle, p);
840 }
841
842 static void hclge_get_mac_stat(struct hnae3_handle *handle,
843                                struct hns3_mac_stats *mac_stats)
844 {
845         struct hclge_vport *vport = hclge_get_vport(handle);
846         struct hclge_dev *hdev = vport->back;
847
848         hclge_update_stats(handle, NULL);
849
850         mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
851         mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
852 }
853
854 static int hclge_parse_func_status(struct hclge_dev *hdev,
855                                    struct hclge_func_status_cmd *status)
856 {
857 #define HCLGE_MAC_ID_MASK       0xF
858
859         if (!(status->pf_state & HCLGE_PF_STATE_DONE))
860                 return -EINVAL;
861
862         /* Set the pf to main pf */
863         if (status->pf_state & HCLGE_PF_STATE_MAIN)
864                 hdev->flag |= HCLGE_FLAG_MAIN;
865         else
866                 hdev->flag &= ~HCLGE_FLAG_MAIN;
867
868         hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
869         return 0;
870 }
871
872 static int hclge_query_function_status(struct hclge_dev *hdev)
873 {
874 #define HCLGE_QUERY_MAX_CNT     5
875
876         struct hclge_func_status_cmd *req;
877         struct hclge_desc desc;
878         int timeout = 0;
879         int ret;
880
881         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
882         req = (struct hclge_func_status_cmd *)desc.data;
883
884         do {
885                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
886                 if (ret) {
887                         dev_err(&hdev->pdev->dev,
888                                 "query function status failed %d.\n", ret);
889                         return ret;
890                 }
891
892                 /* Check whether PF reset is done */
893                 if (req->pf_state)
894                         break;
895                 usleep_range(1000, 2000);
896         } while (timeout++ < HCLGE_QUERY_MAX_CNT);
897
898         return hclge_parse_func_status(hdev, req);
899 }
900
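/* Query PF resources from firmware: TQP count, packet/TX/DV buffer sizes and
 * NIC/RoCE MSI-X vector counts; fails if fewer than HNAE3_MIN_VECTOR_NUM NIC
 * vectors are available.
 */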
901 static int hclge_query_pf_resource(struct hclge_dev *hdev)
902 {
903         struct hclge_pf_res_cmd *req;
904         struct hclge_desc desc;
905         int ret;
906
907         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
908         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
909         if (ret) {
910                 dev_err(&hdev->pdev->dev,
911                         "query pf resource failed %d.\n", ret);
912                 return ret;
913         }
914
915         req = (struct hclge_pf_res_cmd *)desc.data;
916         hdev->num_tqps = le16_to_cpu(req->tqp_num) +
917                          le16_to_cpu(req->ext_tqp_num);
918         hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
919
920         if (req->tx_buf_size)
921                 hdev->tx_buf_size =
922                         le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
923         else
924                 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
925
926         hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
927
928         if (req->dv_buf_size)
929                 hdev->dv_buf_size =
930                         le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
931         else
932                 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
933
934         hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
935
936         hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
937         if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
938                 dev_err(&hdev->pdev->dev,
939                         "only %u msi resources available, not enough for pf(min:2).\n",
940                         hdev->num_nic_msi);
941                 return -EINVAL;
942         }
943
944         if (hnae3_dev_roce_supported(hdev)) {
945                 hdev->num_roce_msi =
946                         le16_to_cpu(req->pf_intr_vector_number_roce);
947
948                 /* PF should have both NIC and RoCE vectors;
949                  * NIC vectors are queued before RoCE vectors.
950                  */
951                 hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
952         } else {
953                 hdev->num_msi = hdev->num_nic_msi;
954         }
955
956         return 0;
957 }
958
959 static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
960 {
961         switch (speed_cmd) {
962         case HCLGE_FW_MAC_SPEED_10M:
963                 *speed = HCLGE_MAC_SPEED_10M;
964                 break;
965         case HCLGE_FW_MAC_SPEED_100M:
966                 *speed = HCLGE_MAC_SPEED_100M;
967                 break;
968         case HCLGE_FW_MAC_SPEED_1G:
969                 *speed = HCLGE_MAC_SPEED_1G;
970                 break;
971         case HCLGE_FW_MAC_SPEED_10G:
972                 *speed = HCLGE_MAC_SPEED_10G;
973                 break;
974         case HCLGE_FW_MAC_SPEED_25G:
975                 *speed = HCLGE_MAC_SPEED_25G;
976                 break;
977         case HCLGE_FW_MAC_SPEED_40G:
978                 *speed = HCLGE_MAC_SPEED_40G;
979                 break;
980         case HCLGE_FW_MAC_SPEED_50G:
981                 *speed = HCLGE_MAC_SPEED_50G;
982                 break;
983         case HCLGE_FW_MAC_SPEED_100G:
984                 *speed = HCLGE_MAC_SPEED_100G;
985                 break;
986         case HCLGE_FW_MAC_SPEED_200G:
987                 *speed = HCLGE_MAC_SPEED_200G;
988                 break;
989         default:
990                 return -EINVAL;
991         }
992
993         return 0;
994 }
995
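/* Map each MAC speed to the corresponding firmware speed-ability bit */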
996 static const struct hclge_speed_bit_map speed_bit_map[] = {
997         {HCLGE_MAC_SPEED_10M, HCLGE_SUPPORT_10M_BIT},
998         {HCLGE_MAC_SPEED_100M, HCLGE_SUPPORT_100M_BIT},
999         {HCLGE_MAC_SPEED_1G, HCLGE_SUPPORT_1G_BIT},
1000         {HCLGE_MAC_SPEED_10G, HCLGE_SUPPORT_10G_BIT},
1001         {HCLGE_MAC_SPEED_25G, HCLGE_SUPPORT_25G_BIT},
1002         {HCLGE_MAC_SPEED_40G, HCLGE_SUPPORT_40G_BIT},
1003         {HCLGE_MAC_SPEED_50G, HCLGE_SUPPORT_50G_BIT},
1004         {HCLGE_MAC_SPEED_100G, HCLGE_SUPPORT_100G_BIT},
1005         {HCLGE_MAC_SPEED_200G, HCLGE_SUPPORT_200G_BIT},
1006 };
1007
1008 static int hclge_get_speed_bit(u32 speed, u32 *speed_bit)
1009 {
1010         u16 i;
1011
1012         for (i = 0; i < ARRAY_SIZE(speed_bit_map); i++) {
1013                 if (speed == speed_bit_map[i].speed) {
1014                         *speed_bit = speed_bit_map[i].speed_bit;
1015                         return 0;
1016                 }
1017         }
1018
1019         return -EINVAL;
1020 }
1021
1022 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
1023 {
1024         struct hclge_vport *vport = hclge_get_vport(handle);
1025         struct hclge_dev *hdev = vport->back;
1026         u32 speed_ability = hdev->hw.mac.speed_ability;
1027         u32 speed_bit = 0;
1028         int ret;
1029
1030         ret = hclge_get_speed_bit(speed, &speed_bit);
1031         if (ret)
1032                 return ret;
1033
1034         if (speed_bit & speed_ability)
1035                 return 0;
1036
1037         return -EINVAL;
1038 }
1039
1040 static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)
1041 {
1042         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1043                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1044                                  mac->supported);
1045         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1046                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1047                                  mac->supported);
1048         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1049                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1050                                  mac->supported);
1051         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1052                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1053                                  mac->supported);
1054         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1055                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1056                                  mac->supported);
1057         if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1058                 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
1059                                  mac->supported);
1060 }
1061
1062 static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)
1063 {
1064         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1065                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1066                                  mac->supported);
1067         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1068                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1069                                  mac->supported);
1070         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1071                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1072                                  mac->supported);
1073         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1074                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1075                                  mac->supported);
1076         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1077                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1078                                  mac->supported);
1079         if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1080                 linkmode_set_bit(
1081                         ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
1082                         mac->supported);
1083 }
1084
1085 static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)
1086 {
1087         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1088                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1089                                  mac->supported);
1090         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1091                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1092                                  mac->supported);
1093         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1094                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1095                                  mac->supported);
1096         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1097                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1098                                  mac->supported);
1099         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1100                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1101                                  mac->supported);
1102         if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1103                 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
1104                                  mac->supported);
1105 }
1106
1107 static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)
1108 {
1109         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1110                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1111                                  mac->supported);
1112         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1113                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1114                                  mac->supported);
1115         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1116                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1117                                  mac->supported);
1118         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1119                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1120                                  mac->supported);
1121         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1122                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1123                                  mac->supported);
1124         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1125                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1126                                  mac->supported);
1127         if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1128                 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
1129                                  mac->supported);
1130 }
1131
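/* Derive the supported FEC modes from the current MAC speed */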
1132 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1133 {
1134         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1135         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1136
1137         switch (mac->speed) {
1138         case HCLGE_MAC_SPEED_10G:
1139         case HCLGE_MAC_SPEED_40G:
1140                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1141                                  mac->supported);
1142                 mac->fec_ability =
1143                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1144                 break;
1145         case HCLGE_MAC_SPEED_25G:
1146         case HCLGE_MAC_SPEED_50G:
1147                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1148                                  mac->supported);
1149                 mac->fec_ability =
1150                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1151                         BIT(HNAE3_FEC_AUTO);
1152                 break;
1153         case HCLGE_MAC_SPEED_100G:
1154         case HCLGE_MAC_SPEED_200G:
1155                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1156                 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1157                 break;
1158         default:
1159                 mac->fec_ability = 0;
1160                 break;
1161         }
1162 }
1163
1164 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1165                                         u16 speed_ability)
1166 {
1167         struct hclge_mac *mac = &hdev->hw.mac;
1168
1169         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1170                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1171                                  mac->supported);
1172
1173         hclge_convert_setting_sr(mac, speed_ability);
1174         hclge_convert_setting_lr(mac, speed_ability);
1175         hclge_convert_setting_cr(mac, speed_ability);
1176         if (hnae3_dev_fec_supported(hdev))
1177                 hclge_convert_setting_fec(mac);
1178
1179         if (hnae3_dev_pause_supported(hdev))
1180                 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1181
1182         linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1183         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1184 }
1185
1186 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1187                                             u16 speed_ability)
1188 {
1189         struct hclge_mac *mac = &hdev->hw.mac;
1190
1191         hclge_convert_setting_kr(mac, speed_ability);
1192         if (hnae3_dev_fec_supported(hdev))
1193                 hclge_convert_setting_fec(mac);
1194
1195         if (hnae3_dev_pause_supported(hdev))
1196                 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1197
1198         linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1199         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1200 }
1201
1202 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1203                                          u16 speed_ability)
1204 {
1205         unsigned long *supported = hdev->hw.mac.supported;
1206
1207         /* default to supporting all speeds for a GE port */
1208         if (!speed_ability)
1209                 speed_ability = HCLGE_SUPPORT_GE;
1210
1211         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1212                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1213                                  supported);
1214
1215         if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1216                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1217                                  supported);
1218                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1219                                  supported);
1220         }
1221
1222         if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1223                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1224                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1225         }
1226
1227         if (hnae3_dev_pause_supported(hdev)) {
1228                 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1229                 linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1230         }
1231
1232         linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1233         linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1234 }
1235
1236 static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
1237 {
1238         u8 media_type = hdev->hw.mac.media_type;
1239
1240         if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1241                 hclge_parse_fiber_link_mode(hdev, speed_ability);
1242         else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1243                 hclge_parse_copper_link_mode(hdev, speed_ability);
1244         else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1245                 hclge_parse_backplane_link_mode(hdev, speed_ability);
1246 }
1247
1248 static u32 hclge_get_max_speed(u16 speed_ability)
1249 {
1250         if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1251                 return HCLGE_MAC_SPEED_200G;
1252
1253         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1254                 return HCLGE_MAC_SPEED_100G;
1255
1256         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1257                 return HCLGE_MAC_SPEED_50G;
1258
1259         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1260                 return HCLGE_MAC_SPEED_40G;
1261
1262         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1263                 return HCLGE_MAC_SPEED_25G;
1264
1265         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1266                 return HCLGE_MAC_SPEED_10G;
1267
1268         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1269                 return HCLGE_MAC_SPEED_1G;
1270
1271         if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1272                 return HCLGE_MAC_SPEED_100M;
1273
1274         if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1275                 return HCLGE_MAC_SPEED_10M;
1276
1277         return HCLGE_MAC_SPEED_1G;
1278 }
1279
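/* Unpack the two-descriptor configuration response into @cfg: TC and queue
 * descriptor numbers, PHY address, media type, RX buffer length, MAC address,
 * default speed, per-VF RSS size, NUMA node map, speed ability, VLAN filter
 * capability and UMV table space.
 */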
1280 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1281 {
1282 #define HCLGE_TX_SPARE_SIZE_UNIT                4096
1283 #define SPEED_ABILITY_EXT_SHIFT                 8
1284
1285         struct hclge_cfg_param_cmd *req;
1286         u64 mac_addr_tmp_high;
1287         u16 speed_ability_ext;
1288         u64 mac_addr_tmp;
1289         unsigned int i;
1290
1291         req = (struct hclge_cfg_param_cmd *)desc[0].data;
1292
1293         /* get the configuration */
1294         cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1295                                       HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1296         cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1297                                             HCLGE_CFG_TQP_DESC_N_M,
1298                                             HCLGE_CFG_TQP_DESC_N_S);
1299
1300         cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1301                                         HCLGE_CFG_PHY_ADDR_M,
1302                                         HCLGE_CFG_PHY_ADDR_S);
1303         cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1304                                           HCLGE_CFG_MEDIA_TP_M,
1305                                           HCLGE_CFG_MEDIA_TP_S);
1306         cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1307                                           HCLGE_CFG_RX_BUF_LEN_M,
1308                                           HCLGE_CFG_RX_BUF_LEN_S);
1309         /* get mac_address */
1310         mac_addr_tmp = __le32_to_cpu(req->param[2]);
1311         mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1312                                             HCLGE_CFG_MAC_ADDR_H_M,
1313                                             HCLGE_CFG_MAC_ADDR_H_S);
1314
1315         mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
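        /* For reference: param[2] carries MAC bytes 0-3 and the
         * HCLGE_CFG_MAC_ADDR_H field of param[3] carries bytes 4-5, so the
         * (<< 31) << 1 above places the high half at bits 32-47 of
         * mac_addr_tmp before the per-byte copy below.
         */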
1316
1317         cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1318                                              HCLGE_CFG_DEFAULT_SPEED_M,
1319                                              HCLGE_CFG_DEFAULT_SPEED_S);
1320         cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1321                                                HCLGE_CFG_RSS_SIZE_M,
1322                                                HCLGE_CFG_RSS_SIZE_S);
1323
1324         for (i = 0; i < ETH_ALEN; i++)
1325                 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1326
1327         req = (struct hclge_cfg_param_cmd *)desc[1].data;
1328         cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1329
1330         cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1331                                              HCLGE_CFG_SPEED_ABILITY_M,
1332                                              HCLGE_CFG_SPEED_ABILITY_S);
1333         speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
1334                                             HCLGE_CFG_SPEED_ABILITY_EXT_M,
1335                                             HCLGE_CFG_SPEED_ABILITY_EXT_S);
1336         cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;
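        /* For example (hypothetical register values): a base speed_ability of
         * 0x00ff combined with a speed_ability_ext of 0x03 gives a final mask
         * of 0x03ff.
         */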
1337
1338         cfg->vlan_fliter_cap = hnae3_get_field(__le32_to_cpu(req->param[1]),
1339                                                HCLGE_CFG_VLAN_FLTR_CAP_M,
1340                                                HCLGE_CFG_VLAN_FLTR_CAP_S);
1341
1342         cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1343                                          HCLGE_CFG_UMV_TBL_SPACE_M,
1344                                          HCLGE_CFG_UMV_TBL_SPACE_S);
1345
1346         cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
1347                                                HCLGE_CFG_PF_RSS_SIZE_M,
1348                                                HCLGE_CFG_PF_RSS_SIZE_S);
1349
1350         /* HCLGE_CFG_PF_RSS_SIZE_M holds the PF max RSS size as a
1351          * power-of-2 exponent rather than the value itself, which is
1352          * more flexible for future changes and expansions.
1353          * A PF field of 0 is not meaningful on its own; in that case
1354          * the PF and VF share the same max RSS size field,
1355          * HCLGE_CFG_RSS_SIZE_S.
1356          */
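        /* Example with hypothetical field values: a PF field of 7 gives
         * 1U << 7 = 128 queues, while a PF field of 0 falls back to
         * vf_rss_size_max.
         */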
1357         cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
1358                                1U << cfg->pf_rss_size_max :
1359                                cfg->vf_rss_size_max;
1360
1361         /* The unit of the tx spare buffer size queried from the configuration
1362          * file is HCLGE_TX_SPARE_SIZE_UNIT(4096) bytes, so a conversion is
1363          * needed here.
1364          */
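        /* For instance, a raw field value of 2 corresponds to
         * 2 * HCLGE_TX_SPARE_SIZE_UNIT = 8192 bytes of tx spare buffer.
         */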
1365         cfg->tx_spare_buf_size = hnae3_get_field(__le32_to_cpu(req->param[2]),
1366                                                  HCLGE_CFG_TX_SPARE_BUF_SIZE_M,
1367                                                  HCLGE_CFG_TX_SPARE_BUF_SIZE_S);
1368         cfg->tx_spare_buf_size *= HCLGE_TX_SPARE_SIZE_UNIT;
1369 }
1370
1371 /* hclge_get_cfg: query the static parameters from flash
1372  * @hdev: pointer to struct hclge_dev
1373  * @hcfg: the config structure to be filled out
1374  */
1375 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1376 {
1377         struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1378         struct hclge_cfg_param_cmd *req;
1379         unsigned int i;
1380         int ret;
1381
1382         for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1383                 u32 offset = 0;
1384
1385                 req = (struct hclge_cfg_param_cmd *)desc[i].data;
1386                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1387                                            true);
1388                 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1389                                 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1390                 /* The length should be in units of 4 bytes when sent to hardware */
1391                 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1392                                 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1393                 req->offset = cpu_to_le32(offset);
1394         }
1395
1396         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1397         if (ret) {
1398                 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1399                 return ret;
1400         }
1401
1402         hclge_parse_cfg(hcfg, desc);
1403
1404         return 0;
1405 }
1406
1407 static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
1408 {
1409 #define HCLGE_MAX_NON_TSO_BD_NUM                        8U
1410
1411         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1412
1413         ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1414         ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1415         ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
1416         ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
1417         ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
1418         ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
1419         ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
1420         ae_dev->dev_specs.umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1421 }
1422
1423 static void hclge_parse_dev_specs(struct hclge_dev *hdev,
1424                                   struct hclge_desc *desc)
1425 {
1426         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1427         struct hclge_dev_specs_0_cmd *req0;
1428         struct hclge_dev_specs_1_cmd *req1;
1429
1430         req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
1431         req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;
1432
1433         ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
1434         ae_dev->dev_specs.rss_ind_tbl_size =
1435                 le16_to_cpu(req0->rss_ind_tbl_size);
1436         ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
1437         ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
1438         ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
1439         ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
1440         ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
1441         ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
1442         ae_dev->dev_specs.umv_size = le16_to_cpu(req1->umv_size);
1443         ae_dev->dev_specs.mc_mac_size = le16_to_cpu(req1->mc_mac_size);
1444 }
1445
1446 static void hclge_check_dev_specs(struct hclge_dev *hdev)
1447 {
1448         struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
1449
1450         if (!dev_specs->max_non_tso_bd_num)
1451                 dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1452         if (!dev_specs->rss_ind_tbl_size)
1453                 dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1454         if (!dev_specs->rss_key_size)
1455                 dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
1456         if (!dev_specs->max_tm_rate)
1457                 dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
1458         if (!dev_specs->max_qset_num)
1459                 dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
1460         if (!dev_specs->max_int_gl)
1461                 dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
1462         if (!dev_specs->max_frm_size)
1463                 dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
1464         if (!dev_specs->umv_size)
1465                 dev_specs->umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1466 }
1467
1468 static int hclge_query_dev_specs(struct hclge_dev *hdev)
1469 {
1470         struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
1471         int ret;
1472         int i;
1473
1474         /* set default specifications as devices lower than version V3 do not
1475          * support querying specifications from firmware.
1476          */
1477         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
1478                 hclge_set_default_dev_specs(hdev);
1479                 return 0;
1480         }
1481
1482         for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
1483                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
1484                                            true);
1485                 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1486         }
1487         hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
1488
1489         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
1490         if (ret)
1491                 return ret;
1492
1493         hclge_parse_dev_specs(hdev, desc);
1494         hclge_check_dev_specs(hdev);
1495
1496         return 0;
1497 }
1498
1499 static int hclge_get_cap(struct hclge_dev *hdev)
1500 {
1501         int ret;
1502
1503         ret = hclge_query_function_status(hdev);
1504         if (ret) {
1505                 dev_err(&hdev->pdev->dev,
1506                         "query function status error %d.\n", ret);
1507                 return ret;
1508         }
1509
1510         /* get pf resource */
1511         return hclge_query_pf_resource(hdev);
1512 }
1513
1514 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1515 {
1516 #define HCLGE_MIN_TX_DESC       64
1517 #define HCLGE_MIN_RX_DESC       64
1518
1519         if (!is_kdump_kernel())
1520                 return;
1521
1522         dev_info(&hdev->pdev->dev,
1523                  "Running kdump kernel. Using minimal resources\n");
1524
1525         /* the minimal number of queue pairs equals the number of vports */
1526         hdev->num_tqps = hdev->num_req_vfs + 1;
1527         hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1528         hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1529 }
1530
1531 static int hclge_configure(struct hclge_dev *hdev)
1532 {
1533         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1534         const struct cpumask *cpumask = cpu_online_mask;
1535         struct hclge_cfg cfg;
1536         unsigned int i;
1537         int node, ret;
1538
1539         ret = hclge_get_cfg(hdev, &cfg);
1540         if (ret)
1541                 return ret;
1542
1543         hdev->base_tqp_pid = 0;
1544         hdev->vf_rss_size_max = cfg.vf_rss_size_max;
1545         hdev->pf_rss_size_max = cfg.pf_rss_size_max;
1546         hdev->rx_buf_len = cfg.rx_buf_len;
1547         ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1548         hdev->hw.mac.media_type = cfg.media_type;
1549         hdev->hw.mac.phy_addr = cfg.phy_addr;
1550         hdev->num_tx_desc = cfg.tqp_desc_num;
1551         hdev->num_rx_desc = cfg.tqp_desc_num;
1552         hdev->tm_info.num_pg = 1;
1553         hdev->tc_max = cfg.tc_num;
1554         hdev->tm_info.hw_pfc_map = 0;
1555         if (cfg.umv_space)
1556                 hdev->wanted_umv_size = cfg.umv_space;
1557         else
1558                 hdev->wanted_umv_size = hdev->ae_dev->dev_specs.umv_size;
1559         hdev->tx_spare_buf_size = cfg.tx_spare_buf_size;
1560         hdev->gro_en = true;
1561         if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF)
1562                 set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);
1563
1564         if (hnae3_dev_fd_supported(hdev)) {
1565                 hdev->fd_en = true;
1566                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1567         }
1568
1569         ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1570         if (ret) {
1571                 dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
1572                         cfg.default_speed, ret);
1573                 return ret;
1574         }
1575
1576         hclge_parse_link_mode(hdev, cfg.speed_ability);
1577
1578         hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1579
1580         if ((hdev->tc_max > HNAE3_MAX_TC) ||
1581             (hdev->tc_max < 1)) {
1582                 dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
1583                          hdev->tc_max);
1584                 hdev->tc_max = 1;
1585         }
1586
1587         /* Dev does not support DCB */
1588         if (!hnae3_dev_dcb_supported(hdev)) {
1589                 hdev->tc_max = 1;
1590                 hdev->pfc_max = 0;
1591         } else {
1592                 hdev->pfc_max = hdev->tc_max;
1593         }
1594
1595         hdev->tm_info.num_tc = 1;
1596
1597         /* Discontiguous TCs are currently not supported */
1598         for (i = 0; i < hdev->tm_info.num_tc; i++)
1599                 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1600
1601         hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1602
1603         hclge_init_kdump_kernel_config(hdev);
1604
1605         /* Set the affinity based on the NUMA node */
1606         node = dev_to_node(&hdev->pdev->dev);
1607         if (node != NUMA_NO_NODE)
1608                 cpumask = cpumask_of_node(node);
1609
1610         cpumask_copy(&hdev->affinity_mask, cpumask);
1611
1612         return ret;
1613 }
1614
1615 static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
1616                             u16 tso_mss_max)
1617 {
1618         struct hclge_cfg_tso_status_cmd *req;
1619         struct hclge_desc desc;
1620
1621         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1622
1623         req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1624         req->tso_mss_min = cpu_to_le16(tso_mss_min);
1625         req->tso_mss_max = cpu_to_le16(tso_mss_max);
1626
1627         return hclge_cmd_send(&hdev->hw, &desc, 1);
1628 }
1629
1630 static int hclge_config_gro(struct hclge_dev *hdev)
1631 {
1632         struct hclge_cfg_gro_status_cmd *req;
1633         struct hclge_desc desc;
1634         int ret;
1635
1636         if (!hnae3_dev_gro_supported(hdev))
1637                 return 0;
1638
1639         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1640         req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1641
1642         req->gro_en = hdev->gro_en ? 1 : 0;
1643
1644         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1645         if (ret)
1646                 dev_err(&hdev->pdev->dev,
1647                         "GRO hardware config cmd failed, ret = %d\n", ret);
1648
1649         return ret;
1650 }
1651
1652 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1653 {
1654         struct hclge_tqp *tqp;
1655         int i;
1656
1657         hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1658                                   sizeof(struct hclge_tqp), GFP_KERNEL);
1659         if (!hdev->htqp)
1660                 return -ENOMEM;
1661
1662         tqp = hdev->htqp;
1663
1664         for (i = 0; i < hdev->num_tqps; i++) {
1665                 tqp->dev = &hdev->pdev->dev;
1666                 tqp->index = i;
1667
1668                 tqp->q.ae_algo = &ae_algo;
1669                 tqp->q.buf_size = hdev->rx_buf_len;
1670                 tqp->q.tx_desc_num = hdev->num_tx_desc;
1671                 tqp->q.rx_desc_num = hdev->num_rx_desc;
1672
1673                 /* need an extended offset to configure queues >=
1674                  * HCLGE_TQP_MAX_SIZE_DEV_V2
1675                  */
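                /* For instance, assuming HCLGE_TQP_MAX_SIZE_DEV_V2 is 1024,
                 * queue 1023 sits at io_base + HCLGE_TQP_REG_OFFSET +
                 * 1023 * HCLGE_TQP_REG_SIZE, while queue 1024 restarts from
                 * io_base + HCLGE_TQP_REG_OFFSET + HCLGE_TQP_EXT_REG_OFFSET.
                 */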
1676                 if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
1677                         tqp->q.io_base = hdev->hw.io_base +
1678                                          HCLGE_TQP_REG_OFFSET +
1679                                          i * HCLGE_TQP_REG_SIZE;
1680                 else
1681                         tqp->q.io_base = hdev->hw.io_base +
1682                                          HCLGE_TQP_REG_OFFSET +
1683                                          HCLGE_TQP_EXT_REG_OFFSET +
1684                                          (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
1685                                          HCLGE_TQP_REG_SIZE;
1686
1687                 tqp++;
1688         }
1689
1690         return 0;
1691 }
1692
1693 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1694                                   u16 tqp_pid, u16 tqp_vid, bool is_pf)
1695 {
1696         struct hclge_tqp_map_cmd *req;
1697         struct hclge_desc desc;
1698         int ret;
1699
1700         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1701
1702         req = (struct hclge_tqp_map_cmd *)desc.data;
1703         req->tqp_id = cpu_to_le16(tqp_pid);
1704         req->tqp_vf = func_id;
1705         req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1706         if (!is_pf)
1707                 req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1708         req->tqp_vid = cpu_to_le16(tqp_vid);
1709
1710         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1711         if (ret)
1712                 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1713
1714         return ret;
1715 }
1716
1717 static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1718 {
1719         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1720         struct hclge_dev *hdev = vport->back;
1721         int i, alloced;
1722
1723         for (i = 0, alloced = 0; i < hdev->num_tqps &&
1724              alloced < num_tqps; i++) {
1725                 if (!hdev->htqp[i].alloced) {
1726                         hdev->htqp[i].q.handle = &vport->nic;
1727                         hdev->htqp[i].q.tqp_index = alloced;
1728                         hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1729                         hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1730                         kinfo->tqp[alloced] = &hdev->htqp[i].q;
1731                         hdev->htqp[i].alloced = true;
1732                         alloced++;
1733                 }
1734         }
1735         vport->alloc_tqps = alloced;
1736         kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
1737                                 vport->alloc_tqps / hdev->tm_info.num_tc);
1738
1739         /* ensure a one-to-one mapping between irq and queue by default */
1740         kinfo->rss_size = min_t(u16, kinfo->rss_size,
1741                                 (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
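        /* e.g. with an assumed 16 NIC MSI vectors and 4 TCs, rss_size is
         * capped at (16 - 1) / 4 = 3 queues per TC.
         */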
1742
1743         return 0;
1744 }
1745
1746 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1747                             u16 num_tx_desc, u16 num_rx_desc)
1748
1749 {
1750         struct hnae3_handle *nic = &vport->nic;
1751         struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1752         struct hclge_dev *hdev = vport->back;
1753         int ret;
1754
1755         kinfo->num_tx_desc = num_tx_desc;
1756         kinfo->num_rx_desc = num_rx_desc;
1757
1758         kinfo->rx_buf_len = hdev->rx_buf_len;
1759         kinfo->tx_spare_buf_size = hdev->tx_spare_buf_size;
1760
1761         kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1762                                   sizeof(struct hnae3_queue *), GFP_KERNEL);
1763         if (!kinfo->tqp)
1764                 return -ENOMEM;
1765
1766         ret = hclge_assign_tqp(vport, num_tqps);
1767         if (ret)
1768                 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1769
1770         return ret;
1771 }
1772
1773 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1774                                   struct hclge_vport *vport)
1775 {
1776         struct hnae3_handle *nic = &vport->nic;
1777         struct hnae3_knic_private_info *kinfo;
1778         u16 i;
1779
1780         kinfo = &nic->kinfo;
1781         for (i = 0; i < vport->alloc_tqps; i++) {
1782                 struct hclge_tqp *q =
1783                         container_of(kinfo->tqp[i], struct hclge_tqp, q);
1784                 bool is_pf;
1785                 int ret;
1786
1787                 is_pf = !(vport->vport_id);
1788                 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1789                                              i, is_pf);
1790                 if (ret)
1791                         return ret;
1792         }
1793
1794         return 0;
1795 }
1796
1797 static int hclge_map_tqp(struct hclge_dev *hdev)
1798 {
1799         struct hclge_vport *vport = hdev->vport;
1800         u16 i, num_vport;
1801
1802         num_vport = hdev->num_req_vfs + 1;
1803         for (i = 0; i < num_vport; i++) {
1804                 int ret;
1805
1806                 ret = hclge_map_tqp_to_vport(hdev, vport);
1807                 if (ret)
1808                         return ret;
1809
1810                 vport++;
1811         }
1812
1813         return 0;
1814 }
1815
1816 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1817 {
1818         struct hnae3_handle *nic = &vport->nic;
1819         struct hclge_dev *hdev = vport->back;
1820         int ret;
1821
1822         nic->pdev = hdev->pdev;
1823         nic->ae_algo = &ae_algo;
1824         nic->numa_node_mask = hdev->numa_node_mask;
1825         nic->kinfo.io_base = hdev->hw.io_base;
1826
1827         ret = hclge_knic_setup(vport, num_tqps,
1828                                hdev->num_tx_desc, hdev->num_rx_desc);
1829         if (ret)
1830                 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1831
1832         return ret;
1833 }
1834
1835 static int hclge_alloc_vport(struct hclge_dev *hdev)
1836 {
1837         struct pci_dev *pdev = hdev->pdev;
1838         struct hclge_vport *vport;
1839         u32 tqp_main_vport;
1840         u32 tqp_per_vport;
1841         int num_vport, i;
1842         int ret;
1843
1844         /* We need to alloc a vport for the main NIC of the PF */
1845         num_vport = hdev->num_req_vfs + 1;
1846
1847         if (hdev->num_tqps < num_vport) {
1848                 dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
1849                         hdev->num_tqps, num_vport);
1850                 return -EINVAL;
1851         }
1852
1853         /* Alloc the same number of TQPs for every vport */
1854         tqp_per_vport = hdev->num_tqps / num_vport;
1855         tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1856
1857         vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1858                              GFP_KERNEL);
1859         if (!vport)
1860                 return -ENOMEM;
1861
1862         hdev->vport = vport;
1863         hdev->num_alloc_vport = num_vport;
1864
1865         if (IS_ENABLED(CONFIG_PCI_IOV))
1866                 hdev->num_alloc_vfs = hdev->num_req_vfs;
1867
1868         for (i = 0; i < num_vport; i++) {
1869                 vport->back = hdev;
1870                 vport->vport_id = i;
1871                 vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1872                 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1873                 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1874                 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1875                 vport->req_vlan_fltr_en = true;
1876                 INIT_LIST_HEAD(&vport->vlan_list);
1877                 INIT_LIST_HEAD(&vport->uc_mac_list);
1878                 INIT_LIST_HEAD(&vport->mc_mac_list);
1879                 spin_lock_init(&vport->mac_list_lock);
1880
1881                 if (i == 0)
1882                         ret = hclge_vport_setup(vport, tqp_main_vport);
1883                 else
1884                         ret = hclge_vport_setup(vport, tqp_per_vport);
1885                 if (ret) {
1886                         dev_err(&pdev->dev,
1887                                 "vport setup failed for vport %d, %d\n",
1888                                 i, ret);
1889                         return ret;
1890                 }
1891
1892                 vport++;
1893         }
1894
1895         return 0;
1896 }
1897
1898 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1899                                     struct hclge_pkt_buf_alloc *buf_alloc)
1900 {
1901 /* TX buffer size is in units of 128 bytes */
1902 #define HCLGE_BUF_SIZE_UNIT_SHIFT       7
1903 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK    BIT(15)
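/* For illustration: an assumed tx_buf_size of 0x8000 (32 KiB) is encoded as
 * (0x8000 >> 7) | BIT(15), i.e. the size in 128-byte units with the
 * update-enable bit set.
 */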
1904         struct hclge_tx_buff_alloc_cmd *req;
1905         struct hclge_desc desc;
1906         int ret;
1907         u8 i;
1908
1909         req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1910
1911         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1912         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1913                 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1914
1915                 req->tx_pkt_buff[i] =
1916                         cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1917                                      HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1918         }
1919
1920         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1921         if (ret)
1922                 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1923                         ret);
1924
1925         return ret;
1926 }
1927
1928 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1929                                  struct hclge_pkt_buf_alloc *buf_alloc)
1930 {
1931         int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1932
1933         if (ret)
1934                 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1935
1936         return ret;
1937 }
1938
1939 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1940 {
1941         unsigned int i;
1942         u32 cnt = 0;
1943
1944         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1945                 if (hdev->hw_tc_map & BIT(i))
1946                         cnt++;
1947         return cnt;
1948 }
1949
1950 /* Get the number of pfc-enabled TCs that have a private buffer */
1951 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1952                                   struct hclge_pkt_buf_alloc *buf_alloc)
1953 {
1954         struct hclge_priv_buf *priv;
1955         unsigned int i;
1956         int cnt = 0;
1957
1958         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1959                 priv = &buf_alloc->priv_buf[i];
1960                 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1961                     priv->enable)
1962                         cnt++;
1963         }
1964
1965         return cnt;
1966 }
1967
1968 /* Get the number of pfc-disabled TCs that have a private buffer */
1969 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1970                                      struct hclge_pkt_buf_alloc *buf_alloc)
1971 {
1972         struct hclge_priv_buf *priv;
1973         unsigned int i;
1974         int cnt = 0;
1975
1976         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1977                 priv = &buf_alloc->priv_buf[i];
1978                 if (hdev->hw_tc_map & BIT(i) &&
1979                     !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1980                     priv->enable)
1981                         cnt++;
1982         }
1983
1984         return cnt;
1985 }
1986
1987 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1988 {
1989         struct hclge_priv_buf *priv;
1990         u32 rx_priv = 0;
1991         int i;
1992
1993         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1994                 priv = &buf_alloc->priv_buf[i];
1995                 if (priv->enable)
1996                         rx_priv += priv->buf_size;
1997         }
1998         return rx_priv;
1999 }
2000
2001 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
2002 {
2003         u32 i, total_tx_size = 0;
2004
2005         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
2006                 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
2007
2008         return total_tx_size;
2009 }
2010
2011 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
2012                                 struct hclge_pkt_buf_alloc *buf_alloc,
2013                                 u32 rx_all)
2014 {
2015         u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
2016         u32 tc_num = hclge_get_tc_num(hdev);
2017         u32 shared_buf, aligned_mps;
2018         u32 rx_priv;
2019         int i;
2020
2021         aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2022
2023         if (hnae3_dev_dcb_supported(hdev))
2024                 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
2025                                         hdev->dv_buf_size;
2026         else
2027                 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
2028                                         + hdev->dv_buf_size;
2029
2030         shared_buf_tc = tc_num * aligned_mps + aligned_mps;
2031         shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
2032                              HCLGE_BUF_SIZE_UNIT);
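        /* Worked example with assumed values: mps = 1500 (aligned_mps = 1536),
         * dv_buf_size = 16384 and 4 TCs on a DCB-capable device give
         * shared_buf_min = 2 * 1536 + 16384 = 19456 and
         * shared_buf_tc = 5 * 1536 = 7680, so shared_std = 19456.
         */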
2033
2034         rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
2035         if (rx_all < rx_priv + shared_std)
2036                 return false;
2037
2038         shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
2039         buf_alloc->s_buf.buf_size = shared_buf;
2040         if (hnae3_dev_dcb_supported(hdev)) {
2041                 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
2042                 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
2043                         - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
2044                                   HCLGE_BUF_SIZE_UNIT);
2045         } else {
2046                 buf_alloc->s_buf.self.high = aligned_mps +
2047                                                 HCLGE_NON_DCB_ADDITIONAL_BUF;
2048                 buf_alloc->s_buf.self.low = aligned_mps;
2049         }
2050
2051         if (hnae3_dev_dcb_supported(hdev)) {
2052                 hi_thrd = shared_buf - hdev->dv_buf_size;
2053
2054                 if (tc_num <= NEED_RESERVE_TC_NUM)
2055                         hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
2056                                         / BUF_MAX_PERCENT;
2057
2058                 if (tc_num)
2059                         hi_thrd = hi_thrd / tc_num;
2060
2061                 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
2062                 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
2063                 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
2064         } else {
2065                 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
2066                 lo_thrd = aligned_mps;
2067         }
2068
2069         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2070                 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
2071                 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
2072         }
2073
2074         return true;
2075 }
2076
2077 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
2078                                 struct hclge_pkt_buf_alloc *buf_alloc)
2079 {
2080         u32 i, total_size;
2081
2082         total_size = hdev->pkt_buf_size;
2083
2084         /* alloc tx buffer for all enabled tc */
2085         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2086                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2087
2088                 if (hdev->hw_tc_map & BIT(i)) {
2089                         if (total_size < hdev->tx_buf_size)
2090                                 return -ENOMEM;
2091
2092                         priv->tx_buf_size = hdev->tx_buf_size;
2093                 } else {
2094                         priv->tx_buf_size = 0;
2095                 }
2096
2097                 total_size -= priv->tx_buf_size;
2098         }
2099
2100         return 0;
2101 }
2102
2103 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
2104                                   struct hclge_pkt_buf_alloc *buf_alloc)
2105 {
2106         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2107         u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2108         unsigned int i;
2109
2110         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2111                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2112
2113                 priv->enable = 0;
2114                 priv->wl.low = 0;
2115                 priv->wl.high = 0;
2116                 priv->buf_size = 0;
2117
2118                 if (!(hdev->hw_tc_map & BIT(i)))
2119                         continue;
2120
2121                 priv->enable = 1;
2122
2123                 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
2124                         priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
2125                         priv->wl.high = roundup(priv->wl.low + aligned_mps,
2126                                                 HCLGE_BUF_SIZE_UNIT);
2127                 } else {
2128                         priv->wl.low = 0;
2129                         priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
2130                                         aligned_mps;
2131                 }
2132
2133                 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
2134         }
2135
2136         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2137 }
2138
2139 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
2140                                           struct hclge_pkt_buf_alloc *buf_alloc)
2141 {
2142         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2143         int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
2144         int i;
2145
2146         /* let the last TC be cleared first */
2147         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2148                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2149                 unsigned int mask = BIT((unsigned int)i);
2150
2151                 if (hdev->hw_tc_map & mask &&
2152                     !(hdev->tm_info.hw_pfc_map & mask)) {
2153                         /* Clear the private buffer of this non-pfc TC */
2154                         priv->wl.low = 0;
2155                         priv->wl.high = 0;
2156                         priv->buf_size = 0;
2157                         priv->enable = 0;
2158                         no_pfc_priv_num--;
2159                 }
2160
2161                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2162                     no_pfc_priv_num == 0)
2163                         break;
2164         }
2165
2166         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2167 }
2168
2169 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2170                                         struct hclge_pkt_buf_alloc *buf_alloc)
2171 {
2172         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2173         int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2174         int i;
2175
2176         /* let the last TC be cleared first */
2177         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2178                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2179                 unsigned int mask = BIT((unsigned int)i);
2180
2181                 if (hdev->hw_tc_map & mask &&
2182                     hdev->tm_info.hw_pfc_map & mask) {
2183                         /* Reduce the number of pfc TCs with a private buffer */
2184                         priv->wl.low = 0;
2185                         priv->enable = 0;
2186                         priv->wl.high = 0;
2187                         priv->buf_size = 0;
2188                         pfc_priv_num--;
2189                 }
2190
2191                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2192                     pfc_priv_num == 0)
2193                         break;
2194         }
2195
2196         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2197 }
2198
2199 static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2200                                       struct hclge_pkt_buf_alloc *buf_alloc)
2201 {
2202 #define COMPENSATE_BUFFER       0x3C00
2203 #define COMPENSATE_HALF_MPS_NUM 5
2204 #define PRIV_WL_GAP             0x1800
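/* Worked example with assumed values: mps = 1500 (half_mps = 750) and
 * dv_buf_size = 16384 give min_rx_priv = 16384 + 0x3C00 + 5 * 750 = 35494,
 * rounded up to 35584, a multiple of HCLGE_BUF_SIZE_UNIT.
 */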
2205
2206         u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2207         u32 tc_num = hclge_get_tc_num(hdev);
2208         u32 half_mps = hdev->mps >> 1;
2209         u32 min_rx_priv;
2210         unsigned int i;
2211
2212         if (tc_num)
2213                 rx_priv = rx_priv / tc_num;
2214
2215         if (tc_num <= NEED_RESERVE_TC_NUM)
2216                 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2217
2218         min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2219                         COMPENSATE_HALF_MPS_NUM * half_mps;
2220         min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2221         rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2222         if (rx_priv < min_rx_priv)
2223                 return false;
2224
2225         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2226                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2227
2228                 priv->enable = 0;
2229                 priv->wl.low = 0;
2230                 priv->wl.high = 0;
2231                 priv->buf_size = 0;
2232
2233                 if (!(hdev->hw_tc_map & BIT(i)))
2234                         continue;
2235
2236                 priv->enable = 1;
2237                 priv->buf_size = rx_priv;
2238                 priv->wl.high = rx_priv - hdev->dv_buf_size;
2239                 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2240         }
2241
2242         buf_alloc->s_buf.buf_size = 0;
2243
2244         return true;
2245 }
2246
2247 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2248  * @hdev: pointer to struct hclge_dev
2249  * @buf_alloc: pointer to buffer calculation data
2250  * @return: 0: calculate successful, negative: fail
2251  * @return: 0 on successful calculation, negative on failure
2252 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2253                                 struct hclge_pkt_buf_alloc *buf_alloc)
2254 {
2255         /* When DCB is not supported, rx private buffer is not allocated. */
2256         if (!hnae3_dev_dcb_supported(hdev)) {
2257                 u32 rx_all = hdev->pkt_buf_size;
2258
2259                 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2260                 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2261                         return -ENOMEM;
2262
2263                 return 0;
2264         }
2265
2266         if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2267                 return 0;
2268
2269         if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2270                 return 0;
2271
2272         /* try to decrease the buffer size */
2273         if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2274                 return 0;
2275
2276         if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2277                 return 0;
2278
2279         if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2280                 return 0;
2281
2282         return -ENOMEM;
2283 }
2284
2285 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2286                                    struct hclge_pkt_buf_alloc *buf_alloc)
2287 {
2288         struct hclge_rx_priv_buff_cmd *req;
2289         struct hclge_desc desc;
2290         int ret;
2291         int i;
2292
2293         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2294         req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2295
2296         /* Alloc private buffer TCs */
2297         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2298                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2299
2300                 req->buf_num[i] =
2301                         cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2302                 req->buf_num[i] |=
2303                         cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2304         }
2305
2306         req->shared_buf =
2307                 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2308                             (1 << HCLGE_TC0_PRI_BUF_EN_B));
2309
2310         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2311         if (ret)
2312                 dev_err(&hdev->pdev->dev,
2313                         "rx private buffer alloc cmd failed %d\n", ret);
2314
2315         return ret;
2316 }
2317
2318 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2319                                    struct hclge_pkt_buf_alloc *buf_alloc)
2320 {
2321         struct hclge_rx_priv_wl_buf *req;
2322         struct hclge_priv_buf *priv;
2323         struct hclge_desc desc[2];
2324         int i, j;
2325         int ret;
2326
2327         for (i = 0; i < 2; i++) {
2328                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2329                                            false);
2330                 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2331
2332                 /* The first descriptor sets the NEXT bit to 1 */
2333                 if (i == 0)
2334                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2335                 else
2336                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2337
2338                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2339                         u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2340
2341                         priv = &buf_alloc->priv_buf[idx];
2342                         req->tc_wl[j].high =
2343                                 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2344                         req->tc_wl[j].high |=
2345                                 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2346                         req->tc_wl[j].low =
2347                                 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2348                         req->tc_wl[j].low |=
2349                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2350                 }
2351         }
2352
2353         /* Send 2 descriptors at one time */
2354         ret = hclge_cmd_send(&hdev->hw, desc, 2);
2355         if (ret)
2356                 dev_err(&hdev->pdev->dev,
2357                         "rx private waterline config cmd failed %d\n",
2358                         ret);
2359         return ret;
2360 }
2361
2362 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2363                                     struct hclge_pkt_buf_alloc *buf_alloc)
2364 {
2365         struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2366         struct hclge_rx_com_thrd *req;
2367         struct hclge_desc desc[2];
2368         struct hclge_tc_thrd *tc;
2369         int i, j;
2370         int ret;
2371
2372         for (i = 0; i < 2; i++) {
2373                 hclge_cmd_setup_basic_desc(&desc[i],
2374                                            HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2375                 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2376
2377                 /* The first descriptor sets the NEXT bit to 1 */
2378                 if (i == 0)
2379                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2380                 else
2381                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2382
2383                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2384                         tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2385
2386                         req->com_thrd[j].high =
2387                                 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2388                         req->com_thrd[j].high |=
2389                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2390                         req->com_thrd[j].low =
2391                                 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2392                         req->com_thrd[j].low |=
2393                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2394                 }
2395         }
2396
2397         /* Send 2 descriptors at one time */
2398         ret = hclge_cmd_send(&hdev->hw, desc, 2);
2399         if (ret)
2400                 dev_err(&hdev->pdev->dev,
2401                         "common threshold config cmd failed %d\n", ret);
2402         return ret;
2403 }
2404
2405 static int hclge_common_wl_config(struct hclge_dev *hdev,
2406                                   struct hclge_pkt_buf_alloc *buf_alloc)
2407 {
2408         struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2409         struct hclge_rx_com_wl *req;
2410         struct hclge_desc desc;
2411         int ret;
2412
2413         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2414
2415         req = (struct hclge_rx_com_wl *)desc.data;
2416         req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2417         req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2418
2419         req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2420         req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2421
2422         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2423         if (ret)
2424                 dev_err(&hdev->pdev->dev,
2425                         "common waterline config cmd failed %d\n", ret);
2426
2427         return ret;
2428 }
2429
2430 int hclge_buffer_alloc(struct hclge_dev *hdev)
2431 {
2432         struct hclge_pkt_buf_alloc *pkt_buf;
2433         int ret;
2434
2435         pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2436         if (!pkt_buf)
2437                 return -ENOMEM;
2438
2439         ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2440         if (ret) {
2441                 dev_err(&hdev->pdev->dev,
2442                         "could not calc tx buffer size for all TCs %d\n", ret);
2443                 goto out;
2444         }
2445
2446         ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2447         if (ret) {
2448                 dev_err(&hdev->pdev->dev,
2449                         "could not alloc tx buffers %d\n", ret);
2450                 goto out;
2451         }
2452
2453         ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2454         if (ret) {
2455                 dev_err(&hdev->pdev->dev,
2456                         "could not calc rx priv buffer size for all TCs %d\n",
2457                         ret);
2458                 goto out;
2459         }
2460
2461         ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2462         if (ret) {
2463                 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2464                         ret);
2465                 goto out;
2466         }
2467
2468         if (hnae3_dev_dcb_supported(hdev)) {
2469                 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2470                 if (ret) {
2471                         dev_err(&hdev->pdev->dev,
2472                                 "could not configure rx private waterline %d\n",
2473                                 ret);
2474                         goto out;
2475                 }
2476
2477                 ret = hclge_common_thrd_config(hdev, pkt_buf);
2478                 if (ret) {
2479                         dev_err(&hdev->pdev->dev,
2480                                 "could not configure common threshold %d\n",
2481                                 ret);
2482                         goto out;
2483                 }
2484         }
2485
2486         ret = hclge_common_wl_config(hdev, pkt_buf);
2487         if (ret)
2488                 dev_err(&hdev->pdev->dev,
2489                         "could not configure common waterline %d\n", ret);
2490
2491 out:
2492         kfree(pkt_buf);
2493         return ret;
2494 }
2495
2496 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2497 {
2498         struct hnae3_handle *roce = &vport->roce;
2499         struct hnae3_handle *nic = &vport->nic;
2500         struct hclge_dev *hdev = vport->back;
2501
2502         roce->rinfo.num_vectors = vport->back->num_roce_msi;
2503
2504         if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
2505                 return -EINVAL;
2506
2507         roce->rinfo.base_vector = hdev->roce_base_vector;
2508
2509         roce->rinfo.netdev = nic->kinfo.netdev;
2510         roce->rinfo.roce_io_base = hdev->hw.io_base;
2511         roce->rinfo.roce_mem_base = hdev->hw.mem_base;
2512
2513         roce->pdev = nic->pdev;
2514         roce->ae_algo = nic->ae_algo;
2515         roce->numa_node_mask = nic->numa_node_mask;
2516
2517         return 0;
2518 }
2519
2520 static int hclge_init_msi(struct hclge_dev *hdev)
2521 {
2522         struct pci_dev *pdev = hdev->pdev;
2523         int vectors;
2524         int i;
2525
2526         vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2527                                         hdev->num_msi,
2528                                         PCI_IRQ_MSI | PCI_IRQ_MSIX);
2529         if (vectors < 0) {
2530                 dev_err(&pdev->dev,
2531                         "failed(%d) to allocate MSI/MSI-X vectors\n",
2532                         vectors);
2533                 return vectors;
2534         }
2535         if (vectors < hdev->num_msi)
2536                 dev_warn(&hdev->pdev->dev,
2537                          "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2538                          hdev->num_msi, vectors);
2539
2540         hdev->num_msi = vectors;
2541         hdev->num_msi_left = vectors;
2542
2543         hdev->base_msi_vector = pdev->irq;
2544         hdev->roce_base_vector = hdev->base_msi_vector +
2545                                 hdev->num_nic_msi;
2546
2547         hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2548                                            sizeof(u16), GFP_KERNEL);
2549         if (!hdev->vector_status) {
2550                 pci_free_irq_vectors(pdev);
2551                 return -ENOMEM;
2552         }
2553
2554         for (i = 0; i < hdev->num_msi; i++)
2555                 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2556
2557         hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2558                                         sizeof(int), GFP_KERNEL);
2559         if (!hdev->vector_irq) {
2560                 pci_free_irq_vectors(pdev);
2561                 return -ENOMEM;
2562         }
2563
2564         return 0;
2565 }
2566
2567 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2568 {
2569         if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2570                 duplex = HCLGE_MAC_FULL;
2571
2572         return duplex;
2573 }
2574
2575 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2576                                       u8 duplex)
2577 {
2578         struct hclge_config_mac_speed_dup_cmd *req;
2579         struct hclge_desc desc;
2580         int ret;
2581
2582         req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2583
2584         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2585
2586         if (duplex)
2587                 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2588
2589         switch (speed) {
2590         case HCLGE_MAC_SPEED_10M:
2591                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2592                                 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_10M);
2593                 break;
2594         case HCLGE_MAC_SPEED_100M:
2595                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2596                                 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_100M);
2597                 break;
2598         case HCLGE_MAC_SPEED_1G:
2599                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2600                                 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_1G);
2601                 break;
2602         case HCLGE_MAC_SPEED_10G:
2603                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2604                                 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_10G);
2605                 break;
2606         case HCLGE_MAC_SPEED_25G:
2607                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2608                                 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_25G);
2609                 break;
2610         case HCLGE_MAC_SPEED_40G:
2611                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2612                                 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_40G);
2613                 break;
2614         case HCLGE_MAC_SPEED_50G:
2615                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2616                                 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_50G);
2617                 break;
2618         case HCLGE_MAC_SPEED_100G:
2619                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2620                                 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_100G);
2621                 break;
2622         case HCLGE_MAC_SPEED_200G:
2623                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2624                                 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_200G);
2625                 break;
2626         default:
2627                 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2628                 return -EINVAL;
2629         }
2630
2631         hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2632                       1);
2633
2634         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2635         if (ret) {
2636                 dev_err(&hdev->pdev->dev,
2637                         "mac speed/duplex config cmd failed %d.\n", ret);
2638                 return ret;
2639         }
2640
2641         return 0;
2642 }
2643
2644 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2645 {
2646         struct hclge_mac *mac = &hdev->hw.mac;
2647         int ret;
2648
2649         duplex = hclge_check_speed_dup(duplex, speed);
2650         if (!mac->support_autoneg && mac->speed == speed &&
2651             mac->duplex == duplex)
2652                 return 0;
2653
2654         ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2655         if (ret)
2656                 return ret;
2657
2658         hdev->hw.mac.speed = speed;
2659         hdev->hw.mac.duplex = duplex;
2660
2661         return 0;
2662 }
2663
2664 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2665                                      u8 duplex)
2666 {
2667         struct hclge_vport *vport = hclge_get_vport(handle);
2668         struct hclge_dev *hdev = vport->back;
2669
2670         return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2671 }
2672
2673 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2674 {
2675         struct hclge_config_auto_neg_cmd *req;
2676         struct hclge_desc desc;
2677         u32 flag = 0;
2678         int ret;
2679
2680         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2681
2682         req = (struct hclge_config_auto_neg_cmd *)desc.data;
2683         if (enable)
2684                 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2685         req->cfg_an_cmd_flag = cpu_to_le32(flag);
2686
2687         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2688         if (ret)
2689                 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2690                         ret);
2691
2692         return ret;
2693 }
2694
2695 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2696 {
2697         struct hclge_vport *vport = hclge_get_vport(handle);
2698         struct hclge_dev *hdev = vport->back;
2699
2700         if (!hdev->hw.mac.support_autoneg) {
2701                 if (enable) {
2702                         dev_err(&hdev->pdev->dev,
2703                                 "autoneg is not supported by current port\n");
2704                         return -EOPNOTSUPP;
2705                 } else {
2706                         return 0;
2707                 }
2708         }
2709
2710         return hclge_set_autoneg_en(hdev, enable);
2711 }
2712
2713 static int hclge_get_autoneg(struct hnae3_handle *handle)
2714 {
2715         struct hclge_vport *vport = hclge_get_vport(handle);
2716         struct hclge_dev *hdev = vport->back;
2717         struct phy_device *phydev = hdev->hw.mac.phydev;
2718
2719         if (phydev)
2720                 return phydev->autoneg;
2721
2722         return hdev->hw.mac.autoneg;
2723 }
2724
2725 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2726 {
2727         struct hclge_vport *vport = hclge_get_vport(handle);
2728         struct hclge_dev *hdev = vport->back;
2729         int ret;
2730
2731         dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2732
2733         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2734         if (ret)
2735                 return ret;
2736         return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2737 }
2738
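/* Temporarily disable or re-enable autoneg in hardware; a no-op when the
 * MAC does not support autoneg or autoneg is currently turned off.
 */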
2739 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2740 {
2741         struct hclge_vport *vport = hclge_get_vport(handle);
2742         struct hclge_dev *hdev = vport->back;
2743
2744         if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2745                 return hclge_set_autoneg_en(hdev, !halt);
2746
2747         return 0;
2748 }
2749
2750 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2751 {
2752         struct hclge_config_fec_cmd *req;
2753         struct hclge_desc desc;
2754         int ret;
2755
2756         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2757
2758         req = (struct hclge_config_fec_cmd *)desc.data;
2759         if (fec_mode & BIT(HNAE3_FEC_AUTO))
2760                 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2761         if (fec_mode & BIT(HNAE3_FEC_RS))
2762                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2763                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2764         if (fec_mode & BIT(HNAE3_FEC_BASER))
2765                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2766                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2767
2768         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2769         if (ret)
2770                 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2771
2772         return ret;
2773 }
2774
2775 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2776 {
2777         struct hclge_vport *vport = hclge_get_vport(handle);
2778         struct hclge_dev *hdev = vport->back;
2779         struct hclge_mac *mac = &hdev->hw.mac;
2780         int ret;
2781
2782         if (fec_mode && !(mac->fec_ability & fec_mode)) {
2783                 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2784                 return -EINVAL;
2785         }
2786
2787         ret = hclge_set_fec_hw(hdev, fec_mode);
2788         if (ret)
2789                 return ret;
2790
2791         mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2792         return 0;
2793 }
2794
2795 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2796                           u8 *fec_mode)
2797 {
2798         struct hclge_vport *vport = hclge_get_vport(handle);
2799         struct hclge_dev *hdev = vport->back;
2800         struct hclge_mac *mac = &hdev->hw.mac;
2801
2802         if (fec_ability)
2803                 *fec_ability = mac->fec_ability;
2804         if (fec_mode)
2805                 *fec_mode = mac->fec_mode;
2806 }
2807
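/* Bring the MAC to its initial state: speed/duplex, autoneg, user-defined
 * FEC mode, MTU, default loopback and packet buffer allocation.
 */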
2808 static int hclge_mac_init(struct hclge_dev *hdev)
2809 {
2810         struct hclge_mac *mac = &hdev->hw.mac;
2811         int ret;
2812
2813         hdev->support_sfp_query = true;
2814         hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2815         ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2816                                          hdev->hw.mac.duplex);
2817         if (ret)
2818                 return ret;
2819
2820         if (hdev->hw.mac.support_autoneg) {
2821                 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2822                 if (ret)
2823                         return ret;
2824         }
2825
2826         mac->link = 0;
2827
2828         if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2829                 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2830                 if (ret)
2831                         return ret;
2832         }
2833
2834         ret = hclge_set_mac_mtu(hdev, hdev->mps);
2835         if (ret) {
2836                 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2837                 return ret;
2838         }
2839
2840         ret = hclge_set_default_loopback(hdev);
2841         if (ret)
2842                 return ret;
2843
2844         ret = hclge_buffer_alloc(hdev);
2845         if (ret)
2846                 dev_err(&hdev->pdev->dev,
2847                         "allocate buffer fail, ret=%d\n", ret);
2848
2849         return ret;
2850 }
2851
2852 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2853 {
2854         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2855             !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2856                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2857                                     hclge_wq, &hdev->service_task, 0);
2858 }
2859
2860 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2861 {
2862         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2863             !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2864                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2865                                     hclge_wq, &hdev->service_task, 0);
2866 }
2867
2868 static void hclge_errhand_task_schedule(struct hclge_dev *hdev)
2869 {
2870         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2871             !test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
2872                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2873                                     hclge_wq, &hdev->service_task, 0);
2874 }
2875
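/* Schedule the periodic service task on the misc vector's affinity CPU,
 * unless the device is being removed or the last reset has failed.
 */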
2876 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2877 {
2878         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2879             !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2880                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2881                                     hclge_wq, &hdev->service_task,
2882                                     delay_time);
2883 }
2884
2885 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
2886 {
2887         struct hclge_link_status_cmd *req;
2888         struct hclge_desc desc;
2889         int ret;
2890
2891         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2892         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2893         if (ret) {
2894                 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2895                         ret);
2896                 return ret;
2897         }
2898
2899         req = (struct hclge_link_status_cmd *)desc.data;
2900         *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2901                 HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
2902
2903         return 0;
2904 }
2905
2906 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
2907 {
2908         struct phy_device *phydev = hdev->hw.mac.phydev;
2909
2910         *link_status = HCLGE_LINK_STATUS_DOWN;
2911
2912         if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2913                 return 0;
2914
2915         if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
2916                 return 0;
2917
2918         return hclge_get_mac_link_status(hdev, link_status);
2919 }
2920
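/* Push the current PF link status to every alive VF whose link state is
 * configured as IFLA_VF_LINK_STATE_AUTO.
 */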
2921 static void hclge_push_link_status(struct hclge_dev *hdev)
2922 {
2923         struct hclge_vport *vport;
2924         int ret;
2925         u16 i;
2926
2927         for (i = 0; i < pci_num_vf(hdev->pdev); i++) {
2928                 vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM];
2929
2930                 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) ||
2931                     vport->vf_info.link_state != IFLA_VF_LINK_STATE_AUTO)
2932                         continue;
2933
2934                 ret = hclge_push_vf_link_status(vport);
2935                 if (ret) {
2936                         dev_err(&hdev->pdev->dev,
2937                                 "failed to push link status to vf%u, ret = %d\n",
2938                                 i, ret);
2939                 }
2940         }
2941 }
2942
2943 static void hclge_update_link_status(struct hclge_dev *hdev)
2944 {
2945         struct hnae3_handle *rhandle = &hdev->vport[0].roce;
2946         struct hnae3_handle *handle = &hdev->vport[0].nic;
2947         struct hnae3_client *rclient = hdev->roce_client;
2948         struct hnae3_client *client = hdev->nic_client;
2949         int state;
2950         int ret;
2951
2952         if (!client)
2953                 return;
2954
2955         if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2956                 return;
2957
2958         ret = hclge_get_mac_phy_link(hdev, &state);
2959         if (ret) {
2960                 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2961                 return;
2962         }
2963
2964         if (state != hdev->hw.mac.link) {
2965                 hdev->hw.mac.link = state;
2966                 client->ops->link_status_change(handle, state);
2967                 hclge_config_mac_tnl_int(hdev, state);
2968                 if (rclient && rclient->ops->link_status_change)
2969                         rclient->ops->link_status_change(rhandle, state);
2970
2971                 hclge_push_link_status(hdev);
2972         }
2973
2974         clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2975 }
2976
2977 static void hclge_update_port_capability(struct hclge_dev *hdev,
2978                                          struct hclge_mac *mac)
2979 {
2980         if (hnae3_dev_fec_supported(hdev))
2981                 /* update fec ability by speed */
2982                 hclge_convert_setting_fec(mac);
2983
2984         /* firmware cannot identify the backplane type, the media type
2985          * read from the configuration helps to determine it
2986          */
2987         if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2988             mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2989                 mac->module_type = HNAE3_MODULE_TYPE_KR;
2990         else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2991                 mac->module_type = HNAE3_MODULE_TYPE_TP;
2992
2993         if (mac->support_autoneg) {
2994                 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2995                 linkmode_copy(mac->advertising, mac->supported);
2996         } else {
2997                 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2998                                    mac->supported);
2999                 linkmode_zero(mac->advertising);
3000         }
3001 }
3002
3003 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
3004 {
3005         struct hclge_sfp_info_cmd *resp;
3006         struct hclge_desc desc;
3007         int ret;
3008
3009         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
3010         resp = (struct hclge_sfp_info_cmd *)desc.data;
3011         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3012         if (ret == -EOPNOTSUPP) {
3013                 dev_warn(&hdev->pdev->dev,
3014                          "IMP does not support getting SFP speed %d\n", ret);
3015                 return ret;
3016         } else if (ret) {
3017                 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
3018                 return ret;
3019         }
3020
3021         *speed = le32_to_cpu(resp->speed);
3022
3023         return 0;
3024 }
3025
3026 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
3027 {
3028         struct hclge_sfp_info_cmd *resp;
3029         struct hclge_desc desc;
3030         int ret;
3031
3032         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
3033         resp = (struct hclge_sfp_info_cmd *)desc.data;
3034
3035         resp->query_type = QUERY_ACTIVE_SPEED;
3036
3037         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3038         if (ret == -EOPNOTSUPP) {
3039                 dev_warn(&hdev->pdev->dev,
3040                          "IMP does not support getting SFP info %d\n", ret);
3041                 return ret;
3042         } else if (ret) {
3043                 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
3044                 return ret;
3045         }
3046
3047         /* In some cases, the mac speed queried from IMP may be 0; it should
3048          * not be set to mac->speed.
3049          */
3050         if (!le32_to_cpu(resp->speed))
3051                 return 0;
3052
3053         mac->speed = le32_to_cpu(resp->speed);
3054         /* if resp->speed_ability is 0, it means the firmware is an old
3055          * version, so do not update these params
3056          */
3057         if (resp->speed_ability) {
3058                 mac->module_type = le32_to_cpu(resp->module_type);
3059                 mac->speed_ability = le32_to_cpu(resp->speed_ability);
3060                 mac->autoneg = resp->autoneg;
3061                 mac->support_autoneg = resp->autoneg_ability;
3062                 mac->speed_type = QUERY_ACTIVE_SPEED;
3063                 if (!resp->active_fec)
3064                         mac->fec_mode = 0;
3065                 else
3066                         mac->fec_mode = BIT(resp->active_fec);
3067         } else {
3068                 mac->speed_type = QUERY_SFP_SPEED;
3069         }
3070
3071         return 0;
3072 }
3073
3074 static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle,
3075                                         struct ethtool_link_ksettings *cmd)
3076 {
3077         struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3078         struct hclge_vport *vport = hclge_get_vport(handle);
3079         struct hclge_phy_link_ksetting_0_cmd *req0;
3080         struct hclge_phy_link_ksetting_1_cmd *req1;
3081         u32 supported, advertising, lp_advertising;
3082         struct hclge_dev *hdev = vport->back;
3083         int ret;
3084
3085         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3086                                    true);
3087         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3088         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3089                                    true);
3090
3091         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3092         if (ret) {
3093                 dev_err(&hdev->pdev->dev,
3094                         "failed to get phy link ksetting, ret = %d.\n", ret);
3095                 return ret;
3096         }
3097
3098         req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3099         cmd->base.autoneg = req0->autoneg;
3100         cmd->base.speed = le32_to_cpu(req0->speed);
3101         cmd->base.duplex = req0->duplex;
3102         cmd->base.port = req0->port;
3103         cmd->base.transceiver = req0->transceiver;
3104         cmd->base.phy_address = req0->phy_address;
3105         cmd->base.eth_tp_mdix = req0->eth_tp_mdix;
3106         cmd->base.eth_tp_mdix_ctrl = req0->eth_tp_mdix_ctrl;
3107         supported = le32_to_cpu(req0->supported);
3108         advertising = le32_to_cpu(req0->advertising);
3109         lp_advertising = le32_to_cpu(req0->lp_advertising);
3110         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
3111                                                 supported);
3112         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
3113                                                 advertising);
3114         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
3115                                                 lp_advertising);
3116
3117         req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3118         cmd->base.master_slave_cfg = req1->master_slave_cfg;
3119         cmd->base.master_slave_state = req1->master_slave_state;
3120
3121         return 0;
3122 }
3123
3124 static int
3125 hclge_set_phy_link_ksettings(struct hnae3_handle *handle,
3126                              const struct ethtool_link_ksettings *cmd)
3127 {
3128         struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3129         struct hclge_vport *vport = hclge_get_vport(handle);
3130         struct hclge_phy_link_ksetting_0_cmd *req0;
3131         struct hclge_phy_link_ksetting_1_cmd *req1;
3132         struct hclge_dev *hdev = vport->back;
3133         u32 advertising;
3134         int ret;
3135
3136         if (cmd->base.autoneg == AUTONEG_DISABLE &&
3137             ((cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) ||
3138              (cmd->base.duplex != DUPLEX_HALF &&
3139               cmd->base.duplex != DUPLEX_FULL)))
3140                 return -EINVAL;
3141
3142         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3143                                    false);
3144         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3145         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3146                                    false);
3147
3148         req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3149         req0->autoneg = cmd->base.autoneg;
3150         req0->speed = cpu_to_le32(cmd->base.speed);
3151         req0->duplex = cmd->base.duplex;
3152         ethtool_convert_link_mode_to_legacy_u32(&advertising,
3153                                                 cmd->link_modes.advertising);
3154         req0->advertising = cpu_to_le32(advertising);
3155         req0->eth_tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
3156
3157         req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3158         req1->master_slave_cfg = cmd->base.master_slave_cfg;
3159
3160         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3161         if (ret) {
3162                 dev_err(&hdev->pdev->dev,
3163                         "failed to set phy link ksettings, ret = %d.\n", ret);
3164                 return ret;
3165         }
3166
3167         hdev->hw.mac.autoneg = cmd->base.autoneg;
3168         hdev->hw.mac.speed = cmd->base.speed;
3169         hdev->hw.mac.duplex = cmd->base.duplex;
3170         linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising);
3171
3172         return 0;
3173 }
3174
3175 static int hclge_update_tp_port_info(struct hclge_dev *hdev)
3176 {
3177         struct ethtool_link_ksettings cmd;
3178         int ret;
3179
3180         if (!hnae3_dev_phy_imp_supported(hdev))
3181                 return 0;
3182
3183         ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd);
3184         if (ret)
3185                 return ret;
3186
3187         hdev->hw.mac.autoneg = cmd.base.autoneg;
3188         hdev->hw.mac.speed = cmd.base.speed;
3189         hdev->hw.mac.duplex = cmd.base.duplex;
3190
3191         return 0;
3192 }
3193
3194 static int hclge_tp_port_init(struct hclge_dev *hdev)
3195 {
3196         struct ethtool_link_ksettings cmd;
3197
3198         if (!hnae3_dev_phy_imp_supported(hdev))
3199                 return 0;
3200
3201         cmd.base.autoneg = hdev->hw.mac.autoneg;
3202         cmd.base.speed = hdev->hw.mac.speed;
3203         cmd.base.duplex = hdev->hw.mac.duplex;
3204         linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising);
3205
3206         return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd);
3207 }
3208
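/* Refresh port information: copper ports are queried through the PHY
 * (IMP), other ports through the SFP commands, and the MAC speed/duplex
 * is reconfigured from the queried values when needed.
 */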
3209 static int hclge_update_port_info(struct hclge_dev *hdev)
3210 {
3211         struct hclge_mac *mac = &hdev->hw.mac;
3212         int speed = HCLGE_MAC_SPEED_UNKNOWN;
3213         int ret;
3214
3215         /* get the port info from SFP cmd if not copper port */
3216         if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
3217                 return hclge_update_tp_port_info(hdev);
3218
3219         /* if IMP does not support getting SFP/qSFP info, return directly */
3220         if (!hdev->support_sfp_query)
3221                 return 0;
3222
3223         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
3224                 ret = hclge_get_sfp_info(hdev, mac);
3225         else
3226                 ret = hclge_get_sfp_speed(hdev, &speed);
3227
3228         if (ret == -EOPNOTSUPP) {
3229                 hdev->support_sfp_query = false;
3230                 return ret;
3231         } else if (ret) {
3232                 return ret;
3233         }
3234
3235         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
3236                 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
3237                         hclge_update_port_capability(hdev, mac);
3238                         return 0;
3239                 }
3240                 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
3241                                                HCLGE_MAC_FULL);
3242         } else {
3243                 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
3244                         return 0; /* do nothing if no SFP */
3245
3246                 /* must config full duplex for SFP */
3247                 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
3248         }
3249 }
3250
3251 static int hclge_get_status(struct hnae3_handle *handle)
3252 {
3253         struct hclge_vport *vport = hclge_get_vport(handle);
3254         struct hclge_dev *hdev = vport->back;
3255
3256         hclge_update_link_status(hdev);
3257
3258         return hdev->hw.mac.link;
3259 }
3260
3261 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
3262 {
3263         if (!pci_num_vf(hdev->pdev)) {
3264                 dev_err(&hdev->pdev->dev,
3265                         "SRIOV is disabled, cannot get vport(%d) info.\n", vf);
3266                 return NULL;
3267         }
3268
3269         if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3270                 dev_err(&hdev->pdev->dev,
3271                         "vf id(%d) is out of range(0 <= vfid < %d)\n",
3272                         vf, pci_num_vf(hdev->pdev));
3273                 return NULL;
3274         }
3275
3276         /* VFs start from index 1 in the vport array */
3277         vf += HCLGE_VF_VPORT_START_NUM;
3278         return &hdev->vport[vf];
3279 }
3280
3281 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3282                                struct ifla_vf_info *ivf)
3283 {
3284         struct hclge_vport *vport = hclge_get_vport(handle);
3285         struct hclge_dev *hdev = vport->back;
3286
3287         vport = hclge_get_vf_vport(hdev, vf);
3288         if (!vport)
3289                 return -EINVAL;
3290
3291         ivf->vf = vf;
3292         ivf->linkstate = vport->vf_info.link_state;
3293         ivf->spoofchk = vport->vf_info.spoofchk;
3294         ivf->trusted = vport->vf_info.trusted;
3295         ivf->min_tx_rate = 0;
3296         ivf->max_tx_rate = vport->vf_info.max_tx_rate;
3297         ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3298         ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3299         ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
3300         ether_addr_copy(ivf->mac, vport->vf_info.mac);
3301
3302         return 0;
3303 }
3304
3305 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3306                                    int link_state)
3307 {
3308         struct hclge_vport *vport = hclge_get_vport(handle);
3309         struct hclge_dev *hdev = vport->back;
3310         int link_state_old;
3311         int ret;
3312
3313         vport = hclge_get_vf_vport(hdev, vf);
3314         if (!vport)
3315                 return -EINVAL;
3316
3317         link_state_old = vport->vf_info.link_state;
3318         vport->vf_info.link_state = link_state;
3319
3320         ret = hclge_push_vf_link_status(vport);
3321         if (ret) {
3322                 vport->vf_info.link_state = link_state_old;
3323                 dev_err(&hdev->pdev->dev,
3324                         "failed to push vf%d link status, ret = %d\n", vf, ret);
3325         }
3326
3327         return ret;
3328 }
3329
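/* Decode the vector0 interrupt source. Reset events take priority, then
 * hardware error/MSI-X events, PTP and finally mailbox (CMDQ RX) events;
 * *clearval tells the caller which status bits to clear.
 */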
3330 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
3331 {
3332         u32 cmdq_src_reg, msix_src_reg, hw_err_src_reg;
3333
3334         /* fetch the events from their corresponding regs */
3335         cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
3336         msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
3337         hw_err_src_reg = hclge_read_dev(&hdev->hw,
3338                                         HCLGE_RAS_PF_OTHER_INT_STS_REG);
3339
3340         /* Assumption: if by any chance reset and mailbox events are reported
3341          * together, then we will only process the reset event in this pass and
3342          * defer the processing of the mailbox events. Since we would not have
3343          * cleared the RX CMDQ event this time, we will receive another
3344          * interrupt from the H/W just for the mailbox.
3345          *
3346          * check for vector0 reset event sources
3347          */
3348         if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
3349                 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3350                 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3351                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3352                 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3353                 hdev->rst_stats.imp_rst_cnt++;
3354                 return HCLGE_VECTOR0_EVENT_RST;
3355         }
3356
3357         if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
3358                 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3359                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3360                 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3361                 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3362                 hdev->rst_stats.global_rst_cnt++;
3363                 return HCLGE_VECTOR0_EVENT_RST;
3364         }
3365
3366         /* check for vector0 msix event and hardware error event source */
3367         if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK ||
3368             hw_err_src_reg & HCLGE_RAS_REG_ERR_MASK)
3369                 return HCLGE_VECTOR0_EVENT_ERR;
3370
3371         /* check for vector0 ptp event source */
3372         if (BIT(HCLGE_VECTOR0_REG_PTP_INT_B) & msix_src_reg) {
3373                 *clearval = msix_src_reg;
3374                 return HCLGE_VECTOR0_EVENT_PTP;
3375         }
3376
3377         /* check for vector0 mailbox(=CMDQ RX) event source */
3378         if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3379                 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3380                 *clearval = cmdq_src_reg;
3381                 return HCLGE_VECTOR0_EVENT_MBX;
3382         }
3383
3384         /* print other vector0 event source */
3385         dev_info(&hdev->pdev->dev,
3386                  "INT status: CMDQ(%#x) HW errors(%#x) other(%#x)\n",
3387                  cmdq_src_reg, hw_err_src_reg, msix_src_reg);
3388
3389         return HCLGE_VECTOR0_EVENT_OTHER;
3390 }
3391
3392 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3393                                     u32 regclr)
3394 {
3395         switch (event_type) {
3396         case HCLGE_VECTOR0_EVENT_PTP:
3397         case HCLGE_VECTOR0_EVENT_RST:
3398                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3399                 break;
3400         case HCLGE_VECTOR0_EVENT_MBX:
3401                 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3402                 break;
3403         default:
3404                 break;
3405         }
3406 }
3407
3408 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3409 {
3410         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3411                                 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3412                                 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3413                                 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3414         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3415 }
3416
3417 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3418 {
3419         writel(enable ? 1 : 0, vector->addr);
3420 }
3421
3422 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3423 {
3424         struct hclge_dev *hdev = data;
3425         unsigned long flags;
3426         u32 clearval = 0;
3427         u32 event_cause;
3428
3429         hclge_enable_vector(&hdev->misc_vector, false);
3430         event_cause = hclge_check_event_cause(hdev, &clearval);
3431
3432         /* vector 0 interrupt is shared with reset and mailbox source events. */
3433         switch (event_cause) {
3434         case HCLGE_VECTOR0_EVENT_ERR:
3435                 hclge_errhand_task_schedule(hdev);
3436                 break;
3437         case HCLGE_VECTOR0_EVENT_RST:
3438                 hclge_reset_task_schedule(hdev);
3439                 break;
3440         case HCLGE_VECTOR0_EVENT_PTP:
3441                 spin_lock_irqsave(&hdev->ptp->lock, flags);
3442                 hclge_ptp_clean_tx_hwts(hdev);
3443                 spin_unlock_irqrestore(&hdev->ptp->lock, flags);
3444                 break;
3445         case HCLGE_VECTOR0_EVENT_MBX:
3446                 /* If we are here, then either:
3447                  * 1. we are not handling any mbx task and none is
3448                  *    scheduled yet,
3449                  *                        OR
3450                  * 2. we could be handling a mbx task but nothing more is
3451                  *    scheduled.
3452                  * In both cases we should schedule the mbx task, as there are
3453                  * more mbx messages reported by this interrupt.
3454                  */
3455                 hclge_mbx_task_schedule(hdev);
3456                 break;
3457         default:
3458                 dev_warn(&hdev->pdev->dev,
3459                          "received unknown or unhandled event of vector0\n");
3460                 break;
3461         }
3462
3463         hclge_clear_event_cause(hdev, event_cause, clearval);
3464
3465         /* Enable interrupt if it is not caused by reset event or error event */
3466         if (event_cause == HCLGE_VECTOR0_EVENT_PTP ||
3467             event_cause == HCLGE_VECTOR0_EVENT_MBX ||
3468             event_cause == HCLGE_VECTOR0_EVENT_OTHER)
3469                 hclge_enable_vector(&hdev->misc_vector, true);
3470
3471         return IRQ_HANDLED;
3472 }
3473
3474 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3475 {
3476         if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3477                 dev_warn(&hdev->pdev->dev,
3478                          "vector(vector_id %d) has been freed.\n", vector_id);
3479                 return;
3480         }
3481
3482         hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3483         hdev->num_msi_left += 1;
3484         hdev->num_msi_used -= 1;
3485 }
3486
3487 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3488 {
3489         struct hclge_misc_vector *vector = &hdev->misc_vector;
3490
3491         vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3492
3493         vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3494         hdev->vector_status[0] = 0;
3495
3496         hdev->num_msi_left -= 1;
3497         hdev->num_msi_used += 1;
3498 }
3499
3500 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3501                                       const cpumask_t *mask)
3502 {
3503         struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3504                                               affinity_notify);
3505
3506         cpumask_copy(&hdev->affinity_mask, mask);
3507 }
3508
3509 static void hclge_irq_affinity_release(struct kref *ref)
3510 {
3511 }
3512
3513 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3514 {
3515         irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3516                               &hdev->affinity_mask);
3517
3518         hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3519         hdev->affinity_notify.release = hclge_irq_affinity_release;
3520         irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3521                                   &hdev->affinity_notify);
3522 }
3523
3524 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3525 {
3526         irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3527         irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3528 }
3529
3530 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3531 {
3532         int ret;
3533
3534         hclge_get_misc_vector(hdev);
3535
3536         /* this would be explicitly freed in the end */
3537         snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3538                  HCLGE_NAME, pci_name(hdev->pdev));
3539         ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3540                           0, hdev->misc_vector.name, hdev);
3541         if (ret) {
3542                 hclge_free_vector(hdev, 0);
3543                 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3544                         hdev->misc_vector.vector_irq);
3545         }
3546
3547         return ret;
3548 }
3549
3550 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3551 {
3552         free_irq(hdev->misc_vector.vector_irq, hdev);
3553         hclge_free_vector(hdev, 0);
3554 }
3555
3556 int hclge_notify_client(struct hclge_dev *hdev,
3557                         enum hnae3_reset_notify_type type)
3558 {
3559         struct hnae3_handle *handle = &hdev->vport[0].nic;
3560         struct hnae3_client *client = hdev->nic_client;
3561         int ret;
3562
3563         if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3564                 return 0;
3565
3566         if (!client->ops->reset_notify)
3567                 return -EOPNOTSUPP;
3568
3569         ret = client->ops->reset_notify(handle, type);
3570         if (ret)
3571                 dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
3572                         type, ret);
3573
3574         return ret;
3575 }
3576
3577 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3578                                     enum hnae3_reset_notify_type type)
3579 {
3580         struct hnae3_handle *handle = &hdev->vport[0].roce;
3581         struct hnae3_client *client = hdev->roce_client;
3582         int ret;
3583
3584         if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3585                 return 0;
3586
3587         if (!client->ops->reset_notify)
3588                 return -EOPNOTSUPP;
3589
3590         ret = client->ops->reset_notify(handle, type);
3591         if (ret)
3592                 dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)",
3593                         type, ret);
3594
3595         return ret;
3596 }
3597
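/* Poll the reset status register for the current reset type until the
 * in-progress bit clears, or give up after 350 polls of 100 ms each.
 */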
3598 static int hclge_reset_wait(struct hclge_dev *hdev)
3599 {
3600 #define HCLGE_RESET_WAIT_MS     100
3601 #define HCLGE_RESET_WAIT_CNT    350
3602
3603         u32 val, reg, reg_bit;
3604         u32 cnt = 0;
3605
3606         switch (hdev->reset_type) {
3607         case HNAE3_IMP_RESET:
3608                 reg = HCLGE_GLOBAL_RESET_REG;
3609                 reg_bit = HCLGE_IMP_RESET_BIT;
3610                 break;
3611         case HNAE3_GLOBAL_RESET:
3612                 reg = HCLGE_GLOBAL_RESET_REG;
3613                 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3614                 break;
3615         case HNAE3_FUNC_RESET:
3616                 reg = HCLGE_FUN_RST_ING;
3617                 reg_bit = HCLGE_FUN_RST_ING_B;
3618                 break;
3619         default:
3620                 dev_err(&hdev->pdev->dev,
3621                         "Wait for unsupported reset type: %d\n",
3622                         hdev->reset_type);
3623                 return -EINVAL;
3624         }
3625
3626         val = hclge_read_dev(&hdev->hw, reg);
3627         while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3628                 msleep(HCLGE_RESET_WAIT_MS);
3629                 val = hclge_read_dev(&hdev->hw, reg);
3630                 cnt++;
3631         }
3632
3633         if (cnt >= HCLGE_RESET_WAIT_CNT) {
3634                 dev_warn(&hdev->pdev->dev,
3635                          "Wait for reset timeout: %d\n", hdev->reset_type);
3636                 return -EBUSY;
3637         }
3638
3639         return 0;
3640 }
3641
3642 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3643 {
3644         struct hclge_vf_rst_cmd *req;
3645         struct hclge_desc desc;
3646
3647         req = (struct hclge_vf_rst_cmd *)desc.data;
3648         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3649         req->dest_vfid = func_id;
3650
3651         if (reset)
3652                 req->vf_rst = 0x1;
3653
3654         return hclge_cmd_send(&hdev->hw, &desc, 1);
3655 }
3656
3657 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3658 {
3659         int i;
3660
3661         for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++) {
3662                 struct hclge_vport *vport = &hdev->vport[i];
3663                 int ret;
3664
3665                 /* Send cmd to set/clear VF's FUNC_RST_ING */
3666                 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3667                 if (ret) {
3668                         dev_err(&hdev->pdev->dev,
3669                                 "set vf(%u) rst failed %d!\n",
3670                                 vport->vport_id - HCLGE_VF_VPORT_START_NUM,
3671                                 ret);
3672                         return ret;
3673                 }
3674
3675                 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3676                         continue;
3677
3678                 /* Inform VF to process the reset.
3679                  * hclge_inform_reset_assert_to_vf may fail if VF
3680                  * driver is not loaded.
3681                  */
3682                 ret = hclge_inform_reset_assert_to_vf(vport);
3683                 if (ret)
3684                         dev_warn(&hdev->pdev->dev,
3685                                  "inform reset to vf(%u) failed %d!\n",
3686                                  vport->vport_id - HCLGE_VF_VPORT_START_NUM,
3687                                  ret);
3688         }
3689
3690         return 0;
3691 }
3692
3693 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3694 {
3695         if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3696             test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3697             test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3698                 return;
3699
3700         hclge_mbx_handler(hdev);
3701
3702         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3703 }
3704
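/* Before asserting a PF or FLR reset, poll firmware until all VFs report
 * ready, handling mailbox requests in between so VFs can stop their IO.
 */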
3705 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3706 {
3707         struct hclge_pf_rst_sync_cmd *req;
3708         struct hclge_desc desc;
3709         int cnt = 0;
3710         int ret;
3711
3712         req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3713         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3714
3715         do {
3716                 /* VF needs to bring its netdev down via mbx during PF or FLR reset */
3717                 hclge_mailbox_service_task(hdev);
3718
3719                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3720                 /* for compatibility with old firmware, wait
3721                  * 100 ms for the VF to stop IO
3722                  */
3723                 if (ret == -EOPNOTSUPP) {
3724                         msleep(HCLGE_RESET_SYNC_TIME);
3725                         return;
3726                 } else if (ret) {
3727                         dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3728                                  ret);
3729                         return;
3730                 } else if (req->all_vf_ready) {
3731                         return;
3732                 }
3733                 msleep(HCLGE_PF_RESET_SYNC_TIME);
3734                 hclge_cmd_reuse_desc(&desc, true);
3735         } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3736
3737         dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3738 }
3739
3740 void hclge_report_hw_error(struct hclge_dev *hdev,
3741                            enum hnae3_hw_error_type type)
3742 {
3743         struct hnae3_client *client = hdev->nic_client;
3744
3745         if (!client || !client->ops->process_hw_error ||
3746             !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3747                 return;
3748
3749         client->ops->process_hw_error(&hdev->vport[0].nic, type);
3750 }
3751
3752 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3753 {
3754         u32 reg_val;
3755
3756         reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3757         if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3758                 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3759                 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3760                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3761         }
3762
3763         if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3764                 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3765                 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3766                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3767         }
3768 }
3769
3770 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3771 {
3772         struct hclge_desc desc;
3773         struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3774         int ret;
3775
3776         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3777         hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3778         req->fun_reset_vfid = func_id;
3779
3780         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3781         if (ret)
3782                 dev_err(&hdev->pdev->dev,
3783                         "send function reset cmd fail, status = %d\n", ret);
3784
3785         return ret;
3786 }
3787
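/* Trigger the requested reset: IMP and global resets are asserted through
 * registers, while a function (PF) reset is handed back to the reset task;
 * nothing is done if a previous hardware reset is still in progress.
 */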
3788 static void hclge_do_reset(struct hclge_dev *hdev)
3789 {
3790         struct hnae3_handle *handle = &hdev->vport[0].nic;
3791         struct pci_dev *pdev = hdev->pdev;
3792         u32 val;
3793
3794         if (hclge_get_hw_reset_stat(handle)) {
3795                 dev_info(&pdev->dev, "hardware reset not finished\n");
3796                 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3797                          hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3798                          hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3799                 return;
3800         }
3801
3802         switch (hdev->reset_type) {
3803         case HNAE3_IMP_RESET:
3804                 dev_info(&pdev->dev, "IMP reset requested\n");
3805                 val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3806                 hnae3_set_bit(val, HCLGE_TRIGGER_IMP_RESET_B, 1);
3807                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, val);
3808                 break;
3809         case HNAE3_GLOBAL_RESET:
3810                 dev_info(&pdev->dev, "global reset requested\n");
3811                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3812                 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3813                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3814                 break;
3815         case HNAE3_FUNC_RESET:
3816                 dev_info(&pdev->dev, "PF reset requested\n");
3817                 /* schedule again to check later */
3818                 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3819                 hclge_reset_task_schedule(hdev);
3820                 break;
3821         default:
3822                 dev_warn(&pdev->dev,
3823                          "unsupported reset type: %d\n", hdev->reset_type);
3824                 break;
3825         }
3826 }
3827
3828 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3829                                                    unsigned long *addr)
3830 {
3831         enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3832         struct hclge_dev *hdev = ae_dev->priv;
3833
3834         /* return the highest priority reset level amongst all */
3835         if (test_bit(HNAE3_IMP_RESET, addr)) {
3836                 rst_level = HNAE3_IMP_RESET;
3837                 clear_bit(HNAE3_IMP_RESET, addr);
3838                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3839                 clear_bit(HNAE3_FUNC_RESET, addr);
3840         } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3841                 rst_level = HNAE3_GLOBAL_RESET;
3842                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3843                 clear_bit(HNAE3_FUNC_RESET, addr);
3844         } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3845                 rst_level = HNAE3_FUNC_RESET;
3846                 clear_bit(HNAE3_FUNC_RESET, addr);
3847         } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3848                 rst_level = HNAE3_FLR_RESET;
3849                 clear_bit(HNAE3_FLR_RESET, addr);
3850         }
3851
3852         if (hdev->reset_type != HNAE3_NONE_RESET &&
3853             rst_level < hdev->reset_type)
3854                 return HNAE3_NONE_RESET;
3855
3856         return rst_level;
3857 }
3858
3859 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3860 {
3861         u32 clearval = 0;
3862
3863         switch (hdev->reset_type) {
3864         case HNAE3_IMP_RESET:
3865                 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3866                 break;
3867         case HNAE3_GLOBAL_RESET:
3868                 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3869                 break;
3870         default:
3871                 break;
3872         }
3873
3874         if (!clearval)
3875                 return;
3876
3877         /* For revision 0x20, the reset interrupt source
3878          * can only be cleared after the hardware reset is done
3879          */
3880         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
3881                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3882                                 clearval);
3883
3884         hclge_enable_vector(&hdev->misc_vector, true);
3885 }
3886
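/* Tell the firmware whether the driver is ready for the hardware reset by
 * toggling the HCLGE_NIC_SW_RST_RDY bit in the NIC CSQ depth register.
 */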
3887 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3888 {
3889         u32 reg_val;
3890
3891         reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3892         if (enable)
3893                 reg_val |= HCLGE_NIC_SW_RST_RDY;
3894         else
3895                 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3896
3897         hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3898 }
3899
3900 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3901 {
3902         int ret;
3903
3904         ret = hclge_set_all_vf_rst(hdev, true);
3905         if (ret)
3906                 return ret;
3907
3908         hclge_func_reset_sync_vf(hdev);
3909
3910         return 0;
3911 }
3912
3913 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3914 {
3915         u32 reg_val;
3916         int ret = 0;
3917
3918         switch (hdev->reset_type) {
3919         case HNAE3_FUNC_RESET:
3920                 ret = hclge_func_reset_notify_vf(hdev);
3921                 if (ret)
3922                         return ret;
3923
3924                 ret = hclge_func_reset_cmd(hdev, 0);
3925                 if (ret) {
3926                         dev_err(&hdev->pdev->dev,
3927                                 "asserting function reset fail %d!\n", ret);
3928                         return ret;
3929                 }
3930
3931                 /* After performing PF reset, it is not necessary to do the
3932                  * mailbox handling or send any command to firmware, because
3933                  * any mailbox handling or command to firmware is only valid
3934                  * after hclge_cmd_init is called.
3935                  */
3936                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3937                 hdev->rst_stats.pf_rst_cnt++;
3938                 break;
3939         case HNAE3_FLR_RESET:
3940                 ret = hclge_func_reset_notify_vf(hdev);
3941                 if (ret)
3942                         return ret;
3943                 break;
3944         case HNAE3_IMP_RESET:
3945                 hclge_handle_imp_error(hdev);
3946                 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3947                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3948                                 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3949                 break;
3950         default:
3951                 break;
3952         }
3953
3954         /* inform hardware that preparatory work is done */
3955         msleep(HCLGE_RESET_SYNC_TIME);
3956         hclge_reset_handshake(hdev, true);
3957         dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3958
3959         return ret;
3960 }
3961
3962 static void hclge_show_rst_info(struct hclge_dev *hdev)
3963 {
3964         char *buf;
3965
3966         buf = kzalloc(HCLGE_DBG_RESET_INFO_LEN, GFP_KERNEL);
3967         if (!buf)
3968                 return;
3969
3970         hclge_dbg_dump_rst_info(hdev, buf, HCLGE_DBG_RESET_INFO_LEN);
3971
3972         dev_info(&hdev->pdev->dev, "dump reset info:\n%s", buf);
3973
3974         kfree(buf);
3975 }
3976
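/* Decide how to handle a failed reset: return true to re-schedule the
 * reset task, false when a new reset interrupt supersedes it or the
 * maximum fail count has been reached.
 */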
3977 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3978 {
3979 #define MAX_RESET_FAIL_CNT 5
3980
3981         if (hdev->reset_pending) {
3982                 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3983                          hdev->reset_pending);
3984                 return true;
3985         } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3986                    HCLGE_RESET_INT_M) {
3987                 dev_info(&hdev->pdev->dev,
3988                          "reset failed because of a new reset interrupt\n");
3989                 hclge_clear_reset_cause(hdev);
3990                 return false;
3991         } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3992                 hdev->rst_stats.reset_fail_cnt++;
3993                 set_bit(hdev->reset_type, &hdev->reset_pending);
3994                 dev_info(&hdev->pdev->dev,
3995                          "re-schedule reset task(%u)\n",
3996                          hdev->rst_stats.reset_fail_cnt);
3997                 return true;
3998         }
3999
4000         hclge_clear_reset_cause(hdev);
4001
4002         /* recover the handshake status when reset fail */
4003         hclge_reset_handshake(hdev, true);
4004
4005         dev_err(&hdev->pdev->dev, "Reset fail!\n");
4006
4007         hclge_show_rst_info(hdev);
4008
4009         set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4010
4011         return false;
4012 }
4013
4014 static void hclge_update_reset_level(struct hclge_dev *hdev)
4015 {
4016         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4017         enum hnae3_reset_type reset_level;
4018
4019         /* reset request will not be set during reset, so clear
4020          * pending reset request to avoid unnecessary reset
4021          * caused by the same reason.
4022          */
4023         hclge_get_reset_level(ae_dev, &hdev->reset_request);
4024
4025         /* if default_reset_request has a higher level reset request,
4026          * it should be handled as soon as possible, since some errors
4027          * need this kind of reset to be fixed.
4028          */
4029         reset_level = hclge_get_reset_level(ae_dev,
4030                                             &hdev->default_reset_request);
4031         if (reset_level != HNAE3_NONE_RESET)
4032                 set_bit(reset_level, &hdev->reset_request);
4033 }
4034
4035 static int hclge_set_rst_done(struct hclge_dev *hdev)
4036 {
4037         struct hclge_pf_rst_done_cmd *req;
4038         struct hclge_desc desc;
4039         int ret;
4040
4041         req = (struct hclge_pf_rst_done_cmd *)desc.data;
4042         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
4043         req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
4044
4045         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4046         /* To be compatible with the old firmware, which does not support
4047          * command HCLGE_OPC_PF_RST_DONE, just print a warning and
4048          * return success
4049          */
4050         if (ret == -EOPNOTSUPP) {
4051                 dev_warn(&hdev->pdev->dev,
4052                          "current firmware does not support command(0x%x)!\n",
4053                          HCLGE_OPC_PF_RST_DONE);
4054                 return 0;
4055         } else if (ret) {
4056                 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
4057                         ret);
4058         }
4059
4060         return ret;
4061 }
4062
4063 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
4064 {
4065         int ret = 0;
4066
4067         switch (hdev->reset_type) {
4068         case HNAE3_FUNC_RESET:
4069         case HNAE3_FLR_RESET:
4070                 ret = hclge_set_all_vf_rst(hdev, false);
4071                 break;
4072         case HNAE3_GLOBAL_RESET:
4073         case HNAE3_IMP_RESET:
4074                 ret = hclge_set_rst_done(hdev);
4075                 break;
4076         default:
4077                 break;
4078         }
4079
4080         /* clear the handshake status after re-initialization is done */
4081         hclge_reset_handshake(hdev, false);
4082
4083         return ret;
4084 }
4085
4086 static int hclge_reset_stack(struct hclge_dev *hdev)
4087 {
4088         int ret;
4089
4090         ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
4091         if (ret)
4092                 return ret;
4093
4094         ret = hclge_reset_ae_dev(hdev->ae_dev);
4095         if (ret)
4096                 return ret;
4097
4098         return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
4099 }
4100
4101 static int hclge_reset_prepare(struct hclge_dev *hdev)
4102 {
4103         int ret;
4104
4105         hdev->rst_stats.reset_cnt++;
4106         /* perform reset of the stack & ae device for a client */
4107         ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
4108         if (ret)
4109                 return ret;
4110
4111         rtnl_lock();
4112         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
4113         rtnl_unlock();
4114         if (ret)
4115                 return ret;
4116
4117         return hclge_reset_prepare_wait(hdev);
4118 }
4119
4120 static int hclge_reset_rebuild(struct hclge_dev *hdev)
4121 {
4122         int ret;
4123
4124         hdev->rst_stats.hw_reset_done_cnt++;
4125
4126         ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
4127         if (ret)
4128                 return ret;
4129
4130         rtnl_lock();
4131         ret = hclge_reset_stack(hdev);
4132         rtnl_unlock();
4133         if (ret)
4134                 return ret;
4135
4136         hclge_clear_reset_cause(hdev);
4137
4138         ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
4139         /* ignore the RoCE notify error once the reset has already failed
4140          * HCLGE_RESET_MAX_FAIL_CNT - 1 times, so this attempt can proceed
4141          */
4142         if (ret &&
4143             hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
4144                 return ret;
4145
4146         ret = hclge_reset_prepare_up(hdev);
4147         if (ret)
4148                 return ret;
4149
4150         rtnl_lock();
4151         ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
4152         rtnl_unlock();
4153         if (ret)
4154                 return ret;
4155
4156         ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
4157         if (ret)
4158                 return ret;
4159
4160         hdev->last_reset_time = jiffies;
4161         hdev->rst_stats.reset_fail_cnt = 0;
4162         hdev->rst_stats.reset_done_cnt++;
4163         clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4164
4165         hclge_update_reset_level(hdev);
4166
4167         return 0;
4168 }
4169
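/* hclge_reset() drives one complete reset cycle: quiesce the clients and
 * notify the firmware (prepare), wait for the hardware reset to finish, and
 * then rebuild the stack and bring the clients back up. On failure, the error
 * handler decides whether the reset task should be rescheduled.
 */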
4170 static void hclge_reset(struct hclge_dev *hdev)
4171 {
4172         if (hclge_reset_prepare(hdev))
4173                 goto err_reset;
4174
4175         if (hclge_reset_wait(hdev))
4176                 goto err_reset;
4177
4178         if (hclge_reset_rebuild(hdev))
4179                 goto err_reset;
4180
4181         return;
4182
4183 err_reset:
4184         if (hclge_reset_err_handle(hdev))
4185                 hclge_reset_task_schedule(hdev);
4186 }
4187
4188 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
4189 {
4190         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
4191         struct hclge_dev *hdev = ae_dev->priv;
4192
4193         /* We might end up getting called for one of two reasons:
4194          * 1. A recoverable error was conveyed through APEI and the only way
4195          *    to restore normalcy is to reset.
4196          * 2. A new reset request from the stack due to a timeout.
4197          *
4198          * Check whether this is a new reset request and not a retry caused by
4199          * the last reset attempt failing and the watchdog hitting us again.
4200          * We know it is new if the last reset request did not occur very
4201          * recently (watchdog timer = 5*HZ, so check after a sufficiently
4202          * large time, say 4*5*HZ). For a new request we reset the
4203          * "reset level" to PF reset. If it is a repeat of the most recent
4204          * request, throttle it: do not allow another reset before
4205          * HCLGE_RESET_INTERVAL has elapsed.
4206          */
4207
4208         if (time_before(jiffies, (hdev->last_reset_time +
4209                                   HCLGE_RESET_INTERVAL))) {
4210                 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
4211                 return;
4212         }
4213
4214         if (hdev->default_reset_request) {
4215                 hdev->reset_level =
4216                         hclge_get_reset_level(ae_dev,
4217                                               &hdev->default_reset_request);
4218         } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
4219                 hdev->reset_level = HNAE3_FUNC_RESET;
4220         }
4221
4222         dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
4223                  hdev->reset_level);
4224
4225         /* request reset & schedule reset task */
4226         set_bit(hdev->reset_level, &hdev->reset_request);
4227         hclge_reset_task_schedule(hdev);
4228
4229         if (hdev->reset_level < HNAE3_GLOBAL_RESET)
4230                 hdev->reset_level++;
4231 }
4232
4233 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
4234                                         enum hnae3_reset_type rst_type)
4235 {
4236         struct hclge_dev *hdev = ae_dev->priv;
4237
4238         set_bit(rst_type, &hdev->default_reset_request);
4239 }
4240
4241 static void hclge_reset_timer(struct timer_list *t)
4242 {
4243         struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
4244
4245         /* if default_reset_request has no value, it means that this reset
4246          * request has already been handled, so just return here
4247          */
4248         if (!hdev->default_reset_request)
4249                 return;
4250
4251         dev_info(&hdev->pdev->dev,
4252                  "triggering reset in reset timer\n");
4253         hclge_reset_event(hdev->pdev, NULL);
4254 }
4255
4256 static void hclge_reset_subtask(struct hclge_dev *hdev)
4257 {
4258         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4259
4260         /* Check if there is any ongoing reset in the hardware. This status
4261          * can be read from reset_pending. If there is, we need to wait for
4262          * the hardware to complete the reset.
4263          *    a. If we can determine in reasonable time that the hardware
4264          *       has fully reset, we can proceed with the driver and client
4265          *       reset.
4266          *    b. Otherwise, come back later to check this status, so
4267          *       re-schedule now.
4268          */
4269         hdev->last_reset_time = jiffies;
4270         hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
4271         if (hdev->reset_type != HNAE3_NONE_RESET)
4272                 hclge_reset(hdev);
4273
4274         /* check if we got any *new* reset requests to be honored */
4275         hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
4276         if (hdev->reset_type != HNAE3_NONE_RESET)
4277                 hclge_do_reset(hdev);
4278
4279         hdev->reset_type = HNAE3_NONE_RESET;
4280 }
4281
4282 static void hclge_handle_err_reset_request(struct hclge_dev *hdev)
4283 {
4284         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4285         enum hnae3_reset_type reset_type;
4286
4287         if (ae_dev->hw_err_reset_req) {
4288                 reset_type = hclge_get_reset_level(ae_dev,
4289                                                    &ae_dev->hw_err_reset_req);
4290                 hclge_set_def_reset_request(ae_dev, reset_type);
4291         }
4292
4293         if (hdev->default_reset_request && ae_dev->ops->reset_event)
4294                 ae_dev->ops->reset_event(hdev->pdev, NULL);
4295
4296         /* enable the interrupt after error handling is complete */
4297         hclge_enable_vector(&hdev->misc_vector, true);
4298 }
4299
4300 static void hclge_handle_err_recovery(struct hclge_dev *hdev)
4301 {
4302         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4303
4304         ae_dev->hw_err_reset_req = 0;
4305
4306         if (hclge_find_error_source(hdev)) {
4307                 hclge_handle_error_info_log(ae_dev);
4308                 hclge_handle_mac_tnl(hdev);
4309         }
4310
4311         hclge_handle_err_reset_request(hdev);
4312 }
4313
4314 static void hclge_misc_err_recovery(struct hclge_dev *hdev)
4315 {
4316         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4317         struct device *dev = &hdev->pdev->dev;
4318         u32 msix_sts_reg;
4319
4320         msix_sts_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
4321         if (msix_sts_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
4322                 if (hclge_handle_hw_msix_error
4323                                 (hdev, &hdev->default_reset_request))
4324                         dev_info(dev, "received msix interrupt 0x%x\n",
4325                                  msix_sts_reg);
4326         }
4327
4328         hclge_handle_hw_ras_error(ae_dev);
4329
4330         hclge_handle_err_reset_request(hdev);
4331 }
4332
4333 static void hclge_errhand_service_task(struct hclge_dev *hdev)
4334 {
4335         if (!test_and_clear_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
4336                 return;
4337
4338         if (hnae3_dev_ras_imp_supported(hdev))
4339                 hclge_handle_err_recovery(hdev);
4340         else
4341                 hclge_misc_err_recovery(hdev);
4342 }
4343
4344 static void hclge_reset_service_task(struct hclge_dev *hdev)
4345 {
4346         if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
4347                 return;
4348
4349         down(&hdev->reset_sem);
4350         set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4351
4352         hclge_reset_subtask(hdev);
4353
4354         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4355         up(&hdev->reset_sem);
4356 }
4357
4358 static void hclge_update_vport_alive(struct hclge_dev *hdev)
4359 {
4360         int i;
4361
4362         /* start from vport 1, because vport 0 (the PF) is always alive */
4363         for (i = 1; i < hdev->num_alloc_vport; i++) {
4364                 struct hclge_vport *vport = &hdev->vport[i];
4365
4366                 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
4367                         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
4368
4369                 /* If the VF is not alive, reset its MPS to the default value */
4370                 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4371                         vport->mps = HCLGE_MAC_DEFAULT_FRAME;
4372         }
4373 }
4374
4375 static void hclge_periodic_service_task(struct hclge_dev *hdev)
4376 {
4377         unsigned long delta = round_jiffies_relative(HZ);
4378
4379         if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
4380                 return;
4381
4382         /* Always handle link updating to make sure the link state is
4383          * updated when the update is triggered by mbx.
4384          */
4385         hclge_update_link_status(hdev);
4386         hclge_sync_mac_table(hdev);
4387         hclge_sync_promisc_mode(hdev);
4388         hclge_sync_fd_table(hdev);
4389
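        /* If less than a second has passed since the last full service run,
         * skip the heavy work and only reschedule for the remaining time, so
         * the periodic handling below runs roughly once per second.
         */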
4390         if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4391                 delta = jiffies - hdev->last_serv_processed;
4392
4393                 if (delta < round_jiffies_relative(HZ)) {
4394                         delta = round_jiffies_relative(HZ) - delta;
4395                         goto out;
4396                 }
4397         }
4398
4399         hdev->serv_processed_cnt++;
4400         hclge_update_vport_alive(hdev);
4401
4402         if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4403                 hdev->last_serv_processed = jiffies;
4404                 goto out;
4405         }
4406
4407         if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4408                 hclge_update_stats_for_all(hdev);
4409
4410         hclge_update_port_info(hdev);
4411         hclge_sync_vlan_filter(hdev);
4412
4413         if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4414                 hclge_rfs_filter_expire(hdev);
4415
4416         hdev->last_serv_processed = jiffies;
4417
4418 out:
4419         hclge_task_schedule(hdev, delta);
4420 }
4421
4422 static void hclge_ptp_service_task(struct hclge_dev *hdev)
4423 {
4424         unsigned long flags;
4425
4426         if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state) ||
4427             !test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state) ||
4428             !time_is_before_jiffies(hdev->ptp->tx_start + HZ))
4429                 return;
4430
4431         /* to prevent a race with the irq handler */
4432         spin_lock_irqsave(&hdev->ptp->lock, flags);
4433
4434         /* check HCLGE_STATE_PTP_TX_HANDLING here again, since the irq
4435          * handler may handle it just before spin_lock_irqsave().
4436          */
4437         if (test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state))
4438                 hclge_ptp_clean_tx_hwts(hdev);
4439
4440         spin_unlock_irqrestore(&hdev->ptp->lock, flags);
4441 }
4442
4443 static void hclge_service_task(struct work_struct *work)
4444 {
4445         struct hclge_dev *hdev =
4446                 container_of(work, struct hclge_dev, service_task.work);
4447
4448         hclge_errhand_service_task(hdev);
4449         hclge_reset_service_task(hdev);
4450         hclge_ptp_service_task(hdev);
4451         hclge_mailbox_service_task(hdev);
4452         hclge_periodic_service_task(hdev);
4453
4454         /* Handle error recovery, reset and mbx again in case the periodic
4455          * task delays the handling by calling hclge_task_schedule() in
4456          * hclge_periodic_service_task().
4457          */
4458         hclge_errhand_service_task(hdev);
4459         hclge_reset_service_task(hdev);
4460         hclge_mailbox_service_task(hdev);
4461 }
4462
4463 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4464 {
4465         /* VF handle has no client */
4466         if (!handle->client)
4467                 return container_of(handle, struct hclge_vport, nic);
4468         else if (handle->client->type == HNAE3_CLIENT_ROCE)
4469                 return container_of(handle, struct hclge_vport, roce);
4470         else
4471                 return container_of(handle, struct hclge_vport, nic);
4472 }
4473
4474 static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
4475                                   struct hnae3_vector_info *vector_info)
4476 {
4477 #define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2  64
4478
4479         vector_info->vector = pci_irq_vector(hdev->pdev, idx);
4480
4481         /* need an extended offset to configure vectors >= 64 */
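        /* Vectors whose zero-based index (idx - 1) is below
         * HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 sit in the base register window at a
         * fixed HCLGE_VECTOR_REG_OFFSET stride; higher vectors fall into the
         * extended window, addressed by their group of
         * HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 plus the offset within that group,
         * as computed below.
         */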
4482         if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
4483                 vector_info->io_addr = hdev->hw.io_base +
4484                                 HCLGE_VECTOR_REG_BASE +
4485                                 (idx - 1) * HCLGE_VECTOR_REG_OFFSET;
4486         else
4487                 vector_info->io_addr = hdev->hw.io_base +
4488                                 HCLGE_VECTOR_EXT_REG_BASE +
4489                                 (idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4490                                 HCLGE_VECTOR_REG_OFFSET_H +
4491                                 (idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4492                                 HCLGE_VECTOR_REG_OFFSET;
4493
4494         hdev->vector_status[idx] = hdev->vport[0].vport_id;
4495         hdev->vector_irq[idx] = vector_info->vector;
4496 }
4497
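/* Allocate up to vector_num unused MSI vectors for this handle, fill
 * vector_info with the irq number and I/O address of each one, and return
 * the number of vectors actually allocated.
 */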
4498 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4499                             struct hnae3_vector_info *vector_info)
4500 {
4501         struct hclge_vport *vport = hclge_get_vport(handle);
4502         struct hnae3_vector_info *vector = vector_info;
4503         struct hclge_dev *hdev = vport->back;
4504         int alloc = 0;
4505         u16 i = 0;
4506         u16 j;
4507
4508         vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4509         vector_num = min(hdev->num_msi_left, vector_num);
4510
4511         for (j = 0; j < vector_num; j++) {
4512                 while (++i < hdev->num_nic_msi) {
4513                         if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4514                                 hclge_get_vector_info(hdev, i, vector);
4515                                 vector++;
4516                                 alloc++;
4517
4518                                 break;
4519                         }
4520                 }
4521         }
4522         hdev->num_msi_left -= alloc;
4523         hdev->num_msi_used += alloc;
4524
4525         return alloc;
4526 }
4527
4528 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4529 {
4530         int i;
4531
4532         for (i = 0; i < hdev->num_msi; i++)
4533                 if (vector == hdev->vector_irq[i])
4534                         return i;
4535
4536         return -EINVAL;
4537 }
4538
4539 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4540 {
4541         struct hclge_vport *vport = hclge_get_vport(handle);
4542         struct hclge_dev *hdev = vport->back;
4543         int vector_id;
4544
4545         vector_id = hclge_get_vector_index(hdev, vector);
4546         if (vector_id < 0) {
4547                 dev_err(&hdev->pdev->dev,
4548                         "Get vector index fail. vector = %d\n", vector);
4549                 return vector_id;
4550         }
4551
4552         hclge_free_vector(hdev, vector_id);
4553
4554         return 0;
4555 }
4556
4557 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4558 {
4559         return HCLGE_RSS_KEY_SIZE;
4560 }
4561
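/* Program the RSS hash algorithm and hash key into hardware. The key is
 * written in chunks of HCLGE_RSS_HASH_KEY_NUM bytes, one command descriptor
 * per chunk, with the chunk index carried in the hash_config field.
 */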
4562 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4563                                   const u8 hfunc, const u8 *key)
4564 {
4565         struct hclge_rss_config_cmd *req;
4566         unsigned int key_offset = 0;
4567         struct hclge_desc desc;
4568         int key_counts;
4569         int key_size;
4570         int ret;
4571
4572         key_counts = HCLGE_RSS_KEY_SIZE;
4573         req = (struct hclge_rss_config_cmd *)desc.data;
4574
4575         while (key_counts) {
4576                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4577                                            false);
4578
4579                 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4580                 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4581
4582                 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4583                 memcpy(req->hash_key,
4584                        key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4585
4586                 key_counts -= key_size;
4587                 key_offset++;
4588                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4589                 if (ret) {
4590                         dev_err(&hdev->pdev->dev,
4591                                 "Configure RSS config fail, status = %d\n",
4592                                 ret);
4593                         return ret;
4594                 }
4595         }
4596         return 0;
4597 }
4598
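/* Write the RSS indirection table to hardware, HCLGE_RSS_CFG_TBL_SIZE entries
 * per command descriptor. The low byte of each queue id goes into rss_qid_l,
 * and its extra most-significant bit is packed into the rss_qid_h bitmap.
 */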
4599 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
4600 {
4601         struct hclge_rss_indirection_table_cmd *req;
4602         struct hclge_desc desc;
4603         int rss_cfg_tbl_num;
4604         u8 rss_msb_oft;
4605         u8 rss_msb_val;
4606         int ret;
4607         u16 qid;
4608         int i;
4609         u32 j;
4610
4611         req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4612         rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
4613                           HCLGE_RSS_CFG_TBL_SIZE;
4614
4615         for (i = 0; i < rss_cfg_tbl_num; i++) {
4616                 hclge_cmd_setup_basic_desc
4617                         (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4618
4619                 req->start_table_index =
4620                         cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4621                 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4622                 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) {
4623                         qid = indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4624                         req->rss_qid_l[j] = qid & 0xff;
4625                         rss_msb_oft =
4626                                 j * HCLGE_RSS_CFG_TBL_BW_H / BITS_PER_BYTE;
4627                         rss_msb_val = (qid >> HCLGE_RSS_CFG_TBL_BW_L & 0x1) <<
4628                                 (j * HCLGE_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);
4629                         req->rss_qid_h[rss_msb_oft] |= rss_msb_val;
4630                 }
4631                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4632                 if (ret) {
4633                         dev_err(&hdev->pdev->dev,
4634                                 "Configure rss indir table fail,status = %d\n",
4635                                 ret);
4636                         return ret;
4637                 }
4638         }
4639         return 0;
4640 }
4641
4642 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4643                                  u16 *tc_size, u16 *tc_offset)
4644 {
4645         struct hclge_rss_tc_mode_cmd *req;
4646         struct hclge_desc desc;
4647         int ret;
4648         int i;
4649
4650         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4651         req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4652
4653         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4654                 u16 mode = 0;
4655
4656                 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4657                 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4658                                 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4659                 hnae3_set_bit(mode, HCLGE_RSS_TC_SIZE_MSB_B,
4660                               tc_size[i] >> HCLGE_RSS_TC_SIZE_MSB_OFFSET & 0x1);
4661                 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4662                                 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4663
4664                 req->rss_tc_mode[i] = cpu_to_le16(mode);
4665         }
4666
4667         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4668         if (ret)
4669                 dev_err(&hdev->pdev->dev,
4670                         "Configure rss tc mode fail, status = %d\n", ret);
4671
4672         return ret;
4673 }
4674
4675 static void hclge_get_rss_type(struct hclge_vport *vport)
4676 {
4677         if (vport->rss_tuple_sets.ipv4_tcp_en ||
4678             vport->rss_tuple_sets.ipv4_udp_en ||
4679             vport->rss_tuple_sets.ipv4_sctp_en ||
4680             vport->rss_tuple_sets.ipv6_tcp_en ||
4681             vport->rss_tuple_sets.ipv6_udp_en ||
4682             vport->rss_tuple_sets.ipv6_sctp_en)
4683                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4684         else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4685                  vport->rss_tuple_sets.ipv6_fragment_en)
4686                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4687         else
4688                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4689 }
4690
4691 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4692 {
4693         struct hclge_rss_input_tuple_cmd *req;
4694         struct hclge_desc desc;
4695         int ret;
4696
4697         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4698
4699         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4700
4701         /* Get the tuple cfg from pf */
4702         req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4703         req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4704         req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4705         req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4706         req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4707         req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4708         req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4709         req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4710         hclge_get_rss_type(&hdev->vport[0]);
4711         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4712         if (ret)
4713                 dev_err(&hdev->pdev->dev,
4714                         "Configure rss input fail, status = %d\n", ret);
4715         return ret;
4716 }
4717
4718 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4719                          u8 *key, u8 *hfunc)
4720 {
4721         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4722         struct hclge_vport *vport = hclge_get_vport(handle);
4723         int i;
4724
4725         /* Get hash algorithm */
4726         if (hfunc) {
4727                 switch (vport->rss_algo) {
4728                 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4729                         *hfunc = ETH_RSS_HASH_TOP;
4730                         break;
4731                 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4732                         *hfunc = ETH_RSS_HASH_XOR;
4733                         break;
4734                 default:
4735                         *hfunc = ETH_RSS_HASH_UNKNOWN;
4736                         break;
4737                 }
4738         }
4739
4740         /* Get the RSS Key required by the user */
4741         if (key)
4742                 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4743
4744         /* Get the indirection table */
4745         if (indir)
4746                 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4747                         indir[i] =  vport->rss_indirection_tbl[i];
4748
4749         return 0;
4750 }
4751
4752 static int hclge_parse_rss_hfunc(struct hclge_vport *vport, const u8 hfunc,
4753                                  u8 *hash_algo)
4754 {
4755         switch (hfunc) {
4756         case ETH_RSS_HASH_TOP:
4757                 *hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4758                 return 0;
4759         case ETH_RSS_HASH_XOR:
4760                 *hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4761                 return 0;
4762         case ETH_RSS_HASH_NO_CHANGE:
4763                 *hash_algo = vport->rss_algo;
4764                 return 0;
4765         default:
4766                 return -EINVAL;
4767         }
4768 }
4769
4770 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4771                          const  u8 *key, const  u8 hfunc)
4772 {
4773         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4774         struct hclge_vport *vport = hclge_get_vport(handle);
4775         struct hclge_dev *hdev = vport->back;
4776         u8 hash_algo;
4777         int ret, i;
4778
4779         ret = hclge_parse_rss_hfunc(vport, hfunc, &hash_algo);
4780         if (ret) {
4781                 dev_err(&hdev->pdev->dev, "invalid hfunc type %u\n", hfunc);
4782                 return ret;
4783         }
4784
4785         /* Set the RSS hash key if specified by the user */
4786         if (key) {
4787                 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4788                 if (ret)
4789                         return ret;
4790
4791                 /* Update the shadow RSS key with the user specified key */
4792                 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4793         } else {
4794                 ret = hclge_set_rss_algo_key(hdev, hash_algo,
4795                                              vport->rss_hash_key);
4796                 if (ret)
4797                         return ret;
4798         }
4799         vport->rss_algo = hash_algo;
4800
4801         /* Update the shadow RSS table with user specified qids */
4802         for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4803                 vport->rss_indirection_tbl[i] = indir[i];
4804
4805         /* Update the hardware */
4806         return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4807 }
4808
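/* Translate the ethtool RXH_* flags in nfc->data into the driver's tuple
 * bits: source/destination IP, source/destination L4 port, and the
 * verification tag bit for SCTP flows.
 */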
4809 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4810 {
4811         u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4812
4813         if (nfc->data & RXH_L4_B_2_3)
4814                 hash_sets |= HCLGE_D_PORT_BIT;
4815         else
4816                 hash_sets &= ~HCLGE_D_PORT_BIT;
4817
4818         if (nfc->data & RXH_IP_SRC)
4819                 hash_sets |= HCLGE_S_IP_BIT;
4820         else
4821                 hash_sets &= ~HCLGE_S_IP_BIT;
4822
4823         if (nfc->data & RXH_IP_DST)
4824                 hash_sets |= HCLGE_D_IP_BIT;
4825         else
4826                 hash_sets &= ~HCLGE_D_IP_BIT;
4827
4828         if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4829                 hash_sets |= HCLGE_V_TAG_BIT;
4830
4831         return hash_sets;
4832 }
4833
4834 static int hclge_init_rss_tuple_cmd(struct hclge_vport *vport,
4835                                     struct ethtool_rxnfc *nfc,
4836                                     struct hclge_rss_input_tuple_cmd *req)
4837 {
4838         struct hclge_dev *hdev = vport->back;
4839         u8 tuple_sets;
4840
4841         req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4842         req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4843         req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4844         req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4845         req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4846         req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4847         req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4848         req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4849
4850         tuple_sets = hclge_get_rss_hash_bits(nfc);
4851         switch (nfc->flow_type) {
4852         case TCP_V4_FLOW:
4853                 req->ipv4_tcp_en = tuple_sets;
4854                 break;
4855         case TCP_V6_FLOW:
4856                 req->ipv6_tcp_en = tuple_sets;
4857                 break;
4858         case UDP_V4_FLOW:
4859                 req->ipv4_udp_en = tuple_sets;
4860                 break;
4861         case UDP_V6_FLOW:
4862                 req->ipv6_udp_en = tuple_sets;
4863                 break;
4864         case SCTP_V4_FLOW:
4865                 req->ipv4_sctp_en = tuple_sets;
4866                 break;
4867         case SCTP_V6_FLOW:
4868                 if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
4869                     (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
4870                         return -EINVAL;
4871
4872                 req->ipv6_sctp_en = tuple_sets;
4873                 break;
4874         case IPV4_FLOW:
4875                 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4876                 break;
4877         case IPV6_FLOW:
4878                 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4879                 break;
4880         default:
4881                 return -EINVAL;
4882         }
4883
4884         return 0;
4885 }
4886
4887 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4888                                struct ethtool_rxnfc *nfc)
4889 {
4890         struct hclge_vport *vport = hclge_get_vport(handle);
4891         struct hclge_dev *hdev = vport->back;
4892         struct hclge_rss_input_tuple_cmd *req;
4893         struct hclge_desc desc;
4894         int ret;
4895
4896         if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4897                           RXH_L4_B_0_1 | RXH_L4_B_2_3))
4898                 return -EINVAL;
4899
4900         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4901         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4902
4903         ret = hclge_init_rss_tuple_cmd(vport, nfc, req);
4904         if (ret) {
4905                 dev_err(&hdev->pdev->dev,
4906                         "failed to init rss tuple cmd, ret = %d\n", ret);
4907                 return ret;
4908         }
4909
4910         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4911         if (ret) {
4912                 dev_err(&hdev->pdev->dev,
4913                         "Set rss tuple fail, status = %d\n", ret);
4914                 return ret;
4915         }
4916
4917         vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4918         vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4919         vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4920         vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4921         vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4922         vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4923         vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4924         vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4925         hclge_get_rss_type(vport);
4926         return 0;
4927 }
4928
4929 static int hclge_get_vport_rss_tuple(struct hclge_vport *vport, int flow_type,
4930                                      u8 *tuple_sets)
4931 {
4932         switch (flow_type) {
4933         case TCP_V4_FLOW:
4934                 *tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4935                 break;
4936         case UDP_V4_FLOW:
4937                 *tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4938                 break;
4939         case TCP_V6_FLOW:
4940                 *tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4941                 break;
4942         case UDP_V6_FLOW:
4943                 *tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4944                 break;
4945         case SCTP_V4_FLOW:
4946                 *tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4947                 break;
4948         case SCTP_V6_FLOW:
4949                 *tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4950                 break;
4951         case IPV4_FLOW:
4952         case IPV6_FLOW:
4953                 *tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4954                 break;
4955         default:
4956                 return -EINVAL;
4957         }
4958
4959         return 0;
4960 }
4961
4962 static u64 hclge_convert_rss_tuple(u8 tuple_sets)
4963 {
4964         u64 tuple_data = 0;
4965
4966         if (tuple_sets & HCLGE_D_PORT_BIT)
4967                 tuple_data |= RXH_L4_B_2_3;
4968         if (tuple_sets & HCLGE_S_PORT_BIT)
4969                 tuple_data |= RXH_L4_B_0_1;
4970         if (tuple_sets & HCLGE_D_IP_BIT)
4971                 tuple_data |= RXH_IP_DST;
4972         if (tuple_sets & HCLGE_S_IP_BIT)
4973                 tuple_data |= RXH_IP_SRC;
4974
4975         return tuple_data;
4976 }
4977
4978 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4979                                struct ethtool_rxnfc *nfc)
4980 {
4981         struct hclge_vport *vport = hclge_get_vport(handle);
4982         u8 tuple_sets;
4983         int ret;
4984
4985         nfc->data = 0;
4986
4987         ret = hclge_get_vport_rss_tuple(vport, nfc->flow_type, &tuple_sets);
4988         if (ret || !tuple_sets)
4989                 return ret;
4990
4991         nfc->data = hclge_convert_rss_tuple(tuple_sets);
4992
4993         return 0;
4994 }
4995
4996 static int hclge_get_tc_size(struct hnae3_handle *handle)
4997 {
4998         struct hclge_vport *vport = hclge_get_vport(handle);
4999         struct hclge_dev *hdev = vport->back;
5000
5001         return hdev->pf_rss_size_max;
5002 }
5003
5004 static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
5005 {
5006         struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
5007         struct hclge_vport *vport = hdev->vport;
5008         u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
5009         u16 tc_valid[HCLGE_MAX_TC_NUM] = {0};
5010         u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
5011         struct hnae3_tc_info *tc_info;
5012         u16 roundup_size;
5013         u16 rss_size;
5014         int i;
5015
5016         tc_info = &vport->nic.kinfo.tc_info;
5017         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
5018                 rss_size = tc_info->tqp_count[i];
5019                 tc_valid[i] = 0;
5020
5021                 if (!(hdev->hw_tc_map & BIT(i)))
5022                         continue;
5023
5024                 /* tc_size set to hardware is the log2 of the roundup power
5025                  * of two of rss_size; the actual queue size is limited by
5026                  * the indirection table.
5027                  */
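                /* For example, rss_size = 10 rounds up to 16, so the tc_size
                 * written to hardware is ilog2(16) = 4.
                 */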
5028                 if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size ||
5029                     rss_size == 0) {
5030                         dev_err(&hdev->pdev->dev,
5031                                 "Configure rss tc size failed, invalid TC_SIZE = %u\n",
5032                                 rss_size);
5033                         return -EINVAL;
5034                 }
5035
5036                 roundup_size = roundup_pow_of_two(rss_size);
5037                 roundup_size = ilog2(roundup_size);
5038
5039                 tc_valid[i] = 1;
5040                 tc_size[i] = roundup_size;
5041                 tc_offset[i] = tc_info->tqp_offset[i];
5042         }
5043
5044         return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
5045 }
5046
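/* Push the cached RSS configuration of the PF vport (indirection table, hash
 * algorithm and key, input tuples and per-TC mode) into hardware.
 */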
5047 int hclge_rss_init_hw(struct hclge_dev *hdev)
5048 {
5049         struct hclge_vport *vport = hdev->vport;
5050         u16 *rss_indir = vport[0].rss_indirection_tbl;
5051         u8 *key = vport[0].rss_hash_key;
5052         u8 hfunc = vport[0].rss_algo;
5053         int ret;
5054
5055         ret = hclge_set_rss_indir_table(hdev, rss_indir);
5056         if (ret)
5057                 return ret;
5058
5059         ret = hclge_set_rss_algo_key(hdev, hfunc, key);
5060         if (ret)
5061                 return ret;
5062
5063         ret = hclge_set_rss_input_tuple(hdev);
5064         if (ret)
5065                 return ret;
5066
5067         return hclge_init_rss_tc_mode(hdev);
5068 }
5069
5070 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
5071 {
5072         struct hclge_vport *vport = &hdev->vport[0];
5073         int i;
5074
5075         for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
5076                 vport->rss_indirection_tbl[i] = i % vport->alloc_rss_size;
5077 }
5078
5079 static int hclge_rss_init_cfg(struct hclge_dev *hdev)
5080 {
5081         u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
5082         int rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
5083         struct hclge_vport *vport = &hdev->vport[0];
5084         u16 *rss_ind_tbl;
5085
5086         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
5087                 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
5088
5089         vport->rss_tuple_sets.ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5090         vport->rss_tuple_sets.ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5091         vport->rss_tuple_sets.ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
5092         vport->rss_tuple_sets.ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5093         vport->rss_tuple_sets.ipv6_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5094         vport->rss_tuple_sets.ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5095         vport->rss_tuple_sets.ipv6_sctp_en =
5096                 hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
5097                 HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
5098                 HCLGE_RSS_INPUT_TUPLE_SCTP;
5099         vport->rss_tuple_sets.ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5100
5101         vport->rss_algo = rss_algo;
5102
5103         rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
5104                                    sizeof(*rss_ind_tbl), GFP_KERNEL);
5105         if (!rss_ind_tbl)
5106                 return -ENOMEM;
5107
5108         vport->rss_indirection_tbl = rss_ind_tbl;
5109         memcpy(vport->rss_hash_key, hclge_hash_key, HCLGE_RSS_KEY_SIZE);
5110
5111         hclge_rss_indir_init_cfg(hdev);
5112
5113         return 0;
5114 }
5115
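/* Map (en == true) or unmap (en == false) a chain of TQP rings to or from an
 * interrupt vector. Ring entries are batched HCLGE_VECTOR_ELEMENTS_PER_CMD at
 * a time into command descriptors, and any partially filled descriptor is
 * flushed after the loop.
 */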
5116 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
5117                                 int vector_id, bool en,
5118                                 struct hnae3_ring_chain_node *ring_chain)
5119 {
5120         struct hclge_dev *hdev = vport->back;
5121         struct hnae3_ring_chain_node *node;
5122         struct hclge_desc desc;
5123         struct hclge_ctrl_vector_chain_cmd *req =
5124                 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
5125         enum hclge_cmd_status status;
5126         enum hclge_opcode_type op;
5127         u16 tqp_type_and_id;
5128         int i;
5129
5130         op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
5131         hclge_cmd_setup_basic_desc(&desc, op, false);
5132         req->int_vector_id_l = hnae3_get_field(vector_id,
5133                                                HCLGE_VECTOR_ID_L_M,
5134                                                HCLGE_VECTOR_ID_L_S);
5135         req->int_vector_id_h = hnae3_get_field(vector_id,
5136                                                HCLGE_VECTOR_ID_H_M,
5137                                                HCLGE_VECTOR_ID_H_S);
5138
5139         i = 0;
5140         for (node = ring_chain; node; node = node->next) {
5141                 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
5142                 hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
5143                                 HCLGE_INT_TYPE_S,
5144                                 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
5145                 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
5146                                 HCLGE_TQP_ID_S, node->tqp_index);
5147                 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
5148                                 HCLGE_INT_GL_IDX_S,
5149                                 hnae3_get_field(node->int_gl_idx,
5150                                                 HNAE3_RING_GL_IDX_M,
5151                                                 HNAE3_RING_GL_IDX_S));
5152                 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
5153                 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
5154                         req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
5155                         req->vfid = vport->vport_id;
5156
5157                         status = hclge_cmd_send(&hdev->hw, &desc, 1);
5158                         if (status) {
5159                                 dev_err(&hdev->pdev->dev,
5160                                         "Map TQP fail, status is %d.\n",
5161                                         status);
5162                                 return -EIO;
5163                         }
5164                         i = 0;
5165
5166                         hclge_cmd_setup_basic_desc(&desc,
5167                                                    op,
5168                                                    false);
5169                         req->int_vector_id_l =
5170                                 hnae3_get_field(vector_id,
5171                                                 HCLGE_VECTOR_ID_L_M,
5172                                                 HCLGE_VECTOR_ID_L_S);
5173                         req->int_vector_id_h =
5174                                 hnae3_get_field(vector_id,
5175                                                 HCLGE_VECTOR_ID_H_M,
5176                                                 HCLGE_VECTOR_ID_H_S);
5177                 }
5178         }
5179
5180         if (i > 0) {
5181                 req->int_cause_num = i;
5182                 req->vfid = vport->vport_id;
5183                 status = hclge_cmd_send(&hdev->hw, &desc, 1);
5184                 if (status) {
5185                         dev_err(&hdev->pdev->dev,
5186                                 "Map TQP fail, status is %d.\n", status);
5187                         return -EIO;
5188                 }
5189         }
5190
5191         return 0;
5192 }
5193
5194 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
5195                                     struct hnae3_ring_chain_node *ring_chain)
5196 {
5197         struct hclge_vport *vport = hclge_get_vport(handle);
5198         struct hclge_dev *hdev = vport->back;
5199         int vector_id;
5200
5201         vector_id = hclge_get_vector_index(hdev, vector);
5202         if (vector_id < 0) {
5203                 dev_err(&hdev->pdev->dev,
5204                         "failed to get vector index. vector=%d\n", vector);
5205                 return vector_id;
5206         }
5207
5208         return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
5209 }
5210
5211 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
5212                                        struct hnae3_ring_chain_node *ring_chain)
5213 {
5214         struct hclge_vport *vport = hclge_get_vport(handle);
5215         struct hclge_dev *hdev = vport->back;
5216         int vector_id, ret;
5217
5218         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
5219                 return 0;
5220
5221         vector_id = hclge_get_vector_index(hdev, vector);
5222         if (vector_id < 0) {
5223                 dev_err(&handle->pdev->dev,
5224                         "Get vector index fail. ret =%d\n", vector_id);
5225                 return vector_id;
5226         }
5227
5228         ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
5229         if (ret)
5230                 dev_err(&handle->pdev->dev,
5231                         "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
5232                         vector_id, ret);
5233
5234         return ret;
5235 }
5236
5237 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id,
5238                                       bool en_uc, bool en_mc, bool en_bc)
5239 {
5240         struct hclge_vport *vport = &hdev->vport[vf_id];
5241         struct hnae3_handle *handle = &vport->nic;
5242         struct hclge_promisc_cfg_cmd *req;
5243         struct hclge_desc desc;
5244         bool uc_tx_en = en_uc;
5245         u8 promisc_cfg = 0;
5246         int ret;
5247
5248         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
5249
5250         req = (struct hclge_promisc_cfg_cmd *)desc.data;
5251         req->vf_id = vf_id;
5252
5253         if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags))
5254                 uc_tx_en = false;
5255
5256         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0);
5257         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0);
5258         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0);
5259         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0);
5260         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0);
5261         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0);
5262         req->extend_promisc = promisc_cfg;
5263
5264         /* to be compatible with DEVICE_VERSION_V1/2 */
5265         promisc_cfg = 0;
5266         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0);
5267         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0);
5268         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 1 : 0);
5269         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1);
5270         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1);
5271         req->promisc = promisc_cfg;
5272
5273         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5274         if (ret)
5275                 dev_err(&hdev->pdev->dev,
5276                         "failed to set vport %u promisc mode, ret = %d.\n",
5277                         vf_id, ret);
5278
5279         return ret;
5280 }
5281
5282 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
5283                                  bool en_mc_pmc, bool en_bc_pmc)
5284 {
5285         return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id,
5286                                           en_uc_pmc, en_mc_pmc, en_bc_pmc);
5287 }
5288
5289 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
5290                                   bool en_mc_pmc)
5291 {
5292         struct hclge_vport *vport = hclge_get_vport(handle);
5293         struct hclge_dev *hdev = vport->back;
5294         bool en_bc_pmc = true;
5295
5296         /* For devices whose version is below V2, the vlan filter is always
5297          * bypassed when broadcast promisc is enabled. So broadcast promisc
5298          * should stay disabled until the user enables promisc mode
5299          */
5300         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
5301                 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
5302
5303         return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
5304                                             en_bc_pmc);
5305 }
5306
5307 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
5308 {
5309         struct hclge_vport *vport = hclge_get_vport(handle);
5310
5311         set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
5312 }
5313
5314 static void hclge_sync_fd_state(struct hclge_dev *hdev)
5315 {
5316         if (hlist_empty(&hdev->fd_rule_list))
5317                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5318 }
5319
5320 static void hclge_fd_inc_rule_cnt(struct hclge_dev *hdev, u16 location)
5321 {
5322         if (!test_bit(location, hdev->fd_bmap)) {
5323                 set_bit(location, hdev->fd_bmap);
5324                 hdev->hclge_fd_rule_num++;
5325         }
5326 }
5327
5328 static void hclge_fd_dec_rule_cnt(struct hclge_dev *hdev, u16 location)
5329 {
5330         if (test_bit(location, hdev->fd_bmap)) {
5331                 clear_bit(location, hdev->fd_bmap);
5332                 hdev->hclge_fd_rule_num--;
5333         }
5334 }
5335
5336 static void hclge_fd_free_node(struct hclge_dev *hdev,
5337                                struct hclge_fd_rule *rule)
5338 {
5339         hlist_del(&rule->rule_node);
5340         kfree(rule);
5341         hclge_sync_fd_state(hdev);
5342 }
5343
5344 static void hclge_update_fd_rule_node(struct hclge_dev *hdev,
5345                                       struct hclge_fd_rule *old_rule,
5346                                       struct hclge_fd_rule *new_rule,
5347                                       enum HCLGE_FD_NODE_STATE state)
5348 {
5349         switch (state) {
5350         case HCLGE_FD_TO_ADD:
5351         case HCLGE_FD_ACTIVE:
5352                 /* 1) if the new state is TO_ADD, just replace the old rule
5353                  * with the same location, no matter its state, because the
5354                  * new rule will be configured to the hardware.
5355                  * 2) if the new state is ACTIVE, it means the new rule has
5356                  * already been configured to the hardware, so just replace
5357                  * the old rule node with the same location.
5358                  * 3) since no new node is added to the list, there is no
5359                  * need to update the rule number or fd_bmap.
5360                  */
5361                 new_rule->rule_node.next = old_rule->rule_node.next;
5362                 new_rule->rule_node.pprev = old_rule->rule_node.pprev;
5363                 memcpy(old_rule, new_rule, sizeof(*old_rule));
5364                 kfree(new_rule);
5365                 break;
5366         case HCLGE_FD_DELETED:
5367                 hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5368                 hclge_fd_free_node(hdev, old_rule);
5369                 break;
5370         case HCLGE_FD_TO_DEL:
5371                 /* If the new request is TO_DEL and the old rule exists:
5372                  * 1) if the state of the old rule is TO_DEL, do nothing,
5373                  * because rules are deleted by location and the rest of the
5374                  * rule content is unnecessary.
5375                  * 2) if the state of the old rule is ACTIVE, change its
5376                  * state to TO_DEL, so the rule will be deleted when the
5377                  * periodic task is scheduled.
5378                  * 3) if the state of the old rule is TO_ADD, the rule has
5379                  * not been added to the hardware yet, so just delete the
5380                  * rule node from fd_rule_list directly.
5381                  */
5382                 if (old_rule->state == HCLGE_FD_TO_ADD) {
5383                         hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5384                         hclge_fd_free_node(hdev, old_rule);
5385                         return;
5386                 }
5387                 old_rule->state = HCLGE_FD_TO_DEL;
5388                 break;
5389         }
5390 }
5391
5392 static struct hclge_fd_rule *hclge_find_fd_rule(struct hlist_head *hlist,
5393                                                 u16 location,
5394                                                 struct hclge_fd_rule **parent)
5395 {
5396         struct hclge_fd_rule *rule;
5397         struct hlist_node *node;
5398
5399         hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
5400                 if (rule->location == location)
5401                         return rule;
5402                 else if (rule->location > location)
5403                         return NULL;
5404                 /* record the parent node, used to keep the nodes in
5405                  * fd_rule_list in ascending order.
5406                  */
5407                 *parent = rule;
5408         }
5409
5410         return NULL;
5411 }
5412
5413 /* insert the fd rule node in ascending order according to rule->location */
5414 static void hclge_fd_insert_rule_node(struct hlist_head *hlist,
5415                                       struct hclge_fd_rule *rule,
5416                                       struct hclge_fd_rule *parent)
5417 {
5418         INIT_HLIST_NODE(&rule->rule_node);
5419
5420         if (parent)
5421                 hlist_add_behind(&rule->rule_node, &parent->rule_node);
5422         else
5423                 hlist_add_head(&rule->rule_node, hlist);
5424 }
5425
5426 static int hclge_fd_set_user_def_cmd(struct hclge_dev *hdev,
5427                                      struct hclge_fd_user_def_cfg *cfg)
5428 {
5429         struct hclge_fd_user_def_cfg_cmd *req;
5430         struct hclge_desc desc;
5431         u16 data = 0;
5432         int ret;
5433
5434         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_USER_DEF_OP, false);
5435
5436         req = (struct hclge_fd_user_def_cfg_cmd *)desc.data;
5437
5438         hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[0].ref_cnt > 0);
5439         hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5440                         HCLGE_FD_USER_DEF_OFT_S, cfg[0].offset);
5441         req->ol2_cfg = cpu_to_le16(data);
5442
5443         data = 0;
5444         hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[1].ref_cnt > 0);
5445         hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5446                         HCLGE_FD_USER_DEF_OFT_S, cfg[1].offset);
5447         req->ol3_cfg = cpu_to_le16(data);
5448
5449         data = 0;
5450         hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[2].ref_cnt > 0);
5451         hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5452                         HCLGE_FD_USER_DEF_OFT_S, cfg[2].offset);
5453         req->ol4_cfg = cpu_to_le16(data);
5454
5455         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5456         if (ret)
5457                 dev_err(&hdev->pdev->dev,
5458                         "failed to set fd user def data, ret= %d\n", ret);
5459         return ret;
5460 }
5461
5462 static void hclge_sync_fd_user_def_cfg(struct hclge_dev *hdev, bool locked)
5463 {
5464         int ret;
5465
5466         if (!test_and_clear_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state))
5467                 return;
5468
5469         if (!locked)
5470                 spin_lock_bh(&hdev->fd_rule_lock);
5471
5472         ret = hclge_fd_set_user_def_cmd(hdev, hdev->fd_cfg.user_def_cfg);
5473         if (ret)
5474                 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5475
5476         if (!locked)
5477                 spin_unlock_bh(&hdev->fd_rule_lock);
5478 }
5479
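/* Each user-def layer (ol2/ol3/ol4) supports only one offset at a time,
 * shared by rules on that layer through a reference count. A new rule whose
 * offset differs from the one already in use on its layer is rejected, unless
 * the only existing reference belongs to the rule being replaced at the same
 * location.
 */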
5480 static int hclge_fd_check_user_def_refcnt(struct hclge_dev *hdev,
5481                                           struct hclge_fd_rule *rule)
5482 {
5483         struct hlist_head *hlist = &hdev->fd_rule_list;
5484         struct hclge_fd_rule *fd_rule, *parent = NULL;
5485         struct hclge_fd_user_def_info *info, *old_info;
5486         struct hclge_fd_user_def_cfg *cfg;
5487
5488         if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5489             rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5490                 return 0;
5491
5492         /* valid layers start from 1, so subtract 1 to get the cfg index */
5493         cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5494         info = &rule->ep.user_def;
5495
5496         if (!cfg->ref_cnt || cfg->offset == info->offset)
5497                 return 0;
5498
5499         if (cfg->ref_cnt > 1)
5500                 goto error;
5501
5502         fd_rule = hclge_find_fd_rule(hlist, rule->location, &parent);
5503         if (fd_rule) {
5504                 old_info = &fd_rule->ep.user_def;
5505                 if (info->layer == old_info->layer)
5506                         return 0;
5507         }
5508
5509 error:
5510         dev_err(&hdev->pdev->dev,
5511                 "No available offset for layer%d fd rule, each layer only support one user def offset.\n",
5512                 info->layer + 1);
5513         return -ENOSPC;
5514 }
5515
5516 static void hclge_fd_inc_user_def_refcnt(struct hclge_dev *hdev,
5517                                          struct hclge_fd_rule *rule)
5518 {
5519         struct hclge_fd_user_def_cfg *cfg;
5520
5521         if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5522             rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5523                 return;
5524
5525         cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5526         if (!cfg->ref_cnt) {
5527                 cfg->offset = rule->ep.user_def.offset;
5528                 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5529         }
5530         cfg->ref_cnt++;
5531 }
5532
5533 static void hclge_fd_dec_user_def_refcnt(struct hclge_dev *hdev,
5534                                          struct hclge_fd_rule *rule)
5535 {
5536         struct hclge_fd_user_def_cfg *cfg;
5537
5538         if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5539             rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5540                 return;
5541
5542         cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5543         if (!cfg->ref_cnt)
5544                 return;
5545
5546         cfg->ref_cnt--;
5547         if (!cfg->ref_cnt) {
5548                 cfg->offset = 0;
5549                 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5550         }
5551 }
5552
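     /* Update the rule list entry for @location: adjust the user-def offset
      * reference counts, sync the user-def config to hardware if it changed,
      * then either update the existing rule node or insert a new one and
      * schedule the service task to push it to hardware when needed.
      */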
5553 static void hclge_update_fd_list(struct hclge_dev *hdev,
5554                                  enum HCLGE_FD_NODE_STATE state, u16 location,
5555                                  struct hclge_fd_rule *new_rule)
5556 {
5557         struct hlist_head *hlist = &hdev->fd_rule_list;
5558         struct hclge_fd_rule *fd_rule, *parent = NULL;
5559
5560         fd_rule = hclge_find_fd_rule(hlist, location, &parent);
5561         if (fd_rule) {
5562                 hclge_fd_dec_user_def_refcnt(hdev, fd_rule);
5563                 if (state == HCLGE_FD_ACTIVE)
5564                         hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5565                 hclge_sync_fd_user_def_cfg(hdev, true);
5566
5567                 hclge_update_fd_rule_node(hdev, fd_rule, new_rule, state);
5568                 return;
5569         }
5570
5571         /* it's unlikely to fail here, because we have checked that the
5572          * rule exists before.
5573          */
5574         if (unlikely(state == HCLGE_FD_TO_DEL || state == HCLGE_FD_DELETED)) {
5575                 dev_warn(&hdev->pdev->dev,
5576                          "failed to delete fd rule %u, it does not exist\n",
5577                          location);
5578                 return;
5579         }
5580
5581         hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5582         hclge_sync_fd_user_def_cfg(hdev, true);
5583
5584         hclge_fd_insert_rule_node(hlist, new_rule, parent);
5585         hclge_fd_inc_rule_cnt(hdev, new_rule->location);
5586
5587         if (state == HCLGE_FD_TO_ADD) {
5588                 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
5589                 hclge_task_schedule(hdev, 0);
5590         }
5591 }
5592
5593 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
5594 {
5595         struct hclge_get_fd_mode_cmd *req;
5596         struct hclge_desc desc;
5597         int ret;
5598
5599         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
5600
5601         req = (struct hclge_get_fd_mode_cmd *)desc.data;
5602
5603         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5604         if (ret) {
5605                 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
5606                 return ret;
5607         }
5608
5609         *fd_mode = req->mode;
5610
5611         return ret;
5612 }
5613
5614 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
5615                                    u32 *stage1_entry_num,
5616                                    u32 *stage2_entry_num,
5617                                    u16 *stage1_counter_num,
5618                                    u16 *stage2_counter_num)
5619 {
5620         struct hclge_get_fd_allocation_cmd *req;
5621         struct hclge_desc desc;
5622         int ret;
5623
5624         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
5625
5626         req = (struct hclge_get_fd_allocation_cmd *)desc.data;
5627
5628         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5629         if (ret) {
5630                 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
5631                         ret);
5632                 return ret;
5633         }
5634
5635         *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
5636         *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
5637         *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
5638         *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
5639
5640         return ret;
5641 }
5642
5643 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
5644                                    enum HCLGE_FD_STAGE stage_num)
5645 {
5646         struct hclge_set_fd_key_config_cmd *req;
5647         struct hclge_fd_key_cfg *stage;
5648         struct hclge_desc desc;
5649         int ret;
5650
5651         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
5652
5653         req = (struct hclge_set_fd_key_config_cmd *)desc.data;
5654         stage = &hdev->fd_cfg.key_cfg[stage_num];
5655         req->stage = stage_num;
5656         req->key_select = stage->key_sel;
5657         req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
5658         req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
5659         req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
5660         req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
5661         req->tuple_mask = cpu_to_le32(~stage->tuple_active);
5662         req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
5663
5664         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5665         if (ret)
5666                 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
5667
5668         return ret;
5669 }
5670
5671 static void hclge_fd_disable_user_def(struct hclge_dev *hdev)
5672 {
5673         struct hclge_fd_user_def_cfg *cfg = hdev->fd_cfg.user_def_cfg;
5674
5675         spin_lock_bh(&hdev->fd_rule_lock);
5676         memset(cfg, 0, sizeof(hdev->fd_cfg.user_def_cfg));
5677         spin_unlock_bh(&hdev->fd_rule_lock);
5678
5679         hclge_fd_set_user_def_cmd(hdev, cfg);
5680 }
5681
5682 static int hclge_init_fd_config(struct hclge_dev *hdev)
5683 {
5684 #define LOW_2_WORDS             0x03
5685         struct hclge_fd_key_cfg *key_cfg;
5686         int ret;
5687
5688         if (!hnae3_dev_fd_supported(hdev))
5689                 return 0;
5690
5691         ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
5692         if (ret)
5693                 return ret;
5694
5695         switch (hdev->fd_cfg.fd_mode) {
5696         case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
5697                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
5698                 break;
5699         case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
5700                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
5701                 break;
5702         default:
5703                 dev_err(&hdev->pdev->dev,
5704                         "Unsupported flow director mode %u\n",
5705                         hdev->fd_cfg.fd_mode);
5706                 return -EOPNOTSUPP;
5707         }
5708
5709         key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
5710         key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
5711         key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
5712         key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
5713         key_cfg->outer_sipv6_word_en = 0;
5714         key_cfg->outer_dipv6_word_en = 0;
5715
5716         key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
5717                                 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
5718                                 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5719                                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5720
5721         /* If the max 400-bit key is used, tuples for ether type are supported */
5722         if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5723                 key_cfg->tuple_active |=
5724                                 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
5725                 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
5726                         key_cfg->tuple_active |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
5727         }
5728
5729         /* roce_type is used to filter roce frames,
5730          * dst_vport is used to specify the target vport of the rule
5731          */
5732         key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
5733
5734         ret = hclge_get_fd_allocation(hdev,
5735                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
5736                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
5737                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
5738                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
5739         if (ret)
5740                 return ret;
5741
5742         return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
5743 }
5744
5745 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
5746                                 int loc, u8 *key, bool is_add)
5747 {
5748         struct hclge_fd_tcam_config_1_cmd *req1;
5749         struct hclge_fd_tcam_config_2_cmd *req2;
5750         struct hclge_fd_tcam_config_3_cmd *req3;
5751         struct hclge_desc desc[3];
5752         int ret;
5753
5754         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
5755         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5756         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
5757         desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5758         hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
5759
5760         req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
5761         req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
5762         req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
5763
5764         req1->stage = stage;
5765         req1->xy_sel = sel_x ? 1 : 0;
5766         hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
5767         req1->index = cpu_to_le32(loc);
5768         req1->entry_vld = sel_x ? is_add : 0;
5769
5770         if (key) {
5771                 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
5772                 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
5773                        sizeof(req2->tcam_data));
5774                 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
5775                        sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
5776         }
5777
5778         ret = hclge_cmd_send(&hdev->hw, desc, 3);
5779         if (ret)
5780                 dev_err(&hdev->pdev->dev,
5781                         "config tcam key fail, ret=%d\n",
5782                         ret);
5783
5784         return ret;
5785 }
5786
5787 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5788                               struct hclge_fd_ad_data *action)
5789 {
5790         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
5791         struct hclge_fd_ad_config_cmd *req;
5792         struct hclge_desc desc;
5793         u64 ad_data = 0;
5794         int ret;
5795
5796         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5797
5798         req = (struct hclge_fd_ad_config_cmd *)desc.data;
5799         req->index = cpu_to_le32(loc);
5800         req->stage = stage;
5801
5802         hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5803                       action->write_rule_id_to_bd);
5804         hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5805                         action->rule_id);
5806         if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
5807                 hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B,
5808                               action->override_tc);
5809                 hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M,
5810                                 HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size);
5811         }
5812         ad_data <<= 32;
5813         hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5814         hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5815                       action->forward_to_direct_queue);
5816         hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5817                         action->queue_id);
5818         hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5819         hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5820                         HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5821         hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5822         hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5823                         action->next_input_key);
5824
5825         req->ad_data = cpu_to_le64(ad_data);
5826         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5827         if (ret)
5828                 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5829
5830         return ret;
5831 }
5832
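     /* Convert one tuple of @rule into its TCAM X/Y key representation via
      * calc_x()/calc_y(), honouring the tuple's key option (u8, le16, le32,
      * MAC or IP). Returns true if the tuple is unused or was converted,
      * false for an unknown key option.
      */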
5833 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5834                                    struct hclge_fd_rule *rule)
5835 {
5836         int offset, moffset, ip_offset;
5837         enum HCLGE_FD_KEY_OPT key_opt;
5838         u16 tmp_x_s, tmp_y_s;
5839         u32 tmp_x_l, tmp_y_l;
5840         u8 *p = (u8 *)rule;
5841         int i;
5842
5843         if (rule->unused_tuple & BIT(tuple_bit))
5844                 return true;
5845
5846         key_opt = tuple_key_info[tuple_bit].key_opt;
5847         offset = tuple_key_info[tuple_bit].offset;
5848         moffset = tuple_key_info[tuple_bit].moffset;
5849
5850         switch (key_opt) {
5851         case KEY_OPT_U8:
5852                 calc_x(*key_x, p[offset], p[moffset]);
5853                 calc_y(*key_y, p[offset], p[moffset]);
5854
5855                 return true;
5856         case KEY_OPT_LE16:
5857                 calc_x(tmp_x_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5858                 calc_y(tmp_y_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5859                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5860                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5861
5862                 return true;
5863         case KEY_OPT_LE32:
5864                 calc_x(tmp_x_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5865                 calc_y(tmp_y_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5866                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5867                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5868
5869                 return true;
5870         case KEY_OPT_MAC:
5871                 for (i = 0; i < ETH_ALEN; i++) {
5872                         calc_x(key_x[ETH_ALEN - 1 - i], p[offset + i],
5873                                p[moffset + i]);
5874                         calc_y(key_y[ETH_ALEN - 1 - i], p[offset + i],
5875                                p[moffset + i]);
5876                 }
5877
5878                 return true;
5879         case KEY_OPT_IP:
5880                 ip_offset = IPV4_INDEX * sizeof(u32);
5881                 calc_x(tmp_x_l, *(u32 *)(&p[offset + ip_offset]),
5882                        *(u32 *)(&p[moffset + ip_offset]));
5883                 calc_y(tmp_y_l, *(u32 *)(&p[offset + ip_offset]),
5884                        *(u32 *)(&p[moffset + ip_offset]));
5885                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5886                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5887
5888                 return true;
5889         default:
5890                 return false;
5891         }
5892 }
5893
5894 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5895                                  u8 vf_id, u8 network_port_id)
5896 {
5897         u32 port_number = 0;
5898
5899         if (port_type == HOST_PORT) {
5900                 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5901                                 pf_id);
5902                 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5903                                 vf_id);
5904                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5905         } else {
5906                 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5907                                 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5908                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5909         }
5910
5911         return port_number;
5912 }
5913
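     /* Pack the active meta data tuples (packet type and destination vport)
      * into a 32-bit word, left-align the used bits and convert the word to
      * TCAM X/Y format for the MSB region of the key.
      */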
5914 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5915                                        __le32 *key_x, __le32 *key_y,
5916                                        struct hclge_fd_rule *rule)
5917 {
5918         u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5919         u8 cur_pos = 0, tuple_size, shift_bits;
5920         unsigned int i;
5921
5922         for (i = 0; i < MAX_META_DATA; i++) {
5923                 tuple_size = meta_data_key_info[i].key_length;
5924                 tuple_bit = key_cfg->meta_data_active & BIT(i);
5925
5926                 switch (tuple_bit) {
5927                 case BIT(ROCE_TYPE):
5928                         hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5929                         cur_pos += tuple_size;
5930                         break;
5931                 case BIT(DST_VPORT):
5932                         port_number = hclge_get_port_number(HOST_PORT, 0,
5933                                                             rule->vf_id, 0);
5934                         hnae3_set_field(meta_data,
5935                                         GENMASK(cur_pos + tuple_size, cur_pos),
5936                                         cur_pos, port_number);
5937                         cur_pos += tuple_size;
5938                         break;
5939                 default:
5940                         break;
5941                 }
5942         }
5943
5944         calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5945         calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5946         shift_bits = sizeof(meta_data) * 8 - cur_pos;
5947
5948         *key_x = cpu_to_le32(tmp_x << shift_bits);
5949         *key_y = cpu_to_le32(tmp_y << shift_bits);
5950 }
5951
5952 /* A complete key is combined with meta data key and tuple key.
5953  * Meta data key is stored at the MSB region, and tuple key is stored at
5954  * the LSB region, unused bits are filled with 0.
5955  */
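     /* For illustration (assuming MAX_KEY_LENGTH is 400 bits and
      * MAX_META_DATA_LENGTH is 32 bits): meta_data_region below works out to
      * 400 / 8 - 32 / 8 = 46, so bytes 46..49 of key_x/key_y hold the meta
      * data key and the packed tuple keys start at byte 0.
      */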
5956 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5957                             struct hclge_fd_rule *rule)
5958 {
5959         struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5960         u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5961         u8 *cur_key_x, *cur_key_y;
5962         u8 meta_data_region;
5963         u8 tuple_size;
5964         int ret;
5965         u32 i;
5966
5967         memset(key_x, 0, sizeof(key_x));
5968         memset(key_y, 0, sizeof(key_y));
5969         cur_key_x = key_x;
5970         cur_key_y = key_y;
5971
5972         for (i = 0; i < MAX_TUPLE; i++) {
5973                 bool tuple_valid;
5974
5975                 tuple_size = tuple_key_info[i].key_length / 8;
5976                 if (!(key_cfg->tuple_active & BIT(i)))
5977                         continue;
5978
5979                 tuple_valid = hclge_fd_convert_tuple(i, cur_key_x,
5980                                                      cur_key_y, rule);
5981                 if (tuple_valid) {
5982                         cur_key_x += tuple_size;
5983                         cur_key_y += tuple_size;
5984                 }
5985         }
5986
5987         meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5988                         MAX_META_DATA_LENGTH / 8;
5989
5990         hclge_fd_convert_meta_data(key_cfg,
5991                                    (__le32 *)(key_x + meta_data_region),
5992                                    (__le32 *)(key_y + meta_data_region),
5993                                    rule);
5994
5995         ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5996                                    true);
5997         if (ret) {
5998                 dev_err(&hdev->pdev->dev,
5999                         "fd key_y config fail, loc=%u, ret=%d\n",
6000                         rule->location, ret);
6001                 return ret;
6002         }
6003
6004         ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
6005                                    true);
6006         if (ret)
6007                 dev_err(&hdev->pdev->dev,
6008                         "fd key_x config fail, loc=%u, ret=%d\n",
6009                         rule->location, ret);
6010         return ret;
6011 }
6012
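     /* Build the action data for @rule: drop the packet, redirect to the
      * queue range of the selected TC, or forward to a specific queue;
      * attach a per-VF counter when counters are available and let hardware
      * write the rule id back to the RX buffer descriptor.
      */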
6013 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
6014                                struct hclge_fd_rule *rule)
6015 {
6016         struct hclge_vport *vport = hdev->vport;
6017         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
6018         struct hclge_fd_ad_data ad_data;
6019
6020         memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data));
6021         ad_data.ad_id = rule->location;
6022
6023         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6024                 ad_data.drop_packet = true;
6025         } else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) {
6026                 ad_data.override_tc = true;
6027                 ad_data.queue_id =
6028                         kinfo->tc_info.tqp_offset[rule->cls_flower.tc];
6029                 ad_data.tc_size =
6030                         ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]);
6031         } else {
6032                 ad_data.forward_to_direct_queue = true;
6033                 ad_data.queue_id = rule->queue_id;
6034         }
6035
6036         if (hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1]) {
6037                 ad_data.use_counter = true;
6038                 ad_data.counter_id = rule->vf_id %
6039                                      hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1];
6040         } else {
6041                 ad_data.use_counter = false;
6042                 ad_data.counter_id = 0;
6043         }
6044
6045         ad_data.use_next_stage = false;
6046         ad_data.next_input_key = 0;
6047
6048         ad_data.write_rule_id_to_bd = true;
6049         ad_data.rule_id = rule->location;
6050
6051         return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
6052 }
6053
6054 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
6055                                        u32 *unused_tuple)
6056 {
6057         if (!spec || !unused_tuple)
6058                 return -EINVAL;
6059
6060         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
6061
6062         if (!spec->ip4src)
6063                 *unused_tuple |= BIT(INNER_SRC_IP);
6064
6065         if (!spec->ip4dst)
6066                 *unused_tuple |= BIT(INNER_DST_IP);
6067
6068         if (!spec->psrc)
6069                 *unused_tuple |= BIT(INNER_SRC_PORT);
6070
6071         if (!spec->pdst)
6072                 *unused_tuple |= BIT(INNER_DST_PORT);
6073
6074         if (!spec->tos)
6075                 *unused_tuple |= BIT(INNER_IP_TOS);
6076
6077         return 0;
6078 }
6079
6080 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
6081                                     u32 *unused_tuple)
6082 {
6083         if (!spec || !unused_tuple)
6084                 return -EINVAL;
6085
6086         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6087                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
6088
6089         if (!spec->ip4src)
6090                 *unused_tuple |= BIT(INNER_SRC_IP);
6091
6092         if (!spec->ip4dst)
6093                 *unused_tuple |= BIT(INNER_DST_IP);
6094
6095         if (!spec->tos)
6096                 *unused_tuple |= BIT(INNER_IP_TOS);
6097
6098         if (!spec->proto)
6099                 *unused_tuple |= BIT(INNER_IP_PROTO);
6100
6101         if (spec->l4_4_bytes)
6102                 return -EOPNOTSUPP;
6103
6104         if (spec->ip_ver != ETH_RX_NFC_IP4)
6105                 return -EOPNOTSUPP;
6106
6107         return 0;
6108 }
6109
6110 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
6111                                        u32 *unused_tuple)
6112 {
6113         if (!spec || !unused_tuple)
6114                 return -EINVAL;
6115
6116         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
6117
6118         /* check whether src/dst ip address is used */
6119         if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
6120                 *unused_tuple |= BIT(INNER_SRC_IP);
6121
6122         if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
6123                 *unused_tuple |= BIT(INNER_DST_IP);
6124
6125         if (!spec->psrc)
6126                 *unused_tuple |= BIT(INNER_SRC_PORT);
6127
6128         if (!spec->pdst)
6129                 *unused_tuple |= BIT(INNER_DST_PORT);
6130
6131         if (!spec->tclass)
6132                 *unused_tuple |= BIT(INNER_IP_TOS);
6133
6134         return 0;
6135 }
6136
6137 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
6138                                     u32 *unused_tuple)
6139 {
6140         if (!spec || !unused_tuple)
6141                 return -EINVAL;
6142
6143         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6144                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
6145
6146         /* check whether src/dst ip address is used */
6147         if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
6148                 *unused_tuple |= BIT(INNER_SRC_IP);
6149
6150         if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
6151                 *unused_tuple |= BIT(INNER_DST_IP);
6152
6153         if (!spec->l4_proto)
6154                 *unused_tuple |= BIT(INNER_IP_PROTO);
6155
6156         if (!spec->tclass)
6157                 *unused_tuple |= BIT(INNER_IP_TOS);
6158
6159         if (spec->l4_4_bytes)
6160                 return -EOPNOTSUPP;
6161
6162         return 0;
6163 }
6164
6165 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
6166 {
6167         if (!spec || !unused_tuple)
6168                 return -EINVAL;
6169
6170         *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
6171                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
6172                 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
6173
6174         if (is_zero_ether_addr(spec->h_source))
6175                 *unused_tuple |= BIT(INNER_SRC_MAC);
6176
6177         if (is_zero_ether_addr(spec->h_dest))
6178                 *unused_tuple |= BIT(INNER_DST_MAC);
6179
6180         if (!spec->h_proto)
6181                 *unused_tuple |= BIT(INNER_ETH_TYPE);
6182
6183         return 0;
6184 }
6185
6186 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
6187                                     struct ethtool_rx_flow_spec *fs,
6188                                     u32 *unused_tuple)
6189 {
6190         if (fs->flow_type & FLOW_EXT) {
6191                 if (fs->h_ext.vlan_etype) {
6192                         dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
6193                         return -EOPNOTSUPP;
6194                 }
6195
6196                 if (!fs->h_ext.vlan_tci)
6197                         *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6198
6199                 if (fs->m_ext.vlan_tci &&
6200                     be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
6201                         dev_err(&hdev->pdev->dev,
6202                                 "failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n",
6203                                 ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
6204                         return -EINVAL;
6205                 }
6206         } else {
6207                 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6208         }
6209
6210         if (fs->flow_type & FLOW_MAC_EXT) {
6211                 if (hdev->fd_cfg.fd_mode !=
6212                     HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6213                         dev_err(&hdev->pdev->dev,
6214                                 "FLOW_MAC_EXT is not supported in current fd mode!\n");
6215                         return -EOPNOTSUPP;
6216                 }
6217
6218                 if (is_zero_ether_addr(fs->h_ext.h_dest))
6219                         *unused_tuple |= BIT(INNER_DST_MAC);
6220                 else
6221                         *unused_tuple &= ~BIT(INNER_DST_MAC);
6222         }
6223
6224         return 0;
6225 }
6226
6227 static int hclge_fd_get_user_def_layer(u32 flow_type, u32 *unused_tuple,
6228                                        struct hclge_fd_user_def_info *info)
6229 {
6230         switch (flow_type) {
6231         case ETHER_FLOW:
6232                 info->layer = HCLGE_FD_USER_DEF_L2;
6233                 *unused_tuple &= ~BIT(INNER_L2_RSV);
6234                 break;
6235         case IP_USER_FLOW:
6236         case IPV6_USER_FLOW:
6237                 info->layer = HCLGE_FD_USER_DEF_L3;
6238                 *unused_tuple &= ~BIT(INNER_L3_RSV);
6239                 break;
6240         case TCP_V4_FLOW:
6241         case UDP_V4_FLOW:
6242         case TCP_V6_FLOW:
6243         case UDP_V6_FLOW:
6244                 info->layer = HCLGE_FD_USER_DEF_L4;
6245                 *unused_tuple &= ~BIT(INNER_L4_RSV);
6246                 break;
6247         default:
6248                 return -EOPNOTSUPP;
6249         }
6250
6251         return 0;
6252 }
6253
6254 static bool hclge_fd_is_user_def_all_masked(struct ethtool_rx_flow_spec *fs)
6255 {
6256         return be32_to_cpu(fs->m_ext.data[1] | fs->m_ext.data[0]) == 0;
6257 }
6258
6259 static int hclge_fd_parse_user_def_field(struct hclge_dev *hdev,
6260                                          struct ethtool_rx_flow_spec *fs,
6261                                          u32 *unused_tuple,
6262                                          struct hclge_fd_user_def_info *info)
6263 {
6264         u32 tuple_active = hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1].tuple_active;
6265         u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6266         u16 data, offset, data_mask, offset_mask;
6267         int ret;
6268
6269         info->layer = HCLGE_FD_USER_DEF_NONE;
6270         *unused_tuple |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
6271
6272         if (!(fs->flow_type & FLOW_EXT) || hclge_fd_is_user_def_all_masked(fs))
6273                 return 0;
6274
6275         /* user-def data from ethtool is a 64 bit value, bit0~15 is used
6276          * for data, and bit32~47 is used for offset.
6277          */
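             /* For example (illustrative values only): "ethtool -N <dev>
              * flow-type tcp4 ... user-def 0x00040000abcd" encodes an offset
              * of 4 (h_ext.data[0]) and a match value of 0xabcd
              * (h_ext.data[1]).
              */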
6278         data = be32_to_cpu(fs->h_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6279         data_mask = be32_to_cpu(fs->m_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6280         offset = be32_to_cpu(fs->h_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6281         offset_mask = be32_to_cpu(fs->m_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6282
6283         if (!(tuple_active & HCLGE_FD_TUPLE_USER_DEF_TUPLES)) {
6284                 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
6285                 return -EOPNOTSUPP;
6286         }
6287
6288         if (offset > HCLGE_FD_MAX_USER_DEF_OFFSET) {
6289                 dev_err(&hdev->pdev->dev,
6290                         "user-def offset[%u] should be no more than %u\n",
6291                         offset, HCLGE_FD_MAX_USER_DEF_OFFSET);
6292                 return -EINVAL;
6293         }
6294
6295         if (offset_mask != HCLGE_FD_USER_DEF_OFFSET_UNMASK) {
6296                 dev_err(&hdev->pdev->dev, "user-def offset can't be masked\n");
6297                 return -EINVAL;
6298         }
6299
6300         ret = hclge_fd_get_user_def_layer(flow_type, unused_tuple, info);
6301         if (ret) {
6302                 dev_err(&hdev->pdev->dev,
6303                         "unsupported flow type for user-def bytes, ret = %d\n",
6304                         ret);
6305                 return ret;
6306         }
6307
6308         info->data = data;
6309         info->data_mask = data_mask;
6310         info->offset = offset;
6311
6312         return 0;
6313 }
6314
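     /* Validate the ethtool flow spec: check the rule location, parse any
      * user-def field, then verify the per-flow-type tuples and the FLOW_EXT /
      * FLOW_MAC_EXT extensions, collecting the unused tuple bits on the way.
      */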
6315 static int hclge_fd_check_spec(struct hclge_dev *hdev,
6316                                struct ethtool_rx_flow_spec *fs,
6317                                u32 *unused_tuple,
6318                                struct hclge_fd_user_def_info *info)
6319 {
6320         u32 flow_type;
6321         int ret;
6322
6323         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6324                 dev_err(&hdev->pdev->dev,
6325                         "failed to config fd rules, invalid rule location: %u, max is %u.\n",
6326                         fs->location,
6327                         hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
6328                 return -EINVAL;
6329         }
6330
6331         ret = hclge_fd_parse_user_def_field(hdev, fs, unused_tuple, info);
6332         if (ret)
6333                 return ret;
6334
6335         flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6336         switch (flow_type) {
6337         case SCTP_V4_FLOW:
6338         case TCP_V4_FLOW:
6339         case UDP_V4_FLOW:
6340                 ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
6341                                                   unused_tuple);
6342                 break;
6343         case IP_USER_FLOW:
6344                 ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
6345                                                unused_tuple);
6346                 break;
6347         case SCTP_V6_FLOW:
6348         case TCP_V6_FLOW:
6349         case UDP_V6_FLOW:
6350                 ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
6351                                                   unused_tuple);
6352                 break;
6353         case IPV6_USER_FLOW:
6354                 ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
6355                                                unused_tuple);
6356                 break;
6357         case ETHER_FLOW:
6358                 if (hdev->fd_cfg.fd_mode !=
6359                         HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6360                         dev_err(&hdev->pdev->dev,
6361                                 "ETHER_FLOW is not supported in current fd mode!\n");
6362                         return -EOPNOTSUPP;
6363                 }
6364
6365                 ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
6366                                                  unused_tuple);
6367                 break;
6368         default:
6369                 dev_err(&hdev->pdev->dev,
6370                         "unsupported protocol type, protocol type = %#x\n",
6371                         flow_type);
6372                 return -EOPNOTSUPP;
6373         }
6374
6375         if (ret) {
6376                 dev_err(&hdev->pdev->dev,
6377                         "failed to check flow union tuple, ret = %d\n",
6378                         ret);
6379                 return ret;
6380         }
6381
6382         return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
6383 }
6384
6385 static void hclge_fd_get_tcpip4_tuple(struct hclge_dev *hdev,
6386                                       struct ethtool_rx_flow_spec *fs,
6387                                       struct hclge_fd_rule *rule, u8 ip_proto)
6388 {
6389         rule->tuples.src_ip[IPV4_INDEX] =
6390                         be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
6391         rule->tuples_mask.src_ip[IPV4_INDEX] =
6392                         be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
6393
6394         rule->tuples.dst_ip[IPV4_INDEX] =
6395                         be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
6396         rule->tuples_mask.dst_ip[IPV4_INDEX] =
6397                         be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
6398
6399         rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
6400         rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
6401
6402         rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
6403         rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
6404
6405         rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
6406         rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
6407
6408         rule->tuples.ether_proto = ETH_P_IP;
6409         rule->tuples_mask.ether_proto = 0xFFFF;
6410
6411         rule->tuples.ip_proto = ip_proto;
6412         rule->tuples_mask.ip_proto = 0xFF;
6413 }
6414
6415 static void hclge_fd_get_ip4_tuple(struct hclge_dev *hdev,
6416                                    struct ethtool_rx_flow_spec *fs,
6417                                    struct hclge_fd_rule *rule)
6418 {
6419         rule->tuples.src_ip[IPV4_INDEX] =
6420                         be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
6421         rule->tuples_mask.src_ip[IPV4_INDEX] =
6422                         be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
6423
6424         rule->tuples.dst_ip[IPV4_INDEX] =
6425                         be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
6426         rule->tuples_mask.dst_ip[IPV4_INDEX] =
6427                         be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
6428
6429         rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
6430         rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
6431
6432         rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
6433         rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
6434
6435         rule->tuples.ether_proto = ETH_P_IP;
6436         rule->tuples_mask.ether_proto = 0xFFFF;
6437 }
6438
6439 static void hclge_fd_get_tcpip6_tuple(struct hclge_dev *hdev,
6440                                       struct ethtool_rx_flow_spec *fs,
6441                                       struct hclge_fd_rule *rule, u8 ip_proto)
6442 {
6443         be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.tcp_ip6_spec.ip6src,
6444                           IPV6_SIZE);
6445         be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.tcp_ip6_spec.ip6src,
6446                           IPV6_SIZE);
6447
6448         be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.tcp_ip6_spec.ip6dst,
6449                           IPV6_SIZE);
6450         be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.tcp_ip6_spec.ip6dst,
6451                           IPV6_SIZE);
6452
6453         rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
6454         rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
6455
6456         rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
6457         rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
6458
6459         rule->tuples.ether_proto = ETH_P_IPV6;
6460         rule->tuples_mask.ether_proto = 0xFFFF;
6461
6462         rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
6463         rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;
6464
6465         rule->tuples.ip_proto = ip_proto;
6466         rule->tuples_mask.ip_proto = 0xFF;
6467 }
6468
6469 static void hclge_fd_get_ip6_tuple(struct hclge_dev *hdev,
6470                                    struct ethtool_rx_flow_spec *fs,
6471                                    struct hclge_fd_rule *rule)
6472 {
6473         be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.usr_ip6_spec.ip6src,
6474                           IPV6_SIZE);
6475         be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.usr_ip6_spec.ip6src,
6476                           IPV6_SIZE);
6477
6478         be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.usr_ip6_spec.ip6dst,
6479                           IPV6_SIZE);
6480         be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.usr_ip6_spec.ip6dst,
6481                           IPV6_SIZE);
6482
6483         rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
6484         rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
6485
6486         rule->tuples.ip_tos = fs->h_u.usr_ip6_spec.tclass;
6487         rule->tuples_mask.ip_tos = fs->m_u.usr_ip6_spec.tclass;
6488
6489         rule->tuples.ether_proto = ETH_P_IPV6;
6490         rule->tuples_mask.ether_proto = 0xFFFF;
6491 }
6492
6493 static void hclge_fd_get_ether_tuple(struct hclge_dev *hdev,
6494                                      struct ethtool_rx_flow_spec *fs,
6495                                      struct hclge_fd_rule *rule)
6496 {
6497         ether_addr_copy(rule->tuples.src_mac, fs->h_u.ether_spec.h_source);
6498         ether_addr_copy(rule->tuples_mask.src_mac, fs->m_u.ether_spec.h_source);
6499
6500         ether_addr_copy(rule->tuples.dst_mac, fs->h_u.ether_spec.h_dest);
6501         ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_u.ether_spec.h_dest);
6502
6503         rule->tuples.ether_proto = be16_to_cpu(fs->h_u.ether_spec.h_proto);
6504         rule->tuples_mask.ether_proto = be16_to_cpu(fs->m_u.ether_spec.h_proto);
6505 }
6506
6507 static void hclge_fd_get_user_def_tuple(struct hclge_fd_user_def_info *info,
6508                                         struct hclge_fd_rule *rule)
6509 {
6510         switch (info->layer) {
6511         case HCLGE_FD_USER_DEF_L2:
6512                 rule->tuples.l2_user_def = info->data;
6513                 rule->tuples_mask.l2_user_def = info->data_mask;
6514                 break;
6515         case HCLGE_FD_USER_DEF_L3:
6516                 rule->tuples.l3_user_def = info->data;
6517                 rule->tuples_mask.l3_user_def = info->data_mask;
6518                 break;
6519         case HCLGE_FD_USER_DEF_L4:
6520                 rule->tuples.l4_user_def = (u32)info->data << 16;
6521                 rule->tuples_mask.l4_user_def = (u32)info->data_mask << 16;
6522                 break;
6523         default:
6524                 break;
6525         }
6526
6527         rule->ep.user_def = *info;
6528 }
6529
6530 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
6531                               struct ethtool_rx_flow_spec *fs,
6532                               struct hclge_fd_rule *rule,
6533                               struct hclge_fd_user_def_info *info)
6534 {
6535         u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6536
6537         switch (flow_type) {
6538         case SCTP_V4_FLOW:
6539                 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_SCTP);
6540                 break;
6541         case TCP_V4_FLOW:
6542                 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_TCP);
6543                 break;
6544         case UDP_V4_FLOW:
6545                 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_UDP);
6546                 break;
6547         case IP_USER_FLOW:
6548                 hclge_fd_get_ip4_tuple(hdev, fs, rule);
6549                 break;
6550         case SCTP_V6_FLOW:
6551                 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_SCTP);
6552                 break;
6553         case TCP_V6_FLOW:
6554                 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_TCP);
6555                 break;
6556         case UDP_V6_FLOW:
6557                 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_UDP);
6558                 break;
6559         case IPV6_USER_FLOW:
6560                 hclge_fd_get_ip6_tuple(hdev, fs, rule);
6561                 break;
6562         case ETHER_FLOW:
6563                 hclge_fd_get_ether_tuple(hdev, fs, rule);
6564                 break;
6565         default:
6566                 return -EOPNOTSUPP;
6567         }
6568
6569         if (fs->flow_type & FLOW_EXT) {
6570                 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
6571                 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
6572                 hclge_fd_get_user_def_tuple(info, rule);
6573         }
6574
6575         if (fs->flow_type & FLOW_MAC_EXT) {
6576                 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
6577                 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
6578         }
6579
6580         return 0;
6581 }
6582
6583 static int hclge_fd_config_rule(struct hclge_dev *hdev,
6584                                 struct hclge_fd_rule *rule)
6585 {
6586         int ret;
6587
6588         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6589         if (ret)
6590                 return ret;
6591
6592         return hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6593 }
6594
6595 static int hclge_add_fd_entry_common(struct hclge_dev *hdev,
6596                                      struct hclge_fd_rule *rule)
6597 {
6598         int ret;
6599
6600         spin_lock_bh(&hdev->fd_rule_lock);
6601
6602         if (hdev->fd_active_type != rule->rule_type &&
6603             (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6604              hdev->fd_active_type == HCLGE_FD_EP_ACTIVE)) {
6605                 dev_err(&hdev->pdev->dev,
6606                         "mode conflict (new type %d, active type %d), please delete existing rules first\n",
6607                         rule->rule_type, hdev->fd_active_type);
6608                 spin_unlock_bh(&hdev->fd_rule_lock);
6609                 return -EINVAL;
6610         }
6611
6612         ret = hclge_fd_check_user_def_refcnt(hdev, rule);
6613         if (ret)
6614                 goto out;
6615
6616         ret = hclge_clear_arfs_rules(hdev);
6617         if (ret)
6618                 goto out;
6619
6620         ret = hclge_fd_config_rule(hdev, rule);
6621         if (ret)
6622                 goto out;
6623
6624         rule->state = HCLGE_FD_ACTIVE;
6625         hdev->fd_active_type = rule->rule_type;
6626         hclge_update_fd_list(hdev, rule->state, rule->location, rule);
6627
6628 out:
6629         spin_unlock_bh(&hdev->fd_rule_lock);
6630         return ret;
6631 }
6632
6633 static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
6634 {
6635         struct hclge_vport *vport = hclge_get_vport(handle);
6636         struct hclge_dev *hdev = vport->back;
6637
6638         return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
6639 }
6640
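     /* Decode the ethtool ring_cookie into an action: either drop the packet,
      * or select the destination vport and queue, validating the requested
      * vf id and queue index against the device limits.
      */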
6641 static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
6642                                       u16 *vport_id, u8 *action, u16 *queue_id)
6643 {
6644         struct hclge_vport *vport = hdev->vport;
6645
6646         if (ring_cookie == RX_CLS_FLOW_DISC) {
6647                 *action = HCLGE_FD_ACTION_DROP_PACKET;
6648         } else {
6649                 u32 ring = ethtool_get_flow_spec_ring(ring_cookie);
6650                 u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
6651                 u16 tqps;
6652
6653                 /* To keep consistent with the user's configuration, minus 1 when
6654                  * printing 'vf', because the vf id from ethtool is increased by 1.
6655                  */
6656                 if (vf > hdev->num_req_vfs) {
6657                         dev_err(&hdev->pdev->dev,
6658                                 "Error: vf id (%u) should be less than %u\n",
6659                                 vf - 1, hdev->num_req_vfs);
6660                         return -EINVAL;
6661                 }
6662
6663                 *vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
6664                 tqps = hdev->vport[vf].nic.kinfo.num_tqps;
6665
6666                 if (ring >= tqps) {
6667                         dev_err(&hdev->pdev->dev,
6668                                 "Error: queue id (%u) > max tqp num (%u)\n",
6669                                 ring, tqps - 1);
6670                         return -EINVAL;
6671                 }
6672
6673                 *action = HCLGE_FD_ACTION_SELECT_QUEUE;
6674                 *queue_id = ring;
6675         }
6676
6677         return 0;
6678 }
6679
6680 static int hclge_add_fd_entry(struct hnae3_handle *handle,
6681                               struct ethtool_rxnfc *cmd)
6682 {
6683         struct hclge_vport *vport = hclge_get_vport(handle);
6684         struct hclge_dev *hdev = vport->back;
6685         struct hclge_fd_user_def_info info;
6686         u16 dst_vport_id = 0, q_index = 0;
6687         struct ethtool_rx_flow_spec *fs;
6688         struct hclge_fd_rule *rule;
6689         u32 unused = 0;
6690         u8 action;
6691         int ret;
6692
6693         if (!hnae3_dev_fd_supported(hdev)) {
6694                 dev_err(&hdev->pdev->dev,
6695                         "flow director is not supported\n");
6696                 return -EOPNOTSUPP;
6697         }
6698
6699         if (!hdev->fd_en) {
6700                 dev_err(&hdev->pdev->dev,
6701                         "please enable flow director first\n");
6702                 return -EOPNOTSUPP;
6703         }
6704
6705         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6706
6707         ret = hclge_fd_check_spec(hdev, fs, &unused, &info);
6708         if (ret)
6709                 return ret;
6710
6711         ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &dst_vport_id,
6712                                          &action, &q_index);
6713         if (ret)
6714                 return ret;
6715
6716         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
6717         if (!rule)
6718                 return -ENOMEM;
6719
6720         ret = hclge_fd_get_tuple(hdev, fs, rule, &info);
6721         if (ret) {
6722                 kfree(rule);
6723                 return ret;
6724         }
6725
6726         rule->flow_type = fs->flow_type;
6727         rule->location = fs->location;
6728         rule->unused_tuple = unused;
6729         rule->vf_id = dst_vport_id;
6730         rule->queue_id = q_index;
6731         rule->action = action;
6732         rule->rule_type = HCLGE_FD_EP_ACTIVE;
6733
6734         ret = hclge_add_fd_entry_common(hdev, rule);
6735         if (ret)
6736                 kfree(rule);
6737
6738         return ret;
6739 }
6740
6741 static int hclge_del_fd_entry(struct hnae3_handle *handle,
6742                               struct ethtool_rxnfc *cmd)
6743 {
6744         struct hclge_vport *vport = hclge_get_vport(handle);
6745         struct hclge_dev *hdev = vport->back;
6746         struct ethtool_rx_flow_spec *fs;
6747         int ret;
6748
6749         if (!hnae3_dev_fd_supported(hdev))
6750                 return -EOPNOTSUPP;
6751
6752         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6753
6754         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6755                 return -EINVAL;
6756
6757         spin_lock_bh(&hdev->fd_rule_lock);
6758         if (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6759             !test_bit(fs->location, hdev->fd_bmap)) {
6760                 dev_err(&hdev->pdev->dev,
6761                         "Delete fail, rule %u does not exist\n", fs->location);
6762                 spin_unlock_bh(&hdev->fd_rule_lock);
6763                 return -ENOENT;
6764         }
6765
6766         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
6767                                    NULL, false);
6768         if (ret)
6769                 goto out;
6770
6771         hclge_update_fd_list(hdev, HCLGE_FD_DELETED, fs->location, NULL);
6772
6773 out:
6774         spin_unlock_bh(&hdev->fd_rule_lock);
6775         return ret;
6776 }
6777
6778 static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev,
6779                                          bool clear_list)
6780 {
6781         struct hclge_fd_rule *rule;
6782         struct hlist_node *node;
6783         u16 location;
6784
6785         if (!hnae3_dev_fd_supported(hdev))
6786                 return;
6787
6788         spin_lock_bh(&hdev->fd_rule_lock);
6789
6790         for_each_set_bit(location, hdev->fd_bmap,
6791                          hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6792                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
6793                                      NULL, false);
6794
6795         if (clear_list) {
6796                 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
6797                                           rule_node) {
6798                         hlist_del(&rule->rule_node);
6799                         kfree(rule);
6800                 }
6801                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
6802                 hdev->hclge_fd_rule_num = 0;
6803                 bitmap_zero(hdev->fd_bmap,
6804                             hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6805         }
6806
6807         spin_unlock_bh(&hdev->fd_rule_lock);
6808 }
6809
6810 static void hclge_del_all_fd_entries(struct hclge_dev *hdev)
6811 {
6812         hclge_clear_fd_rules_in_list(hdev, true);
6813         hclge_fd_disable_user_def(hdev);
6814 }
6815
6816 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
6817 {
6818         struct hclge_vport *vport = hclge_get_vport(handle);
6819         struct hclge_dev *hdev = vport->back;
6820         struct hclge_fd_rule *rule;
6821         struct hlist_node *node;
6822
6823         /* Return ok here, because the reset error handling will check this
6824          * return value. If an error is returned here, the reset process
6825          * will fail.
6826          */
6827         if (!hnae3_dev_fd_supported(hdev))
6828                 return 0;
6829
6830         /* if fd is disabled, the rules should not be restored when resetting */
6831         if (!hdev->fd_en)
6832                 return 0;
6833
6834         spin_lock_bh(&hdev->fd_rule_lock);
6835         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6836                 if (rule->state == HCLGE_FD_ACTIVE)
6837                         rule->state = HCLGE_FD_TO_ADD;
6838         }
6839         spin_unlock_bh(&hdev->fd_rule_lock);
6840         set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
6841
6842         return 0;
6843 }
6844
6845 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
6846                                  struct ethtool_rxnfc *cmd)
6847 {
6848         struct hclge_vport *vport = hclge_get_vport(handle);
6849         struct hclge_dev *hdev = vport->back;
6850
6851         if (!hnae3_dev_fd_supported(hdev) || hclge_is_cls_flower_active(handle))
6852                 return -EOPNOTSUPP;
6853
6854         cmd->rule_cnt = hdev->hclge_fd_rule_num;
6855         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6856
6857         return 0;
6858 }
6859
6860 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
6861                                      struct ethtool_tcpip4_spec *spec,
6862                                      struct ethtool_tcpip4_spec *spec_mask)
6863 {
6864         spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6865         spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6866                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6867
6868         spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6869         spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6870                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6871
6872         spec->psrc = cpu_to_be16(rule->tuples.src_port);
6873         spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6874                         0 : cpu_to_be16(rule->tuples_mask.src_port);
6875
6876         spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6877         spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6878                         0 : cpu_to_be16(rule->tuples_mask.dst_port);
6879
6880         spec->tos = rule->tuples.ip_tos;
6881         spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6882                         0 : rule->tuples_mask.ip_tos;
6883 }
6884
6885 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
6886                                   struct ethtool_usrip4_spec *spec,
6887                                   struct ethtool_usrip4_spec *spec_mask)
6888 {
6889         spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6890         spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6891                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6892
6893         spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6894         spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6895                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6896
6897         spec->tos = rule->tuples.ip_tos;
6898         spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6899                         0 : rule->tuples_mask.ip_tos;
6900
6901         spec->proto = rule->tuples.ip_proto;
6902         spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6903                         0 : rule->tuples_mask.ip_proto;
6904
6905         spec->ip_ver = ETH_RX_NFC_IP4;
6906 }
6907
6908 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6909                                      struct ethtool_tcpip6_spec *spec,
6910                                      struct ethtool_tcpip6_spec *spec_mask)
6911 {
6912         cpu_to_be32_array(spec->ip6src,
6913                           rule->tuples.src_ip, IPV6_SIZE);
6914         cpu_to_be32_array(spec->ip6dst,
6915                           rule->tuples.dst_ip, IPV6_SIZE);
6916         if (rule->unused_tuple & BIT(INNER_SRC_IP))
6917                 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6918         else
6919                 cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6920                                   IPV6_SIZE);
6921
6922         if (rule->unused_tuple & BIT(INNER_DST_IP))
6923                 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6924         else
6925                 cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6926                                   IPV6_SIZE);
6927
6928         spec->tclass = rule->tuples.ip_tos;
6929         spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6930                         0 : rule->tuples_mask.ip_tos;
6931
6932         spec->psrc = cpu_to_be16(rule->tuples.src_port);
6933         spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6934                         0 : cpu_to_be16(rule->tuples_mask.src_port);
6935
6936         spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6937         spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6938                         0 : cpu_to_be16(rule->tuples_mask.dst_port);
6939 }
6940
6941 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6942                                   struct ethtool_usrip6_spec *spec,
6943                                   struct ethtool_usrip6_spec *spec_mask)
6944 {
6945         cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6946         cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6947         if (rule->unused_tuple & BIT(INNER_SRC_IP))
6948                 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6949         else
6950                 cpu_to_be32_array(spec_mask->ip6src,
6951                                   rule->tuples_mask.src_ip, IPV6_SIZE);
6952
6953         if (rule->unused_tuple & BIT(INNER_DST_IP))
6954                 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6955         else
6956                 cpu_to_be32_array(spec_mask->ip6dst,
6957                                   rule->tuples_mask.dst_ip, IPV6_SIZE);
6958
6959         spec->tclass = rule->tuples.ip_tos;
6960         spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6961                         0 : rule->tuples_mask.ip_tos;
6962
6963         spec->l4_proto = rule->tuples.ip_proto;
6964         spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6965                         0 : rule->tuples_mask.ip_proto;
6966 }
6967
6968 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6969                                     struct ethhdr *spec,
6970                                     struct ethhdr *spec_mask)
6971 {
6972         ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6973         ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6974
6975         if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6976                 eth_zero_addr(spec_mask->h_source);
6977         else
6978                 ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6979
6980         if (rule->unused_tuple & BIT(INNER_DST_MAC))
6981                 eth_zero_addr(spec_mask->h_dest);
6982         else
6983                 ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6984
6985         spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6986         spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6987                         0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6988 }
6989
6990 static void hclge_fd_get_user_def_info(struct ethtool_rx_flow_spec *fs,
6991                                        struct hclge_fd_rule *rule)
6992 {
6993         if ((rule->unused_tuple & HCLGE_FD_TUPLE_USER_DEF_TUPLES) ==
6994             HCLGE_FD_TUPLE_USER_DEF_TUPLES) {
6995                 fs->h_ext.data[0] = 0;
6996                 fs->h_ext.data[1] = 0;
6997                 fs->m_ext.data[0] = 0;
6998                 fs->m_ext.data[1] = 0;
6999         } else {
7000                 fs->h_ext.data[0] = cpu_to_be32(rule->ep.user_def.offset);
7001                 fs->h_ext.data[1] = cpu_to_be32(rule->ep.user_def.data);
7002                 fs->m_ext.data[0] =
7003                                 cpu_to_be32(HCLGE_FD_USER_DEF_OFFSET_UNMASK);
7004                 fs->m_ext.data[1] = cpu_to_be32(rule->ep.user_def.data_mask);
7005         }
7006 }
7007
7008 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
7009                                   struct hclge_fd_rule *rule)
7010 {
7011         if (fs->flow_type & FLOW_EXT) {
7012                 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
7013                 fs->m_ext.vlan_tci =
7014                                 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
7015                                 0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
7016
7017                 hclge_fd_get_user_def_info(fs, rule);
7018         }
7019
7020         if (fs->flow_type & FLOW_MAC_EXT) {
7021                 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
7022                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
7023                         eth_zero_addr(fs->m_u.ether_spec.h_dest);
7024                 else
7025                         ether_addr_copy(fs->m_u.ether_spec.h_dest,
7026                                         rule->tuples_mask.dst_mac);
7027         }
7028 }
7029
7030 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
7031                                   struct ethtool_rxnfc *cmd)
7032 {
7033         struct hclge_vport *vport = hclge_get_vport(handle);
7034         struct hclge_fd_rule *rule = NULL;
7035         struct hclge_dev *hdev = vport->back;
7036         struct ethtool_rx_flow_spec *fs;
7037         struct hlist_node *node2;
7038
7039         if (!hnae3_dev_fd_supported(hdev))
7040                 return -EOPNOTSUPP;
7041
7042         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
7043
7044         spin_lock_bh(&hdev->fd_rule_lock);
7045
7046         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
7047                 if (rule->location >= fs->location)
7048                         break;
7049         }
7050
7051         if (!rule || fs->location != rule->location) {
7052                 spin_unlock_bh(&hdev->fd_rule_lock);
7053
7054                 return -ENOENT;
7055         }
7056
7057         fs->flow_type = rule->flow_type;
7058         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
7059         case SCTP_V4_FLOW:
7060         case TCP_V4_FLOW:
7061         case UDP_V4_FLOW:
7062                 hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
7063                                          &fs->m_u.tcp_ip4_spec);
7064                 break;
7065         case IP_USER_FLOW:
7066                 hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
7067                                       &fs->m_u.usr_ip4_spec);
7068                 break;
7069         case SCTP_V6_FLOW:
7070         case TCP_V6_FLOW:
7071         case UDP_V6_FLOW:
7072                 hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
7073                                          &fs->m_u.tcp_ip6_spec);
7074                 break;
7075         case IPV6_USER_FLOW:
7076                 hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
7077                                       &fs->m_u.usr_ip6_spec);
7078                 break;
7079         /* The flow type of the fd rule has been checked before it was added
7080          * to the rule list. As all other flow types have been handled above,
7081          * the default case must be ETHER_FLOW.
7082          */
7083         default:
7084                 hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
7085                                         &fs->m_u.ether_spec);
7086                 break;
7087         }
7088
7089         hclge_fd_get_ext_info(fs, rule);
7090
7091         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
7092                 fs->ring_cookie = RX_CLS_FLOW_DISC;
7093         } else {
7094                 u64 vf_id;
7095
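                     /* the queue id goes in the low bits of ring_cookie and
                      * the vf id in the bits above
                      * ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF
                      */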
7096                 fs->ring_cookie = rule->queue_id;
7097                 vf_id = rule->vf_id;
7098                 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
7099                 fs->ring_cookie |= vf_id;
7100         }
7101
7102         spin_unlock_bh(&hdev->fd_rule_lock);
7103
7104         return 0;
7105 }
7106
7107 static int hclge_get_all_rules(struct hnae3_handle *handle,
7108                                struct ethtool_rxnfc *cmd, u32 *rule_locs)
7109 {
7110         struct hclge_vport *vport = hclge_get_vport(handle);
7111         struct hclge_dev *hdev = vport->back;
7112         struct hclge_fd_rule *rule;
7113         struct hlist_node *node2;
7114         int cnt = 0;
7115
7116         if (!hnae3_dev_fd_supported(hdev))
7117                 return -EOPNOTSUPP;
7118
7119         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
7120
7121         spin_lock_bh(&hdev->fd_rule_lock);
7122         hlist_for_each_entry_safe(rule, node2,
7123                                   &hdev->fd_rule_list, rule_node) {
7124                 if (cnt == cmd->rule_cnt) {
7125                         spin_unlock_bh(&hdev->fd_rule_lock);
7126                         return -EMSGSIZE;
7127                 }
7128
7129                 if (rule->state == HCLGE_FD_TO_DEL)
7130                         continue;
7131
7132                 rule_locs[cnt] = rule->location;
7133                 cnt++;
7134         }
7135
7136         spin_unlock_bh(&hdev->fd_rule_lock);
7137
7138         cmd->rule_cnt = cnt;
7139
7140         return 0;
7141 }
7142
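     /* Extract the aRFS match tuples from the dissected flow keys, converting
      * them from network to host byte order. For IPv4 only the last u32 of the
      * src_ip/dst_ip arrays is used; for IPv6 all IPV6_SIZE words are filled.
      */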
7143 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
7144                                      struct hclge_fd_rule_tuples *tuples)
7145 {
7146 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
7147 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
7148
7149         tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
7150         tuples->ip_proto = fkeys->basic.ip_proto;
7151         tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
7152
7153         if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
7154                 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
7155                 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
7156         } else {
7157                 int i;
7158
7159                 for (i = 0; i < IPV6_SIZE; i++) {
7160                         tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
7161                         tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
7162                 }
7163         }
7164 }
7165
7166 /* traverse all rules, check whether an existing rule has the same tuples */
7167 static struct hclge_fd_rule *
7168 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
7169                           const struct hclge_fd_rule_tuples *tuples)
7170 {
7171         struct hclge_fd_rule *rule = NULL;
7172         struct hlist_node *node;
7173
7174         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7175                 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
7176                         return rule;
7177         }
7178
7179         return NULL;
7180 }
7181
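     /* An aRFS rule matches on ether proto, ip proto, src/dst IP and dst port;
      * MAC addresses, VLAN tag, TOS and src port are marked unused, and the
      * tuple masks are set to all-ones so used tuples are matched exactly.
      */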
7182 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
7183                                      struct hclge_fd_rule *rule)
7184 {
7185         rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
7186                              BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
7187                              BIT(INNER_SRC_PORT);
7188         rule->action = 0;
7189         rule->vf_id = 0;
7190         rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
7191         rule->state = HCLGE_FD_TO_ADD;
7192         if (tuples->ether_proto == ETH_P_IP) {
7193                 if (tuples->ip_proto == IPPROTO_TCP)
7194                         rule->flow_type = TCP_V4_FLOW;
7195                 else
7196                         rule->flow_type = UDP_V4_FLOW;
7197         } else {
7198                 if (tuples->ip_proto == IPPROTO_TCP)
7199                         rule->flow_type = TCP_V6_FLOW;
7200                 else
7201                         rule->flow_type = UDP_V6_FLOW;
7202         }
7203         memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
7204         memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
7205 }
7206
7207 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
7208                                       u16 flow_id, struct flow_keys *fkeys)
7209 {
7210         struct hclge_vport *vport = hclge_get_vport(handle);
7211         struct hclge_fd_rule_tuples new_tuples = {};
7212         struct hclge_dev *hdev = vport->back;
7213         struct hclge_fd_rule *rule;
7214         u16 bit_id;
7215
7216         if (!hnae3_dev_fd_supported(hdev))
7217                 return -EOPNOTSUPP;
7218
7219         /* arfs should not work when a fd rule has already been added
7220          * by the user
7221          */
7222         spin_lock_bh(&hdev->fd_rule_lock);
7223         if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE &&
7224             hdev->fd_active_type != HCLGE_FD_RULE_NONE) {
7225                 spin_unlock_bh(&hdev->fd_rule_lock);
7226                 return -EOPNOTSUPP;
7227         }
7228
7229         hclge_fd_get_flow_tuples(fkeys, &new_tuples);
7230
7231         /* check whether a flow director filter already exists for this flow:
7232          * if not, create a new filter for it;
7233          * if a filter exists with a different queue id, modify the filter;
7234          * if a filter exists with the same queue id, do nothing
7235          */
7236         rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
7237         if (!rule) {
7238                 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
7239                 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7240                         spin_unlock_bh(&hdev->fd_rule_lock);
7241                         return -ENOSPC;
7242                 }
7243
7244                 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
7245                 if (!rule) {
7246                         spin_unlock_bh(&hdev->fd_rule_lock);
7247                         return -ENOMEM;
7248                 }
7249
7250                 rule->location = bit_id;
7251                 rule->arfs.flow_id = flow_id;
7252                 rule->queue_id = queue_id;
7253                 hclge_fd_build_arfs_rule(&new_tuples, rule);
7254                 hclge_update_fd_list(hdev, rule->state, rule->location, rule);
7255                 hdev->fd_active_type = HCLGE_FD_ARFS_ACTIVE;
7256         } else if (rule->queue_id != queue_id) {
7257                 rule->queue_id = queue_id;
7258                 rule->state = HCLGE_FD_TO_ADD;
7259                 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7260                 hclge_task_schedule(hdev, 0);
7261         }
7262         spin_unlock_bh(&hdev->fd_rule_lock);
7263         return rule->location;
7264 }
7265
7266 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
7267 {
7268 #ifdef CONFIG_RFS_ACCEL
7269         struct hnae3_handle *handle = &hdev->vport[0].nic;
7270         struct hclge_fd_rule *rule;
7271         struct hlist_node *node;
7272
7273         spin_lock_bh(&hdev->fd_rule_lock);
7274         if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
7275                 spin_unlock_bh(&hdev->fd_rule_lock);
7276                 return;
7277         }
7278         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7279                 if (rule->state != HCLGE_FD_ACTIVE)
7280                         continue;
7281                 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
7282                                         rule->arfs.flow_id, rule->location)) {
7283                         rule->state = HCLGE_FD_TO_DEL;
7284                         set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7285                 }
7286         }
7287         spin_unlock_bh(&hdev->fd_rule_lock);
7288 #endif
7289 }
7290
7291 /* this function must be called with fd_rule_lock held */
7292 static int hclge_clear_arfs_rules(struct hclge_dev *hdev)
7293 {
7294 #ifdef CONFIG_RFS_ACCEL
7295         struct hclge_fd_rule *rule;
7296         struct hlist_node *node;
7297         int ret;
7298
7299         if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE)
7300                 return 0;
7301
7302         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7303                 switch (rule->state) {
7304                 case HCLGE_FD_TO_DEL:
7305                 case HCLGE_FD_ACTIVE:
7306                         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7307                                                    rule->location, NULL, false);
7308                         if (ret)
7309                                 return ret;
7310                         fallthrough;
7311                 case HCLGE_FD_TO_ADD:
7312                         hclge_fd_dec_rule_cnt(hdev, rule->location);
7313                         hlist_del(&rule->rule_node);
7314                         kfree(rule);
7315                         break;
7316                 default:
7317                         break;
7318                 }
7319         }
7320         hclge_sync_fd_state(hdev);
7321
7322 #endif
7323         return 0;
7324 }
7325
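     /* The hclge_get_cls_key_*() helpers below copy the matched keys and masks
      * from a tc flower flow_rule into the fd rule tuples; keys not present in
      * the match are flagged in rule->unused_tuple instead.
      */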
7326 static void hclge_get_cls_key_basic(const struct flow_rule *flow,
7327                                     struct hclge_fd_rule *rule)
7328 {
7329         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) {
7330                 struct flow_match_basic match;
7331                 u16 ethtype_key, ethtype_mask;
7332
7333                 flow_rule_match_basic(flow, &match);
7334                 ethtype_key = ntohs(match.key->n_proto);
7335                 ethtype_mask = ntohs(match.mask->n_proto);
7336
7337                 if (ethtype_key == ETH_P_ALL) {
7338                         ethtype_key = 0;
7339                         ethtype_mask = 0;
7340                 }
7341                 rule->tuples.ether_proto = ethtype_key;
7342                 rule->tuples_mask.ether_proto = ethtype_mask;
7343                 rule->tuples.ip_proto = match.key->ip_proto;
7344                 rule->tuples_mask.ip_proto = match.mask->ip_proto;
7345         } else {
7346                 rule->unused_tuple |= BIT(INNER_IP_PROTO);
7347                 rule->unused_tuple |= BIT(INNER_ETH_TYPE);
7348         }
7349 }
7350
7351 static void hclge_get_cls_key_mac(const struct flow_rule *flow,
7352                                   struct hclge_fd_rule *rule)
7353 {
7354         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
7355                 struct flow_match_eth_addrs match;
7356
7357                 flow_rule_match_eth_addrs(flow, &match);
7358                 ether_addr_copy(rule->tuples.dst_mac, match.key->dst);
7359                 ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst);
7360                 ether_addr_copy(rule->tuples.src_mac, match.key->src);
7361                 ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src);
7362         } else {
7363                 rule->unused_tuple |= BIT(INNER_DST_MAC);
7364                 rule->unused_tuple |= BIT(INNER_SRC_MAC);
7365         }
7366 }
7367
7368 static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
7369                                    struct hclge_fd_rule *rule)
7370 {
7371         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
7372                 struct flow_match_vlan match;
7373
7374                 flow_rule_match_vlan(flow, &match);
7375                 rule->tuples.vlan_tag1 = match.key->vlan_id |
7376                                 (match.key->vlan_priority << VLAN_PRIO_SHIFT);
7377                 rule->tuples_mask.vlan_tag1 = match.mask->vlan_id |
7378                                 (match.mask->vlan_priority << VLAN_PRIO_SHIFT);
7379         } else {
7380                 rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST);
7381         }
7382 }
7383
7384 static void hclge_get_cls_key_ip(const struct flow_rule *flow,
7385                                  struct hclge_fd_rule *rule)
7386 {
7387         u16 addr_type = 0;
7388
7389         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) {
7390                 struct flow_match_control match;
7391
7392                 flow_rule_match_control(flow, &match);
7393                 addr_type = match.key->addr_type;
7394         }
7395
7396         if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
7397                 struct flow_match_ipv4_addrs match;
7398
7399                 flow_rule_match_ipv4_addrs(flow, &match);
7400                 rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src);
7401                 rule->tuples_mask.src_ip[IPV4_INDEX] =
7402                                                 be32_to_cpu(match.mask->src);
7403                 rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst);
7404                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
7405                                                 be32_to_cpu(match.mask->dst);
7406         } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
7407                 struct flow_match_ipv6_addrs match;
7408
7409                 flow_rule_match_ipv6_addrs(flow, &match);
7410                 be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
7411                                   IPV6_SIZE);
7412                 be32_to_cpu_array(rule->tuples_mask.src_ip,
7413                                   match.mask->src.s6_addr32, IPV6_SIZE);
7414                 be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
7415                                   IPV6_SIZE);
7416                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
7417                                   match.mask->dst.s6_addr32, IPV6_SIZE);
7418         } else {
7419                 rule->unused_tuple |= BIT(INNER_SRC_IP);
7420                 rule->unused_tuple |= BIT(INNER_DST_IP);
7421         }
7422 }
7423
7424 static void hclge_get_cls_key_port(const struct flow_rule *flow,
7425                                    struct hclge_fd_rule *rule)
7426 {
7427         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
7428                 struct flow_match_ports match;
7429
7430                 flow_rule_match_ports(flow, &match);
7431
7432                 rule->tuples.src_port = be16_to_cpu(match.key->src);
7433                 rule->tuples_mask.src_port = be16_to_cpu(match.mask->src);
7434                 rule->tuples.dst_port = be16_to_cpu(match.key->dst);
7435                 rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst);
7436         } else {
7437                 rule->unused_tuple |= BIT(INNER_SRC_PORT);
7438                 rule->unused_tuple |= BIT(INNER_DST_PORT);
7439         }
7440 }
7441
7442 static int hclge_parse_cls_flower(struct hclge_dev *hdev,
7443                                   struct flow_cls_offload *cls_flower,
7444                                   struct hclge_fd_rule *rule)
7445 {
7446         struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
7447         struct flow_dissector *dissector = flow->match.dissector;
7448
7449         if (dissector->used_keys &
7450             ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
7451               BIT(FLOW_DISSECTOR_KEY_BASIC) |
7452               BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
7453               BIT(FLOW_DISSECTOR_KEY_VLAN) |
7454               BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
7455               BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
7456               BIT(FLOW_DISSECTOR_KEY_PORTS))) {
7457                 dev_err(&hdev->pdev->dev, "unsupported key set: %#x\n",
7458                         dissector->used_keys);
7459                 return -EOPNOTSUPP;
7460         }
7461
7462         hclge_get_cls_key_basic(flow, rule);
7463         hclge_get_cls_key_mac(flow, rule);
7464         hclge_get_cls_key_vlan(flow, rule);
7465         hclge_get_cls_key_ip(flow, rule);
7466         hclge_get_cls_key_port(flow, rule);
7467
7468         return 0;
7469 }
7470
7471 static int hclge_check_cls_flower(struct hclge_dev *hdev,
7472                                   struct flow_cls_offload *cls_flower, int tc)
7473 {
7474         u32 prio = cls_flower->common.prio;
7475
7476         if (tc < 0 || tc > hdev->tc_max) {
7477                 dev_err(&hdev->pdev->dev, "invalid traffic class\n");
7478                 return -EINVAL;
7479         }
7480
7481         if (prio == 0 ||
7482             prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7483                 dev_err(&hdev->pdev->dev,
7484                         "prio %u should be in range[1, %u]\n",
7485                         prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
7486                 return -EINVAL;
7487         }
7488
7489         if (test_bit(prio - 1, hdev->fd_bmap)) {
7490                 dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio);
7491                 return -EINVAL;
7492         }
7493         return 0;
7494 }
7495
7496 static int hclge_add_cls_flower(struct hnae3_handle *handle,
7497                                 struct flow_cls_offload *cls_flower,
7498                                 int tc)
7499 {
7500         struct hclge_vport *vport = hclge_get_vport(handle);
7501         struct hclge_dev *hdev = vport->back;
7502         struct hclge_fd_rule *rule;
7503         int ret;
7504
7505         ret = hclge_check_cls_flower(hdev, cls_flower, tc);
7506         if (ret) {
7507                 dev_err(&hdev->pdev->dev,
7508                         "failed to check cls flower params, ret = %d\n", ret);
7509                 return ret;
7510         }
7511
7512         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
7513         if (!rule)
7514                 return -ENOMEM;
7515
7516         ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
7517         if (ret) {
7518                 kfree(rule);
7519                 return ret;
7520         }
7521
7522         rule->action = HCLGE_FD_ACTION_SELECT_TC;
7523         rule->cls_flower.tc = tc;
7524         rule->location = cls_flower->common.prio - 1;
7525         rule->vf_id = 0;
7526         rule->cls_flower.cookie = cls_flower->cookie;
7527         rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE;
7528
7529         ret = hclge_add_fd_entry_common(hdev, rule);
7530         if (ret)
7531                 kfree(rule);
7532
7533         return ret;
7534 }
7535
7536 static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev,
7537                                                    unsigned long cookie)
7538 {
7539         struct hclge_fd_rule *rule;
7540         struct hlist_node *node;
7541
7542         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7543                 if (rule->cls_flower.cookie == cookie)
7544                         return rule;
7545         }
7546
7547         return NULL;
7548 }
7549
7550 static int hclge_del_cls_flower(struct hnae3_handle *handle,
7551                                 struct flow_cls_offload *cls_flower)
7552 {
7553         struct hclge_vport *vport = hclge_get_vport(handle);
7554         struct hclge_dev *hdev = vport->back;
7555         struct hclge_fd_rule *rule;
7556         int ret;
7557
7558         spin_lock_bh(&hdev->fd_rule_lock);
7559
7560         rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
7561         if (!rule) {
7562                 spin_unlock_bh(&hdev->fd_rule_lock);
7563                 return -EINVAL;
7564         }
7565
7566         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
7567                                    NULL, false);
7568         if (ret) {
7569                 spin_unlock_bh(&hdev->fd_rule_lock);
7570                 return ret;
7571         }
7572
7573         hclge_update_fd_list(hdev, HCLGE_FD_DELETED, rule->location, NULL);
7574         spin_unlock_bh(&hdev->fd_rule_lock);
7575
7576         return 0;
7577 }
7578
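     /* Push pending fd rule changes to hardware: TO_ADD rules are programmed
      * and become ACTIVE, TO_DEL rules are removed from the TCAM and freed.
      * On failure the FD_TBL_CHANGED bit is set again so the next service
      * task run retries the sync.
      */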
7579 static void hclge_sync_fd_list(struct hclge_dev *hdev, struct hlist_head *hlist)
7580 {
7581         struct hclge_fd_rule *rule;
7582         struct hlist_node *node;
7583         int ret = 0;
7584
7585         if (!test_and_clear_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state))
7586                 return;
7587
7588         spin_lock_bh(&hdev->fd_rule_lock);
7589
7590         hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
7591                 switch (rule->state) {
7592                 case HCLGE_FD_TO_ADD:
7593                         ret = hclge_fd_config_rule(hdev, rule);
7594                         if (ret)
7595                                 goto out;
7596                         rule->state = HCLGE_FD_ACTIVE;
7597                         break;
7598                 case HCLGE_FD_TO_DEL:
7599                         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7600                                                    rule->location, NULL, false);
7601                         if (ret)
7602                                 goto out;
7603                         hclge_fd_dec_rule_cnt(hdev, rule->location);
7604                         hclge_fd_free_node(hdev, rule);
7605                         break;
7606                 default:
7607                         break;
7608                 }
7609         }
7610
7611 out:
7612         if (ret)
7613                 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7614
7615         spin_unlock_bh(&hdev->fd_rule_lock);
7616 }
7617
7618 static void hclge_sync_fd_table(struct hclge_dev *hdev)
7619 {
7620         if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) {
7621                 bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
7622
7623                 hclge_clear_fd_rules_in_list(hdev, clear_list);
7624         }
7625
7626         hclge_sync_fd_user_def_cfg(hdev, false);
7627
7628         hclge_sync_fd_list(hdev, &hdev->fd_rule_list);
7629 }
7630
7631 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
7632 {
7633         struct hclge_vport *vport = hclge_get_vport(handle);
7634         struct hclge_dev *hdev = vport->back;
7635
7636         return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
7637                hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
7638 }
7639
7640 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
7641 {
7642         struct hclge_vport *vport = hclge_get_vport(handle);
7643         struct hclge_dev *hdev = vport->back;
7644
7645         return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
7646 }
7647
7648 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
7649 {
7650         struct hclge_vport *vport = hclge_get_vport(handle);
7651         struct hclge_dev *hdev = vport->back;
7652
7653         return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
7654 }
7655
7656 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
7657 {
7658         struct hclge_vport *vport = hclge_get_vport(handle);
7659         struct hclge_dev *hdev = vport->back;
7660
7661         return hdev->rst_stats.hw_reset_done_cnt;
7662 }
7663
7664 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
7665 {
7666         struct hclge_vport *vport = hclge_get_vport(handle);
7667         struct hclge_dev *hdev = vport->back;
7668
7669         hdev->fd_en = enable;
7670
7671         if (!enable)
7672                 set_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state);
7673         else
7674                 hclge_restore_fd_entries(handle);
7675
7676         hclge_task_schedule(hdev, 0);
7677 }
7678
7679 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
7680 {
7681         struct hclge_desc desc;
7682         struct hclge_config_mac_mode_cmd *req =
7683                 (struct hclge_config_mac_mode_cmd *)desc.data;
7684         u32 loop_en = 0;
7685         int ret;
7686
7687         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
7688
7689         if (enable) {
7690                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
7691                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
7692                 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
7693                 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
7694                 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
7695                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
7696                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
7697                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
7698                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
7699                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
7700         }
7701
7702         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7703
7704         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7705         if (ret)
7706                 dev_err(&hdev->pdev->dev,
7707                         "mac enable fail, ret =%d.\n", ret);
7708 }
7709
7710 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
7711                                      u8 switch_param, u8 param_mask)
7712 {
7713         struct hclge_mac_vlan_switch_cmd *req;
7714         struct hclge_desc desc;
7715         u32 func_id;
7716         int ret;
7717
7718         func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
7719         req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
7720
7721         /* read current config parameter */
7722         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
7723                                    true);
7724         req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
7725         req->func_id = cpu_to_le32(func_id);
7726
7727         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7728         if (ret) {
7729                 dev_err(&hdev->pdev->dev,
7730                         "read mac vlan switch parameter fail, ret = %d\n", ret);
7731                 return ret;
7732         }
7733
7734         /* modify and write new config parameter */
7735         hclge_cmd_reuse_desc(&desc, false);
7736         req->switch_param = (req->switch_param & param_mask) | switch_param;
7737         req->param_mask = param_mask;
7738
7739         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7740         if (ret)
7741                 dev_err(&hdev->pdev->dev,
7742                         "set mac vlan switch parameter fail, ret = %d\n", ret);
7743         return ret;
7744 }
7745
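     /* Poll the PHY link state until it matches link_ret, for at most
      * HCLGE_PHY_LINK_STATUS_NUM polls of HCLGE_LINK_STATUS_MS ms each.
      */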
7746 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
7747                                        int link_ret)
7748 {
7749 #define HCLGE_PHY_LINK_STATUS_NUM  200
7750
7751         struct phy_device *phydev = hdev->hw.mac.phydev;
7752         int i = 0;
7753         int ret;
7754
7755         do {
7756                 ret = phy_read_status(phydev);
7757                 if (ret) {
7758                         dev_err(&hdev->pdev->dev,
7759                                 "phy update link status fail, ret = %d\n", ret);
7760                         return;
7761                 }
7762
7763                 if (phydev->link == link_ret)
7764                         break;
7765
7766                 msleep(HCLGE_LINK_STATUS_MS);
7767         } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
7768 }
7769
7770 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
7771 {
7772 #define HCLGE_MAC_LINK_STATUS_NUM  100
7773
7774         int link_status;
7775         int i = 0;
7776         int ret;
7777
7778         do {
7779                 ret = hclge_get_mac_link_status(hdev, &link_status);
7780                 if (ret)
7781                         return ret;
7782                 if (link_status == link_ret)
7783                         return 0;
7784
7785                 msleep(HCLGE_LINK_STATUS_MS);
7786         } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
7787         return -EBUSY;
7788 }
7789
7790 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
7791                                           bool is_phy)
7792 {
7793         int link_ret;
7794
7795         link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
7796
7797         if (is_phy)
7798                 hclge_phy_link_status_wait(hdev, link_ret);
7799
7800         return hclge_mac_link_status_wait(hdev, link_ret);
7801 }
7802
7803 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
7804 {
7805         struct hclge_config_mac_mode_cmd *req;
7806         struct hclge_desc desc;
7807         u32 loop_en;
7808         int ret;
7809
7810         req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
7811         /* 1 Read out the MAC mode config first */
7812         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
7813         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7814         if (ret) {
7815                 dev_err(&hdev->pdev->dev,
7816                         "mac loopback get fail, ret =%d.\n", ret);
7817                 return ret;
7818         }
7819
7820         /* 2 Then setup the loopback flag */
7821         loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
7822         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
7823
7824         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7825
7826         /* 3 Config mac work mode with loopback flag
7827          * and its original configuration parameters
7828          */
7829         hclge_cmd_reuse_desc(&desc, false);
7830         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7831         if (ret)
7832                 dev_err(&hdev->pdev->dev,
7833                         "mac loopback set fail, ret =%d.\n", ret);
7834         return ret;
7835 }
7836
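     /* Configure serdes/phy loopback through the firmware command, then poll
      * the result field until the DONE bit is reported. DONE without the
      * SUCCESS bit means the firmware rejected the loopback configuration.
      */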
7837 static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
7838                                      enum hnae3_loop loop_mode)
7839 {
7840 #define HCLGE_COMMON_LB_RETRY_MS        10
7841 #define HCLGE_COMMON_LB_RETRY_NUM       100
7842
7843         struct hclge_common_lb_cmd *req;
7844         struct hclge_desc desc;
7845         int ret, i = 0;
7846         u8 loop_mode_b;
7847
7848         req = (struct hclge_common_lb_cmd *)desc.data;
7849         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false);
7850
7851         switch (loop_mode) {
7852         case HNAE3_LOOP_SERIAL_SERDES:
7853                 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
7854                 break;
7855         case HNAE3_LOOP_PARALLEL_SERDES:
7856                 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
7857                 break;
7858         case HNAE3_LOOP_PHY:
7859                 loop_mode_b = HCLGE_CMD_GE_PHY_INNER_LOOP_B;
7860                 break;
7861         default:
7862                 dev_err(&hdev->pdev->dev,
7863                         "unsupported common loopback mode %d\n", loop_mode);
7864                 return -ENOTSUPP;
7865         }
7866
7867         if (en) {
7868                 req->enable = loop_mode_b;
7869                 req->mask = loop_mode_b;
7870         } else {
7871                 req->mask = loop_mode_b;
7872         }
7873
7874         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7875         if (ret) {
7876                 dev_err(&hdev->pdev->dev,
7877                         "common loopback set fail, ret = %d\n", ret);
7878                 return ret;
7879         }
7880
7881         do {
7882                 msleep(HCLGE_COMMON_LB_RETRY_MS);
7883                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK,
7884                                            true);
7885                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7886                 if (ret) {
7887                         dev_err(&hdev->pdev->dev,
7888                                 "common loopback get, ret = %d\n", ret);
7889                         return ret;
7890                 }
7891         } while (++i < HCLGE_COMMON_LB_RETRY_NUM &&
7892                  !(req->result & HCLGE_CMD_COMMON_LB_DONE_B));
7893
7894         if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) {
7895                 dev_err(&hdev->pdev->dev, "common loopback set timeout\n");
7896                 return -EBUSY;
7897         } else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) {
7898                 dev_err(&hdev->pdev->dev, "common loopback set failed in fw\n");
7899                 return -EIO;
7900         }
7901         return ret;
7902 }
7903
7904 static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en,
7905                                      enum hnae3_loop loop_mode)
7906 {
7907         int ret;
7908
7909         ret = hclge_cfg_common_loopback(hdev, en, loop_mode);
7910         if (ret)
7911                 return ret;
7912
7913         hclge_cfg_mac_mode(hdev, en);
7914
7915         ret = hclge_mac_phy_link_status_wait(hdev, en, false);
7916         if (ret)
7917                 dev_err(&hdev->pdev->dev,
7918                         "serdes loopback config mac mode timeout\n");
7919
7920         return ret;
7921 }
7922
7923 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
7924                                      struct phy_device *phydev)
7925 {
7926         int ret;
7927
7928         if (!phydev->suspended) {
7929                 ret = phy_suspend(phydev);
7930                 if (ret)
7931                         return ret;
7932         }
7933
7934         ret = phy_resume(phydev);
7935         if (ret)
7936                 return ret;
7937
7938         return phy_loopback(phydev, true);
7939 }
7940
7941 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
7942                                       struct phy_device *phydev)
7943 {
7944         int ret;
7945
7946         ret = phy_loopback(phydev, false);
7947         if (ret)
7948                 return ret;
7949
7950         return phy_suspend(phydev);
7951 }
7952
7953 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
7954 {
7955         struct phy_device *phydev = hdev->hw.mac.phydev;
7956         int ret;
7957
7958         if (!phydev) {
7959                 if (hnae3_dev_phy_imp_supported(hdev))
7960                         return hclge_set_common_loopback(hdev, en,
7961                                                          HNAE3_LOOP_PHY);
7962                 return -ENOTSUPP;
7963         }
7964
7965         if (en)
7966                 ret = hclge_enable_phy_loopback(hdev, phydev);
7967         else
7968                 ret = hclge_disable_phy_loopback(hdev, phydev);
7969         if (ret) {
7970                 dev_err(&hdev->pdev->dev,
7971                         "set phy loopback fail, ret = %d\n", ret);
7972                 return ret;
7973         }
7974
7975         hclge_cfg_mac_mode(hdev, en);
7976
7977         ret = hclge_mac_phy_link_status_wait(hdev, en, true);
7978         if (ret)
7979                 dev_err(&hdev->pdev->dev,
7980                         "phy loopback config mac mode timeout\n");
7981
7982         return ret;
7983 }
7984
7985 static int hclge_tqp_enable_cmd_send(struct hclge_dev *hdev, u16 tqp_id,
7986                                      u16 stream_id, bool enable)
7987 {
7988         struct hclge_desc desc;
7989         struct hclge_cfg_com_tqp_queue_cmd *req =
7990                 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
7991
7992         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
7993         req->tqp_id = cpu_to_le16(tqp_id);
7994         req->stream_id = cpu_to_le16(stream_id);
7995         if (enable)
7996                 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
7997
7998         return hclge_cmd_send(&hdev->hw, &desc, 1);
7999 }
8000
8001 static int hclge_tqp_enable(struct hnae3_handle *handle, bool enable)
8002 {
8003         struct hclge_vport *vport = hclge_get_vport(handle);
8004         struct hclge_dev *hdev = vport->back;
8005         int ret;
8006         u16 i;
8007
8008         for (i = 0; i < handle->kinfo.num_tqps; i++) {
8009                 ret = hclge_tqp_enable_cmd_send(hdev, i, 0, enable);
8010                 if (ret)
8011                         return ret;
8012         }
8013         return 0;
8014 }
8015
8016 static int hclge_set_loopback(struct hnae3_handle *handle,
8017                               enum hnae3_loop loop_mode, bool en)
8018 {
8019         struct hclge_vport *vport = hclge_get_vport(handle);
8020         struct hclge_dev *hdev = vport->back;
8021         int ret;
8022
8023         /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
8024          * default, SSU loopback is enabled, so if the SMAC and the DMAC are
8025          * the same, the packets are looped back in the SSU. If SSU loopback
8026          * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
8027          */
8028         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
8029                 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
8030
8031                 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
8032                                                 HCLGE_SWITCH_ALW_LPBK_MASK);
8033                 if (ret)
8034                         return ret;
8035         }
8036
8037         switch (loop_mode) {
8038         case HNAE3_LOOP_APP:
8039                 ret = hclge_set_app_loopback(hdev, en);
8040                 break;
8041         case HNAE3_LOOP_SERIAL_SERDES:
8042         case HNAE3_LOOP_PARALLEL_SERDES:
8043                 ret = hclge_set_common_loopback(hdev, en, loop_mode);
8044                 break;
8045         case HNAE3_LOOP_PHY:
8046                 ret = hclge_set_phy_loopback(hdev, en);
8047                 break;
8048         default:
8049                 ret = -ENOTSUPP;
8050                 dev_err(&hdev->pdev->dev,
8051                         "loop_mode %d is not supported\n", loop_mode);
8052                 break;
8053         }
8054
8055         if (ret)
8056                 return ret;
8057
8058         ret = hclge_tqp_enable(handle, en);
8059         if (ret)
8060                 dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n",
8061                         en ? "enable" : "disable", ret);
8062
8063         return ret;
8064 }
8065
8066 static int hclge_set_default_loopback(struct hclge_dev *hdev)
8067 {
8068         int ret;
8069
8070         ret = hclge_set_app_loopback(hdev, false);
8071         if (ret)
8072                 return ret;
8073
8074         ret = hclge_cfg_common_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
8075         if (ret)
8076                 return ret;
8077
8078         return hclge_cfg_common_loopback(hdev, false,
8079                                          HNAE3_LOOP_PARALLEL_SERDES);
8080 }
8081
8082 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
8083 {
8084         struct hclge_vport *vport = hclge_get_vport(handle);
8085         struct hnae3_knic_private_info *kinfo;
8086         struct hnae3_queue *queue;
8087         struct hclge_tqp *tqp;
8088         int i;
8089
8090         kinfo = &vport->nic.kinfo;
8091         for (i = 0; i < kinfo->num_tqps; i++) {
8092                 queue = handle->kinfo.tqp[i];
8093                 tqp = container_of(queue, struct hclge_tqp, q);
8094                 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
8095         }
8096 }
8097
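     /* Wait for an in-flight link update to finish: the loop exits when
      * HCLGE_STATE_LINK_UPDATING is cleared, the service task has run again
      * (serv_processed_cnt changed), or the timeout expires.
      */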
8098 static void hclge_flush_link_update(struct hclge_dev *hdev)
8099 {
8100 #define HCLGE_FLUSH_LINK_TIMEOUT        100000
8101
8102         unsigned long last = hdev->serv_processed_cnt;
8103         int i = 0;
8104
8105         while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
8106                i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
8107                last == hdev->serv_processed_cnt)
8108                 usleep_range(1, 1);
8109 }
8110
8111 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
8112 {
8113         struct hclge_vport *vport = hclge_get_vport(handle);
8114         struct hclge_dev *hdev = vport->back;
8115
8116         if (enable) {
8117                 hclge_task_schedule(hdev, 0);
8118         } else {
8119                 /* Set the DOWN flag here to disable link updating */
8120                 set_bit(HCLGE_STATE_DOWN, &hdev->state);
8121
8122                 /* flush memory to make sure DOWN is seen by service task */
8123                 smp_mb__before_atomic();
8124                 hclge_flush_link_update(hdev);
8125         }
8126 }
8127
8128 static int hclge_ae_start(struct hnae3_handle *handle)
8129 {
8130         struct hclge_vport *vport = hclge_get_vport(handle);
8131         struct hclge_dev *hdev = vport->back;
8132
8133         /* mac enable */
8134         hclge_cfg_mac_mode(hdev, true);
8135         clear_bit(HCLGE_STATE_DOWN, &hdev->state);
8136         hdev->hw.mac.link = 0;
8137
8138         /* reset tqp stats */
8139         hclge_reset_tqp_stats(handle);
8140
8141         hclge_mac_start_phy(hdev);
8142
8143         return 0;
8144 }
8145
8146 static void hclge_ae_stop(struct hnae3_handle *handle)
8147 {
8148         struct hclge_vport *vport = hclge_get_vport(handle);
8149         struct hclge_dev *hdev = vport->back;
8150
8151         set_bit(HCLGE_STATE_DOWN, &hdev->state);
8152         spin_lock_bh(&hdev->fd_rule_lock);
8153         hclge_clear_arfs_rules(hdev);
8154         spin_unlock_bh(&hdev->fd_rule_lock);
8155
8156         /* If it is not PF reset or FLR, the firmware will disable the MAC,
8157          * so it only needs to stop the phy here.
8158          */
8159         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
8160             hdev->reset_type != HNAE3_FUNC_RESET &&
8161             hdev->reset_type != HNAE3_FLR_RESET) {
8162                 hclge_mac_stop_phy(hdev);
8163                 hclge_update_link_status(hdev);
8164                 return;
8165         }
8166
8167         hclge_reset_tqp(handle);
8168
8169         hclge_config_mac_tnl_int(hdev, false);
8170
8171         /* mac disable */
8172         hclge_cfg_mac_mode(hdev, false);
8173
8174         hclge_mac_stop_phy(hdev);
8175
8176         /* reset tqp stats */
8177         hclge_reset_tqp_stats(handle);
8178         hclge_update_link_status(hdev);
8179 }
8180
8181 int hclge_vport_start(struct hclge_vport *vport)
8182 {
8183         struct hclge_dev *hdev = vport->back;
8184
8185         set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8186         set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
8187         vport->last_active_jiffies = jiffies;
8188
8189         if (test_bit(vport->vport_id, hdev->vport_config_block)) {
8190                 if (vport->vport_id) {
8191                         hclge_restore_mac_table_common(vport);
8192                         hclge_restore_vport_vlan_table(vport);
8193                 } else {
8194                         hclge_restore_hw_table(hdev);
8195                 }
8196         }
8197
8198         clear_bit(vport->vport_id, hdev->vport_config_block);
8199
8200         return 0;
8201 }
8202
8203 void hclge_vport_stop(struct hclge_vport *vport)
8204 {
8205         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8206 }
8207
8208 static int hclge_client_start(struct hnae3_handle *handle)
8209 {
8210         struct hclge_vport *vport = hclge_get_vport(handle);
8211
8212         return hclge_vport_start(vport);
8213 }
8214
8215 static void hclge_client_stop(struct hnae3_handle *handle)
8216 {
8217         struct hclge_vport *vport = hclge_get_vport(handle);
8218
8219         hclge_vport_stop(vport);
8220 }
8221
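     /* Translate the firmware response of a MAC/VLAN table operation into an
      * errno: for ADD, resp_code 0 or 1 means success and the overflow codes
      * map to -ENOSPC; for REMOVE and LOOKUP, resp_code 1 means the entry was
      * not found (-ENOENT).
      */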
8222 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
8223                                          u16 cmdq_resp, u8  resp_code,
8224                                          enum hclge_mac_vlan_tbl_opcode op)
8225 {
8226         struct hclge_dev *hdev = vport->back;
8227
8228         if (cmdq_resp) {
8229                 dev_err(&hdev->pdev->dev,
8230                         "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
8231                         cmdq_resp);
8232                 return -EIO;
8233         }
8234
8235         if (op == HCLGE_MAC_VLAN_ADD) {
8236                 if (!resp_code || resp_code == 1)
8237                         return 0;
8238                 else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
8239                          resp_code == HCLGE_ADD_MC_OVERFLOW)
8240                         return -ENOSPC;
8241
8242                 dev_err(&hdev->pdev->dev,
8243                         "add mac addr failed for undefined, code=%u.\n",
8244                         resp_code);
8245                 return -EIO;
8246         } else if (op == HCLGE_MAC_VLAN_REMOVE) {
8247                 if (!resp_code) {
8248                         return 0;
8249                 } else if (resp_code == 1) {
8250                         dev_dbg(&hdev->pdev->dev,
8251                                 "remove mac addr failed for miss.\n");
8252                         return -ENOENT;
8253                 }
8254
8255                 dev_err(&hdev->pdev->dev,
8256                         "remove mac addr failed for undefined, code=%u.\n",
8257                         resp_code);
8258                 return -EIO;
8259         } else if (op == HCLGE_MAC_VLAN_LKUP) {
8260                 if (!resp_code) {
8261                         return 0;
8262                 } else if (resp_code == 1) {
8263                         dev_dbg(&hdev->pdev->dev,
8264                                 "lookup mac addr failed for miss.\n");
8265                         return -ENOENT;
8266                 }
8267
8268                 dev_err(&hdev->pdev->dev,
8269                         "lookup mac addr failed for undefined, code=%u.\n",
8270                         resp_code);
8271                 return -EIO;
8272         }
8273
8274         dev_err(&hdev->pdev->dev,
8275                 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
8276
8277         return -EINVAL;
8278 }
8279
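     /* The MAC/VLAN table entry carries a per-function bitmap spread over
      * descriptors 1 and 2: the first 192 function ids live in desc[1].data,
      * the remainder in desc[2].data. "clr" clears the bit for vfid, otherwise
      * the bit is set.
      */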
8280 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
8281 {
8282 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
8283
8284         unsigned int word_num;
8285         unsigned int bit_num;
8286
8287         if (vfid > 255 || vfid < 0)
8288                 return -EIO;
8289
8290         if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
8291                 word_num = vfid / 32;
8292                 bit_num  = vfid % 32;
8293                 if (clr)
8294                         desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8295                 else
8296                         desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
8297         } else {
8298                 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
8299                 bit_num  = vfid % 32;
8300                 if (clr)
8301                         desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8302                 else
8303                         desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
8304         }
8305
8306         return 0;
8307 }
8308
8309 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
8310 {
8311 #define HCLGE_DESC_NUMBER 3
8312 #define HCLGE_FUNC_NUMBER_PER_DESC 6
8313         int i, j;
8314
8315         for (i = 1; i < HCLGE_DESC_NUMBER; i++)
8316                 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
8317                         if (desc[i].data[j])
8318                                 return false;
8319
8320         return true;
8321 }
8322
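/* Pack a 6-byte MAC address into the hi32/lo16 fields of a MAC VLAN table
 * entry and set the entry flags, marking the entry as multicast when
 * requested.
 */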
8323 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
8324                                    const u8 *addr, bool is_mc)
8325 {
8326         const unsigned char *mac_addr = addr;
8327         u32 high_val = mac_addr[0] | (mac_addr[1] << 8) |
8328                        (mac_addr[2] << 16) | (mac_addr[3] << 24);
8329         u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
8330
8331         hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8332         if (is_mc) {
8333                 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
8334                 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8335         }
8336
8337         new_req->mac_addr_hi32 = cpu_to_le32(high_val);
8338         new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
8339 }
8340
8341 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
8342                                      struct hclge_mac_vlan_tbl_entry_cmd *req)
8343 {
8344         struct hclge_dev *hdev = vport->back;
8345         struct hclge_desc desc;
8346         u8 resp_code;
8347         u16 retval;
8348         int ret;
8349
8350         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
8351
8352         memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8353
8354         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8355         if (ret) {
8356                 dev_err(&hdev->pdev->dev,
8357                         "del mac addr failed for cmd_send, ret =%d.\n",
8358                         ret);
8359                 return ret;
8360         }
8361         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8362         retval = le16_to_cpu(desc.retval);
8363
8364         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8365                                              HCLGE_MAC_VLAN_REMOVE);
8366 }
8367
8368 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
8369                                      struct hclge_mac_vlan_tbl_entry_cmd *req,
8370                                      struct hclge_desc *desc,
8371                                      bool is_mc)
8372 {
8373         struct hclge_dev *hdev = vport->back;
8374         u8 resp_code;
8375         u16 retval;
8376         int ret;
8377
8378         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
8379         if (is_mc) {
8380                 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8381                 memcpy(desc[0].data,
8382                        req,
8383                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8384                 hclge_cmd_setup_basic_desc(&desc[1],
8385                                            HCLGE_OPC_MAC_VLAN_ADD,
8386                                            true);
8387                 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8388                 hclge_cmd_setup_basic_desc(&desc[2],
8389                                            HCLGE_OPC_MAC_VLAN_ADD,
8390                                            true);
8391                 ret = hclge_cmd_send(&hdev->hw, desc, 3);
8392         } else {
8393                 memcpy(desc[0].data,
8394                        req,
8395                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8396                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
8397         }
8398         if (ret) {
8399                 dev_err(&hdev->pdev->dev,
8400                         "lookup mac addr failed for cmd_send, ret =%d.\n",
8401                         ret);
8402                 return ret;
8403         }
8404         resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
8405         retval = le16_to_cpu(desc[0].retval);
8406
8407         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8408                                              HCLGE_MAC_VLAN_LKUP);
8409 }
8410
8411 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
8412                                   struct hclge_mac_vlan_tbl_entry_cmd *req,
8413                                   struct hclge_desc *mc_desc)
8414 {
8415         struct hclge_dev *hdev = vport->back;
8416         int cfg_status;
8417         u8 resp_code;
8418         u16 retval;
8419         int ret;
8420
8421         if (!mc_desc) {
8422                 struct hclge_desc desc;
8423
8424                 hclge_cmd_setup_basic_desc(&desc,
8425                                            HCLGE_OPC_MAC_VLAN_ADD,
8426                                            false);
8427                 memcpy(desc.data, req,
8428                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8429                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8430                 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8431                 retval = le16_to_cpu(desc.retval);
8432
8433                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8434                                                            resp_code,
8435                                                            HCLGE_MAC_VLAN_ADD);
8436         } else {
8437                 hclge_cmd_reuse_desc(&mc_desc[0], false);
8438                 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8439                 hclge_cmd_reuse_desc(&mc_desc[1], false);
8440                 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8441                 hclge_cmd_reuse_desc(&mc_desc[2], false);
8442                 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
8443                 memcpy(mc_desc[0].data, req,
8444                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8445                 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
8446                 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
8447                 retval = le16_to_cpu(mc_desc[0].retval);
8448
8449                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8450                                                            resp_code,
8451                                                            HCLGE_MAC_VLAN_ADD);
8452         }
8453
8454         if (ret) {
8455                 dev_err(&hdev->pdev->dev,
8456                         "add mac addr failed for cmd_send, ret =%d.\n",
8457                         ret);
8458                 return ret;
8459         }
8460
8461         return cfg_status;
8462 }
8463
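/* Ask firmware to allocate 'space_size' entries of UMV (unicast MAC) table
 * space; the size actually granted is returned through 'allocated_size'.
 */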
8464 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
8465                                u16 *allocated_size)
8466 {
8467         struct hclge_umv_spc_alc_cmd *req;
8468         struct hclge_desc desc;
8469         int ret;
8470
8471         req = (struct hclge_umv_spc_alc_cmd *)desc.data;
8472         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
8473
8474         req->space_size = cpu_to_le32(space_size);
8475
8476         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8477         if (ret) {
8478                 dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
8479                         ret);
8480                 return ret;
8481         }
8482
8483         *allocated_size = le32_to_cpu(desc.data[1]);
8484
8485         return 0;
8486 }
8487
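/* Split the UMV space granted by firmware into equal private quotas for
 * each vport; one extra quota plus the division remainder forms the
 * shared pool that is consumed once a vport exhausts its private quota.
 */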
8488 static int hclge_init_umv_space(struct hclge_dev *hdev)
8489 {
8490         u16 allocated_size = 0;
8491         int ret;
8492
8493         ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
8494         if (ret)
8495                 return ret;
8496
8497         if (allocated_size < hdev->wanted_umv_size)
8498                 dev_warn(&hdev->pdev->dev,
8499                          "failed to alloc umv space, want %u, get %u\n",
8500                          hdev->wanted_umv_size, allocated_size);
8501
8502         hdev->max_umv_size = allocated_size;
8503         hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
8504         hdev->share_umv_size = hdev->priv_umv_size +
8505                         hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8506
8507         if (hdev->ae_dev->dev_specs.mc_mac_size)
8508                 set_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, hdev->ae_dev->caps);
8509
8510         return 0;
8511 }
8512
8513 static void hclge_reset_umv_space(struct hclge_dev *hdev)
8514 {
8515         struct hclge_vport *vport;
8516         int i;
8517
8518         for (i = 0; i < hdev->num_alloc_vport; i++) {
8519                 vport = &hdev->vport[i];
8520                 vport->used_umv_num = 0;
8521         }
8522
8523         mutex_lock(&hdev->vport_lock);
8524         hdev->share_umv_size = hdev->priv_umv_size +
8525                         hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8526         mutex_unlock(&hdev->vport_lock);
8527
8528         hdev->used_mc_mac_num = 0;
8529 }
8530
8531 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
8532 {
8533         struct hclge_dev *hdev = vport->back;
8534         bool is_full;
8535
8536         if (need_lock)
8537                 mutex_lock(&hdev->vport_lock);
8538
8539         is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
8540                    hdev->share_umv_size == 0);
8541
8542         if (need_lock)
8543                 mutex_unlock(&hdev->vport_lock);
8544
8545         return is_full;
8546 }
8547
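/* Account for one unicast MAC entry being freed (is_free) or consumed:
 * entries beyond the vport's private UMV quota are returned to or taken
 * from the shared pool. Caller must hold hdev->vport_lock.
 */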
8548 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
8549 {
8550         struct hclge_dev *hdev = vport->back;
8551
8552         if (is_free) {
8553                 if (vport->used_umv_num > hdev->priv_umv_size)
8554                         hdev->share_umv_size++;
8555
8556                 if (vport->used_umv_num > 0)
8557                         vport->used_umv_num--;
8558         } else {
8559                 if (vport->used_umv_num >= hdev->priv_umv_size &&
8560                     hdev->share_umv_size > 0)
8561                         hdev->share_umv_size--;
8562                 vport->used_umv_num++;
8563         }
8564 }
8565
8566 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
8567                                                   const u8 *mac_addr)
8568 {
8569         struct hclge_mac_node *mac_node, *tmp;
8570
8571         list_for_each_entry_safe(mac_node, tmp, list, node)
8572                 if (ether_addr_equal(mac_addr, mac_node->mac_addr))
8573                         return mac_node;
8574
8575         return NULL;
8576 }
8577
8578 static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
8579                                   enum HCLGE_MAC_NODE_STATE state)
8580 {
8581         switch (state) {
8582         /* from set_rx_mode or tmp_add_list */
8583         case HCLGE_MAC_TO_ADD:
8584                 if (mac_node->state == HCLGE_MAC_TO_DEL)
8585                         mac_node->state = HCLGE_MAC_ACTIVE;
8586                 break;
8587         /* only from set_rx_mode */
8588         case HCLGE_MAC_TO_DEL:
8589                 if (mac_node->state == HCLGE_MAC_TO_ADD) {
8590                         list_del(&mac_node->node);
8591                         kfree(mac_node);
8592                 } else {
8593                         mac_node->state = HCLGE_MAC_TO_DEL;
8594                 }
8595                 break;
8596         /* only from tmp_add_list, the mac_node->state won't be
8597          * ACTIVE.
8598          */
8599         case HCLGE_MAC_ACTIVE:
8600                 if (mac_node->state == HCLGE_MAC_TO_ADD)
8601                         mac_node->state = HCLGE_MAC_ACTIVE;
8602
8603                 break;
8604         }
8605 }
8606
8607 int hclge_update_mac_list(struct hclge_vport *vport,
8608                           enum HCLGE_MAC_NODE_STATE state,
8609                           enum HCLGE_MAC_ADDR_TYPE mac_type,
8610                           const unsigned char *addr)
8611 {
8612         struct hclge_dev *hdev = vport->back;
8613         struct hclge_mac_node *mac_node;
8614         struct list_head *list;
8615
8616         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8617                 &vport->uc_mac_list : &vport->mc_mac_list;
8618
8619         spin_lock_bh(&vport->mac_list_lock);
8620
8621         /* if the mac addr is already in the mac list, there is no need to
8622          * add a new entry; just check the mac addr state and convert it to
8623          * a new state, remove it, or do nothing.
8624          */
8625         mac_node = hclge_find_mac_node(list, addr);
8626         if (mac_node) {
8627                 hclge_update_mac_node(mac_node, state);
8628                 spin_unlock_bh(&vport->mac_list_lock);
8629                 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8630                 return 0;
8631         }
8632
8633         /* if this address was never added, there is nothing to delete */
8634         if (state == HCLGE_MAC_TO_DEL) {
8635                 spin_unlock_bh(&vport->mac_list_lock);
8636                 dev_err(&hdev->pdev->dev,
8637                         "failed to delete address %pM from mac list\n",
8638                         addr);
8639                 return -ENOENT;
8640         }
8641
8642         mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
8643         if (!mac_node) {
8644                 spin_unlock_bh(&vport->mac_list_lock);
8645                 return -ENOMEM;
8646         }
8647
8648         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8649
8650         mac_node->state = state;
8651         ether_addr_copy(mac_node->mac_addr, addr);
8652         list_add_tail(&mac_node->node, list);
8653
8654         spin_unlock_bh(&vport->mac_list_lock);
8655
8656         return 0;
8657 }
8658
8659 static int hclge_add_uc_addr(struct hnae3_handle *handle,
8660                              const unsigned char *addr)
8661 {
8662         struct hclge_vport *vport = hclge_get_vport(handle);
8663
8664         return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
8665                                      addr);
8666 }
8667
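/* Program a unicast MAC address into the hardware MAC VLAN table for this
 * vport. Returns 0 if the address was added or already present, -ENOSPC
 * when both the private and shared UMV space are exhausted.
 */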
8668 int hclge_add_uc_addr_common(struct hclge_vport *vport,
8669                              const unsigned char *addr)
8670 {
8671         struct hclge_dev *hdev = vport->back;
8672         struct hclge_mac_vlan_tbl_entry_cmd req;
8673         struct hclge_desc desc;
8674         u16 egress_port = 0;
8675         int ret;
8676
8677         /* mac addr check */
8678         if (is_zero_ether_addr(addr) ||
8679             is_broadcast_ether_addr(addr) ||
8680             is_multicast_ether_addr(addr)) {
8681                 dev_err(&hdev->pdev->dev,
8682                         "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
8683                          addr, is_zero_ether_addr(addr),
8684                          is_broadcast_ether_addr(addr),
8685                          is_multicast_ether_addr(addr));
8686                 return -EINVAL;
8687         }
8688
8689         memset(&req, 0, sizeof(req));
8690
8691         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8692                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8693
8694         req.egress_port = cpu_to_le16(egress_port);
8695
8696         hclge_prepare_mac_addr(&req, addr, false);
8697
8698         /* Look up the mac address in the mac_vlan table, and add
8699          * it if the entry does not exist. Duplicate unicast entries
8700          * are not allowed in the mac vlan table.
8701          */
8702         ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
8703         if (ret == -ENOENT) {
8704                 mutex_lock(&hdev->vport_lock);
8705                 if (!hclge_is_umv_space_full(vport, false)) {
8706                         ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
8707                         if (!ret)
8708                                 hclge_update_umv_space(vport, false);
8709                         mutex_unlock(&hdev->vport_lock);
8710                         return ret;
8711                 }
8712                 mutex_unlock(&hdev->vport_lock);
8713
8714                 if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
8715                         dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
8716                                 hdev->priv_umv_size);
8717
8718                 return -ENOSPC;
8719         }
8720
8721         /* check if we just hit the duplicate */
8722         if (!ret) {
8723                 dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
8724                          vport->vport_id, addr);
8725                 return 0;
8726         }
8727
8728         dev_err(&hdev->pdev->dev,
8729                 "PF failed to add unicast entry(%pM) in the MAC table\n",
8730                 addr);
8731
8732         return ret;
8733 }
8734
8735 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
8736                             const unsigned char *addr)
8737 {
8738         struct hclge_vport *vport = hclge_get_vport(handle);
8739
8740         return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
8741                                      addr);
8742 }
8743
8744 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
8745                             const unsigned char *addr)
8746 {
8747         struct hclge_dev *hdev = vport->back;
8748         struct hclge_mac_vlan_tbl_entry_cmd req;
8749         int ret;
8750
8751         /* mac addr check */
8752         if (is_zero_ether_addr(addr) ||
8753             is_broadcast_ether_addr(addr) ||
8754             is_multicast_ether_addr(addr)) {
8755                 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
8756                         addr);
8757                 return -EINVAL;
8758         }
8759
8760         memset(&req, 0, sizeof(req));
8761         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
8762         hclge_prepare_mac_addr(&req, addr, false);
8763         ret = hclge_remove_mac_vlan_tbl(vport, &req);
8764         if (!ret) {
8765                 mutex_lock(&hdev->vport_lock);
8766                 hclge_update_umv_space(vport, true);
8767                 mutex_unlock(&hdev->vport_lock);
8768         } else if (ret == -ENOENT) {
8769                 ret = 0;
8770         }
8771
8772         return ret;
8773 }
8774
8775 static int hclge_add_mc_addr(struct hnae3_handle *handle,
8776                              const unsigned char *addr)
8777 {
8778         struct hclge_vport *vport = hclge_get_vport(handle);
8779
8780         return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
8781                                      addr);
8782 }
8783
8784 int hclge_add_mc_addr_common(struct hclge_vport *vport,
8785                              const unsigned char *addr)
8786 {
8787         struct hclge_dev *hdev = vport->back;
8788         struct hclge_mac_vlan_tbl_entry_cmd req;
8789         struct hclge_desc desc[3];
8790         bool is_new_addr = false;
8791         int status;
8792
8793         /* mac addr check */
8794         if (!is_multicast_ether_addr(addr)) {
8795                 dev_err(&hdev->pdev->dev,
8796                         "Add mc mac err! invalid mac:%pM.\n",
8797                          addr);
8798                 return -EINVAL;
8799         }
8800         memset(&req, 0, sizeof(req));
8801         hclge_prepare_mac_addr(&req, addr, true);
8802         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8803         if (status) {
8804                 if (hnae3_ae_dev_mc_mac_mng_supported(hdev->ae_dev) &&
8805                     hdev->used_mc_mac_num >=
8806                     hdev->ae_dev->dev_specs.mc_mac_size)
8807                         goto err_no_space;
8808
8809                 is_new_addr = true;
8810
8811                 /* This mac addr does not exist, add a new entry for it */
8812                 memset(desc[0].data, 0, sizeof(desc[0].data));
8813                 memset(desc[1].data, 0, sizeof(desc[1].data));
8814                 memset(desc[2].data, 0, sizeof(desc[2].data));
8815         }
8816         status = hclge_update_desc_vfid(desc, vport->vport_id, false);
8817         if (status)
8818                 return status;
8819         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8820         if (status == -ENOSPC)
8821                 goto err_no_space;
8822         else if (!status && is_new_addr)
8823                 hdev->used_mc_mac_num++;
8824
8825         return status;
8826
8827 err_no_space:
8828         /* if already overflowed, do not print every time */
8829         if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
8830                 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
8831         return -ENOSPC;
8832 }
8833
8834 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
8835                             const unsigned char *addr)
8836 {
8837         struct hclge_vport *vport = hclge_get_vport(handle);
8838
8839         return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
8840                                      addr);
8841 }
8842
8843 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
8844                             const unsigned char *addr)
8845 {
8846         struct hclge_dev *hdev = vport->back;
8847         struct hclge_mac_vlan_tbl_entry_cmd req;
8848         enum hclge_cmd_status status;
8849         struct hclge_desc desc[3];
8850
8851         /* mac addr check */
8852         if (!is_multicast_ether_addr(addr)) {
8853                 dev_dbg(&hdev->pdev->dev,
8854                         "Remove mc mac err! invalid mac:%pM.\n",
8855                          addr);
8856                 return -EINVAL;
8857         }
8858
8859         memset(&req, 0, sizeof(req));
8860         hclge_prepare_mac_addr(&req, addr, true);
8861         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8862         if (!status) {
8863                 /* This mac addr exists, remove this handle's VFID from it */
8864                 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
8865                 if (status)
8866                         return status;
8867
8868                 if (hclge_is_all_function_id_zero(desc)) {
8869                         /* All the vfids are zero, so delete this entry */
8870                         status = hclge_remove_mac_vlan_tbl(vport, &req);
8871                         if (!status)
8872                                 hdev->used_mc_mac_num--;
8873                 } else {
8874                         /* Not all the vfids are zero, so update the vfid bitmap */
8875                         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8876                 }
8877         } else if (status == -ENOENT) {
8878                 status = 0;
8879         }
8880
8881         return status;
8882 }
8883
8884 static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
8885                                       struct list_head *list,
8886                                       int (*sync)(struct hclge_vport *,
8887                                                   const unsigned char *))
8888 {
8889         struct hclge_mac_node *mac_node, *tmp;
8890         int ret;
8891
8892         list_for_each_entry_safe(mac_node, tmp, list, node) {
8893                 ret = sync(vport, mac_node->mac_addr);
8894                 if (!ret) {
8895                         mac_node->state = HCLGE_MAC_ACTIVE;
8896                 } else {
8897                         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8898                                 &vport->state);
8899                         break;
8900                 }
8901         }
8902 }
8903
8904 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
8905                                         struct list_head *list,
8906                                         int (*unsync)(struct hclge_vport *,
8907                                                       const unsigned char *))
8908 {
8909         struct hclge_mac_node *mac_node, *tmp;
8910         int ret;
8911
8912         list_for_each_entry_safe(mac_node, tmp, list, node) {
8913                 ret = unsync(vport, mac_node->mac_addr);
8914                 if (!ret || ret == -ENOENT) {
8915                         list_del(&mac_node->node);
8916                         kfree(mac_node);
8917                 } else {
8918                         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8919                                 &vport->state);
8920                         break;
8921                 }
8922         }
8923 }
8924
8925 static bool hclge_sync_from_add_list(struct list_head *add_list,
8926                                      struct list_head *mac_list)
8927 {
8928         struct hclge_mac_node *mac_node, *tmp, *new_node;
8929         bool all_added = true;
8930
8931         list_for_each_entry_safe(mac_node, tmp, add_list, node) {
8932                 if (mac_node->state == HCLGE_MAC_TO_ADD)
8933                         all_added = false;
8934
8935                 /* if the mac address from tmp_add_list is not in the
8936                  * uc/mc_mac_list, it means a TO_DEL request was received
8937                  * while the address was being added to the mac table.
8938                  * If the mac_node state is ACTIVE, change it to TO_DEL so
8939                  * it gets removed next time; otherwise it must be TO_ADD,
8940                  * meaning this address was never added to the mac table,
8941                  * so just remove the mac node.
8942                  */
8943                 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8944                 if (new_node) {
8945                         hclge_update_mac_node(new_node, mac_node->state);
8946                         list_del(&mac_node->node);
8947                         kfree(mac_node);
8948                 } else if (mac_node->state == HCLGE_MAC_ACTIVE) {
8949                         mac_node->state = HCLGE_MAC_TO_DEL;
8950                         list_move_tail(&mac_node->node, mac_list);
8951                 } else {
8952                         list_del(&mac_node->node);
8953                         kfree(mac_node);
8954                 }
8955         }
8956
8957         return all_added;
8958 }
8959
8960 static void hclge_sync_from_del_list(struct list_head *del_list,
8961                                      struct list_head *mac_list)
8962 {
8963         struct hclge_mac_node *mac_node, *tmp, *new_node;
8964
8965         list_for_each_entry_safe(mac_node, tmp, del_list, node) {
8966                 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8967                 if (new_node) {
8968                         /* If the mac addr exists in the mac list, a new
8969                          * TO_ADD request was received while the address was
8970                          * being configured. Since the mac node state is
8971                          * TO_ADD and the address is still in the hardware
8972                          * (the delete failed), we just need to change the
8973                          * mac node state to ACTIVE.
8974                          */
8975                         new_node->state = HCLGE_MAC_ACTIVE;
8976                         list_del(&mac_node->node);
8977                         kfree(mac_node);
8978                 } else {
8979                         list_move_tail(&mac_node->node, mac_list);
8980                 }
8981         }
8982 }
8983
8984 static void hclge_update_overflow_flags(struct hclge_vport *vport,
8985                                         enum HCLGE_MAC_ADDR_TYPE mac_type,
8986                                         bool is_all_added)
8987 {
8988         if (mac_type == HCLGE_MAC_ADDR_UC) {
8989                 if (is_all_added)
8990                         vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
8991                 else
8992                         vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
8993         } else {
8994                 if (is_all_added)
8995                         vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
8996                 else
8997                         vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
8998         }
8999 }
9000
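/* Synchronize one vport's unicast or multicast address list with hardware:
 * snapshot the pending TO_ADD/TO_DEL nodes under the list lock, program the
 * hardware table outside the lock, then merge the results back into the
 * list so that any failures are retried on the next pass.
 */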
9001 static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
9002                                        enum HCLGE_MAC_ADDR_TYPE mac_type)
9003 {
9004         struct hclge_mac_node *mac_node, *tmp, *new_node;
9005         struct list_head tmp_add_list, tmp_del_list;
9006         struct list_head *list;
9007         bool all_added;
9008
9009         INIT_LIST_HEAD(&tmp_add_list);
9010         INIT_LIST_HEAD(&tmp_del_list);
9011
9012         /* move the mac addrs to tmp_add_list and tmp_del_list, so they
9013          * can be added/deleted outside the spin lock
9014          */
9015         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
9016                 &vport->uc_mac_list : &vport->mc_mac_list;
9017
9018         spin_lock_bh(&vport->mac_list_lock);
9019
9020         list_for_each_entry_safe(mac_node, tmp, list, node) {
9021                 switch (mac_node->state) {
9022                 case HCLGE_MAC_TO_DEL:
9023                         list_move_tail(&mac_node->node, &tmp_del_list);
9024                         break;
9025                 case HCLGE_MAC_TO_ADD:
9026                         new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
9027                         if (!new_node)
9028                                 goto stop_traverse;
9029                         ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
9030                         new_node->state = mac_node->state;
9031                         list_add_tail(&new_node->node, &tmp_add_list);
9032                         break;
9033                 default:
9034                         break;
9035                 }
9036         }
9037
9038 stop_traverse:
9039         spin_unlock_bh(&vport->mac_list_lock);
9040
9041         /* delete first, in order to get max mac table space for adding */
9042         if (mac_type == HCLGE_MAC_ADDR_UC) {
9043                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9044                                             hclge_rm_uc_addr_common);
9045                 hclge_sync_vport_mac_list(vport, &tmp_add_list,
9046                                           hclge_add_uc_addr_common);
9047         } else {
9048                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9049                                             hclge_rm_mc_addr_common);
9050                 hclge_sync_vport_mac_list(vport, &tmp_add_list,
9051                                           hclge_add_mc_addr_common);
9052         }
9053
9054         /* if adding/deleting some mac addresses failed, move them back to
9055          * the mac_list and retry next time.
9056          */
9057         spin_lock_bh(&vport->mac_list_lock);
9058
9059         hclge_sync_from_del_list(&tmp_del_list, list);
9060         all_added = hclge_sync_from_add_list(&tmp_add_list, list);
9061
9062         spin_unlock_bh(&vport->mac_list_lock);
9063
9064         hclge_update_overflow_flags(vport, mac_type, all_added);
9065 }
9066
9067 static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
9068 {
9069         struct hclge_dev *hdev = vport->back;
9070
9071         if (test_bit(vport->vport_id, hdev->vport_config_block))
9072                 return false;
9073
9074         if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
9075                 return true;
9076
9077         return false;
9078 }
9079
9080 static void hclge_sync_mac_table(struct hclge_dev *hdev)
9081 {
9082         int i;
9083
9084         for (i = 0; i < hdev->num_alloc_vport; i++) {
9085                 struct hclge_vport *vport = &hdev->vport[i];
9086
9087                 if (!hclge_need_sync_mac_table(vport))
9088                         continue;
9089
9090                 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
9091                 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
9092         }
9093 }
9094
9095 static void hclge_build_del_list(struct list_head *list,
9096                                  bool is_del_list,
9097                                  struct list_head *tmp_del_list)
9098 {
9099         struct hclge_mac_node *mac_cfg, *tmp;
9100
9101         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
9102                 switch (mac_cfg->state) {
9103                 case HCLGE_MAC_TO_DEL:
9104                 case HCLGE_MAC_ACTIVE:
9105                         list_move_tail(&mac_cfg->node, tmp_del_list);
9106                         break;
9107                 case HCLGE_MAC_TO_ADD:
9108                         if (is_del_list) {
9109                                 list_del(&mac_cfg->node);
9110                                 kfree(mac_cfg);
9111                         }
9112                         break;
9113                 }
9114         }
9115 }
9116
9117 static void hclge_unsync_del_list(struct hclge_vport *vport,
9118                                   int (*unsync)(struct hclge_vport *vport,
9119                                                 const unsigned char *addr),
9120                                   bool is_del_list,
9121                                   struct list_head *tmp_del_list)
9122 {
9123         struct hclge_mac_node *mac_cfg, *tmp;
9124         int ret;
9125
9126         list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) {
9127                 ret = unsync(vport, mac_cfg->mac_addr);
9128                 if (!ret || ret == -ENOENT) {
9129                         /* clear all mac addrs from hardware, but keep them
9130                          * in the mac list so they can be restored after the
9131                          * vf reset finishes.
9132                          */
9133                         if (!is_del_list &&
9134                             mac_cfg->state == HCLGE_MAC_ACTIVE) {
9135                                 mac_cfg->state = HCLGE_MAC_TO_ADD;
9136                         } else {
9137                                 list_del(&mac_cfg->node);
9138                                 kfree(mac_cfg);
9139                         }
9140                 } else if (is_del_list) {
9141                         mac_cfg->state = HCLGE_MAC_TO_DEL;
9142                 }
9143         }
9144 }
9145
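/* Remove all of a vport's unicast or multicast addresses from hardware.
 * When is_del_list is false (e.g. around a VF reset), active entries are
 * kept in the software list as TO_ADD so they can be restored later.
 */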
9146 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
9147                                   enum HCLGE_MAC_ADDR_TYPE mac_type)
9148 {
9149         int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
9150         struct hclge_dev *hdev = vport->back;
9151         struct list_head tmp_del_list, *list;
9152
9153         if (mac_type == HCLGE_MAC_ADDR_UC) {
9154                 list = &vport->uc_mac_list;
9155                 unsync = hclge_rm_uc_addr_common;
9156         } else {
9157                 list = &vport->mc_mac_list;
9158                 unsync = hclge_rm_mc_addr_common;
9159         }
9160
9161         INIT_LIST_HEAD(&tmp_del_list);
9162
9163         if (!is_del_list)
9164                 set_bit(vport->vport_id, hdev->vport_config_block);
9165
9166         spin_lock_bh(&vport->mac_list_lock);
9167
9168         hclge_build_del_list(list, is_del_list, &tmp_del_list);
9169
9170         spin_unlock_bh(&vport->mac_list_lock);
9171
9172         hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);
9173
9174         spin_lock_bh(&vport->mac_list_lock);
9175
9176         hclge_sync_from_del_list(&tmp_del_list, list);
9177
9178         spin_unlock_bh(&vport->mac_list_lock);
9179 }
9180
9181 /* remove all mac addresses when uninitializing */
9182 static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
9183                                         enum HCLGE_MAC_ADDR_TYPE mac_type)
9184 {
9185         struct hclge_mac_node *mac_node, *tmp;
9186         struct hclge_dev *hdev = vport->back;
9187         struct list_head tmp_del_list, *list;
9188
9189         INIT_LIST_HEAD(&tmp_del_list);
9190
9191         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
9192                 &vport->uc_mac_list : &vport->mc_mac_list;
9193
9194         spin_lock_bh(&vport->mac_list_lock);
9195
9196         list_for_each_entry_safe(mac_node, tmp, list, node) {
9197                 switch (mac_node->state) {
9198                 case HCLGE_MAC_TO_DEL:
9199                 case HCLGE_MAC_ACTIVE:
9200                         list_move_tail(&mac_node->node, &tmp_del_list);
9201                         break;
9202                 case HCLGE_MAC_TO_ADD:
9203                         list_del(&mac_node->node);
9204                         kfree(mac_node);
9205                         break;
9206                 }
9207         }
9208
9209         spin_unlock_bh(&vport->mac_list_lock);
9210
9211         if (mac_type == HCLGE_MAC_ADDR_UC)
9212                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9213                                             hclge_rm_uc_addr_common);
9214         else
9215                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9216                                             hclge_rm_mc_addr_common);
9217
9218         if (!list_empty(&tmp_del_list))
9219                 dev_warn(&hdev->pdev->dev,
9220                          "failed to completely uninit %s mac list for vport %u\n",
9221                          mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
9222                          vport->vport_id);
9223
9224         list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
9225                 list_del(&mac_node->node);
9226                 kfree(mac_node);
9227         }
9228 }
9229
9230 static void hclge_uninit_mac_table(struct hclge_dev *hdev)
9231 {
9232         struct hclge_vport *vport;
9233         int i;
9234
9235         for (i = 0; i < hdev->num_alloc_vport; i++) {
9236                 vport = &hdev->vport[i];
9237                 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
9238                 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
9239         }
9240 }
9241
9242 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
9243                                               u16 cmdq_resp, u8 resp_code)
9244 {
9245 #define HCLGE_ETHERTYPE_SUCCESS_ADD             0
9246 #define HCLGE_ETHERTYPE_ALREADY_ADD             1
9247 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW        2
9248 #define HCLGE_ETHERTYPE_KEY_CONFLICT            3
9249
9250         int return_status;
9251
9252         if (cmdq_resp) {
9253                 dev_err(&hdev->pdev->dev,
9254                         "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
9255                         cmdq_resp);
9256                 return -EIO;
9257         }
9258
9259         switch (resp_code) {
9260         case HCLGE_ETHERTYPE_SUCCESS_ADD:
9261         case HCLGE_ETHERTYPE_ALREADY_ADD:
9262                 return_status = 0;
9263                 break;
9264         case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
9265                 dev_err(&hdev->pdev->dev,
9266                         "add mac ethertype failed for manager table overflow.\n");
9267                 return_status = -EIO;
9268                 break;
9269         case HCLGE_ETHERTYPE_KEY_CONFLICT:
9270                 dev_err(&hdev->pdev->dev,
9271                         "add mac ethertype failed for key conflict.\n");
9272                 return_status = -EIO;
9273                 break;
9274         default:
9275                 dev_err(&hdev->pdev->dev,
9276                         "add mac ethertype failed for undefined, code=%u.\n",
9277                         resp_code);
9278                 return_status = -EIO;
9279         }
9280
9281         return return_status;
9282 }
9283
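/* A candidate MAC for a VF is considered a duplicate if it is already
 * programmed in the hardware MAC VLAN table or already assigned to
 * another VF.
 */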
9284 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
9285                                      u8 *mac_addr)
9286 {
9287         struct hclge_mac_vlan_tbl_entry_cmd req;
9288         struct hclge_dev *hdev = vport->back;
9289         struct hclge_desc desc;
9290         u16 egress_port = 0;
9291         int i;
9292
9293         if (is_zero_ether_addr(mac_addr))
9294                 return false;
9295
9296         memset(&req, 0, sizeof(req));
9297         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
9298                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
9299         req.egress_port = cpu_to_le16(egress_port);
9300         hclge_prepare_mac_addr(&req, mac_addr, false);
9301
9302         if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
9303                 return true;
9304
9305         vf_idx += HCLGE_VF_VPORT_START_NUM;
9306         for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++)
9307                 if (i != vf_idx &&
9308                     ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
9309                         return true;
9310
9311         return false;
9312 }
9313
9314 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
9315                             u8 *mac_addr)
9316 {
9317         struct hclge_vport *vport = hclge_get_vport(handle);
9318         struct hclge_dev *hdev = vport->back;
9319
9320         vport = hclge_get_vf_vport(hdev, vf);
9321         if (!vport)
9322                 return -EINVAL;
9323
9324         if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
9325                 dev_info(&hdev->pdev->dev,
9326                          "Specified MAC(=%pM) is same as before, no change committed!\n",
9327                          mac_addr);
9328                 return 0;
9329         }
9330
9331         if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
9332                 dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
9333                         mac_addr);
9334                 return -EEXIST;
9335         }
9336
9337         ether_addr_copy(vport->vf_info.mac, mac_addr);
9338
9339         if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
9340                 dev_info(&hdev->pdev->dev,
9341                          "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
9342                          vf, mac_addr);
9343                 return hclge_inform_reset_assert_to_vf(vport);
9344         }
9345
9346         dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
9347                  vf, mac_addr);
9348         return 0;
9349 }
9350
9351 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
9352                              const struct hclge_mac_mgr_tbl_entry_cmd *req)
9353 {
9354         struct hclge_desc desc;
9355         u8 resp_code;
9356         u16 retval;
9357         int ret;
9358
9359         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
9360         memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
9361
9362         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9363         if (ret) {
9364                 dev_err(&hdev->pdev->dev,
9365                         "add mac ethertype failed for cmd_send, ret =%d.\n",
9366                         ret);
9367                 return ret;
9368         }
9369
9370         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
9371         retval = le16_to_cpu(desc.retval);
9372
9373         return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
9374 }
9375
9376 static int init_mgr_tbl(struct hclge_dev *hdev)
9377 {
9378         int ret;
9379         int i;
9380
9381         for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
9382                 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
9383                 if (ret) {
9384                         dev_err(&hdev->pdev->dev,
9385                                 "add mac ethertype failed, ret =%d.\n",
9386                                 ret);
9387                         return ret;
9388                 }
9389         }
9390
9391         return 0;
9392 }
9393
9394 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
9395 {
9396         struct hclge_vport *vport = hclge_get_vport(handle);
9397         struct hclge_dev *hdev = vport->back;
9398
9399         ether_addr_copy(p, hdev->hw.mac.mac_addr);
9400 }
9401
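/* Record a device address change in the vport's unicast list: queue the
 * new address for adding and, when it differs from the old one, queue the
 * old address for deletion. Caller must hold vport->mac_list_lock.
 */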
9402 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
9403                                        const u8 *old_addr, const u8 *new_addr)
9404 {
9405         struct list_head *list = &vport->uc_mac_list;
9406         struct hclge_mac_node *old_node, *new_node;
9407
9408         new_node = hclge_find_mac_node(list, new_addr);
9409         if (!new_node) {
9410                 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
9411                 if (!new_node)
9412                         return -ENOMEM;
9413
9414                 new_node->state = HCLGE_MAC_TO_ADD;
9415                 ether_addr_copy(new_node->mac_addr, new_addr);
9416                 list_add(&new_node->node, list);
9417         } else {
9418                 if (new_node->state == HCLGE_MAC_TO_DEL)
9419                         new_node->state = HCLGE_MAC_ACTIVE;
9420
9421                 /* keep the new addr at the head of the list, so that the
9422                  * dev addr is not left out of the mac table due to the umv
9423                  * space limitation after a global/imp reset clears the
9424                  * hardware mac table.
9425                  */
9426                 list_move(&new_node->node, list);
9427         }
9428
9429         if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
9430                 old_node = hclge_find_mac_node(list, old_addr);
9431                 if (old_node) {
9432                         if (old_node->state == HCLGE_MAC_TO_ADD) {
9433                                 list_del(&old_node->node);
9434                                 kfree(old_node);
9435                         } else {
9436                                 old_node->state = HCLGE_MAC_TO_DEL;
9437                         }
9438                 }
9439         }
9440
9441         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
9442
9443         return 0;
9444 }
9445
9446 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
9447                               bool is_first)
9448 {
9449         const unsigned char *new_addr = (const unsigned char *)p;
9450         struct hclge_vport *vport = hclge_get_vport(handle);
9451         struct hclge_dev *hdev = vport->back;
9452         unsigned char *old_addr = NULL;
9453         int ret;
9454
9455         /* mac addr check */
9456         if (is_zero_ether_addr(new_addr) ||
9457             is_broadcast_ether_addr(new_addr) ||
9458             is_multicast_ether_addr(new_addr)) {
9459                 dev_err(&hdev->pdev->dev,
9460                         "change uc mac err! invalid mac: %pM.\n",
9461                          new_addr);
9462                 return -EINVAL;
9463         }
9464
9465         ret = hclge_pause_addr_cfg(hdev, new_addr);
9466         if (ret) {
9467                 dev_err(&hdev->pdev->dev,
9468                         "failed to configure mac pause address, ret = %d\n",
9469                         ret);
9470                 return ret;
9471         }
9472
9473         if (!is_first)
9474                 old_addr = hdev->hw.mac.mac_addr;
9475
9476         spin_lock_bh(&vport->mac_list_lock);
9477         ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
9478         if (ret) {
9479                 dev_err(&hdev->pdev->dev,
9480                         "failed to change the mac addr:%pM, ret = %d\n",
9481                         new_addr, ret);
9482                 spin_unlock_bh(&vport->mac_list_lock);
9483
9484                 if (!is_first)
9485                         hclge_pause_addr_cfg(hdev, old_addr);
9486
9487                 return ret;
9488         }
9489         /* we must update the dev addr under the spin lock, to prevent it
9490          * from being removed by the set_rx_mode path.
9491          */
9492         ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
9493         spin_unlock_bh(&vport->mac_list_lock);
9494
9495         hclge_task_schedule(hdev, 0);
9496
9497         return 0;
9498 }
9499
9500 static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd)
9501 {
9502         struct mii_ioctl_data *data = if_mii(ifr);
9503
9504         if (!hnae3_dev_phy_imp_supported(hdev))
9505                 return -EOPNOTSUPP;
9506
9507         switch (cmd) {
9508         case SIOCGMIIPHY:
9509                 data->phy_id = hdev->hw.mac.phy_addr;
9510                 /* this command reads phy id and register at the same time */
9511                 fallthrough;
9512         case SIOCGMIIREG:
9513                 data->val_out = hclge_read_phy_reg(hdev, data->reg_num);
9514                 return 0;
9515
9516         case SIOCSMIIREG:
9517                 return hclge_write_phy_reg(hdev, data->reg_num, data->val_in);
9518         default:
9519                 return -EOPNOTSUPP;
9520         }
9521 }
9522
9523 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
9524                           int cmd)
9525 {
9526         struct hclge_vport *vport = hclge_get_vport(handle);
9527         struct hclge_dev *hdev = vport->back;
9528
9529         switch (cmd) {
9530         case SIOCGHWTSTAMP:
9531                 return hclge_ptp_get_cfg(hdev, ifr);
9532         case SIOCSHWTSTAMP:
9533                 return hclge_ptp_set_cfg(hdev, ifr);
9534         default:
9535                 if (!hdev->hw.mac.phydev)
9536                         return hclge_mii_ioctl(hdev, ifr, cmd);
9537         }
9538
9539         return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
9540 }
9541
9542 static int hclge_set_port_vlan_filter_bypass(struct hclge_dev *hdev, u8 vf_id,
9543                                              bool bypass_en)
9544 {
9545         struct hclge_port_vlan_filter_bypass_cmd *req;
9546         struct hclge_desc desc;
9547         int ret;
9548
9549         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, false);
9550         req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data;
9551         req->vf_id = vf_id;
9552         hnae3_set_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B,
9553                       bypass_en ? 1 : 0);
9554
9555         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9556         if (ret)
9557                 dev_err(&hdev->pdev->dev,
9558                         "failed to set vport%u port vlan filter bypass state, ret = %d.\n",
9559                         vf_id, ret);
9560
9561         return ret;
9562 }
9563
9564 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
9565                                       u8 fe_type, bool filter_en, u8 vf_id)
9566 {
9567         struct hclge_vlan_filter_ctrl_cmd *req;
9568         struct hclge_desc desc;
9569         int ret;
9570
9571         /* read current vlan filter parameter */
9572         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
9573         req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
9574         req->vlan_type = vlan_type;
9575         req->vf_id = vf_id;
9576
9577         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9578         if (ret) {
9579                 dev_err(&hdev->pdev->dev,
9580                         "failed to get vlan filter config, ret = %d.\n", ret);
9581                 return ret;
9582         }
9583
9584         /* modify and write new config parameter */
9585         hclge_cmd_reuse_desc(&desc, false);
9586         req->vlan_fe = filter_en ?
9587                         (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
9588
9589         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9590         if (ret)
9591                 dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
9592                         ret);
9593
9594         return ret;
9595 }
9596
9597 static int hclge_set_vport_vlan_filter(struct hclge_vport *vport, bool enable)
9598 {
9599         struct hclge_dev *hdev = vport->back;
9600         struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
9601         int ret;
9602
9603         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
9604                 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9605                                                   HCLGE_FILTER_FE_EGRESS_V1_B,
9606                                                   enable, vport->vport_id);
9607
9608         ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9609                                          HCLGE_FILTER_FE_EGRESS, enable,
9610                                          vport->vport_id);
9611         if (ret)
9612                 return ret;
9613
9614         if (test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps)) {
9615                 ret = hclge_set_port_vlan_filter_bypass(hdev, vport->vport_id,
9616                                                         !enable);
9617         } else if (!vport->vport_id) {
9618                 if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
9619                         enable = false;
9620
9621                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9622                                                  HCLGE_FILTER_FE_INGRESS,
9623                                                  enable, 0);
9624         }
9625
9626         return ret;
9627 }
9628
9629 static bool hclge_need_enable_vport_vlan_filter(struct hclge_vport *vport)
9630 {
9631         struct hnae3_handle *handle = &vport->nic;
9632         struct hclge_vport_vlan_cfg *vlan, *tmp;
9633         struct hclge_dev *hdev = vport->back;
9634
9635         if (vport->vport_id) {
9636                 if (vport->port_base_vlan_cfg.state !=
9637                         HNAE3_PORT_BASE_VLAN_DISABLE)
9638                         return true;
9639
9640                 if (vport->vf_info.trusted && vport->vf_info.request_uc_en)
9641                         return false;
9642         } else if (handle->netdev_flags & HNAE3_USER_UPE) {
9643                 return false;
9644         }
9645
9646         if (!vport->req_vlan_fltr_en)
9647                 return false;
9648
9649         /* for compatibility with older devices, always enable the vlan filter */
9650         if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
9651                 return true;
9652
9653         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
9654                 if (vlan->vlan_id != 0)
9655                         return true;
9656
9657         return false;
9658 }
9659
9660 int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en)
9661 {
9662         struct hclge_dev *hdev = vport->back;
9663         bool need_en;
9664         int ret;
9665
9666         mutex_lock(&hdev->vport_lock);
9667
9668         vport->req_vlan_fltr_en = request_en;
9669
9670         need_en = hclge_need_enable_vport_vlan_filter(vport);
9671         if (need_en == vport->cur_vlan_fltr_en) {
9672                 mutex_unlock(&hdev->vport_lock);
9673                 return 0;
9674         }
9675
9676         ret = hclge_set_vport_vlan_filter(vport, need_en);
9677         if (ret) {
9678                 mutex_unlock(&hdev->vport_lock);
9679                 return ret;
9680         }
9681
9682         vport->cur_vlan_fltr_en = need_en;
9683
9684         mutex_unlock(&hdev->vport_lock);
9685
9686         return 0;
9687 }
9688
9689 static int hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
9690 {
9691         struct hclge_vport *vport = hclge_get_vport(handle);
9692
9693         return hclge_enable_vport_vlan_filter(vport, enable);
9694 }
9695
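/* Build and send the two-descriptor VF VLAN filter command. The VF bitmap
 * is wider than a single descriptor, so the target VF's bit is placed in
 * either the first or the second descriptor.
 */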
9696 static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
9697                                         bool is_kill, u16 vlan,
9698                                         struct hclge_desc *desc)
9699 {
9700         struct hclge_vlan_filter_vf_cfg_cmd *req0;
9701         struct hclge_vlan_filter_vf_cfg_cmd *req1;
9702         u8 vf_byte_val;
9703         u8 vf_byte_off;
9704         int ret;
9705
9706         hclge_cmd_setup_basic_desc(&desc[0],
9707                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9708         hclge_cmd_setup_basic_desc(&desc[1],
9709                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9710
9711         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9712
9713         vf_byte_off = vfid / 8;
9714         vf_byte_val = 1 << (vfid % 8);
9715
9716         req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9717         req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
9718
9719         req0->vlan_id  = cpu_to_le16(vlan);
9720         req0->vlan_cfg = is_kill;
9721
9722         if (vf_byte_off < HCLGE_MAX_VF_BYTES)
9723                 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
9724         else
9725                 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
9726
9727         ret = hclge_cmd_send(&hdev->hw, desc, 2);
9728         if (ret) {
9729                 dev_err(&hdev->pdev->dev,
9730                         "Send vf vlan command fail, ret =%d.\n",
9731                         ret);
9732                 return ret;
9733         }
9734
9735         return 0;
9736 }
9737
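/* Interpret the firmware response to a VF VLAN filter command. When adding,
 * a "table full" response marks the VF VLAN table as full (firmware disables
 * the VF VLAN filter) and is not treated as an error; when killing, a "not
 * found" response is ignored since entries are never written once the table
 * is full.
 */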
9738 static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid,
9739                                           bool is_kill, struct hclge_desc *desc)
9740 {
9741         struct hclge_vlan_filter_vf_cfg_cmd *req;
9742
9743         req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9744
9745         if (!is_kill) {
9746 #define HCLGE_VF_VLAN_NO_ENTRY  2
9747                 if (!req->resp_code || req->resp_code == 1)
9748                         return 0;
9749
9750                 if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
9751                         set_bit(vfid, hdev->vf_vlan_full);
9752                         dev_warn(&hdev->pdev->dev,
9753                                  "vf vlan table is full, vf vlan filter is disabled\n");
9754                         return 0;
9755                 }
9756
9757                 dev_err(&hdev->pdev->dev,
9758                         "Add vf vlan filter fail, ret =%u.\n",
9759                         req->resp_code);
9760         } else {
9761 #define HCLGE_VF_VLAN_DEL_NO_FOUND      1
9762                 if (!req->resp_code)
9763                         return 0;
9764
9765                 /* The vf vlan filter is disabled when the vf vlan table
9766                  * is full, so the new vlan id was never added to the
9767                  * table. Just return 0 without a warning to avoid
9768                  * flooding the log when unloading.
9769                  */
9770                 if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
9771                         return 0;
9772
9773                 dev_err(&hdev->pdev->dev,
9774                         "Kill vf vlan filter fail, ret =%u.\n",
9775                         req->resp_code);
9776         }
9777
9778         return -EIO;
9779 }
9780
9781 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
9782                                     bool is_kill, u16 vlan)
9783 {
9784         struct hclge_vport *vport = &hdev->vport[vfid];
9785         struct hclge_desc desc[2];
9786         int ret;
9787
9788         /* If the vf vlan table is full, firmware disables the vf vlan
9789          * filter, so adding a new vlan id is neither possible nor needed.
9790          * If spoof check is enabled while the table is full, reject the
9791          * new vlan: tx packets with that vlan id would be dropped.
9792          */
9793         if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
9794                 if (vport->vf_info.spoofchk && vlan) {
9795                         dev_err(&hdev->pdev->dev,
9796                                 "Can't add vlan due to spoof check is on and vf vlan table is full\n");
9797                         return -EPERM;
9798                 }
9799                 return 0;
9800         }
9801
9802         ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc);
9803         if (ret)
9804                 return ret;
9805
9806         return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc);
9807 }
9808
9809 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
9810                                       u16 vlan_id, bool is_kill)
9811 {
9812         struct hclge_vlan_filter_pf_cfg_cmd *req;
9813         struct hclge_desc desc;
9814         u8 vlan_offset_byte_val;
9815         u8 vlan_offset_byte;
9816         u8 vlan_offset_160;
9817         int ret;
9818
9819         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
9820
9821         vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
9822         vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
9823                            HCLGE_VLAN_BYTE_SIZE;
9824         vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
9825
9826         req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
9827         req->vlan_offset = vlan_offset_160;
9828         req->vlan_cfg = is_kill;
9829         req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
9830
9831         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9832         if (ret)
9833                 dev_err(&hdev->pdev->dev,
9834                         "port vlan command, send fail, ret =%d.\n", ret);
9835         return ret;
9836 }
9837
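/* Program a VLAN id into the hardware VLAN filters for one vport. The VF VLAN
 * filter is always updated, while the port VLAN filter entry is only added
 * when the first vport joins the VLAN and removed when the last vport leaves
 * it, tracked through hdev->vlan_table.
 */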
9838 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
9839                                     u16 vport_id, u16 vlan_id,
9840                                     bool is_kill)
9841 {
9842         u16 vport_idx, vport_num = 0;
9843         int ret;
9844
9845         if (is_kill && !vlan_id)
9846                 return 0;
9847
9848         if (vlan_id >= VLAN_N_VID)
9849                 return -EINVAL;
9850
9851         ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id);
9852         if (ret) {
9853                 dev_err(&hdev->pdev->dev,
9854                         "Set %u vport vlan filter config fail, ret =%d.\n",
9855                         vport_id, ret);
9856                 return ret;
9857         }
9858
9859         /* vlan 0 may be added twice when 8021q module is enabled */
9860         if (!is_kill && !vlan_id &&
9861             test_bit(vport_id, hdev->vlan_table[vlan_id]))
9862                 return 0;
9863
9864         if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
9865                 dev_err(&hdev->pdev->dev,
9866                         "Add port vlan failed, vport %u is already in vlan %u\n",
9867                         vport_id, vlan_id);
9868                 return -EINVAL;
9869         }
9870
9871         if (is_kill &&
9872             !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
9873                 dev_err(&hdev->pdev->dev,
9874                         "Delete port vlan failed, vport %u is not in vlan %u\n",
9875                         vport_id, vlan_id);
9876                 return -EINVAL;
9877         }
9878
9879         for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
9880                 vport_num++;
9881
9882         if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
9883                 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
9884                                                  is_kill);
9885
9886         return ret;
9887 }
9888
9889 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
9890 {
9891         struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
9892         struct hclge_vport_vtag_tx_cfg_cmd *req;
9893         struct hclge_dev *hdev = vport->back;
9894         struct hclge_desc desc;
9895         u16 bmap_index;
9896         int status;
9897
9898         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
9899
9900         req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
9901         req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
9902         req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
9903         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
9904                       vcfg->accept_tag1 ? 1 : 0);
9905         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
9906                       vcfg->accept_untag1 ? 1 : 0);
9907         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
9908                       vcfg->accept_tag2 ? 1 : 0);
9909         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
9910                       vcfg->accept_untag2 ? 1 : 0);
9911         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
9912                       vcfg->insert_tag1_en ? 1 : 0);
9913         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
9914                       vcfg->insert_tag2_en ? 1 : 0);
9915         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B,
9916                       vcfg->tag_shift_mode_en ? 1 : 0);
9917         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
9918
9919         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9920         bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9921                         HCLGE_VF_NUM_PER_BYTE;
9922         req->vf_bitmap[bmap_index] =
9923                 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9924
9925         status = hclge_cmd_send(&hdev->hw, &desc, 1);
9926         if (status)
9927                 dev_err(&hdev->pdev->dev,
9928                         "Send port txvlan cfg command fail, ret =%d\n",
9929                         status);
9930
9931         return status;
9932 }
9933
9934 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
9935 {
9936         struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
9937         struct hclge_vport_vtag_rx_cfg_cmd *req;
9938         struct hclge_dev *hdev = vport->back;
9939         struct hclge_desc desc;
9940         u16 bmap_index;
9941         int status;
9942
9943         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
9944
9945         req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
9946         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
9947                       vcfg->strip_tag1_en ? 1 : 0);
9948         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
9949                       vcfg->strip_tag2_en ? 1 : 0);
9950         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
9951                       vcfg->vlan1_vlan_prionly ? 1 : 0);
9952         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
9953                       vcfg->vlan2_vlan_prionly ? 1 : 0);
9954         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B,
9955                       vcfg->strip_tag1_discard_en ? 1 : 0);
9956         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B,
9957                       vcfg->strip_tag2_discard_en ? 1 : 0);
9958
9959         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9960         bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9961                         HCLGE_VF_NUM_PER_BYTE;
9962         req->vf_bitmap[bmap_index] =
9963                 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9964
9965         status = hclge_cmd_send(&hdev->hw, &desc, 1);
9966         if (status)
9967                 dev_err(&hdev->pdev->dev,
9968                         "Send port rxvlan cfg command fail, ret =%d\n",
9969                         status);
9970
9971         return status;
9972 }
9973
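/* Configure TX VLAN insertion and RX VLAN stripping for a vport based on its
 * port based VLAN state: when a port based VLAN is set, the given tag and qos
 * are inserted on TX and tag2 is stripped and discarded on RX; otherwise no
 * tag is inserted and tag2 stripping follows the RX VLAN offload setting.
 */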
9974 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
9975                                   u16 port_base_vlan_state,
9976                                   u16 vlan_tag, u8 qos)
9977 {
9978         int ret;
9979
9980         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9981                 vport->txvlan_cfg.accept_tag1 = true;
9982                 vport->txvlan_cfg.insert_tag1_en = false;
9983                 vport->txvlan_cfg.default_tag1 = 0;
9984         } else {
9985                 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);
9986
9987                 vport->txvlan_cfg.accept_tag1 =
9988                         ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3;
9989                 vport->txvlan_cfg.insert_tag1_en = true;
9990                 vport->txvlan_cfg.default_tag1 = (qos << VLAN_PRIO_SHIFT) |
9991                                                  vlan_tag;
9992         }
9993
9994         vport->txvlan_cfg.accept_untag1 = true;
9995
9996         /* accept_tag2 and accept_untag2 are not supported on
9997          * pdev revision 0x20; newer revisions support them, but
9998          * these two fields cannot be configured by the user.
9999          */
10000         vport->txvlan_cfg.accept_tag2 = true;
10001         vport->txvlan_cfg.accept_untag2 = true;
10002         vport->txvlan_cfg.insert_tag2_en = false;
10003         vport->txvlan_cfg.default_tag2 = 0;
10004         vport->txvlan_cfg.tag_shift_mode_en = true;
10005
10006         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10007                 vport->rxvlan_cfg.strip_tag1_en = false;
10008                 vport->rxvlan_cfg.strip_tag2_en =
10009                                 vport->rxvlan_cfg.rx_vlan_offload_en;
10010                 vport->rxvlan_cfg.strip_tag2_discard_en = false;
10011         } else {
10012                 vport->rxvlan_cfg.strip_tag1_en =
10013                                 vport->rxvlan_cfg.rx_vlan_offload_en;
10014                 vport->rxvlan_cfg.strip_tag2_en = true;
10015                 vport->rxvlan_cfg.strip_tag2_discard_en = true;
10016         }
10017
10018         vport->rxvlan_cfg.strip_tag1_discard_en = false;
10019         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
10020         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
10021
10022         ret = hclge_set_vlan_tx_offload_cfg(vport);
10023         if (ret)
10024                 return ret;
10025
10026         return hclge_set_vlan_rx_offload_cfg(vport);
10027 }
10028
10029 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
10030 {
10031         struct hclge_rx_vlan_type_cfg_cmd *rx_req;
10032         struct hclge_tx_vlan_type_cfg_cmd *tx_req;
10033         struct hclge_desc desc;
10034         int status;
10035
10036         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
10037         rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
10038         rx_req->ot_fst_vlan_type =
10039                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
10040         rx_req->ot_sec_vlan_type =
10041                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
10042         rx_req->in_fst_vlan_type =
10043                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
10044         rx_req->in_sec_vlan_type =
10045                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
10046
10047         status = hclge_cmd_send(&hdev->hw, &desc, 1);
10048         if (status) {
10049                 dev_err(&hdev->pdev->dev,
10050                         "Send rxvlan protocol type command fail, ret =%d\n",
10051                         status);
10052                 return status;
10053         }
10054
10055         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
10056
10057         tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
10058         tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
10059         tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
10060
10061         status = hclge_cmd_send(&hdev->hw, &desc, 1);
10062         if (status)
10063                 dev_err(&hdev->pdev->dev,
10064                         "Send txvlan protocol type command fail, ret =%d\n",
10065                         status);
10066
10067         return status;
10068 }
10069
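/* Initial VLAN setup: enable the egress/ingress VLAN filters (per vport on
 * device version V2 and later), set the default 802.1Q protocol types, apply
 * each vport's VLAN offload configuration and finally add VLAN 0 for the PF.
 */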
10070 static int hclge_init_vlan_config(struct hclge_dev *hdev)
10071 {
10072 #define HCLGE_DEF_VLAN_TYPE             0x8100
10073
10074         struct hnae3_handle *handle = &hdev->vport[0].nic;
10075         struct hclge_vport *vport;
10076         int ret;
10077         int i;
10078
10079         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
10080                 /* for revision 0x21, vf vlan filter is per function */
10081                 for (i = 0; i < hdev->num_alloc_vport; i++) {
10082                         vport = &hdev->vport[i];
10083                         ret = hclge_set_vlan_filter_ctrl(hdev,
10084                                                          HCLGE_FILTER_TYPE_VF,
10085                                                          HCLGE_FILTER_FE_EGRESS,
10086                                                          true,
10087                                                          vport->vport_id);
10088                         if (ret)
10089                                 return ret;
10090                         vport->cur_vlan_fltr_en = true;
10091                 }
10092
10093                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
10094                                                  HCLGE_FILTER_FE_INGRESS, true,
10095                                                  0);
10096                 if (ret)
10097                         return ret;
10098         } else {
10099                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
10100                                                  HCLGE_FILTER_FE_EGRESS_V1_B,
10101                                                  true, 0);
10102                 if (ret)
10103                         return ret;
10104         }
10105
10106         hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
10107         hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
10108         hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
10109         hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
10110         hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
10111         hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
10112
10113         ret = hclge_set_vlan_protocol_type(hdev);
10114         if (ret)
10115                 return ret;
10116
10117         for (i = 0; i < hdev->num_alloc_vport; i++) {
10118                 u16 vlan_tag;
10119                 u8 qos;
10120
10121                 vport = &hdev->vport[i];
10122                 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
10123                 qos = vport->port_base_vlan_cfg.vlan_info.qos;
10124
10125                 ret = hclge_vlan_offload_cfg(vport,
10126                                              vport->port_base_vlan_cfg.state,
10127                                              vlan_tag, qos);
10128                 if (ret)
10129                         return ret;
10130         }
10131
10132         return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
10133 }
10134
10135 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
10136                                        bool writen_to_tbl)
10137 {
10138         struct hclge_vport_vlan_cfg *vlan, *tmp;
10139
10140         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
10141                 if (vlan->vlan_id == vlan_id)
10142                         return;
10143
10144         vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
10145         if (!vlan)
10146                 return;
10147
10148         vlan->hd_tbl_status = writen_to_tbl;
10149         vlan->vlan_id = vlan_id;
10150
10151         list_add_tail(&vlan->node, &vport->vlan_list);
10152 }
10153
10154 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
10155 {
10156         struct hclge_vport_vlan_cfg *vlan, *tmp;
10157         struct hclge_dev *hdev = vport->back;
10158         int ret;
10159
10160         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10161                 if (!vlan->hd_tbl_status) {
10162                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10163                                                        vport->vport_id,
10164                                                        vlan->vlan_id, false);
10165                         if (ret) {
10166                                 dev_err(&hdev->pdev->dev,
10167                                         "restore vport vlan list failed, ret=%d\n",
10168                                         ret);
10169                                 return ret;
10170                         }
10171                 }
10172                 vlan->hd_tbl_status = true;
10173         }
10174
10175         return 0;
10176 }
10177
10178 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
10179                                       bool is_write_tbl)
10180 {
10181         struct hclge_vport_vlan_cfg *vlan, *tmp;
10182         struct hclge_dev *hdev = vport->back;
10183
10184         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10185                 if (vlan->vlan_id == vlan_id) {
10186                         if (is_write_tbl && vlan->hd_tbl_status)
10187                                 hclge_set_vlan_filter_hw(hdev,
10188                                                          htons(ETH_P_8021Q),
10189                                                          vport->vport_id,
10190                                                          vlan_id,
10191                                                          true);
10192
10193                         list_del(&vlan->node);
10194                         kfree(vlan);
10195                         break;
10196                 }
10197         }
10198 }
10199
10200 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
10201 {
10202         struct hclge_vport_vlan_cfg *vlan, *tmp;
10203         struct hclge_dev *hdev = vport->back;
10204
10205         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10206                 if (vlan->hd_tbl_status)
10207                         hclge_set_vlan_filter_hw(hdev,
10208                                                  htons(ETH_P_8021Q),
10209                                                  vport->vport_id,
10210                                                  vlan->vlan_id,
10211                                                  true);
10212
10213                 vlan->hd_tbl_status = false;
10214                 if (is_del_list) {
10215                         list_del(&vlan->node);
10216                         kfree(vlan);
10217                 }
10218         }
10219         clear_bit(vport->vport_id, hdev->vf_vlan_full);
10220 }
10221
10222 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
10223 {
10224         struct hclge_vport_vlan_cfg *vlan, *tmp;
10225         struct hclge_vport *vport;
10226         int i;
10227
10228         for (i = 0; i < hdev->num_alloc_vport; i++) {
10229                 vport = &hdev->vport[i];
10230                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10231                         list_del(&vlan->node);
10232                         kfree(vlan);
10233                 }
10234         }
10235 }
10236
10237 void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
10238 {
10239         struct hclge_vport_vlan_cfg *vlan, *tmp;
10240         struct hclge_dev *hdev = vport->back;
10241         u16 vlan_proto;
10242         u16 vlan_id;
10243         u16 state;
10244         int ret;
10245
10246         vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
10247         vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
10248         state = vport->port_base_vlan_cfg.state;
10249
10250         if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
10251                 clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
10252                 hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
10253                                          vport->vport_id, vlan_id,
10254                                          false);
10255                 return;
10256         }
10257
10258         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10259                 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10260                                                vport->vport_id,
10261                                                vlan->vlan_id, false);
10262                 if (ret)
10263                         break;
10264                 vlan->hd_tbl_status = true;
10265         }
10266 }
10267
10268 /* For global reset and imp reset, hardware will clear the mac table,
10269  * so we change the mac address state from ACTIVE to TO_ADD so that they
10270  * can be restored by the service task after the reset completes. Further,
10271  * mac addresses with state TO_DEL or DEL_FAIL do not need to be restored
10272  * after reset, so just remove these mac nodes from mac_list.
10273  */
10274 static void hclge_mac_node_convert_for_reset(struct list_head *list)
10275 {
10276         struct hclge_mac_node *mac_node, *tmp;
10277
10278         list_for_each_entry_safe(mac_node, tmp, list, node) {
10279                 if (mac_node->state == HCLGE_MAC_ACTIVE) {
10280                         mac_node->state = HCLGE_MAC_TO_ADD;
10281                 } else if (mac_node->state == HCLGE_MAC_TO_DEL) {
10282                         list_del(&mac_node->node);
10283                         kfree(mac_node);
10284                 }
10285         }
10286 }
10287
10288 void hclge_restore_mac_table_common(struct hclge_vport *vport)
10289 {
10290         spin_lock_bh(&vport->mac_list_lock);
10291
10292         hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
10293         hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
10294         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
10295
10296         spin_unlock_bh(&vport->mac_list_lock);
10297 }
10298
10299 static void hclge_restore_hw_table(struct hclge_dev *hdev)
10300 {
10301         struct hclge_vport *vport = &hdev->vport[0];
10302         struct hnae3_handle *handle = &vport->nic;
10303
10304         hclge_restore_mac_table_common(vport);
10305         hclge_restore_vport_vlan_table(vport);
10306         set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
10307         hclge_restore_fd_entries(handle);
10308 }
10309
10310 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
10311 {
10312         struct hclge_vport *vport = hclge_get_vport(handle);
10313
10314         if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10315                 vport->rxvlan_cfg.strip_tag1_en = false;
10316                 vport->rxvlan_cfg.strip_tag2_en = enable;
10317                 vport->rxvlan_cfg.strip_tag2_discard_en = false;
10318         } else {
10319                 vport->rxvlan_cfg.strip_tag1_en = enable;
10320                 vport->rxvlan_cfg.strip_tag2_en = true;
10321                 vport->rxvlan_cfg.strip_tag2_discard_en = true;
10322         }
10323
10324         vport->rxvlan_cfg.strip_tag1_discard_en = false;
10325         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
10326         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
10327         vport->rxvlan_cfg.rx_vlan_offload_en = enable;
10328
10329         return hclge_set_vlan_rx_offload_cfg(vport);
10330 }
10331
10332 static void hclge_set_vport_vlan_fltr_change(struct hclge_vport *vport)
10333 {
10334         struct hclge_dev *hdev = vport->back;
10335
10336         if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
10337                 set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, &vport->state);
10338 }
10339
10340 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
10341                                             u16 port_base_vlan_state,
10342                                             struct hclge_vlan_info *new_info,
10343                                             struct hclge_vlan_info *old_info)
10344 {
10345         struct hclge_dev *hdev = vport->back;
10346         int ret;
10347
10348         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
10349                 hclge_rm_vport_all_vlan_table(vport, false);
10350                 /* force clear VLAN 0 */
10351                 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, true, 0);
10352                 if (ret)
10353                         return ret;
10354                 return hclge_set_vlan_filter_hw(hdev,
10355                                                  htons(new_info->vlan_proto),
10356                                                  vport->vport_id,
10357                                                  new_info->vlan_tag,
10358                                                  false);
10359         }
10360
10361         /* force add VLAN 0 */
10362         ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, false, 0);
10363         if (ret)
10364                 return ret;
10365
10366         ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
10367                                        vport->vport_id, old_info->vlan_tag,
10368                                        true);
10369         if (ret)
10370                 return ret;
10371
10372         return hclge_add_vport_all_vlan_table(vport);
10373 }
10374
10375 static bool hclge_need_update_vlan_filter(const struct hclge_vlan_info *new_cfg,
10376                                           const struct hclge_vlan_info *old_cfg)
10377 {
10378         if (new_cfg->vlan_tag != old_cfg->vlan_tag)
10379                 return true;
10380
10381         if (new_cfg->vlan_tag == 0 && (new_cfg->qos == 0 || old_cfg->qos == 0))
10382                 return true;
10383
10384         return false;
10385 }
10386
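/* Apply a new port based VLAN configuration to a vport: update the VLAN
 * offload settings, rewrite the hardware VLAN filter entries when the tag or
 * qos actually changes, and record the new state in both the vport and the
 * nic handle.
 */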
10387 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
10388                                     struct hclge_vlan_info *vlan_info)
10389 {
10390         struct hnae3_handle *nic = &vport->nic;
10391         struct hclge_vlan_info *old_vlan_info;
10392         struct hclge_dev *hdev = vport->back;
10393         int ret;
10394
10395         old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10396
10397         ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag,
10398                                      vlan_info->qos);
10399         if (ret)
10400                 return ret;
10401
10402         if (!hclge_need_update_vlan_filter(vlan_info, old_vlan_info))
10403                 goto out;
10404
10405         if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
10406                 /* add new VLAN tag */
10407                 ret = hclge_set_vlan_filter_hw(hdev,
10408                                                htons(vlan_info->vlan_proto),
10409                                                vport->vport_id,
10410                                                vlan_info->vlan_tag,
10411                                                false);
10412                 if (ret)
10413                         return ret;
10414
10415                 /* remove old VLAN tag */
10416                 if (old_vlan_info->vlan_tag == 0)
10417                         ret = hclge_set_vf_vlan_common(hdev, vport->vport_id,
10418                                                        true, 0);
10419                 else
10420                         ret = hclge_set_vlan_filter_hw(hdev,
10421                                                        htons(ETH_P_8021Q),
10422                                                        vport->vport_id,
10423                                                        old_vlan_info->vlan_tag,
10424                                                        true);
10425                 if (ret) {
10426                         dev_err(&hdev->pdev->dev,
10427                                 "failed to clear vport%u port base vlan %u, ret = %d.\n",
10428                                 vport->vport_id, old_vlan_info->vlan_tag, ret);
10429                         return ret;
10430                 }
10431
10432                 goto out;
10433         }
10434
10435         ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
10436                                                old_vlan_info);
10437         if (ret)
10438                 return ret;
10439
10440 out:
10441         vport->port_base_vlan_cfg.state = state;
10442         if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
10443                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
10444         else
10445                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
10446
10447         vport->port_base_vlan_cfg.vlan_info = *vlan_info;
10448         hclge_set_vport_vlan_fltr_change(vport);
10449
10450         return 0;
10451 }
10452
10453 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
10454                                           enum hnae3_port_base_vlan_state state,
10455                                           u16 vlan, u8 qos)
10456 {
10457         if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10458                 if (!vlan && !qos)
10459                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10460
10461                 return HNAE3_PORT_BASE_VLAN_ENABLE;
10462         }
10463
10464         if (!vlan && !qos)
10465                 return HNAE3_PORT_BASE_VLAN_DISABLE;
10466
10467         if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan &&
10468             vport->port_base_vlan_cfg.vlan_info.qos == qos)
10469                 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10470
10471         return HNAE3_PORT_BASE_VLAN_MODIFY;
10472 }
10473
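/* Handle a request to set a VF's port based VLAN. Validate the vlan/qos/proto
 * values, work out the resulting port based VLAN state, apply it to the VF's
 * vport, and notify the VF through the mailbox on devices older than
 * version V3.
 */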
10474 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
10475                                     u16 vlan, u8 qos, __be16 proto)
10476 {
10477         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
10478         struct hclge_vport *vport = hclge_get_vport(handle);
10479         struct hclge_dev *hdev = vport->back;
10480         struct hclge_vlan_info vlan_info;
10481         u16 state;
10482         int ret;
10483
10484         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10485                 return -EOPNOTSUPP;
10486
10487         vport = hclge_get_vf_vport(hdev, vfid);
10488         if (!vport)
10489                 return -EINVAL;
10490
10491         /* qos is a 3-bit value, so it cannot be bigger than 7 */
10492         if (vlan > VLAN_N_VID - 1 || qos > 7)
10493                 return -EINVAL;
10494         if (proto != htons(ETH_P_8021Q))
10495                 return -EPROTONOSUPPORT;
10496
10497         state = hclge_get_port_base_vlan_state(vport,
10498                                                vport->port_base_vlan_cfg.state,
10499                                                vlan, qos);
10500         if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
10501                 return 0;
10502
10503         vlan_info.vlan_tag = vlan;
10504         vlan_info.qos = qos;
10505         vlan_info.vlan_proto = ntohs(proto);
10506
10507         ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
10508         if (ret) {
10509                 dev_err(&hdev->pdev->dev,
10510                         "failed to update port base vlan for vf %d, ret = %d\n",
10511                         vfid, ret);
10512                 return ret;
10513         }
10514
10515         /* for DEVICE_VERSION_V3, vf doesn't need to know about the port based
10516          * VLAN state.
10517          */
10518         if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
10519             test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
10520                 hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
10521                                                   vport->vport_id, state,
10522                                                   &vlan_info);
10523
10524         return 0;
10525 }
10526
10527 static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
10528 {
10529         struct hclge_vlan_info *vlan_info;
10530         struct hclge_vport *vport;
10531         int ret;
10532         int vf;
10533
10534         /* clear port base vlan for all vf */
10535         for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10536                 vport = &hdev->vport[vf];
10537                 vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10538
10539                 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10540                                                vport->vport_id,
10541                                                vlan_info->vlan_tag, true);
10542                 if (ret)
10543                         dev_err(&hdev->pdev->dev,
10544                                 "failed to clear vf vlan for vf%d, ret = %d\n",
10545                                 vf - HCLGE_VF_VPORT_START_NUM, ret);
10546         }
10547 }
10548
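/* Add or remove a VLAN id for a vport on behalf of the stack. The hardware
 * filter is only touched while port based VLAN is disabled; the vport VLAN
 * list is always kept in sync, and failed or deferred removals are recorded
 * in vlan_del_fail_bmap so they can be retried later.
 */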
10549 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
10550                           u16 vlan_id, bool is_kill)
10551 {
10552         struct hclge_vport *vport = hclge_get_vport(handle);
10553         struct hclge_dev *hdev = vport->back;
10554         bool writen_to_tbl = false;
10555         int ret = 0;
10556
10557         /* When the device is resetting or the reset has failed, firmware
10558          * is unable to handle the mailbox. Just record the vlan id, and
10559          * remove it after the reset finishes.
10560          */
10561         if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10562              test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
10563                 set_bit(vlan_id, vport->vlan_del_fail_bmap);
10564                 return -EBUSY;
10565         }
10566
10567         /* When port based vlan is enabled, it is used as the vlan filter
10568          * entry. In this case, the vlan filter table is not updated when
10569          * the user adds or removes a vlan; only the vport vlan list is
10570          * updated. The vlan ids in the list are not written to the vlan
10571          * filter table until port based vlan is disabled.
10572          */
10573         if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10574                 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
10575                                                vlan_id, is_kill);
10576                 writen_to_tbl = true;
10577         }
10578
10579         if (!ret) {
10580                 if (is_kill)
10581                         hclge_rm_vport_vlan_table(vport, vlan_id, false);
10582                 else
10583                         hclge_add_vport_vlan_table(vport, vlan_id,
10584                                                    writen_to_tbl);
10585         } else if (is_kill) {
10586                 /* When removing the hw vlan filter fails, record the vlan
10587                  * id and try to remove it from hw later, to stay
10588                  * consistent with the stack.
10589                  */
10590                 set_bit(vlan_id, vport->vlan_del_fail_bmap);
10591         }
10592
10593         hclge_set_vport_vlan_fltr_change(vport);
10594
10595         return ret;
10596 }
10597
10598 static void hclge_sync_vlan_fltr_state(struct hclge_dev *hdev)
10599 {
10600         struct hclge_vport *vport;
10601         int ret;
10602         u16 i;
10603
10604         for (i = 0; i < hdev->num_alloc_vport; i++) {
10605                 vport = &hdev->vport[i];
10606                 if (!test_and_clear_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
10607                                         &vport->state))
10608                         continue;
10609
10610                 ret = hclge_enable_vport_vlan_filter(vport,
10611                                                      vport->req_vlan_fltr_en);
10612                 if (ret) {
10613                         dev_err(&hdev->pdev->dev,
10614                                 "failed to sync vlan filter state for vport%u, ret = %d\n",
10615                                 vport->vport_id, ret);
10616                         set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
10617                                 &vport->state);
10618                         return;
10619                 }
10620         }
10621 }
10622
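/* Periodic task: retry VLAN deletions that previously failed (bounded per run
 * by HCLGE_MAX_SYNC_COUNT) and then propagate any pending VLAN filter state
 * changes to the hardware.
 */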
10623 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
10624 {
10625 #define HCLGE_MAX_SYNC_COUNT    60
10626
10627         int i, ret, sync_cnt = 0;
10628         u16 vlan_id;
10629
10630         /* start from vport 1 for PF is always alive */
10631         for (i = 0; i < hdev->num_alloc_vport; i++) {
10632                 struct hclge_vport *vport = &hdev->vport[i];
10633
10634                 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10635                                          VLAN_N_VID);
10636                 while (vlan_id != VLAN_N_VID) {
10637                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10638                                                        vport->vport_id, vlan_id,
10639                                                        true);
10640                         if (ret && ret != -EINVAL)
10641                                 return;
10642
10643                         clear_bit(vlan_id, vport->vlan_del_fail_bmap);
10644                         hclge_rm_vport_vlan_table(vport, vlan_id, false);
10645                         hclge_set_vport_vlan_fltr_change(vport);
10646
10647                         sync_cnt++;
10648                         if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
10649                                 return;
10650
10651                         vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10652                                                  VLAN_N_VID);
10653                 }
10654         }
10655
10656         hclge_sync_vlan_fltr_state(hdev);
10657 }
10658
10659 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
10660 {
10661         struct hclge_config_max_frm_size_cmd *req;
10662         struct hclge_desc desc;
10663
10664         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
10665
10666         req = (struct hclge_config_max_frm_size_cmd *)desc.data;
10667         req->max_frm_size = cpu_to_le16(new_mps);
10668         req->min_frm_size = HCLGE_MAC_MIN_FRAME;
10669
10670         return hclge_cmd_send(&hdev->hw, &desc, 1);
10671 }
10672
10673 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
10674 {
10675         struct hclge_vport *vport = hclge_get_vport(handle);
10676
10677         return hclge_set_vport_mtu(vport, new_mtu);
10678 }
10679
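/* Update the maximum frame size for a vport. A VF only records its own mps,
 * which must fit within the PF's, while the PF additionally checks all VF
 * values, reprograms the MAC maximum frame size and reallocates the packet
 * buffers with the client paused.
 */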
10680 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
10681 {
10682         struct hclge_dev *hdev = vport->back;
10683         int i, max_frm_size, ret;
10684
10685         /* HW supports 2 layers of vlan */
10686         max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
10687         if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
10688             max_frm_size > hdev->ae_dev->dev_specs.max_frm_size)
10689                 return -EINVAL;
10690
10691         max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
10692         mutex_lock(&hdev->vport_lock);
10693         /* VF's mps must fit within hdev->mps */
10694         if (vport->vport_id && max_frm_size > hdev->mps) {
10695                 mutex_unlock(&hdev->vport_lock);
10696                 return -EINVAL;
10697         } else if (vport->vport_id) {
10698                 vport->mps = max_frm_size;
10699                 mutex_unlock(&hdev->vport_lock);
10700                 return 0;
10701         }
10702
10703         /* PF's mps must be greater than VF's mps */
10704         for (i = 1; i < hdev->num_alloc_vport; i++)
10705                 if (max_frm_size < hdev->vport[i].mps) {
10706                         mutex_unlock(&hdev->vport_lock);
10707                         return -EINVAL;
10708                 }
10709
10710         hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
10711
10712         ret = hclge_set_mac_mtu(hdev, max_frm_size);
10713         if (ret) {
10714                 dev_err(&hdev->pdev->dev,
10715                         "Change mtu fail, ret =%d\n", ret);
10716                 goto out;
10717         }
10718
10719         hdev->mps = max_frm_size;
10720         vport->mps = max_frm_size;
10721
10722         ret = hclge_buffer_alloc(hdev);
10723         if (ret)
10724                 dev_err(&hdev->pdev->dev,
10725                         "Allocate buffer fail, ret =%d\n", ret);
10726
10727 out:
10728         hclge_notify_client(hdev, HNAE3_UP_CLIENT);
10729         mutex_unlock(&hdev->vport_lock);
10730         return ret;
10731 }
10732
10733 static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id,
10734                                     bool enable)
10735 {
10736         struct hclge_reset_tqp_queue_cmd *req;
10737         struct hclge_desc desc;
10738         int ret;
10739
10740         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
10741
10742         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10743         req->tqp_id = cpu_to_le16(queue_id);
10744         if (enable)
10745                 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
10746
10747         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10748         if (ret) {
10749                 dev_err(&hdev->pdev->dev,
10750                         "Send tqp reset cmd error, status =%d\n", ret);
10751                 return ret;
10752         }
10753
10754         return 0;
10755 }
10756
10757 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id,
10758                                   u8 *reset_status)
10759 {
10760         struct hclge_reset_tqp_queue_cmd *req;
10761         struct hclge_desc desc;
10762         int ret;
10763
10764         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
10765
10766         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10767         req->tqp_id = cpu_to_le16(queue_id);
10768
10769         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10770         if (ret) {
10771                 dev_err(&hdev->pdev->dev,
10772                         "Get reset status error, status =%d\n", ret);
10773                 return ret;
10774         }
10775
10776         *reset_status = hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
10777
10778         return 0;
10779 }
10780
10781 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
10782 {
10783         struct hnae3_queue *queue;
10784         struct hclge_tqp *tqp;
10785
10786         queue = handle->kinfo.tqp[queue_id];
10787         tqp = container_of(queue, struct hclge_tqp, q);
10788
10789         return tqp->index;
10790 }
10791
10792 static int hclge_reset_tqp_cmd(struct hnae3_handle *handle)
10793 {
10794         struct hclge_vport *vport = hclge_get_vport(handle);
10795         struct hclge_dev *hdev = vport->back;
10796         u16 reset_try_times = 0;
10797         u8 reset_status;
10798         u16 queue_gid;
10799         int ret;
10800         u16 i;
10801
10802         for (i = 0; i < handle->kinfo.num_tqps; i++) {
10803                 queue_gid = hclge_covert_handle_qid_global(handle, i);
10804                 ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, true);
10805                 if (ret) {
10806                         dev_err(&hdev->pdev->dev,
10807                                 "failed to send reset tqp cmd, ret = %d\n",
10808                                 ret);
10809                         return ret;
10810                 }
10811
10812                 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
10813                         ret = hclge_get_reset_status(hdev, queue_gid,
10814                                                      &reset_status);
10815                         if (ret)
10816                                 return ret;
10817
10818                         if (reset_status)
10819                                 break;
10820
10821                         /* Wait for tqp hw reset */
10822                         usleep_range(1000, 1200);
10823                 }
10824
10825                 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
10826                         dev_err(&hdev->pdev->dev,
10827                                 "wait for tqp hw reset timeout\n");
10828                         return -ETIME;
10829                 }
10830
10831                 ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, false);
10832                 if (ret) {
10833                         dev_err(&hdev->pdev->dev,
10834                                 "failed to deassert soft reset, ret = %d\n",
10835                                 ret);
10836                         return ret;
10837                 }
10838                 reset_try_times = 0;
10839         }
10840         return 0;
10841 }
10842
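/* Reset the RCB hardware for all queues of this handle with a single command.
 * If the firmware reports the command as unsupported, fall back to resetting
 * each TQP individually via hclge_reset_tqp_cmd().
 */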
10843 static int hclge_reset_rcb(struct hnae3_handle *handle)
10844 {
10845 #define HCLGE_RESET_RCB_NOT_SUPPORT     0U
10846 #define HCLGE_RESET_RCB_SUCCESS         1U
10847
10848         struct hclge_vport *vport = hclge_get_vport(handle);
10849         struct hclge_dev *hdev = vport->back;
10850         struct hclge_reset_cmd *req;
10851         struct hclge_desc desc;
10852         u8 return_status;
10853         u16 queue_gid;
10854         int ret;
10855
10856         queue_gid = hclge_covert_handle_qid_global(handle, 0);
10857
10858         req = (struct hclge_reset_cmd *)desc.data;
10859         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
10860         hnae3_set_bit(req->fun_reset_rcb, HCLGE_CFG_RESET_RCB_B, 1);
10861         req->fun_reset_rcb_vqid_start = cpu_to_le16(queue_gid);
10862         req->fun_reset_rcb_vqid_num = cpu_to_le16(handle->kinfo.num_tqps);
10863
10864         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10865         if (ret) {
10866                 dev_err(&hdev->pdev->dev,
10867                         "failed to send rcb reset cmd, ret = %d\n", ret);
10868                 return ret;
10869         }
10870
10871         return_status = req->fun_reset_rcb_return_status;
10872         if (return_status == HCLGE_RESET_RCB_SUCCESS)
10873                 return 0;
10874
10875         if (return_status != HCLGE_RESET_RCB_NOT_SUPPORT) {
10876                 dev_err(&hdev->pdev->dev, "failed to reset rcb, ret = %u\n",
10877                         return_status);
10878                 return -EIO;
10879         }
10880
10881         /* if reset rcb cmd is unsupported, we need to send reset tqp cmd
10882          * again to reset all tqps
10883          */
10884         return hclge_reset_tqp_cmd(handle);
10885 }
10886
10887 int hclge_reset_tqp(struct hnae3_handle *handle)
10888 {
10889         struct hclge_vport *vport = hclge_get_vport(handle);
10890         struct hclge_dev *hdev = vport->back;
10891         int ret;
10892
10893         /* only need to disable PF's tqp */
10894         if (!vport->vport_id) {
10895                 ret = hclge_tqp_enable(handle, false);
10896                 if (ret) {
10897                         dev_err(&hdev->pdev->dev,
10898                                 "failed to disable tqp, ret = %d\n", ret);
10899                         return ret;
10900                 }
10901         }
10902
10903         return hclge_reset_rcb(handle);
10904 }
10905
10906 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
10907 {
10908         struct hclge_vport *vport = hclge_get_vport(handle);
10909         struct hclge_dev *hdev = vport->back;
10910
10911         return hdev->fw_version;
10912 }
10913
10914 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10915 {
10916         struct phy_device *phydev = hdev->hw.mac.phydev;
10917
10918         if (!phydev)
10919                 return;
10920
10921         phy_set_asym_pause(phydev, rx_en, tx_en);
10922 }
10923
10924 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10925 {
10926         int ret;
10927
10928         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
10929                 return 0;
10930
10931         ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
10932         if (ret)
10933                 dev_err(&hdev->pdev->dev,
10934                         "configure pauseparam error, ret = %d.\n", ret);
10935
10936         return ret;
10937 }
10938
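/* Re-resolve flow control after PHY autonegotiation: combine the local and
 * link partner pause advertisements, force pause off for half duplex links,
 * and program the resulting rx/tx pause settings into the MAC.
 */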
10939 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
10940 {
10941         struct phy_device *phydev = hdev->hw.mac.phydev;
10942         u16 remote_advertising = 0;
10943         u16 local_advertising;
10944         u32 rx_pause, tx_pause;
10945         u8 flowctl;
10946
10947         if (!phydev->link || !phydev->autoneg)
10948                 return 0;
10949
10950         local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
10951
10952         if (phydev->pause)
10953                 remote_advertising = LPA_PAUSE_CAP;
10954
10955         if (phydev->asym_pause)
10956                 remote_advertising |= LPA_PAUSE_ASYM;
10957
10958         flowctl = mii_resolve_flowctrl_fdx(local_advertising,
10959                                            remote_advertising);
10960         tx_pause = flowctl & FLOW_CTRL_TX;
10961         rx_pause = flowctl & FLOW_CTRL_RX;
10962
10963         if (phydev->duplex == HCLGE_MAC_HALF) {
10964                 tx_pause = 0;
10965                 rx_pause = 0;
10966         }
10967
10968         return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
10969 }
10970
10971 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
10972                                  u32 *rx_en, u32 *tx_en)
10973 {
10974         struct hclge_vport *vport = hclge_get_vport(handle);
10975         struct hclge_dev *hdev = vport->back;
10976         u8 media_type = hdev->hw.mac.media_type;
10977
10978         *auto_neg = (media_type == HNAE3_MEDIA_TYPE_COPPER) ?
10979                     hclge_get_autoneg(handle) : 0;
10980
10981         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10982                 *rx_en = 0;
10983                 *tx_en = 0;
10984                 return;
10985         }
10986
10987         if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
10988                 *rx_en = 1;
10989                 *tx_en = 0;
10990         } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
10991                 *tx_en = 1;
10992                 *rx_en = 0;
10993         } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
10994                 *rx_en = 1;
10995                 *tx_en = 1;
10996         } else {
10997                 *rx_en = 0;
10998                 *tx_en = 0;
10999         }
11000 }
11001
11002 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
11003                                          u32 rx_en, u32 tx_en)
11004 {
11005         if (rx_en && tx_en)
11006                 hdev->fc_mode_last_time = HCLGE_FC_FULL;
11007         else if (rx_en && !tx_en)
11008                 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
11009         else if (!rx_en && tx_en)
11010                 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
11011         else
11012                 hdev->fc_mode_last_time = HCLGE_FC_NONE;
11013
11014         hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
11015 }
11016
11017 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
11018                                 u32 rx_en, u32 tx_en)
11019 {
11020         struct hclge_vport *vport = hclge_get_vport(handle);
11021         struct hclge_dev *hdev = vport->back;
11022         struct phy_device *phydev = hdev->hw.mac.phydev;
11023         u32 fc_autoneg;
11024
11025         if (phydev || hnae3_dev_phy_imp_supported(hdev)) {
11026                 fc_autoneg = hclge_get_autoneg(handle);
11027                 if (auto_neg != fc_autoneg) {
11028                         dev_info(&hdev->pdev->dev,
11029                                  "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
11030                         return -EOPNOTSUPP;
11031                 }
11032         }
11033
11034         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
11035                 dev_info(&hdev->pdev->dev,
11036                          "Priority flow control enabled. Cannot set link flow control.\n");
11037                 return -EOPNOTSUPP;
11038         }
11039
11040         hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
11041
11042         hclge_record_user_pauseparam(hdev, rx_en, tx_en);
11043
11044         if (!auto_neg || hnae3_dev_phy_imp_supported(hdev))
11045                 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
11046
11047         if (phydev)
11048                 return phy_start_aneg(phydev);
11049
11050         return -EOPNOTSUPP;
11051 }
11052
11053 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
11054                                           u8 *auto_neg, u32 *speed, u8 *duplex)
11055 {
11056         struct hclge_vport *vport = hclge_get_vport(handle);
11057         struct hclge_dev *hdev = vport->back;
11058
11059         if (speed)
11060                 *speed = hdev->hw.mac.speed;
11061         if (duplex)
11062                 *duplex = hdev->hw.mac.duplex;
11063         if (auto_neg)
11064                 *auto_neg = hdev->hw.mac.autoneg;
11065 }
11066
11067 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
11068                                  u8 *module_type)
11069 {
11070         struct hclge_vport *vport = hclge_get_vport(handle);
11071         struct hclge_dev *hdev = vport->back;
11072
11073         /* When the nic is down, the service task is not running and does not
11074          * update the port information every second. Query the port information
11075          * before returning the media type to ensure it is correct.
11076          */
11077         hclge_update_port_info(hdev);
11078
11079         if (media_type)
11080                 *media_type = hdev->hw.mac.media_type;
11081
11082         if (module_type)
11083                 *module_type = hdev->hw.mac.module_type;
11084 }
11085
11086 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
11087                                 u8 *tp_mdix_ctrl, u8 *tp_mdix)
11088 {
11089         struct hclge_vport *vport = hclge_get_vport(handle);
11090         struct hclge_dev *hdev = vport->back;
11091         struct phy_device *phydev = hdev->hw.mac.phydev;
11092         int mdix_ctrl, mdix, is_resolved;
11093         unsigned int retval;
11094
11095         if (!phydev) {
11096                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
11097                 *tp_mdix = ETH_TP_MDI_INVALID;
11098                 return;
11099         }
11100
11101         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
11102
11103         retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
11104         mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
11105                                     HCLGE_PHY_MDIX_CTRL_S);
11106
11107         retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
11108         mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
11109         is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
11110
11111         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
11112
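        /* Map the MDI control field read from the PHY to the ethtool
         * constants: 0x0 -> ETH_TP_MDI, 0x1 -> ETH_TP_MDI_X,
         * 0x3 -> ETH_TP_MDI_AUTO, anything else -> ETH_TP_MDI_INVALID.
         */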
11113         switch (mdix_ctrl) {
11114         case 0x0:
11115                 *tp_mdix_ctrl = ETH_TP_MDI;
11116                 break;
11117         case 0x1:
11118                 *tp_mdix_ctrl = ETH_TP_MDI_X;
11119                 break;
11120         case 0x3:
11121                 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
11122                 break;
11123         default:
11124                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
11125                 break;
11126         }
11127
11128         if (!is_resolved)
11129                 *tp_mdix = ETH_TP_MDI_INVALID;
11130         else if (mdix)
11131                 *tp_mdix = ETH_TP_MDI_X;
11132         else
11133                 *tp_mdix = ETH_TP_MDI;
11134 }
11135
11136 static void hclge_info_show(struct hclge_dev *hdev)
11137 {
11138         struct device *dev = &hdev->pdev->dev;
11139
11140         dev_info(dev, "PF info begin:\n");
11141
11142         dev_info(dev, "Number of task queue pairs: %u\n", hdev->num_tqps);
11143         dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
11144         dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
11145         dev_info(dev, "Number of vports: %u\n", hdev->num_alloc_vport);
11146         dev_info(dev, "Number of VFs for this PF: %u\n", hdev->num_req_vfs);
11147         dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
11148         dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
11149         dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
11150         dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
11151         dev_info(dev, "This is %s PF\n",
11152                  hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
11153         dev_info(dev, "DCB %s\n",
11154                  hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
11155         dev_info(dev, "MQPRIO %s\n",
11156                  hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
11157         dev_info(dev, "Default tx spare buffer size: %u\n",
11158                  hdev->tx_spare_buf_size);
11159
11160         dev_info(dev, "PF info end.\n");
11161 }
11162
11163 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
11164                                           struct hclge_vport *vport)
11165 {
11166         struct hnae3_client *client = vport->nic.client;
11167         struct hclge_dev *hdev = ae_dev->priv;
11168         int rst_cnt = hdev->rst_stats.reset_cnt;
11169         int ret;
11170
11171         ret = client->ops->init_instance(&vport->nic);
11172         if (ret)
11173                 return ret;
11174
11175         set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11176         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
11177             rst_cnt != hdev->rst_stats.reset_cnt) {
11178                 ret = -EBUSY;
11179                 goto init_nic_err;
11180         }
11181
11182         /* Enable nic hw error interrupts */
11183         ret = hclge_config_nic_hw_error(hdev, true);
11184         if (ret) {
11185                 dev_err(&ae_dev->pdev->dev,
11186                         "fail(%d) to enable hw error interrupts\n", ret);
11187                 goto init_nic_err;
11188         }
11189
11190         hnae3_set_client_init_flag(client, ae_dev, 1);
11191
11192         if (netif_msg_drv(&hdev->vport->nic))
11193                 hclge_info_show(hdev);
11194
11195         return ret;
11196
11197 init_nic_err:
11198         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11199         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11200                 msleep(HCLGE_WAIT_RESET_DONE);
11201
11202         client->ops->uninit_instance(&vport->nic, 0);
11203
11204         return ret;
11205 }
11206
11207 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
11208                                            struct hclge_vport *vport)
11209 {
11210         struct hclge_dev *hdev = ae_dev->priv;
11211         struct hnae3_client *client;
11212         int rst_cnt;
11213         int ret;
11214
11215         if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
11216             !hdev->nic_client)
11217                 return 0;
11218
11219         client = hdev->roce_client;
11220         ret = hclge_init_roce_base_info(vport);
11221         if (ret)
11222                 return ret;
11223
11224         rst_cnt = hdev->rst_stats.reset_cnt;
11225         ret = client->ops->init_instance(&vport->roce);
11226         if (ret)
11227                 return ret;
11228
11229         set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11230         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
11231             rst_cnt != hdev->rst_stats.reset_cnt) {
11232                 ret = -EBUSY;
11233                 goto init_roce_err;
11234         }
11235
11236         /* Enable roce ras interrupts */
11237         ret = hclge_config_rocee_ras_interrupt(hdev, true);
11238         if (ret) {
11239                 dev_err(&ae_dev->pdev->dev,
11240                         "fail(%d) to enable roce ras interrupts\n", ret);
11241                 goto init_roce_err;
11242         }
11243
11244         hnae3_set_client_init_flag(client, ae_dev, 1);
11245
11246         return 0;
11247
11248 init_roce_err:
11249         clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11250         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11251                 msleep(HCLGE_WAIT_RESET_DONE);
11252
11253         hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
11254
11255         return ret;
11256 }
11257
11258 static int hclge_init_client_instance(struct hnae3_client *client,
11259                                       struct hnae3_ae_dev *ae_dev)
11260 {
11261         struct hclge_dev *hdev = ae_dev->priv;
11262         struct hclge_vport *vport = &hdev->vport[0];
11263         int ret;
11264
11265         switch (client->type) {
11266         case HNAE3_CLIENT_KNIC:
11267                 hdev->nic_client = client;
11268                 vport->nic.client = client;
11269                 ret = hclge_init_nic_client_instance(ae_dev, vport);
11270                 if (ret)
11271                         goto clear_nic;
11272
11273                 ret = hclge_init_roce_client_instance(ae_dev, vport);
11274                 if (ret)
11275                         goto clear_roce;
11276
11277                 break;
11278         case HNAE3_CLIENT_ROCE:
11279                 if (hnae3_dev_roce_supported(hdev)) {
11280                         hdev->roce_client = client;
11281                         vport->roce.client = client;
11282                 }
11283
11284                 ret = hclge_init_roce_client_instance(ae_dev, vport);
11285                 if (ret)
11286                         goto clear_roce;
11287
11288                 break;
11289         default:
11290                 return -EINVAL;
11291         }
11292
11293         return 0;
11294
11295 clear_nic:
11296         hdev->nic_client = NULL;
11297         vport->nic.client = NULL;
11298         return ret;
11299 clear_roce:
11300         hdev->roce_client = NULL;
11301         vport->roce.client = NULL;
11302         return ret;
11303 }
11304
11305 static void hclge_uninit_client_instance(struct hnae3_client *client,
11306                                          struct hnae3_ae_dev *ae_dev)
11307 {
11308         struct hclge_dev *hdev = ae_dev->priv;
11309         struct hclge_vport *vport = &hdev->vport[0];
11310
11311         if (hdev->roce_client) {
11312                 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11313                 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11314                         msleep(HCLGE_WAIT_RESET_DONE);
11315
11316                 hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
11317                 hdev->roce_client = NULL;
11318                 vport->roce.client = NULL;
11319         }
11320         if (client->type == HNAE3_CLIENT_ROCE)
11321                 return;
11322         if (hdev->nic_client && client->ops->uninit_instance) {
11323                 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11324                 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11325                         msleep(HCLGE_WAIT_RESET_DONE);
11326
11327                 client->ops->uninit_instance(&vport->nic, 0);
11328                 hdev->nic_client = NULL;
11329                 vport->nic.client = NULL;
11330         }
11331 }
11332
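/* Map the optional device-memory BAR (BAR 4). Not every device exposes this
 * BAR, so a missing BAR is silently skipped; when present it is mapped
 * write-combined with devm_ioremap_wc().
 */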
11333 static int hclge_dev_mem_map(struct hclge_dev *hdev)
11334 {
11335 #define HCLGE_MEM_BAR           4
11336
11337         struct pci_dev *pdev = hdev->pdev;
11338         struct hclge_hw *hw = &hdev->hw;
11339
11340         /* if the device does not have device memory, return directly */
11341         if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
11342                 return 0;
11343
11344         hw->mem_base = devm_ioremap_wc(&pdev->dev,
11345                                        pci_resource_start(pdev, HCLGE_MEM_BAR),
11346                                        pci_resource_len(pdev, HCLGE_MEM_BAR));
11347         if (!hw->mem_base) {
11348                 dev_err(&pdev->dev, "failed to map device memory\n");
11349                 return -EFAULT;
11350         }
11351
11352         return 0;
11353 }
11354
11355 static int hclge_pci_init(struct hclge_dev *hdev)
11356 {
11357         struct pci_dev *pdev = hdev->pdev;
11358         struct hclge_hw *hw;
11359         int ret;
11360
11361         ret = pci_enable_device(pdev);
11362         if (ret) {
11363                 dev_err(&pdev->dev, "failed to enable PCI device\n");
11364                 return ret;
11365         }
11366
11367         ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
11368         if (ret) {
11369                 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
11370                 if (ret) {
11371                         dev_err(&pdev->dev,
11372                                 "can't set consistent PCI DMA mask\n");
11373                         goto err_disable_device;
11374                 }
11375                 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
11376         }
11377
11378         ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
11379         if (ret) {
11380                 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
11381                 goto err_disable_device;
11382         }
11383
11384         pci_set_master(pdev);
11385         hw = &hdev->hw;
11386         hw->io_base = pcim_iomap(pdev, 2, 0);
11387         if (!hw->io_base) {
11388                 dev_err(&pdev->dev, "Can't map configuration register space\n");
11389                 ret = -ENOMEM;
11390                 goto err_clr_master;
11391         }
11392
11393         ret = hclge_dev_mem_map(hdev);
11394         if (ret)
11395                 goto err_unmap_io_base;
11396
11397         hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
11398
11399         return 0;
11400
11401 err_unmap_io_base:
11402         pcim_iounmap(pdev, hdev->hw.io_base);
11403 err_clr_master:
11404         pci_clear_master(pdev);
11405         pci_release_regions(pdev);
11406 err_disable_device:
11407         pci_disable_device(pdev);
11408
11409         return ret;
11410 }
11411
11412 static void hclge_pci_uninit(struct hclge_dev *hdev)
11413 {
11414         struct pci_dev *pdev = hdev->pdev;
11415
11416         if (hdev->hw.mem_base)
11417                 devm_iounmap(&pdev->dev, hdev->hw.mem_base);
11418
11419         pcim_iounmap(pdev, hdev->hw.io_base);
11420         pci_free_irq_vectors(pdev);
11421         pci_clear_master(pdev);
11422         pci_release_mem_regions(pdev);
11423         pci_disable_device(pdev);
11424 }
11425
11426 static void hclge_state_init(struct hclge_dev *hdev)
11427 {
11428         set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
11429         set_bit(HCLGE_STATE_DOWN, &hdev->state);
11430         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
11431         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11432         clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
11433         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
11434         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
11435 }
11436
11437 static void hclge_state_uninit(struct hclge_dev *hdev)
11438 {
11439         set_bit(HCLGE_STATE_DOWN, &hdev->state);
11440         set_bit(HCLGE_STATE_REMOVING, &hdev->state);
11441
11442         if (hdev->reset_timer.function)
11443                 del_timer_sync(&hdev->reset_timer);
11444         if (hdev->service_task.work.func)
11445                 cancel_delayed_work_sync(&hdev->service_task);
11446 }
11447
11448 static void hclge_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
11449                                         enum hnae3_reset_type rst_type)
11450 {
11451 #define HCLGE_RESET_RETRY_WAIT_MS       500
11452 #define HCLGE_RESET_RETRY_CNT   5
11453
11454         struct hclge_dev *hdev = ae_dev->priv;
11455         int retry_cnt = 0;
11456         int ret;
11457
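        /* If preparing fails or another reset becomes pending, drop the reset
         * semaphore, wait HCLGE_RESET_RETRY_WAIT_MS and retry; the retry count
         * only bounds plain failures, while a pending reset keeps retrying.
         */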
11458 retry:
11459         down(&hdev->reset_sem);
11460         set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11461         hdev->reset_type = rst_type;
11462         ret = hclge_reset_prepare(hdev);
11463         if (ret || hdev->reset_pending) {
11464                 dev_err(&hdev->pdev->dev, "fail to prepare to reset, ret=%d\n",
11465                         ret);
11466                 if (hdev->reset_pending ||
11467                     retry_cnt++ < HCLGE_RESET_RETRY_CNT) {
11468                         dev_err(&hdev->pdev->dev,
11469                                 "reset_pending:0x%lx, retry_cnt:%d\n",
11470                                 hdev->reset_pending, retry_cnt);
11471                         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11472                         up(&hdev->reset_sem);
11473                         msleep(HCLGE_RESET_RETRY_WAIT_MS);
11474                         goto retry;
11475                 }
11476         }
11477
11478         /* disable misc vector until the reset is done */
11479         hclge_enable_vector(&hdev->misc_vector, false);
11480         set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
11481
11482         if (hdev->reset_type == HNAE3_FLR_RESET)
11483                 hdev->rst_stats.flr_rst_cnt++;
11484 }
11485
11486 static void hclge_reset_done(struct hnae3_ae_dev *ae_dev)
11487 {
11488         struct hclge_dev *hdev = ae_dev->priv;
11489         int ret;
11490
11491         hclge_enable_vector(&hdev->misc_vector, true);
11492
11493         ret = hclge_reset_rebuild(hdev);
11494         if (ret)
11495                 dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
11496
11497         hdev->reset_type = HNAE3_NONE_RESET;
11498         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11499         up(&hdev->reset_sem);
11500 }
11501
11502 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
11503 {
11504         u16 i;
11505
11506         for (i = 0; i < hdev->num_alloc_vport; i++) {
11507                 struct hclge_vport *vport = &hdev->vport[i];
11508                 int ret;
11509
11510                  /* Send cmd to clear vport's FUNC_RST_ING */
11511                 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
11512                 if (ret)
11513                         dev_warn(&hdev->pdev->dev,
11514                                  "clear vport(%u) rst failed %d!\n",
11515                                  vport->vport_id, ret);
11516         }
11517 }
11518
11519 static int hclge_clear_hw_resource(struct hclge_dev *hdev)
11520 {
11521         struct hclge_desc desc;
11522         int ret;
11523
11524         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_HW_RESOURCE, false);
11525
11526         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11527         /* This new command is only supported by new firmware; it will
11528          * fail with older firmware. The error value -EOPNOTSUPP can only
11529          * be returned by older firmware running this command, so to keep
11530          * the code backward compatible we override this value and return
11531          * success.
11532          */
11533         if (ret && ret != -EOPNOTSUPP) {
11534                 dev_err(&hdev->pdev->dev,
11535                         "failed to clear hw resource, ret = %d\n", ret);
11536                 return ret;
11537         }
11538         return 0;
11539 }
11540
11541 static void hclge_init_rxd_adv_layout(struct hclge_dev *hdev)
11542 {
11543         if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
11544                 hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 1);
11545 }
11546
11547 static void hclge_uninit_rxd_adv_layout(struct hclge_dev *hdev)
11548 {
11549         if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
11550                 hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 0);
11551 }
11552
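/* Probe-time initialization of the PF: PCI/BAR setup, devlink registration,
 * firmware command queue and command bring-up, capability and spec queries,
 * MSI and misc IRQ setup, TQP/vport allocation and mapping, then MAC, VLAN,
 * TM scheduler, RSS, manager table, flow director and PTP initialization,
 * before finally enabling the misc vector and scheduling the service task.
 */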
11553 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
11554 {
11555         struct pci_dev *pdev = ae_dev->pdev;
11556         struct hclge_dev *hdev;
11557         int ret;
11558
11559         hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
11560         if (!hdev)
11561                 return -ENOMEM;
11562
11563         hdev->pdev = pdev;
11564         hdev->ae_dev = ae_dev;
11565         hdev->reset_type = HNAE3_NONE_RESET;
11566         hdev->reset_level = HNAE3_FUNC_RESET;
11567         ae_dev->priv = hdev;
11568
11569         /* HW supports 2-layer VLAN */
11570         hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
11571
11572         mutex_init(&hdev->vport_lock);
11573         spin_lock_init(&hdev->fd_rule_lock);
11574         sema_init(&hdev->reset_sem, 1);
11575
11576         ret = hclge_pci_init(hdev);
11577         if (ret)
11578                 goto out;
11579
11580         ret = hclge_devlink_init(hdev);
11581         if (ret)
11582                 goto err_pci_uninit;
11583
11584         /* Initialize the firmware command queue */
11585         ret = hclge_cmd_queue_init(hdev);
11586         if (ret)
11587                 goto err_devlink_uninit;
11588
11589         /* Initialize the firmware command */
11590         ret = hclge_cmd_init(hdev);
11591         if (ret)
11592                 goto err_cmd_uninit;
11593
11594         ret = hclge_clear_hw_resource(hdev);
11595         if (ret)
11596                 goto err_cmd_uninit;
11597
11598         ret = hclge_get_cap(hdev);
11599         if (ret)
11600                 goto err_cmd_uninit;
11601
11602         ret = hclge_query_dev_specs(hdev);
11603         if (ret) {
11604                 dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
11605                         ret);
11606                 goto err_cmd_uninit;
11607         }
11608
11609         ret = hclge_configure(hdev);
11610         if (ret) {
11611                 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
11612                 goto err_cmd_uninit;
11613         }
11614
11615         ret = hclge_init_msi(hdev);
11616         if (ret) {
11617                 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
11618                 goto err_cmd_uninit;
11619         }
11620
11621         ret = hclge_misc_irq_init(hdev);
11622         if (ret)
11623                 goto err_msi_uninit;
11624
11625         ret = hclge_alloc_tqps(hdev);
11626         if (ret) {
11627                 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
11628                 goto err_msi_irq_uninit;
11629         }
11630
11631         ret = hclge_alloc_vport(hdev);
11632         if (ret)
11633                 goto err_msi_irq_uninit;
11634
11635         ret = hclge_map_tqp(hdev);
11636         if (ret)
11637                 goto err_msi_irq_uninit;
11638
11639         if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER &&
11640             !hnae3_dev_phy_imp_supported(hdev)) {
11641                 ret = hclge_mac_mdio_config(hdev);
11642                 if (ret)
11643                         goto err_msi_irq_uninit;
11644         }
11645
11646         ret = hclge_init_umv_space(hdev);
11647         if (ret)
11648                 goto err_mdiobus_unreg;
11649
11650         ret = hclge_mac_init(hdev);
11651         if (ret) {
11652                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11653                 goto err_mdiobus_unreg;
11654         }
11655
11656         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11657         if (ret) {
11658                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11659                 goto err_mdiobus_unreg;
11660         }
11661
11662         ret = hclge_config_gro(hdev);
11663         if (ret)
11664                 goto err_mdiobus_unreg;
11665
11666         ret = hclge_init_vlan_config(hdev);
11667         if (ret) {
11668                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
11669                 goto err_mdiobus_unreg;
11670         }
11671
11672         ret = hclge_tm_schd_init(hdev);
11673         if (ret) {
11674                 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
11675                 goto err_mdiobus_unreg;
11676         }
11677
11678         ret = hclge_rss_init_cfg(hdev);
11679         if (ret) {
11680                 dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
11681                 goto err_mdiobus_unreg;
11682         }
11683
11684         ret = hclge_rss_init_hw(hdev);
11685         if (ret) {
11686                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
11687                 goto err_mdiobus_unreg;
11688         }
11689
11690         ret = init_mgr_tbl(hdev);
11691         if (ret) {
11692                 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
11693                 goto err_mdiobus_unreg;
11694         }
11695
11696         ret = hclge_init_fd_config(hdev);
11697         if (ret) {
11698                 dev_err(&pdev->dev,
11699                         "fd table init fail, ret=%d\n", ret);
11700                 goto err_mdiobus_unreg;
11701         }
11702
11703         ret = hclge_ptp_init(hdev);
11704         if (ret)
11705                 goto err_mdiobus_unreg;
11706
11707         INIT_KFIFO(hdev->mac_tnl_log);
11708
11709         hclge_dcb_ops_set(hdev);
11710
11711         timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
11712         INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
11713
11714         /* Set up affinity after the service timer is set up because
11715          * add_timer_on() is called from the affinity notify callback.
11716          */
11717         hclge_misc_affinity_setup(hdev);
11718
11719         hclge_clear_all_event_cause(hdev);
11720         hclge_clear_resetting_state(hdev);
11721
11722         /* Log and clear the hw errors that have already occurred */
11723         if (hnae3_dev_ras_imp_supported(hdev))
11724                 hclge_handle_occurred_error(hdev);
11725         else
11726                 hclge_handle_all_hns_hw_errors(ae_dev);
11727
11728         /* Request a delayed reset for error recovery: an immediate global
11729          * reset on this PF could affect the pending initialization of other PFs
11730          */
11731         if (ae_dev->hw_err_reset_req) {
11732                 enum hnae3_reset_type reset_level;
11733
11734                 reset_level = hclge_get_reset_level(ae_dev,
11735                                                     &ae_dev->hw_err_reset_req);
11736                 hclge_set_def_reset_request(ae_dev, reset_level);
11737                 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
11738         }
11739
11740         hclge_init_rxd_adv_layout(hdev);
11741
11742         /* Enable MISC vector(vector0) */
11743         hclge_enable_vector(&hdev->misc_vector, true);
11744
11745         hclge_state_init(hdev);
11746         hdev->last_reset_time = jiffies;
11747
11748         dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
11749                  HCLGE_DRIVER_NAME);
11750
11751         hclge_task_schedule(hdev, round_jiffies_relative(HZ));
11752
11753         return 0;
11754
11755 err_mdiobus_unreg:
11756         if (hdev->hw.mac.phydev)
11757                 mdiobus_unregister(hdev->hw.mac.mdio_bus);
11758 err_msi_irq_uninit:
11759         hclge_misc_irq_uninit(hdev);
11760 err_msi_uninit:
11761         pci_free_irq_vectors(pdev);
11762 err_cmd_uninit:
11763         hclge_cmd_uninit(hdev);
11764 err_devlink_uninit:
11765         hclge_devlink_uninit(hdev);
11766 err_pci_uninit:
11767         pcim_iounmap(pdev, hdev->hw.io_base);
11768         pci_clear_master(pdev);
11769         pci_release_regions(pdev);
11770         pci_disable_device(pdev);
11771 out:
11772         mutex_destroy(&hdev->vport_lock);
11773         return ret;
11774 }
11775
11776 static void hclge_stats_clear(struct hclge_dev *hdev)
11777 {
11778         memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
11779 }
11780
11781 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11782 {
11783         return hclge_config_switch_param(hdev, vf, enable,
11784                                          HCLGE_SWITCH_ANTI_SPOOF_MASK);
11785 }
11786
11787 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11788 {
11789         return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
11790                                           HCLGE_FILTER_FE_NIC_INGRESS_B,
11791                                           enable, vf);
11792 }
11793
11794 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
11795 {
11796         int ret;
11797
11798         ret = hclge_set_mac_spoofchk(hdev, vf, enable);
11799         if (ret) {
11800                 dev_err(&hdev->pdev->dev,
11801                         "Set vf %d mac spoof check %s failed, ret=%d\n",
11802                         vf, enable ? "on" : "off", ret);
11803                 return ret;
11804         }
11805
11806         ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
11807         if (ret)
11808                 dev_err(&hdev->pdev->dev,
11809                         "Set vf %d vlan spoof check %s failed, ret=%d\n",
11810                         vf, enable ? "on" : "off", ret);
11811
11812         return ret;
11813 }
11814
11815 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
11816                                  bool enable)
11817 {
11818         struct hclge_vport *vport = hclge_get_vport(handle);
11819         struct hclge_dev *hdev = vport->back;
11820         u32 new_spoofchk = enable ? 1 : 0;
11821         int ret;
11822
11823         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11824                 return -EOPNOTSUPP;
11825
11826         vport = hclge_get_vf_vport(hdev, vf);
11827         if (!vport)
11828                 return -EINVAL;
11829
11830         if (vport->vf_info.spoofchk == new_spoofchk)
11831                 return 0;
11832
11833         if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
11834                 dev_warn(&hdev->pdev->dev,
11835                          "vf %d vlan table is full, enabling spoof check may cause its packet transmission to fail\n",
11836                          vf);
11837         else if (enable && hclge_is_umv_space_full(vport, true))
11838                 dev_warn(&hdev->pdev->dev,
11839                          "vf %d mac table is full, enabling spoof check may cause its packet transmission to fail\n",
11840                          vf);
11841
11842         ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
11843         if (ret)
11844                 return ret;
11845
11846         vport->vf_info.spoofchk = new_spoofchk;
11847         return 0;
11848 }
11849
11850 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
11851 {
11852         struct hclge_vport *vport = hdev->vport;
11853         int ret;
11854         int i;
11855
11856         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11857                 return 0;
11858
11859         /* resume the vf spoof check state after reset */
11860         for (i = 0; i < hdev->num_alloc_vport; i++) {
11861                 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
11862                                                vport->vf_info.spoofchk);
11863                 if (ret)
11864                         return ret;
11865
11866                 vport++;
11867         }
11868
11869         return 0;
11870 }
11871
11872 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
11873 {
11874         struct hclge_vport *vport = hclge_get_vport(handle);
11875         struct hclge_dev *hdev = vport->back;
11876         u32 new_trusted = enable ? 1 : 0;
11877
11878         vport = hclge_get_vf_vport(hdev, vf);
11879         if (!vport)
11880                 return -EINVAL;
11881
11882         if (vport->vf_info.trusted == new_trusted)
11883                 return 0;
11884
11885         vport->vf_info.trusted = new_trusted;
11886         set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
11887         hclge_task_schedule(hdev, 0);
11888
11889         return 0;
11890 }
11891
11892 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
11893 {
11894         int ret;
11895         int vf;
11896
11897         /* reset vf rate to default value */
11898         for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
11899                 struct hclge_vport *vport = &hdev->vport[vf];
11900
11901                 vport->vf_info.max_tx_rate = 0;
11902                 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
11903                 if (ret)
11904                         dev_err(&hdev->pdev->dev,
11905                                 "vf%d failed to reset to default, ret=%d\n",
11906                                 vf - HCLGE_VF_VPORT_START_NUM, ret);
11907         }
11908 }
11909
11910 static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
11911                                      int min_tx_rate, int max_tx_rate)
11912 {
11913         if (min_tx_rate != 0 ||
11914             max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
11915                 dev_err(&hdev->pdev->dev,
11916                         "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
11917                         min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
11918                 return -EINVAL;
11919         }
11920
11921         return 0;
11922 }
11923
11924 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
11925                              int min_tx_rate, int max_tx_rate, bool force)
11926 {
11927         struct hclge_vport *vport = hclge_get_vport(handle);
11928         struct hclge_dev *hdev = vport->back;
11929         int ret;
11930
11931         ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate);
11932         if (ret)
11933                 return ret;
11934
11935         vport = hclge_get_vf_vport(hdev, vf);
11936         if (!vport)
11937                 return -EINVAL;
11938
11939         if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
11940                 return 0;
11941
11942         ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
11943         if (ret)
11944                 return ret;
11945
11946         vport->vf_info.max_tx_rate = max_tx_rate;
11947
11948         return 0;
11949 }
11950
11951 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
11952 {
11953         struct hnae3_handle *handle = &hdev->vport->nic;
11954         struct hclge_vport *vport;
11955         int ret;
11956         int vf;
11957
11958         /* resume the vf max_tx_rate after reset */
11959         for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
11960                 vport = hclge_get_vf_vport(hdev, vf);
11961                 if (!vport)
11962                         return -EINVAL;
11963
11964                 /* zero means max rate; after reset the firmware has already
11965                  * set it to max rate, so just continue.
11966                  */
11967                 if (!vport->vf_info.max_tx_rate)
11968                         continue;
11969
11970                 ret = hclge_set_vf_rate(handle, vf, 0,
11971                                         vport->vf_info.max_tx_rate, true);
11972                 if (ret) {
11973                         dev_err(&hdev->pdev->dev,
11974                                 "vf%d failed to resume tx_rate:%u, ret=%d\n",
11975                                 vf, vport->vf_info.max_tx_rate, ret);
11976                         return ret;
11977                 }
11978         }
11979
11980         return 0;
11981 }
11982
11983 static void hclge_reset_vport_state(struct hclge_dev *hdev)
11984 {
11985         struct hclge_vport *vport = hdev->vport;
11986         int i;
11987
11988         for (i = 0; i < hdev->num_alloc_vport; i++) {
11989                 hclge_vport_stop(vport);
11990                 vport++;
11991         }
11992 }
11993
11994 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
11995 {
11996         struct hclge_dev *hdev = ae_dev->priv;
11997         struct pci_dev *pdev = ae_dev->pdev;
11998         int ret;
11999
12000         set_bit(HCLGE_STATE_DOWN, &hdev->state);
12001
12002         hclge_stats_clear(hdev);
12003         /* NOTE: a pf reset does not need to clear or restore the pf and vf
12004          * table entries, so do not clean the tables in memory here.
12005          */
12006         if (hdev->reset_type == HNAE3_IMP_RESET ||
12007             hdev->reset_type == HNAE3_GLOBAL_RESET) {
12008                 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
12009                 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
12010                 bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
12011                 hclge_reset_umv_space(hdev);
12012         }
12013
12014         ret = hclge_cmd_init(hdev);
12015         if (ret) {
12016                 dev_err(&pdev->dev, "Cmd queue init failed\n");
12017                 return ret;
12018         }
12019
12020         ret = hclge_map_tqp(hdev);
12021         if (ret) {
12022                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
12023                 return ret;
12024         }
12025
12026         ret = hclge_mac_init(hdev);
12027         if (ret) {
12028                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
12029                 return ret;
12030         }
12031
12032         ret = hclge_tp_port_init(hdev);
12033         if (ret) {
12034                 dev_err(&pdev->dev, "failed to init tp port, ret = %d\n",
12035                         ret);
12036                 return ret;
12037         }
12038
12039         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
12040         if (ret) {
12041                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
12042                 return ret;
12043         }
12044
12045         ret = hclge_config_gro(hdev);
12046         if (ret)
12047                 return ret;
12048
12049         ret = hclge_init_vlan_config(hdev);
12050         if (ret) {
12051                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
12052                 return ret;
12053         }
12054
12055         ret = hclge_tm_init_hw(hdev, true);
12056         if (ret) {
12057                 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
12058                 return ret;
12059         }
12060
12061         ret = hclge_rss_init_hw(hdev);
12062         if (ret) {
12063                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
12064                 return ret;
12065         }
12066
12067         ret = init_mgr_tbl(hdev);
12068         if (ret) {
12069                 dev_err(&pdev->dev,
12070                         "failed to reinit manager table, ret = %d\n", ret);
12071                 return ret;
12072         }
12073
12074         ret = hclge_init_fd_config(hdev);
12075         if (ret) {
12076                 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
12077                 return ret;
12078         }
12079
12080         ret = hclge_ptp_init(hdev);
12081         if (ret)
12082                 return ret;
12083
12084         /* Log and clear the hw errors that have already occurred */
12085         if (hnae3_dev_ras_imp_supported(hdev))
12086                 hclge_handle_occurred_error(hdev);
12087         else
12088                 hclge_handle_all_hns_hw_errors(ae_dev);
12089
12090         /* Re-enable the hw error interrupts because
12091          * the interrupts get disabled on global reset.
12092          */
12093         ret = hclge_config_nic_hw_error(hdev, true);
12094         if (ret) {
12095                 dev_err(&pdev->dev,
12096                         "fail(%d) to re-enable NIC hw error interrupts\n",
12097                         ret);
12098                 return ret;
12099         }
12100
12101         if (hdev->roce_client) {
12102                 ret = hclge_config_rocee_ras_interrupt(hdev, true);
12103                 if (ret) {
12104                         dev_err(&pdev->dev,
12105                                 "fail(%d) to re-enable roce ras interrupts\n",
12106                                 ret);
12107                         return ret;
12108                 }
12109         }
12110
12111         hclge_reset_vport_state(hdev);
12112         ret = hclge_reset_vport_spoofchk(hdev);
12113         if (ret)
12114                 return ret;
12115
12116         ret = hclge_resume_vf_rate(hdev);
12117         if (ret)
12118                 return ret;
12119
12120         hclge_init_rxd_adv_layout(hdev);
12121
12122         dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
12123                  HCLGE_DRIVER_NAME);
12124
12125         return 0;
12126 }
12127
12128 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
12129 {
12130         struct hclge_dev *hdev = ae_dev->priv;
12131         struct hclge_mac *mac = &hdev->hw.mac;
12132
12133         hclge_reset_vf_rate(hdev);
12134         hclge_clear_vf_vlan(hdev);
12135         hclge_misc_affinity_teardown(hdev);
12136         hclge_state_uninit(hdev);
12137         hclge_ptp_uninit(hdev);
12138         hclge_uninit_rxd_adv_layout(hdev);
12139         hclge_uninit_mac_table(hdev);
12140         hclge_del_all_fd_entries(hdev);
12141
12142         if (mac->phydev)
12143                 mdiobus_unregister(mac->mdio_bus);
12144
12145         /* Disable MISC vector(vector0) */
12146         hclge_enable_vector(&hdev->misc_vector, false);
12147         synchronize_irq(hdev->misc_vector.vector_irq);
12148
12149         /* Disable all hw interrupts */
12150         hclge_config_mac_tnl_int(hdev, false);
12151         hclge_config_nic_hw_error(hdev, false);
12152         hclge_config_rocee_ras_interrupt(hdev, false);
12153
12154         hclge_cmd_uninit(hdev);
12155         hclge_misc_irq_uninit(hdev);
12156         hclge_devlink_uninit(hdev);
12157         hclge_pci_uninit(hdev);
12158         mutex_destroy(&hdev->vport_lock);
12159         hclge_uninit_vport_vlan_table(hdev);
12160         ae_dev->priv = NULL;
12161 }
12162
12163 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
12164 {
12165         struct hclge_vport *vport = hclge_get_vport(handle);
12166         struct hclge_dev *hdev = vport->back;
12167
12168         return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps);
12169 }
12170
12171 static void hclge_get_channels(struct hnae3_handle *handle,
12172                                struct ethtool_channels *ch)
12173 {
12174         ch->max_combined = hclge_get_max_channels(handle);
12175         ch->other_count = 1;
12176         ch->max_other = 1;
12177         ch->combined_count = handle->kinfo.rss_size;
12178 }
12179
12180 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
12181                                         u16 *alloc_tqps, u16 *max_rss_size)
12182 {
12183         struct hclge_vport *vport = hclge_get_vport(handle);
12184         struct hclge_dev *hdev = vport->back;
12185
12186         *alloc_tqps = vport->alloc_tqps;
12187         *max_rss_size = hdev->pf_rss_size_max;
12188 }
12189
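/* Change the number of queue pairs, typically driven by ethtool's channel
 * configuration (e.g. "ethtool -L <dev> combined 8"): update the vport/TM
 * mapping, reprogram the RSS TC mode for the new rss_size, and rebuild the
 * RSS indirection table unless the user has configured one explicitly.
 */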
12190 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
12191                               bool rxfh_configured)
12192 {
12193         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
12194         struct hclge_vport *vport = hclge_get_vport(handle);
12195         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
12196         u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
12197         struct hclge_dev *hdev = vport->back;
12198         u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
12199         u16 cur_rss_size = kinfo->rss_size;
12200         u16 cur_tqps = kinfo->num_tqps;
12201         u16 tc_valid[HCLGE_MAX_TC_NUM];
12202         u16 roundup_size;
12203         u32 *rss_indir;
12204         unsigned int i;
12205         int ret;
12206
12207         kinfo->req_rss_size = new_tqps_num;
12208
12209         ret = hclge_tm_vport_map_update(hdev);
12210         if (ret) {
12211                 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
12212                 return ret;
12213         }
12214
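        /* The per-TC RSS size is programmed as a log2 value, so round the new
         * rss_size up to a power of two and take its exponent.
         */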
12215         roundup_size = roundup_pow_of_two(kinfo->rss_size);
12216         roundup_size = ilog2(roundup_size);
12217         /* Set the RSS TC mode according to the new RSS size */
12218         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
12219                 tc_valid[i] = 0;
12220
12221                 if (!(hdev->hw_tc_map & BIT(i)))
12222                         continue;
12223
12224                 tc_valid[i] = 1;
12225                 tc_size[i] = roundup_size;
12226                 tc_offset[i] = kinfo->rss_size * i;
12227         }
12228         ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
12229         if (ret)
12230                 return ret;
12231
12232         /* RSS indirection table has been configured by user */
12233         if (rxfh_configured)
12234                 goto out;
12235
12236         /* Reinitialize the RSS indirection table according to the new RSS size */
12237         rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32),
12238                             GFP_KERNEL);
12239         if (!rss_indir)
12240                 return -ENOMEM;
12241
12242         for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
12243                 rss_indir[i] = i % kinfo->rss_size;
12244
12245         ret = hclge_set_rss(handle, rss_indir, NULL, 0);
12246         if (ret)
12247                 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
12248                         ret);
12249
12250         kfree(rss_indir);
12251
12252 out:
12253         if (!ret)
12254                 dev_info(&hdev->pdev->dev,
12255                          "Channels changed, rss_size from %u to %u, tqps from %u to %u\n",
12256                          cur_rss_size, kinfo->rss_size,
12257                          cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);
12258
12259         return ret;
12260 }
12261
12262 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
12263                               u32 *regs_num_64_bit)
12264 {
12265         struct hclge_desc desc;
12266         u32 total_num;
12267         int ret;
12268
12269         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
12270         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12271         if (ret) {
12272                 dev_err(&hdev->pdev->dev,
12273                         "Query register number cmd failed, ret = %d.\n", ret);
12274                 return ret;
12275         }
12276
12277         *regs_num_32_bit = le32_to_cpu(desc.data[0]);
12278         *regs_num_64_bit = le32_to_cpu(desc.data[1]);
12279
12280         total_num = *regs_num_32_bit + *regs_num_64_bit;
12281         if (!total_num)
12282                 return -EINVAL;
12283
12284         return 0;
12285 }
12286
12287 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
12288                                  void *data)
12289 {
12290 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
12291 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
12292
12293         struct hclge_desc *desc;
12294         u32 *reg_val = data;
12295         __le32 *desc_data;
12296         int nodata_num;
12297         int cmd_num;
12298         int i, k, n;
12299         int ret;
12300
12301         if (regs_num == 0)
12302                 return 0;
12303
12304         nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
12305         cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
12306                                HCLGE_32_BIT_REG_RTN_DATANUM);
12307         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
12308         if (!desc)
12309                 return -ENOMEM;
12310
12311         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
12312         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
12313         if (ret) {
12314                 dev_err(&hdev->pdev->dev,
12315                         "Query 32 bit register cmd failed, ret = %d.\n", ret);
12316                 kfree(desc);
12317                 return ret;
12318         }
12319
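        /* Each BD nominally returns HCLGE_32_BIT_REG_RTN_DATANUM register
         * words, but the first BD keeps its command header and therefore
         * carries HCLGE_32_BIT_DESC_NODATA_LEN fewer words; the following BDs
         * are reused in full for register data.
         */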
12320         for (i = 0; i < cmd_num; i++) {
12321                 if (i == 0) {
12322                         desc_data = (__le32 *)(&desc[i].data[0]);
12323                         n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
12324                 } else {
12325                         desc_data = (__le32 *)(&desc[i]);
12326                         n = HCLGE_32_BIT_REG_RTN_DATANUM;
12327                 }
12328                 for (k = 0; k < n; k++) {
12329                         *reg_val++ = le32_to_cpu(*desc_data++);
12330
12331                         regs_num--;
12332                         if (!regs_num)
12333                                 break;
12334                 }
12335         }
12336
12337         kfree(desc);
12338         return 0;
12339 }
12340
12341 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
12342                                  void *data)
12343 {
12344 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
12345 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
12346
12347         struct hclge_desc *desc;
12348         u64 *reg_val = data;
12349         __le64 *desc_data;
12350         int nodata_len;
12351         int cmd_num;
12352         int i, k, n;
12353         int ret;
12354
12355         if (regs_num == 0)
12356                 return 0;
12357
12358         nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
12359         cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
12360                                HCLGE_64_BIT_REG_RTN_DATANUM);
12361         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
12362         if (!desc)
12363                 return -ENOMEM;
12364
12365         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
12366         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
12367         if (ret) {
12368                 dev_err(&hdev->pdev->dev,
12369                         "Query 64 bit register cmd failed, ret = %d.\n", ret);
12370                 kfree(desc);
12371                 return ret;
12372         }
12373
12374         for (i = 0; i < cmd_num; i++) {
12375                 if (i == 0) {
12376                         desc_data = (__le64 *)(&desc[i].data[0]);
12377                         n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
12378                 } else {
12379                         desc_data = (__le64 *)(&desc[i]);
12380                         n = HCLGE_64_BIT_REG_RTN_DATANUM;
12381                 }
12382                 for (k = 0; k < n; k++) {
12383                         *reg_val++ = le64_to_cpu(*desc_data++);
12384
12385                         regs_num--;
12386                         if (!regs_num)
12387                                 break;
12388                 }
12389         }
12390
12391         kfree(desc);
12392         return 0;
12393 }
12394
12395 #define MAX_SEPARATE_NUM        4
12396 #define SEPARATOR_VALUE         0xFDFCFBFA
12397 #define REG_NUM_PER_LINE        4
12398 #define REG_LEN_PER_LINE        (REG_NUM_PER_LINE * sizeof(u32))
12399 #define REG_SEPARATOR_LINE      1
12400 #define REG_NUM_REMAIN_MASK     3
12401
12402 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
12403 {
12404         int i;
12405
12406         /* initialize all command BDs except the last one */
12407         for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
12408                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
12409                                            true);
12410                 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12411         }
12412
12413         /* initialize the last command BD */
12414         hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
12415
12416         return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
12417 }
12418
12419 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
12420                                     int *bd_num_list,
12421                                     u32 type_num)
12422 {
12423         u32 entries_per_desc, desc_index, index, offset, i;
12424         struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
12425         int ret;
12426
12427         ret = hclge_query_bd_num_cmd_send(hdev, desc);
12428         if (ret) {
12429                 dev_err(&hdev->pdev->dev,
12430                         "Get dfx bd num fail, status is %d.\n", ret);
12431                 return ret;
12432         }
12433
12434         entries_per_desc = ARRAY_SIZE(desc[0].data);
12435         for (i = 0; i < type_num; i++) {
12436                 offset = hclge_dfx_bd_offset_list[i];
12437                 index = offset % entries_per_desc;
12438                 desc_index = offset / entries_per_desc;
12439                 bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
12440         }
12441
12442         return ret;
12443 }
12444
12445 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
12446                                   struct hclge_desc *desc_src, int bd_num,
12447                                   enum hclge_opcode_type cmd)
12448 {
12449         struct hclge_desc *desc = desc_src;
12450         int i, ret;
12451
12452         hclge_cmd_setup_basic_desc(desc, cmd, true);
12453         for (i = 0; i < bd_num - 1; i++) {
12454                 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12455                 desc++;
12456                 hclge_cmd_setup_basic_desc(desc, cmd, true);
12457         }
12458
12459         desc = desc_src;
12460         ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
12461         if (ret)
12462                 dev_err(&hdev->pdev->dev,
12463                         "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
12464                         cmd, ret);
12465
12466         return ret;
12467 }
12468
12469 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
12470                                     void *data)
12471 {
12472         int entries_per_desc, reg_num, separator_num, desc_index, index, i;
12473         struct hclge_desc *desc = desc_src;
12474         u32 *reg = data;
12475
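        /* Copy every register word out of the BDs, then pad with
         * SEPARATOR_VALUE words so the dumped block ends on a REG_NUM_PER_LINE
         * boundary (a whole separator line is emitted when already aligned).
         */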
12476         entries_per_desc = ARRAY_SIZE(desc->data);
12477         reg_num = entries_per_desc * bd_num;
12478         separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
12479         for (i = 0; i < reg_num; i++) {
12480                 index = i % entries_per_desc;
12481                 desc_index = i / entries_per_desc;
12482                 *reg++ = le32_to_cpu(desc[desc_index].data[index]);
12483         }
12484         for (i = 0; i < separator_num; i++)
12485                 *reg++ = SEPARATOR_VALUE;
12486
12487         return reg_num + separator_num;
12488 }
12489
12490 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
12491 {
12492         u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
12493         int data_len_per_desc, bd_num, i;
12494         int *bd_num_list;
12495         u32 data_len;
12496         int ret;
12497
12498         bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
12499         if (!bd_num_list)
12500                 return -ENOMEM;
12501
12502         ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
12503         if (ret) {
12504                 dev_err(&hdev->pdev->dev,
12505                         "Get dfx reg bd num fail, status is %d.\n", ret);
12506                 goto out;
12507         }
12508
12509         data_len_per_desc = sizeof_field(struct hclge_desc, data);
12510         *len = 0;
12511         for (i = 0; i < dfx_reg_type_num; i++) {
12512                 bd_num = bd_num_list[i];
12513                 data_len = data_len_per_desc * bd_num;
12514                 *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
12515         }
12516
12517 out:
12518         kfree(bd_num_list);
12519         return ret;
12520 }
12521
12522 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
12523 {
12524         u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
12525         int bd_num, bd_num_max, buf_len, i;
12526         struct hclge_desc *desc_src;
12527         int *bd_num_list;
12528         u32 *reg = data;
12529         int ret;
12530
12531         bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
12532         if (!bd_num_list)
12533                 return -ENOMEM;
12534
12535         ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
12536         if (ret) {
12537                 dev_err(&hdev->pdev->dev,
12538                         "Get dfx reg bd num fail, status is %d.\n", ret);
12539                 goto out;
12540         }
12541
12542         bd_num_max = bd_num_list[0];
12543         for (i = 1; i < dfx_reg_type_num; i++)
12544                 bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
12545
12546         buf_len = sizeof(*desc_src) * bd_num_max;
12547         desc_src = kzalloc(buf_len, GFP_KERNEL);
12548         if (!desc_src) {
12549                 ret = -ENOMEM;
12550                 goto out;
12551         }
12552
12553         for (i = 0; i < dfx_reg_type_num; i++) {
12554                 bd_num = bd_num_list[i];
12555                 ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
12556                                              hclge_dfx_reg_opcode_list[i]);
12557                 if (ret) {
12558                         dev_err(&hdev->pdev->dev,
12559                                 "Get dfx reg fail, status is %d.\n", ret);
12560                         break;
12561                 }
12562
12563                 reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
12564         }
12565
12566         kfree(desc_src);
12567 out:
12568         kfree(bd_num_list);
12569         return ret;
12570 }
12571
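/* Read the cmdq, common, per-ring and per-interrupt-vector registers
 * directly from the PF PCIe register space, padding each group with
 * separator values. Returns the number of u32 entries written.
 */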
12572 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
12573                               struct hnae3_knic_private_info *kinfo)
12574 {
12575 #define HCLGE_RING_REG_OFFSET           0x200
12576 #define HCLGE_RING_INT_REG_OFFSET       0x4
12577
12578         int i, j, reg_num, separator_num;
12579         int data_num_sum;
12580         u32 *reg = data;
12581
12582         /* fetching per-PF register values from the PF PCIe register space */
12583         reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
12584         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12585         for (i = 0; i < reg_num; i++)
12586                 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
12587         for (i = 0; i < separator_num; i++)
12588                 *reg++ = SEPARATOR_VALUE;
12589         data_num_sum = reg_num + separator_num;
12590
12591         reg_num = ARRAY_SIZE(common_reg_addr_list);
12592         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12593         for (i = 0; i < reg_num; i++)
12594                 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
12595         for (i = 0; i < separator_num; i++)
12596                 *reg++ = SEPARATOR_VALUE;
12597         data_num_sum += reg_num + separator_num;
12598
12599         reg_num = ARRAY_SIZE(ring_reg_addr_list);
12600         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12601         for (j = 0; j < kinfo->num_tqps; j++) {
12602                 for (i = 0; i < reg_num; i++)
12603                         *reg++ = hclge_read_dev(&hdev->hw,
12604                                                 ring_reg_addr_list[i] +
12605                                                 HCLGE_RING_REG_OFFSET * j);
12606                 for (i = 0; i < separator_num; i++)
12607                         *reg++ = SEPARATOR_VALUE;
12608         }
12609         data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
12610
12611         reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
12612         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12613         for (j = 0; j < hdev->num_msi_used - 1; j++) {
12614                 for (i = 0; i < reg_num; i++)
12615                         *reg++ = hclge_read_dev(&hdev->hw,
12616                                                 tqp_intr_reg_addr_list[i] +
12617                                                 HCLGE_RING_INT_REG_OFFSET * j);
12618                 for (i = 0; i < separator_num; i++)
12619                         *reg++ = SEPARATOR_VALUE;
12620         }
12621         data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
12622
12623         return data_num_sum;
12624 }
12625
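/* Return the size in bytes of the register dump produced by
 * hclge_get_regs(), with each register group rounded up to whole
 * register lines plus a separator line.
 */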
12626 static int hclge_get_regs_len(struct hnae3_handle *handle)
12627 {
12628         int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
12629         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
12630         struct hclge_vport *vport = hclge_get_vport(handle);
12631         struct hclge_dev *hdev = vport->back;
12632         int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
12633         int regs_lines_32_bit, regs_lines_64_bit;
12634         int ret;
12635
12636         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
12637         if (ret) {
12638                 dev_err(&hdev->pdev->dev,
12639                         "Get register number failed, ret = %d.\n", ret);
12640                 return ret;
12641         }
12642
12643         ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
12644         if (ret) {
12645                 dev_err(&hdev->pdev->dev,
12646                         "Get dfx reg len failed, ret = %d.\n", ret);
12647                 return ret;
12648         }
12649
12650         cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
12651                 REG_SEPARATOR_LINE;
12652         common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
12653                 REG_SEPARATOR_LINE;
12654         ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
12655                 REG_SEPARATOR_LINE;
12656         tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
12657                 REG_SEPARATOR_LINE;
12658         regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
12659                 REG_SEPARATOR_LINE;
12660         regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
12661                 REG_SEPARATOR_LINE;
12662
12663         return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
12664                 tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
12665                 regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
12666 }
12667
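/* Fill the ethtool register dump: direct PF registers first, then the
 * 32-bit and 64-bit registers queried from the firmware, and finally the
 * DFX registers, each group padded with separator values.
 */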
12668 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
12669                            void *data)
12670 {
12671         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
12672         struct hclge_vport *vport = hclge_get_vport(handle);
12673         struct hclge_dev *hdev = vport->back;
12674         u32 regs_num_32_bit, regs_num_64_bit;
12675         int i, reg_num, separator_num, ret;
12676         u32 *reg = data;
12677
12678         *version = hdev->fw_version;
12679
12680         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
12681         if (ret) {
12682                 dev_err(&hdev->pdev->dev,
12683                         "Get register number failed, ret = %d.\n", ret);
12684                 return;
12685         }
12686
12687         reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
12688
12689         ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
12690         if (ret) {
12691                 dev_err(&hdev->pdev->dev,
12692                         "Get 32 bit register failed, ret = %d.\n", ret);
12693                 return;
12694         }
12695         reg_num = regs_num_32_bit;
12696         reg += reg_num;
12697         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12698         for (i = 0; i < separator_num; i++)
12699                 *reg++ = SEPARATOR_VALUE;
12700
12701         ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
12702         if (ret) {
12703                 dev_err(&hdev->pdev->dev,
12704                         "Get 64 bit register failed, ret = %d.\n", ret);
12705                 return;
12706         }
12707         reg_num = regs_num_64_bit * 2;
12708         reg += reg_num;
12709         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12710         for (i = 0; i < separator_num; i++)
12711                 *reg++ = SEPARATOR_VALUE;
12712
12713         ret = hclge_get_dfx_reg(hdev, reg);
12714         if (ret)
12715                 dev_err(&hdev->pdev->dev,
12716                         "Get dfx register failed, ret = %d.\n", ret);
12717 }
12718
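/* Set the locate LED state through the LED status config firmware command. */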
12719 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
12720 {
12721         struct hclge_set_led_state_cmd *req;
12722         struct hclge_desc desc;
12723         int ret;
12724
12725         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
12726
12727         req = (struct hclge_set_led_state_cmd *)desc.data;
12728         hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
12729                         HCLGE_LED_LOCATE_STATE_S, locate_led_status);
12730
12731         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12732         if (ret)
12733                 dev_err(&hdev->pdev->dev,
12734                         "Send set led state cmd error, ret = %d\n", ret);
12735
12736         return ret;
12737 }
12738
12739 enum hclge_led_status {
12740         HCLGE_LED_OFF,
12741         HCLGE_LED_ON,
12742         HCLGE_LED_NO_CHANGE = 0xFF,
12743 };
12744
12745 static int hclge_set_led_id(struct hnae3_handle *handle,
12746                             enum ethtool_phys_id_state status)
12747 {
12748         struct hclge_vport *vport = hclge_get_vport(handle);
12749         struct hclge_dev *hdev = vport->back;
12750
12751         switch (status) {
12752         case ETHTOOL_ID_ACTIVE:
12753                 return hclge_set_led_status(hdev, HCLGE_LED_ON);
12754         case ETHTOOL_ID_INACTIVE:
12755                 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
12756         default:
12757                 return -EINVAL;
12758         }
12759 }
12760
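/* Report the supported and advertised link mode bitmaps cached in
 * hdev->hw.mac.
 */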
12761 static void hclge_get_link_mode(struct hnae3_handle *handle,
12762                                 unsigned long *supported,
12763                                 unsigned long *advertising)
12764 {
12765         unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
12766         struct hclge_vport *vport = hclge_get_vport(handle);
12767         struct hclge_dev *hdev = vport->back;
12768         unsigned int idx = 0;
12769
12770         for (; idx < size; idx++) {
12771                 supported[idx] = hdev->hw.mac.supported[idx];
12772                 advertising[idx] = hdev->hw.mac.advertising[idx];
12773         }
12774 }
12775
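/* Enable or disable hardware GRO; roll back to the old setting if the
 * firmware command fails.
 */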
12776 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
12777 {
12778         struct hclge_vport *vport = hclge_get_vport(handle);
12779         struct hclge_dev *hdev = vport->back;
12780         bool gro_en_old = hdev->gro_en;
12781         int ret;
12782
12783         hdev->gro_en = enable;
12784         ret = hclge_config_gro(hdev);
12785         if (ret)
12786                 hdev->gro_en = gro_en_old;
12787
12788         return ret;
12789 }
12790
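/* Re-apply the promiscuous mode configuration for the PF and for any VF
 * whose requested mode has changed; unicast/multicast promiscuous mode is
 * only granted to trusted VFs.
 */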
12791 static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
12792 {
12793         struct hclge_vport *vport = &hdev->vport[0];
12794         struct hnae3_handle *handle = &vport->nic;
12795         u8 tmp_flags;
12796         int ret;
12797         u16 i;
12798
12799         if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
12800                 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
12801                 vport->last_promisc_flags = vport->overflow_promisc_flags;
12802         }
12803
12804         if (test_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state)) {
12805                 tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
12806                 ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
12807                                              tmp_flags & HNAE3_MPE);
12808                 if (!ret) {
12809                         clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12810                                   &vport->state);
12811                         set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
12812                                 &vport->state);
12813                 }
12814         }
12815
12816         for (i = 1; i < hdev->num_alloc_vport; i++) {
12817                 bool uc_en = false;
12818                 bool mc_en = false;
12819                 bool bc_en;
12820
12821                 vport = &hdev->vport[i];
12822
12823                 if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12824                                         &vport->state))
12825                         continue;
12826
12827                 if (vport->vf_info.trusted) {
12828                         uc_en = vport->vf_info.request_uc_en > 0;
12829                         mc_en = vport->vf_info.request_mc_en > 0;
12830                 }
12831                 bc_en = vport->vf_info.request_bc_en > 0;
12832
12833                 ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en,
12834                                                  mc_en, bc_en);
12835                 if (ret) {
12836                         set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12837                                 &vport->state);
12838                         return;
12839                 }
12840                 hclge_set_vport_vlan_fltr_change(vport);
12841         }
12842 }
12843
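/* Query the firmware whether an SFP/transceiver module is present. */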
12844 static bool hclge_module_existed(struct hclge_dev *hdev)
12845 {
12846         struct hclge_desc desc;
12847         u32 existed;
12848         int ret;
12849
12850         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
12851         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12852         if (ret) {
12853                 dev_err(&hdev->pdev->dev,
12854                         "failed to get SFP exist state, ret = %d\n", ret);
12855                 return false;
12856         }
12857
12858         existed = le32_to_cpu(desc.data[0]);
12859
12860         return existed != 0;
12861 }
12862
12863 /* need 6 BDs (140 bytes in total) in one reading
12864  * return the number of bytes actually read, 0 means the read failed.
12865  */
12866 static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
12867                                      u32 len, u8 *data)
12868 {
12869         struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
12870         struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
12871         u16 read_len;
12872         u16 copy_len;
12873         int ret;
12874         int i;
12875
12876         /* setup all 6 bds to read module eeprom info. */
12877         for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12878                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
12879                                            true);
12880
12881                 /* bd0~bd4 need next flag */
12882                 if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
12883                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12884         }
12885
12886         /* setup bd0, this bd contains offset and read length. */
12887         sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
12888         sfp_info_bd0->offset = cpu_to_le16((u16)offset);
12889         read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
12890         sfp_info_bd0->read_len = cpu_to_le16(read_len);
12891
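        /* after the setup loop above, i equals HCLGE_SFP_INFO_CMD_NUM, so
         * all six BDs are sent in a single command
         */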
12892         ret = hclge_cmd_send(&hdev->hw, desc, i);
12893         if (ret) {
12894                 dev_err(&hdev->pdev->dev,
12895                         "failed to get SFP eeprom info, ret = %d\n", ret);
12896                 return 0;
12897         }
12898
12899         /* copy sfp info from bd0 to out buffer. */
12900         copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
12901         memcpy(data, sfp_info_bd0->data, copy_len);
12902         read_len = copy_len;
12903
12904         /* copy sfp info from bd1~bd5 to out buffer if needed. */
12905         for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12906                 if (read_len >= len)
12907                         return read_len;
12908
12909                 copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
12910                 memcpy(data + read_len, desc[i].data, copy_len);
12911                 read_len += copy_len;
12912         }
12913
12914         return read_len;
12915 }
12916
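/* Read the module EEPROM in chunks of at most HCLGE_SFP_INFO_MAX_LEN bytes
 * until the requested length has been read; only supported on fiber media.
 */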
12917 static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
12918                                    u32 len, u8 *data)
12919 {
12920         struct hclge_vport *vport = hclge_get_vport(handle);
12921         struct hclge_dev *hdev = vport->back;
12922         u32 read_len = 0;
12923         u16 data_len;
12924
12925         if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
12926                 return -EOPNOTSUPP;
12927
12928         if (!hclge_module_existed(hdev))
12929                 return -ENXIO;
12930
12931         while (read_len < len) {
12932                 data_len = hclge_get_sfp_eeprom_info(hdev,
12933                                                      offset + read_len,
12934                                                      len - read_len,
12935                                                      data + read_len);
12936                 if (!data_len)
12937                         return -EIO;
12938
12939                 read_len += data_len;
12940         }
12941
12942         return 0;
12943 }
12944
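/* Query the link diagnosis status code from the firmware; only supported
 * when the device version is above V2.
 */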
12945 static int hclge_get_link_diagnosis_info(struct hnae3_handle *handle,
12946                                          u32 *status_code)
12947 {
12948         struct hclge_vport *vport = hclge_get_vport(handle);
12949         struct hclge_dev *hdev = vport->back;
12950         struct hclge_desc desc;
12951         int ret;
12952
12953         if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2)
12954                 return -EOPNOTSUPP;
12955
12956         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_DIAGNOSIS, true);
12957         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12958         if (ret) {
12959                 dev_err(&hdev->pdev->dev,
12960                         "failed to query link diagnosis info, ret = %d\n", ret);
12961                 return ret;
12962         }
12963
12964         *status_code = le32_to_cpu(desc.data[0]);
12965         return 0;
12966 }
12967
12968 static const struct hnae3_ae_ops hclge_ops = {
12969         .init_ae_dev = hclge_init_ae_dev,
12970         .uninit_ae_dev = hclge_uninit_ae_dev,
12971         .reset_prepare = hclge_reset_prepare_general,
12972         .reset_done = hclge_reset_done,
12973         .init_client_instance = hclge_init_client_instance,
12974         .uninit_client_instance = hclge_uninit_client_instance,
12975         .map_ring_to_vector = hclge_map_ring_to_vector,
12976         .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
12977         .get_vector = hclge_get_vector,
12978         .put_vector = hclge_put_vector,
12979         .set_promisc_mode = hclge_set_promisc_mode,
12980         .request_update_promisc_mode = hclge_request_update_promisc_mode,
12981         .set_loopback = hclge_set_loopback,
12982         .start = hclge_ae_start,
12983         .stop = hclge_ae_stop,
12984         .client_start = hclge_client_start,
12985         .client_stop = hclge_client_stop,
12986         .get_status = hclge_get_status,
12987         .get_ksettings_an_result = hclge_get_ksettings_an_result,
12988         .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
12989         .get_media_type = hclge_get_media_type,
12990         .check_port_speed = hclge_check_port_speed,
12991         .get_fec = hclge_get_fec,
12992         .set_fec = hclge_set_fec,
12993         .get_rss_key_size = hclge_get_rss_key_size,
12994         .get_rss = hclge_get_rss,
12995         .set_rss = hclge_set_rss,
12996         .set_rss_tuple = hclge_set_rss_tuple,
12997         .get_rss_tuple = hclge_get_rss_tuple,
12998         .get_tc_size = hclge_get_tc_size,
12999         .get_mac_addr = hclge_get_mac_addr,
13000         .set_mac_addr = hclge_set_mac_addr,
13001         .do_ioctl = hclge_do_ioctl,
13002         .add_uc_addr = hclge_add_uc_addr,
13003         .rm_uc_addr = hclge_rm_uc_addr,
13004         .add_mc_addr = hclge_add_mc_addr,
13005         .rm_mc_addr = hclge_rm_mc_addr,
13006         .set_autoneg = hclge_set_autoneg,
13007         .get_autoneg = hclge_get_autoneg,
13008         .restart_autoneg = hclge_restart_autoneg,
13009         .halt_autoneg = hclge_halt_autoneg,
13010         .get_pauseparam = hclge_get_pauseparam,
13011         .set_pauseparam = hclge_set_pauseparam,
13012         .set_mtu = hclge_set_mtu,
13013         .reset_queue = hclge_reset_tqp,
13014         .get_stats = hclge_get_stats,
13015         .get_mac_stats = hclge_get_mac_stat,
13016         .update_stats = hclge_update_stats,
13017         .get_strings = hclge_get_strings,
13018         .get_sset_count = hclge_get_sset_count,
13019         .get_fw_version = hclge_get_fw_version,
13020         .get_mdix_mode = hclge_get_mdix_mode,
13021         .enable_vlan_filter = hclge_enable_vlan_filter,
13022         .set_vlan_filter = hclge_set_vlan_filter,
13023         .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
13024         .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
13025         .reset_event = hclge_reset_event,
13026         .get_reset_level = hclge_get_reset_level,
13027         .set_default_reset_request = hclge_set_def_reset_request,
13028         .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
13029         .set_channels = hclge_set_channels,
13030         .get_channels = hclge_get_channels,
13031         .get_regs_len = hclge_get_regs_len,
13032         .get_regs = hclge_get_regs,
13033         .set_led_id = hclge_set_led_id,
13034         .get_link_mode = hclge_get_link_mode,
13035         .add_fd_entry = hclge_add_fd_entry,
13036         .del_fd_entry = hclge_del_fd_entry,
13037         .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
13038         .get_fd_rule_info = hclge_get_fd_rule_info,
13039         .get_fd_all_rules = hclge_get_all_rules,
13040         .enable_fd = hclge_enable_fd,
13041         .add_arfs_entry = hclge_add_fd_entry_by_arfs,
13042         .dbg_read_cmd = hclge_dbg_read_cmd,
13043         .handle_hw_ras_error = hclge_handle_hw_ras_error,
13044         .get_hw_reset_stat = hclge_get_hw_reset_stat,
13045         .ae_dev_resetting = hclge_ae_dev_resetting,
13046         .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
13047         .set_gro_en = hclge_gro_en,
13048         .get_global_queue_id = hclge_covert_handle_qid_global,
13049         .set_timer_task = hclge_set_timer_task,
13050         .mac_connect_phy = hclge_mac_connect_phy,
13051         .mac_disconnect_phy = hclge_mac_disconnect_phy,
13052         .get_vf_config = hclge_get_vf_config,
13053         .set_vf_link_state = hclge_set_vf_link_state,
13054         .set_vf_spoofchk = hclge_set_vf_spoofchk,
13055         .set_vf_trust = hclge_set_vf_trust,
13056         .set_vf_rate = hclge_set_vf_rate,
13057         .set_vf_mac = hclge_set_vf_mac,
13058         .get_module_eeprom = hclge_get_module_eeprom,
13059         .get_cmdq_stat = hclge_get_cmdq_stat,
13060         .add_cls_flower = hclge_add_cls_flower,
13061         .del_cls_flower = hclge_del_cls_flower,
13062         .cls_flower_active = hclge_is_cls_flower_active,
13063         .get_phy_link_ksettings = hclge_get_phy_link_ksettings,
13064         .set_phy_link_ksettings = hclge_set_phy_link_ksettings,
13065         .set_tx_hwts_info = hclge_ptp_set_tx_info,
13066         .get_rx_hwts = hclge_ptp_get_rx_hwts,
13067         .get_ts_info = hclge_ptp_get_ts_info,
13068         .get_link_diagnosis_info = hclge_get_link_diagnosis_info,
13069 };
13070
13071 static struct hnae3_ae_algo ae_algo = {
13072         .ops = &hclge_ops,
13073         .pdev_id_table = ae_algo_pci_tbl,
13074 };
13075
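/* Module init: create the hclge workqueue and register this algo with the
 * HNAE3 framework.
 */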
13076 static int hclge_init(void)
13077 {
13078         pr_info("%s is initializing\n", HCLGE_NAME);
13079
13080         hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
13081         if (!hclge_wq) {
13082                 pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
13083                 return -ENOMEM;
13084         }
13085
13086         hnae3_register_ae_algo(&ae_algo);
13087
13088         return 0;
13089 }
13090
13091 static void hclge_exit(void)
13092 {
13093         hnae3_unregister_ae_algo(&ae_algo);
13094         destroy_workqueue(hclge_wq);
13095 }
13096 module_init(hclge_init);
13097 module_exit(hclge_exit);
13098
13099 MODULE_LICENSE("GPL");
13100 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
13101 MODULE_DESCRIPTION("HCLGE Driver");
13102 MODULE_VERSION(HCLGE_MOD_VERSION);