// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"

#define HCLGE_NAME			"hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))

#define HCLGE_BUF_SIZE_UNIT	256

static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
			       u16 *allocated_size, bool is_alloc);

static struct hnae3_ae_algo ae_algo;

static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);

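/* Register address tables, grouped by block: command queue, common/misc,
 * per-ring and per-TQP interrupt registers. The driver walks these tables
 * when dumping registers (e.g. for the ethtool register-dump path).
 */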
static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
					 HCLGE_CMDQ_TX_ADDR_H_REG,
					 HCLGE_CMDQ_TX_DEPTH_REG,
					 HCLGE_CMDQ_TX_TAIL_REG,
					 HCLGE_CMDQ_TX_HEAD_REG,
					 HCLGE_CMDQ_RX_ADDR_L_REG,
					 HCLGE_CMDQ_RX_ADDR_H_REG,
					 HCLGE_CMDQ_RX_DEPTH_REG,
					 HCLGE_CMDQ_RX_TAIL_REG,
					 HCLGE_CMDQ_RX_HEAD_REG,
					 HCLGE_VECTOR0_CMDQ_SRC_REG,
					 HCLGE_CMDQ_INTR_STS_REG,
					 HCLGE_CMDQ_INTR_EN_REG,
					 HCLGE_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
					   HCLGE_VECTOR0_OTER_EN_REG,
					   HCLGE_MISC_RESET_STS_REG,
					   HCLGE_MISC_VECTOR_INT_STS,
					   HCLGE_GLOBAL_RESET_REG,
					   HCLGE_FUN_RST_ING,
					   HCLGE_GRO_EN_REG};

static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
					 HCLGE_RING_RX_ADDR_H_REG,
					 HCLGE_RING_RX_BD_NUM_REG,
					 HCLGE_RING_RX_BD_LENGTH_REG,
					 HCLGE_RING_RX_MERGE_EN_REG,
					 HCLGE_RING_RX_TAIL_REG,
					 HCLGE_RING_RX_HEAD_REG,
					 HCLGE_RING_RX_FBD_NUM_REG,
					 HCLGE_RING_RX_OFFSET_REG,
					 HCLGE_RING_RX_FBD_OFFSET_REG,
					 HCLGE_RING_RX_STASH_REG,
					 HCLGE_RING_RX_BD_ERR_REG,
					 HCLGE_RING_TX_ADDR_L_REG,
					 HCLGE_RING_TX_ADDR_H_REG,
					 HCLGE_RING_TX_BD_NUM_REG,
					 HCLGE_RING_TX_PRIORITY_REG,
					 HCLGE_RING_TX_TC_REG,
					 HCLGE_RING_TX_MERGE_EN_REG,
					 HCLGE_RING_TX_TAIL_REG,
					 HCLGE_RING_TX_HEAD_REG,
					 HCLGE_RING_TX_FBD_NUM_REG,
					 HCLGE_RING_TX_OFFSET_REG,
					 HCLGE_RING_TX_EBD_NUM_REG,
					 HCLGE_RING_TX_EBD_OFFSET_REG,
					 HCLGE_RING_TX_BD_ERR_REG,
					 HCLGE_RING_EN_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
					     HCLGE_TQP_INTR_GL0_REG,
					     HCLGE_TQP_INTR_GL1_REG,
					     HCLGE_TQP_INTR_GL2_REG,
					     HCLGE_TQP_INTR_RL_REG};

static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"App Loopback test",
	"Serdes serial Loopback test",
	"Serdes parallel Loopback test",
	"Phy Loopback test"
};

static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};

static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
		.mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
		.mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
		.i_port_bitmap = 0x1,
	},
};

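/* Read the whole MAC statistics block from firmware and accumulate it into
 * hdev->hw_stats.mac_stats. The response spans HCLGE_MAC_CMD_NUM descriptors;
 * the first descriptor starts at data[0] (its head holds the command header)
 * and yields two u64 counters, while each following descriptor is consumed
 * whole, yielding HCLGE_RTN_DATA_NUM u64 counters.
 */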
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21
#define HCLGE_RTN_DATA_NUM 4

	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);

		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RTN_DATA_NUM - 2;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RTN_DATA_NUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le64_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}

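/* Accumulate the per-queue RX/TX packet counters. Each TQP is queried with
 * a single-descriptor command (HCLGE_OPC_QUERY_RX_STATUS or
 * HCLGE_OPC_QUERY_TX_STATUS): the queue index goes in data[0] and the
 * received/sent packet count comes back in data[1].
 */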
static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_RX_STATUS */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_RX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_TX_STATUS */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}

static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	/* each TQP contributes one TX and one RX counter */
	return kinfo->num_tqps * 2;
}

static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i = 0;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
			struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
			struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}

static u64 *hclge_comm_get_stats(void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}

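/* Fold the accumulated MAC hardware counters into the standard
 * net_device_stats fields (rx_errors, multicast, CRC, length and
 * overflow error counts).
 */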
static void hclge_update_netstat(struct hclge_hw_stats *hw_stats,
				 struct net_device_stats *net_stats)
{
	net_stats->tx_dropped = 0;
	net_stats->rx_errors = hw_stats->mac_stats.mac_rx_oversize_pkt_num;
	net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num;
	net_stats->rx_errors += hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;

	net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num;
	net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num;

	net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;
	net_stats->rx_length_errors =
		hw_stats->mac_stats.mac_rx_undersize_pkt_num;
	net_stats->rx_length_errors +=
		hw_stats->mac_stats.mac_rx_oversize_pkt_num;
	net_stats->rx_over_errors =
		hw_stats->mac_stats.mac_rx_oversize_pkt_num;
}

static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
		}
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);

	hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats);
}

static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_hw_stats *hw_stats = &hdev->hw_stats;
	int status;

	/* the UPDATING bit guards against concurrent stats updates */
	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	hclge_update_netstat(hw_stats, net_stats);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}

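/* ethtool string-set support: for ETH_SS_TEST the count depends on which
 * loopback modes this device supports; for ETH_SS_STATS it is the MAC
 * statistics table plus the per-queue counters.
 */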
static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
		HNAE3_SUPPORT_PHY_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only GE mode is supported
	 * serdes: all mac modes are supported, including GE/XGE/LGE/CGE
	 * phy: only supported when a phy device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->pdev->revision >= 0x21 ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
		}

		count += 2;
		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}

static void hclge_get_strings(struct hnae3_handle *handle,
			      u32 stringset,
			      u8 *data)
{
	u8 *p = (char *)data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_mac_stats_string,
					   size,
					   p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_APP],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}

static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
				 g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string),
				 data);
	p = hclge_tqps_get_stats(handle, p);
}

static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	return 0;
}

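/* Poll firmware for the function status. Besides reporting whether this
 * function is the "main" PF, a non-zero pf_state also indicates that any
 * pending PF reset has completed, so the query is retried a few times
 * with short sleeps until that is the case.
 */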
static int hclge_query_function_status(struct hclge_dev *hdev)
{
	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n",
				ret);

			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < 5);

	ret = hclge_parse_func_status(hdev, req);

	return ret;
}

static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (req->tx_buf_size)
		hdev->tx_buf_size =
			__le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (req->dv_buf_size)
		hdev->dv_buf_size =
			__le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->dv_buf_size = HCLGE_DEFAULT_DV;

	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		/* The PF owns both NIC and RoCE vectors; NIC vectors
		 * are laid out before RoCE vectors.
		 */
		hdev->num_msi = hdev->num_roce_msi +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
	}

	return 0;
}

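/* Translate the firmware speed code from the configuration block into an
 * HCLGE_MAC_SPEED_* value; note the codes are not in ascending speed order.
 */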
static int hclge_parse_speed(int speed_cmd, int *speed)
{
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u8 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
			supported);

	set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
	set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
}

static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type != HNAE3_MEDIA_TYPE_FIBER)
		return;

	hclge_parse_fiber_link_mode(hdev, speed_ability);
}

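/* Unpack the static configuration read from flash. Fields are packed into
 * u32 parameter words across two descriptors and extracted with
 * hnae3_get_field(); the 48-bit MAC address is split between param[2] and
 * the low bits of param[3].
 */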
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u64 mac_addr_tmp;
	int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					      HCLGE_CFG_VMDQ_M,
					      HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_RSS_SIZE_M,
					    HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);
	if (!cfg->umv_space)
		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}

/* hclge_get_cfg: query the static parameters from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled in
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	int i, ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* Length must be in units of 4 bytes when sent to hardware */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}

static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	ret = hclge_query_pf_resource(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);

	return ret;
}

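/* Apply the parsed flash configuration to the driver state: vport and TQP
 * counts, buffer sizes, MAC address, default speed, and the TC/PFC limits
 * (forced down to a single TC when the device does not support DCB).
 */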
static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	int ret, i;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
		return ret;
	}

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->rss_size_max = cfg.rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;
	hdev->wanted_umv_size = cfg.umv_space;

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = hdev->tc_max;

	/* Non-contiguous TC maps are currently not supported */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	return ret;
}

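/* Program the TSO MSS limits. Note that both 16-bit words use the same
 * in-word field layout, which appears to be why the MIN mask/shift pair
 * is reused below when packing tso_mss_max.
 */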
static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
			    int tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;
	u16 tso_mss;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_min);
	req->tso_mss_min = cpu_to_le16(tso_mss);

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_max);
	req->tso_mss_max = cpu_to_le16(tso_mss);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_config_gro(struct hclge_dev *hdev, bool en)
{
	struct hclge_cfg_gro_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
	req = (struct hclge_cfg_gro_status_cmd *)desc.data;

	req->gro_en = cpu_to_le16(en ? 1 : 0);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"GRO hardware config cmd failed, ret = %d\n", ret);

	return ret;
}

static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.desc_num = hdev->num_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
			i * HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
			1 << HCLGE_TQP_MAP_EN_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

	return ret;
}

static int hclge_assign_tqp(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < kinfo->num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			hdev->htqp[i].q.desc_num = kinfo->num_desc;
			kinfo->tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = kinfo->num_tqps;

	return 0;
}

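/* Set up the knic private info for a vport: rss_size is bounded by the
 * hardware maximum and by the queues available per TC, so num_tqps ends
 * up as rss_size * num_tc; the TQP pointer array is then allocated and
 * populated by hclge_assign_tqp().
 */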
static int hclge_knic_setup(struct hclge_vport *vport,
			    u16 num_tqps, u16 num_desc)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, ret;

	kinfo->num_desc = num_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc);
	kinfo->rss_size
		= min_t(u16, hdev->rss_size_max, num_tqps / kinfo->num_tc);
	kinfo->num_tqps = kinfo->rss_size * kinfo->num_tc;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (hdev->hw_tc_map & BIT(i)) {
			kinfo->tc_info[i].enable = true;
			kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
			kinfo->tc_info[i].tqp_count = kinfo->rss_size;
			kinfo->tc_info[i].tc = i;
		} else {
			/* Set to default queue if TC is disabled */
			kinfo->tc_info[i].enable = false;
			kinfo->tc_info[i].tqp_offset = 0;
			kinfo->tc_info[i].tqp_count = 1;
			kinfo->tc_info[i].tc = 0;
		}
	}

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);

	return ret;
}

static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++) {
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
{
	/* this would be initialized later */
}

static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
		ret = hclge_knic_setup(vport, num_tqps, hdev->num_desc);
		if (ret) {
			dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
				ret);
			return ret;
		}
	} else {
		hclge_unic_setup(vport, num_tqps);
	}

	return 0;
}

static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for main NIC of PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;
		vport->mps = HCLGE_MAC_DEFAULT_FRAME;

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}

static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);

	return ret;
}

static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);

	return ret;
}

static int hclge_get_tc_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}

static int hclge_get_pfc_enable_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i))
			cnt++;
	return cnt;
}

/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}

static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}

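/* Check whether the RX buffer left over after the private per-TC buffers
 * can cover the required shared buffer; if so, also compute the shared
 * buffer size and its global and per-TC flow-control thresholds.
 */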
acf61ecd YL |
1383 | static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev, |
1384 | struct hclge_pkt_buf_alloc *buf_alloc, | |
1385 | u32 rx_all) | |
46a3df9f S |
1386 | { |
1387 | u32 shared_buf_min, shared_buf_tc, shared_std; | |
1388 | int tc_num, pfc_enable_num; | |
b9a400ac | 1389 | u32 shared_buf, aligned_mps; |
46a3df9f S |
1390 | u32 rx_priv; |
1391 | int i; | |
1392 | ||
1393 | tc_num = hclge_get_tc_num(hdev); | |
1394 | pfc_enable_num = hclge_get_pfc_enalbe_num(hdev); | |
b9a400ac | 1395 | aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT); |
46a3df9f | 1396 | |
d221df4e | 1397 | if (hnae3_dev_dcb_supported(hdev)) |
b9a400ac | 1398 | shared_buf_min = 2 * aligned_mps + hdev->dv_buf_size; |
d221df4e | 1399 | else |
b9a400ac | 1400 | shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF |
368686be | 1401 | + hdev->dv_buf_size; |
d221df4e | 1402 | |
b9a400ac YL |
1403 | shared_buf_tc = pfc_enable_num * aligned_mps + |
1404 | (tc_num - pfc_enable_num) * aligned_mps / 2 + | |
1405 | aligned_mps; | |
46a3df9f S |
1406 | shared_std = max_t(u32, shared_buf_min, shared_buf_tc); |
1407 | ||
acf61ecd | 1408 | rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc); |
46a3df9f S |
1409 | if (rx_all <= rx_priv + shared_std) |
1410 | return false; | |
1411 | ||
b9a400ac | 1412 | shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT); |
acf61ecd | 1413 | buf_alloc->s_buf.buf_size = shared_buf; |
368686be YL |
1414 | if (hnae3_dev_dcb_supported(hdev)) { |
1415 | buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size; | |
1416 | buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high | |
b9a400ac | 1417 | - roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT); |
368686be | 1418 | } else { |
b9a400ac | 1419 | buf_alloc->s_buf.self.high = aligned_mps + |
368686be | 1420 | HCLGE_NON_DCB_ADDITIONAL_BUF; |
b9a400ac YL |
1421 | buf_alloc->s_buf.self.low = |
1422 | roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT); | |
368686be | 1423 | } |
46a3df9f S |
1424 | |
1425 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { | |
1426 | if ((hdev->hw_tc_map & BIT(i)) && | |
1427 | (hdev->tm_info.hw_pfc_map & BIT(i))) { | |
b9a400ac YL |
1428 | buf_alloc->s_buf.tc_thrd[i].low = aligned_mps; |
1429 | buf_alloc->s_buf.tc_thrd[i].high = 2 * aligned_mps; | |
46a3df9f | 1430 | } else { |
acf61ecd | 1431 | buf_alloc->s_buf.tc_thrd[i].low = 0; |
b9a400ac | 1432 | buf_alloc->s_buf.tc_thrd[i].high = aligned_mps; |
46a3df9f S |
1433 | } |
1434 | } | |
1435 | ||
1436 | return true; | |
1437 | } | |
1438 | ||
acf61ecd YL |
1439 | static int hclge_tx_buffer_calc(struct hclge_dev *hdev, |
1440 | struct hclge_pkt_buf_alloc *buf_alloc) | |
9ffe79a9 YL |
1441 | { |
1442 | u32 i, total_size; | |
1443 | ||
1444 | total_size = hdev->pkt_buf_size; | |
1445 | ||
1446 | /* alloc tx buffer for all enabled tc */ | |
1447 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { | |
acf61ecd | 1448 | struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; |
9ffe79a9 | 1449 | |
368686be | 1450 | if (total_size < hdev->tx_buf_size) |
9ffe79a9 YL |
1451 | return -ENOMEM; |
1452 | ||
1453 | if (hdev->hw_tc_map & BIT(i)) | |
368686be | 1454 | priv->tx_buf_size = hdev->tx_buf_size; |
9ffe79a9 YL |
1455 | else |
1456 | priv->tx_buf_size = 0; | |
1457 | ||
1458 | total_size -= priv->tx_buf_size; | |
1459 | } | |
1460 | ||
1461 | return 0; | |
1462 | } | |
1463 | ||
46a3df9f S |
1464 | /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs |
1465 | * @hdev: pointer to struct hclge_dev | |
acf61ecd | 1466 | * @buf_alloc: pointer to buffer calculation data |
46a3df9f S |
1467 | * @return: 0: calculate sucessful, negative: fail |
1468 | */ | |
1db9b1bf YL |
1469 | static int hclge_rx_buffer_calc(struct hclge_dev *hdev, |
1470 | struct hclge_pkt_buf_alloc *buf_alloc) | |
46a3df9f | 1471 | { |
996ff918 | 1472 | u32 rx_all = hdev->pkt_buf_size, aligned_mps; |
46a3df9f S |
1473 | int no_pfc_priv_num, pfc_priv_num; |
1474 | struct hclge_priv_buf *priv; | |
1475 | int i; | |
1476 | ||
996ff918 | 1477 | aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT); |
acf61ecd | 1478 | rx_all -= hclge_get_tx_buff_alloced(buf_alloc); |
9ffe79a9 | 1479 | |
d602a525 YL |
1480 | /* When DCB is not supported, rx private |
1481 | * buffer is not allocated. | |
1482 | */ | |
1483 | if (!hnae3_dev_dcb_supported(hdev)) { | |
acf61ecd | 1484 | if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) |
d602a525 YL |
1485 | return -ENOMEM; |
1486 | ||
1487 | return 0; | |
1488 | } | |
1489 | ||
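	/* Strategy: start with a full private buffer for every enabled TC,
	 * then shrink or drop private buffers in steps 1-4 below until the
	 * remaining space also satisfies the shared-buffer requirement.
	 */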
46a3df9f S |
1490 | /* step 1, try to alloc private buffer for all enabled tc */ |
1491 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { | |
acf61ecd | 1492 | priv = &buf_alloc->priv_buf[i]; |
46a3df9f S |
1493 | if (hdev->hw_tc_map & BIT(i)) { |
1494 | priv->enable = 1; | |
1495 | if (hdev->tm_info.hw_pfc_map & BIT(i)) { | |
996ff918 | 1496 | priv->wl.low = aligned_mps; |
b9a400ac YL |
1497 | priv->wl.high = |
1498 | roundup(priv->wl.low + aligned_mps, | |
1499 | HCLGE_BUF_SIZE_UNIT); | |
46a3df9f | 1500 | priv->buf_size = priv->wl.high + |
b9a400ac | 1501 | hdev->dv_buf_size; |
46a3df9f S |
1502 | } else { |
1503 | priv->wl.low = 0; | |
996ff918 | 1504 | priv->wl.high = 2 * aligned_mps; |
368686be YL |
1505 | priv->buf_size = priv->wl.high + |
1506 | hdev->dv_buf_size; | |
46a3df9f | 1507 | } |
bb1fe9ea YL |
1508 | } else { |
1509 | priv->enable = 0; | |
1510 | priv->wl.low = 0; | |
1511 | priv->wl.high = 0; | |
1512 | priv->buf_size = 0; | |
46a3df9f S |
1513 | } |
1514 | } | |
1515 | ||
acf61ecd | 1516 | if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) |
46a3df9f S |
1517 | return 0; |
1518 | ||
1519 | /* step 2, try to decrease the buffer size of | |
1520 | * no pfc TC's private buffer | |
1521 | */ | |
1522 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { | |
acf61ecd | 1523 | priv = &buf_alloc->priv_buf[i]; |
46a3df9f | 1524 | |
bb1fe9ea YL |
1525 | priv->enable = 0; |
1526 | priv->wl.low = 0; | |
1527 | priv->wl.high = 0; | |
1528 | priv->buf_size = 0; | |
1529 | ||
1530 | if (!(hdev->hw_tc_map & BIT(i))) | |
1531 | continue; | |
1532 | ||
1533 | priv->enable = 1; | |
46a3df9f S |
1534 | |
1535 | if (hdev->tm_info.hw_pfc_map & BIT(i)) { | |
b9a400ac | 1536 | priv->wl.low = 256; |
996ff918 | 1537 | priv->wl.high = priv->wl.low + aligned_mps; |
368686be | 1538 | priv->buf_size = priv->wl.high + hdev->dv_buf_size; |
46a3df9f S |
1539 | } else { |
1540 | priv->wl.low = 0; | |
996ff918 | 1541 | priv->wl.high = aligned_mps; |
368686be | 1542 | priv->buf_size = priv->wl.high + hdev->dv_buf_size; |
46a3df9f S |
1543 | } |
1544 | } | |
1545 | ||
acf61ecd | 1546 | if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) |
46a3df9f S |
1547 | return 0; |
1548 | ||
1549 | /* step 3, try to reduce the number of pfc disabled TCs
1550 | * which have a private buffer
1551 | */ | |
1552 | /* get the number of TCs with PFC disabled which still have a private buffer */
acf61ecd | 1553 | no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc); |
46a3df9f S |
1554 | |
1555 | /* clear from the last TC backwards */
1556 | for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) { | |
acf61ecd | 1557 | priv = &buf_alloc->priv_buf[i]; |
46a3df9f S |
1558 | |
1559 | if (hdev->hw_tc_map & BIT(i) && | |
1560 | !(hdev->tm_info.hw_pfc_map & BIT(i))) { | |
1561 | /* Clear the no pfc TC private buffer */ | |
1562 | priv->wl.low = 0; | |
1563 | priv->wl.high = 0; | |
1564 | priv->buf_size = 0; | |
1565 | priv->enable = 0; | |
1566 | no_pfc_priv_num--; | |
1567 | } | |
1568 | ||
acf61ecd | 1569 | if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) || |
46a3df9f S |
1570 | no_pfc_priv_num == 0) |
1571 | break; | |
1572 | } | |
1573 | ||
acf61ecd | 1574 | if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) |
46a3df9f S |
1575 | return 0; |
1576 | ||
1577 | /* step 4, try to reduce the number of pfc enabled TCs | |
1578 | * which have private buffer. | |
1579 | */ | |
acf61ecd | 1580 | pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc); |
46a3df9f S |
1581 | |
1582 | /* clear from the last TC backwards */
1583 | for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) { | |
acf61ecd | 1584 | priv = &buf_alloc->priv_buf[i]; |
46a3df9f S |
1585 | |
1586 | if (hdev->hw_tc_map & BIT(i) && | |
1587 | hdev->tm_info.hw_pfc_map & BIT(i)) { | |
1588 | /* Reduce the number of pfc TC with private buffer */ | |
1589 | priv->wl.low = 0; | |
1590 | priv->enable = 0; | |
1591 | priv->wl.high = 0; | |
1592 | priv->buf_size = 0; | |
1593 | pfc_priv_num--; | |
1594 | } | |
1595 | ||
acf61ecd | 1596 | if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) || |
46a3df9f S |
1597 | pfc_priv_num == 0) |
1598 | break; | |
1599 | } | |
acf61ecd | 1600 | if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) |
46a3df9f S |
1601 | return 0; |
1602 | ||
1603 | return -ENOMEM; | |
1604 | } | |
1605 | ||
acf61ecd YL |
1606 | static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev, |
1607 | struct hclge_pkt_buf_alloc *buf_alloc) | |
46a3df9f | 1608 | { |
d44f9b63 | 1609 | struct hclge_rx_priv_buff_cmd *req; |
46a3df9f S |
1610 | struct hclge_desc desc; |
1611 | int ret; | |
1612 | int i; | |
1613 | ||
1614 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false); | |
d44f9b63 | 1615 | req = (struct hclge_rx_priv_buff_cmd *)desc.data; |
46a3df9f S |
1616 | |
1617 | /* Alloc private buffer TCs */ | |
1618 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { | |
acf61ecd | 1619 | struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; |
46a3df9f S |
1620 | |
1621 | req->buf_num[i] = | |
1622 | cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S); | |
1623 | req->buf_num[i] |= | |
5bca3b94 | 1624 | cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B); |
46a3df9f S |
1625 | } |
1626 | ||
b8c8bf47 | 1627 | req->shared_buf = |
acf61ecd | 1628 | cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) | |
b8c8bf47 YL |
1629 | (1 << HCLGE_TC0_PRI_BUF_EN_B)); |
1630 | ||
46a3df9f | 1631 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); |
3f639907 | 1632 | if (ret) |
46a3df9f S |
1633 | dev_err(&hdev->pdev->dev, |
1634 | "rx private buffer alloc cmd failed %d\n", ret); | |
46a3df9f | 1635 | |
3f639907 | 1636 | return ret; |
46a3df9f S |
1637 | } |
1638 | ||
acf61ecd YL |
1639 | static int hclge_rx_priv_wl_config(struct hclge_dev *hdev, |
1640 | struct hclge_pkt_buf_alloc *buf_alloc) | |
46a3df9f S |
1641 | { |
1642 | struct hclge_rx_priv_wl_buf *req; | |
1643 | struct hclge_priv_buf *priv; | |
1644 | struct hclge_desc desc[2]; | |
1645 | int i, j; | |
1646 | int ret; | |
1647 | ||
1648 | for (i = 0; i < 2; i++) { | |
1649 | hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC, | |
1650 | false); | |
1651 | req = (struct hclge_rx_priv_wl_buf *)desc[i].data; | |
1652 | ||
1653 | /* The first descriptor sets the NEXT bit to 1 */
1654 | if (i == 0) | |
1655 | desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); | |
1656 | else | |
1657 | desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT); | |
1658 | ||
1659 | for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) { | |
acf61ecd YL |
1660 | u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j; |
1661 | ||
1662 | priv = &buf_alloc->priv_buf[idx]; | |
46a3df9f S |
1663 | req->tc_wl[j].high = |
1664 | cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S); | |
1665 | req->tc_wl[j].high |= | |
3738287c | 1666 | cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); |
46a3df9f S |
1667 | req->tc_wl[j].low = |
1668 | cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S); | |
1669 | req->tc_wl[j].low |= | |
3738287c | 1670 | cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); |
46a3df9f S |
1671 | } |
1672 | } | |
1673 | ||
1674 | /* Send 2 descriptors at one time */
1675 | ret = hclge_cmd_send(&hdev->hw, desc, 2); | |
3f639907 | 1676 | if (ret) |
46a3df9f S |
1677 | dev_err(&hdev->pdev->dev, |
1678 | "rx private waterline config cmd failed %d\n", | |
1679 | ret); | |
3f639907 | 1680 | return ret; |
46a3df9f S |
1681 | } |
1682 | ||
acf61ecd YL |
1683 | static int hclge_common_thrd_config(struct hclge_dev *hdev, |
1684 | struct hclge_pkt_buf_alloc *buf_alloc) | |
46a3df9f | 1685 | { |
acf61ecd | 1686 | struct hclge_shared_buf *s_buf = &buf_alloc->s_buf; |
46a3df9f S |
1687 | struct hclge_rx_com_thrd *req; |
1688 | struct hclge_desc desc[2]; | |
1689 | struct hclge_tc_thrd *tc; | |
1690 | int i, j; | |
1691 | int ret; | |
1692 | ||
1693 | for (i = 0; i < 2; i++) { | |
1694 | hclge_cmd_setup_basic_desc(&desc[i], | |
1695 | HCLGE_OPC_RX_COM_THRD_ALLOC, false); | |
1696 | req = (struct hclge_rx_com_thrd *)&desc[i].data; | |
1697 | ||
1698 | /* The first descriptor sets the NEXT bit to 1 */
1699 | if (i == 0) | |
1700 | desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); | |
1701 | else | |
1702 | desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT); | |
1703 | ||
1704 | for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) { | |
1705 | tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j]; | |
1706 | ||
1707 | req->com_thrd[j].high = | |
1708 | cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S); | |
1709 | req->com_thrd[j].high |= | |
3738287c | 1710 | cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); |
46a3df9f S |
1711 | req->com_thrd[j].low = |
1712 | cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S); | |
1713 | req->com_thrd[j].low |= | |
3738287c | 1714 | cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); |
46a3df9f S |
1715 | } |
1716 | } | |
1717 | ||
1718 | /* Send 2 descriptors at one time */ | |
1719 | ret = hclge_cmd_send(&hdev->hw, desc, 2); | |
3f639907 | 1720 | if (ret) |
46a3df9f S |
1721 | dev_err(&hdev->pdev->dev, |
1722 | "common threshold config cmd failed %d\n", ret); | |
3f639907 | 1723 | return ret; |
46a3df9f S |
1724 | } |
1725 | ||
acf61ecd YL |
1726 | static int hclge_common_wl_config(struct hclge_dev *hdev, |
1727 | struct hclge_pkt_buf_alloc *buf_alloc) | |
46a3df9f | 1728 | { |
acf61ecd | 1729 | struct hclge_shared_buf *buf = &buf_alloc->s_buf; |
46a3df9f S |
1730 | struct hclge_rx_com_wl *req; |
1731 | struct hclge_desc desc; | |
1732 | int ret; | |
1733 | ||
1734 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false); | |
1735 | ||
1736 | req = (struct hclge_rx_com_wl *)desc.data; | |
1737 | req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S); | |
3738287c | 1738 | req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); |
46a3df9f S |
1739 | |
1740 | req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S); | |
3738287c | 1741 | req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); |
46a3df9f S |
1742 | |
1743 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
3f639907 | 1744 | if (ret) |
46a3df9f S |
1745 | dev_err(&hdev->pdev->dev, |
1746 | "common waterline config cmd failed %d\n", ret); | |
46a3df9f | 1747 | |
3f639907 | 1748 | return ret; |
46a3df9f S |
1749 | } |
1750 | ||
1751 | int hclge_buffer_alloc(struct hclge_dev *hdev) | |
1752 | { | |
acf61ecd | 1753 | struct hclge_pkt_buf_alloc *pkt_buf; |
46a3df9f S |
1754 | int ret; |
1755 | ||
acf61ecd YL |
1756 | pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL); |
1757 | if (!pkt_buf) | |
46a3df9f S |
1758 | return -ENOMEM; |
1759 | ||
acf61ecd | 1760 | ret = hclge_tx_buffer_calc(hdev, pkt_buf); |
9ffe79a9 YL |
1761 | if (ret) { |
1762 | dev_err(&hdev->pdev->dev, | |
1763 | "could not calc tx buffer size for all TCs %d\n", ret); | |
acf61ecd | 1764 | goto out; |
9ffe79a9 YL |
1765 | } |
1766 | ||
acf61ecd | 1767 | ret = hclge_tx_buffer_alloc(hdev, pkt_buf); |
46a3df9f S |
1768 | if (ret) { |
1769 | dev_err(&hdev->pdev->dev, | |
1770 | "could not alloc tx buffers %d\n", ret); | |
acf61ecd | 1771 | goto out; |
46a3df9f S |
1772 | } |
1773 | ||
acf61ecd | 1774 | ret = hclge_rx_buffer_calc(hdev, pkt_buf); |
46a3df9f S |
1775 | if (ret) { |
1776 | dev_err(&hdev->pdev->dev, | |
1777 | "could not calc rx priv buffer size for all TCs %d\n", | |
1778 | ret); | |
acf61ecd | 1779 | goto out; |
46a3df9f S |
1780 | } |
1781 | ||
acf61ecd | 1782 | ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf); |
46a3df9f S |
1783 | if (ret) { |
1784 | dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n", | |
1785 | ret); | |
acf61ecd | 1786 | goto out; |
46a3df9f S |
1787 | } |
1788 | ||
2daf4a65 | 1789 | if (hnae3_dev_dcb_supported(hdev)) { |
acf61ecd | 1790 | ret = hclge_rx_priv_wl_config(hdev, pkt_buf); |
2daf4a65 YL |
1791 | if (ret) { |
1792 | dev_err(&hdev->pdev->dev, | |
1793 | "could not configure rx private waterline %d\n", | |
1794 | ret); | |
acf61ecd | 1795 | goto out; |
2daf4a65 | 1796 | } |
46a3df9f | 1797 | |
acf61ecd | 1798 | ret = hclge_common_thrd_config(hdev, pkt_buf); |
2daf4a65 YL |
1799 | if (ret) { |
1800 | dev_err(&hdev->pdev->dev, | |
1801 | "could not configure common threshold %d\n", | |
1802 | ret); | |
acf61ecd | 1803 | goto out; |
2daf4a65 | 1804 | } |
46a3df9f S |
1805 | } |
1806 | ||
acf61ecd YL |
1807 | ret = hclge_common_wl_config(hdev, pkt_buf); |
1808 | if (ret) | |
46a3df9f S |
1809 | dev_err(&hdev->pdev->dev, |
1810 | "could not configure common waterline %d\n", ret); | |
46a3df9f | 1811 | |
acf61ecd YL |
1812 | out: |
1813 | kfree(pkt_buf); | |
1814 | return ret; | |
46a3df9f S |
1815 | } |
1816 | ||
1817 | static int hclge_init_roce_base_info(struct hclge_vport *vport) | |
1818 | { | |
1819 | struct hnae3_handle *roce = &vport->roce; | |
1820 | struct hnae3_handle *nic = &vport->nic; | |
1821 | ||
887c3820 | 1822 | roce->rinfo.num_vectors = vport->back->num_roce_msi; |
46a3df9f S |
1823 | |
1824 | if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors || | |
1825 | vport->back->num_msi_left == 0) | |
1826 | return -EINVAL; | |
1827 | ||
1828 | roce->rinfo.base_vector = vport->back->roce_base_vector; | |
1829 | ||
1830 | roce->rinfo.netdev = nic->kinfo.netdev; | |
1831 | roce->rinfo.roce_io_base = vport->back->hw.io_base; | |
1832 | ||
1833 | roce->pdev = nic->pdev; | |
1834 | roce->ae_algo = nic->ae_algo; | |
1835 | roce->numa_node_mask = nic->numa_node_mask; | |
1836 | ||
1837 | return 0; | |
1838 | } | |
1839 | ||
887c3820 | 1840 | static int hclge_init_msi(struct hclge_dev *hdev) |
46a3df9f S |
1841 | { |
1842 | struct pci_dev *pdev = hdev->pdev; | |
887c3820 SM |
1843 | int vectors; |
1844 | int i; | |
46a3df9f | 1845 | |
887c3820 SM |
1846 | vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi, |
1847 | PCI_IRQ_MSI | PCI_IRQ_MSIX); | |
1848 | if (vectors < 0) { | |
1849 | dev_err(&pdev->dev, | |
1850 | "failed(%d) to allocate MSI/MSI-X vectors\n", | |
1851 | vectors); | |
1852 | return vectors; | |
46a3df9f | 1853 | } |
887c3820 SM |
1854 | if (vectors < hdev->num_msi) |
1855 | dev_warn(&hdev->pdev->dev, | |
1856 | "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n", | |
1857 | hdev->num_msi, vectors); | |
46a3df9f | 1858 | |
887c3820 SM |
1859 | hdev->num_msi = vectors; |
1860 | hdev->num_msi_left = vectors; | |
1861 | hdev->base_msi_vector = pdev->irq; | |
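	/* RoCE vectors are assumed to start at roce_base_msix_offset, past
	 * the vectors reserved for NIC and misc use
	 */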
46a3df9f | 1862 | hdev->roce_base_vector = hdev->base_msi_vector + |
375dd5e4 | 1863 | hdev->roce_base_msix_offset; |
46a3df9f | 1864 | |
46a3df9f S |
1865 | hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, |
1866 | sizeof(u16), GFP_KERNEL); | |
887c3820 SM |
1867 | if (!hdev->vector_status) { |
1868 | pci_free_irq_vectors(pdev); | |
46a3df9f | 1869 | return -ENOMEM; |
887c3820 | 1870 | } |
46a3df9f S |
1871 | |
1872 | for (i = 0; i < hdev->num_msi; i++) | |
1873 | hdev->vector_status[i] = HCLGE_INVALID_VPORT; | |
1874 | ||
887c3820 SM |
1875 | hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, |
1876 | sizeof(int), GFP_KERNEL); | |
1877 | if (!hdev->vector_irq) { | |
1878 | pci_free_irq_vectors(pdev); | |
1879 | return -ENOMEM; | |
46a3df9f | 1880 | } |
46a3df9f S |
1881 | |
1882 | return 0; | |
1883 | } | |
1884 | ||
2d03eacc | 1885 | static u8 hclge_check_speed_dup(u8 duplex, int speed) |
46a3df9f | 1886 | { |
2d03eacc YL |
1888 | if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M)) |
1889 | duplex = HCLGE_MAC_FULL; | |
46a3df9f | 1890 | |
2d03eacc | 1891 | return duplex; |
46a3df9f S |
1892 | } |
1893 | ||
2d03eacc YL |
1894 | static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed, |
1895 | u8 duplex) | |
46a3df9f | 1896 | { |
d44f9b63 | 1897 | struct hclge_config_mac_speed_dup_cmd *req; |
46a3df9f S |
1898 | struct hclge_desc desc; |
1899 | int ret; | |
1900 | ||
d44f9b63 | 1901 | req = (struct hclge_config_mac_speed_dup_cmd *)desc.data; |
46a3df9f S |
1902 | |
1903 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false); | |
1904 | ||
e4e87715 | 1905 | hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex); |
46a3df9f S |
1906 | |
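	/* encode the link speed into the HCLGE_CFG_SPEED field; the numeric
	 * values (0-7) follow the command's own speed encoding, not Mbps
	 */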
1907 | switch (speed) { | |
1908 | case HCLGE_MAC_SPEED_10M: | |
e4e87715 PL |
1909 | hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, |
1910 | HCLGE_CFG_SPEED_S, 6); | |
46a3df9f S |
1911 | break; |
1912 | case HCLGE_MAC_SPEED_100M: | |
e4e87715 PL |
1913 | hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, |
1914 | HCLGE_CFG_SPEED_S, 7); | |
46a3df9f S |
1915 | break; |
1916 | case HCLGE_MAC_SPEED_1G: | |
e4e87715 PL |
1917 | hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, |
1918 | HCLGE_CFG_SPEED_S, 0); | |
46a3df9f S |
1919 | break; |
1920 | case HCLGE_MAC_SPEED_10G: | |
e4e87715 PL |
1921 | hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, |
1922 | HCLGE_CFG_SPEED_S, 1); | |
46a3df9f S |
1923 | break; |
1924 | case HCLGE_MAC_SPEED_25G: | |
e4e87715 PL |
1925 | hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, |
1926 | HCLGE_CFG_SPEED_S, 2); | |
46a3df9f S |
1927 | break; |
1928 | case HCLGE_MAC_SPEED_40G: | |
e4e87715 PL |
1929 | hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, |
1930 | HCLGE_CFG_SPEED_S, 3); | |
46a3df9f S |
1931 | break; |
1932 | case HCLGE_MAC_SPEED_50G: | |
e4e87715 PL |
1933 | hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, |
1934 | HCLGE_CFG_SPEED_S, 4); | |
46a3df9f S |
1935 | break; |
1936 | case HCLGE_MAC_SPEED_100G: | |
e4e87715 PL |
1937 | hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, |
1938 | HCLGE_CFG_SPEED_S, 5); | |
46a3df9f S |
1939 | break; |
1940 | default: | |
d7629e74 | 1941 | dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed); |
46a3df9f S |
1942 | return -EINVAL; |
1943 | } | |
1944 | ||
e4e87715 PL |
1945 | hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B, |
1946 | 1); | |
46a3df9f S |
1947 | |
1948 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
1949 | if (ret) { | |
1950 | dev_err(&hdev->pdev->dev, | |
1951 | "mac speed/duplex config cmd failed %d.\n", ret); | |
1952 | return ret; | |
1953 | } | |
1954 | ||
2d03eacc YL |
1955 | return 0; |
1956 | } | |
1957 | ||
1958 | int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex) | |
1959 | { | |
1960 | int ret; | |
1961 | ||
1962 | duplex = hclge_check_speed_dup(duplex, speed); | |
1963 | if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex) | |
1964 | return 0; | |
1965 | ||
1966 | ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex); | |
1967 | if (ret) | |
1968 | return ret; | |
1969 | ||
1970 | hdev->hw.mac.speed = speed; | |
1971 | hdev->hw.mac.duplex = duplex; | |
46a3df9f S |
1972 | |
1973 | return 0; | |
1974 | } | |
1975 | ||
1976 | static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed, | |
1977 | u8 duplex) | |
1978 | { | |
1979 | struct hclge_vport *vport = hclge_get_vport(handle); | |
1980 | struct hclge_dev *hdev = vport->back; | |
1981 | ||
1982 | return hclge_cfg_mac_speed_dup(hdev, speed, duplex); | |
1983 | } | |
1984 | ||
46a3df9f S |
1985 | static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable) |
1986 | { | |
d44f9b63 | 1987 | struct hclge_config_auto_neg_cmd *req; |
46a3df9f | 1988 | struct hclge_desc desc; |
a90bb9a5 | 1989 | u32 flag = 0; |
46a3df9f S |
1990 | int ret; |
1991 | ||
1992 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false); | |
1993 | ||
d44f9b63 | 1994 | req = (struct hclge_config_auto_neg_cmd *)desc.data; |
e4e87715 | 1995 | hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable); |
a90bb9a5 | 1996 | req->cfg_an_cmd_flag = cpu_to_le32(flag); |
46a3df9f S |
1997 | |
1998 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
3f639907 | 1999 | if (ret) |
46a3df9f S |
2000 | dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n", |
2001 | ret); | |
46a3df9f | 2002 | |
3f639907 | 2003 | return ret; |
46a3df9f S |
2004 | } |
2005 | ||
2006 | static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable) | |
2007 | { | |
2008 | struct hclge_vport *vport = hclge_get_vport(handle); | |
2009 | struct hclge_dev *hdev = vport->back; | |
2010 | ||
2011 | return hclge_set_autoneg_en(hdev, enable); | |
2012 | } | |
2013 | ||
2014 | static int hclge_get_autoneg(struct hnae3_handle *handle) | |
2015 | { | |
2016 | struct hclge_vport *vport = hclge_get_vport(handle); | |
2017 | struct hclge_dev *hdev = vport->back; | |
27b5bf49 FL |
2018 | struct phy_device *phydev = hdev->hw.mac.phydev; |
2019 | ||
2020 | if (phydev) | |
2021 | return phydev->autoneg; | |
46a3df9f S |
2022 | |
2023 | return hdev->hw.mac.autoneg; | |
2024 | } | |
2025 | ||
2026 | static int hclge_mac_init(struct hclge_dev *hdev) | |
2027 | { | |
2028 | struct hclge_mac *mac = &hdev->hw.mac; | |
2029 | int ret; | |
2030 | ||
5d497936 | 2031 | hdev->support_sfp_query = true; |
2d03eacc YL |
2032 | hdev->hw.mac.duplex = HCLGE_MAC_FULL; |
2033 | ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed, | |
2034 | hdev->hw.mac.duplex); | |
46a3df9f S |
2035 | if (ret) { |
2036 | dev_err(&hdev->pdev->dev, | |
2037 | "Config mac speed dup fail ret=%d\n", ret); | |
2038 | return ret; | |
2039 | } | |
2040 | ||
2041 | mac->link = 0; | |
2042 | ||
e6d7d79d YL |
2043 | ret = hclge_set_mac_mtu(hdev, hdev->mps); |
2044 | if (ret) { | |
2045 | dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret); | |
2046 | return ret; | |
2047 | } | |
f9fd82a9 | 2048 | |
e6d7d79d | 2049 | ret = hclge_buffer_alloc(hdev); |
3f639907 | 2050 | if (ret) |
f9fd82a9 | 2051 | dev_err(&hdev->pdev->dev, |
e6d7d79d | 2052 | "allocate buffer fail, ret=%d\n", ret); |
f9fd82a9 | 2053 | |
3f639907 | 2054 | return ret; |
46a3df9f S |
2055 | } |
2056 | ||
c1a81619 SM |
2057 | static void hclge_mbx_task_schedule(struct hclge_dev *hdev) |
2058 | { | |
2059 | if (!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) | |
2060 | schedule_work(&hdev->mbx_service_task); | |
2061 | } | |
2062 | ||
cb1b9f77 SM |
2063 | static void hclge_reset_task_schedule(struct hclge_dev *hdev) |
2064 | { | |
2065 | if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) | |
2066 | schedule_work(&hdev->rst_service_task); | |
2067 | } | |
2068 | ||
46a3df9f S |
2069 | static void hclge_task_schedule(struct hclge_dev *hdev) |
2070 | { | |
2071 | if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) && | |
2072 | !test_bit(HCLGE_STATE_REMOVING, &hdev->state) && | |
2073 | !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)) | |
2074 | (void)schedule_work(&hdev->service_task); | |
2075 | } | |
2076 | ||
2077 | static int hclge_get_mac_link_status(struct hclge_dev *hdev) | |
2078 | { | |
d44f9b63 | 2079 | struct hclge_link_status_cmd *req; |
46a3df9f S |
2080 | struct hclge_desc desc; |
2081 | int link_status; | |
2082 | int ret; | |
2083 | ||
2084 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true); | |
2085 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
2086 | if (ret) { | |
2087 | dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n", | |
2088 | ret); | |
2089 | return ret; | |
2090 | } | |
2091 | ||
d44f9b63 | 2092 | req = (struct hclge_link_status_cmd *)desc.data; |
c79301d8 | 2093 | link_status = req->status & HCLGE_LINK_STATUS_UP_M; |
46a3df9f S |
2094 | |
2095 | return !!link_status; | |
2096 | } | |
2097 | ||
2098 | static int hclge_get_mac_phy_link(struct hclge_dev *hdev) | |
2099 | { | |
2100 | int mac_state; | |
2101 | int link_stat; | |
2102 | ||
582d37bb PL |
2103 | if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) |
2104 | return 0; | |
2105 | ||
46a3df9f S |
2106 | mac_state = hclge_get_mac_link_status(hdev); |
2107 | ||
2108 | if (hdev->hw.mac.phydev) { | |
fd813314 | 2109 | if (hdev->hw.mac.phydev->state == PHY_RUNNING) |
46a3df9f S |
2110 | link_stat = mac_state & |
2111 | hdev->hw.mac.phydev->link; | |
2112 | else | |
2113 | link_stat = 0; | |
2114 | ||
2115 | } else { | |
2116 | link_stat = mac_state; | |
2117 | } | |
2118 | ||
2119 | return !!link_stat; | |
2120 | } | |
2121 | ||
2122 | static void hclge_update_link_status(struct hclge_dev *hdev) | |
2123 | { | |
2124 | struct hnae3_client *client = hdev->nic_client; | |
2125 | struct hnae3_handle *handle; | |
2126 | int state; | |
2127 | int i; | |
2128 | ||
2129 | if (!client) | |
2130 | return; | |
2131 | state = hclge_get_mac_phy_link(hdev); | |
2132 | if (state != hdev->hw.mac.link) { | |
2133 | for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { | |
2134 | handle = &hdev->vport[i].nic; | |
2135 | client->ops->link_status_change(handle, state); | |
2136 | } | |
2137 | hdev->hw.mac.link = state; | |
2138 | } | |
2139 | } | |
2140 | ||
5d497936 PL |
2141 | static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed) |
2142 | { | |
2143 | struct hclge_sfp_speed_cmd *resp = NULL; | |
2144 | struct hclge_desc desc; | |
2145 | int ret; | |
2146 | ||
2147 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SFP_GET_SPEED, true); | |
2148 | resp = (struct hclge_sfp_speed_cmd *)desc.data; | |
2149 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
2150 | if (ret == -EOPNOTSUPP) { | |
2151 | dev_warn(&hdev->pdev->dev, | |
2152 | "IMP do not support get SFP speed %d\n", ret); | |
2153 | return ret; | |
2154 | } else if (ret) { | |
2155 | dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret); | |
2156 | return ret; | |
2157 | } | |
2158 | ||
2159 | *speed = resp->sfp_speed; | |
2160 | ||
2161 | return 0; | |
2162 | } | |
2163 | ||
46a3df9f S |
2164 | static int hclge_update_speed_duplex(struct hclge_dev *hdev) |
2165 | { | |
2166 | struct hclge_mac mac = hdev->hw.mac; | |
46a3df9f S |
2167 | int speed; |
2168 | int ret; | |
2169 | ||
5d497936 | 2170 | /* get the speed from SFP cmd when phy |
46a3df9f S |
2171 | * doesn't exist.
2172 | */ | |
5d497936 | 2173 | if (mac.phydev) |
46a3df9f S |
2174 | return 0; |
2175 | ||
5d497936 PL |
2176 | /* if IMP does not support getting SFP/qSFP speed, return directly */
2177 | if (!hdev->support_sfp_query) | |
2178 | return 0; | |
46a3df9f | 2179 | |
5d497936 PL |
2180 | ret = hclge_get_sfp_speed(hdev, &speed); |
2181 | if (ret == -EOPNOTSUPP) { | |
2182 | hdev->support_sfp_query = false; | |
2183 | return ret; | |
2184 | } else if (ret) { | |
2d03eacc | 2185 | return ret; |
46a3df9f S |
2186 | } |
2187 | ||
5d497936 PL |
2188 | if (speed == HCLGE_MAC_SPEED_UNKNOWN) |
2189 | return 0; /* do nothing if no SFP */ | |
2190 | ||
2191 | /* must config full duplex for SFP */ | |
2192 | return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL); | |
46a3df9f S |
2193 | } |
2194 | ||
2195 | static int hclge_update_speed_duplex_h(struct hnae3_handle *handle) | |
2196 | { | |
2197 | struct hclge_vport *vport = hclge_get_vport(handle); | |
2198 | struct hclge_dev *hdev = vport->back; | |
2199 | ||
2200 | return hclge_update_speed_duplex(hdev); | |
2201 | } | |
2202 | ||
2203 | static int hclge_get_status(struct hnae3_handle *handle) | |
2204 | { | |
2205 | struct hclge_vport *vport = hclge_get_vport(handle); | |
2206 | struct hclge_dev *hdev = vport->back; | |
2207 | ||
2208 | hclge_update_link_status(hdev); | |
2209 | ||
2210 | return hdev->hw.mac.link; | |
2211 | } | |
2212 | ||
d039ef68 | 2213 | static void hclge_service_timer(struct timer_list *t) |
46a3df9f | 2214 | { |
d039ef68 | 2215 | struct hclge_dev *hdev = from_timer(hdev, t, service_timer); |
46a3df9f | 2216 | |
d039ef68 | 2217 | mod_timer(&hdev->service_timer, jiffies + HZ); |
c5f65480 | 2218 | hdev->hw_stats.stats_timer++; |
46a3df9f S |
2219 | hclge_task_schedule(hdev); |
2220 | } | |
2221 | ||
2222 | static void hclge_service_complete(struct hclge_dev *hdev) | |
2223 | { | |
2224 | WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)); | |
2225 | ||
2226 | /* Flush memory before next watchdog */ | |
2227 | smp_mb__before_atomic(); | |
2228 | clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state); | |
2229 | } | |
2230 | ||
ca1d7669 SM |
2231 | static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) |
2232 | { | |
f6162d44 | 2233 | u32 rst_src_reg, cmdq_src_reg, msix_src_reg; |
ca1d7669 SM |
2234 | |
2235 | /* fetch the events from their corresponding regs */ | |
9ca8d1a7 | 2236 | rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS); |
c1a81619 | 2237 | cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG); |
f6162d44 SM |
2238 | msix_src_reg = hclge_read_dev(&hdev->hw, |
2239 | HCLGE_VECTOR0_PF_OTHER_INT_STS_REG); | |
c1a81619 SM |
2240 | |
2241 | /* Assumption: If by any chance reset and mailbox events are reported | |
2242 | * together then we will only process reset event in this go and will | |
2243 | * defer the processing of the mailbox events. Since we would not have
2244 | * cleared the RX CMDQ event this time, we would receive another
2245 | * interrupt from H/W just for the mailbox. | |
2246 | */ | |
ca1d7669 SM |
2247 | |
2248 | /* check for vector0 reset event sources */ | |
6dd22bbc HT |
2249 | if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) { |
2250 | dev_info(&hdev->pdev->dev, "IMP reset interrupt\n"); | |
2251 | set_bit(HNAE3_IMP_RESET, &hdev->reset_pending); | |
2252 | set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); | |
2253 | *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B); | |
2254 | return HCLGE_VECTOR0_EVENT_RST; | |
2255 | } | |
2256 | ||
ca1d7669 | 2257 | if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) { |
65e41e7e | 2258 | dev_info(&hdev->pdev->dev, "global reset interrupt\n"); |
8d40854f | 2259 | set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); |
ca1d7669 SM |
2260 | set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending); |
2261 | *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B); | |
2262 | return HCLGE_VECTOR0_EVENT_RST; | |
2263 | } | |
2264 | ||
2265 | if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) { | |
65e41e7e | 2266 | dev_info(&hdev->pdev->dev, "core reset interrupt\n"); |
8d40854f | 2267 | set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); |
ca1d7669 SM |
2268 | set_bit(HNAE3_CORE_RESET, &hdev->reset_pending); |
2269 | *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B); | |
2270 | return HCLGE_VECTOR0_EVENT_RST; | |
2271 | } | |
2272 | ||
f6162d44 SM |
2273 | /* check for vector0 msix event source */ |
2274 | if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) | |
2275 | return HCLGE_VECTOR0_EVENT_ERR; | |
2276 | ||
c1a81619 SM |
2277 | /* check for vector0 mailbox(=CMDQ RX) event source */ |
2278 | if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) { | |
2279 | cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B); | |
2280 | *clearval = cmdq_src_reg; | |
2281 | return HCLGE_VECTOR0_EVENT_MBX; | |
2282 | } | |
ca1d7669 SM |
2283 | |
2284 | return HCLGE_VECTOR0_EVENT_OTHER; | |
2285 | } | |
2286 | ||
2287 | static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type, | |
2288 | u32 regclr) | |
2289 | { | |
c1a81619 SM |
2290 | switch (event_type) { |
2291 | case HCLGE_VECTOR0_EVENT_RST: | |
ca1d7669 | 2292 | hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr); |
c1a81619 SM |
2293 | break; |
2294 | case HCLGE_VECTOR0_EVENT_MBX: | |
2295 | hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr); | |
2296 | break; | |
fa7a4bd5 JS |
2297 | default: |
2298 | break; | |
c1a81619 | 2299 | } |
ca1d7669 SM |
2300 | } |
2301 | ||
8e52a602 XW |
2302 | static void hclge_clear_all_event_cause(struct hclge_dev *hdev) |
2303 | { | |
2304 | hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST, | |
2305 | BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) | | |
2306 | BIT(HCLGE_VECTOR0_CORERESET_INT_B) | | |
2307 | BIT(HCLGE_VECTOR0_IMPRESET_INT_B)); | |
2308 | hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0); | |
2309 | } | |
2310 | ||
466b0c00 L |
2311 | static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable) |
2312 | { | |
2313 | writel(enable ? 1 : 0, vector->addr); | |
2314 | } | |
2315 | ||
2316 | static irqreturn_t hclge_misc_irq_handle(int irq, void *data) | |
2317 | { | |
2318 | struct hclge_dev *hdev = data; | |
ca1d7669 SM |
2319 | u32 event_cause; |
2320 | u32 clearval; | |
466b0c00 L |
2321 | |
2322 | hclge_enable_vector(&hdev->misc_vector, false); | |
ca1d7669 SM |
2323 | event_cause = hclge_check_event_cause(hdev, &clearval); |
2324 | ||
c1a81619 | 2325 | /* vector 0 interrupt is shared with reset and mailbox source events.*/ |
ca1d7669 | 2326 | switch (event_cause) { |
f6162d44 SM |
2327 | case HCLGE_VECTOR0_EVENT_ERR: |
2328 | /* we do not know what type of reset is required now. This could | |
2329 | * only be decided after we fetch the type of errors which | |
2330 | * caused this event. Therefore, we will do below for now: | |
2331 | * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we | |
2332 | * have deferred deciding the type of reset to be used.
2333 | * 2. Schedule the reset service task.
2334 | * 3. When service task receives HNAE3_UNKNOWN_RESET type it | |
2335 | * will fetch the correct type of reset. This would be done | |
2336 | * by first decoding the types of errors. | |
2337 | */ | |
2338 | set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request); | |
2339 | /* fall through */ | |
ca1d7669 | 2340 | case HCLGE_VECTOR0_EVENT_RST: |
cb1b9f77 | 2341 | hclge_reset_task_schedule(hdev); |
ca1d7669 | 2342 | break; |
c1a81619 SM |
2343 | case HCLGE_VECTOR0_EVENT_MBX: |
2344 | /* If we are here then, | |
2345 | * 1. Either we are not handling any mbx task and we are not | |
2346 | * scheduled as well | |
2347 | * OR | |
2348 | * 2. We could be handling a mbx task but nothing more is | |
2349 | * scheduled. | |
2350 | * In both cases, we should schedule mbx task as there are more | |
2351 | * mbx messages reported by this interrupt. | |
2352 | */ | |
2353 | hclge_mbx_task_schedule(hdev); | |
f0ad97ac | 2354 | break; |
ca1d7669 | 2355 | default: |
f0ad97ac YL |
2356 | dev_warn(&hdev->pdev->dev, |
2357 | "received unknown or unhandled event of vector0\n"); | |
ca1d7669 SM |
2358 | break; |
2359 | } | |
2360 | ||
cd8c5c26 | 2361 | /* clear the source of interrupt if it is not caused by reset */
0d441140 | 2362 | if (event_cause == HCLGE_VECTOR0_EVENT_MBX) { |
cd8c5c26 YL |
2363 | hclge_clear_event_cause(hdev, event_cause, clearval); |
2364 | hclge_enable_vector(&hdev->misc_vector, true); | |
2365 | } | |
466b0c00 L |
2366 | |
2367 | return IRQ_HANDLED; | |
2368 | } | |
2369 | ||
2370 | static void hclge_free_vector(struct hclge_dev *hdev, int vector_id) | |
2371 | { | |
36cbbdf6 PL |
2372 | if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) { |
2373 | dev_warn(&hdev->pdev->dev, | |
2374 | "vector(vector_id %d) has been freed.\n", vector_id); | |
2375 | return; | |
2376 | } | |
2377 | ||
466b0c00 L |
2378 | hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT; |
2379 | hdev->num_msi_left += 1; | |
2380 | hdev->num_msi_used -= 1; | |
2381 | } | |
2382 | ||
2383 | static void hclge_get_misc_vector(struct hclge_dev *hdev) | |
2384 | { | |
2385 | struct hclge_misc_vector *vector = &hdev->misc_vector; | |
2386 | ||
2387 | vector->vector_irq = pci_irq_vector(hdev->pdev, 0); | |
2388 | ||
2389 | vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE; | |
2390 | hdev->vector_status[0] = 0; | |
2391 | ||
2392 | hdev->num_msi_left -= 1; | |
2393 | hdev->num_msi_used += 1; | |
2394 | } | |
2395 | ||
2396 | static int hclge_misc_irq_init(struct hclge_dev *hdev) | |
2397 | { | |
2398 | int ret; | |
2399 | ||
2400 | hclge_get_misc_vector(hdev); | |
2401 | ||
ca1d7669 SM |
2402 | /* this would be explicitly freed in the end */ |
2403 | ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle, | |
2404 | 0, "hclge_misc", hdev); | |
466b0c00 L |
2405 | if (ret) { |
2406 | hclge_free_vector(hdev, 0); | |
2407 | dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n", | |
2408 | hdev->misc_vector.vector_irq); | |
2409 | } | |
2410 | ||
2411 | return ret; | |
2412 | } | |
2413 | ||
ca1d7669 SM |
2414 | static void hclge_misc_irq_uninit(struct hclge_dev *hdev) |
2415 | { | |
2416 | free_irq(hdev->misc_vector.vector_irq, hdev); | |
2417 | hclge_free_vector(hdev, 0); | |
2418 | } | |
2419 | ||
4ed340ab L |
2420 | static int hclge_notify_client(struct hclge_dev *hdev, |
2421 | enum hnae3_reset_notify_type type) | |
2422 | { | |
2423 | struct hnae3_client *client = hdev->nic_client; | |
2424 | u16 i; | |
2425 | ||
2426 | if (!client->ops->reset_notify) | |
2427 | return -EOPNOTSUPP; | |
2428 | ||
2429 | for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { | |
2430 | struct hnae3_handle *handle = &hdev->vport[i].nic; | |
2431 | int ret; | |
2432 | ||
2433 | ret = client->ops->reset_notify(handle, type); | |
65e41e7e HT |
2434 | if (ret) { |
2435 | dev_err(&hdev->pdev->dev, | |
2436 | "notify nic client failed %d(%d)\n", type, ret); | |
4ed340ab | 2437 | return ret; |
65e41e7e | 2438 | } |
4ed340ab L |
2439 | } |
2440 | ||
2441 | return 0; | |
2442 | } | |
2443 | ||
f403a84f HT |
2444 | static int hclge_notify_roce_client(struct hclge_dev *hdev, |
2445 | enum hnae3_reset_notify_type type) | |
2446 | { | |
2447 | struct hnae3_client *client = hdev->roce_client; | |
2448 | int ret = 0; | |
2449 | u16 i; | |
2450 | ||
2451 | if (!client) | |
2452 | return 0; | |
2453 | ||
2454 | if (!client->ops->reset_notify) | |
2455 | return -EOPNOTSUPP; | |
2456 | ||
2457 | for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { | |
2458 | struct hnae3_handle *handle = &hdev->vport[i].roce; | |
2459 | ||
2460 | ret = client->ops->reset_notify(handle, type); | |
2461 | if (ret) { | |
2462 | dev_err(&hdev->pdev->dev, | |
2463 | "notify roce client failed %d(%d)", | |
2464 | type, ret); | |
2465 | return ret; | |
2466 | } | |
2467 | } | |
2468 | ||
2469 | return ret; | |
2470 | } | |
2471 | ||
4ed340ab L |
2472 | static int hclge_reset_wait(struct hclge_dev *hdev) |
2473 | { | |
2474 | #define HCLGE_RESET_WATI_MS 100 | |
6dd22bbc | 2475 | #define HCLGE_RESET_WAIT_CNT 200 |
4ed340ab L |
2476 | u32 val, reg, reg_bit; |
2477 | u32 cnt = 0; | |
2478 | ||
2479 | switch (hdev->reset_type) { | |
6dd22bbc HT |
2480 | case HNAE3_IMP_RESET: |
2481 | reg = HCLGE_GLOBAL_RESET_REG; | |
2482 | reg_bit = HCLGE_IMP_RESET_BIT; | |
2483 | break; | |
4ed340ab L |
2484 | case HNAE3_GLOBAL_RESET: |
2485 | reg = HCLGE_GLOBAL_RESET_REG; | |
2486 | reg_bit = HCLGE_GLOBAL_RESET_BIT; | |
2487 | break; | |
2488 | case HNAE3_CORE_RESET: | |
2489 | reg = HCLGE_GLOBAL_RESET_REG; | |
2490 | reg_bit = HCLGE_CORE_RESET_BIT; | |
2491 | break; | |
2492 | case HNAE3_FUNC_RESET: | |
2493 | reg = HCLGE_FUN_RST_ING; | |
2494 | reg_bit = HCLGE_FUN_RST_ING_B; | |
2495 | break; | |
6b9a97ee HT |
2496 | case HNAE3_FLR_RESET: |
2497 | break; | |
4ed340ab L |
2498 | default: |
2499 | dev_err(&hdev->pdev->dev, | |
2500 | "Wait for unsupported reset type: %d\n", | |
2501 | hdev->reset_type); | |
2502 | return -EINVAL; | |
2503 | } | |
2504 | ||
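	/* FLR has no hardware status register to poll; wait on the driver's
	 * own FLR_DONE state bit instead
	 */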
6b9a97ee HT |
2505 | if (hdev->reset_type == HNAE3_FLR_RESET) { |
2506 | while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) && | |
2507 | cnt++ < HCLGE_RESET_WAIT_CNT) | |
2508 | msleep(HCLGE_RESET_WATI_MS); | |
2509 | ||
2510 | if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) { | |
2511 | dev_err(&hdev->pdev->dev, | |
2512 | "flr wait timeout: %d\n", cnt); | |
2513 | return -EBUSY; | |
2514 | } | |
2515 | ||
2516 | return 0; | |
2517 | } | |
2518 | ||
4ed340ab | 2519 | val = hclge_read_dev(&hdev->hw, reg); |
e4e87715 | 2520 | while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) { |
4ed340ab L |
2521 | msleep(HCLGE_RESET_WATI_MS); |
2522 | val = hclge_read_dev(&hdev->hw, reg); | |
2523 | cnt++; | |
2524 | } | |
2525 | ||
4ed340ab L |
2526 | if (cnt >= HCLGE_RESET_WAIT_CNT) { |
2527 | dev_warn(&hdev->pdev->dev, | |
2528 | "Wait for reset timeout: %d\n", hdev->reset_type); | |
2529 | return -EBUSY; | |
2530 | } | |
2531 | ||
2532 | return 0; | |
2533 | } | |
2534 | ||
aa5c4f17 HT |
2535 | static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset) |
2536 | { | |
2537 | struct hclge_vf_rst_cmd *req; | |
2538 | struct hclge_desc desc; | |
2539 | ||
2540 | req = (struct hclge_vf_rst_cmd *)desc.data; | |
2541 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false); | |
2542 | req->dest_vfid = func_id; | |
2543 | ||
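	/* when clearing the reset, vf_rst stays 0; this assumes the basic
	 * desc setup zeroes the command payload
	 */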
2544 | if (reset) | |
2545 | req->vf_rst = 0x1; | |
2546 | ||
2547 | return hclge_cmd_send(&hdev->hw, &desc, 1); | |
2548 | } | |
2549 | ||
2550 | int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset) | |
2551 | { | |
2552 | int i; | |
2553 | ||
2554 | for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) { | |
2555 | struct hclge_vport *vport = &hdev->vport[i]; | |
2556 | int ret; | |
2557 | ||
2558 | /* Send cmd to set/clear VF's FUNC_RST_ING */ | |
2559 | ret = hclge_set_vf_rst(hdev, vport->vport_id, reset); | |
2560 | if (ret) { | |
2561 | dev_err(&hdev->pdev->dev, | |
790cd1a8 | 2562 | "set vf(%d) rst failed %d!\n", |
aa5c4f17 HT |
2563 | vport->vport_id, ret); |
2564 | return ret; | |
2565 | } | |
2566 | ||
2567 | if (!reset) | |
2568 | continue; | |
2569 | ||
2570 | /* Inform VF to process the reset. | |
2571 | * hclge_inform_reset_assert_to_vf may fail if VF | |
2572 | * driver is not loaded. | |
2573 | */ | |
2574 | ret = hclge_inform_reset_assert_to_vf(vport); | |
2575 | if (ret) | |
2576 | dev_warn(&hdev->pdev->dev, | |
790cd1a8 | 2577 | "inform reset to vf(%d) failed %d!\n", |
aa5c4f17 HT |
2578 | vport->vport_id, ret); |
2579 | } | |
2580 | ||
2581 | return 0; | |
2582 | } | |
2583 | ||
2bfbd35d | 2584 | int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id) |
4ed340ab L |
2585 | { |
2586 | struct hclge_desc desc; | |
2587 | struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data; | |
2588 | int ret; | |
2589 | ||
2590 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false); | |
e4e87715 | 2591 | hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1); |
4ed340ab L |
2592 | req->fun_reset_vfid = func_id; |
2593 | ||
2594 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
2595 | if (ret) | |
2596 | dev_err(&hdev->pdev->dev, | |
2597 | "send function reset cmd fail, status =%d\n", ret); | |
2598 | ||
2599 | return ret; | |
2600 | } | |
2601 | ||
f2f432f2 | 2602 | static void hclge_do_reset(struct hclge_dev *hdev) |
4ed340ab L |
2603 | { |
2604 | struct pci_dev *pdev = hdev->pdev; | |
2605 | u32 val; | |
2606 | ||
f2f432f2 | 2607 | switch (hdev->reset_type) { |
4ed340ab L |
2608 | case HNAE3_GLOBAL_RESET: |
2609 | val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); | |
e4e87715 | 2610 | hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1); |
4ed340ab L |
2611 | hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val); |
2612 | dev_info(&pdev->dev, "Global Reset requested\n"); | |
2613 | break; | |
2614 | case HNAE3_CORE_RESET: | |
2615 | val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); | |
e4e87715 | 2616 | hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1); |
4ed340ab L |
2617 | hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val); |
2618 | dev_info(&pdev->dev, "Core Reset requested\n"); | |
2619 | break; | |
2620 | case HNAE3_FUNC_RESET: | |
2621 | dev_info(&pdev->dev, "PF Reset requested\n"); | |
cb1b9f77 SM |
2622 | /* schedule again to check later */ |
2623 | set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending); | |
2624 | hclge_reset_task_schedule(hdev); | |
4ed340ab | 2625 | break; |
6b9a97ee HT |
2626 | case HNAE3_FLR_RESET: |
2627 | dev_info(&pdev->dev, "FLR requested\n"); | |
2628 | /* schedule again to check later */ | |
2629 | set_bit(HNAE3_FLR_RESET, &hdev->reset_pending); | |
2630 | hclge_reset_task_schedule(hdev); | |
2631 | break; | |
4ed340ab L |
2632 | default: |
2633 | dev_warn(&pdev->dev, | |
f2f432f2 | 2634 | "Unsupported reset type: %d\n", hdev->reset_type); |
4ed340ab L |
2635 | break; |
2636 | } | |
2637 | } | |
2638 | ||
f2f432f2 SM |
2639 | static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev, |
2640 | unsigned long *addr) | |
2641 | { | |
2642 | enum hnae3_reset_type rst_level = HNAE3_NONE_RESET; | |
2643 | ||
f6162d44 SM |
2644 | /* first, resolve any unknown reset type to the known type(s) */ |
2645 | if (test_bit(HNAE3_UNKNOWN_RESET, addr)) { | |
2646 | /* we will intentionally ignore any errors from this function | |
2647 | * as we will end up in *some* reset request in any case | |
2648 | */ | |
2649 | hclge_handle_hw_msix_error(hdev, addr); | |
2650 | clear_bit(HNAE3_UNKNOWN_RESET, addr); | |
2651 | /* We deferred the clearing of the error event which caused the
2652 | * interrupt, since it was not possible to do that in
2653 | * interrupt context (and this is the reason we introduced the
2654 | * new UNKNOWN reset type). Now that the errors have been
2655 | * handled and cleared in hardware, we can safely enable
2656 | * interrupts. This is an exception to the norm.
2657 | */ | |
2658 | hclge_enable_vector(&hdev->misc_vector, true); | |
2659 | } | |
2660 | ||
f2f432f2 | 2661 | /* return the highest priority reset level amongst all */ |
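	/* a higher-level reset supersedes any lower-level requests that are
	 * also pending, so their bits are cleared together with its own
	 */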
7cea834d HT |
2662 | if (test_bit(HNAE3_IMP_RESET, addr)) { |
2663 | rst_level = HNAE3_IMP_RESET; | |
2664 | clear_bit(HNAE3_IMP_RESET, addr); | |
2665 | clear_bit(HNAE3_GLOBAL_RESET, addr); | |
2666 | clear_bit(HNAE3_CORE_RESET, addr); | |
2667 | clear_bit(HNAE3_FUNC_RESET, addr); | |
2668 | } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) { | |
f2f432f2 | 2669 | rst_level = HNAE3_GLOBAL_RESET; |
7cea834d HT |
2670 | clear_bit(HNAE3_GLOBAL_RESET, addr); |
2671 | clear_bit(HNAE3_CORE_RESET, addr); | |
2672 | clear_bit(HNAE3_FUNC_RESET, addr); | |
2673 | } else if (test_bit(HNAE3_CORE_RESET, addr)) { | |
f2f432f2 | 2674 | rst_level = HNAE3_CORE_RESET; |
7cea834d HT |
2675 | clear_bit(HNAE3_CORE_RESET, addr); |
2676 | clear_bit(HNAE3_FUNC_RESET, addr); | |
2677 | } else if (test_bit(HNAE3_FUNC_RESET, addr)) { | |
f2f432f2 | 2678 | rst_level = HNAE3_FUNC_RESET; |
7cea834d | 2679 | clear_bit(HNAE3_FUNC_RESET, addr); |
6b9a97ee HT |
2680 | } else if (test_bit(HNAE3_FLR_RESET, addr)) { |
2681 | rst_level = HNAE3_FLR_RESET; | |
2682 | clear_bit(HNAE3_FLR_RESET, addr); | |
7cea834d | 2683 | } |
f2f432f2 SM |
2684 | |
2685 | return rst_level; | |
2686 | } | |
2687 | ||
cd8c5c26 YL |
2688 | static void hclge_clear_reset_cause(struct hclge_dev *hdev) |
2689 | { | |
2690 | u32 clearval = 0; | |
2691 | ||
2692 | switch (hdev->reset_type) { | |
2693 | case HNAE3_IMP_RESET: | |
2694 | clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B); | |
2695 | break; | |
2696 | case HNAE3_GLOBAL_RESET: | |
2697 | clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B); | |
2698 | break; | |
2699 | case HNAE3_CORE_RESET: | |
2700 | clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B); | |
2701 | break; | |
2702 | default: | |
cd8c5c26 YL |
2703 | break; |
2704 | } | |
2705 | ||
2706 | if (!clearval) | |
2707 | return; | |
2708 | ||
2709 | hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval); | |
2710 | hclge_enable_vector(&hdev->misc_vector, true); | |
2711 | } | |
2712 | ||
aa5c4f17 HT |
2713 | static int hclge_reset_prepare_down(struct hclge_dev *hdev) |
2714 | { | |
2715 | int ret = 0; | |
2716 | ||
2717 | switch (hdev->reset_type) { | |
2718 | case HNAE3_FUNC_RESET: | |
6b9a97ee HT |
2719 | /* fall through */ |
2720 | case HNAE3_FLR_RESET: | |
aa5c4f17 HT |
2721 | ret = hclge_set_all_vf_rst(hdev, true); |
2722 | break; | |
2723 | default: | |
2724 | break; | |
2725 | } | |
2726 | ||
2727 | return ret; | |
2728 | } | |
2729 | ||
35d93a30 HT |
2730 | static int hclge_reset_prepare_wait(struct hclge_dev *hdev) |
2731 | { | |
6dd22bbc | 2732 | u32 reg_val; |
35d93a30 HT |
2733 | int ret = 0; |
2734 | ||
2735 | switch (hdev->reset_type) { | |
2736 | case HNAE3_FUNC_RESET: | |
aa5c4f17 HT |
2737 | /* There is no mechanism for PF to know if VF has stopped IO |
2738 | * for now, just wait 100 ms for VF to stop IO | |
2739 | */ | |
2740 | msleep(100); | |
35d93a30 HT |
2741 | ret = hclge_func_reset_cmd(hdev, 0); |
2742 | if (ret) { | |
2743 | dev_err(&hdev->pdev->dev, | |
141b95d5 | 2744 | "asserting function reset fail %d!\n", ret); |
35d93a30 HT |
2745 | return ret; |
2746 | } | |
2747 | ||
2748 | /* After performing PF reset, it is not necessary to do the
2749 | * mailbox handling or send any command to firmware, because | |
2750 | * any mailbox handling or command to firmware is only valid | |
2751 | * after hclge_cmd_init is called. | |
2752 | */ | |
2753 | set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); | |
2754 | break; | |
6b9a97ee HT |
2755 | case HNAE3_FLR_RESET: |
2756 | /* There is no mechanism for the PF to know if the VF has stopped
2757 | * IO; for now, just wait 100 ms for the VF to stop IO
2758 | */ | |
2759 | msleep(100); | |
2760 | set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); | |
2761 | set_bit(HNAE3_FLR_DOWN, &hdev->flr_state); | |
2762 | break; | |
6dd22bbc HT |
2763 | case HNAE3_IMP_RESET: |
2764 | reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); | |
2765 | hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, | |
2766 | BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val); | |
2767 | break; | |
35d93a30 HT |
2768 | default: |
2769 | break; | |
2770 | } | |
2771 | ||
2772 | dev_info(&hdev->pdev->dev, "prepare wait ok\n"); | |
2773 | ||
2774 | return ret; | |
2775 | } | |
2776 | ||
65e41e7e HT |
2777 | static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout) |
2778 | { | |
2779 | #define MAX_RESET_FAIL_CNT 5 | |
2780 | #define RESET_UPGRADE_DELAY_SEC 10 | |
2781 | ||
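	/* Retry policy: keep re-scheduling while a HW reset is still pending;
	 * while under MAX_RESET_FAIL_CNT failures, arm reset_timer, which
	 * upgrades to a global reset after RESET_UPGRADE_DELAY_SEC (see
	 * hclge_reset_timer).
	 */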
2782 | if (hdev->reset_pending) { | |
2783 | dev_info(&hdev->pdev->dev, "Reset pending %lu\n", | |
2784 | hdev->reset_pending); | |
2785 | return true; | |
2786 | } else if ((hdev->reset_type != HNAE3_IMP_RESET) && | |
2787 | (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) & | |
2788 | BIT(HCLGE_IMP_RESET_BIT))) { | |
2789 | dev_info(&hdev->pdev->dev, | |
2790 | "reset failed because IMP Reset is pending\n"); | |
2791 | hclge_clear_reset_cause(hdev); | |
2792 | return false; | |
2793 | } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) { | |
2794 | hdev->reset_fail_cnt++; | |
2795 | if (is_timeout) { | |
2796 | set_bit(hdev->reset_type, &hdev->reset_pending); | |
2797 | dev_info(&hdev->pdev->dev, | |
2798 | "re-schedule to wait for hw reset done\n"); | |
2799 | return true; | |
2800 | } | |
2801 | ||
2802 | dev_info(&hdev->pdev->dev, "Upgrade reset level\n"); | |
2803 | hclge_clear_reset_cause(hdev); | |
2804 | mod_timer(&hdev->reset_timer, | |
2805 | jiffies + RESET_UPGRADE_DELAY_SEC * HZ); | |
2806 | ||
2807 | return false; | |
2808 | } | |
2809 | ||
2810 | hclge_clear_reset_cause(hdev); | |
2811 | dev_err(&hdev->pdev->dev, "Reset fail!\n"); | |
2812 | return false; | |
2813 | } | |
2814 | ||
aa5c4f17 HT |
2815 | static int hclge_reset_prepare_up(struct hclge_dev *hdev) |
2816 | { | |
2817 | int ret = 0; | |
2818 | ||
2819 | switch (hdev->reset_type) { | |
2820 | case HNAE3_FUNC_RESET: | |
6b9a97ee HT |
2821 | /* fall through */ |
2822 | case HNAE3_FLR_RESET: | |
aa5c4f17 HT |
2823 | ret = hclge_set_all_vf_rst(hdev, false); |
2824 | break; | |
2825 | default: | |
2826 | break; | |
2827 | } | |
2828 | ||
2829 | return ret; | |
2830 | } | |
2831 | ||
f2f432f2 SM |
2832 | static void hclge_reset(struct hclge_dev *hdev) |
2833 | { | |
6871af29 | 2834 | struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); |
65e41e7e HT |
2835 | bool is_timeout = false; |
2836 | int ret; | |
9de0b86f | 2837 | |
6871af29 JS |
2838 | /* Initialize ae_dev reset status as well, in case enet layer wants to |
2839 | * know if device is undergoing reset | |
2840 | */ | |
2841 | ae_dev->reset_type = hdev->reset_type; | |
4d60291b | 2842 | hdev->reset_count++; |
f2f432f2 | 2843 | /* perform reset of the stack & ae device for a client */ |
65e41e7e HT |
2844 | ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT); |
2845 | if (ret) | |
2846 | goto err_reset; | |
2847 | ||
aa5c4f17 HT |
2848 | ret = hclge_reset_prepare_down(hdev); |
2849 | if (ret) | |
2850 | goto err_reset; | |
2851 | ||
6d4fab39 | 2852 | rtnl_lock(); |
65e41e7e HT |
2853 | ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); |
2854 | if (ret) | |
2855 | goto err_reset_lock; | |
f2f432f2 | 2856 | |
65e41e7e | 2857 | rtnl_unlock(); |
35d93a30 | 2858 | |
65e41e7e HT |
2859 | ret = hclge_reset_prepare_wait(hdev); |
2860 | if (ret) | |
2861 | goto err_reset; | |
cd8c5c26 | 2862 | |
65e41e7e HT |
2863 | if (hclge_reset_wait(hdev)) { |
2864 | is_timeout = true; | |
2865 | goto err_reset; | |
f2f432f2 SM |
2866 | } |
2867 | ||
65e41e7e HT |
2868 | ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT); |
2869 | if (ret) | |
2870 | goto err_reset; | |
2871 | ||
2872 | rtnl_lock(); | |
2873 | ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT); | |
2874 | if (ret) | |
2875 | goto err_reset_lock; | |
2876 | ||
2877 | ret = hclge_reset_ae_dev(hdev->ae_dev); | |
2878 | if (ret) | |
2879 | goto err_reset_lock; | |
2880 | ||
2881 | ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT); | |
2882 | if (ret) | |
2883 | goto err_reset_lock; | |
2884 | ||
2885 | hclge_clear_reset_cause(hdev); | |
2886 | ||
aa5c4f17 HT |
2887 | ret = hclge_reset_prepare_up(hdev); |
2888 | if (ret) | |
2889 | goto err_reset_lock; | |
2890 | ||
65e41e7e HT |
2891 | ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT); |
2892 | if (ret) | |
2893 | goto err_reset_lock; | |
2894 | ||
6d4fab39 | 2895 | rtnl_unlock(); |
f403a84f | 2896 | |
65e41e7e HT |
2897 | ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT); |
2898 | if (ret) | |
2899 | goto err_reset; | |
2900 | ||
2901 | ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT); | |
2902 | if (ret) | |
2903 | goto err_reset; | |
2904 | ||
b644a8d4 HT |
2905 | hdev->last_reset_time = jiffies; |
2906 | hdev->reset_fail_cnt = 0; | |
2907 | ae_dev->reset_type = HNAE3_NONE_RESET; | |
2908 | ||
65e41e7e HT |
2909 | return; |
2910 | ||
2911 | err_reset_lock: | |
2912 | rtnl_unlock(); | |
2913 | err_reset: | |
2914 | if (hclge_reset_err_handle(hdev, is_timeout)) | |
2915 | hclge_reset_task_schedule(hdev); | |
f2f432f2 SM |
2916 | } |
2917 | ||
6ae4e733 SJ |
2918 | static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle) |
2919 | { | |
2920 | struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); | |
2921 | struct hclge_dev *hdev = ae_dev->priv; | |
2922 | ||
2923 | /* We might end up getting called broadly because of the 2 cases below:
2924 | * 1. A recoverable error was conveyed through APEI and the only way to
2925 | * bring back normalcy is to reset.
2926 | * 2. A new reset request from the stack due to timeout
2927 | *
2928 | * For the first case, the error event might not have an ae handle available.
2929 | * Check if this is a new reset request and we are not here just because
6d4c3981 SM |
2930 | * last reset attempt did not succeed and watchdog hit us again. We will |
2931 | * know this if last reset request did not occur very recently (watchdog | |
2932 | * timer = 5*HZ, let us check after a sufficiently large time, say 4*5*HZ).
2933 | * In case of a new request we reset the "reset level" to PF reset.
9de0b86f HT |
2934 | * And if it is a repeat of the most recent reset request then we
2935 | * want to make sure we throttle it. Therefore, we will
2936 | * not allow another one within 3*HZ jiffies.
6d4c3981 | 2937 | */ |
6ae4e733 SJ |
2938 | if (!handle) |
2939 | handle = &hdev->vport[0].nic; | |
2940 | ||
0742ed7c | 2941 | if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ))) |
9de0b86f | 2942 | return; |
720bd583 | 2943 | else if (hdev->default_reset_request) |
0742ed7c | 2944 | hdev->reset_level = |
720bd583 HT |
2945 | hclge_get_reset_level(hdev, |
2946 | &hdev->default_reset_request); | |
0742ed7c HT |
2947 | else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) |
2948 | hdev->reset_level = HNAE3_FUNC_RESET; | |
4ed340ab | 2949 | |
6d4c3981 | 2950 | dev_info(&hdev->pdev->dev, "received reset event, reset type is %d",
0742ed7c | 2951 | hdev->reset_level); |
6d4c3981 SM |
2952 | |
2953 | /* request reset & schedule reset task */ | |
0742ed7c | 2954 | set_bit(hdev->reset_level, &hdev->reset_request); |
6d4c3981 SM |
2955 | hclge_reset_task_schedule(hdev); |
2956 | ||
0742ed7c HT |
2957 | if (hdev->reset_level < HNAE3_GLOBAL_RESET) |
2958 | hdev->reset_level++; | |
4ed340ab L |
2959 | } |
2960 | ||
720bd583 HT |
2961 | static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev, |
2962 | enum hnae3_reset_type rst_type) | |
2963 | { | |
2964 | struct hclge_dev *hdev = ae_dev->priv; | |
2965 | ||
2966 | set_bit(rst_type, &hdev->default_reset_request); | |
2967 | } | |
2968 | ||
65e41e7e HT |
2969 | static void hclge_reset_timer(struct timer_list *t) |
2970 | { | |
2971 | struct hclge_dev *hdev = from_timer(hdev, t, reset_timer); | |
2972 | ||
2973 | dev_info(&hdev->pdev->dev, | |
2974 | "triggering global reset in reset timer\n"); | |
2975 | set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request); | |
2976 | hclge_reset_event(hdev->pdev, NULL); | |
2977 | } | |
2978 | ||
4ed340ab L |
2979 | static void hclge_reset_subtask(struct hclge_dev *hdev) |
2980 | { | |
f2f432f2 SM |
2981 | /* Check if there is any ongoing reset in the hardware. This status can |
2982 | * be checked from reset_pending. If there is one, we need to wait for |
2983 | * the hardware to complete the reset. |
2984 | * a. If we are able to figure out in reasonable time that the hardware |
2985 | * has fully reset, then we can proceed with the driver and client |
2986 | * reset. |
2987 | * b. Else, we can come back later to check this status, so re-schedule |
2988 | * now. |
2989 | */ |
0742ed7c | 2990 | hdev->last_reset_time = jiffies; |
f2f432f2 SM |
2991 | hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending); |
2992 | if (hdev->reset_type != HNAE3_NONE_RESET) | |
2993 | hclge_reset(hdev); | |
4ed340ab | 2994 | |
f2f432f2 SM |
2995 | /* check if we got any *new* reset requests to be honored */ |
2996 | hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request); | |
2997 | if (hdev->reset_type != HNAE3_NONE_RESET) | |
2998 | hclge_do_reset(hdev); | |
4ed340ab | 2999 | |
4ed340ab L |
3000 | hdev->reset_type = HNAE3_NONE_RESET; |
3001 | } | |
3002 | ||
cb1b9f77 | 3003 | static void hclge_reset_service_task(struct work_struct *work) |
466b0c00 | 3004 | { |
cb1b9f77 SM |
3005 | struct hclge_dev *hdev = |
3006 | container_of(work, struct hclge_dev, rst_service_task); | |
3007 | ||
3008 | if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) | |
3009 | return; | |
3010 | ||
3011 | clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state); | |
3012 | ||
4ed340ab | 3013 | hclge_reset_subtask(hdev); |
cb1b9f77 SM |
3014 | |
3015 | clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); | |
466b0c00 L |
3016 | } |
3017 | ||
c1a81619 SM |
3018 | static void hclge_mailbox_service_task(struct work_struct *work) |
3019 | { | |
3020 | struct hclge_dev *hdev = | |
3021 | container_of(work, struct hclge_dev, mbx_service_task); | |
3022 | ||
3023 | if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state)) | |
3024 | return; | |
3025 | ||
3026 | clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state); | |
3027 | ||
3028 | hclge_mbx_handler(hdev); | |
3029 | ||
3030 | clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); | |
3031 | } | |
3032 | ||
a6d818e3 YL |
3033 | static void hclge_update_vport_alive(struct hclge_dev *hdev) |
3034 | { | |
3035 | int i; | |
3036 | ||
3037 | /* start from vport 1 because the PF is always alive */ |
3038 | for (i = 1; i < hdev->num_alloc_vport; i++) { | |
3039 | struct hclge_vport *vport = &hdev->vport[i]; | |
3040 | ||
3041 | if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ)) | |
3042 | clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); | |
818f1675 YL |
3043 | |
3044 | /* If the VF is not alive, set its mps to the default value */ |
3045 | if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) | |
3046 | vport->mps = HCLGE_MAC_DEFAULT_FRAME; | |
a6d818e3 YL |
3047 | } |
3048 | } | |
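/* Editorial note: a VF vport is declared not-alive once 8*HZ (eight
 * seconds at HZ ticks per second) pass without last_active_jiffies being
 * refreshed, presumably via the VF keep-alive mailbox path. Its mps is
 * then rolled back to HCLGE_MAC_DEFAULT_FRAME so a stale jumbo-frame
 * setting cannot outlive the VF driver.
 */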
3049 | ||
46a3df9f S |
3050 | static void hclge_service_task(struct work_struct *work) |
3051 | { | |
3052 | struct hclge_dev *hdev = | |
3053 | container_of(work, struct hclge_dev, service_task); | |
3054 | ||
c5f65480 JS |
3055 | if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) { |
3056 | hclge_update_stats_for_all(hdev); | |
3057 | hdev->hw_stats.stats_timer = 0; | |
3058 | } | |
3059 | ||
46a3df9f S |
3060 | hclge_update_speed_duplex(hdev); |
3061 | hclge_update_link_status(hdev); | |
a6d818e3 | 3062 | hclge_update_vport_alive(hdev); |
46a3df9f S |
3063 | hclge_service_complete(hdev); |
3064 | } | |
3065 | ||
46a3df9f S |
3066 | struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle) |
3067 | { | |
3068 | /* VF handle has no client */ | |
3069 | if (!handle->client) | |
3070 | return container_of(handle, struct hclge_vport, nic); | |
3071 | else if (handle->client->type == HNAE3_CLIENT_ROCE) | |
3072 | return container_of(handle, struct hclge_vport, roce); | |
3073 | else | |
3074 | return container_of(handle, struct hclge_vport, nic); | |
3075 | } | |
3076 | ||
3077 | static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num, | |
3078 | struct hnae3_vector_info *vector_info) | |
3079 | { | |
3080 | struct hclge_vport *vport = hclge_get_vport(handle); | |
3081 | struct hnae3_vector_info *vector = vector_info; | |
3082 | struct hclge_dev *hdev = vport->back; | |
3083 | int alloc = 0; | |
3084 | int i, j; | |
3085 | ||
3086 | vector_num = min(hdev->num_msi_left, vector_num); | |
3087 | ||
3088 | for (j = 0; j < vector_num; j++) { | |
3089 | for (i = 1; i < hdev->num_msi; i++) { | |
3090 | if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) { | |
3091 | vector->vector = pci_irq_vector(hdev->pdev, i); | |
3092 | vector->io_addr = hdev->hw.io_base + | |
3093 | HCLGE_VECTOR_REG_BASE + | |
3094 | (i - 1) * HCLGE_VECTOR_REG_OFFSET + | |
3095 | vport->vport_id * | |
3096 | HCLGE_VECTOR_VF_OFFSET; | |
3097 | hdev->vector_status[i] = vport->vport_id; | |
887c3820 | 3098 | hdev->vector_irq[i] = vector->vector; |
46a3df9f S |
3099 | |
3100 | vector++; | |
3101 | alloc++; | |
3102 | ||
3103 | break; | |
3104 | } | |
3105 | } | |
3106 | } | |
3107 | hdev->num_msi_left -= alloc; | |
3108 | hdev->num_msi_used += alloc; | |
3109 | ||
3110 | return alloc; | |
3111 | } | |
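/* Editorial sketch: the per-vector interrupt register address computed
 * inline above, pulled out as a pure function for clarity. The
 * HCLGE_VECTOR_* constants are assumed from hclge_main.h; the "- 1"
 * reflects that MSI-X vector 0 is reserved for misc interrupts, so ring
 * vectors start at index 1.
 */
static unsigned long sketch_vector_io_addr(unsigned long io_base,
					   int msix_idx, u16 vport_id)
{
	return io_base + HCLGE_VECTOR_REG_BASE +
	       (msix_idx - 1) * HCLGE_VECTOR_REG_OFFSET +
	       vport_id * HCLGE_VECTOR_VF_OFFSET;
}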
3112 | ||
3113 | static int hclge_get_vector_index(struct hclge_dev *hdev, int vector) | |
3114 | { | |
3115 | int i; | |
3116 | ||
887c3820 SM |
3117 | for (i = 0; i < hdev->num_msi; i++) |
3118 | if (vector == hdev->vector_irq[i]) | |
3119 | return i; | |
3120 | ||
46a3df9f S |
3121 | return -EINVAL; |
3122 | } | |
3123 | ||
0d3e6631 YL |
3124 | static int hclge_put_vector(struct hnae3_handle *handle, int vector) |
3125 | { | |
3126 | struct hclge_vport *vport = hclge_get_vport(handle); | |
3127 | struct hclge_dev *hdev = vport->back; | |
3128 | int vector_id; | |
3129 | ||
3130 | vector_id = hclge_get_vector_index(hdev, vector); | |
3131 | if (vector_id < 0) { | |
3132 | dev_err(&hdev->pdev->dev, | |
3133 | "Get vector index fail. vector_id =%d\n", vector_id); | |
3134 | return vector_id; | |
3135 | } | |
3136 | ||
3137 | hclge_free_vector(hdev, vector_id); | |
3138 | ||
3139 | return 0; | |
3140 | } | |
3141 | ||
46a3df9f S |
3142 | static u32 hclge_get_rss_key_size(struct hnae3_handle *handle) |
3143 | { | |
3144 | return HCLGE_RSS_KEY_SIZE; | |
3145 | } | |
3146 | ||
3147 | static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle) | |
3148 | { | |
3149 | return HCLGE_RSS_IND_TBL_SIZE; | |
3150 | } | |
3151 | ||
46a3df9f S |
3152 | static int hclge_set_rss_algo_key(struct hclge_dev *hdev, |
3153 | const u8 hfunc, const u8 *key) | |
3154 | { | |
d44f9b63 | 3155 | struct hclge_rss_config_cmd *req; |
46a3df9f S |
3156 | struct hclge_desc desc; |
3157 | int key_offset; | |
3158 | int key_size; | |
3159 | int ret; | |
3160 | ||
d44f9b63 | 3161 | req = (struct hclge_rss_config_cmd *)desc.data; |
46a3df9f S |
3162 | |
3163 | for (key_offset = 0; key_offset < 3; key_offset++) { | |
3164 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG, | |
3165 | false); | |
3166 | ||
3167 | req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK); | |
3168 | req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B); | |
3169 | ||
3170 | if (key_offset == 2) | |
3171 | key_size = | |
3172 | HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2; | |
3173 | else | |
3174 | key_size = HCLGE_RSS_HASH_KEY_NUM; | |
3175 | ||
3176 | memcpy(req->hash_key, | |
3177 | key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size); | |
3178 | ||
3179 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
3180 | if (ret) { | |
3181 | dev_err(&hdev->pdev->dev, | |
3182 | "Configure RSS config fail, status = %d\n", | |
3183 | ret); | |
3184 | return ret; | |
3185 | } | |
3186 | } | |
3187 | return 0; | |
3188 | } | |
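/* Editorial note: the loop above splits the RSS key across three command
 * descriptors. Assuming the values from hclge_main.h
 * (HCLGE_RSS_KEY_SIZE = 40, HCLGE_RSS_HASH_KEY_NUM = 16), the chunks are
 * 16 + 16 + 8 bytes:
 *
 *	key_offset 0: key bytes  0..15
 *	key_offset 1: key bytes 16..31
 *	key_offset 2: key bytes 32..39 (the remainder)
 */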
3189 | ||
89523cfa | 3190 | static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir) |
46a3df9f | 3191 | { |
d44f9b63 | 3192 | struct hclge_rss_indirection_table_cmd *req; |
46a3df9f S |
3193 | struct hclge_desc desc; |
3194 | int i, j; | |
3195 | int ret; | |
3196 | ||
d44f9b63 | 3197 | req = (struct hclge_rss_indirection_table_cmd *)desc.data; |
46a3df9f S |
3198 | |
3199 | for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) { | |
3200 | hclge_cmd_setup_basic_desc | |
3201 | (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false); | |
3202 | ||
a90bb9a5 YL |
3203 | req->start_table_index = |
3204 | cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE); | |
3205 | req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK); | |
46a3df9f S |
3206 | |
3207 | for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) | |
3208 | req->rss_result[j] = | |
3209 | indir[i * HCLGE_RSS_CFG_TBL_SIZE + j]; | |
3210 | ||
3211 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
3212 | if (ret) { | |
3213 | dev_err(&hdev->pdev->dev, | |
3214 | "Configure rss indir table fail,status = %d\n", | |
3215 | ret); | |
3216 | return ret; | |
3217 | } | |
3218 | } | |
3219 | return 0; | |
3220 | } | |
3221 | ||
3222 | static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid, | |
3223 | u16 *tc_size, u16 *tc_offset) | |
3224 | { | |
d44f9b63 | 3225 | struct hclge_rss_tc_mode_cmd *req; |
46a3df9f S |
3226 | struct hclge_desc desc; |
3227 | int ret; | |
3228 | int i; | |
3229 | ||
3230 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false); | |
d44f9b63 | 3231 | req = (struct hclge_rss_tc_mode_cmd *)desc.data; |
46a3df9f S |
3232 | |
3233 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { | |
a90bb9a5 YL |
3234 | u16 mode = 0; |
3235 | ||
e4e87715 PL |
3236 | hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1)); |
3237 | hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M, | |
3238 | HCLGE_RSS_TC_SIZE_S, tc_size[i]); | |
3239 | hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M, | |
3240 | HCLGE_RSS_TC_OFFSET_S, tc_offset[i]); | |
a90bb9a5 YL |
3241 | |
3242 | req->rss_tc_mode[i] = cpu_to_le16(mode); | |
46a3df9f S |
3243 | } |
3244 | ||
3245 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
3f639907 | 3246 | if (ret) |
46a3df9f S |
3247 | dev_err(&hdev->pdev->dev, |
3248 | "Configure rss tc mode fail, status = %d\n", ret); | |
46a3df9f | 3249 | |
3f639907 | 3250 | return ret; |
46a3df9f S |
3251 | } |
3252 | ||
232fc64b PL |
3253 | static void hclge_get_rss_type(struct hclge_vport *vport) |
3254 | { | |
3255 | if (vport->rss_tuple_sets.ipv4_tcp_en || | |
3256 | vport->rss_tuple_sets.ipv4_udp_en || | |
3257 | vport->rss_tuple_sets.ipv4_sctp_en || | |
3258 | vport->rss_tuple_sets.ipv6_tcp_en || | |
3259 | vport->rss_tuple_sets.ipv6_udp_en || | |
3260 | vport->rss_tuple_sets.ipv6_sctp_en) | |
3261 | vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4; | |
3262 | else if (vport->rss_tuple_sets.ipv4_fragment_en || | |
3263 | vport->rss_tuple_sets.ipv6_fragment_en) | |
3264 | vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3; | |
3265 | else | |
3266 | vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE; | |
3267 | } | |
3268 | ||
46a3df9f S |
3269 | static int hclge_set_rss_input_tuple(struct hclge_dev *hdev) |
3270 | { | |
d44f9b63 | 3271 | struct hclge_rss_input_tuple_cmd *req; |
46a3df9f S |
3272 | struct hclge_desc desc; |
3273 | int ret; | |
3274 | ||
3275 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false); | |
3276 | ||
d44f9b63 | 3277 | req = (struct hclge_rss_input_tuple_cmd *)desc.data; |
6f2af429 YL |
3278 | |
3279 | /* Get the tuple cfg from the PF */ |
3280 | req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en; | |
3281 | req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en; | |
3282 | req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en; | |
3283 | req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en; | |
3284 | req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en; | |
3285 | req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en; | |
3286 | req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en; | |
3287 | req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en; | |
232fc64b | 3288 | hclge_get_rss_type(&hdev->vport[0]); |
46a3df9f | 3289 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); |
3f639907 | 3290 | if (ret) |
46a3df9f S |
3291 | dev_err(&hdev->pdev->dev, |
3292 | "Configure rss input fail, status = %d\n", ret); | |
3f639907 | 3293 | return ret; |
46a3df9f S |
3294 | } |
3295 | ||
3296 | static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir, | |
3297 | u8 *key, u8 *hfunc) | |
3298 | { | |
3299 | struct hclge_vport *vport = hclge_get_vport(handle); | |
46a3df9f S |
3300 | int i; |
3301 | ||
3302 | /* Get hash algorithm */ | |
775501a1 JS |
3303 | if (hfunc) { |
3304 | switch (vport->rss_algo) { | |
3305 | case HCLGE_RSS_HASH_ALGO_TOEPLITZ: | |
3306 | *hfunc = ETH_RSS_HASH_TOP; | |
3307 | break; | |
3308 | case HCLGE_RSS_HASH_ALGO_SIMPLE: | |
3309 | *hfunc = ETH_RSS_HASH_XOR; | |
3310 | break; | |
3311 | default: | |
3312 | *hfunc = ETH_RSS_HASH_UNKNOWN; | |
3313 | break; | |
3314 | } | |
3315 | } | |
46a3df9f S |
3316 | |
3317 | /* Get the RSS Key required by the user */ | |
3318 | if (key) | |
3319 | memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE); | |
3320 | ||
3321 | /* Get indirect table */ | |
3322 | if (indir) | |
3323 | for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) | |
3324 | indir[i] = vport->rss_indirection_tbl[i]; | |
3325 | ||
3326 | return 0; | |
3327 | } | |
3328 | ||
3329 | static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir, | |
3330 | const u8 *key, const u8 hfunc) | |
3331 | { | |
3332 | struct hclge_vport *vport = hclge_get_vport(handle); | |
3333 | struct hclge_dev *hdev = vport->back; | |
3334 | u8 hash_algo; | |
3335 | int ret, i; | |
3336 | ||
3337 | /* Set the RSS Hash Key if specified by the user */ |
3338 | if (key) { | |
775501a1 JS |
3339 | switch (hfunc) { |
3340 | case ETH_RSS_HASH_TOP: | |
46a3df9f | 3341 | hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ; |
775501a1 JS |
3342 | break; |
3343 | case ETH_RSS_HASH_XOR: | |
3344 | hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE; | |
3345 | break; | |
3346 | case ETH_RSS_HASH_NO_CHANGE: | |
3347 | hash_algo = vport->rss_algo; | |
3348 | break; | |
3349 | default: | |
46a3df9f | 3350 | return -EINVAL; |
775501a1 JS |
3351 | } |
3352 | ||
46a3df9f S |
3353 | ret = hclge_set_rss_algo_key(hdev, hash_algo, key); |
3354 | if (ret) | |
3355 | return ret; | |
89523cfa YL |
3356 | |
3357 | /* Update the shadow RSS key with the user specified key */ |
3358 | memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE); | |
3359 | vport->rss_algo = hash_algo; | |
46a3df9f S |
3360 | } |
3361 | ||
3362 | /* Update the shadow RSS table with user specified qids */ | |
3363 | for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) | |
3364 | vport->rss_indirection_tbl[i] = indir[i]; | |
3365 | ||
3366 | /* Update the hardware */ | |
89523cfa | 3367 | return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl); |
46a3df9f S |
3368 | } |
3369 | ||
f7db940a L |
3370 | static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc) |
3371 | { | |
3372 | u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0; | |
3373 | ||
3374 | if (nfc->data & RXH_L4_B_2_3) | |
3375 | hash_sets |= HCLGE_D_PORT_BIT; | |
3376 | else | |
3377 | hash_sets &= ~HCLGE_D_PORT_BIT; | |
3378 | ||
3379 | if (nfc->data & RXH_IP_SRC) | |
3380 | hash_sets |= HCLGE_S_IP_BIT; | |
3381 | else | |
3382 | hash_sets &= ~HCLGE_S_IP_BIT; | |
3383 | ||
3384 | if (nfc->data & RXH_IP_DST) | |
3385 | hash_sets |= HCLGE_D_IP_BIT; | |
3386 | else | |
3387 | hash_sets &= ~HCLGE_D_IP_BIT; | |
3388 | ||
3389 | if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW) | |
3390 | hash_sets |= HCLGE_V_TAG_BIT; | |
3391 | ||
3392 | return hash_sets; | |
3393 | } | |
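/* Editorial example: for "ethtool -N <dev> rx-flow-hash tcp4 sdfn" the
 * stack passes nfc->data = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 |
 * RXH_L4_B_2_3, which the helper above folds into HCLGE_S_IP_BIT |
 * HCLGE_D_IP_BIT | HCLGE_S_PORT_BIT | HCLGE_D_PORT_BIT; HCLGE_V_TAG_BIT
 * is added only for SCTP flows (presumably to hash the SCTP
 * verification tag as well).
 */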
3394 | ||
3395 | static int hclge_set_rss_tuple(struct hnae3_handle *handle, | |
3396 | struct ethtool_rxnfc *nfc) | |
3397 | { | |
3398 | struct hclge_vport *vport = hclge_get_vport(handle); | |
3399 | struct hclge_dev *hdev = vport->back; | |
3400 | struct hclge_rss_input_tuple_cmd *req; | |
3401 | struct hclge_desc desc; | |
3402 | u8 tuple_sets; | |
3403 | int ret; | |
3404 | ||
3405 | if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | | |
3406 | RXH_L4_B_0_1 | RXH_L4_B_2_3)) | |
3407 | return -EINVAL; | |
3408 | ||
3409 | req = (struct hclge_rss_input_tuple_cmd *)desc.data; | |
6f2af429 | 3410 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false); |
f7db940a | 3411 | |
6f2af429 YL |
3412 | req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en; |
3413 | req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en; | |
3414 | req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en; | |
3415 | req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en; | |
3416 | req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en; | |
3417 | req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en; | |
3418 | req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en; | |
3419 | req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en; | |
f7db940a L |
3420 | |
3421 | tuple_sets = hclge_get_rss_hash_bits(nfc); | |
3422 | switch (nfc->flow_type) { | |
3423 | case TCP_V4_FLOW: | |
3424 | req->ipv4_tcp_en = tuple_sets; | |
3425 | break; | |
3426 | case TCP_V6_FLOW: | |
3427 | req->ipv6_tcp_en = tuple_sets; | |
3428 | break; | |
3429 | case UDP_V4_FLOW: | |
3430 | req->ipv4_udp_en = tuple_sets; | |
3431 | break; | |
3432 | case UDP_V6_FLOW: | |
3433 | req->ipv6_udp_en = tuple_sets; | |
3434 | break; | |
3435 | case SCTP_V4_FLOW: | |
3436 | req->ipv4_sctp_en = tuple_sets; | |
3437 | break; | |
3438 | case SCTP_V6_FLOW: | |
3439 | if ((nfc->data & RXH_L4_B_0_1) || | |
3440 | (nfc->data & RXH_L4_B_2_3)) | |
3441 | return -EINVAL; | |
3442 | ||
3443 | req->ipv6_sctp_en = tuple_sets; | |
3444 | break; | |
3445 | case IPV4_FLOW: | |
3446 | req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER; | |
3447 | break; | |
3448 | case IPV6_FLOW: | |
3449 | req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER; | |
3450 | break; | |
3451 | default: | |
3452 | return -EINVAL; | |
3453 | } | |
3454 | ||
3455 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
6f2af429 | 3456 | if (ret) { |
f7db940a L |
3457 | dev_err(&hdev->pdev->dev, |
3458 | "Set rss tuple fail, status = %d\n", ret); | |
6f2af429 YL |
3459 | return ret; |
3460 | } | |
f7db940a | 3461 | |
6f2af429 YL |
3462 | vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en; |
3463 | vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en; | |
3464 | vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en; | |
3465 | vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en; | |
3466 | vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en; | |
3467 | vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en; | |
3468 | vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en; | |
3469 | vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en; | |
232fc64b | 3470 | hclge_get_rss_type(vport); |
6f2af429 | 3471 | return 0; |
f7db940a L |
3472 | } |
3473 | ||
07d29954 L |
3474 | static int hclge_get_rss_tuple(struct hnae3_handle *handle, |
3475 | struct ethtool_rxnfc *nfc) | |
3476 | { | |
3477 | struct hclge_vport *vport = hclge_get_vport(handle); | |
07d29954 | 3478 | u8 tuple_sets; |
07d29954 L |
3479 | |
3480 | nfc->data = 0; | |
3481 | ||
07d29954 L |
3482 | switch (nfc->flow_type) { |
3483 | case TCP_V4_FLOW: | |
6f2af429 | 3484 | tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en; |
07d29954 L |
3485 | break; |
3486 | case UDP_V4_FLOW: | |
6f2af429 | 3487 | tuple_sets = vport->rss_tuple_sets.ipv4_udp_en; |
07d29954 L |
3488 | break; |
3489 | case TCP_V6_FLOW: | |
6f2af429 | 3490 | tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en; |
07d29954 L |
3491 | break; |
3492 | case UDP_V6_FLOW: | |
6f2af429 | 3493 | tuple_sets = vport->rss_tuple_sets.ipv6_udp_en; |
07d29954 L |
3494 | break; |
3495 | case SCTP_V4_FLOW: | |
6f2af429 | 3496 | tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en; |
07d29954 L |
3497 | break; |
3498 | case SCTP_V6_FLOW: | |
6f2af429 | 3499 | tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en; |
07d29954 L |
3500 | break; |
3501 | case IPV4_FLOW: | |
3502 | case IPV6_FLOW: | |
3503 | tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT; | |
3504 | break; | |
3505 | default: | |
3506 | return -EINVAL; | |
3507 | } | |
3508 | ||
3509 | if (!tuple_sets) | |
3510 | return 0; | |
3511 | ||
3512 | if (tuple_sets & HCLGE_D_PORT_BIT) | |
3513 | nfc->data |= RXH_L4_B_2_3; | |
3514 | if (tuple_sets & HCLGE_S_PORT_BIT) | |
3515 | nfc->data |= RXH_L4_B_0_1; | |
3516 | if (tuple_sets & HCLGE_D_IP_BIT) | |
3517 | nfc->data |= RXH_IP_DST; | |
3518 | if (tuple_sets & HCLGE_S_IP_BIT) | |
3519 | nfc->data |= RXH_IP_SRC; | |
3520 | ||
3521 | return 0; | |
3522 | } | |
3523 | ||
46a3df9f S |
3524 | static int hclge_get_tc_size(struct hnae3_handle *handle) |
3525 | { | |
3526 | struct hclge_vport *vport = hclge_get_vport(handle); | |
3527 | struct hclge_dev *hdev = vport->back; | |
3528 | ||
3529 | return hdev->rss_size_max; | |
3530 | } | |
3531 | ||
77f255c1 | 3532 | int hclge_rss_init_hw(struct hclge_dev *hdev) |
46a3df9f | 3533 | { |
46a3df9f | 3534 | struct hclge_vport *vport = hdev->vport; |
268f5dfa YL |
3535 | u8 *rss_indir = vport[0].rss_indirection_tbl; |
3536 | u16 rss_size = vport[0].alloc_rss_size; | |
3537 | u8 *key = vport[0].rss_hash_key; | |
3538 | u8 hfunc = vport[0].rss_algo; | |
46a3df9f | 3539 | u16 tc_offset[HCLGE_MAX_TC_NUM]; |
46a3df9f S |
3540 | u16 tc_valid[HCLGE_MAX_TC_NUM]; |
3541 | u16 tc_size[HCLGE_MAX_TC_NUM]; | |
268f5dfa YL |
3542 | u16 roundup_size; |
3543 | int i, ret; | |
68ece54e | 3544 | |
46a3df9f S |
3545 | ret = hclge_set_rss_indir_table(hdev, rss_indir); |
3546 | if (ret) | |
268f5dfa | 3547 | return ret; |
46a3df9f | 3548 | |
46a3df9f S |
3549 | ret = hclge_set_rss_algo_key(hdev, hfunc, key); |
3550 | if (ret) | |
268f5dfa | 3551 | return ret; |
46a3df9f S |
3552 | |
3553 | ret = hclge_set_rss_input_tuple(hdev); | |
3554 | if (ret) | |
268f5dfa | 3555 | return ret; |
46a3df9f | 3556 | |
68ece54e YL |
3557 | /* Each TC has the same queue size, and the tc_size set to hardware is |
3558 | * the log2 of the roundup power of two of rss_size; the actual queue |
3559 | * size is limited by the indirection table. |
3560 | */ |
3561 | if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) { | |
3562 | dev_err(&hdev->pdev->dev, | |
3563 | "Configure rss tc size failed, invalid TC_SIZE = %d\n", | |
3564 | rss_size); | |
268f5dfa | 3565 | return -EINVAL; |
68ece54e YL |
3566 | } |
3567 | ||
3568 | roundup_size = roundup_pow_of_two(rss_size); | |
3569 | roundup_size = ilog2(roundup_size); | |
3570 | ||
46a3df9f | 3571 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { |
68ece54e | 3572 | tc_valid[i] = 0; |
46a3df9f | 3573 | |
68ece54e YL |
3574 | if (!(hdev->hw_tc_map & BIT(i))) |
3575 | continue; | |
3576 | ||
3577 | tc_valid[i] = 1; | |
3578 | tc_size[i] = roundup_size; | |
3579 | tc_offset[i] = rss_size * i; | |
46a3df9f | 3580 | } |
68ece54e | 3581 | |
268f5dfa YL |
3582 | return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset); |
3583 | } | |
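/* Editorial worked example for the tc_size/tc_offset setup above:
 * with rss_size = 24 and TCs 0 and 1 enabled,
 *
 *	roundup_pow_of_two(24) = 32, ilog2(32) = 5,
 *
 * so tc_size[0] = tc_size[1] = 5 (a 32-queue window) and
 * tc_offset = {0, 24}; the indirection table then keeps the queues
 * actually used within the 24 allocated per TC.
 */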
46a3df9f | 3584 | |
268f5dfa YL |
3585 | void hclge_rss_indir_init_cfg(struct hclge_dev *hdev) |
3586 | { | |
3587 | struct hclge_vport *vport = hdev->vport; | |
3588 | int i, j; | |
46a3df9f | 3589 | |
268f5dfa YL |
3590 | for (j = 0; j < hdev->num_vmdq_vport + 1; j++) { |
3591 | for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) | |
3592 | vport[j].rss_indirection_tbl[i] = | |
3593 | i % vport[j].alloc_rss_size; | |
3594 | } | |
3595 | } | |
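/* Editorial example: with alloc_rss_size = 4, the default table built
 * above is simply i % 4, i.e. 0, 1, 2, 3, 0, 1, 2, 3, ... across all
 * HCLGE_RSS_IND_TBL_SIZE entries, spreading RX flows evenly over the
 * four allocated queues.
 */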
3596 | ||
3597 | static void hclge_rss_init_cfg(struct hclge_dev *hdev) | |
3598 | { | |
3599 | struct hclge_vport *vport = hdev->vport; | |
3600 | int i; | |
3601 | ||
268f5dfa YL |
3602 | for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { |
3603 | vport[i].rss_tuple_sets.ipv4_tcp_en = | |
3604 | HCLGE_RSS_INPUT_TUPLE_OTHER; | |
3605 | vport[i].rss_tuple_sets.ipv4_udp_en = | |
3606 | HCLGE_RSS_INPUT_TUPLE_OTHER; | |
3607 | vport[i].rss_tuple_sets.ipv4_sctp_en = | |
3608 | HCLGE_RSS_INPUT_TUPLE_SCTP; | |
3609 | vport[i].rss_tuple_sets.ipv4_fragment_en = | |
3610 | HCLGE_RSS_INPUT_TUPLE_OTHER; | |
3611 | vport[i].rss_tuple_sets.ipv6_tcp_en = | |
3612 | HCLGE_RSS_INPUT_TUPLE_OTHER; | |
3613 | vport[i].rss_tuple_sets.ipv6_udp_en = | |
3614 | HCLGE_RSS_INPUT_TUPLE_OTHER; | |
3615 | vport[i].rss_tuple_sets.ipv6_sctp_en = | |
3616 | HCLGE_RSS_INPUT_TUPLE_SCTP; | |
3617 | vport[i].rss_tuple_sets.ipv6_fragment_en = | |
3618 | HCLGE_RSS_INPUT_TUPLE_OTHER; | |
3619 | ||
3620 | vport[i].rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ; | |
ea739c90 FL |
3621 | |
3622 | netdev_rss_key_fill(vport[i].rss_hash_key, HCLGE_RSS_KEY_SIZE); | |
268f5dfa YL |
3623 | } |
3624 | ||
3625 | hclge_rss_indir_init_cfg(hdev); | |
46a3df9f S |
3626 | } |
3627 | ||
84e095d6 SM |
3628 | int hclge_bind_ring_with_vector(struct hclge_vport *vport, |
3629 | int vector_id, bool en, | |
3630 | struct hnae3_ring_chain_node *ring_chain) | |
46a3df9f S |
3631 | { |
3632 | struct hclge_dev *hdev = vport->back; | |
46a3df9f S |
3633 | struct hnae3_ring_chain_node *node; |
3634 | struct hclge_desc desc; | |
84e095d6 SM |
3635 | struct hclge_ctrl_vector_chain_cmd *req |
3636 | = (struct hclge_ctrl_vector_chain_cmd *)desc.data; | |
3637 | enum hclge_cmd_status status; | |
3638 | enum hclge_opcode_type op; | |
3639 | u16 tqp_type_and_id; | |
46a3df9f S |
3640 | int i; |
3641 | ||
84e095d6 SM |
3642 | op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR; |
3643 | hclge_cmd_setup_basic_desc(&desc, op, false); | |
46a3df9f S |
3644 | req->int_vector_id = vector_id; |
3645 | ||
3646 | i = 0; | |
3647 | for (node = ring_chain; node; node = node->next) { | |
84e095d6 | 3648 | tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]); |
e4e87715 PL |
3649 | hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M, |
3650 | HCLGE_INT_TYPE_S, | |
3651 | hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B)); | |
3652 | hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M, | |
3653 | HCLGE_TQP_ID_S, node->tqp_index); | |
3654 | hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M, | |
3655 | HCLGE_INT_GL_IDX_S, | |
3656 | hnae3_get_field(node->int_gl_idx, | |
3657 | HNAE3_RING_GL_IDX_M, | |
3658 | HNAE3_RING_GL_IDX_S)); | |
84e095d6 | 3659 | req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id); |
46a3df9f S |
3660 | if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) { |
3661 | req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD; | |
84e095d6 | 3662 | req->vfid = vport->vport_id; |
46a3df9f | 3663 | |
84e095d6 SM |
3664 | status = hclge_cmd_send(&hdev->hw, &desc, 1); |
3665 | if (status) { | |
46a3df9f S |
3666 | dev_err(&hdev->pdev->dev, |
3667 | "Map TQP fail, status is %d.\n", | |
84e095d6 SM |
3668 | status); |
3669 | return -EIO; | |
46a3df9f S |
3670 | } |
3671 | i = 0; | |
3672 | ||
3673 | hclge_cmd_setup_basic_desc(&desc, | |
84e095d6 | 3674 | op, |
46a3df9f S |
3675 | false); |
3676 | req->int_vector_id = vector_id; | |
3677 | } | |
3678 | } | |
3679 | ||
3680 | if (i > 0) { | |
3681 | req->int_cause_num = i; | |
84e095d6 SM |
3682 | req->vfid = vport->vport_id; |
3683 | status = hclge_cmd_send(&hdev->hw, &desc, 1); | |
3684 | if (status) { | |
46a3df9f | 3685 | dev_err(&hdev->pdev->dev, |
84e095d6 SM |
3686 | "Map TQP fail, status is %d.\n", status); |
3687 | return -EIO; | |
46a3df9f S |
3688 | } |
3689 | } | |
3690 | ||
3691 | return 0; | |
3692 | } | |
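/* Editorial note: the ring chain above is flushed to firmware in batches
 * of HCLGE_VECTOR_ELEMENTS_PER_CMD entries per descriptor, with one
 * trailing command for any partially filled batch. For example
 * (illustratively taking 4 elements per command), mapping 10 rings
 * issues 3 commands carrying 4 + 4 + 2 entries.
 */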
3693 | ||
84e095d6 SM |
3694 | static int hclge_map_ring_to_vector(struct hnae3_handle *handle, |
3695 | int vector, | |
3696 | struct hnae3_ring_chain_node *ring_chain) | |
46a3df9f S |
3697 | { |
3698 | struct hclge_vport *vport = hclge_get_vport(handle); | |
3699 | struct hclge_dev *hdev = vport->back; | |
3700 | int vector_id; | |
3701 | ||
3702 | vector_id = hclge_get_vector_index(hdev, vector); | |
3703 | if (vector_id < 0) { | |
3704 | dev_err(&hdev->pdev->dev, | |
84e095d6 | 3705 | "Get vector index fail. vector_id =%d\n", vector_id); |
46a3df9f S |
3706 | return vector_id; |
3707 | } | |
3708 | ||
84e095d6 | 3709 | return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain); |
46a3df9f S |
3710 | } |
3711 | ||
84e095d6 SM |
3712 | static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, |
3713 | int vector, | |
3714 | struct hnae3_ring_chain_node *ring_chain) | |
46a3df9f S |
3715 | { |
3716 | struct hclge_vport *vport = hclge_get_vport(handle); | |
3717 | struct hclge_dev *hdev = vport->back; | |
84e095d6 | 3718 | int vector_id, ret; |
46a3df9f | 3719 | |
b50ae26c PL |
3720 | if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) |
3721 | return 0; | |
3722 | ||
46a3df9f S |
3723 | vector_id = hclge_get_vector_index(hdev, vector); |
3724 | if (vector_id < 0) { | |
3725 | dev_err(&handle->pdev->dev, | |
3726 | "Get vector index fail. ret =%d\n", vector_id); | |
3727 | return vector_id; | |
3728 | } | |
3729 | ||
84e095d6 | 3730 | ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain); |
0d3e6631 | 3731 | if (ret) |
84e095d6 SM |
3732 | dev_err(&handle->pdev->dev, |
3733 | "Unmap ring from vector fail. vectorid=%d, ret =%d\n", | |
3734 | vector_id, | |
3735 | ret); | |
46a3df9f | 3736 | |
0d3e6631 | 3737 | return ret; |
46a3df9f S |
3738 | } |
3739 | ||
3740 | int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, | |
3741 | struct hclge_promisc_param *param) | |
3742 | { | |
d44f9b63 | 3743 | struct hclge_promisc_cfg_cmd *req; |
46a3df9f S |
3744 | struct hclge_desc desc; |
3745 | int ret; | |
3746 | ||
3747 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false); | |
3748 | ||
d44f9b63 | 3749 | req = (struct hclge_promisc_cfg_cmd *)desc.data; |
46a3df9f | 3750 | req->vf_id = param->vf_id; |
96c0e861 PL |
3751 | |
3752 | /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on |
3753 | * pdev revision(0x20); newer revisions support them. These two fields |
3754 | * will not cause the firmware to return an error when the driver sends |
3755 | * the command in revision(0x20). |
3756 | */ |
3757 | req->flag = (param->enable << HCLGE_PROMISC_EN_B) | | |
3758 | HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B; | |
46a3df9f S |
3759 | |
3760 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
3f639907 | 3761 | if (ret) |
46a3df9f S |
3762 | dev_err(&hdev->pdev->dev, |
3763 | "Set promisc mode fail, status is %d.\n", ret); | |
3f639907 JS |
3764 | |
3765 | return ret; | |
46a3df9f S |
3766 | } |
3767 | ||
3768 | void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc, | |
3769 | bool en_mc, bool en_bc, int vport_id) | |
3770 | { | |
3771 | if (!param) | |
3772 | return; | |
3773 | ||
3774 | memset(param, 0, sizeof(struct hclge_promisc_param)); | |
3775 | if (en_uc) | |
3776 | param->enable = HCLGE_PROMISC_EN_UC; | |
3777 | if (en_mc) | |
3778 | param->enable |= HCLGE_PROMISC_EN_MC; | |
3779 | if (en_bc) | |
3780 | param->enable |= HCLGE_PROMISC_EN_BC; | |
3781 | param->vf_id = vport_id; | |
3782 | } | |
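/* Editorial usage sketch: enabling unicast and broadcast (but not
 * multicast) promiscuous mode on vport 0 would look like:
 *
 *	struct hclge_promisc_param param;
 *
 *	hclge_promisc_param_init(&param, true, false, true, 0);
 *	ret = hclge_cmd_set_promisc_mode(hdev, &param);
 */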
3783 | ||
7fa6be4f HT |
3784 | static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc, |
3785 | bool en_mc_pmc) | |
46a3df9f S |
3786 | { |
3787 | struct hclge_vport *vport = hclge_get_vport(handle); | |
3788 | struct hclge_dev *hdev = vport->back; | |
3789 | struct hclge_promisc_param param; | |
3790 | ||
3b75c3df PL |
3791 | hclge_promisc_param_init(¶m, en_uc_pmc, en_mc_pmc, true, |
3792 | vport->vport_id); | |
7fa6be4f | 3793 | return hclge_cmd_set_promisc_mode(hdev, ¶m); |
46a3df9f S |
3794 | } |
3795 | ||
d695964d JS |
3796 | static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode) |
3797 | { | |
3798 | struct hclge_get_fd_mode_cmd *req; | |
3799 | struct hclge_desc desc; | |
3800 | int ret; | |
3801 | ||
3802 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true); | |
3803 | ||
3804 | req = (struct hclge_get_fd_mode_cmd *)desc.data; | |
3805 | ||
3806 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
3807 | if (ret) { | |
3808 | dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret); | |
3809 | return ret; | |
3810 | } | |
3811 | ||
3812 | *fd_mode = req->mode; | |
3813 | ||
3814 | return ret; | |
3815 | } | |
3816 | ||
3817 | static int hclge_get_fd_allocation(struct hclge_dev *hdev, | |
3818 | u32 *stage1_entry_num, | |
3819 | u32 *stage2_entry_num, | |
3820 | u16 *stage1_counter_num, | |
3821 | u16 *stage2_counter_num) | |
3822 | { | |
3823 | struct hclge_get_fd_allocation_cmd *req; | |
3824 | struct hclge_desc desc; | |
3825 | int ret; | |
3826 | ||
3827 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true); | |
3828 | ||
3829 | req = (struct hclge_get_fd_allocation_cmd *)desc.data; | |
3830 | ||
3831 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
3832 | if (ret) { | |
3833 | dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n", | |
3834 | ret); | |
3835 | return ret; | |
3836 | } | |
3837 | ||
3838 | *stage1_entry_num = le32_to_cpu(req->stage1_entry_num); | |
3839 | *stage2_entry_num = le32_to_cpu(req->stage2_entry_num); | |
3840 | *stage1_counter_num = le16_to_cpu(req->stage1_counter_num); | |
3841 | *stage2_counter_num = le16_to_cpu(req->stage2_counter_num); | |
3842 | ||
3843 | return ret; | |
3844 | } | |
3845 | ||
3846 | static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num) | |
3847 | { | |
3848 | struct hclge_set_fd_key_config_cmd *req; | |
3849 | struct hclge_fd_key_cfg *stage; | |
3850 | struct hclge_desc desc; | |
3851 | int ret; | |
3852 | ||
3853 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false); | |
3854 | ||
3855 | req = (struct hclge_set_fd_key_config_cmd *)desc.data; | |
3856 | stage = &hdev->fd_cfg.key_cfg[stage_num]; | |
3857 | req->stage = stage_num; | |
3858 | req->key_select = stage->key_sel; | |
3859 | req->inner_sipv6_word_en = stage->inner_sipv6_word_en; | |
3860 | req->inner_dipv6_word_en = stage->inner_dipv6_word_en; | |
3861 | req->outer_sipv6_word_en = stage->outer_sipv6_word_en; | |
3862 | req->outer_dipv6_word_en = stage->outer_dipv6_word_en; | |
3863 | req->tuple_mask = cpu_to_le32(~stage->tuple_active); | |
3864 | req->meta_data_mask = cpu_to_le32(~stage->meta_data_active); | |
3865 | ||
3866 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
3867 | if (ret) | |
3868 | dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret); | |
3869 | ||
3870 | return ret; | |
3871 | } | |
3872 | ||
3873 | static int hclge_init_fd_config(struct hclge_dev *hdev) | |
3874 | { | |
3875 | #define LOW_2_WORDS 0x03 | |
3876 | struct hclge_fd_key_cfg *key_cfg; | |
3877 | int ret; | |
3878 | ||
3879 | if (!hnae3_dev_fd_supported(hdev)) | |
3880 | return 0; | |
3881 | ||
3882 | ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode); | |
3883 | if (ret) | |
3884 | return ret; | |
3885 | ||
3886 | switch (hdev->fd_cfg.fd_mode) { | |
3887 | case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1: | |
3888 | hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH; | |
3889 | break; | |
3890 | case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1: | |
3891 | hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2; | |
3892 | break; | |
3893 | default: | |
3894 | dev_err(&hdev->pdev->dev, | |
3895 | "Unsupported flow director mode %d\n", | |
3896 | hdev->fd_cfg.fd_mode); | |
3897 | return -EOPNOTSUPP; | |
3898 | } | |
3899 | ||
3900 | hdev->fd_cfg.fd_en = true; | |
3901 | hdev->fd_cfg.proto_support = | |
3902 | TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW | | |
3903 | UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW; | |
3904 | key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1]; | |
3905 | key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE; |
3906 | key_cfg->inner_sipv6_word_en = LOW_2_WORDS; | |
3907 | key_cfg->inner_dipv6_word_en = LOW_2_WORDS; | |
3908 | key_cfg->outer_sipv6_word_en = 0; | |
3909 | key_cfg->outer_dipv6_word_en = 0; | |
3910 | ||
3911 | key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) | | |
3912 | BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) | | |
3913 | BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | | |
3914 | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT); | |
3915 | ||
3916 | /* If the max 400-bit key is used, we can support tuples for ether type */ |
3917 | if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) { | |
3918 | hdev->fd_cfg.proto_support |= ETHER_FLOW; | |
3919 | key_cfg->tuple_active |= | |
3920 | BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC); | |
3921 | } | |
3922 | ||
3923 | /* roce_type is used to filter roce frames, |
3924 | * dst_vport is used to specify the destination vport of the rule |
3925 | */ |
3926 | key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT); | |
3927 | ||
3928 | ret = hclge_get_fd_allocation(hdev, | |
3929 | &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1], | |
3930 | &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2], | |
3931 | &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1], | |
3932 | &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]); | |
3933 | if (ret) | |
3934 | return ret; | |
3935 | ||
3936 | return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1); | |
3937 | } | |
3938 | ||
11732868 JS |
3939 | static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x, |
3940 | int loc, u8 *key, bool is_add) | |
3941 | { | |
3942 | struct hclge_fd_tcam_config_1_cmd *req1; | |
3943 | struct hclge_fd_tcam_config_2_cmd *req2; | |
3944 | struct hclge_fd_tcam_config_3_cmd *req3; | |
3945 | struct hclge_desc desc[3]; | |
3946 | int ret; | |
3947 | ||
3948 | hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false); | |
3949 | desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); | |
3950 | hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false); | |
3951 | desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); | |
3952 | hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false); | |
3953 | ||
3954 | req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data; | |
3955 | req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data; | |
3956 | req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data; | |
3957 | ||
3958 | req1->stage = stage; | |
3959 | req1->xy_sel = sel_x ? 1 : 0; | |
3960 | hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0); | |
3961 | req1->index = cpu_to_le32(loc); | |
3962 | req1->entry_vld = sel_x ? is_add : 0; | |
3963 | ||
3964 | if (key) { | |
3965 | memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data)); | |
3966 | memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)], | |
3967 | sizeof(req2->tcam_data)); | |
3968 | memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) + | |
3969 | sizeof(req2->tcam_data)], sizeof(req3->tcam_data)); | |
3970 | } | |
3971 | ||
3972 | ret = hclge_cmd_send(&hdev->hw, desc, 3); | |
3973 | if (ret) | |
3974 | dev_err(&hdev->pdev->dev, | |
3975 | "config tcam key fail, ret=%d\n", | |
3976 | ret); | |
3977 | ||
3978 | return ret; | |
3979 | } | |
3980 | ||
3981 | static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc, | |
3982 | struct hclge_fd_ad_data *action) | |
3983 | { | |
3984 | struct hclge_fd_ad_config_cmd *req; | |
3985 | struct hclge_desc desc; | |
3986 | u64 ad_data = 0; | |
3987 | int ret; | |
3988 | ||
3989 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false); | |
3990 | ||
3991 | req = (struct hclge_fd_ad_config_cmd *)desc.data; | |
3992 | req->index = cpu_to_le32(loc); | |
3993 | req->stage = stage; | |
3994 | ||
3995 | hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B, | |
3996 | action->write_rule_id_to_bd); | |
3997 | hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S, | |
3998 | action->rule_id); | |
3999 | ad_data <<= 32; | |
4000 | hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet); | |
4001 | hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B, | |
4002 | action->forward_to_direct_queue); | |
4003 | hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S, | |
4004 | action->queue_id); | |
4005 | hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter); | |
4006 | hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M, | |
4007 | HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id); | |
4008 | hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage); | |
4009 | hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S, | |
4010 | action->counter_id); | |
4011 | ||
4012 | req->ad_data = cpu_to_le64(ad_data); | |
4013 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
4014 | if (ret) | |
4015 | dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret); | |
4016 | ||
4017 | return ret; | |
4018 | } | |
4019 | ||
4020 | static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y, | |
4021 | struct hclge_fd_rule *rule) | |
4022 | { | |
4023 | u16 tmp_x_s, tmp_y_s; | |
4024 | u32 tmp_x_l, tmp_y_l; | |
4025 | int i; | |
4026 | ||
4027 | if (rule->unused_tuple & tuple_bit) | |
4028 | return true; | |
4029 | ||
4030 | switch (tuple_bit) { | |
4031 | case 0: | |
4032 | return false; | |
4033 | case BIT(INNER_DST_MAC): | |
4034 | for (i = 0; i < 6; i++) { | |
4035 | calc_x(key_x[5 - i], rule->tuples.dst_mac[i], | |
4036 | rule->tuples_mask.dst_mac[i]); | |
4037 | calc_y(key_y[5 - i], rule->tuples.dst_mac[i], | |
4038 | rule->tuples_mask.dst_mac[i]); | |
4039 | } | |
4040 | ||
4041 | return true; | |
4042 | case BIT(INNER_SRC_MAC): | |
4043 | for (i = 0; i < 6; i++) { | |
4044 | calc_x(key_x[5 - i], rule->tuples.src_mac[i], |
4045 | rule->tuples_mask.src_mac[i]); |
4046 | calc_y(key_y[5 - i], rule->tuples.src_mac[i], |
4047 | rule->tuples_mask.src_mac[i]); |
4048 | } | |
4049 | ||
4050 | return true; | |
4051 | case BIT(INNER_VLAN_TAG_FST): | |
4052 | calc_x(tmp_x_s, rule->tuples.vlan_tag1, | |
4053 | rule->tuples_mask.vlan_tag1); | |
4054 | calc_y(tmp_y_s, rule->tuples.vlan_tag1, | |
4055 | rule->tuples_mask.vlan_tag1); | |
4056 | *(__le16 *)key_x = cpu_to_le16(tmp_x_s); | |
4057 | *(__le16 *)key_y = cpu_to_le16(tmp_y_s); | |
4058 | ||
4059 | return true; | |
4060 | case BIT(INNER_ETH_TYPE): | |
4061 | calc_x(tmp_x_s, rule->tuples.ether_proto, | |
4062 | rule->tuples_mask.ether_proto); | |
4063 | calc_y(tmp_y_s, rule->tuples.ether_proto, | |
4064 | rule->tuples_mask.ether_proto); | |
4065 | *(__le16 *)key_x = cpu_to_le16(tmp_x_s); | |
4066 | *(__le16 *)key_y = cpu_to_le16(tmp_y_s); | |
4067 | ||
4068 | return true; | |
4069 | case BIT(INNER_IP_TOS): | |
4070 | calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos); | |
4071 | calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos); | |
4072 | ||
4073 | return true; | |
4074 | case BIT(INNER_IP_PROTO): | |
4075 | calc_x(*key_x, rule->tuples.ip_proto, | |
4076 | rule->tuples_mask.ip_proto); | |
4077 | calc_y(*key_y, rule->tuples.ip_proto, | |
4078 | rule->tuples_mask.ip_proto); | |
4079 | ||
4080 | return true; | |
4081 | case BIT(INNER_SRC_IP): | |
4082 | calc_x(tmp_x_l, rule->tuples.src_ip[3], | |
4083 | rule->tuples_mask.src_ip[3]); | |
4084 | calc_y(tmp_y_l, rule->tuples.src_ip[3], | |
4085 | rule->tuples_mask.src_ip[3]); | |
4086 | *(__le32 *)key_x = cpu_to_le32(tmp_x_l); | |
4087 | *(__le32 *)key_y = cpu_to_le32(tmp_y_l); | |
4088 | ||
4089 | return true; | |
4090 | case BIT(INNER_DST_IP): | |
4091 | calc_x(tmp_x_l, rule->tuples.dst_ip[3], | |
4092 | rule->tuples_mask.dst_ip[3]); | |
4093 | calc_y(tmp_y_l, rule->tuples.dst_ip[3], | |
4094 | rule->tuples_mask.dst_ip[3]); | |
4095 | *(__le32 *)key_x = cpu_to_le32(tmp_x_l); | |
4096 | *(__le32 *)key_y = cpu_to_le32(tmp_y_l); | |
4097 | ||
4098 | return true; | |
4099 | case BIT(INNER_SRC_PORT): | |
4100 | calc_x(tmp_x_s, rule->tuples.src_port, | |
4101 | rule->tuples_mask.src_port); | |
4102 | calc_y(tmp_y_s, rule->tuples.src_port, | |
4103 | rule->tuples_mask.src_port); | |
4104 | *(__le16 *)key_x = cpu_to_le16(tmp_x_s); | |
4105 | *(__le16 *)key_y = cpu_to_le16(tmp_y_s); | |
4106 | ||
4107 | return true; | |
4108 | case BIT(INNER_DST_PORT): | |
4109 | calc_x(tmp_x_s, rule->tuples.dst_port, | |
4110 | rule->tuples_mask.dst_port); | |
4111 | calc_y(tmp_y_s, rule->tuples.dst_port, | |
4112 | rule->tuples_mask.dst_port); | |
4113 | *(__le16 *)key_x = cpu_to_le16(tmp_x_s); | |
4114 | *(__le16 *)key_y = cpu_to_le16(tmp_y_s); | |
4115 | ||
4116 | return true; | |
4117 | default: | |
4118 | return false; | |
4119 | } | |
4120 | } | |
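/* Editorial sketch of the classic TCAM x/y encoding that the
 * calc_x()/calc_y() macros used above implement (the exact macro bodies
 * live in hclge_main.h; this generic form is an assumption for
 * illustration). Each data/mask bit pair becomes an (x, y) bit pair,
 * and masked-out bits become don't-cares:
 */
static void sketch_tcam_encode(u8 data, u8 mask, u8 *x, u8 *y)
{
	*x = data & mask;	/* bits that must be 1 to match */
	*y = ~data & mask;	/* bits that must be 0 to match */
	/* mask bit 0 => x = y = 0 for that bit: match anything */
}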
4121 | ||
4122 | static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id, | |
4123 | u8 vf_id, u8 network_port_id) | |
4124 | { | |
4125 | u32 port_number = 0; | |
4126 | ||
4127 | if (port_type == HOST_PORT) { | |
4128 | hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S, | |
4129 | pf_id); | |
4130 | hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S, | |
4131 | vf_id); | |
4132 | hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT); | |
4133 | } else { | |
4134 | hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M, | |
4135 | HCLGE_NETWORK_PORT_ID_S, network_port_id); | |
4136 | hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT); | |
4137 | } | |
4138 | ||
4139 | return port_number; | |
4140 | } | |
4141 | ||
4142 | static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg, | |
4143 | __le32 *key_x, __le32 *key_y, | |
4144 | struct hclge_fd_rule *rule) | |
4145 | { | |
4146 | u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number; | |
4147 | u8 cur_pos = 0, tuple_size, shift_bits; | |
4148 | int i; | |
4149 | ||
4150 | for (i = 0; i < MAX_META_DATA; i++) { | |
4151 | tuple_size = meta_data_key_info[i].key_length; | |
4152 | tuple_bit = key_cfg->meta_data_active & BIT(i); | |
4153 | ||
4154 | switch (tuple_bit) { | |
4155 | case BIT(ROCE_TYPE): | |
4156 | hnae3_set_bit(meta_data, cur_pos, NIC_PACKET); | |
4157 | cur_pos += tuple_size; | |
4158 | break; | |
4159 | case BIT(DST_VPORT): | |
4160 | port_number = hclge_get_port_number(HOST_PORT, 0, | |
4161 | rule->vf_id, 0); | |
4162 | hnae3_set_field(meta_data, | |
4163 | GENMASK(cur_pos + tuple_size - 1, cur_pos), |
4164 | cur_pos, port_number); | |
4165 | cur_pos += tuple_size; | |
4166 | break; | |
4167 | default: | |
4168 | break; | |
4169 | } | |
4170 | } | |
4171 | ||
4172 | calc_x(tmp_x, meta_data, 0xFFFFFFFF); | |
4173 | calc_y(tmp_y, meta_data, 0xFFFFFFFF); | |
4174 | shift_bits = sizeof(meta_data) * 8 - cur_pos; | |
4175 | ||
4176 | *key_x = cpu_to_le32(tmp_x << shift_bits); | |
4177 | *key_y = cpu_to_le32(tmp_y << shift_bits); | |
4178 | } | |
4179 | ||
4180 | /* A complete key consists of the meta data key and the tuple key. |
4181 | * The meta data key is stored in the MSB region, the tuple key in the |
4182 | * LSB region, and unused bits are filled with 0. |
4183 | */ |
4184 | static int hclge_config_key(struct hclge_dev *hdev, u8 stage, | |
4185 | struct hclge_fd_rule *rule) | |
4186 | { | |
4187 | struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage]; | |
4188 | u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES]; | |
4189 | u8 *cur_key_x, *cur_key_y; | |
4190 | int i, ret, tuple_size; | |
4191 | u8 meta_data_region; | |
4192 | ||
4193 | memset(key_x, 0, sizeof(key_x)); | |
4194 | memset(key_y, 0, sizeof(key_y)); | |
4195 | cur_key_x = key_x; | |
4196 | cur_key_y = key_y; | |
4197 | ||
4198 | for (i = 0; i < MAX_TUPLE; i++) { |
4199 | bool tuple_valid; | |
4200 | u32 check_tuple; | |
4201 | ||
4202 | tuple_size = tuple_key_info[i].key_length / 8; | |
4203 | check_tuple = key_cfg->tuple_active & BIT(i); | |
4204 | ||
4205 | tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x, | |
4206 | cur_key_y, rule); | |
4207 | if (tuple_valid) { | |
4208 | cur_key_x += tuple_size; | |
4209 | cur_key_y += tuple_size; | |
4210 | } | |
4211 | } | |
4212 | ||
4213 | meta_data_region = hdev->fd_cfg.max_key_length / 8 - | |
4214 | MAX_META_DATA_LENGTH / 8; | |
4215 | ||
4216 | hclge_fd_convert_meta_data(key_cfg, | |
4217 | (__le32 *)(key_x + meta_data_region), | |
4218 | (__le32 *)(key_y + meta_data_region), | |
4219 | rule); | |
4220 | ||
4221 | ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y, | |
4222 | true); | |
4223 | if (ret) { | |
4224 | dev_err(&hdev->pdev->dev, | |
4225 | "fd key_y config fail, loc=%d, ret=%d\n", | |
4226 | rule->location, ret); |
4227 | return ret; | |
4228 | } | |
4229 | ||
4230 | ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x, | |
4231 | true); | |
4232 | if (ret) | |
4233 | dev_err(&hdev->pdev->dev, | |
4234 | "fd key_x config fail, loc=%d, ret=%d\n", | |
4235 | rule->location, ret); |
4236 | return ret; | |
4237 | } | |
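/* Editorial note: layout of the complete flow director key assembled by
 * hclge_config_key() above, following the comment before it:
 *
 *	MSB                                                  LSB
 *	+------------------------+---------------------------+
 *	|     meta data key      |        tuple key          |
 *	| (MAX_META_DATA_LENGTH  | (active tuples, packed    |
 *	|  bits, at the offset   | from the start; unused    |
 *	|  computed above)       | bits are zero)            |
 *	+------------------------+---------------------------+
 */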
4238 | ||
4239 | static int hclge_config_action(struct hclge_dev *hdev, u8 stage, | |
4240 | struct hclge_fd_rule *rule) | |
4241 | { | |
4242 | struct hclge_fd_ad_data ad_data; | |
4243 | ||
4244 | ad_data.ad_id = rule->location; | |
4245 | ||
4246 | if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) { | |
4247 | ad_data.drop_packet = true; | |
4248 | ad_data.forward_to_direct_queue = false; | |
4249 | ad_data.queue_id = 0; | |
4250 | } else { | |
4251 | ad_data.drop_packet = false; | |
4252 | ad_data.forward_to_direct_queue = true; | |
4253 | ad_data.queue_id = rule->queue_id; | |
4254 | } | |
4255 | ||
4256 | ad_data.use_counter = false; | |
4257 | ad_data.counter_id = 0; | |
4258 | ||
4259 | ad_data.use_next_stage = false; | |
4260 | ad_data.next_input_key = 0; | |
4261 | ||
4262 | ad_data.write_rule_id_to_bd = true; | |
4263 | ad_data.rule_id = rule->location; | |
4264 | ||
4265 | return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data); | |
4266 | } | |
4267 | ||
dd74f815 JS |
4268 | static int hclge_fd_check_spec(struct hclge_dev *hdev, |
4269 | struct ethtool_rx_flow_spec *fs, u32 *unused) | |
4270 | { | |
4271 | struct ethtool_tcpip4_spec *tcp_ip4_spec; | |
4272 | struct ethtool_usrip4_spec *usr_ip4_spec; | |
4273 | struct ethtool_tcpip6_spec *tcp_ip6_spec; | |
4274 | struct ethtool_usrip6_spec *usr_ip6_spec; | |
4275 | struct ethhdr *ether_spec; | |
4276 | ||
4277 | if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) | |
4278 | return -EINVAL; | |
4279 | ||
4280 | if (!(fs->flow_type & hdev->fd_cfg.proto_support)) | |
4281 | return -EOPNOTSUPP; | |
4282 | ||
4283 | if ((fs->flow_type & FLOW_EXT) && | |
4284 | (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) { | |
4285 | dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n"); | |
4286 | return -EOPNOTSUPP; | |
4287 | } | |
4288 | ||
4289 | switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { | |
4290 | case SCTP_V4_FLOW: | |
4291 | case TCP_V4_FLOW: | |
4292 | case UDP_V4_FLOW: | |
4293 | tcp_ip4_spec = &fs->h_u.tcp_ip4_spec; | |
4294 | *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC); | |
4295 | ||
4296 | if (!tcp_ip4_spec->ip4src) | |
4297 | *unused |= BIT(INNER_SRC_IP); | |
4298 | ||
4299 | if (!tcp_ip4_spec->ip4dst) | |
4300 | *unused |= BIT(INNER_DST_IP); | |
4301 | ||
4302 | if (!tcp_ip4_spec->psrc) | |
4303 | *unused |= BIT(INNER_SRC_PORT); | |
4304 | ||
4305 | if (!tcp_ip4_spec->pdst) | |
4306 | *unused |= BIT(INNER_DST_PORT); | |
4307 | ||
4308 | if (!tcp_ip4_spec->tos) | |
4309 | *unused |= BIT(INNER_IP_TOS); | |
4310 | ||
4311 | break; | |
4312 | case IP_USER_FLOW: | |
4313 | usr_ip4_spec = &fs->h_u.usr_ip4_spec; | |
4314 | *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | | |
4315 | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT); | |
4316 | ||
4317 | if (!usr_ip4_spec->ip4src) | |
4318 | *unused |= BIT(INNER_SRC_IP); | |
4319 | ||
4320 | if (!usr_ip4_spec->ip4dst) | |
4321 | *unused |= BIT(INNER_DST_IP); | |
4322 | ||
4323 | if (!usr_ip4_spec->tos) | |
4324 | *unused |= BIT(INNER_IP_TOS); | |
4325 | ||
4326 | if (!usr_ip4_spec->proto) | |
4327 | *unused |= BIT(INNER_IP_PROTO); | |
4328 | ||
4329 | if (usr_ip4_spec->l4_4_bytes) | |
4330 | return -EOPNOTSUPP; | |
4331 | ||
4332 | if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4) | |
4333 | return -EOPNOTSUPP; | |
4334 | ||
4335 | break; | |
4336 | case SCTP_V6_FLOW: | |
4337 | case TCP_V6_FLOW: | |
4338 | case UDP_V6_FLOW: | |
4339 | tcp_ip6_spec = &fs->h_u.tcp_ip6_spec; | |
4340 | *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | | |
4341 | BIT(INNER_IP_TOS); | |
4342 | ||
4343 | if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] && | |
4344 | !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3]) | |
4345 | *unused |= BIT(INNER_SRC_IP); | |
4346 | ||
4347 | if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] && | |
4348 | !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3]) | |
4349 | *unused |= BIT(INNER_DST_IP); | |
4350 | ||
4351 | if (!tcp_ip6_spec->psrc) | |
4352 | *unused |= BIT(INNER_SRC_PORT); | |
4353 | ||
4354 | if (!tcp_ip6_spec->pdst) | |
4355 | *unused |= BIT(INNER_DST_PORT); | |
4356 | ||
4357 | if (tcp_ip6_spec->tclass) | |
4358 | return -EOPNOTSUPP; | |
4359 | ||
4360 | break; | |
4361 | case IPV6_USER_FLOW: | |
4362 | usr_ip6_spec = &fs->h_u.usr_ip6_spec; | |
4363 | *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | | |
4364 | BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | | |
4365 | BIT(INNER_DST_PORT); | |
4366 | ||
4367 | if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] && | |
4368 | !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3]) | |
4369 | *unused |= BIT(INNER_SRC_IP); | |
4370 | ||
4371 | if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] && | |
4372 | !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3]) | |
4373 | *unused |= BIT(INNER_DST_IP); | |
4374 | ||
4375 | if (!usr_ip6_spec->l4_proto) | |
4376 | *unused |= BIT(INNER_IP_PROTO); | |
4377 | ||
4378 | if (usr_ip6_spec->tclass) | |
4379 | return -EOPNOTSUPP; | |
4380 | ||
4381 | if (usr_ip6_spec->l4_4_bytes) | |
4382 | return -EOPNOTSUPP; | |
4383 | ||
4384 | break; | |
4385 | case ETHER_FLOW: | |
4386 | ether_spec = &fs->h_u.ether_spec; | |
4387 | *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | | |
4388 | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) | | |
4389 | BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO); | |
4390 | ||
4391 | if (is_zero_ether_addr(ether_spec->h_source)) | |
4392 | *unused |= BIT(INNER_SRC_MAC); | |
4393 | ||
4394 | if (is_zero_ether_addr(ether_spec->h_dest)) | |
4395 | *unused |= BIT(INNER_DST_MAC); | |
4396 | ||
4397 | if (!ether_spec->h_proto) | |
4398 | *unused |= BIT(INNER_ETH_TYPE); | |
4399 | ||
4400 | break; | |
4401 | default: | |
4402 | return -EOPNOTSUPP; | |
4403 | } | |
4404 | ||
4405 | if ((fs->flow_type & FLOW_EXT)) { | |
4406 | if (fs->h_ext.vlan_etype) | |
4407 | return -EOPNOTSUPP; | |
4408 | if (!fs->h_ext.vlan_tci) | |
4409 | *unused |= BIT(INNER_VLAN_TAG_FST); | |
4410 | ||
4411 | if (fs->m_ext.vlan_tci) { | |
4412 | if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) | |
4413 | return -EINVAL; | |
4414 | } | |
4415 | } else { | |
4416 | *unused |= BIT(INNER_VLAN_TAG_FST); | |
4417 | } | |
4418 | ||
4419 | if (fs->flow_type & FLOW_MAC_EXT) { | |
4420 | if (!(hdev->fd_cfg.proto_support & ETHER_FLOW)) | |
4421 | return -EOPNOTSUPP; | |
4422 | ||
4423 | if (is_zero_ether_addr(fs->h_ext.h_dest)) | |
4424 | *unused |= BIT(INNER_DST_MAC); | |
4425 | else | |
4426 | *unused &= ~(BIT(INNER_DST_MAC)); | |
4427 | } | |
4428 | ||
4429 | return 0; | |
4430 | } | |
4431 | ||
4432 | static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location) | |
4433 | { | |
4434 | struct hclge_fd_rule *rule = NULL; | |
4435 | struct hlist_node *node2; | |
4436 | ||
4437 | hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) { | |
4438 | if (rule->location >= location) | |
4439 | break; | |
4440 | } | |
4441 | ||
4442 | return rule && rule->location == location; | |
4443 | } | |
4444 | ||
4445 | static int hclge_fd_update_rule_list(struct hclge_dev *hdev, | |
4446 | struct hclge_fd_rule *new_rule, | |
4447 | u16 location, | |
4448 | bool is_add) | |
4449 | { | |
4450 | struct hclge_fd_rule *rule = NULL, *parent = NULL; | |
4451 | struct hlist_node *node2; | |
4452 | ||
4453 | if (is_add && !new_rule) | |
4454 | return -EINVAL; | |
4455 | ||
4456 | hlist_for_each_entry_safe(rule, node2, | |
4457 | &hdev->fd_rule_list, rule_node) { | |
4458 | if (rule->location >= location) | |
4459 | break; | |
4460 | parent = rule; | |
4461 | } | |
4462 | ||
4463 | if (rule && rule->location == location) { | |
4464 | hlist_del(&rule->rule_node); | |
4465 | kfree(rule); | |
4466 | hdev->hclge_fd_rule_num--; | |
4467 | ||
4468 | if (!is_add) | |
4469 | return 0; | |
4470 | ||
4471 | } else if (!is_add) { | |
4472 | dev_err(&hdev->pdev->dev, | |
4473 | "delete fail, rule %d is inexistent\n", | |
4474 | location); | |
4475 | return -EINVAL; | |
4476 | } | |
4477 | ||
4478 | INIT_HLIST_NODE(&new_rule->rule_node); | |
4479 | ||
4480 | if (parent) | |
4481 | hlist_add_behind(&new_rule->rule_node, &parent->rule_node); | |
4482 | else | |
4483 | hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list); | |
4484 | ||
4485 | hdev->hclge_fd_rule_num++; | |
4486 | ||
4487 | return 0; | |
4488 | } | |
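/* Editorial note: fd_rule_list is kept sorted by rule->location. The walk
 * above stops at the first rule at or past the target location; an exact
 * match is deleted (and, when is_add is true, replaced), a missing rule
 * makes deletion fail, and an insert is linked behind "parent" so the
 * ordering invariant is preserved.
 */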
4489 | ||
4490 | static int hclge_fd_get_tuple(struct hclge_dev *hdev, | |
4491 | struct ethtool_rx_flow_spec *fs, | |
4492 | struct hclge_fd_rule *rule) | |
4493 | { | |
4494 | u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT); | |
4495 | ||
4496 | switch (flow_type) { | |
4497 | case SCTP_V4_FLOW: | |
4498 | case TCP_V4_FLOW: | |
4499 | case UDP_V4_FLOW: | |
4500 | rule->tuples.src_ip[3] = | |
4501 | be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src); | |
4502 | rule->tuples_mask.src_ip[3] = | |
4503 | be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src); | |
4504 | ||
4505 | rule->tuples.dst_ip[3] = | |
4506 | be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst); | |
4507 | rule->tuples_mask.dst_ip[3] = | |
4508 | be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst); | |
4509 | ||
4510 | rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc); | |
4511 | rule->tuples_mask.src_port = | |
4512 | be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc); | |
4513 | ||
4514 | rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst); | |
4515 | rule->tuples_mask.dst_port = | |
4516 | be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst); | |
4517 | ||
4518 | rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos; | |
4519 | rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos; | |
4520 | ||
4521 | rule->tuples.ether_proto = ETH_P_IP; | |
4522 | rule->tuples_mask.ether_proto = 0xFFFF; | |
4523 | ||
4524 | break; | |
4525 | case IP_USER_FLOW: | |
4526 | rule->tuples.src_ip[3] = | |
4527 | be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src); | |
4528 | rule->tuples_mask.src_ip[3] = | |
4529 | be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src); | |
4530 | ||
4531 | rule->tuples.dst_ip[3] = | |
4532 | be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst); | |
4533 | rule->tuples_mask.dst_ip[3] = | |
4534 | be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst); | |
4535 | ||
4536 | rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos; | |
4537 | rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos; | |
4538 | ||
4539 | rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto; | |
4540 | rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto; | |
4541 | ||
4542 | rule->tuples.ether_proto = ETH_P_IP; | |
4543 | rule->tuples_mask.ether_proto = 0xFFFF; | |
4544 | ||
4545 | break; | |
4546 | case SCTP_V6_FLOW: | |
4547 | case TCP_V6_FLOW: | |
4548 | case UDP_V6_FLOW: | |
4549 | be32_to_cpu_array(rule->tuples.src_ip, | |
4550 | fs->h_u.tcp_ip6_spec.ip6src, 4); | |
4551 | be32_to_cpu_array(rule->tuples_mask.src_ip, | |
4552 | fs->m_u.tcp_ip6_spec.ip6src, 4); | |
4553 | ||
4554 | be32_to_cpu_array(rule->tuples.dst_ip, | |
4555 | fs->h_u.tcp_ip6_spec.ip6dst, 4); | |
4556 | be32_to_cpu_array(rule->tuples_mask.dst_ip, | |
4557 | fs->m_u.tcp_ip6_spec.ip6dst, 4); | |
4558 | ||
4559 | rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc); | |
4560 | rule->tuples_mask.src_port = | |
4561 | be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc); | |
4562 | ||
4563 | rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst); | |
4564 | rule->tuples_mask.dst_port = | |
4565 | be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst); | |
4566 | ||
4567 | rule->tuples.ether_proto = ETH_P_IPV6; | |
4568 | rule->tuples_mask.ether_proto = 0xFFFF; | |
4569 | ||
4570 | break; | |
4571 | case IPV6_USER_FLOW: | |
4572 | be32_to_cpu_array(rule->tuples.src_ip, | |
4573 | fs->h_u.usr_ip6_spec.ip6src, 4); | |
4574 | be32_to_cpu_array(rule->tuples_mask.src_ip, | |
4575 | fs->m_u.usr_ip6_spec.ip6src, 4); | |
4576 | ||
4577 | be32_to_cpu_array(rule->tuples.dst_ip, | |
4578 | fs->h_u.usr_ip6_spec.ip6dst, 4); | |
4579 | be32_to_cpu_array(rule->tuples_mask.dst_ip, | |
4580 | fs->m_u.usr_ip6_spec.ip6dst, 4); | |
4581 | ||
4582 | rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto; | |
4583 | rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto; | |
4584 | ||
4585 | rule->tuples.ether_proto = ETH_P_IPV6; | |
4586 | rule->tuples_mask.ether_proto = 0xFFFF; | |
4587 | ||
4588 | break; | |
4589 | case ETHER_FLOW: | |
4590 | ether_addr_copy(rule->tuples.src_mac, | |
4591 | fs->h_u.ether_spec.h_source); | |
4592 | ether_addr_copy(rule->tuples_mask.src_mac, | |
4593 | fs->m_u.ether_spec.h_source); | |
4594 | ||
4595 | ether_addr_copy(rule->tuples.dst_mac, | |
4596 | fs->h_u.ether_spec.h_dest); | |
4597 | ether_addr_copy(rule->tuples_mask.dst_mac, | |
4598 | fs->m_u.ether_spec.h_dest); | |
4599 | ||
4600 | rule->tuples.ether_proto = | |
4601 | be16_to_cpu(fs->h_u.ether_spec.h_proto); | |
4602 | rule->tuples_mask.ether_proto = | |
4603 | be16_to_cpu(fs->m_u.ether_spec.h_proto); | |
4604 | ||
4605 | break; | |
4606 | default: | |
4607 | return -EOPNOTSUPP; | |
4608 | } | |
4609 | ||
4610 | switch (flow_type) { | |
4611 | case SCTP_V4_FLOW: | |
4612 | case SCTP_V6_FLOW: | |
4613 | rule->tuples.ip_proto = IPPROTO_SCTP; | |
4614 | rule->tuples_mask.ip_proto = 0xFF; | |
4615 | break; | |
4616 | case TCP_V4_FLOW: | |
4617 | case TCP_V6_FLOW: | |
4618 | rule->tuples.ip_proto = IPPROTO_TCP; | |
4619 | rule->tuples_mask.ip_proto = 0xFF; | |
4620 | break; | |
4621 | case UDP_V4_FLOW: | |
4622 | case UDP_V6_FLOW: | |
4623 | rule->tuples.ip_proto = IPPROTO_UDP; | |
4624 | rule->tuples_mask.ip_proto = 0xFF; | |
4625 | break; | |
4626 | default: | |
4627 | break; | |
4628 | } | |
4629 | ||
4630 | if ((fs->flow_type & FLOW_EXT)) { | |
4631 | rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci); | |
4632 | rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci); | |
4633 | } | |
4634 | ||
4635 | if (fs->flow_type & FLOW_MAC_EXT) { | |
4636 | ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest); | |
4637 | ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest); | |
4638 | } | |
4639 | ||
4640 | return 0; | |
4641 | } | |
4642 | ||
4643 | static int hclge_add_fd_entry(struct hnae3_handle *handle, | |
4644 | struct ethtool_rxnfc *cmd) | |
4645 | { | |
4646 | struct hclge_vport *vport = hclge_get_vport(handle); | |
4647 | struct hclge_dev *hdev = vport->back; | |
4648 | u16 dst_vport_id = 0, q_index = 0; | |
4649 | struct ethtool_rx_flow_spec *fs; | |
4650 | struct hclge_fd_rule *rule; | |
4651 | u32 unused = 0; | |
4652 | u8 action; | |
4653 | int ret; | |
4654 | ||
4655 | if (!hnae3_dev_fd_supported(hdev)) | |
4656 | return -EOPNOTSUPP; | |
4657 | ||
4658 | if (!hdev->fd_cfg.fd_en) { | |
4659 | dev_warn(&hdev->pdev->dev, | |
4660 | "Please enable flow director first\n"); | |
4661 | return -EOPNOTSUPP; | |
4662 | } | |
4663 | ||
4664 | fs = (struct ethtool_rx_flow_spec *)&cmd->fs; | |
4665 | ||
4666 | ret = hclge_fd_check_spec(hdev, fs, &unused); | |
4667 | if (ret) { | |
4668 | dev_err(&hdev->pdev->dev, "Check fd spec failed\n"); | |
4669 | return ret; | |
4670 | } | |
4671 | ||
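| /* ring_cookie is either RX_CLS_FLOW_DISC (drop) or a packed value |
|  * with the destination queue in the low 32 bits and a vf field above |
|  * it (shifted by ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF, i.e. 32). A zero |
|  * vf field selects this vport (the PF); a nonzero one selects |
|  * hdev->vport[vf]. |
|  */ |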
4672 | if (fs->ring_cookie == RX_CLS_FLOW_DISC) { | |
4673 | action = HCLGE_FD_ACTION_DROP_PACKET; | |
4674 | } else { | |
4675 | u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie); | |
4676 | u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie); | |
4677 | u16 tqps; | |
4678 | ||
4679 | if (vf > hdev->num_req_vfs) { | 
4680 | dev_err(&hdev->pdev->dev, | 
4681 | "Error: vf id (%d) > max vf num (%d)\n", | 
4682 | vf, hdev->num_req_vfs); | 
4683 | return -EINVAL; | 
4684 | } | 
4685 | ||
4686 | dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id; | 
4687 | tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps; | 
4688 | ||
4689 | if (ring >= tqps) { | 
4690 | dev_err(&hdev->pdev->dev, | 
4691 | "Error: queue id (%d) > max queue id (%d)\n", | 
4692 | ring, tqps - 1); | 
4693 | return -EINVAL; | 
4694 | } | 
4695 | ||
4696 | action = HCLGE_FD_ACTION_ACCEPT_PACKET; | |
4697 | q_index = ring; | |
4698 | } | |
4699 | ||
4700 | rule = kzalloc(sizeof(*rule), GFP_KERNEL); | |
4701 | if (!rule) | |
4702 | return -ENOMEM; | |
4703 | ||
4704 | ret = hclge_fd_get_tuple(hdev, fs, rule); | |
4705 | if (ret) | |
4706 | goto free_rule; | |
4707 | ||
4708 | rule->flow_type = fs->flow_type; | |
4709 | ||
4710 | rule->location = fs->location; | |
4711 | rule->unused_tuple = unused; | |
4712 | rule->vf_id = dst_vport_id; | |
4713 | rule->queue_id = q_index; | |
4714 | rule->action = action; | |
4715 | ||
4716 | ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule); | |
4717 | if (ret) | |
4718 | goto free_rule; | |
4719 | ||
4720 | ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule); | |
4721 | if (ret) | |
4722 | goto free_rule; | |
4723 | ||
4724 | ret = hclge_fd_update_rule_list(hdev, rule, fs->location, true); | |
4725 | if (ret) | |
4726 | goto free_rule; | |
4727 | ||
4728 | return ret; | |
4729 | ||
4730 | free_rule: | |
4731 | kfree(rule); | |
4732 | return ret; | |
4733 | } | |
4734 | ||
4735 | static int hclge_del_fd_entry(struct hnae3_handle *handle, | |
4736 | struct ethtool_rxnfc *cmd) | |
4737 | { | |
4738 | struct hclge_vport *vport = hclge_get_vport(handle); | |
4739 | struct hclge_dev *hdev = vport->back; | |
4740 | struct ethtool_rx_flow_spec *fs; | |
4741 | int ret; | |
4742 | ||
4743 | if (!hnae3_dev_fd_supported(hdev)) | |
4744 | return -EOPNOTSUPP; | |
4745 | ||
4746 | fs = (struct ethtool_rx_flow_spec *)&cmd->fs; | |
4747 | ||
4748 | if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) | |
4749 | return -EINVAL; | |
4750 | ||
4751 | if (!hclge_fd_rule_exist(hdev, fs->location)) { | |
4752 | dev_err(&hdev->pdev->dev, | |
4753 | "Delete fail, rule %d does not exist\n", | 
4754 | fs->location); | |
4755 | return -ENOENT; | |
4756 | } | |
4757 | ||
4758 | ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, | |
4759 | fs->location, NULL, false); | |
4760 | if (ret) | |
4761 | return ret; | |
4762 | ||
4763 | return hclge_fd_update_rule_list(hdev, NULL, fs->location, | |
4764 | false); | |
4765 | } | |
4766 | ||
6871af29 JS |
4767 | static void hclge_del_all_fd_entries(struct hnae3_handle *handle, |
4768 | bool clear_list) | |
4769 | { | |
4770 | struct hclge_vport *vport = hclge_get_vport(handle); | |
4771 | struct hclge_dev *hdev = vport->back; | |
4772 | struct hclge_fd_rule *rule; | |
4773 | struct hlist_node *node; | |
4774 | ||
4775 | if (!hnae3_dev_fd_supported(hdev)) | |
4776 | return; | |
4777 | ||
4778 | if (clear_list) { | |
4779 | hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, | |
4780 | rule_node) { | |
4781 | hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, | |
4782 | rule->location, NULL, false); | |
4783 | hlist_del(&rule->rule_node); | |
4784 | kfree(rule); | |
4785 | hdev->hclge_fd_rule_num--; | |
4786 | } | |
4787 | } else { | |
4788 | hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, | |
4789 | rule_node) | |
4790 | hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, | |
4791 | rule->location, NULL, false); | |
4792 | } | |
4793 | } | |
4794 | ||
4795 | static int hclge_restore_fd_entries(struct hnae3_handle *handle) | |
4796 | { | |
4797 | struct hclge_vport *vport = hclge_get_vport(handle); | |
4798 | struct hclge_dev *hdev = vport->back; | |
4799 | struct hclge_fd_rule *rule; | |
4800 | struct hlist_node *node; | |
4801 | int ret; | |
4802 | ||
65e41e7e HT |
4803 | /* Return 0 here, because the reset error handling checks this | 
4804 | * return value; returning an error here would make the whole reset | 
4805 | * process fail. | 
4806 | */ | |
6871af29 | 4807 | if (!hnae3_dev_fd_supported(hdev)) |
65e41e7e | 4808 | return 0; |
6871af29 JS |
4809 | |
4810 | hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { | |
4811 | ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule); | |
4812 | if (!ret) | |
4813 | ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule); | |
4814 | ||
4815 | if (ret) { | |
4816 | dev_warn(&hdev->pdev->dev, | |
4817 | "Restore rule %d failed, remove it\n", | |
4818 | rule->location); | |
4819 | hlist_del(&rule->rule_node); | |
4820 | kfree(rule); | |
4821 | hdev->hclge_fd_rule_num--; | |
4822 | } | |
4823 | } | |
4824 | return 0; | |
4825 | } | |
4826 | ||
05c2314f JS |
4827 | static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle, |
4828 | struct ethtool_rxnfc *cmd) | |
4829 | { | |
4830 | struct hclge_vport *vport = hclge_get_vport(handle); | |
4831 | struct hclge_dev *hdev = vport->back; | |
4832 | ||
4833 | if (!hnae3_dev_fd_supported(hdev)) | |
4834 | return -EOPNOTSUPP; | |
4835 | ||
4836 | cmd->rule_cnt = hdev->hclge_fd_rule_num; | |
4837 | cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]; | |
4838 | ||
4839 | return 0; | |
4840 | } | |
4841 | ||
4842 | static int hclge_get_fd_rule_info(struct hnae3_handle *handle, | |
4843 | struct ethtool_rxnfc *cmd) | |
4844 | { | |
4845 | struct hclge_vport *vport = hclge_get_vport(handle); | |
4846 | struct hclge_fd_rule *rule = NULL; | |
4847 | struct hclge_dev *hdev = vport->back; | |
4848 | struct ethtool_rx_flow_spec *fs; | |
4849 | struct hlist_node *node2; | |
4850 | ||
4851 | if (!hnae3_dev_fd_supported(hdev)) | |
4852 | return -EOPNOTSUPP; | |
4853 | ||
4854 | fs = (struct ethtool_rx_flow_spec *)&cmd->fs; | |
4855 | ||
4856 | hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) { | |
4857 | if (rule->location >= fs->location) | |
4858 | break; | |
4859 | } | |
4860 | ||
4861 | if (!rule || fs->location != rule->location) | |
4862 | return -ENOENT; | |
4863 | ||
4864 | fs->flow_type = rule->flow_type; | |
4865 | switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { | |
4866 | case SCTP_V4_FLOW: | |
4867 | case TCP_V4_FLOW: | |
4868 | case UDP_V4_FLOW: | |
4869 | fs->h_u.tcp_ip4_spec.ip4src = | |
4870 | cpu_to_be32(rule->tuples.src_ip[3]); | |
4871 | fs->m_u.tcp_ip4_spec.ip4src = | |
4872 | rule->unused_tuple & BIT(INNER_SRC_IP) ? | |
4873 | 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]); | |
4874 | ||
4875 | fs->h_u.tcp_ip4_spec.ip4dst = | |
4876 | cpu_to_be32(rule->tuples.dst_ip[3]); | |
4877 | fs->m_u.tcp_ip4_spec.ip4dst = | |
4878 | rule->unused_tuple & BIT(INNER_DST_IP) ? | |
4879 | 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]); | |
4880 | ||
4881 | fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port); | |
4882 | fs->m_u.tcp_ip4_spec.psrc = | |
4883 | rule->unused_tuple & BIT(INNER_SRC_PORT) ? | |
4884 | 0 : cpu_to_be16(rule->tuples_mask.src_port); | |
4885 | ||
4886 | fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port); | |
4887 | fs->m_u.tcp_ip4_spec.pdst = | |
4888 | rule->unused_tuple & BIT(INNER_DST_PORT) ? | |
4889 | 0 : cpu_to_be16(rule->tuples_mask.dst_port); | |
4890 | ||
4891 | fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos; | |
4892 | fs->m_u.tcp_ip4_spec.tos = | |
4893 | rule->unused_tuple & BIT(INNER_IP_TOS) ? | |
4894 | 0 : rule->tuples_mask.ip_tos; | |
4895 | ||
4896 | break; | |
4897 | case IP_USER_FLOW: | |
4898 | fs->h_u.usr_ip4_spec.ip4src = | |
4899 | cpu_to_be32(rule->tuples.src_ip[3]); | |
4900 | fs->m_u.usr_ip4_spec.ip4src = | 
4901 | rule->unused_tuple & BIT(INNER_SRC_IP) ? | |
4902 | 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]); | |
4903 | ||
4904 | fs->h_u.usr_ip4_spec.ip4dst = | |
4905 | cpu_to_be32(rule->tuples.dst_ip[3]); | |
4906 | fs->m_u.usr_ip4_spec.ip4dst = | |
4907 | rule->unused_tuple & BIT(INNER_DST_IP) ? | |
4908 | 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]); | |
4909 | ||
4910 | fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos; | |
4911 | fs->m_u.usr_ip4_spec.tos = | |
4912 | rule->unused_tuple & BIT(INNER_IP_TOS) ? | |
4913 | 0 : rule->tuples_mask.ip_tos; | |
4914 | ||
4915 | fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto; | |
4916 | fs->m_u.usr_ip4_spec.proto = | |
4917 | rule->unused_tuple & BIT(INNER_IP_PROTO) ? | |
4918 | 0 : rule->tuples_mask.ip_proto; | |
4919 | ||
4920 | fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; | |
4921 | ||
4922 | break; | |
4923 | case SCTP_V6_FLOW: | |
4924 | case TCP_V6_FLOW: | |
4925 | case UDP_V6_FLOW: | |
4926 | cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src, | |
4927 | rule->tuples.src_ip, 4); | |
4928 | if (rule->unused_tuple & BIT(INNER_SRC_IP)) | |
4929 | memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(fs->m_u.tcp_ip6_spec.ip6src)); | 
4930 | else | |
4931 | cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src, | |
4932 | rule->tuples_mask.src_ip, 4); | |
4933 | ||
4934 | cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst, | |
4935 | rule->tuples.dst_ip, 4); | |
4936 | if (rule->unused_tuple & BIT(INNER_DST_IP)) | |
4937 | memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(fs->m_u.tcp_ip6_spec.ip6dst)); | 
4938 | else | |
4939 | cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst, | |
4940 | rule->tuples_mask.dst_ip, 4); | |
4941 | ||
4942 | fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port); | |
4943 | fs->m_u.tcp_ip6_spec.psrc = | |
4944 | rule->unused_tuple & BIT(INNER_SRC_PORT) ? | |
4945 | 0 : cpu_to_be16(rule->tuples_mask.src_port); | |
4946 | ||
4947 | fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port); | |
4948 | fs->m_u.tcp_ip6_spec.pdst = | |
4949 | rule->unused_tuple & BIT(INNER_DST_PORT) ? | |
4950 | 0 : cpu_to_be16(rule->tuples_mask.dst_port); | |
4951 | ||
4952 | break; | |
4953 | case IPV6_USER_FLOW: | |
4954 | cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src, | |
4955 | rule->tuples.src_ip, 4); | |
4956 | if (rule->unused_tuple & BIT(INNER_SRC_IP)) | |
4957 | memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(fs->m_u.usr_ip6_spec.ip6src)); | 
4958 | else | |
4959 | cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src, | |
4960 | rule->tuples_mask.src_ip, 4); | |
4961 | ||
4962 | cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst, | |
4963 | rule->tuples.dst_ip, 4); | |
4964 | if (rule->unused_tuple & BIT(INNER_DST_IP)) | |
4965 | memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(fs->m_u.usr_ip6_spec.ip6dst)); | 
4966 | else | |
4967 | cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst, | |
4968 | rule->tuples_mask.dst_ip, 4); | |
4969 | ||
4970 | fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto; | |
4971 | fs->m_u.usr_ip6_spec.l4_proto = | |
4972 | rule->unused_tuple & BIT(INNER_IP_PROTO) ? | |
4973 | 0 : rule->tuples_mask.ip_proto; | |
4974 | ||
4975 | break; | |
4976 | case ETHER_FLOW: | |
4977 | ether_addr_copy(fs->h_u.ether_spec.h_source, | |
4978 | rule->tuples.src_mac); | |
4979 | if (rule->unused_tuple & BIT(INNER_SRC_MAC)) | |
4980 | eth_zero_addr(fs->m_u.ether_spec.h_source); | |
4981 | else | |
4982 | ether_addr_copy(fs->m_u.ether_spec.h_source, | |
4983 | rule->tuples_mask.src_mac); | |
4984 | ||
4985 | ether_addr_copy(fs->h_u.ether_spec.h_dest, | |
4986 | rule->tuples.dst_mac); | |
4987 | if (rule->unused_tuple & BIT(INNER_DST_MAC)) | |
4988 | eth_zero_addr(fs->m_u.ether_spec.h_dest); | |
4989 | else | |
4990 | ether_addr_copy(fs->m_u.ether_spec.h_dest, | |
4991 | rule->tuples_mask.dst_mac); | |
4992 | ||
4993 | fs->h_u.ether_spec.h_proto = | |
4994 | cpu_to_be16(rule->tuples.ether_proto); | |
4995 | fs->m_u.ether_spec.h_proto = | |
4996 | rule->unused_tuple & BIT(INNER_ETH_TYPE) ? | |
4997 | 0 : cpu_to_be16(rule->tuples_mask.ether_proto); | |
4998 | ||
4999 | break; | |
5000 | default: | |
5001 | return -EOPNOTSUPP; | |
5002 | } | |
5003 | ||
5004 | if (fs->flow_type & FLOW_EXT) { | |
5005 | fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1); | |
5006 | fs->m_ext.vlan_tci = | |
5007 | rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ? | |
5008 | cpu_to_be16(VLAN_VID_MASK) : | |
5009 | cpu_to_be16(rule->tuples_mask.vlan_tag1); | |
5010 | } | |
5011 | ||
5012 | if (fs->flow_type & FLOW_MAC_EXT) { | |
5013 | ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac); | |
5014 | if (rule->unused_tuple & BIT(INNER_DST_MAC)) | |
5015 | eth_zero_addr(fs->m_ext.h_dest); | 
5016 | else | 
5017 | ether_addr_copy(fs->m_ext.h_dest, | 
5018 | rule->tuples_mask.dst_mac); | 
5019 | } | |
5020 | ||
5021 | if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) { | |
5022 | fs->ring_cookie = RX_CLS_FLOW_DISC; | |
5023 | } else { | |
5024 | u64 vf_id; | |
5025 | ||
5026 | fs->ring_cookie = rule->queue_id; | |
5027 | vf_id = rule->vf_id; | |
5028 | vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; | |
5029 | fs->ring_cookie |= vf_id; | |
5030 | } | |
5031 | ||
5032 | return 0; | |
5033 | } | |
5034 | ||
5035 | static int hclge_get_all_rules(struct hnae3_handle *handle, | |
5036 | struct ethtool_rxnfc *cmd, u32 *rule_locs) | |
5037 | { | |
5038 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5039 | struct hclge_dev *hdev = vport->back; | |
5040 | struct hclge_fd_rule *rule; | |
5041 | struct hlist_node *node2; | |
5042 | int cnt = 0; | |
5043 | ||
5044 | if (!hnae3_dev_fd_supported(hdev)) | |
5045 | return -EOPNOTSUPP; | |
5046 | ||
5047 | cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]; | |
5048 | ||
5049 | hlist_for_each_entry_safe(rule, node2, | |
5050 | &hdev->fd_rule_list, rule_node) { | |
5051 | if (cnt == cmd->rule_cnt) | |
5052 | return -EMSGSIZE; | |
5053 | ||
5054 | rule_locs[cnt] = rule->location; | |
5055 | cnt++; | |
5056 | } | |
5057 | ||
5058 | cmd->rule_cnt = cnt; | |
5059 | ||
5060 | return 0; | |
5061 | } | |
5062 | ||
4d60291b HT |
5063 | static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle) |
5064 | { | |
5065 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5066 | struct hclge_dev *hdev = vport->back; | |
5067 | ||
5068 | return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) || | |
5069 | hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING); | |
5070 | } | |
5071 | ||
5072 | static bool hclge_ae_dev_resetting(struct hnae3_handle *handle) | |
5073 | { | |
5074 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5075 | struct hclge_dev *hdev = vport->back; | |
5076 | ||
5077 | return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); | |
5078 | } | |
5079 | ||
5080 | static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle) | |
5081 | { | |
5082 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5083 | struct hclge_dev *hdev = vport->back; | |
5084 | ||
5085 | return hdev->reset_count; | |
5086 | } | |
5087 | ||
c17852a8 JS |
5088 | static void hclge_enable_fd(struct hnae3_handle *handle, bool enable) |
5089 | { | |
5090 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5091 | struct hclge_dev *hdev = vport->back; | |
5092 | ||
5093 | hdev->fd_cfg.fd_en = enable; | |
5094 | if (!enable) | |
5095 | hclge_del_all_fd_entries(handle, false); | |
5096 | else | |
5097 | hclge_restore_fd_entries(handle); | |
5098 | } | |
5099 | ||
46a3df9f S |
5100 | static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) |
5101 | { | |
5102 | struct hclge_desc desc; | |
d44f9b63 YL |
5103 | struct hclge_config_mac_mode_cmd *req = |
5104 | (struct hclge_config_mac_mode_cmd *)desc.data; | |
a90bb9a5 | 5105 | u32 loop_en = 0; |
46a3df9f S |
5106 | int ret; |
5107 | ||
5108 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false); | |
e4e87715 PL |
5109 | hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable); |
5110 | hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable); | |
5111 | hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable); | |
5112 | hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable); | |
5113 | hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0); | |
5114 | hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0); | |
5115 | hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0); | |
5116 | hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0); | |
5117 | hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable); | |
5118 | hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable); | |
5119 | hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable); | |
5120 | hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable); | |
5121 | hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable); | |
5122 | hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable); | |
a90bb9a5 | 5123 | req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); |
46a3df9f S |
5124 | |
5125 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
5126 | if (ret) | |
5127 | dev_err(&hdev->pdev->dev, | |
5128 | "mac enable fail, ret =%d.\n", ret); | |
5129 | } | |
5130 | ||
eb66d503 | 5131 | static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en) |
c39c4d98 | 5132 | { |
c39c4d98 | 5133 | struct hclge_config_mac_mode_cmd *req; |
c39c4d98 YL |
5134 | struct hclge_desc desc; |
5135 | u32 loop_en; | |
5136 | int ret; | |
5137 | ||
e4d68dae YL |
5138 | req = (struct hclge_config_mac_mode_cmd *)&desc.data[0]; |
5139 | /* 1 Read out the MAC mode config at first */ | |
5140 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true); | |
5141 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
5142 | if (ret) { | |
5143 | dev_err(&hdev->pdev->dev, | |
5144 | "mac loopback get fail, ret =%d.\n", ret); | |
5145 | return ret; | |
5146 | } | |
c39c4d98 | 5147 | |
e4d68dae YL |
5148 | /* 2 Then setup the loopback flag */ |
5149 | loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en); | |
e4e87715 | 5150 | hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0); |
0f29fc23 YL |
5151 | hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0); |
5152 | hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0); | |
e4d68dae YL |
5153 | |
5154 | req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); | |
c39c4d98 | 5155 | |
e4d68dae YL |
5156 | /* 3 Config mac work mode with loopback flag |
5157 | * and its original configure parameters | |
5158 | */ | |
5159 | hclge_cmd_reuse_desc(&desc, false); | |
5160 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
5161 | if (ret) | |
5162 | dev_err(&hdev->pdev->dev, | |
5163 | "mac loopback set fail, ret =%d.\n", ret); | |
5164 | return ret; | |
5165 | } | |
c39c4d98 | 5166 | |
4dc13b96 FL |
5167 | static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en, |
5168 | enum hnae3_loop loop_mode) | |
5fd50ac3 PL |
5169 | { |
5170 | #define HCLGE_SERDES_RETRY_MS 10 | |
5171 | #define HCLGE_SERDES_RETRY_NUM 100 | |
5172 | struct hclge_serdes_lb_cmd *req; | |
5173 | struct hclge_desc desc; | |
5174 | int ret, i = 0; | |
4dc13b96 | 5175 | u8 loop_mode_b; |
5fd50ac3 | 5176 | |
d0d72bac | 5177 | req = (struct hclge_serdes_lb_cmd *)desc.data; |
5fd50ac3 PL |
5178 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false); |
5179 | ||
4dc13b96 FL |
5180 | switch (loop_mode) { |
5181 | case HNAE3_LOOP_SERIAL_SERDES: | |
5182 | loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B; | |
5183 | break; | |
5184 | case HNAE3_LOOP_PARALLEL_SERDES: | |
5185 | loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B; | |
5186 | break; | |
5187 | default: | |
5188 | dev_err(&hdev->pdev->dev, | |
5189 | "unsupported serdes loopback mode %d\n", loop_mode); | |
5190 | return -EOPNOTSUPP; | 
5191 | } | |
5192 | ||
5fd50ac3 | 5193 | if (en) { |
4dc13b96 FL |
5194 | req->enable = loop_mode_b; |
5195 | req->mask = loop_mode_b; | |
5fd50ac3 | 5196 | } else { |
4dc13b96 | 5197 | req->mask = loop_mode_b; |
5fd50ac3 PL |
5198 | } |
5199 | ||
5200 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
5201 | if (ret) { | |
5202 | dev_err(&hdev->pdev->dev, | |
5203 | "serdes loopback set fail, ret = %d\n", ret); | |
5204 | return ret; | |
5205 | } | |
5206 | ||
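| /* Poll the firmware for completion: up to HCLGE_SERDES_RETRY_NUM |
|  * reads spaced HCLGE_SERDES_RETRY_MS apart, i.e. a worst case of |
|  * roughly 100 * 10 ms = 1 second before giving up with -EBUSY. |
|  */ |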
5207 | do { | |
5208 | msleep(HCLGE_SERDES_RETRY_MS); | |
5209 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, | |
5210 | true); | |
5211 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
5212 | if (ret) { | |
5213 | dev_err(&hdev->pdev->dev, | |
5214 | "serdes loopback get fail, ret = %d\n", ret); | 
5215 | return ret; | |
5216 | } | |
5217 | } while (++i < HCLGE_SERDES_RETRY_NUM && | |
5218 | !(req->result & HCLGE_CMD_SERDES_DONE_B)); | |
5219 | ||
5220 | if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) { | |
5221 | dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n"); | |
5222 | return -EBUSY; | |
5223 | } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) { | |
5224 | dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n"); | |
5225 | return -EIO; | |
5226 | } | |
5227 | ||
0f29fc23 | 5228 | hclge_cfg_mac_mode(hdev, en); |
5fd50ac3 PL |
5229 | return 0; |
5230 | } | |
5231 | ||
0f29fc23 YL |
5232 | static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id, |
5233 | int stream_id, bool enable) | |
5234 | { | |
5235 | struct hclge_desc desc; | |
5236 | struct hclge_cfg_com_tqp_queue_cmd *req = | |
5237 | (struct hclge_cfg_com_tqp_queue_cmd *)desc.data; | |
5238 | int ret; | |
5239 | ||
5240 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false); | |
5241 | req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK); | |
5242 | req->stream_id = cpu_to_le16(stream_id); | |
5243 | req->enable |= enable << HCLGE_TQP_ENABLE_B; | |
5244 | ||
5245 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
5246 | if (ret) | |
5247 | dev_err(&hdev->pdev->dev, | |
5248 | "Tqp enable fail, status =%d.\n", ret); | |
5249 | return ret; | |
5250 | } | |
5251 | ||
e4d68dae YL |
5252 | static int hclge_set_loopback(struct hnae3_handle *handle, |
5253 | enum hnae3_loop loop_mode, bool en) | |
5254 | { | |
5255 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5256 | struct hclge_dev *hdev = vport->back; | |
0f29fc23 | 5257 | int i, ret; |
e4d68dae YL |
5258 | |
5259 | switch (loop_mode) { | |
eb66d503 FL |
5260 | case HNAE3_LOOP_APP: |
5261 | ret = hclge_set_app_loopback(hdev, en); | |
c39c4d98 | 5262 | break; |
4dc13b96 FL |
5263 | case HNAE3_LOOP_SERIAL_SERDES: |
5264 | case HNAE3_LOOP_PARALLEL_SERDES: | |
5265 | ret = hclge_set_serdes_loopback(hdev, en, loop_mode); | |
5fd50ac3 | 5266 | break; |
c39c4d98 YL |
5267 | default: |
5268 | ret = -EOPNOTSUPP; | 
5269 | dev_err(&hdev->pdev->dev, | |
5270 | "loop_mode %d is not supported\n", loop_mode); | |
5271 | break; | |
5272 | } | |
5273 | ||
| /* don't enable the queues if setting the loopback mode failed */ |
| if (ret) |
| return ret; |
| ||
0f29fc23 YL |
5274 | for (i = 0; i < vport->alloc_tqps; i++) { |
5275 | ret = hclge_tqp_enable(hdev, i, 0, en); | |
5276 | if (ret) | |
5277 | return ret; | |
5278 | } | |
46a3df9f | 5279 | |
0f29fc23 | 5280 | return 0; |
46a3df9f S |
5281 | } |
5282 | ||
5283 | static void hclge_reset_tqp_stats(struct hnae3_handle *handle) | |
5284 | { | |
5285 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5286 | struct hnae3_queue *queue; | |
5287 | struct hclge_tqp *tqp; | |
5288 | int i; | |
5289 | ||
5290 | for (i = 0; i < vport->alloc_tqps; i++) { | |
5291 | queue = handle->kinfo.tqp[i]; | |
5292 | tqp = container_of(queue, struct hclge_tqp, q); | |
5293 | memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats)); | |
5294 | } | |
5295 | } | |
5296 | ||
5297 | static int hclge_ae_start(struct hnae3_handle *handle) | |
5298 | { | |
5299 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5300 | struct hclge_dev *hdev = vport->back; | |
46a3df9f | 5301 | |
46a3df9f S |
5302 | /* mac enable */ |
5303 | hclge_cfg_mac_mode(hdev, true); | |
5304 | clear_bit(HCLGE_STATE_DOWN, &hdev->state); | |
d039ef68 | 5305 | mod_timer(&hdev->service_timer, jiffies + HZ); |
be8d8cdb | 5306 | hdev->hw.mac.link = 0; |
46a3df9f | 5307 | |
b50ae26c PL |
5308 | /* reset tqp stats */ |
5309 | hclge_reset_tqp_stats(handle); | |
5310 | ||
b01b7cf1 | 5311 | hclge_mac_start_phy(hdev); |
46a3df9f | 5312 | |
46a3df9f S |
5313 | return 0; |
5314 | } | |
5315 | ||
5316 | static void hclge_ae_stop(struct hnae3_handle *handle) | |
5317 | { | |
5318 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5319 | struct hclge_dev *hdev = vport->back; | |
46a3df9f | 5320 | |
2f7e4896 FL |
5321 | set_bit(HCLGE_STATE_DOWN, &hdev->state); |
5322 | ||
b50ae26c PL |
5323 | del_timer_sync(&hdev->service_timer); |
5324 | cancel_work_sync(&hdev->service_task); | |
f5be7967 | 5325 | clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state); |
b50ae26c | 5326 | |
35d93a30 HT |
5327 | /* If it is not PF reset, the firmware will disable the MAC, |
5328 | * so it only needs to stop the PHY here. | 
5329 | */ | |
5330 | if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && | |
5331 | hdev->reset_type != HNAE3_FUNC_RESET) { | |
9617f668 | 5332 | hclge_mac_stop_phy(hdev); |
b50ae26c | 5333 | return; |
9617f668 | 5334 | } |
b50ae26c | 5335 | |
46a3df9f S |
5336 | /* Mac disable */ |
5337 | hclge_cfg_mac_mode(hdev, false); | |
5338 | ||
5339 | hclge_mac_stop_phy(hdev); | |
5340 | ||
5341 | /* reset tqp stats */ | |
5342 | hclge_reset_tqp_stats(handle); | |
f30dfddc FL |
5345 | hclge_update_link_status(hdev); | |
46a3df9f S |
5346 | } |
5347 | ||
a6d818e3 YL |
5348 | int hclge_vport_start(struct hclge_vport *vport) |
5349 | { | |
5350 | set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); | |
5351 | vport->last_active_jiffies = jiffies; | |
5352 | return 0; | |
5353 | } | |
5354 | ||
5355 | void hclge_vport_stop(struct hclge_vport *vport) | |
5356 | { | |
5357 | clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); | |
5358 | } | |
5359 | ||
5360 | static int hclge_client_start(struct hnae3_handle *handle) | |
5361 | { | |
5362 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5363 | ||
5364 | return hclge_vport_start(vport); | |
5365 | } | |
5366 | ||
5367 | static void hclge_client_stop(struct hnae3_handle *handle) | |
5368 | { | |
5369 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5370 | ||
5371 | hclge_vport_stop(vport); | |
5372 | } | |
5373 | ||
46a3df9f S |
5374 | static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport, |
5375 | u16 cmdq_resp, u8 resp_code, | |
5376 | enum hclge_mac_vlan_tbl_opcode op) | |
5377 | { | |
5378 | struct hclge_dev *hdev = vport->back; | |
5379 | int return_status = -EIO; | |
5380 | ||
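| /* resp_code decoding, per the branches below: for ADD, 0 and 1 mean |
|  * success, 2 is unicast table overflow, 3 is multicast overflow; for |
|  * REMOVE and LOOKUP, 0 means success and 1 means the entry was not |
|  * found. Any other value is an undefined firmware response. |
|  */ |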
5381 | if (cmdq_resp) { | |
5382 | dev_err(&hdev->pdev->dev, | |
5383 | "cmdq execute failed for get_mac_vlan_cmd_status, status=%d.\n", | 
5384 | cmdq_resp); | |
5385 | return -EIO; | |
5386 | } | |
5387 | ||
5388 | if (op == HCLGE_MAC_VLAN_ADD) { | |
5389 | if ((!resp_code) || (resp_code == 1)) { | |
5390 | return_status = 0; | |
5391 | } else if (resp_code == 2) { | |
eefd00a5 | 5392 | return_status = -ENOSPC; |
46a3df9f S |
5393 | dev_err(&hdev->pdev->dev, |
5394 | "add mac addr failed for uc_overflow.\n"); | |
5395 | } else if (resp_code == 3) { | |
eefd00a5 | 5396 | return_status = -ENOSPC; |
46a3df9f S |
5397 | dev_err(&hdev->pdev->dev, |
5398 | "add mac addr failed for mc_overflow.\n"); | |
5399 | } else { | |
5400 | dev_err(&hdev->pdev->dev, | |
5401 | "add mac addr failed for undefined, code=%d.\n", | |
5402 | resp_code); | |
5403 | } | |
5404 | } else if (op == HCLGE_MAC_VLAN_REMOVE) { | |
5405 | if (!resp_code) { | |
5406 | return_status = 0; | |
5407 | } else if (resp_code == 1) { | |
eefd00a5 | 5408 | return_status = -ENOENT; |
46a3df9f S |
5409 | dev_dbg(&hdev->pdev->dev, |
5410 | "remove mac addr failed for miss.\n"); | |
5411 | } else { | |
5412 | dev_err(&hdev->pdev->dev, | |
5413 | "remove mac addr failed for undefined, code=%d.\n", | |
5414 | resp_code); | |
5415 | } | |
5416 | } else if (op == HCLGE_MAC_VLAN_LKUP) { | |
5417 | if (!resp_code) { | |
5418 | return_status = 0; | |
5419 | } else if (resp_code == 1) { | |
eefd00a5 | 5420 | return_status = -ENOENT; |
46a3df9f S |
5421 | dev_dbg(&hdev->pdev->dev, |
5422 | "lookup mac addr failed for miss.\n"); | |
5423 | } else { | |
5424 | dev_err(&hdev->pdev->dev, | |
5425 | "lookup mac addr failed for undefined, code=%d.\n", | |
5426 | resp_code); | |
5427 | } | |
5428 | } else { | |
eefd00a5 | 5429 | return_status = -EINVAL; |
46a3df9f S |
5430 | dev_err(&hdev->pdev->dev, |
5431 | "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", | 
5432 | op); | |
5433 | } | |
5434 | ||
5435 | return return_status; | |
5436 | } | |
5437 | ||
5438 | static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr) | |
5439 | { | |
5440 | int word_num; | |
5441 | int bit_num; | |
5442 | ||
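| /* The 256 function ids form a bitmap spread over the extra |
|  * descriptors: vfid 0-191 live in desc[1].data (six 32-bit words), |
|  * vfid 192-255 in desc[2].data. E.g. vfid 200 maps to |
|  * desc[2].data[0], bit 8. |
|  */ |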
5443 | if (vfid > 255 || vfid < 0) | |
5444 | return -EIO; | |
5445 | ||
5446 | if (vfid >= 0 && vfid <= 191) { | |
5447 | word_num = vfid / 32; | |
5448 | bit_num = vfid % 32; | |
5449 | if (clr) | |
a90bb9a5 | 5450 | desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num)); |
46a3df9f | 5451 | else |
a90bb9a5 | 5452 | desc[1].data[word_num] |= cpu_to_le32(1 << bit_num); |
46a3df9f S |
5453 | } else { |
5454 | word_num = (vfid - 192) / 32; | |
5455 | bit_num = vfid % 32; | |
5456 | if (clr) | |
a90bb9a5 | 5457 | desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num)); |
46a3df9f | 5458 | else |
a90bb9a5 | 5459 | desc[2].data[word_num] |= cpu_to_le32(1 << bit_num); |
46a3df9f S |
5460 | } |
5461 | ||
5462 | return 0; | |
5463 | } | |
5464 | ||
5465 | static bool hclge_is_all_function_id_zero(struct hclge_desc *desc) | |
5466 | { | |
5467 | #define HCLGE_DESC_NUMBER 3 | |
5468 | #define HCLGE_FUNC_NUMBER_PER_DESC 6 | |
5469 | int i, j; | |
5470 | ||
6c39d527 | 5471 | for (i = 1; i < HCLGE_DESC_NUMBER; i++) |
46a3df9f S |
5472 | for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++) |
5473 | if (desc[i].data[j]) | |
5474 | return false; | |
5475 | ||
5476 | return true; | |
5477 | } | |
5478 | ||
d44f9b63 | 5479 | static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req, |
46a3df9f S |
5480 | const u8 *addr) |
5481 | { | |
5482 | const unsigned char *mac_addr = addr; | |
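| /* Pack the address as a little-endian integer with byte 0 lowest: |
|  * for 00:11:22:33:44:55, high_val = 0x33221100 and low_val = 0x5544, |
|  * so mac_addr_hi32 holds bytes 0-3 and mac_addr_lo16 bytes 4-5. |
|  */ |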
5483 | u32 high_val = (mac_addr[3] << 24) | (mac_addr[2] << 16) | | 
5484 | (mac_addr[1] << 8) | mac_addr[0]; | 
5485 | u32 low_val = mac_addr[4] | (mac_addr[5] << 8); | |
5486 | ||
5487 | new_req->mac_addr_hi32 = cpu_to_le32(high_val); | |
5488 | new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff); | |
5489 | } | |
5490 | ||
46a3df9f | 5491 | static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport, |
d44f9b63 | 5492 | struct hclge_mac_vlan_tbl_entry_cmd *req) |
46a3df9f S |
5493 | { |
5494 | struct hclge_dev *hdev = vport->back; | |
5495 | struct hclge_desc desc; | |
5496 | u8 resp_code; | |
a90bb9a5 | 5497 | u16 retval; |
46a3df9f S |
5498 | int ret; |
5499 | ||
5500 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false); | |
5501 | ||
d44f9b63 | 5502 | memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); |
46a3df9f S |
5503 | |
5504 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
5505 | if (ret) { | |
5506 | dev_err(&hdev->pdev->dev, | |
5507 | "del mac addr failed for cmd_send, ret =%d.\n", | |
5508 | ret); | |
5509 | return ret; | |
5510 | } | |
a90bb9a5 YL |
5511 | resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; |
5512 | retval = le16_to_cpu(desc.retval); | |
46a3df9f | 5513 | |
a90bb9a5 | 5514 | return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code, |
46a3df9f S |
5515 | HCLGE_MAC_VLAN_REMOVE); |
5516 | } | |
5517 | ||
5518 | static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport, | |
d44f9b63 | 5519 | struct hclge_mac_vlan_tbl_entry_cmd *req, |
46a3df9f S |
5520 | struct hclge_desc *desc, |
5521 | bool is_mc) | |
5522 | { | |
5523 | struct hclge_dev *hdev = vport->back; | |
5524 | u8 resp_code; | |
a90bb9a5 | 5525 | u16 retval; |
46a3df9f S |
5526 | int ret; |
5527 | ||
5528 | hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true); | |
5529 | if (is_mc) { | |
5530 | desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); | |
5531 | memcpy(desc[0].data, | |
5532 | req, | |
d44f9b63 | 5533 | sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); |
46a3df9f S |
5534 | hclge_cmd_setup_basic_desc(&desc[1], |
5535 | HCLGE_OPC_MAC_VLAN_ADD, | |
5536 | true); | |
5537 | desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); | |
5538 | hclge_cmd_setup_basic_desc(&desc[2], | |
5539 | HCLGE_OPC_MAC_VLAN_ADD, | |
5540 | true); | |
5541 | ret = hclge_cmd_send(&hdev->hw, desc, 3); | |
5542 | } else { | |
5543 | memcpy(desc[0].data, | |
5544 | req, | |
d44f9b63 | 5545 | sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); |
46a3df9f S |
5546 | ret = hclge_cmd_send(&hdev->hw, desc, 1); |
5547 | } | |
5548 | if (ret) { | |
5549 | dev_err(&hdev->pdev->dev, | |
5550 | "lookup mac addr failed for cmd_send, ret =%d.\n", | |
5551 | ret); | |
5552 | return ret; | |
5553 | } | |
a90bb9a5 YL |
5554 | resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff; |
5555 | retval = le16_to_cpu(desc[0].retval); | |
46a3df9f | 5556 | |
a90bb9a5 | 5557 | return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code, |
46a3df9f S |
5558 | HCLGE_MAC_VLAN_LKUP); |
5559 | } | |
5560 | ||
5561 | static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport, | |
d44f9b63 | 5562 | struct hclge_mac_vlan_tbl_entry_cmd *req, |
46a3df9f S |
5563 | struct hclge_desc *mc_desc) |
5564 | { | |
5565 | struct hclge_dev *hdev = vport->back; | |
5566 | int cfg_status; | |
5567 | u8 resp_code; | |
a90bb9a5 | 5568 | u16 retval; |
46a3df9f S |
5569 | int ret; |
5570 | ||
5571 | if (!mc_desc) { | |
5572 | struct hclge_desc desc; | |
5573 | ||
5574 | hclge_cmd_setup_basic_desc(&desc, | |
5575 | HCLGE_OPC_MAC_VLAN_ADD, | |
5576 | false); | |
d44f9b63 YL |
5577 | memcpy(desc.data, req, |
5578 | sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); | |
46a3df9f | 5579 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); |
a90bb9a5 YL |
5580 | resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; |
5581 | retval = le16_to_cpu(desc.retval); | |
5582 | ||
5583 | cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval, | |
46a3df9f S |
5584 | resp_code, |
5585 | HCLGE_MAC_VLAN_ADD); | |
5586 | } else { | |
c3b6f755 | 5587 | hclge_cmd_reuse_desc(&mc_desc[0], false); |
46a3df9f | 5588 | mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); |
c3b6f755 | 5589 | hclge_cmd_reuse_desc(&mc_desc[1], false); |
46a3df9f | 5590 | mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); |
c3b6f755 | 5591 | hclge_cmd_reuse_desc(&mc_desc[2], false); |
46a3df9f S |
5592 | mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT); |
5593 | memcpy(mc_desc[0].data, req, | |
d44f9b63 | 5594 | sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); |
46a3df9f | 5595 | ret = hclge_cmd_send(&hdev->hw, mc_desc, 3); |
a90bb9a5 YL |
5596 | resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff; |
5597 | retval = le16_to_cpu(mc_desc[0].retval); | |
5598 | ||
5599 | cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval, | |
46a3df9f S |
5600 | resp_code, |
5601 | HCLGE_MAC_VLAN_ADD); | |
5602 | } | |
5603 | ||
5604 | if (ret) { | |
5605 | dev_err(&hdev->pdev->dev, | |
5606 | "add mac addr failed for cmd_send, ret =%d.\n", | |
5607 | ret); | |
5608 | return ret; | |
5609 | } | |
5610 | ||
5611 | return cfg_status; | |
5612 | } | |
5613 | ||
39932473 JS |
5614 | static int hclge_init_umv_space(struct hclge_dev *hdev) |
5615 | { | |
5616 | u16 allocated_size = 0; | |
5617 | int ret; | |
5618 | ||
5619 | ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size, | |
5620 | true); | |
5621 | if (ret) | |
5622 | return ret; | |
5623 | ||
5624 | if (allocated_size < hdev->wanted_umv_size) | |
5625 | dev_warn(&hdev->pdev->dev, | |
5626 | "Alloc umv space failed, want %d, got %d\n", | 
5627 | hdev->wanted_umv_size, allocated_size); | |
5628 | ||
5629 | mutex_init(&hdev->umv_mutex); | |
5630 | hdev->max_umv_size = allocated_size; | |
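| /* Split the allocated entries into num_req_vfs + 2 equal slices: one |
|  * private quota per vport (PF plus VFs) and one extra slice which, |
|  * together with the division remainder, seeds the shared pool. E.g. |
|  * 512 entries and 6 VFs give priv_umv_size = 64 and share = 64. |
|  */ |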
5631 | hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2); | |
5632 | hdev->share_umv_size = hdev->priv_umv_size + | |
5633 | hdev->max_umv_size % (hdev->num_req_vfs + 2); | |
5634 | ||
5635 | return 0; | |
5636 | } | |
5637 | ||
5638 | static int hclge_uninit_umv_space(struct hclge_dev *hdev) | |
5639 | { | |
5640 | int ret; | |
5641 | ||
5642 | if (hdev->max_umv_size > 0) { | |
5643 | ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL, | |
5644 | false); | |
5645 | if (ret) | |
5646 | return ret; | |
5647 | hdev->max_umv_size = 0; | |
5648 | } | |
5649 | mutex_destroy(&hdev->umv_mutex); | |
5650 | ||
5651 | return 0; | |
5652 | } | |
5653 | ||
5654 | static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size, | |
5655 | u16 *allocated_size, bool is_alloc) | |
5656 | { | |
5657 | struct hclge_umv_spc_alc_cmd *req; | |
5658 | struct hclge_desc desc; | |
5659 | int ret; | |
5660 | ||
5661 | req = (struct hclge_umv_spc_alc_cmd *)desc.data; | |
5662 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false); | |
5663 | hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc); | |
5664 | req->space_size = cpu_to_le32(space_size); | |
5665 | ||
5666 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
5667 | if (ret) { | |
5668 | dev_err(&hdev->pdev->dev, | |
5669 | "%s umv space failed for cmd_send, ret =%d\n", | |
5670 | is_alloc ? "allocate" : "free", ret); | |
5671 | return ret; | |
5672 | } | |
5673 | ||
5674 | if (is_alloc && allocated_size) | |
5675 | *allocated_size = le32_to_cpu(desc.data[1]); | |
5676 | ||
5677 | return 0; | |
5678 | } | |
5679 | ||
5680 | static void hclge_reset_umv_space(struct hclge_dev *hdev) | |
5681 | { | |
5682 | struct hclge_vport *vport; | |
5683 | int i; | |
5684 | ||
5685 | for (i = 0; i < hdev->num_alloc_vport; i++) { | |
5686 | vport = &hdev->vport[i]; | |
5687 | vport->used_umv_num = 0; | |
5688 | } | |
5689 | ||
5690 | mutex_lock(&hdev->umv_mutex); | |
5691 | hdev->share_umv_size = hdev->priv_umv_size + | |
5692 | hdev->max_umv_size % (hdev->num_req_vfs + 2); | |
5693 | mutex_unlock(&hdev->umv_mutex); | |
5694 | } | |
5695 | ||
5696 | static bool hclge_is_umv_space_full(struct hclge_vport *vport) | |
5697 | { | |
5698 | struct hclge_dev *hdev = vport->back; | |
5699 | bool is_full; | |
5700 | ||
5701 | mutex_lock(&hdev->umv_mutex); | |
5702 | is_full = (vport->used_umv_num >= hdev->priv_umv_size && | |
5703 | hdev->share_umv_size == 0); | |
5704 | mutex_unlock(&hdev->umv_mutex); | |
5705 | ||
5706 | return is_full; | |
5707 | } | |
5708 | ||
5709 | static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free) | |
5710 | { | |
5711 | struct hclge_dev *hdev = vport->back; | |
5712 | ||
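| /* A vport consumes its private quota first and only then borrows |
|  * from the shared pool, so the shared counter moves only while the |
|  * vport is at or above its priv_umv_size watermark. |
|  */ |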
5713 | mutex_lock(&hdev->umv_mutex); | |
5714 | if (is_free) { | |
5715 | if (vport->used_umv_num > hdev->priv_umv_size) | |
5716 | hdev->share_umv_size++; | |
5717 | vport->used_umv_num--; | |
5718 | } else { | |
5719 | if (vport->used_umv_num >= hdev->priv_umv_size) | |
5720 | hdev->share_umv_size--; | |
5721 | vport->used_umv_num++; | |
5722 | } | |
5723 | mutex_unlock(&hdev->umv_mutex); | |
5724 | } | |
5725 | ||
46a3df9f S |
5726 | static int hclge_add_uc_addr(struct hnae3_handle *handle, |
5727 | const unsigned char *addr) | |
5728 | { | |
5729 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5730 | ||
5731 | return hclge_add_uc_addr_common(vport, addr); | |
5732 | } | |
5733 | ||
5734 | int hclge_add_uc_addr_common(struct hclge_vport *vport, | |
5735 | const unsigned char *addr) | |
5736 | { | |
5737 | struct hclge_dev *hdev = vport->back; | |
d44f9b63 | 5738 | struct hclge_mac_vlan_tbl_entry_cmd req; |
d07b6bb4 | 5739 | struct hclge_desc desc; |
a90bb9a5 | 5740 | u16 egress_port = 0; |
aa7a795e | 5741 | int ret; |
46a3df9f S |
5742 | |
5743 | /* mac addr check */ | |
5744 | if (is_zero_ether_addr(addr) || | |
5745 | is_broadcast_ether_addr(addr) || | |
5746 | is_multicast_ether_addr(addr)) { | |
5747 | dev_err(&hdev->pdev->dev, | |
5748 | "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n", | |
5749 | addr, | |
5750 | is_zero_ether_addr(addr), | |
5751 | is_broadcast_ether_addr(addr), | |
5752 | is_multicast_ether_addr(addr)); | |
5753 | return -EINVAL; | |
5754 | } | |
5755 | ||
5756 | memset(&req, 0, sizeof(req)); | |
e4e87715 | 5757 | hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); |
a90bb9a5 | 5758 | |
e4e87715 PL |
5759 | hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M, |
5760 | HCLGE_MAC_EPORT_VFID_S, vport->vport_id); | |
a90bb9a5 YL |
5761 | |
5762 | req.egress_port = cpu_to_le16(egress_port); | |
46a3df9f S |
5763 | |
5764 | hclge_prepare_mac_addr(&req, addr); | |
5765 | ||
d07b6bb4 JS |
5766 | /* Lookup the mac address in the mac_vlan table, and add |
5767 | * it if the entry does not exist. Duplicate unicast entries | 
5768 | * are not allowed in the mac vlan table. | 
5769 | */ | |
5770 | ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false); | |
39932473 JS |
5771 | if (ret == -ENOENT) { |
5772 | if (!hclge_is_umv_space_full(vport)) { | |
5773 | ret = hclge_add_mac_vlan_tbl(vport, &req, NULL); | |
5774 | if (!ret) | |
5775 | hclge_update_umv_space(vport, false); | |
5776 | return ret; | |
5777 | } | |
5778 | ||
5779 | dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n", | |
5780 | hdev->priv_umv_size); | |
5781 | ||
5782 | return -ENOSPC; | |
5783 | } | |
d07b6bb4 JS |
5784 | |
5785 | /* check if we just hit the duplicate */ | |
5786 | if (!ret) | |
5787 | ret = -EINVAL; | |
5788 | ||
5789 | dev_err(&hdev->pdev->dev, | |
5790 | "PF failed to add unicast entry(%pM) in the MAC table\n", | |
5791 | addr); | |
46a3df9f | 5792 | |
aa7a795e | 5793 | return ret; |
46a3df9f S |
5794 | } |
5795 | ||
5796 | static int hclge_rm_uc_addr(struct hnae3_handle *handle, | |
5797 | const unsigned char *addr) | |
5798 | { | |
5799 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5800 | ||
5801 | return hclge_rm_uc_addr_common(vport, addr); | |
5802 | } | |
5803 | ||
5804 | int hclge_rm_uc_addr_common(struct hclge_vport *vport, | |
5805 | const unsigned char *addr) | |
5806 | { | |
5807 | struct hclge_dev *hdev = vport->back; | |
d44f9b63 | 5808 | struct hclge_mac_vlan_tbl_entry_cmd req; |
aa7a795e | 5809 | int ret; |
46a3df9f S |
5810 | |
5811 | /* mac addr check */ | |
5812 | if (is_zero_ether_addr(addr) || | |
5813 | is_broadcast_ether_addr(addr) || | |
5814 | is_multicast_ether_addr(addr)) { | |
5815 | dev_dbg(&hdev->pdev->dev, | |
5816 | "Remove mac err! invalid mac:%pM.\n", | |
5817 | addr); | |
5818 | return -EINVAL; | |
5819 | } | |
5820 | ||
5821 | memset(&req, 0, sizeof(req)); | |
e4e87715 PL |
5822 | hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); |
5823 | hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); | |
46a3df9f | 5824 | hclge_prepare_mac_addr(&req, addr); |
aa7a795e | 5825 | ret = hclge_remove_mac_vlan_tbl(vport, &req); |
39932473 JS |
5826 | if (!ret) |
5827 | hclge_update_umv_space(vport, true); | |
46a3df9f | 5828 | |
aa7a795e | 5829 | return ret; |
46a3df9f S |
5830 | } |
5831 | ||
5832 | static int hclge_add_mc_addr(struct hnae3_handle *handle, | |
5833 | const unsigned char *addr) | |
5834 | { | |
5835 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5836 | ||
a10829c4 | 5837 | return hclge_add_mc_addr_common(vport, addr); |
46a3df9f S |
5838 | } |
5839 | ||
5840 | int hclge_add_mc_addr_common(struct hclge_vport *vport, | |
5841 | const unsigned char *addr) | |
5842 | { | |
5843 | struct hclge_dev *hdev = vport->back; | |
d44f9b63 | 5844 | struct hclge_mac_vlan_tbl_entry_cmd req; |
46a3df9f | 5845 | struct hclge_desc desc[3]; |
46a3df9f S |
5846 | int status; |
5847 | ||
5848 | /* mac addr check */ | |
5849 | if (!is_multicast_ether_addr(addr)) { | |
5850 | dev_err(&hdev->pdev->dev, | |
5851 | "Add mc mac err! invalid mac:%pM.\n", | |
5852 | addr); | |
5853 | return -EINVAL; | |
5854 | } | |
5855 | memset(&req, 0, sizeof(req)); | |
e4e87715 PL |
5856 | hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); |
5857 | hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); | |
5858 | hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); | |
fd5f9da3 | 5859 | hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1); |
46a3df9f S |
5860 | hclge_prepare_mac_addr(&req, addr); |
5861 | status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); | |
5862 | if (!status) { | |
5863 | /* This mac addr exists, update VFID for it */ | 
5864 | hclge_update_desc_vfid(desc, vport->vport_id, false); | |
5865 | status = hclge_add_mac_vlan_tbl(vport, &req, desc); | |
5866 | } else { | |
5867 | /* This mac addr does not exist, add a new entry for it */ | 
5868 | memset(desc[0].data, 0, sizeof(desc[0].data)); | |
5869 | memset(desc[1].data, 0, sizeof(desc[1].data)); | 
5870 | memset(desc[2].data, 0, sizeof(desc[2].data)); | 
5871 | hclge_update_desc_vfid(desc, vport->vport_id, false); | |
5872 | status = hclge_add_mac_vlan_tbl(vport, &req, desc); | |
5873 | } | |
5874 | ||
1f6db589 JS |
5875 | if (status == -ENOSPC) |
5876 | dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n"); | |
46a3df9f S |
5877 | |
5878 | return status; | |
5879 | } | |
5880 | ||
5881 | static int hclge_rm_mc_addr(struct hnae3_handle *handle, | |
5882 | const unsigned char *addr) | |
5883 | { | |
5884 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5885 | ||
5886 | return hclge_rm_mc_addr_common(vport, addr); | |
5887 | } | |
5888 | ||
5889 | int hclge_rm_mc_addr_common(struct hclge_vport *vport, | |
5890 | const unsigned char *addr) | |
5891 | { | |
5892 | struct hclge_dev *hdev = vport->back; | |
d44f9b63 | 5893 | struct hclge_mac_vlan_tbl_entry_cmd req; |
46a3df9f S |
5894 | enum hclge_cmd_status status; |
5895 | struct hclge_desc desc[3]; | |
46a3df9f S |
5896 | |
5897 | /* mac addr check */ | |
5898 | if (!is_multicast_ether_addr(addr)) { | |
5899 | dev_dbg(&hdev->pdev->dev, | |
5900 | "Remove mc mac err! invalid mac:%pM.\n", | |
5901 | addr); | |
5902 | return -EINVAL; | |
5903 | } | |
5904 | ||
5905 | memset(&req, 0, sizeof(req)); | |
e4e87715 PL |
5906 | hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); |
5907 | hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); | |
5908 | hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); | |
fd5f9da3 | 5909 | hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1); |
46a3df9f S |
5910 | hclge_prepare_mac_addr(&req, addr); |
5911 | status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); | |
5912 | if (!status) { | |
5913 | /* This mac addr exists, remove this handle's VFID for it */ | 
5914 | hclge_update_desc_vfid(desc, vport->vport_id, true); | |
5915 | ||
5916 | if (hclge_is_all_function_id_zero(desc)) | |
5917 | /* All the vfids are zero, so delete this entry */ | 
5918 | status = hclge_remove_mac_vlan_tbl(vport, &req); | |
5919 | else | |
5920 | /* Not all the vfids are zero, just update the vfids */ | 
5921 | status = hclge_add_mac_vlan_tbl(vport, &req, desc); | |
5922 | ||
5923 | } else { | |
40cca1c5 XW |
5924 | /* Maybe this mac address is in mta table, but it cannot be |
5925 | * deleted here because an entry of mta represents an address | |
5926 | * range rather than a specific address. The deletion of all | 
5927 | * entries takes effect in update_mta_status, called by | 
5928 | * hns3_nic_set_rx_mode. | 
5929 | */ | |
5930 | status = 0; | |
46a3df9f S |
5931 | } |
5932 | ||
46a3df9f S |
5933 | return status; |
5934 | } | |
5935 | ||
f5aac71c FL |
5936 | static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev, |
5937 | u16 cmdq_resp, u8 resp_code) | |
5938 | { | |
5939 | #define HCLGE_ETHERTYPE_SUCCESS_ADD 0 | |
5940 | #define HCLGE_ETHERTYPE_ALREADY_ADD 1 | |
5941 | #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2 | |
5942 | #define HCLGE_ETHERTYPE_KEY_CONFLICT 3 | |
5943 | ||
5944 | int return_status; | |
5945 | ||
5946 | if (cmdq_resp) { | |
5947 | dev_err(&hdev->pdev->dev, | |
5948 | "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n", | |
5949 | cmdq_resp); | |
5950 | return -EIO; | |
5951 | } | |
5952 | ||
5953 | switch (resp_code) { | |
5954 | case HCLGE_ETHERTYPE_SUCCESS_ADD: | |
5955 | case HCLGE_ETHERTYPE_ALREADY_ADD: | |
5956 | return_status = 0; | |
5957 | break; | |
5958 | case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW: | |
5959 | dev_err(&hdev->pdev->dev, | |
5960 | "add mac ethertype failed for manager table overflow.\n"); | |
5961 | return_status = -EIO; | |
5962 | break; | |
5963 | case HCLGE_ETHERTYPE_KEY_CONFLICT: | |
5964 | dev_err(&hdev->pdev->dev, | |
5965 | "add mac ethertype failed for key conflict.\n"); | |
5966 | return_status = -EIO; | |
5967 | break; | |
5968 | default: | |
5969 | dev_err(&hdev->pdev->dev, | |
5970 | "add mac ethertype failed for undefined, code=%d.\n", | |
5971 | resp_code); | |
5972 | return_status = -EIO; | |
5973 | } | |
5974 | ||
5975 | return return_status; | |
5976 | } | |
5977 | ||
5978 | static int hclge_add_mgr_tbl(struct hclge_dev *hdev, | |
5979 | const struct hclge_mac_mgr_tbl_entry_cmd *req) | |
5980 | { | |
5981 | struct hclge_desc desc; | |
5982 | u8 resp_code; | |
5983 | u16 retval; | |
5984 | int ret; | |
5985 | ||
5986 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false); | |
5987 | memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd)); | |
5988 | ||
5989 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
5990 | if (ret) { | |
5991 | dev_err(&hdev->pdev->dev, | |
5992 | "add mac ethertype failed for cmd_send, ret =%d.\n", | |
5993 | ret); | |
5994 | return ret; | |
5995 | } | |
5996 | ||
5997 | resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; | |
5998 | retval = le16_to_cpu(desc.retval); | |
5999 | ||
6000 | return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code); | |
6001 | } | |
6002 | ||
6003 | static int init_mgr_tbl(struct hclge_dev *hdev) | |
6004 | { | |
6005 | int ret; | |
6006 | int i; | |
6007 | ||
6008 | for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) { | |
6009 | ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]); | |
6010 | if (ret) { | |
6011 | dev_err(&hdev->pdev->dev, | |
6012 | "add mac ethertype failed, ret =%d.\n", | |
6013 | ret); | |
6014 | return ret; | |
6015 | } | |
6016 | } | |
6017 | ||
6018 | return 0; | |
6019 | } | |
6020 | ||
46a3df9f S |
6021 | static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p) |
6022 | { | |
6023 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6024 | struct hclge_dev *hdev = vport->back; | |
6025 | ||
6026 | ether_addr_copy(p, hdev->hw.mac.mac_addr); | |
6027 | } | |
6028 | ||
59098055 FL |
6029 | static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p, |
6030 | bool is_first) | |
46a3df9f S |
6031 | { |
6032 | const unsigned char *new_addr = (const unsigned char *)p; | |
6033 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6034 | struct hclge_dev *hdev = vport->back; | |
18838d0c | 6035 | int ret; |
46a3df9f S |
6036 | |
6037 | /* mac addr check */ | |
6038 | if (is_zero_ether_addr(new_addr) || | |
6039 | is_broadcast_ether_addr(new_addr) || | |
6040 | is_multicast_ether_addr(new_addr)) { | |
6041 | dev_err(&hdev->pdev->dev, | |
6042 | "Change uc mac err! invalid mac:%p.\n", | |
6043 | new_addr); | |
6044 | return -EINVAL; | |
6045 | } | |
6046 | ||
59098055 | 6047 | if (!is_first && hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr)) |
18838d0c | 6048 | dev_warn(&hdev->pdev->dev, |
59098055 | 6049 | "remove old uc mac address fail.\n"); |
46a3df9f | 6050 | |
18838d0c FL |
6051 | ret = hclge_add_uc_addr(handle, new_addr); |
6052 | if (ret) { | |
6053 | dev_err(&hdev->pdev->dev, | |
6054 | "add uc mac address fail, ret =%d.\n", | |
6055 | ret); | |
6056 | ||
59098055 FL |
6057 | if (!is_first && |
6058 | hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr)) | |
18838d0c | 6059 | dev_err(&hdev->pdev->dev, |
59098055 | 6060 | "restore uc mac address fail.\n"); |
18838d0c FL |
6061 | |
6062 | return -EIO; | |
46a3df9f S |
6063 | } |
6064 | ||
e98d7183 | 6065 | ret = hclge_pause_addr_cfg(hdev, new_addr); |
18838d0c FL |
6066 | if (ret) { |
6067 | dev_err(&hdev->pdev->dev, | |
6068 | "configure mac pause address fail, ret =%d.\n", | |
6069 | ret); | |
6070 | return -EIO; | |
6071 | } | |
6072 | ||
6073 | ether_addr_copy(hdev->hw.mac.mac_addr, new_addr); | |
6074 | ||
6075 | return 0; | |
46a3df9f S |
6076 | } |
6077 | ||
26483246 XW |
6078 | static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr, |
6079 | int cmd) | |
6080 | { | |
6081 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6082 | struct hclge_dev *hdev = vport->back; | |
6083 | ||
6084 | if (!hdev->hw.mac.phydev) | |
6085 | return -EOPNOTSUPP; | |
6086 | ||
6087 | return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd); | |
6088 | } | |
6089 | ||
46a3df9f | 6090 | static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, |
64d114f0 | 6091 | u8 fe_type, bool filter_en) |
46a3df9f | 6092 | { |
d44f9b63 | 6093 | struct hclge_vlan_filter_ctrl_cmd *req; |
46a3df9f S |
6094 | struct hclge_desc desc; |
6095 | int ret; | |
6096 | ||
6097 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false); | |
6098 | ||
d44f9b63 | 6099 | req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data; |
46a3df9f | 6100 | req->vlan_type = vlan_type; |
64d114f0 | 6101 | req->vlan_fe = filter_en ? fe_type : 0; |
46a3df9f S |
6102 | |
6103 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
3f639907 | 6104 | if (ret) |
46a3df9f S |
6105 | dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n", |
6106 | ret); | |
46a3df9f | 6107 | |
3f639907 | 6108 | return ret; |
46a3df9f S |
6109 | } |
6110 | ||
391b5e93 JS |
6111 | #define HCLGE_FILTER_TYPE_VF 0 |
6112 | #define HCLGE_FILTER_TYPE_PORT 1 | |
64d114f0 ZL |
6113 | #define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0) |
6114 | #define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0) | |
6115 | #define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1) | |
6116 | #define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2) | |
6117 | #define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3) | |
6118 | #define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \ | |
6119 | | HCLGE_FILTER_FE_ROCE_EGRESS_B) | |
6120 | #define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \ | |
6121 | | HCLGE_FILTER_FE_ROCE_INGRESS_B) | |
391b5e93 JS |
6122 | |
6123 | static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable) | |
6124 | { | |
6125 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6126 | struct hclge_dev *hdev = vport->back; | |
6127 | ||
64d114f0 ZL |
6128 | if (hdev->pdev->revision >= 0x21) { |
6129 | hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, | |
6130 | HCLGE_FILTER_FE_EGRESS, enable); | |
6131 | hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, | |
6132 | HCLGE_FILTER_FE_INGRESS, enable); | |
6133 | } else { | |
6134 | hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, | |
6135 | HCLGE_FILTER_FE_EGRESS_V1_B, enable); | |
6136 | } | |
c60edc17 JS |
6137 | if (enable) |
6138 | handle->netdev_flags |= HNAE3_VLAN_FLTR; | |
6139 | else | |
6140 | handle->netdev_flags &= ~HNAE3_VLAN_FLTR; | |
391b5e93 JS |
6141 | } |
6142 | ||
dc8131d8 YL |
6143 | static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid, |
6144 | bool is_kill, u16 vlan, u8 qos, | |
6145 | __be16 proto) | |
46a3df9f S |
6146 | { |
6147 | #define HCLGE_MAX_VF_BYTES 16 | |
d44f9b63 YL |
6148 | struct hclge_vlan_filter_vf_cfg_cmd *req0; |
6149 | struct hclge_vlan_filter_vf_cfg_cmd *req1; | |
46a3df9f S |
6150 | struct hclge_desc desc[2]; |
6151 | u8 vf_byte_val; | |
6152 | u8 vf_byte_off; | |
6153 | int ret; | |
6154 | ||
6155 | hclge_cmd_setup_basic_desc(&desc[0], | |
6156 | HCLGE_OPC_VLAN_FILTER_VF_CFG, false); | |
6157 | hclge_cmd_setup_basic_desc(&desc[1], | |
6158 | HCLGE_OPC_VLAN_FILTER_VF_CFG, false); | |
6159 | ||
6160 | desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); | |
6161 | ||
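/* Each VF is one bit in a byte-indexed bitmap that spans both
 * descriptors: the first HCLGE_MAX_VF_BYTES bytes go in desc[0],
 * the remainder in desc[1].
 */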
6162 | vf_byte_off = vfid / 8; | |
6163 | vf_byte_val = 1 << (vfid % 8); | |
6164 | ||
d44f9b63 YL |
6165 | req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data; |
6166 | req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data; | |
46a3df9f | 6167 | |
a90bb9a5 | 6168 | req0->vlan_id = cpu_to_le16(vlan); |
46a3df9f S |
6169 | req0->vlan_cfg = is_kill; |
6170 | ||
6171 | if (vf_byte_off < HCLGE_MAX_VF_BYTES) | |
6172 | req0->vf_bitmap[vf_byte_off] = vf_byte_val; | |
6173 | else | |
6174 | req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val; | |
6175 | ||
6176 | ret = hclge_cmd_send(&hdev->hw, desc, 2); | |
6177 | if (ret) { | |
6178 | dev_err(&hdev->pdev->dev, | |
6179 | "Send vf vlan command fail, ret =%d.\n", | |
6180 | ret); | |
6181 | return ret; | |
6182 | } | |
6183 | ||
6184 | if (!is_kill) { | |
6c251711 | 6185 | #define HCLGE_VF_VLAN_NO_ENTRY 2 |
46a3df9f S |
6186 | if (!req0->resp_code || req0->resp_code == 1) |
6187 | return 0; | |
6188 | ||
6c251711 YL |
6189 | if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) { |
6190 | dev_warn(&hdev->pdev->dev, | |
6191 | "vf vlan table is full, vf vlan filter is disabled\n"); | |
6192 | return 0; | |
6193 | } | |
6194 | ||
46a3df9f S |
6195 | dev_err(&hdev->pdev->dev, |
6196 | "Add vf vlan filter fail, ret =%d.\n", | |
6197 | req0->resp_code); | |
6198 | } else { | |
41dafea2 | 6199 | #define HCLGE_VF_VLAN_DEL_NO_FOUND 1 |
46a3df9f S |
6200 | if (!req0->resp_code) |
6201 | return 0; | |
6202 | ||
41dafea2 YL |
6203 | if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) { |
6204 | dev_warn(&hdev->pdev->dev, | |
6205 | "vlan %d filter is not in vf vlan table\n", | |
6206 | vlan); | |
6207 | return 0; | |
6208 | } | |
6209 | ||
46a3df9f S |
6210 | dev_err(&hdev->pdev->dev, |
6211 | "Kill vf vlan filter fail, ret =%d.\n", | |
6212 | req0->resp_code); | |
6213 | } | |
6214 | ||
6215 | return -EIO; | |
6216 | } | |
6217 | ||
dc8131d8 YL |
6218 | static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto, |
6219 | u16 vlan_id, bool is_kill) | |
46a3df9f | 6220 | { |
d44f9b63 | 6221 | struct hclge_vlan_filter_pf_cfg_cmd *req; |
46a3df9f S |
6222 | struct hclge_desc desc; |
6223 | u8 vlan_offset_byte_val; | |
6224 | u8 vlan_offset_byte; | |
6225 | u8 vlan_offset_160; | |
6226 | int ret; | |
6227 | ||
6228 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false); | |
6229 | ||
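/* The PF VLAN table is addressed as 160-entry (20-byte) bitmap
 * blocks, one block per command: vlan_offset_160 selects the block,
 * then a byte offset and a bit within that byte select the VLAN ID.
 */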
6230 | vlan_offset_160 = vlan_id / 160; | |
6231 | vlan_offset_byte = (vlan_id % 160) / 8; | |
6232 | vlan_offset_byte_val = 1 << (vlan_id % 8); | |
6233 | ||
d44f9b63 | 6234 | req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data; |
46a3df9f S |
6235 | req->vlan_offset = vlan_offset_160; |
6236 | req->vlan_cfg = is_kill; | |
6237 | req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val; | |
6238 | ||
6239 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
dc8131d8 YL |
6240 | if (ret) |
6241 | dev_err(&hdev->pdev->dev, | |
6242 | "port vlan command, send fail, ret =%d.\n", ret); | |
6243 | return ret; | |
6244 | } | |
6245 | ||
6246 | static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto, | |
6247 | u16 vport_id, u16 vlan_id, u8 qos, | |
6248 | bool is_kill) | |
6249 | { | |
6250 | u16 vport_idx, vport_num = 0; | |
6251 | int ret; | |
6252 | ||
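/* VLAN 0 is never removed from the hardware table; the early
 * return below leaves the default entry in place.
 */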
daaa8521 YL |
6253 | if (is_kill && !vlan_id) |
6254 | return 0; | |
6255 | ||
dc8131d8 YL |
6256 | ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id, |
6257 | 0, proto); | |
46a3df9f S |
6258 | if (ret) { |
6259 | dev_err(&hdev->pdev->dev, | |
dc8131d8 YL |
6260 | "Set %d vport vlan filter config fail, ret =%d.\n", |
6261 | vport_id, ret); | |
46a3df9f S |
6262 | return ret; |
6263 | } | |
6264 | ||
dc8131d8 YL |
6265 | /* vlan 0 may be added twice when 8021q module is enabled */ |
6266 | if (!is_kill && !vlan_id && | |
6267 | test_bit(vport_id, hdev->vlan_table[vlan_id])) | |
6268 | return 0; | |
6269 | ||
6270 | if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) { | |
46a3df9f | 6271 | dev_err(&hdev->pdev->dev, |
dc8131d8 YL |
6272 | "Add port vlan failed, vport %d is already in vlan %d\n", |
6273 | vport_id, vlan_id); | |
6274 | return -EINVAL; | |
46a3df9f S |
6275 | } |
6276 | ||
dc8131d8 YL |
6277 | if (is_kill && |
6278 | !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) { | |
6279 | dev_err(&hdev->pdev->dev, | |
6280 | "Delete port vlan failed, vport %d is not in vlan %d\n", | |
6281 | vport_id, vlan_id); | |
6282 | return -EINVAL; | |
6283 | } | |
6284 | ||
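/* Only touch the port-level filter on the edge transitions, i.e.
 * when the first vport joins the VLAN (add) or the last vport
 * leaves it (kill); otherwise the per-vport bitmap above suffices.
 */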
54e97d11 | 6285 | for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM) |
dc8131d8 YL |
6286 | vport_num++; |
6287 | ||
6288 | if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1)) | |
6289 | ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id, | |
6290 | is_kill); | |
6291 | ||
6292 | return ret; | |
6293 | } | |
6294 | ||
6295 | int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto, | |
6296 | u16 vlan_id, bool is_kill) | |
6297 | { | |
6298 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6299 | struct hclge_dev *hdev = vport->back; | |
6300 | ||
6301 | return hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, vlan_id, | |
6302 | 0, is_kill); | |
46a3df9f S |
6303 | } |
6304 | ||
6305 | static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid, | |
6306 | u16 vlan, u8 qos, __be16 proto) | |
6307 | { | |
6308 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6309 | struct hclge_dev *hdev = vport->back; | |
6310 | ||
6311 | if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7)) | |
6312 | return -EINVAL; | |
6313 | if (proto != htons(ETH_P_8021Q)) | |
6314 | return -EPROTONOSUPPORT; | |
6315 | ||
dc8131d8 | 6316 | return hclge_set_vlan_filter_hw(hdev, proto, vfid, vlan, qos, false); |
46a3df9f S |
6317 | } |
6318 | ||
5f6ea83f PL |
6319 | static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport) |
6320 | { | |
6321 | struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg; | |
6322 | struct hclge_vport_vtag_tx_cfg_cmd *req; | |
6323 | struct hclge_dev *hdev = vport->back; | |
6324 | struct hclge_desc desc; | |
6325 | int status; | |
6326 | ||
6327 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false); | |
6328 | ||
6329 | req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data; | |
6330 | req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1); | |
6331 | req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2); | |
e4e87715 PL |
6332 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B, |
6333 | vcfg->accept_tag1 ? 1 : 0); | |
6334 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B, | |
6335 | vcfg->accept_untag1 ? 1 : 0); | |
6336 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B, | |
6337 | vcfg->accept_tag2 ? 1 : 0); | |
6338 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B, | |
6339 | vcfg->accept_untag2 ? 1 : 0); | |
6340 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B, | |
6341 | vcfg->insert_tag1_en ? 1 : 0); | |
6342 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B, | |
6343 | vcfg->insert_tag2_en ? 1 : 0); | |
6344 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0); | |
5f6ea83f PL |
6345 | |
6346 | req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; | |
6347 | req->vf_bitmap[req->vf_offset] = | |
6348 | 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE); | |
6349 | ||
6350 | status = hclge_cmd_send(&hdev->hw, &desc, 1); | |
6351 | if (status) | |
6352 | dev_err(&hdev->pdev->dev, | |
6353 | "Send port txvlan cfg command fail, ret =%d\n", | |
6354 | status); | |
6355 | ||
6356 | return status; | |
6357 | } | |
6358 | ||
6359 | static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport) | |
6360 | { | |
6361 | struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg; | |
6362 | struct hclge_vport_vtag_rx_cfg_cmd *req; | |
6363 | struct hclge_dev *hdev = vport->back; | |
6364 | struct hclge_desc desc; | |
6365 | int status; | |
6366 | ||
6367 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false); | |
6368 | ||
6369 | req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data; | |
e4e87715 PL |
6370 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B, |
6371 | vcfg->strip_tag1_en ? 1 : 0); | |
6372 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B, | |
6373 | vcfg->strip_tag2_en ? 1 : 0); | |
6374 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B, | |
6375 | vcfg->vlan1_vlan_prionly ? 1 : 0); | |
6376 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B, | |
6377 | vcfg->vlan2_vlan_prionly ? 1 : 0); | |
5f6ea83f PL |
6378 | |
6379 | req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; | |
6380 | req->vf_bitmap[req->vf_offset] = | |
6381 | 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE); | |
6382 | ||
6383 | status = hclge_cmd_send(&hdev->hw, &desc, 1); | |
6384 | if (status) | |
6385 | dev_err(&hdev->pdev->dev, | |
6386 | "Send port rxvlan cfg command fail, ret =%d\n", | |
6387 | status); | |
6388 | ||
6389 | return status; | |
6390 | } | |
6391 | ||
6392 | static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev) | |
6393 | { | |
6394 | struct hclge_rx_vlan_type_cfg_cmd *rx_req; | |
6395 | struct hclge_tx_vlan_type_cfg_cmd *tx_req; | |
6396 | struct hclge_desc desc; | |
6397 | int status; | |
6398 | ||
6399 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false); | |
6400 | rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data; | |
6401 | rx_req->ot_fst_vlan_type = | |
6402 | cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type); | |
6403 | rx_req->ot_sec_vlan_type = | |
6404 | cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type); | |
6405 | rx_req->in_fst_vlan_type = | |
6406 | cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type); | |
6407 | rx_req->in_sec_vlan_type = | |
6408 | cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type); | |
6409 | ||
6410 | status = hclge_cmd_send(&hdev->hw, &desc, 1); | |
6411 | if (status) { | |
6412 | dev_err(&hdev->pdev->dev, | |
6413 | "Send rxvlan protocol type command fail, ret =%d\n", | |
6414 | status); | |
6415 | return status; | |
6416 | } | |
6417 | ||
6418 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false); | |
6419 | ||
d0d72bac | 6420 | tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data; |
5f6ea83f PL |
6421 | tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type); |
6422 | tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type); | |
6423 | ||
6424 | status = hclge_cmd_send(&hdev->hw, &desc, 1); | |
6425 | if (status) | |
6426 | dev_err(&hdev->pdev->dev, | |
6427 | "Send txvlan protocol type command fail, ret =%d\n", | |
6428 | status); | |
6429 | ||
6430 | return status; | |
6431 | } | |
6432 | ||
46a3df9f S |
6433 | static int hclge_init_vlan_config(struct hclge_dev *hdev) |
6434 | { | |
5f6ea83f PL |
6435 | #define HCLGE_DEF_VLAN_TYPE 0x8100 |
6436 | ||
c60edc17 | 6437 | struct hnae3_handle *handle = &hdev->vport[0].nic; |
5f6ea83f | 6438 | struct hclge_vport *vport; |
46a3df9f | 6439 | int ret; |
5f6ea83f PL |
6440 | int i; |
6441 | ||
64d114f0 ZL |
6442 | if (hdev->pdev->revision >= 0x21) { |
6443 | ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, | |
6444 | HCLGE_FILTER_FE_EGRESS, true); | |
6445 | if (ret) | |
6446 | return ret; | |
46a3df9f | 6447 | |
64d114f0 ZL |
6448 | ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, |
6449 | HCLGE_FILTER_FE_INGRESS, true); | |
6450 | if (ret) | |
6451 | return ret; | |
6452 | } else { | |
6453 | ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, | |
6454 | HCLGE_FILTER_FE_EGRESS_V1_B, | |
6455 | true); | |
6456 | if (ret) | |
6457 | return ret; | |
6458 | } | |
46a3df9f | 6459 | |
c60edc17 JS |
6460 | handle->netdev_flags |= HNAE3_VLAN_FLTR; |
6461 | ||
5f6ea83f PL |
6462 | hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE; |
6463 | hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE; | |
6464 | hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE; | |
6465 | hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE; | |
6466 | hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE; | |
6467 | hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE; | |
6468 | ||
6469 | ret = hclge_set_vlan_protocol_type(hdev); | |
5e43aef8 L |
6470 | if (ret) |
6471 | return ret; | |
46a3df9f | 6472 | |
5f6ea83f PL |
6473 | for (i = 0; i < hdev->num_alloc_vport; i++) { |
6474 | vport = &hdev->vport[i]; | |
dcb35cce PL |
6475 | vport->txvlan_cfg.accept_tag1 = true; |
6476 | vport->txvlan_cfg.accept_untag1 = true; | |
6477 | ||
6478 | /* accept_tag2 and accept_untag2 are not supported on | |
6479 | * pdev revision(0x20); newer revisions support them. On | |
6480 | * revision(0x20) the firmware does not return an error when | |
6481 | * these two fields are set in the command. | |
6482 | * These two fields cannot be configured by the user. | |
6483 | */ | |
6484 | vport->txvlan_cfg.accept_tag2 = true; | |
6485 | vport->txvlan_cfg.accept_untag2 = true; | |
6486 | ||
5f6ea83f PL |
6487 | vport->txvlan_cfg.insert_tag1_en = false; |
6488 | vport->txvlan_cfg.insert_tag2_en = false; | |
6489 | vport->txvlan_cfg.default_tag1 = 0; | |
6490 | vport->txvlan_cfg.default_tag2 = 0; | |
6491 | ||
6492 | ret = hclge_set_vlan_tx_offload_cfg(vport); | |
6493 | if (ret) | |
6494 | return ret; | |
6495 | ||
6496 | vport->rxvlan_cfg.strip_tag1_en = false; | |
6497 | vport->rxvlan_cfg.strip_tag2_en = true; | |
6498 | vport->rxvlan_cfg.vlan1_vlan_prionly = false; | |
6499 | vport->rxvlan_cfg.vlan2_vlan_prionly = false; | |
6500 | ||
6501 | ret = hclge_set_vlan_rx_offload_cfg(vport); | |
6502 | if (ret) | |
6503 | return ret; | |
6504 | } | |
6505 | ||
dc8131d8 | 6506 | return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false); |
46a3df9f S |
6507 | } |
6508 | ||
b2641e2a | 6509 | int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) |
052ece6d PL |
6510 | { |
6511 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6512 | ||
6513 | vport->rxvlan_cfg.strip_tag1_en = false; | |
6514 | vport->rxvlan_cfg.strip_tag2_en = enable; | |
6515 | vport->rxvlan_cfg.vlan1_vlan_prionly = false; | |
6516 | vport->rxvlan_cfg.vlan2_vlan_prionly = false; | |
6517 | ||
6518 | return hclge_set_vlan_rx_offload_cfg(vport); | |
6519 | } | |
6520 | ||
e6d7d79d | 6521 | static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps) |
46a3df9f | 6522 | { |
d44f9b63 | 6523 | struct hclge_config_max_frm_size_cmd *req; |
46a3df9f | 6524 | struct hclge_desc desc; |
46a3df9f | 6525 | |
46a3df9f S |
6526 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false); |
6527 | ||
d44f9b63 | 6528 | req = (struct hclge_config_max_frm_size_cmd *)desc.data; |
e6d7d79d | 6529 | req->max_frm_size = cpu_to_le16(new_mps); |
8fc7346c | 6530 | req->min_frm_size = HCLGE_MAC_MIN_FRAME; |
46a3df9f | 6531 | |
e6d7d79d | 6532 | return hclge_cmd_send(&hdev->hw, &desc, 1); |
46a3df9f S |
6533 | } |
6534 | ||
dd72140c FL |
6535 | static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu) |
6536 | { | |
6537 | struct hclge_vport *vport = hclge_get_vport(handle); | |
818f1675 YL |
6538 | |
6539 | return hclge_set_vport_mtu(vport, new_mtu); | |
6540 | } | |
6541 | ||
6542 | int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu) | |
6543 | { | |
dd72140c | 6544 | struct hclge_dev *hdev = vport->back; |
818f1675 | 6545 | int i, max_frm_size, ret = 0; |
dd72140c | 6546 | |
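/* Convert the requested MTU into a maximum frame size: payload
 * plus Ethernet header, FCS and room for two stacked VLAN tags.
 */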
e6d7d79d YL |
6547 | max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN; |
6548 | if (max_frm_size < HCLGE_MAC_MIN_FRAME || | |
6549 | max_frm_size > HCLGE_MAC_MAX_FRAME) | |
6550 | return -EINVAL; | |
6551 | ||
818f1675 YL |
6552 | max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME); |
6553 | mutex_lock(&hdev->vport_lock); | |
6554 | /* VF's mps must fit within hdev->mps */ | |
6555 | if (vport->vport_id && max_frm_size > hdev->mps) { | |
6556 | mutex_unlock(&hdev->vport_lock); | |
6557 | return -EINVAL; | |
6558 | } else if (vport->vport_id) { | |
6559 | vport->mps = max_frm_size; | |
6560 | mutex_unlock(&hdev->vport_lock); | |
6561 | return 0; | |
6562 | } | |
6563 | ||
6564 | /* PF's mps must be no less than any VF's mps */ | |
6565 | for (i = 1; i < hdev->num_alloc_vport; i++) | |
6566 | if (max_frm_size < hdev->vport[i].mps) { | |
6567 | mutex_unlock(&hdev->vport_lock); | |
6568 | return -EINVAL; | |
6569 | } | |
6570 | ||
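/* Bring the client down while the MPS and the packet buffer
 * layout are being changed; the buffers are re-sized for the new
 * frame size below.
 */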
cdca4c48 YL |
6571 | hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); |
6572 | ||
e6d7d79d | 6573 | ret = hclge_set_mac_mtu(hdev, max_frm_size); |
dd72140c FL |
6574 | if (ret) { |
6575 | dev_err(&hdev->pdev->dev, | |
6576 | "Change mtu fail, ret =%d\n", ret); | |
818f1675 | 6577 | goto out; |
dd72140c FL |
6578 | } |
6579 | ||
e6d7d79d | 6580 | hdev->mps = max_frm_size; |
818f1675 | 6581 | vport->mps = max_frm_size; |
e6d7d79d | 6582 | |
dd72140c FL |
6583 | ret = hclge_buffer_alloc(hdev); |
6584 | if (ret) | |
6585 | dev_err(&hdev->pdev->dev, | |
6586 | "Allocate buffer fail, ret =%d\n", ret); | |
6587 | ||
818f1675 | 6588 | out: |
cdca4c48 | 6589 | hclge_notify_client(hdev, HNAE3_UP_CLIENT); |
818f1675 | 6590 | mutex_unlock(&hdev->vport_lock); |
dd72140c FL |
6591 | return ret; |
6592 | } | |
6593 | ||
46a3df9f S |
6594 | static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id, |
6595 | bool enable) | |
6596 | { | |
d44f9b63 | 6597 | struct hclge_reset_tqp_queue_cmd *req; |
46a3df9f S |
6598 | struct hclge_desc desc; |
6599 | int ret; | |
6600 | ||
6601 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false); | |
6602 | ||
d44f9b63 | 6603 | req = (struct hclge_reset_tqp_queue_cmd *)desc.data; |
46a3df9f | 6604 | req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK); |
e4e87715 | 6605 | hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable); |
46a3df9f S |
6606 | |
6607 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
6608 | if (ret) { | |
6609 | dev_err(&hdev->pdev->dev, | |
6610 | "Send tqp reset cmd error, status =%d\n", ret); | |
6611 | return ret; | |
6612 | } | |
6613 | ||
6614 | return 0; | |
6615 | } | |
6616 | ||
6617 | static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id) | |
6618 | { | |
d44f9b63 | 6619 | struct hclge_reset_tqp_queue_cmd *req; |
46a3df9f S |
6620 | struct hclge_desc desc; |
6621 | int ret; | |
6622 | ||
6623 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true); | |
6624 | ||
d44f9b63 | 6625 | req = (struct hclge_reset_tqp_queue_cmd *)desc.data; |
46a3df9f S |
6626 | req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK); |
6627 | ||
6628 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
6629 | if (ret) { | |
6630 | dev_err(&hdev->pdev->dev, | |
6631 | "Get reset status error, status =%d\n", ret); | |
6632 | return ret; | |
6633 | } | |
6634 | ||
e4e87715 | 6635 | return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B); |
46a3df9f S |
6636 | } |
6637 | ||
0c29d191 | 6638 | u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id) |
814e0274 PL |
6639 | { |
6640 | struct hnae3_queue *queue; | |
6641 | struct hclge_tqp *tqp; | |
6642 | ||
6643 | queue = handle->kinfo.tqp[queue_id]; | |
6644 | tqp = container_of(queue, struct hclge_tqp, q); | |
6645 | ||
6646 | return tqp->index; | |
6647 | } | |
6648 | ||
7fa6be4f | 6649 | int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id) |
46a3df9f S |
6650 | { |
6651 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6652 | struct hclge_dev *hdev = vport->back; | |
6653 | int reset_try_times = 0; | |
6654 | int reset_status; | |
814e0274 | 6655 | u16 queue_gid; |
7fa6be4f | 6656 | int ret = 0; |
46a3df9f | 6657 | |
814e0274 PL |
6658 | queue_gid = hclge_covert_handle_qid_global(handle, queue_id); |
6659 | ||
46a3df9f S |
6660 | ret = hclge_tqp_enable(hdev, queue_id, 0, false); |
6661 | if (ret) { | |
7fa6be4f HT |
6662 | dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret); |
6663 | return ret; | |
46a3df9f S |
6664 | } |
6665 | ||
814e0274 | 6666 | ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true); |
46a3df9f | 6667 | if (ret) { |
7fa6be4f HT |
6668 | dev_err(&hdev->pdev->dev, |
6669 | "Send reset tqp cmd fail, ret = %d\n", ret); | |
6670 | return ret; | |
46a3df9f S |
6671 | } |
6672 | ||
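/* Give the hardware time to complete the queue reset: poll the
 * ready bit with a 20 msec sleep between reads, bounded by
 * HCLGE_TQP_RESET_TRY_TIMES.
 */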
6673 | reset_try_times = 0; | |
6674 | while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) { | |
6675 | /* Wait for tqp hw reset */ | |
6676 | msleep(20); | |
814e0274 | 6677 | reset_status = hclge_get_reset_status(hdev, queue_gid); |
46a3df9f S |
6678 | if (reset_status) |
6679 | break; | |
6680 | } | |
6681 | ||
6682 | if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) { | |
7fa6be4f HT |
6683 | dev_err(&hdev->pdev->dev, "Reset TQP fail\n"); |
6684 | return -ETIME; | |
46a3df9f S |
6685 | } |
6686 | ||
814e0274 | 6687 | ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false); |
7fa6be4f HT |
6688 | if (ret) |
6689 | dev_err(&hdev->pdev->dev, | |
6690 | "Deassert the soft reset fail, ret = %d\n", ret); | |
6691 | ||
6692 | return ret; | |
46a3df9f S |
6693 | } |
6694 | ||
1a426f8b PL |
6695 | void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id) |
6696 | { | |
6697 | struct hclge_dev *hdev = vport->back; | |
6698 | int reset_try_times = 0; | |
6699 | int reset_status; | |
6700 | u16 queue_gid; | |
6701 | int ret; | |
6702 | ||
6703 | queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id); | |
6704 | ||
6705 | ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true); | |
6706 | if (ret) { | |
6707 | dev_warn(&hdev->pdev->dev, | |
6708 | "Send reset tqp cmd fail, ret = %d\n", ret); | |
6709 | return; | |
6710 | } | |
6711 | ||
6712 | reset_try_times = 0; | |
6713 | while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) { | |
6714 | /* Wait for tqp hw reset */ | |
6715 | msleep(20); | |
6716 | reset_status = hclge_get_reset_status(hdev, queue_gid); | |
6717 | if (reset_status) | |
6718 | break; | |
6719 | } | |
6720 | ||
6721 | if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) { | |
6722 | dev_warn(&hdev->pdev->dev, "Reset TQP fail\n"); | |
6723 | return; | |
6724 | } | |
6725 | ||
6726 | ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false); | |
6727 | if (ret) | |
6728 | dev_warn(&hdev->pdev->dev, | |
6729 | "Deassert the soft reset fail, ret = %d\n", ret); | |
6730 | } | |
6731 | ||
46a3df9f S |
6732 | static u32 hclge_get_fw_version(struct hnae3_handle *handle) |
6733 | { | |
6734 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6735 | struct hclge_dev *hdev = vport->back; | |
6736 | ||
6737 | return hdev->fw_version; | |
6738 | } | |
6739 | ||
61387774 PL |
6740 | static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) |
6741 | { | |
6742 | struct phy_device *phydev = hdev->hw.mac.phydev; | |
6743 | ||
6744 | if (!phydev) | |
6745 | return; | |
6746 | ||
70814e81 | 6747 | phy_set_asym_pause(phydev, rx_en, tx_en); |
61387774 PL |
6748 | } |
6749 | ||
6750 | static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) | |
6751 | { | |
61387774 PL |
6752 | int ret; |
6753 | ||
6754 | if (rx_en && tx_en) | |
40173a2e | 6755 | hdev->fc_mode_last_time = HCLGE_FC_FULL; |
61387774 | 6756 | else if (rx_en && !tx_en) |
40173a2e | 6757 | hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE; |
61387774 | 6758 | else if (!rx_en && tx_en) |
40173a2e | 6759 | hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE; |
61387774 | 6760 | else |
40173a2e | 6761 | hdev->fc_mode_last_time = HCLGE_FC_NONE; |
61387774 | 6762 | |
40173a2e | 6763 | if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) |
61387774 | 6764 | return 0; |
61387774 PL |
6765 | |
6766 | ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en); | |
6767 | if (ret) { | |
6768 | dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n", | |
6769 | ret); | |
6770 | return ret; | |
6771 | } | |
6772 | ||
40173a2e | 6773 | hdev->tm_info.fc_mode = hdev->fc_mode_last_time; |
61387774 PL |
6774 | |
6775 | return 0; | |
6776 | } | |
6777 | ||
1770a7a3 PL |
6778 | int hclge_cfg_flowctrl(struct hclge_dev *hdev) |
6779 | { | |
6780 | struct phy_device *phydev = hdev->hw.mac.phydev; | |
6781 | u16 remote_advertising = 0; | |
6782 | u16 local_advertising = 0; | |
6783 | u32 rx_pause, tx_pause; | |
6784 | u8 flowctl; | |
6785 | ||
6786 | if (!phydev->link || !phydev->autoneg) | |
6787 | return 0; | |
6788 | ||
3c1bcc86 | 6789 | local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising); |
1770a7a3 PL |
6790 | |
6791 | if (phydev->pause) | |
6792 | remote_advertising = LPA_PAUSE_CAP; | |
6793 | ||
6794 | if (phydev->asym_pause) | |
6795 | remote_advertising |= LPA_PAUSE_ASYM; | |
6796 | ||
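/* Resolve the pause mode from the local and link-partner
 * advertisements (IEEE 802.3, table 28B-3). MAC pause is only
 * defined for full duplex, so half duplex clears both directions.
 */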
6797 | flowctl = mii_resolve_flowctrl_fdx(local_advertising, | |
6798 | remote_advertising); | |
6799 | tx_pause = flowctl & FLOW_CTRL_TX; | |
6800 | rx_pause = flowctl & FLOW_CTRL_RX; | |
6801 | ||
6802 | if (phydev->duplex == HCLGE_MAC_HALF) { | |
6803 | tx_pause = 0; | |
6804 | rx_pause = 0; | |
6805 | } | |
6806 | ||
6807 | return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause); | |
6808 | } | |
6809 | ||
46a3df9f S |
6810 | static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg, |
6811 | u32 *rx_en, u32 *tx_en) | |
6812 | { | |
6813 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6814 | struct hclge_dev *hdev = vport->back; | |
6815 | ||
6816 | *auto_neg = hclge_get_autoneg(handle); | |
6817 | ||
6818 | if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { | |
6819 | *rx_en = 0; | |
6820 | *tx_en = 0; | |
6821 | return; | |
6822 | } | |
6823 | ||
6824 | if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) { | |
6825 | *rx_en = 1; | |
6826 | *tx_en = 0; | |
6827 | } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) { | |
6828 | *tx_en = 1; | |
6829 | *rx_en = 0; | |
6830 | } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) { | |
6831 | *rx_en = 1; | |
6832 | *tx_en = 1; | |
6833 | } else { | |
6834 | *rx_en = 0; | |
6835 | *tx_en = 0; | |
6836 | } | |
6837 | } | |
6838 | ||
61387774 PL |
6839 | static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg, |
6840 | u32 rx_en, u32 tx_en) | |
6841 | { | |
6842 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6843 | struct hclge_dev *hdev = vport->back; | |
6844 | struct phy_device *phydev = hdev->hw.mac.phydev; | |
6845 | u32 fc_autoneg; | |
6846 | ||
61387774 PL |
6847 | fc_autoneg = hclge_get_autoneg(handle); |
6848 | if (auto_neg != fc_autoneg) { | |
6849 | dev_info(&hdev->pdev->dev, | |
6850 | "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n"); | |
6851 | return -EOPNOTSUPP; | |
6852 | } | |
6853 | ||
6854 | if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { | |
6855 | dev_info(&hdev->pdev->dev, | |
6856 | "Priority flow control enabled. Cannot set link flow control.\n"); | |
6857 | return -EOPNOTSUPP; | |
6858 | } | |
6859 | ||
6860 | hclge_set_flowctrl_adv(hdev, rx_en, tx_en); | |
6861 | ||
6862 | if (!fc_autoneg) | |
6863 | return hclge_cfg_pauseparam(hdev, rx_en, tx_en); | |
6864 | ||
0c963e8c FL |
6865 | /* Only support flow control negotiation for netdev with |
6866 | * phy attached for now. | |
6867 | */ | |
6868 | if (!phydev) | |
6869 | return -EOPNOTSUPP; | |
6870 | ||
61387774 PL |
6871 | return phy_start_aneg(phydev); |
6872 | } | |
6873 | ||
46a3df9f S |
6874 | static void hclge_get_ksettings_an_result(struct hnae3_handle *handle, |
6875 | u8 *auto_neg, u32 *speed, u8 *duplex) | |
6876 | { | |
6877 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6878 | struct hclge_dev *hdev = vport->back; | |
6879 | ||
6880 | if (speed) | |
6881 | *speed = hdev->hw.mac.speed; | |
6882 | if (duplex) | |
6883 | *duplex = hdev->hw.mac.duplex; | |
6884 | if (auto_neg) | |
6885 | *auto_neg = hdev->hw.mac.autoneg; | |
6886 | } | |
6887 | ||
6888 | static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type) | |
6889 | { | |
6890 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6891 | struct hclge_dev *hdev = vport->back; | |
6892 | ||
6893 | if (media_type) | |
6894 | *media_type = hdev->hw.mac.media_type; | |
6895 | } | |
6896 | ||
6897 | static void hclge_get_mdix_mode(struct hnae3_handle *handle, | |
6898 | u8 *tp_mdix_ctrl, u8 *tp_mdix) | |
6899 | { | |
6900 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6901 | struct hclge_dev *hdev = vport->back; | |
6902 | struct phy_device *phydev = hdev->hw.mac.phydev; | |
6903 | int mdix_ctrl, mdix, retval, is_resolved; | |
6904 | ||
6905 | if (!phydev) { | |
6906 | *tp_mdix_ctrl = ETH_TP_MDI_INVALID; | |
6907 | *tp_mdix = ETH_TP_MDI_INVALID; | |
6908 | return; | |
6909 | } | |
6910 | ||
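/* The MDI(-X) control and status bits live on a separate PHY
 * register page: select it, read both registers, then restore
 * the copper page.
 */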
6911 | phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX); | |
6912 | ||
6913 | retval = phy_read(phydev, HCLGE_PHY_CSC_REG); | |
e4e87715 PL |
6914 | mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M, |
6915 | HCLGE_PHY_MDIX_CTRL_S); | |
46a3df9f S |
6916 | |
6917 | retval = phy_read(phydev, HCLGE_PHY_CSS_REG); | |
e4e87715 PL |
6918 | mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B); |
6919 | is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B); | |
46a3df9f S |
6920 | |
6921 | phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER); | |
6922 | ||
6923 | switch (mdix_ctrl) { | |
6924 | case 0x0: | |
6925 | *tp_mdix_ctrl = ETH_TP_MDI; | |
6926 | break; | |
6927 | case 0x1: | |
6928 | *tp_mdix_ctrl = ETH_TP_MDI_X; | |
6929 | break; | |
6930 | case 0x3: | |
6931 | *tp_mdix_ctrl = ETH_TP_MDI_AUTO; | |
6932 | break; | |
6933 | default: | |
6934 | *tp_mdix_ctrl = ETH_TP_MDI_INVALID; | |
6935 | break; | |
6936 | } | |
6937 | ||
6938 | if (!is_resolved) | |
6939 | *tp_mdix = ETH_TP_MDI_INVALID; | |
6940 | else if (mdix) | |
6941 | *tp_mdix = ETH_TP_MDI_X; | |
6942 | else | |
6943 | *tp_mdix = ETH_TP_MDI; | |
6944 | } | |
6945 | ||
b01b7cf1 FL |
6946 | static int hclge_init_instance_hw(struct hclge_dev *hdev) |
6947 | { | |
6948 | return hclge_mac_connect_phy(hdev); | |
6949 | } | |
6950 | ||
6951 | static void hclge_uninit_instance_hw(struct hclge_dev *hdev) | |
6952 | { | |
6953 | hclge_mac_disconnect_phy(hdev); | |
6954 | } | |
6955 | ||
46a3df9f S |
6956 | static int hclge_init_client_instance(struct hnae3_client *client, |
6957 | struct hnae3_ae_dev *ae_dev) | |
6958 | { | |
6959 | struct hclge_dev *hdev = ae_dev->priv; | |
6960 | struct hclge_vport *vport; | |
6961 | int i, ret; | |
6962 | ||
6963 | for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { | |
6964 | vport = &hdev->vport[i]; | |
6965 | ||
6966 | switch (client->type) { | |
6967 | case HNAE3_CLIENT_KNIC: | |
6968 | ||
6969 | hdev->nic_client = client; | |
6970 | vport->nic.client = client; | |
6971 | ret = client->ops->init_instance(&vport->nic); | |
6972 | if (ret) | |
49dd8054 | 6973 | goto clear_nic; |
46a3df9f | 6974 | |
b01b7cf1 FL |
6975 | ret = hclge_init_instance_hw(hdev); |
6976 | if (ret) { | |
6977 | client->ops->uninit_instance(&vport->nic, | |
6978 | 0); | |
49dd8054 | 6979 | goto clear_nic; |
b01b7cf1 FL |
6980 | } |
6981 | ||
d9f28fc2 JS |
6982 | hnae3_set_client_init_flag(client, ae_dev, 1); |
6983 | ||
46a3df9f | 6984 | if (hdev->roce_client && |
e92a0843 | 6985 | hnae3_dev_roce_supported(hdev)) { |
46a3df9f S |
6986 | struct hnae3_client *rc = hdev->roce_client; |
6987 | ||
6988 | ret = hclge_init_roce_base_info(vport); | |
6989 | if (ret) | |
49dd8054 | 6990 | goto clear_roce; |
46a3df9f S |
6991 | |
6992 | ret = rc->ops->init_instance(&vport->roce); | |
6993 | if (ret) | |
49dd8054 | 6994 | goto clear_roce; |
d9f28fc2 JS |
6995 | |
6996 | hnae3_set_client_init_flag(hdev->roce_client, | |
6997 | ae_dev, 1); | |
46a3df9f S |
6998 | } |
6999 | ||
7000 | break; | |
7001 | case HNAE3_CLIENT_UNIC: | |
7002 | hdev->nic_client = client; | |
7003 | vport->nic.client = client; | |
7004 | ||
7005 | ret = client->ops->init_instance(&vport->nic); | |
7006 | if (ret) | |
49dd8054 | 7007 | goto clear_nic; |
46a3df9f | 7008 | |
d9f28fc2 JS |
7009 | hnae3_set_client_init_flag(client, ae_dev, 1); |
7010 | ||
46a3df9f S |
7011 | break; |
7012 | case HNAE3_CLIENT_ROCE: | |
e92a0843 | 7013 | if (hnae3_dev_roce_supported(hdev)) { |
46a3df9f S |
7014 | hdev->roce_client = client; |
7015 | vport->roce.client = client; | |
7016 | } | |
7017 | ||
3a46f34d | 7018 | if (hdev->roce_client && hdev->nic_client) { |
46a3df9f S |
7019 | ret = hclge_init_roce_base_info(vport); |
7020 | if (ret) | |
49dd8054 | 7021 | goto clear_roce; |
46a3df9f S |
7022 | |
7023 | ret = client->ops->init_instance(&vport->roce); | |
7024 | if (ret) | |
49dd8054 | 7025 | goto clear_roce; |
d9f28fc2 JS |
7026 | |
7027 | hnae3_set_client_init_flag(client, ae_dev, 1); | |
46a3df9f | 7028 | } |
fa7a4bd5 JS |
7029 | |
7030 | break; | |
7031 | default: | |
7032 | return -EINVAL; | |
46a3df9f S |
7033 | } |
7034 | } | |
7035 | ||
7036 | return 0; | |
49dd8054 JS |
7037 | |
7038 | clear_nic: | |
7039 | hdev->nic_client = NULL; | |
7040 | vport->nic.client = NULL; | |
7041 | return ret; | |
7042 | clear_roce: | |
7043 | hdev->roce_client = NULL; | |
7044 | vport->roce.client = NULL; | |
7045 | return ret; | |
46a3df9f S |
7046 | } |
7047 | ||
7048 | static void hclge_uninit_client_instance(struct hnae3_client *client, | |
7049 | struct hnae3_ae_dev *ae_dev) | |
7050 | { | |
7051 | struct hclge_dev *hdev = ae_dev->priv; | |
7052 | struct hclge_vport *vport; | |
7053 | int i; | |
7054 | ||
7055 | for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { | |
7056 | vport = &hdev->vport[i]; | |
a17dcf3f | 7057 | if (hdev->roce_client) { |
46a3df9f S |
7058 | hdev->roce_client->ops->uninit_instance(&vport->roce, |
7059 | 0); | |
a17dcf3f L |
7060 | hdev->roce_client = NULL; |
7061 | vport->roce.client = NULL; | |
7062 | } | |
46a3df9f S |
7063 | if (client->type == HNAE3_CLIENT_ROCE) |
7064 | return; | |
49dd8054 | 7065 | if (hdev->nic_client && client->ops->uninit_instance) { |
b01b7cf1 | 7066 | hclge_uninit_instance_hw(hdev); |
46a3df9f | 7067 | client->ops->uninit_instance(&vport->nic, 0); |
a17dcf3f L |
7068 | hdev->nic_client = NULL; |
7069 | vport->nic.client = NULL; | |
7070 | } | |
46a3df9f S |
7071 | } |
7072 | } | |
7073 | ||
7074 | static int hclge_pci_init(struct hclge_dev *hdev) | |
7075 | { | |
7076 | struct pci_dev *pdev = hdev->pdev; | |
7077 | struct hclge_hw *hw; | |
7078 | int ret; | |
7079 | ||
7080 | ret = pci_enable_device(pdev); | |
7081 | if (ret) { | |
7082 | dev_err(&pdev->dev, "failed to enable PCI device\n"); | |
3e249d3b | 7083 | return ret; |
46a3df9f S |
7084 | } |
7085 | ||
7086 | ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); | |
7087 | if (ret) { | |
7088 | ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); | |
7089 | if (ret) { | |
7090 | dev_err(&pdev->dev, | |
7091 | "can't set consistent PCI DMA"); | |
7092 | goto err_disable_device; | |
7093 | } | |
7094 | dev_warn(&pdev->dev, "set DMA mask to 32 bits\n"); | |
7095 | } | |
7096 | ||
7097 | ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME); | |
7098 | if (ret) { | |
7099 | dev_err(&pdev->dev, "PCI request regions failed %d\n", ret); | |
7100 | goto err_disable_device; | |
7101 | } | |
7102 | ||
7103 | pci_set_master(pdev); | |
7104 | hw = &hdev->hw; | |
46a3df9f S |
7105 | hw->io_base = pcim_iomap(pdev, 2, 0); |
7106 | if (!hw->io_base) { | |
7107 | dev_err(&pdev->dev, "Can't map configuration register space\n"); | |
7108 | ret = -ENOMEM; | |
7109 | goto err_clr_master; | |
7110 | } | |
7111 | ||
709eb41a L |
7112 | hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev); |
7113 | ||
46a3df9f S |
7114 | return 0; |
7115 | err_clr_master: | |
7116 | pci_clear_master(pdev); | |
7117 | pci_release_regions(pdev); | |
7118 | err_disable_device: | |
7119 | pci_disable_device(pdev); | |
46a3df9f S |
7120 | |
7121 | return ret; | |
7122 | } | |
7123 | ||
7124 | static void hclge_pci_uninit(struct hclge_dev *hdev) | |
7125 | { | |
7126 | struct pci_dev *pdev = hdev->pdev; | |
7127 | ||
6a814413 | 7128 | pcim_iounmap(pdev, hdev->hw.io_base); |
887c3820 | 7129 | pci_free_irq_vectors(pdev); |
46a3df9f S |
7130 | pci_clear_master(pdev); |
7131 | pci_release_mem_regions(pdev); | |
7132 | pci_disable_device(pdev); | |
7133 | } | |
7134 | ||
48569cda PL |
7135 | static void hclge_state_init(struct hclge_dev *hdev) |
7136 | { | |
7137 | set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state); | |
7138 | set_bit(HCLGE_STATE_DOWN, &hdev->state); | |
7139 | clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state); | |
7140 | clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); | |
7141 | clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state); | |
7142 | clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); | |
7143 | } | |
7144 | ||
7145 | static void hclge_state_uninit(struct hclge_dev *hdev) | |
7146 | { | |
7147 | set_bit(HCLGE_STATE_DOWN, &hdev->state); | |
7148 | ||
7149 | if (hdev->service_timer.function) | |
7150 | del_timer_sync(&hdev->service_timer); | |
65e41e7e HT |
7151 | if (hdev->reset_timer.function) |
7152 | del_timer_sync(&hdev->reset_timer); | |
48569cda PL |
7153 | if (hdev->service_task.func) |
7154 | cancel_work_sync(&hdev->service_task); | |
7155 | if (hdev->rst_service_task.func) | |
7156 | cancel_work_sync(&hdev->rst_service_task); | |
7157 | if (hdev->mbx_service_task.func) | |
7158 | cancel_work_sync(&hdev->mbx_service_task); | |
7159 | } | |
7160 | ||
6b9a97ee HT |
7161 | static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev) |
7162 | { | |
7163 | #define HCLGE_FLR_WAIT_MS 100 | |
7164 | #define HCLGE_FLR_WAIT_CNT 50 | |
7165 | struct hclge_dev *hdev = ae_dev->priv; | |
7166 | int cnt = 0; | |
7167 | ||
7168 | clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state); | |
7169 | clear_bit(HNAE3_FLR_DONE, &hdev->flr_state); | |
7170 | set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request); | |
7171 | hclge_reset_event(hdev->pdev, NULL); | |
7172 | ||
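/* Wait for the reset task to bring the function down, bounded by
 * HCLGE_FLR_WAIT_CNT * HCLGE_FLR_WAIT_MS (5 seconds in total),
 * before letting the FLR proceed.
 */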
7173 | while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) && | |
7174 | cnt++ < HCLGE_FLR_WAIT_CNT) | |
7175 | msleep(HCLGE_FLR_WAIT_MS); | |
7176 | ||
7177 | if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state)) | |
7178 | dev_err(&hdev->pdev->dev, | |
7179 | "flr wait down timeout: %d\n", cnt); | |
7180 | } | |
7181 | ||
7182 | static void hclge_flr_done(struct hnae3_ae_dev *ae_dev) | |
7183 | { | |
7184 | struct hclge_dev *hdev = ae_dev->priv; | |
7185 | ||
7186 | set_bit(HNAE3_FLR_DONE, &hdev->flr_state); | |
7187 | } | |
7188 | ||
46a3df9f S |
7189 | static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) |
7190 | { | |
7191 | struct pci_dev *pdev = ae_dev->pdev; | |
46a3df9f S |
7192 | struct hclge_dev *hdev; |
7193 | int ret; | |
7194 | ||
7195 | hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); | |
7196 | if (!hdev) { | |
7197 | ret = -ENOMEM; | |
ffd5656e | 7198 | goto out; |
46a3df9f S |
7199 | } |
7200 | ||
46a3df9f S |
7201 | hdev->pdev = pdev; |
7202 | hdev->ae_dev = ae_dev; | |
4ed340ab | 7203 | hdev->reset_type = HNAE3_NONE_RESET; |
0742ed7c | 7204 | hdev->reset_level = HNAE3_FUNC_RESET; |
46a3df9f | 7205 | ae_dev->priv = hdev; |
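/* Default MPS: a standard maximum-size frame (1518 bytes including
 * FCS) plus room for two VLAN tags.
 */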
e6d7d79d | 7206 | hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN; |
46a3df9f | 7207 | |
818f1675 YL |
7208 | mutex_init(&hdev->vport_lock); |
7209 | ||
46a3df9f S |
7210 | ret = hclge_pci_init(hdev); |
7211 | if (ret) { | |
7212 | dev_err(&pdev->dev, "PCI init failed\n"); | |
ffd5656e | 7213 | goto out; |
46a3df9f S |
7214 | } |
7215 | ||
3efb960f L |
7216 | /* Firmware command queue initialize */ |
7217 | ret = hclge_cmd_queue_init(hdev); | |
7218 | if (ret) { | |
7219 | dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret); | |
ffd5656e | 7220 | goto err_pci_uninit; |
3efb960f L |
7221 | } |
7222 | ||
7223 | /* Firmware command initialize */ | |
46a3df9f S |
7224 | ret = hclge_cmd_init(hdev); |
7225 | if (ret) | |
ffd5656e | 7226 | goto err_cmd_uninit; |
46a3df9f S |
7227 | |
7228 | ret = hclge_get_cap(hdev); | |
7229 | if (ret) { | |
e00e2197 CIK |
7230 | dev_err(&pdev->dev, "get hw capability error, ret = %d.\n", |
7231 | ret); | |
ffd5656e | 7232 | goto err_cmd_uninit; |
46a3df9f S |
7233 | } |
7234 | ||
7235 | ret = hclge_configure(hdev); | |
7236 | if (ret) { | |
7237 | dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret); | |
ffd5656e | 7238 | goto err_cmd_uninit; |
46a3df9f S |
7239 | } |
7240 | ||
887c3820 | 7241 | ret = hclge_init_msi(hdev); |
46a3df9f | 7242 | if (ret) { |
887c3820 | 7243 | dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret); |
ffd5656e | 7244 | goto err_cmd_uninit; |
46a3df9f S |
7245 | } |
7246 | ||
466b0c00 L |
7247 | ret = hclge_misc_irq_init(hdev); |
7248 | if (ret) { | |
7249 | dev_err(&pdev->dev, | |
7250 | "Misc IRQ(vector0) init error, ret = %d.\n", | |
7251 | ret); | |
ffd5656e | 7252 | goto err_msi_uninit; |
466b0c00 L |
7253 | } |
7254 | ||
46a3df9f S |
7255 | ret = hclge_alloc_tqps(hdev); |
7256 | if (ret) { | |
7257 | dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret); | |
ffd5656e | 7258 | goto err_msi_irq_uninit; |
46a3df9f S |
7259 | } |
7260 | ||
7261 | ret = hclge_alloc_vport(hdev); | |
7262 | if (ret) { | |
7263 | dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret); | |
ffd5656e | 7264 | goto err_msi_irq_uninit; |
46a3df9f S |
7265 | } |
7266 | ||
7df7dad6 L |
7267 | ret = hclge_map_tqp(hdev); |
7268 | if (ret) { | |
7269 | dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret); | |
2312e050 | 7270 | goto err_msi_irq_uninit; |
7df7dad6 L |
7271 | } |
7272 | ||
c5ef83cb HT |
7273 | if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) { |
7274 | ret = hclge_mac_mdio_config(hdev); | |
7275 | if (ret) { | |
7276 | dev_err(&hdev->pdev->dev, | |
7277 | "mdio config fail ret=%d\n", ret); | |
2312e050 | 7278 | goto err_msi_irq_uninit; |
c5ef83cb | 7279 | } |
cf9cca2d | 7280 | } |
7281 | ||
39932473 JS |
7282 | ret = hclge_init_umv_space(hdev); |
7283 | if (ret) { | |
7284 | dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret); | |
7285 | goto err_msi_irq_uninit; | |
7286 | } | |
7287 | ||
46a3df9f S |
7288 | ret = hclge_mac_init(hdev); |
7289 | if (ret) { | |
7290 | dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); | |
ffd5656e | 7291 | goto err_mdiobus_unreg; |
46a3df9f | 7292 | } |
46a3df9f S |
7293 | |
7294 | ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); | |
7295 | if (ret) { | |
7296 | dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret); | |
ffd5656e | 7297 | goto err_mdiobus_unreg; |
46a3df9f S |
7298 | } |
7299 | ||
b26a6fea PL |
7300 | ret = hclge_config_gro(hdev, true); |
7301 | if (ret) | |
7302 | goto err_mdiobus_unreg; | |
7303 | ||
46a3df9f S |
7304 | ret = hclge_init_vlan_config(hdev); |
7305 | if (ret) { | |
7306 | dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); | |
ffd5656e | 7307 | goto err_mdiobus_unreg; |
46a3df9f S |
7308 | } |
7309 | ||
7310 | ret = hclge_tm_schd_init(hdev); | |
7311 | if (ret) { | |
7312 | dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret); | |
ffd5656e | 7313 | goto err_mdiobus_unreg; |
68ece54e YL |
7314 | } |
7315 | ||
268f5dfa | 7316 | hclge_rss_init_cfg(hdev); |
68ece54e YL |
7317 | ret = hclge_rss_init_hw(hdev); |
7318 | if (ret) { | |
7319 | dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret); | |
ffd5656e | 7320 | goto err_mdiobus_unreg; |
46a3df9f S |
7321 | } |
7322 | ||
f5aac71c FL |
7323 | ret = init_mgr_tbl(hdev); |
7324 | if (ret) { | |
7325 | dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret); | |
ffd5656e | 7326 | goto err_mdiobus_unreg; |
f5aac71c FL |
7327 | } |
7328 | ||
d695964d JS |
7329 | ret = hclge_init_fd_config(hdev); |
7330 | if (ret) { | |
7331 | dev_err(&pdev->dev, | |
7332 | "fd table init fail, ret=%d\n", ret); | |
7333 | goto err_mdiobus_unreg; | |
7334 | } | |
7335 | ||
99714195 SJ |
7336 | ret = hclge_hw_error_set_state(hdev, true); |
7337 | if (ret) { | |
7338 | dev_err(&pdev->dev, | |
f3fa4a94 | 7339 | "fail(%d) to enable hw error interrupts\n", ret); |
99714195 SJ |
7340 | goto err_mdiobus_unreg; |
7341 | } | |
7342 | ||
cacde272 YL |
7343 | hclge_dcb_ops_set(hdev); |
7344 | ||
d039ef68 | 7345 | timer_setup(&hdev->service_timer, hclge_service_timer, 0); |
65e41e7e | 7346 | timer_setup(&hdev->reset_timer, hclge_reset_timer, 0); |
46a3df9f | 7347 | INIT_WORK(&hdev->service_task, hclge_service_task); |
cb1b9f77 | 7348 | INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task); |
c1a81619 | 7349 | INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task); |
46a3df9f | 7350 | |
8e52a602 XW |
7351 | hclge_clear_all_event_cause(hdev); |
7352 | ||
466b0c00 L |
7353 | /* Enable MISC vector(vector0) */ |
7354 | hclge_enable_vector(&hdev->misc_vector, true); | |
7355 | ||
48569cda | 7356 | hclge_state_init(hdev); |
0742ed7c | 7357 | hdev->last_reset_time = jiffies; |
46a3df9f S |
7358 | |
7359 | pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME); | |
7360 | return 0; | |
7361 | ||
ffd5656e HT |
7362 | err_mdiobus_unreg: |
7363 | if (hdev->hw.mac.phydev) | |
7364 | mdiobus_unregister(hdev->hw.mac.mdio_bus); | |
ffd5656e HT |
7365 | err_msi_irq_uninit: |
7366 | hclge_misc_irq_uninit(hdev); | |
7367 | err_msi_uninit: | |
7368 | pci_free_irq_vectors(pdev); | |
7369 | err_cmd_uninit: | |
7370 | hclge_destroy_cmd_queue(&hdev->hw); | |
7371 | err_pci_uninit: | |
6a814413 | 7372 | pcim_iounmap(pdev, hdev->hw.io_base); |
ffd5656e | 7373 | pci_clear_master(pdev); |
46a3df9f | 7374 | pci_release_regions(pdev); |
ffd5656e | 7375 | pci_disable_device(pdev); |
ffd5656e | 7376 | out: |
46a3df9f S |
7377 | return ret; |
7378 | } | |
7379 | ||
c6dc5213 | 7380 | static void hclge_stats_clear(struct hclge_dev *hdev) |
7381 | { | |
7382 | memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats)); | |
7383 | } | |
7384 | ||
a6d818e3 YL |
7385 | static void hclge_reset_vport_state(struct hclge_dev *hdev) |
7386 | { | |
7387 | struct hclge_vport *vport = hdev->vport; | |
7388 | int i; | |
7389 | ||
7390 | for (i = 0; i < hdev->num_alloc_vport; i++) { | |
7391 | hclge_vport_start(vport); | |
7392 | vport++; | |
7393 | } | |
7394 | } | |
7395 | ||
4ed340ab L |
7396 | static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) |
7397 | { | |
7398 | struct hclge_dev *hdev = ae_dev->priv; | |
7399 | struct pci_dev *pdev = ae_dev->pdev; | |
7400 | int ret; | |
7401 | ||
7402 | set_bit(HCLGE_STATE_DOWN, &hdev->state); | |
7403 | ||
c6dc5213 | 7404 | hclge_stats_clear(hdev); |
dc8131d8 | 7405 | memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table)); |
c6dc5213 | 7406 | |
4ed340ab L |
7407 | ret = hclge_cmd_init(hdev); |
7408 | if (ret) { | |
7409 | dev_err(&pdev->dev, "Cmd queue init failed\n"); | |
7410 | return ret; | |
7411 | } | |
7412 | ||
4ed340ab L |
7413 | ret = hclge_map_tqp(hdev); |
7414 | if (ret) { | |
7415 | dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret); | |
7416 | return ret; | |
7417 | } | |
7418 | ||
39932473 JS |
7419 | hclge_reset_umv_space(hdev); |
7420 | ||
4ed340ab L |
7421 | ret = hclge_mac_init(hdev); |
7422 | if (ret) { | |
7423 | dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); | |
7424 | return ret; | |
7425 | } | |
7426 | ||
4ed340ab L |
7427 | ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); |
7428 | if (ret) { | |
7429 | dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret); | |
7430 | return ret; | |
7431 | } | |
7432 | ||
b26a6fea PL |
7433 | ret = hclge_config_gro(hdev, true); |
7434 | if (ret) | |
7435 | return ret; | |
7436 | ||
4ed340ab L |
7437 | ret = hclge_init_vlan_config(hdev); |
7438 | if (ret) { | |
7439 | dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); | |
7440 | return ret; | |
7441 | } | |
7442 | ||
f31c1ba6 | 7443 | ret = hclge_tm_init_hw(hdev); |
4ed340ab | 7444 | if (ret) { |
f31c1ba6 | 7445 | dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret); |
4ed340ab L |
7446 | return ret; |
7447 | } | |
7448 | ||
7449 | ret = hclge_rss_init_hw(hdev); | |
7450 | if (ret) { | |
7451 | dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret); | |
7452 | return ret; | |
7453 | } | |
7454 | ||
d695964d JS |
7455 | ret = hclge_init_fd_config(hdev); |
7456 | if (ret) { | |
7457 | dev_err(&pdev->dev, | |
7458 | "fd table init fail, ret=%d\n", ret); | |
7459 | return ret; | |
7460 | } | |
7461 | ||
f3fa4a94 SJ |
7462 | /* Re-enable the hw error interrupts because |
7463 | * the interrupts get disabled on core/global reset. | |
01865a50 | 7464 | */ |
f3fa4a94 SJ |
7465 | ret = hclge_hw_error_set_state(hdev, true); |
7466 | if (ret) { | |
7467 | dev_err(&pdev->dev, | |
7468 | "fail(%d) to re-enable HNS hw error interrupts\n", ret); | |
7469 | return ret; | |
7470 | } | |
01865a50 | 7471 | |
a6d818e3 YL |
7472 | hclge_reset_vport_state(hdev); |
7473 | ||
4ed340ab L |
7474 | dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n", |
7475 | HCLGE_DRIVER_NAME); | |
7476 | ||
7477 | return 0; | |
7478 | } | |
7479 | ||
46a3df9f S |
7480 | static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) |
7481 | { | |
7482 | struct hclge_dev *hdev = ae_dev->priv; | |
7483 | struct hclge_mac *mac = &hdev->hw.mac; | |
7484 | ||
48569cda | 7485 | hclge_state_uninit(hdev); |
46a3df9f S |
7486 | |
7487 | if (mac->phydev) | |
7488 | mdiobus_unregister(mac->mdio_bus); | |
7489 | ||
39932473 JS |
7490 | hclge_uninit_umv_space(hdev); |
7491 | ||
466b0c00 L |
7492 | /* Disable MISC vector(vector0) */ |
7493 | hclge_enable_vector(&hdev->misc_vector, false); | |
8e52a602 XW |
7494 | synchronize_irq(hdev->misc_vector.vector_irq); |
7495 | ||
99714195 | 7496 | hclge_hw_error_set_state(hdev, false); |
46a3df9f | 7497 | hclge_destroy_cmd_queue(&hdev->hw); |
ca1d7669 | 7498 | hclge_misc_irq_uninit(hdev); |
46a3df9f | 7499 | hclge_pci_uninit(hdev); |
818f1675 | 7500 | mutex_destroy(&hdev->vport_lock); |
46a3df9f S |
7501 | ae_dev->priv = NULL; |
7502 | } | |
7503 | ||
482d2e9c PL |
7504 | static u32 hclge_get_max_channels(struct hnae3_handle *handle) |
7505 | { | |
7506 | struct hnae3_knic_private_info *kinfo = &handle->kinfo; | |
7507 | struct hclge_vport *vport = hclge_get_vport(handle); | |
7508 | struct hclge_dev *hdev = vport->back; | |
7509 | ||
7510 | return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps); | |
7511 | } | |
7512 | ||
7513 | static void hclge_get_channels(struct hnae3_handle *handle, | |
7514 | struct ethtool_channels *ch) | |
7515 | { | |
7516 | struct hclge_vport *vport = hclge_get_vport(handle); | |
7517 | ||
7518 | ch->max_combined = hclge_get_max_channels(handle); | |
7519 | ch->other_count = 1; | |
7520 | ch->max_other = 1; | |
7521 | ch->combined_count = vport->alloc_tqps; | |
7522 | } | |
7523 | ||
09f2af64 | 7524 | static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle, |
0d43bf45 | 7525 | u16 *alloc_tqps, u16 *max_rss_size) |
09f2af64 PL |
7526 | { |
7527 | struct hclge_vport *vport = hclge_get_vport(handle); | |
7528 | struct hclge_dev *hdev = vport->back; | |
09f2af64 | 7529 | |
0d43bf45 | 7530 | *alloc_tqps = vport->alloc_tqps; |
09f2af64 PL |
7531 | *max_rss_size = hdev->rss_size_max; |
7532 | } | |
7533 | ||
7534 | static void hclge_release_tqp(struct hclge_vport *vport) | |
7535 | { | |
7536 | struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; | |
7537 | struct hclge_dev *hdev = vport->back; | |
7538 | int i; | |
7539 | ||
7540 | for (i = 0; i < kinfo->num_tqps; i++) { | |
7541 | struct hclge_tqp *tqp = | |
7542 | container_of(kinfo->tqp[i], struct hclge_tqp, q); | |
7543 | ||
7544 | tqp->q.handle = NULL; | |
7545 | tqp->q.tqp_index = 0; | |
7546 | tqp->alloced = false; | |
7547 | } | |
7548 | ||
7549 | devm_kfree(&hdev->pdev->dev, kinfo->tqp); | |
7550 | kinfo->tqp = NULL; | |
7551 | } | |
7552 | ||
7553 | static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num) | |
7554 | { | |
7555 | struct hclge_vport *vport = hclge_get_vport(handle); | |
7556 | struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; | |
7557 | struct hclge_dev *hdev = vport->back; | |
7558 | int cur_rss_size = kinfo->rss_size; | |
7559 | int cur_tqps = kinfo->num_tqps; | |
7560 | u16 tc_offset[HCLGE_MAX_TC_NUM]; | |
7561 | u16 tc_valid[HCLGE_MAX_TC_NUM]; | |
7562 | u16 tc_size[HCLGE_MAX_TC_NUM]; | |
7563 | u16 roundup_size; | |
7564 | u32 *rss_indir; | |
7565 | int ret, i; | |
7566 | ||
fdace1bc | 7567 | /* Free the old TQPs, then reallocate them with the new TQP count during nic setup */ | 
09f2af64 PL |
7568 | hclge_release_tqp(vport); |
7569 | ||
128b900d | 7570 | ret = hclge_knic_setup(vport, new_tqps_num, kinfo->num_desc); |
09f2af64 PL |
7571 | if (ret) { |
7572 | dev_err(&hdev->pdev->dev, "setup nic fail, ret = %d\n", ret); | 
7573 | return ret; | |
7574 | } | |
7575 | ||
7576 | ret = hclge_map_tqp_to_vport(hdev, vport); | |
7577 | if (ret) { | |
7578 | dev_err(&hdev->pdev->dev, "map vport tqp fail, ret = %d\n", ret); | 
7579 | return ret; | |
7580 | } | |
7581 | ||
7582 | ret = hclge_tm_schd_init(hdev); | |
7583 | if (ret) { | |
7584 | dev_err(&hdev->pdev->dev, "tm schd init fail, ret = %d\n", ret); | 
7585 | return ret; | |
7586 | } | |
7587 | ||
7588 | roundup_size = roundup_pow_of_two(kinfo->rss_size); | |
7589 | roundup_size = ilog2(roundup_size); | |
7590 | /* Set the RSS TC mode according to the new RSS size */ | |
7591 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { | |
7592 | tc_valid[i] = 0; | |
7593 | ||
7594 | if (!(hdev->hw_tc_map & BIT(i))) | |
7595 | continue; | |
7596 | ||
7597 | tc_valid[i] = 1; | |
7598 | tc_size[i] = roundup_size; | |
7599 | tc_offset[i] = kinfo->rss_size * i; | |
7600 | } | |
7601 | ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset); | |
7602 | if (ret) | |
7603 | return ret; | |
7604 | ||
7605 | /* Reinitialize the RSS indirection table according to the new RSS size */ | 
7606 | rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL); | |
7607 | if (!rss_indir) | |
7608 | return -ENOMEM; | |
7609 | ||
7610 | for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) | |
7611 | rss_indir[i] = i % kinfo->rss_size; | |
7612 | ||
7613 | ret = hclge_set_rss(handle, rss_indir, NULL, 0); | |
7614 | if (ret) | |
7615 | dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n", | |
7616 | ret); | |
7617 | ||
7618 | kfree(rss_indir); | |
7619 | ||
7620 | if (!ret) | |
7621 | dev_info(&hdev->pdev->dev, | |
7622 | "Channels changed, rss_size from %d to %d, tqps from %d to %d", | |
7623 | cur_rss_size, kinfo->rss_size, | |
7624 | cur_tqps, kinfo->rss_size * kinfo->num_tc); | |
7625 | ||
7626 | return ret; | |
7627 | } | |
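/* Worked example (hypothetical numbers, not in the original source): if
 * the new kinfo->rss_size is 6, roundup_pow_of_two(6) = 8 and
 * ilog2(8) = 3, so each TC set in hdev->hw_tc_map is programmed with
 * tc_size = 3 (a 2^3-queue region) at tc_offset = 6 * i, and the
 * indirection table entries become rss_indir[i] = i % 6, spreading RSS
 * traffic over queues 0..5.
 */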
7628 | ||
77b34110 FL |
7629 | static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit, |
7630 | u32 *regs_num_64_bit) | |
7631 | { | |
7632 | struct hclge_desc desc; | |
7633 | u32 total_num; | |
7634 | int ret; | |
7635 | ||
7636 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true); | |
7637 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
7638 | if (ret) { | |
7639 | dev_err(&hdev->pdev->dev, | |
7640 | "Query register number cmd failed, ret = %d.\n", ret); | |
7641 | return ret; | |
7642 | } | |
7643 | ||
7644 | *regs_num_32_bit = le32_to_cpu(desc.data[0]); | |
7645 | *regs_num_64_bit = le32_to_cpu(desc.data[1]); | |
7646 | ||
7647 | total_num = *regs_num_32_bit + *regs_num_64_bit; | |
7648 | if (!total_num) | |
7649 | return -EINVAL; | |
7650 | ||
7651 | return 0; | |
7652 | } | |
7653 | ||
7654 | static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num, | |
7655 | void *data) | |
7656 | { | |
7657 | #define HCLGE_32_BIT_REG_RTN_DATANUM 8 | |
7658 | ||
7659 | struct hclge_desc *desc; | |
7660 | u32 *reg_val = data; | |
7661 | __le32 *desc_data; | |
7662 | int cmd_num; | |
7663 | int i, k, n; | |
7664 | int ret; | |
7665 | ||
7666 | if (regs_num == 0) | |
7667 | return 0; | |
7668 | ||
7669 | cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM); | |
7670 | desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL); | |
7671 | if (!desc) | |
7672 | return -ENOMEM; | |
7673 | ||
7674 | hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true); | |
7675 | ret = hclge_cmd_send(&hdev->hw, desc, cmd_num); | |
7676 | if (ret) { | |
7677 | dev_err(&hdev->pdev->dev, | |
7678 | "Query 32 bit register cmd failed, ret = %d.\n", ret); | |
7679 | kfree(desc); | |
7680 | return ret; | |
7681 | } | |
7682 | ||
7683 | for (i = 0; i < cmd_num; i++) { | |
7684 | if (i == 0) { | |
7685 | desc_data = (__le32 *)(&desc[i].data[0]); | |
7686 | n = HCLGE_32_BIT_REG_RTN_DATANUM - 2; | |
7687 | } else { | |
7688 | desc_data = (__le32 *)(&desc[i]); | |
7689 | n = HCLGE_32_BIT_REG_RTN_DATANUM; | |
7690 | } | |
7691 | for (k = 0; k < n; k++) { | |
7692 | *reg_val++ = le32_to_cpu(*desc_data++); | |
7693 | ||
7694 | regs_num--; | |
7695 | if (!regs_num) | |
7696 | break; | |
7697 | } | |
7698 | } | |
7699 | ||
7700 | kfree(desc); | |
7701 | return 0; | |
7702 | } | |
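/* Editor's note on the "+ 2" above: as the copy loop shows, the first
 * descriptor yields only HCLGE_32_BIT_REG_RTN_DATANUM - 2 = 6 data
 * words (its data[] area), while each follow-up descriptor is consumed
 * as a full 8-word block. E.g. a hypothetical regs_num = 20 gives
 * cmd_num = DIV_ROUND_UP(22, 8) = 3, and 6 + 8 + 8 = 22 >= 20.
 */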
7703 | ||
7704 | static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num, | |
7705 | void *data) | |
7706 | { | |
7707 | #define HCLGE_64_BIT_REG_RTN_DATANUM 4 | |
7708 | ||
7709 | struct hclge_desc *desc; | |
7710 | u64 *reg_val = data; | |
7711 | __le64 *desc_data; | |
7712 | int cmd_num; | |
7713 | int i, k, n; | |
7714 | int ret; | |
7715 | ||
7716 | if (regs_num == 0) | |
7717 | return 0; | |
7718 | ||
7719 | cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM); | |
7720 | desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL); | |
7721 | if (!desc) | |
7722 | return -ENOMEM; | |
7723 | ||
7724 | hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true); | |
7725 | ret = hclge_cmd_send(&hdev->hw, desc, cmd_num); | |
7726 | if (ret) { | |
7727 | dev_err(&hdev->pdev->dev, | |
7728 | "Query 64 bit register cmd failed, ret = %d.\n", ret); | |
7729 | kfree(desc); | |
7730 | return ret; | |
7731 | } | |
7732 | ||
7733 | for (i = 0; i < cmd_num; i++) { | |
7734 | if (i == 0) { | |
7735 | desc_data = (__le64 *)(&desc[i].data[0]); | |
7736 | n = HCLGE_64_BIT_REG_RTN_DATANUM - 1; | |
7737 | } else { | |
7738 | desc_data = (__le64 *)(&desc[i]); | |
7739 | n = HCLGE_64_BIT_REG_RTN_DATANUM; | |
7740 | } | |
7741 | for (k = 0; k < n; k++) { | |
7742 | *reg_val++ = le64_to_cpu(*desc_data++); | |
7743 | ||
7744 | regs_num--; | |
7745 | if (!regs_num) | |
7746 | break; | |
7747 | } | |
7748 | } | |
7749 | ||
7750 | kfree(desc); | |
7751 | return 0; | |
7752 | } | |
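/* Same layout as the 32-bit variant, hence the "+ 1": the first
 * descriptor yields 3 u64 words and each follow-up 4. E.g. a
 * hypothetical regs_num = 10 needs DIV_ROUND_UP(11, 4) = 3 descriptors
 * (3 + 4 + 4 = 11 >= 10).
 */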
7753 | ||
ea4750ca JS |
7754 | #define MAX_SEPARATE_NUM 4 |
7755 | #define SEPARATOR_VALUE 0xFFFFFFFF | |
7756 | #define REG_NUM_PER_LINE 4 | |
7757 | #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32)) | |
7758 | ||
77b34110 FL |
7759 | static int hclge_get_regs_len(struct hnae3_handle *handle) |
7760 | { | |
ea4750ca JS |
7761 | int cmdq_lines, common_lines, ring_lines, tqp_intr_lines; |
7762 | struct hnae3_knic_private_info *kinfo = &handle->kinfo; | |
77b34110 FL |
7763 | struct hclge_vport *vport = hclge_get_vport(handle); |
7764 | struct hclge_dev *hdev = vport->back; | |
7765 | u32 regs_num_32_bit, regs_num_64_bit; | |
7766 | int ret; | |
7767 | ||
7768 | ret = hclge_get_regs_num(hdev, ®s_num_32_bit, ®s_num_64_bit); | |
7769 | if (ret) { | |
7770 | dev_err(&hdev->pdev->dev, | |
7771 | "Get register number failed, ret = %d.\n", ret); | |
7772 | return -EOPNOTSUPP; | |
7773 | } | |
7774 | ||
ea4750ca JS |
7775 | cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1; |
7776 | common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1; | |
7777 | ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1; | |
7778 | tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1; | |
7779 | ||
7780 | return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps + | |
7781 | tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE + | |
7782 | regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64); | |
77b34110 FL |
7783 | } |
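/* Worked sizing example (hypothetical, not in the original source): for
 * a 14-entry address list, sizeof(list) / REG_LEN_PER_LINE + 1 =
 * 56 / 16 + 1 = 4 lines, i.e. 16 u32 slots holding 14 register values
 * plus 2 separator words -- matching the SEPARATOR_VALUE padding that
 * hclge_get_regs() emits below. The firmware-provided 32/64-bit dumps
 * are appended unpadded.
 */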
7784 | ||
7785 | static void hclge_get_regs(struct hnae3_handle *handle, u32 *version, | |
7786 | void *data) | |
7787 | { | |
ea4750ca | 7788 | struct hnae3_knic_private_info *kinfo = &handle->kinfo; |
77b34110 FL |
7789 | struct hclge_vport *vport = hclge_get_vport(handle); |
7790 | struct hclge_dev *hdev = vport->back; | |
7791 | u32 regs_num_32_bit, regs_num_64_bit; | |
ea4750ca JS |
7792 | int i, j, reg_num, separator_num; | 
7793 | u32 *reg = data; | |
77b34110 FL |
7794 | int ret; |
7795 | ||
7796 | *version = hdev->fw_version; | |
7797 | ||
7798 | ret = hclge_get_regs_num(hdev, ®s_num_32_bit, ®s_num_64_bit); | |
7799 | if (ret) { | |
7800 | dev_err(&hdev->pdev->dev, | |
7801 | "Get register number failed, ret = %d.\n", ret); | |
7802 | return; | |
7803 | } | |
7804 | ||
ea4750ca JS |
7805 | /* fetch per-PF register values from the PF's PCIe register space */ | 
7806 | reg_num = sizeof(cmdq_reg_addr_list) / sizeof(u32); | 
7807 | separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE; | 
7808 | for (i = 0; i < reg_num; i++) | 
7809 | *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]); | |
7810 | for (i = 0; i < separator_num; i++) | |
7811 | *reg++ = SEPARATOR_VALUE; | |
7812 | ||
7813 | reg_num = sizeof(common_reg_addr_list) / sizeof(u32); | 
7814 | separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE; | 
7815 | for (i = 0; i < reg_num; i++) | 
7816 | *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]); | |
7817 | for (i = 0; i < separator_num; i++) | |
7818 | *reg++ = SEPARATOR_VALUE; | |
7819 | ||
7820 | reg_num = sizeof(ring_reg_addr_list) / sizeof(u32); | 
7821 | separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE; | 
7822 | for (j = 0; j < kinfo->num_tqps; j++) { | 
7823 | for (i = 0; i < reg_num; i++) | 
7824 | *reg++ = hclge_read_dev(&hdev->hw, | |
7825 | ring_reg_addr_list[i] + | |
7826 | 0x200 * j); | |
7827 | for (i = 0; i < separator_num; i++) | |
7828 | *reg++ = SEPARATOR_VALUE; | |
7829 | } | |
7830 | ||
7831 | reg_num = sizeof(tqp_intr_reg_addr_list) / sizeof(u32); | 
7832 | separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE; | 
7833 | for (j = 0; j < hdev->num_msi_used - 1; j++) { | 
7834 | for (i = 0; i < reg_num; i++) | 
7835 | *reg++ = hclge_read_dev(&hdev->hw, | |
7836 | tqp_intr_reg_addr_list[i] + | |
7837 | 4 * j); | |
7838 | for (i = 0; i < separator_num; i++) | |
7839 | *reg++ = SEPARATOR_VALUE; | |
7840 | } | |
7841 | ||
7842 | /* fetch PF common register values from the firmware */ | 
7843 | ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg); | |
77b34110 FL |
7844 | if (ret) { |
7845 | dev_err(&hdev->pdev->dev, | |
7846 | "Get 32 bit register failed, ret = %d.\n", ret); | |
7847 | return; | |
7848 | } | |
7849 | ||
ea4750ca JS |
7850 | reg += regs_num_32_bit; |
7851 | ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg); | |
77b34110 FL |
7852 | if (ret) |
7853 | dev_err(&hdev->pdev->dev, | |
7854 | "Get 64 bit register failed, ret = %d.\n", ret); | |
7855 | } | |
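/* Editor's note: per the addressing above, each TQP's ring registers
 * repeat at a 0x200-byte stride and each vector's TQP interrupt
 * registers at a 4-byte stride; every block is closed out with
 * SEPARATOR_VALUE words, presumably so a consumer of the ethtool dump
 * can split it back into REG_LEN_PER_LINE-sized records.
 */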
7856 | ||
f6f75abc | 7857 | static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status) |
07f8e940 JS |
7858 | { |
7859 | struct hclge_set_led_state_cmd *req; | |
7860 | struct hclge_desc desc; | |
7861 | int ret; | |
7862 | ||
7863 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false); | |
7864 | ||
7865 | req = (struct hclge_set_led_state_cmd *)desc.data; | |
e4e87715 PL |
7866 | hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M, |
7867 | HCLGE_LED_LOCATE_STATE_S, locate_led_status); | |
07f8e940 JS |
7868 | |
7869 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
7870 | if (ret) | |
7871 | dev_err(&hdev->pdev->dev, | |
7872 | "Send set led state cmd error, ret =%d\n", ret); | |
7873 | ||
7874 | return ret; | |
7875 | } | |
7876 | ||
7877 | enum hclge_led_status { | |
7878 | HCLGE_LED_OFF, | |
7879 | HCLGE_LED_ON, | |
7880 | HCLGE_LED_NO_CHANGE = 0xFF, | |
7881 | }; | |
7882 | ||
7883 | static int hclge_set_led_id(struct hnae3_handle *handle, | |
7884 | enum ethtool_phys_id_state status) | |
7885 | { | |
07f8e940 JS |
7886 | struct hclge_vport *vport = hclge_get_vport(handle); |
7887 | struct hclge_dev *hdev = vport->back; | |
07f8e940 JS |
7888 | |
7889 | switch (status) { | |
7890 | case ETHTOOL_ID_ACTIVE: | |
f6f75abc | 7891 | return hclge_set_led_status(hdev, HCLGE_LED_ON); |
07f8e940 | 7892 | case ETHTOOL_ID_INACTIVE: |
f6f75abc | 7893 | return hclge_set_led_status(hdev, HCLGE_LED_OFF); |
07f8e940 | 7894 | default: |
f6f75abc | 7895 | return -EINVAL; |
07f8e940 | 7896 | } |
07f8e940 JS |
7897 | } |
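/* Usage sketch (not in the original source): this hook backs the
 * .set_phys_id ethtool path, e.g. "ethtool -p <dev>". The ethtool core
 * passes ETHTOOL_ID_ACTIVE when port identification starts and
 * ETHTOOL_ID_INACTIVE when it stops; per-blink states such as
 * ETHTOOL_ID_ON/OFF fall through to -EINVAL here.
 */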
7898 | ||
0979aa0b FL |
7899 | static void hclge_get_link_mode(struct hnae3_handle *handle, |
7900 | unsigned long *supported, | |
7901 | unsigned long *advertising) | |
7902 | { | |
7903 | unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS); | |
7904 | struct hclge_vport *vport = hclge_get_vport(handle); | |
7905 | struct hclge_dev *hdev = vport->back; | |
7906 | unsigned int idx = 0; | |
7907 | ||
7908 | for (; idx < size; idx++) { | |
7909 | supported[idx] = hdev->hw.mac.supported[idx]; | |
7910 | advertising[idx] = hdev->hw.mac.advertising[idx]; | |
7911 | } | |
7912 | } | |
7913 | ||
5c9f6b39 PL |
7914 | static int hclge_gro_en(struct hnae3_handle *handle, int enable) |
7915 | { | |
7916 | struct hclge_vport *vport = hclge_get_vport(handle); | |
7917 | struct hclge_dev *hdev = vport->back; | |
7918 | ||
7919 | return hclge_config_gro(hdev, enable); | |
7920 | } | |
7921 | ||
46a3df9f S |
7922 | static const struct hnae3_ae_ops hclge_ops = { |
7923 | .init_ae_dev = hclge_init_ae_dev, | |
7924 | .uninit_ae_dev = hclge_uninit_ae_dev, | |
6b9a97ee HT |
7925 | .flr_prepare = hclge_flr_prepare, |
7926 | .flr_done = hclge_flr_done, | |
46a3df9f S |
7927 | .init_client_instance = hclge_init_client_instance, |
7928 | .uninit_client_instance = hclge_uninit_client_instance, | |
84e095d6 SM |
7929 | .map_ring_to_vector = hclge_map_ring_to_vector, |
7930 | .unmap_ring_from_vector = hclge_unmap_ring_frm_vector, | |
46a3df9f | 7931 | .get_vector = hclge_get_vector, |
0d3e6631 | 7932 | .put_vector = hclge_put_vector, |
46a3df9f | 7933 | .set_promisc_mode = hclge_set_promisc_mode, |
c39c4d98 | 7934 | .set_loopback = hclge_set_loopback, |
46a3df9f S |
7935 | .start = hclge_ae_start, |
7936 | .stop = hclge_ae_stop, | |
a6d818e3 YL |
7937 | .client_start = hclge_client_start, |
7938 | .client_stop = hclge_client_stop, | |
46a3df9f S |
7939 | .get_status = hclge_get_status, |
7940 | .get_ksettings_an_result = hclge_get_ksettings_an_result, | |
7941 | .update_speed_duplex_h = hclge_update_speed_duplex_h, | |
7942 | .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h, | |
7943 | .get_media_type = hclge_get_media_type, | |
7944 | .get_rss_key_size = hclge_get_rss_key_size, | |
7945 | .get_rss_indir_size = hclge_get_rss_indir_size, | |
7946 | .get_rss = hclge_get_rss, | |
7947 | .set_rss = hclge_set_rss, | |
f7db940a | 7948 | .set_rss_tuple = hclge_set_rss_tuple, |
07d29954 | 7949 | .get_rss_tuple = hclge_get_rss_tuple, |
46a3df9f S |
7950 | .get_tc_size = hclge_get_tc_size, |
7951 | .get_mac_addr = hclge_get_mac_addr, | |
7952 | .set_mac_addr = hclge_set_mac_addr, | |
26483246 | 7953 | .do_ioctl = hclge_do_ioctl, |
46a3df9f S |
7954 | .add_uc_addr = hclge_add_uc_addr, |
7955 | .rm_uc_addr = hclge_rm_uc_addr, | |
7956 | .add_mc_addr = hclge_add_mc_addr, | |
7957 | .rm_mc_addr = hclge_rm_mc_addr, | |
7958 | .set_autoneg = hclge_set_autoneg, | |
7959 | .get_autoneg = hclge_get_autoneg, | |
7960 | .get_pauseparam = hclge_get_pauseparam, | |
61387774 | 7961 | .set_pauseparam = hclge_set_pauseparam, |
46a3df9f S |
7962 | .set_mtu = hclge_set_mtu, |
7963 | .reset_queue = hclge_reset_tqp, | |
7964 | .get_stats = hclge_get_stats, | |
7965 | .update_stats = hclge_update_stats, | |
7966 | .get_strings = hclge_get_strings, | |
7967 | .get_sset_count = hclge_get_sset_count, | |
7968 | .get_fw_version = hclge_get_fw_version, | |
7969 | .get_mdix_mode = hclge_get_mdix_mode, | |
391b5e93 | 7970 | .enable_vlan_filter = hclge_enable_vlan_filter, |
dc8131d8 | 7971 | .set_vlan_filter = hclge_set_vlan_filter, |
46a3df9f | 7972 | .set_vf_vlan_filter = hclge_set_vf_vlan_filter, |
052ece6d | 7973 | .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag, |
4ed340ab | 7974 | .reset_event = hclge_reset_event, |
720bd583 | 7975 | .set_default_reset_request = hclge_set_def_reset_request, |
09f2af64 PL |
7976 | .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info, |
7977 | .set_channels = hclge_set_channels, | |
482d2e9c | 7978 | .get_channels = hclge_get_channels, |
77b34110 FL |
7979 | .get_regs_len = hclge_get_regs_len, |
7980 | .get_regs = hclge_get_regs, | |
07f8e940 | 7981 | .set_led_id = hclge_set_led_id, |
0979aa0b | 7982 | .get_link_mode = hclge_get_link_mode, |
dd74f815 JS |
7983 | .add_fd_entry = hclge_add_fd_entry, |
7984 | .del_fd_entry = hclge_del_fd_entry, | |
6871af29 | 7985 | .del_all_fd_entries = hclge_del_all_fd_entries, |
05c2314f JS |
7986 | .get_fd_rule_cnt = hclge_get_fd_rule_cnt, |
7987 | .get_fd_rule_info = hclge_get_fd_rule_info, | |
7988 | .get_fd_all_rules = hclge_get_all_rules, | |
6871af29 | 7989 | .restore_fd_rules = hclge_restore_fd_entries, |
c17852a8 | 7990 | .enable_fd = hclge_enable_fd, |
3c666b58 | 7991 | .dbg_run_cmd = hclge_dbg_run_cmd, |
381c356e | 7992 | .handle_hw_ras_error = hclge_handle_hw_ras_error, |
4d60291b HT |
7993 | .get_hw_reset_stat = hclge_get_hw_reset_stat, |
7994 | .ae_dev_resetting = hclge_ae_dev_resetting, | |
7995 | .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt, | |
5c9f6b39 | 7996 | .set_gro_en = hclge_gro_en, |
0c29d191 | 7997 | .get_global_queue_id = hclge_covert_handle_qid_global, |
46a3df9f S |
7998 | }; |
7999 | ||
8000 | static struct hnae3_ae_algo ae_algo = { | |
8001 | .ops = &hclge_ops, | |
46a3df9f S |
8002 | .pdev_id_table = ae_algo_pci_tbl, |
8003 | }; | |
8004 | ||
8005 | static int hclge_init(void) | |
8006 | { | |
8007 | pr_info("%s is initializing\n", HCLGE_NAME); | |
8008 | ||
854cf33a FL |
8009 | hnae3_register_ae_algo(&ae_algo); |
8010 | ||
8011 | return 0; | |
46a3df9f S |
8012 | } |
8013 | ||
8014 | static void hclge_exit(void) | |
8015 | { | |
8016 | hnae3_unregister_ae_algo(&ae_algo); | |
8017 | } | |
8018 | module_init(hclge_init); | |
8019 | module_exit(hclge_exit); | |
8020 | ||
8021 | MODULE_LICENSE("GPL"); | |
8022 | MODULE_AUTHOR("Huawei Tech. Co., Ltd."); | |
8023 | MODULE_DESCRIPTION("HCLGE Driver"); | |
8024 | MODULE_VERSION(HCLGE_MOD_VERSION); |