// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hnae3.h"

#define HCLGE_NAME "hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
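
/* HCLGE_MAC_STATS_FIELD_OFF() turns a field of struct hclge_mac_stats into a
 * byte offset, and HCLGE_STATS_READ() reads the u64 stored at such an offset.
 * The stats tables below pair each ethtool string with its offset, so a
 * counter can be fetched by name; an illustrative (not in-tree) use:
 *
 *	u64 pause = HCLGE_STATS_READ(&hdev->hw_stats.mac_stats,
 *		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num));
 */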

static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
			       u16 *allocated_size, bool is_alloc);

static struct hnae3_ae_algo ae_algo;

static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);

static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"App Loopback test",
	"Serdes serial Loopback test",
	"Serdes parallel Loopback test",
	"Phy Loopback test"
};

static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};

static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
		.mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
		.mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
		.i_port_bitmap = 0x1,
	},
};
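
/* Read the whole MAC counter block from firmware with one chained
 * HCLGE_OPC_STATS_MAC command of HCLGE_MAC_CMD_NUM descriptors, and
 * accumulate the returned 64-bit words into hdev->hw_stats.mac_stats in
 * table order. The first descriptor yields two fewer data words than the
 * others, apparently because its head is taken up by command metadata.
 */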
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21
#define HCLGE_RTN_DATA_NUM 4

	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);

		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RTN_DATA_NUM - 2;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RTN_DATA_NUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le64_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}
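
/* Per-queue (TQP) counters are fetched one queue at a time: RX ring packet
 * counts via HCLGE_OPC_QUERY_RX_STATUS, then TX counts via
 * HCLGE_OPC_QUERY_TX_STATUS, each accumulated into tqp->tqp_stats.
 */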
static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_RX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}
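
/* ethtool glue for the TQP counters: the flat stats buffer is laid out as
 * all TX queue counters followed by all RX queue counters, matching the
 * "txq%d_pktnum_rcd" / "rxq%d_pktnum_rcd" strings, two entries per queue.
 */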
static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	return kinfo->num_tqps * 2;
}

static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i = 0;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}

static u64 *hclge_comm_get_stats(void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}
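
/* Fold the MAC counters into the standard struct net_device_stats view:
 * oversize, undersize and FCS errors together make up rx_errors, FCS errors
 * alone are rx_crc_errors, and multicast sums both directions.
 */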
static void hclge_update_netstat(struct hclge_hw_stats *hw_stats,
				 struct net_device_stats *net_stats)
{
	net_stats->tx_dropped = 0;
	net_stats->rx_errors = hw_stats->mac_stats.mac_rx_oversize_pkt_num;
	net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num;
	net_stats->rx_errors += hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;

	net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num;
	net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num;

	net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;
	net_stats->rx_length_errors =
		hw_stats->mac_stats.mac_rx_undersize_pkt_num;
	net_stats->rx_length_errors +=
		hw_stats->mac_stats.mac_rx_oversize_pkt_num;
	net_stats->rx_over_errors =
		hw_stats->mac_stats.mac_rx_oversize_pkt_num;
}

static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
		}
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);

	hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats);
}

static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_hw_stats *hw_stats = &hdev->hw_stats;
	int status;

	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	hclge_update_netstat(hw_stats, net_stats);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}
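
/* ethtool stringset sizes: ETH_SS_TEST depends on which loopback modes this
 * device can run (see the support rules below); ETH_SS_STATS is the MAC
 * counter table plus two counters per TQP.
 */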
static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
				   HNAE3_SUPPORT_PHY_LOOPBACK |\
				   HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
				   HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only GE mode supports it
	 * serdes: all mac modes support it (GE/XGE/LGE/CGE)
	 * phy: only supported when a phy device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->pdev->revision >= 0x21 ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
		}

		count += 2;
		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}

static void hclge_get_strings(struct hnae3_handle *handle,
			      u32 stringset,
			      u8 *data)
{
	u8 *p = data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_mac_stats_string,
					   size,
					   p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_APP],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}

static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
				 g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string),
				 data);
	p = hclge_tqps_get_stats(handle, p);
}

static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	return 0;
}
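
/* Firmware sets pf_state once the PF reset has completed, so the function
 * status is polled up to six times, 1-2 ms apart, before being parsed.
 */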
static int hclge_query_function_status(struct hclge_dev *hdev)
{
	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n",
				ret);

			return ret;
		}

		/* Check if pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < 5);

	ret = hclge_parse_func_status(hdev, req);

	return ret;
}

static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		/* PF should have NIC vectors and RoCE vectors,
		 * with NIC vectors queued before RoCE vectors.
		 */
		hdev->num_msi = hdev->num_roce_msi +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
	}

	return 0;
}

static int hclge_parse_speed(int speed_cmd, int *speed)
{
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u8 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
			supported);

	set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
	set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
}

static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type != HNAE3_MEDIA_TYPE_FIBER)
		return;

	hclge_parse_fiber_link_mode(hdev, speed_ability);
}
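
/* The static configuration arrives as bit-fields packed into the param[]
 * words of two descriptors; hnae3_get_field() extracts each value with its
 * _M mask and _S shift. The MAC address is split across words: param[2]
 * carries the low 32 bits and a field of param[3] the high 16.
 */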
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u64 mac_addr_tmp;
	int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					      HCLGE_CFG_VMDQ_M,
					      HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_RSS_SIZE_M,
					    HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);
	if (!cfg->umv_space)
		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}

/* hclge_get_cfg: query the static parameter from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	int i, ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* The length is given to hardware in 4-byte units */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}

static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	ret = hclge_query_pf_resource(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);

	return ret;
}
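
/* hclge_configure() copies the parsed flash configuration into hclge_dev
 * and sanitizes it: tc_max is clamped to [1, HNAE3_MAX_TC], devices without
 * DCB support are forced to a single TC with PFC disabled, and the enabled
 * TCs are recorded as a contiguous bitmap in hw_tc_map.
 */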
static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	int ret, i;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
		return ret;
	}

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->rss_size_max = cfg.rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;
	hdev->wanted_umv_size = cfg.umv_space;

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = hdev->tc_max;

	/* Non-contiguous TCs are currently not supported */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	return ret;
}

static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
			    int tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;
	u16 tso_mss;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_min);
	req->tso_mss_min = cpu_to_le16(tso_mss);

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_max);
	req->tso_mss_max = cpu_to_le16(tso_mss);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.desc_num = hdev->num_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
			i * HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}
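
/* Queue-to-function mapping: each TQP is bound to a (function id, virtual
 * queue id) pair through HCLGE_OPC_SET_TQP_MAP; vport 0 is the PF itself,
 * which is why is_pf is derived from vport_id below.
 */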
static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
			1 << HCLGE_TQP_MAP_EN_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

	return ret;
}

static int hclge_assign_tqp(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < kinfo->num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			hdev->htqp[i].q.desc_num = kinfo->num_desc;
			kinfo->tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = kinfo->num_tqps;

	return 0;
}

static int hclge_knic_setup(struct hclge_vport *vport,
			    u16 num_tqps, u16 num_desc)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, ret;

	kinfo->num_desc = num_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc);
	kinfo->rss_size
		= min_t(u16, hdev->rss_size_max, num_tqps / kinfo->num_tc);
	kinfo->num_tqps = kinfo->rss_size * kinfo->num_tc;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (hdev->hw_tc_map & BIT(i)) {
			kinfo->tc_info[i].enable = true;
			kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
			kinfo->tc_info[i].tqp_count = kinfo->rss_size;
			kinfo->tc_info[i].tc = i;
		} else {
			/* Set to default queue if TC is disabled */
			kinfo->tc_info[i].enable = false;
			kinfo->tc_info[i].tqp_offset = 0;
			kinfo->tc_info[i].tqp_count = 1;
			kinfo->tc_info[i].tc = 0;
		}
	}

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);

	return ret;
}

static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++) {
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}
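
/* vport construction: one vport for the PF's own NIC plus one per VMDq
 * instance and per requested VF. TQPs are divided evenly among the vports,
 * with the PF vport absorbing the remainder.
 */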
static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
{
	/* this would be initialized later */
}

static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
		ret = hclge_knic_setup(vport, num_tqps, hdev->num_desc);
		if (ret) {
			dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
				ret);
			return ret;
		}
	} else {
		hclge_unic_setup(vport, num_tqps);
	}

	return 0;
}

static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for the main NIC of the PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}
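
/* Packet-buffer programming starts with the TX side: the per-TC buffer
 * sizes are passed to hardware in 128-byte units, with the update-enable
 * bit (HCLGE_BUF_SIZE_UPDATE_EN_MSK) set on each entry.
 */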
static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT 7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);

	return ret;
}

static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);

	return ret;
}

static int hclge_get_tc_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}

static int hclge_get_pfc_enable_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i))
			cnt++;
	return cnt;
}

/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}

static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}
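
/* hclge_is_rx_buf_ok() decides whether the RX space left after the private
 * allocations can still hold an adequate shared buffer. The required
 * standard is the larger of a DV-based floor (2 * mps + DV) and a per-TC
 * estimate (one mps per PFC-enabled TC, half an mps per other TC, plus one
 * mps of headroom); if it fits, the whole remainder becomes the shared
 * buffer and per-TC thresholds are derived from mps.
 */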
static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std;
	int tc_num, pfc_enable_num;
	u32 shared_buf;
	u32 rx_priv;
	int i;

	tc_num = hclge_get_tc_num(hdev);
	pfc_enable_num = hclge_get_pfc_enable_num(hdev);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV;
	else
		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_NON_DCB_DV;

	shared_buf_tc = pfc_enable_num * hdev->mps +
			(tc_num - pfc_enable_num) * hdev->mps / 2 +
			hdev->mps;
	shared_std = max_t(u32, shared_buf_min, shared_buf_tc);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all <= rx_priv + shared_std)
		return false;

	shared_buf = rx_all - rx_priv;
	buf_alloc->s_buf.buf_size = shared_buf;
	buf_alloc->s_buf.self.high = shared_buf;
	buf_alloc->s_buf.self.low = 2 * hdev->mps;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		if ((hdev->hw_tc_map & BIT(i)) &&
		    (hdev->tm_info.hw_pfc_map & BIT(i))) {
			buf_alloc->s_buf.tc_thrd[i].low = hdev->mps;
			buf_alloc->s_buf.tc_thrd[i].high = 2 * hdev->mps;
		} else {
			buf_alloc->s_buf.tc_thrd[i].low = 0;
			buf_alloc->s_buf.tc_thrd[i].high = hdev->mps;
		}
	}

	return true;
}

static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (total_size < HCLGE_DEFAULT_TX_BUF)
			return -ENOMEM;

		if (hdev->hw_tc_map & BIT(i))
			priv->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
		else
			priv->tx_buf_size = 0;

		total_size -= priv->tx_buf_size;
	}

	return 0;
}
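
/* The RX calculation below tries progressively smaller layouts: full
 * private buffers for every enabled TC, then reduced watermarks, then
 * dropping private buffers from PFC-disabled TCs and finally from
 * PFC-enabled ones, rechecking hclge_is_rx_buf_ok() after each step.
 */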
46a3df9f S |
1356 | /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs |
1357 | * @hdev: pointer to struct hclge_dev | |
acf61ecd | 1358 | * @buf_alloc: pointer to buffer calculation data |
46a3df9f S |
1359 | * @return: 0: calculate sucessful, negative: fail |
1360 | */ | |
1db9b1bf YL |
1361 | static int hclge_rx_buffer_calc(struct hclge_dev *hdev, |
1362 | struct hclge_pkt_buf_alloc *buf_alloc) | |
46a3df9f | 1363 | { |
996ff918 YL |
1364 | #define HCLGE_BUF_SIZE_UNIT 128 |
1365 | u32 rx_all = hdev->pkt_buf_size, aligned_mps; | |
46a3df9f S |
1366 | int no_pfc_priv_num, pfc_priv_num; |
1367 | struct hclge_priv_buf *priv; | |
1368 | int i; | |
1369 | ||
996ff918 | 1370 | aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT); |
acf61ecd | 1371 | rx_all -= hclge_get_tx_buff_alloced(buf_alloc); |
9ffe79a9 | 1372 | |
d602a525 YL |
1373 | /* When DCB is not supported, rx private |
1374 | * buffer is not allocated. | |
1375 | */ | |
1376 | if (!hnae3_dev_dcb_supported(hdev)) { | |
acf61ecd | 1377 | if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) |
d602a525 YL |
1378 | return -ENOMEM; |
1379 | ||
1380 | return 0; | |
1381 | } | |
1382 | ||
46a3df9f S |
1383 | /* step 1, try to alloc private buffer for all enabled tc */ |
1384 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { | |
acf61ecd | 1385 | priv = &buf_alloc->priv_buf[i]; |
46a3df9f S |
1386 | if (hdev->hw_tc_map & BIT(i)) { |
1387 | priv->enable = 1; | |
1388 | if (hdev->tm_info.hw_pfc_map & BIT(i)) { | |
996ff918 YL |
1389 | priv->wl.low = aligned_mps; |
1390 | priv->wl.high = priv->wl.low + aligned_mps; | |
46a3df9f S |
1391 | priv->buf_size = priv->wl.high + |
1392 | HCLGE_DEFAULT_DV; | |
1393 | } else { | |
1394 | priv->wl.low = 0; | |
996ff918 | 1395 | priv->wl.high = 2 * aligned_mps; |
46a3df9f S |
1396 | priv->buf_size = priv->wl.high; |
1397 | } | |
bb1fe9ea YL |
1398 | } else { |
1399 | priv->enable = 0; | |
1400 | priv->wl.low = 0; | |
1401 | priv->wl.high = 0; | |
1402 | priv->buf_size = 0; | |
46a3df9f S |
1403 | } |
1404 | } | |
1405 | ||
acf61ecd | 1406 | if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) |
46a3df9f S |
1407 | return 0; |
1408 | ||
1409 | /* step 2, try to decrease the buffer size of | |
1410 | * no pfc TC's private buffer | |
1411 | */ | |
1412 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { | |
acf61ecd | 1413 | priv = &buf_alloc->priv_buf[i]; |
46a3df9f | 1414 | |
bb1fe9ea YL |
1415 | priv->enable = 0; |
1416 | priv->wl.low = 0; | |
1417 | priv->wl.high = 0; | |
1418 | priv->buf_size = 0; | |
1419 | ||
1420 | if (!(hdev->hw_tc_map & BIT(i))) | |
1421 | continue; | |
1422 | ||
1423 | priv->enable = 1; | |
46a3df9f S |
1424 | |
1425 | if (hdev->tm_info.hw_pfc_map & BIT(i)) { | |
1426 | priv->wl.low = 128; | |
996ff918 | 1427 | priv->wl.high = priv->wl.low + aligned_mps; |
46a3df9f S |
1428 | priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV; |
1429 | } else { | |
1430 | priv->wl.low = 0; | |
996ff918 | 1431 | priv->wl.high = aligned_mps; |
46a3df9f S |
1432 | priv->buf_size = priv->wl.high; |
1433 | } | |
1434 | } | |
1435 | ||
acf61ecd | 1436 | if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) |
46a3df9f S |
1437 | return 0; |
1438 | ||
1439 | /* step 3, try to reduce the number of pfc disabled TCs, | |
1440 | * which have private buffer | |
1441 | */ | |
1442 | /* get the total no pfc enable TC number, which have private buffer */ | |
acf61ecd | 1443 | no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc); |
46a3df9f S |
1444 | |
1445 | /* let the last to be cleared first */ | |
1446 | for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) { | |
acf61ecd | 1447 | priv = &buf_alloc->priv_buf[i]; |
46a3df9f S |
1448 | |
1449 | if (hdev->hw_tc_map & BIT(i) && | |
1450 | !(hdev->tm_info.hw_pfc_map & BIT(i))) { | |
1451 | /* Clear the no pfc TC private buffer */ | |
1452 | priv->wl.low = 0; | |
1453 | priv->wl.high = 0; | |
1454 | priv->buf_size = 0; | |
1455 | priv->enable = 0; | |
1456 | no_pfc_priv_num--; | |
1457 | } | |
1458 | ||
acf61ecd | 1459 | if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) || |
46a3df9f S |
1460 | no_pfc_priv_num == 0) |
1461 | break; | |
1462 | } | |
1463 | ||
acf61ecd | 1464 | if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) |
46a3df9f S |
1465 | return 0; |
1466 | ||
1467 | /* step 4, try to reduce the number of pfc enabled TCs | |
1468 | * which have private buffer. | |
1469 | */ | |
acf61ecd | 1470 | pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc); |
46a3df9f S |
1471 | |
1472 | /* let the last to be cleared first */ | |
1473 | for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) { | |
acf61ecd | 1474 | priv = &buf_alloc->priv_buf[i]; |
46a3df9f S |
1475 | |
1476 | if (hdev->hw_tc_map & BIT(i) && | |
1477 | hdev->tm_info.hw_pfc_map & BIT(i)) { | |
1478 | /* Reduce the number of pfc TC with private buffer */ | |
1479 | priv->wl.low = 0; | |
1480 | priv->enable = 0; | |
1481 | priv->wl.high = 0; | |
1482 | priv->buf_size = 0; | |
1483 | pfc_priv_num--; | |
1484 | } | |
1485 | ||
acf61ecd | 1486 | if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) || |
46a3df9f S |
1487 | pfc_priv_num == 0) |
1488 | break; | |
1489 | } | |
acf61ecd | 1490 | if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) |
46a3df9f S |
1491 | return 0; |
1492 | ||
1493 | return -ENOMEM; | |
1494 | } | |
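
/* A worked example of the watermark arithmetic above (the concrete
 * aligned_mps value is an assumption for illustration): if aligned_mps
 * were 256 bytes, a pfc enabled TC in step 2 would get wl.low = 128,
 * wl.high = 128 + 256 = 384 and buf_size = 384 + HCLGE_DEFAULT_DV,
 * i.e. headroom above the high watermark for the delay value; a TC
 * without pfc needs no low watermark and simply gets
 * wl.high = buf_size = 256. Steps 3 and 4 then give private buffers back
 * to the shared pool, pfc disabled TCs first, until hclge_is_rx_buf_ok()
 * accepts the layout or nothing is left to reclaim (-ENOMEM).
 */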
1495 | ||
acf61ecd YL |
1496 | static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev, |
1497 | struct hclge_pkt_buf_alloc *buf_alloc) | |
46a3df9f | 1498 | { |
d44f9b63 | 1499 | struct hclge_rx_priv_buff_cmd *req; |
46a3df9f S |
1500 | struct hclge_desc desc; |
1501 | int ret; | |
1502 | int i; | |
1503 | ||
1504 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false); | |
d44f9b63 | 1505 | req = (struct hclge_rx_priv_buff_cmd *)desc.data; |
46a3df9f S |
1506 | |
1507 | /* Alloc private buffer TCs */ | |
1508 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { | |
acf61ecd | 1509 | struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; |
46a3df9f S |
1510 | |
1511 | req->buf_num[i] = | |
1512 | cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S); | |
1513 | req->buf_num[i] |= | |
5bca3b94 | 1514 | cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B); |
46a3df9f S |
1515 | } |
1516 | ||
b8c8bf47 | 1517 | req->shared_buf = |
acf61ecd | 1518 | cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) | |
b8c8bf47 YL |
1519 | (1 << HCLGE_TC0_PRI_BUF_EN_B)); |
1520 | ||
46a3df9f | 1521 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); |
3f639907 | 1522 | if (ret) |
46a3df9f S |
1523 | dev_err(&hdev->pdev->dev, |
1524 | "rx private buffer alloc cmd failed %d\n", ret); | |
46a3df9f | 1525 | |
3f639907 | 1526 | return ret; |
46a3df9f S |
1527 | } |
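
/* Sketch of the buf_num encoding used above: each 16-bit field carries
 * the buffer size in hardware buffer units (buf_size >> HCLGE_BUF_UNIT_S)
 * with bit HCLGE_TC0_PRI_BUF_EN_B OR'ed in to enable that TC's private
 * buffer. For instance, if the unit shift were 7 (128-byte units, an
 * assumption here), a 3072-byte buffer would be encoded as
 * (3072 >> 7) | BIT(HCLGE_TC0_PRI_BUF_EN_B). The shared buffer is encoded
 * the same way in req->shared_buf.
 */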
1528 | ||
acf61ecd YL |
1529 | static int hclge_rx_priv_wl_config(struct hclge_dev *hdev, |
1530 | struct hclge_pkt_buf_alloc *buf_alloc) | |
46a3df9f S |
1531 | { |
1532 | struct hclge_rx_priv_wl_buf *req; | |
1533 | struct hclge_priv_buf *priv; | |
1534 | struct hclge_desc desc[2]; | |
1535 | int i, j; | |
1536 | int ret; | |
1537 | ||
1538 | for (i = 0; i < 2; i++) { | |
1539 | hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC, | |
1540 | false); | |
1541 | req = (struct hclge_rx_priv_wl_buf *)desc[i].data; | |
1542 | ||
1543 | /* The first descriptor sets the NEXT bit to 1 */ | 
1544 | if (i == 0) | |
1545 | desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); | |
1546 | else | |
1547 | desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT); | |
1548 | ||
1549 | for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) { | |
acf61ecd YL |
1550 | u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j; |
1551 | ||
1552 | priv = &buf_alloc->priv_buf[idx]; | |
46a3df9f S |
1553 | req->tc_wl[j].high = |
1554 | cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S); | |
1555 | req->tc_wl[j].high |= | |
3738287c | 1556 | cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); |
46a3df9f S |
1557 | req->tc_wl[j].low = |
1558 | cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S); | |
1559 | req->tc_wl[j].low |= | |
3738287c | 1560 | cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); |
46a3df9f S |
1561 | } |
1562 | } | |
1563 | ||
1564 | /* Send 2 descriptors at one time */ | 
1565 | ret = hclge_cmd_send(&hdev->hw, desc, 2); | |
3f639907 | 1566 | if (ret) |
46a3df9f S |
1567 | dev_err(&hdev->pdev->dev, |
1568 | "rx private waterline config cmd failed %d\n", | |
1569 | ret); | |
3f639907 | 1570 | return ret; |
46a3df9f S |
1571 | } |
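
/* Multi-descriptor commands such as the one above follow a simple
 * chaining rule: every descriptor except the last sets
 * HCLGE_CMD_FLAG_NEXT so firmware knows another descriptor of the same
 * command follows. With two descriptors, desc[0] sets the flag, desc[1]
 * clears it, and both are submitted in a single
 * hclge_cmd_send(&hdev->hw, desc, 2) call. The same pattern repeats in
 * hclge_common_thrd_config() below.
 */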
1572 | ||
acf61ecd YL |
1573 | static int hclge_common_thrd_config(struct hclge_dev *hdev, |
1574 | struct hclge_pkt_buf_alloc *buf_alloc) | |
46a3df9f | 1575 | { |
acf61ecd | 1576 | struct hclge_shared_buf *s_buf = &buf_alloc->s_buf; |
46a3df9f S |
1577 | struct hclge_rx_com_thrd *req; |
1578 | struct hclge_desc desc[2]; | |
1579 | struct hclge_tc_thrd *tc; | |
1580 | int i, j; | |
1581 | int ret; | |
1582 | ||
1583 | for (i = 0; i < 2; i++) { | |
1584 | hclge_cmd_setup_basic_desc(&desc[i], | |
1585 | HCLGE_OPC_RX_COM_THRD_ALLOC, false); | |
1586 | req = (struct hclge_rx_com_thrd *)&desc[i].data; | |
1587 | ||
1588 | /* The first descriptor sets the NEXT bit to 1 */ | 
1589 | if (i == 0) | |
1590 | desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); | |
1591 | else | |
1592 | desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT); | |
1593 | ||
1594 | for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) { | |
1595 | tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j]; | |
1596 | ||
1597 | req->com_thrd[j].high = | |
1598 | cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S); | |
1599 | req->com_thrd[j].high |= | |
3738287c | 1600 | cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); |
46a3df9f S |
1601 | req->com_thrd[j].low = |
1602 | cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S); | |
1603 | req->com_thrd[j].low |= | |
3738287c | 1604 | cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); |
46a3df9f S |
1605 | } |
1606 | } | |
1607 | ||
1608 | /* Send 2 descriptors at one time */ | |
1609 | ret = hclge_cmd_send(&hdev->hw, desc, 2); | |
3f639907 | 1610 | if (ret) |
46a3df9f S |
1611 | dev_err(&hdev->pdev->dev, |
1612 | "common threshold config cmd failed %d\n", ret); | |
3f639907 | 1613 | return ret; |
46a3df9f S |
1614 | } |
1615 | ||
acf61ecd YL |
1616 | static int hclge_common_wl_config(struct hclge_dev *hdev, |
1617 | struct hclge_pkt_buf_alloc *buf_alloc) | |
46a3df9f | 1618 | { |
acf61ecd | 1619 | struct hclge_shared_buf *buf = &buf_alloc->s_buf; |
46a3df9f S |
1620 | struct hclge_rx_com_wl *req; |
1621 | struct hclge_desc desc; | |
1622 | int ret; | |
1623 | ||
1624 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false); | |
1625 | ||
1626 | req = (struct hclge_rx_com_wl *)desc.data; | |
1627 | req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S); | |
3738287c | 1628 | req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); |
46a3df9f S |
1629 | |
1630 | req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S); | |
3738287c | 1631 | req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); |
46a3df9f S |
1632 | |
1633 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
3f639907 | 1634 | if (ret) |
46a3df9f S |
1635 | dev_err(&hdev->pdev->dev, |
1636 | "common waterline config cmd failed %d\n", ret); | |
46a3df9f | 1637 | |
3f639907 | 1638 | return ret; |
46a3df9f S |
1639 | } |
1640 | ||
1641 | int hclge_buffer_alloc(struct hclge_dev *hdev) | |
1642 | { | |
acf61ecd | 1643 | struct hclge_pkt_buf_alloc *pkt_buf; |
46a3df9f S |
1644 | int ret; |
1645 | ||
acf61ecd YL |
1646 | pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL); |
1647 | if (!pkt_buf) | |
46a3df9f S |
1648 | return -ENOMEM; |
1649 | ||
acf61ecd | 1650 | ret = hclge_tx_buffer_calc(hdev, pkt_buf); |
9ffe79a9 YL |
1651 | if (ret) { |
1652 | dev_err(&hdev->pdev->dev, | |
1653 | "could not calc tx buffer size for all TCs %d\n", ret); | |
acf61ecd | 1654 | goto out; |
9ffe79a9 YL |
1655 | } |
1656 | ||
acf61ecd | 1657 | ret = hclge_tx_buffer_alloc(hdev, pkt_buf); |
46a3df9f S |
1658 | if (ret) { |
1659 | dev_err(&hdev->pdev->dev, | |
1660 | "could not alloc tx buffers %d\n", ret); | |
acf61ecd | 1661 | goto out; |
46a3df9f S |
1662 | } |
1663 | ||
acf61ecd | 1664 | ret = hclge_rx_buffer_calc(hdev, pkt_buf); |
46a3df9f S |
1665 | if (ret) { |
1666 | dev_err(&hdev->pdev->dev, | |
1667 | "could not calc rx priv buffer size for all TCs %d\n", | |
1668 | ret); | |
acf61ecd | 1669 | goto out; |
46a3df9f S |
1670 | } |
1671 | ||
acf61ecd | 1672 | ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf); |
46a3df9f S |
1673 | if (ret) { |
1674 | dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n", | |
1675 | ret); | |
acf61ecd | 1676 | goto out; |
46a3df9f S |
1677 | } |
1678 | ||
2daf4a65 | 1679 | if (hnae3_dev_dcb_supported(hdev)) { |
acf61ecd | 1680 | ret = hclge_rx_priv_wl_config(hdev, pkt_buf); |
2daf4a65 YL |
1681 | if (ret) { |
1682 | dev_err(&hdev->pdev->dev, | |
1683 | "could not configure rx private waterline %d\n", | |
1684 | ret); | |
acf61ecd | 1685 | goto out; |
2daf4a65 | 1686 | } |
46a3df9f | 1687 | |
acf61ecd | 1688 | ret = hclge_common_thrd_config(hdev, pkt_buf); |
2daf4a65 YL |
1689 | if (ret) { |
1690 | dev_err(&hdev->pdev->dev, | |
1691 | "could not configure common threshold %d\n", | |
1692 | ret); | |
acf61ecd | 1693 | goto out; |
2daf4a65 | 1694 | } |
46a3df9f S |
1695 | } |
1696 | ||
acf61ecd YL |
1697 | ret = hclge_common_wl_config(hdev, pkt_buf); |
1698 | if (ret) | |
46a3df9f S |
1699 | dev_err(&hdev->pdev->dev, |
1700 | "could not configure common waterline %d\n", ret); | |
46a3df9f | 1701 | |
acf61ecd YL |
1702 | out: |
1703 | kfree(pkt_buf); | |
1704 | return ret; | |
46a3df9f S |
1705 | } |
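
/* Summary of the sequence driven by hclge_buffer_alloc() above:
 * tx buffer calc -> tx buffer alloc -> rx private buffer calc ->
 * rx private buffer alloc -> (DCB-capable hardware only) rx private
 * waterline config + common threshold config -> common waterline config.
 * The scratch hclge_pkt_buf_alloc is freed on every path through the
 * shared "out" label, so nothing from the calculation outlives this
 * function except what was committed to hardware.
 */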
1706 | ||
1707 | static int hclge_init_roce_base_info(struct hclge_vport *vport) | |
1708 | { | |
1709 | struct hnae3_handle *roce = &vport->roce; | |
1710 | struct hnae3_handle *nic = &vport->nic; | |
1711 | ||
887c3820 | 1712 | roce->rinfo.num_vectors = vport->back->num_roce_msi; |
46a3df9f S |
1713 | |
1714 | if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors || | |
1715 | vport->back->num_msi_left == 0) | |
1716 | return -EINVAL; | |
1717 | ||
1718 | roce->rinfo.base_vector = vport->back->roce_base_vector; | |
1719 | ||
1720 | roce->rinfo.netdev = nic->kinfo.netdev; | |
1721 | roce->rinfo.roce_io_base = vport->back->hw.io_base; | |
1722 | ||
1723 | roce->pdev = nic->pdev; | |
1724 | roce->ae_algo = nic->ae_algo; | |
1725 | roce->numa_node_mask = nic->numa_node_mask; | |
1726 | ||
1727 | return 0; | |
1728 | } | |
1729 | ||
887c3820 | 1730 | static int hclge_init_msi(struct hclge_dev *hdev) |
46a3df9f S |
1731 | { |
1732 | struct pci_dev *pdev = hdev->pdev; | |
887c3820 SM |
1733 | int vectors; |
1734 | int i; | |
46a3df9f | 1735 | |
887c3820 SM |
1736 | vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi, |
1737 | PCI_IRQ_MSI | PCI_IRQ_MSIX); | |
1738 | if (vectors < 0) { | |
1739 | dev_err(&pdev->dev, | |
1740 | "failed(%d) to allocate MSI/MSI-X vectors\n", | |
1741 | vectors); | |
1742 | return vectors; | |
46a3df9f | 1743 | } |
887c3820 SM |
1744 | if (vectors < hdev->num_msi) |
1745 | dev_warn(&hdev->pdev->dev, | |
1746 | "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n", | |
1747 | hdev->num_msi, vectors); | |
46a3df9f | 1748 | |
887c3820 SM |
1749 | hdev->num_msi = vectors; |
1750 | hdev->num_msi_left = vectors; | |
1751 | hdev->base_msi_vector = pdev->irq; | |
46a3df9f | 1752 | hdev->roce_base_vector = hdev->base_msi_vector + |
375dd5e4 | 1753 | hdev->roce_base_msix_offset; |
46a3df9f | 1754 | |
46a3df9f S |
1755 | hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, |
1756 | sizeof(u16), GFP_KERNEL); | |
887c3820 SM |
1757 | if (!hdev->vector_status) { |
1758 | pci_free_irq_vectors(pdev); | |
46a3df9f | 1759 | return -ENOMEM; |
887c3820 | 1760 | } |
46a3df9f S |
1761 | |
1762 | for (i = 0; i < hdev->num_msi; i++) | |
1763 | hdev->vector_status[i] = HCLGE_INVALID_VPORT; | |
1764 | ||
887c3820 SM |
1765 | hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, |
1766 | sizeof(int), GFP_KERNEL); | |
1767 | if (!hdev->vector_irq) { | |
1768 | pci_free_irq_vectors(pdev); | |
1769 | return -ENOMEM; | |
46a3df9f | 1770 | } |
46a3df9f S |
1771 | |
1772 | return 0; | |
1773 | } | |
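
/* Note on the allocation above: pci_alloc_irq_vectors() may legitimately
 * return fewer vectors than the hdev->num_msi requested (any count >= the
 * minimum of 1 counts as success), so the code shrinks num_msi and
 * num_msi_left to the actual allocation instead of failing. The two
 * tracking arrays are devm-managed; only the IRQ vectors themselves need
 * the explicit pci_free_irq_vectors() on the error paths.
 */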
1774 | ||
2d03eacc | 1775 | static u8 hclge_check_speed_dup(u8 duplex, int speed) |
46a3df9f | 1776 | { |
46a3df9f | 1777 | |
2d03eacc YL |
1778 | if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M)) |
1779 | duplex = HCLGE_MAC_FULL; | |
46a3df9f | 1780 | |
2d03eacc | 1781 | return duplex; |
46a3df9f S |
1782 | } |
1783 | ||
2d03eacc YL |
1784 | static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed, |
1785 | u8 duplex) | |
46a3df9f | 1786 | { |
d44f9b63 | 1787 | struct hclge_config_mac_speed_dup_cmd *req; |
46a3df9f S |
1788 | struct hclge_desc desc; |
1789 | int ret; | |
1790 | ||
d44f9b63 | 1791 | req = (struct hclge_config_mac_speed_dup_cmd *)desc.data; |
46a3df9f S |
1792 | |
1793 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false); | |
1794 | ||
e4e87715 | 1795 | hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex); |
46a3df9f S |
1796 | |
1797 | switch (speed) { | |
1798 | case HCLGE_MAC_SPEED_10M: | |
e4e87715 PL |
1799 | hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, |
1800 | HCLGE_CFG_SPEED_S, 6); | |
46a3df9f S |
1801 | break; |
1802 | case HCLGE_MAC_SPEED_100M: | |
e4e87715 PL |
1803 | hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, |
1804 | HCLGE_CFG_SPEED_S, 7); | |
46a3df9f S |
1805 | break; |
1806 | case HCLGE_MAC_SPEED_1G: | |
e4e87715 PL |
1807 | hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, |
1808 | HCLGE_CFG_SPEED_S, 0); | |
46a3df9f S |
1809 | break; |
1810 | case HCLGE_MAC_SPEED_10G: | |
e4e87715 PL |
1811 | hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, |
1812 | HCLGE_CFG_SPEED_S, 1); | |
46a3df9f S |
1813 | break; |
1814 | case HCLGE_MAC_SPEED_25G: | |
e4e87715 PL |
1815 | hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, |
1816 | HCLGE_CFG_SPEED_S, 2); | |
46a3df9f S |
1817 | break; |
1818 | case HCLGE_MAC_SPEED_40G: | |
e4e87715 PL |
1819 | hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, |
1820 | HCLGE_CFG_SPEED_S, 3); | |
46a3df9f S |
1821 | break; |
1822 | case HCLGE_MAC_SPEED_50G: | |
e4e87715 PL |
1823 | hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, |
1824 | HCLGE_CFG_SPEED_S, 4); | |
46a3df9f S |
1825 | break; |
1826 | case HCLGE_MAC_SPEED_100G: | |
e4e87715 PL |
1827 | hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, |
1828 | HCLGE_CFG_SPEED_S, 5); | |
46a3df9f S |
1829 | break; |
1830 | default: | |
d7629e74 | 1831 | dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed); |
46a3df9f S |
1832 | return -EINVAL; |
1833 | } | |
1834 | ||
e4e87715 PL |
1835 | hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B, |
1836 | 1); | |
46a3df9f S |
1837 | |
1838 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
1839 | if (ret) { | |
1840 | dev_err(&hdev->pdev->dev, | |
1841 | "mac speed/duplex config cmd failed %d.\n", ret); | |
1842 | return ret; | |
1843 | } | |
1844 | ||
2d03eacc YL |
1845 | return 0; |
1846 | } | |
1847 | ||
1848 | int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex) | |
1849 | { | |
1850 | int ret; | |
1851 | ||
1852 | duplex = hclge_check_speed_dup(duplex, speed); | |
1853 | if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex) | |
1854 | return 0; | |
1855 | ||
1856 | ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex); | |
1857 | if (ret) | |
1858 | return ret; | |
1859 | ||
1860 | hdev->hw.mac.speed = speed; | |
1861 | hdev->hw.mac.duplex = duplex; | |
46a3df9f S |
1862 | |
1863 | return 0; | |
1864 | } | |
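
/* The switch in hclge_cfg_mac_speed_dup_hw() maps speeds to the
 * HCLGE_CFG_SPEED field as follows (same values as the switch above):
 *
 *   10M -> 6, 100M -> 7, 1G -> 0, 10G -> 1,
 *   25G -> 2, 40G -> 3, 50G -> 4, 100G -> 5
 *
 * Note the encoding is not monotonic in speed: the two lowest speeds use
 * the two highest codes. hclge_cfg_mac_speed_dup() only touches hardware
 * when the requested speed/duplex differs from the cached state, and
 * updates the cache only after the command succeeds.
 */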
1865 | ||
1866 | static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed, | |
1867 | u8 duplex) | |
1868 | { | |
1869 | struct hclge_vport *vport = hclge_get_vport(handle); | |
1870 | struct hclge_dev *hdev = vport->back; | |
1871 | ||
1872 | return hclge_cfg_mac_speed_dup(hdev, speed, duplex); | |
1873 | } | |
1874 | ||
1875 | static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed, | |
1876 | u8 *duplex) | |
1877 | { | |
d44f9b63 | 1878 | struct hclge_query_an_speed_dup_cmd *req; |
46a3df9f S |
1879 | struct hclge_desc desc; |
1880 | int speed_tmp; | |
1881 | int ret; | |
1882 | ||
d44f9b63 | 1883 | req = (struct hclge_query_an_speed_dup_cmd *)desc.data; |
46a3df9f S |
1884 | |
1885 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true); | |
1886 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
1887 | if (ret) { | |
1888 | dev_err(&hdev->pdev->dev, | |
1889 | "mac speed/autoneg/duplex query cmd failed %d\n", | |
1890 | ret); | |
1891 | return ret; | |
1892 | } | |
1893 | ||
e4e87715 PL |
1894 | *duplex = hnae3_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B); |
1895 | speed_tmp = hnae3_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M, | |
1896 | HCLGE_QUERY_SPEED_S); | |
46a3df9f S |
1897 | |
1898 | ret = hclge_parse_speed(speed_tmp, speed); | |
3f639907 | 1899 | if (ret) |
46a3df9f S |
1900 | dev_err(&hdev->pdev->dev, |
1901 | "could not parse speed(=%d), %d\n", speed_tmp, ret); | |
46a3df9f | 1902 | |
3f639907 | 1903 | return ret; |
46a3df9f S |
1904 | } |
1905 | ||
46a3df9f S |
1906 | static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable) |
1907 | { | |
d44f9b63 | 1908 | struct hclge_config_auto_neg_cmd *req; |
46a3df9f | 1909 | struct hclge_desc desc; |
a90bb9a5 | 1910 | u32 flag = 0; |
46a3df9f S |
1911 | int ret; |
1912 | ||
1913 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false); | |
1914 | ||
d44f9b63 | 1915 | req = (struct hclge_config_auto_neg_cmd *)desc.data; |
e4e87715 | 1916 | hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable); |
a90bb9a5 | 1917 | req->cfg_an_cmd_flag = cpu_to_le32(flag); |
46a3df9f S |
1918 | |
1919 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
3f639907 | 1920 | if (ret) |
46a3df9f S |
1921 | dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n", |
1922 | ret); | |
46a3df9f | 1923 | |
3f639907 | 1924 | return ret; |
46a3df9f S |
1925 | } |
1926 | ||
1927 | static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable) | |
1928 | { | |
1929 | struct hclge_vport *vport = hclge_get_vport(handle); | |
1930 | struct hclge_dev *hdev = vport->back; | |
1931 | ||
1932 | return hclge_set_autoneg_en(hdev, enable); | |
1933 | } | |
1934 | ||
1935 | static int hclge_get_autoneg(struct hnae3_handle *handle) | |
1936 | { | |
1937 | struct hclge_vport *vport = hclge_get_vport(handle); | |
1938 | struct hclge_dev *hdev = vport->back; | |
27b5bf49 FL |
1939 | struct phy_device *phydev = hdev->hw.mac.phydev; |
1940 | ||
1941 | if (phydev) | |
1942 | return phydev->autoneg; | |
46a3df9f S |
1943 | |
1944 | return hdev->hw.mac.autoneg; | |
1945 | } | |
1946 | ||
1947 | static int hclge_mac_init(struct hclge_dev *hdev) | |
1948 | { | |
f9fd82a9 FL |
1949 | struct hnae3_handle *handle = &hdev->vport[0].nic; |
1950 | struct net_device *netdev = handle->kinfo.netdev; | |
46a3df9f | 1951 | struct hclge_mac *mac = &hdev->hw.mac; |
f9fd82a9 | 1952 | int mtu; |
46a3df9f S |
1953 | int ret; |
1954 | ||
2d03eacc YL |
1955 | hdev->hw.mac.duplex = HCLGE_MAC_FULL; |
1956 | ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed, | |
1957 | hdev->hw.mac.duplex); | |
46a3df9f S |
1958 | if (ret) { |
1959 | dev_err(&hdev->pdev->dev, | |
1960 | "Config mac speed dup fail ret=%d\n", ret); | |
1961 | return ret; | |
1962 | } | |
1963 | ||
1964 | mac->link = 0; | |
1965 | ||
f9fd82a9 FL |
1966 | if (netdev) |
1967 | mtu = netdev->mtu; | |
1968 | else | |
1969 | mtu = ETH_DATA_LEN; | |
1970 | ||
1971 | ret = hclge_set_mtu(handle, mtu); | |
3f639907 | 1972 | if (ret) |
f9fd82a9 FL |
1973 | dev_err(&hdev->pdev->dev, |
1974 | "set mtu failed ret=%d\n", ret); | |
f9fd82a9 | 1975 | |
3f639907 | 1976 | return ret; |
46a3df9f S |
1977 | } |
1978 | ||
c1a81619 SM |
1979 | static void hclge_mbx_task_schedule(struct hclge_dev *hdev) |
1980 | { | |
1981 | if (!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) | |
1982 | schedule_work(&hdev->mbx_service_task); | |
1983 | } | |
1984 | ||
cb1b9f77 SM |
1985 | static void hclge_reset_task_schedule(struct hclge_dev *hdev) |
1986 | { | |
1987 | if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) | |
1988 | schedule_work(&hdev->rst_service_task); | |
1989 | } | |
1990 | ||
46a3df9f S |
1991 | static void hclge_task_schedule(struct hclge_dev *hdev) |
1992 | { | |
1993 | if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) && | |
1994 | !test_bit(HCLGE_STATE_REMOVING, &hdev->state) && | |
1995 | !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)) | |
1996 | (void)schedule_work(&hdev->service_task); | |
1997 | } | |
1998 | ||
1999 | static int hclge_get_mac_link_status(struct hclge_dev *hdev) | |
2000 | { | |
d44f9b63 | 2001 | struct hclge_link_status_cmd *req; |
46a3df9f S |
2002 | struct hclge_desc desc; |
2003 | int link_status; | |
2004 | int ret; | |
2005 | ||
2006 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true); | |
2007 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
2008 | if (ret) { | |
2009 | dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n", | |
2010 | ret); | |
2011 | return ret; | |
2012 | } | |
2013 | ||
d44f9b63 | 2014 | req = (struct hclge_link_status_cmd *)desc.data; |
c79301d8 | 2015 | link_status = req->status & HCLGE_LINK_STATUS_UP_M; |
46a3df9f S |
2016 | |
2017 | return !!link_status; | |
2018 | } | |
2019 | ||
2020 | static int hclge_get_mac_phy_link(struct hclge_dev *hdev) | |
2021 | { | |
2022 | int mac_state; | |
2023 | int link_stat; | |
2024 | ||
582d37bb PL |
2025 | if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) |
2026 | return 0; | |
2027 | ||
46a3df9f S |
2028 | mac_state = hclge_get_mac_link_status(hdev); |
2029 | ||
2030 | if (hdev->hw.mac.phydev) { | |
fd813314 | 2031 | if (hdev->hw.mac.phydev->state == PHY_RUNNING) |
46a3df9f S |
2032 | link_stat = mac_state & |
2033 | hdev->hw.mac.phydev->link; | |
2034 | else | |
2035 | link_stat = 0; | |
2036 | ||
2037 | } else { | |
2038 | link_stat = mac_state; | |
2039 | } | |
2040 | ||
2041 | return !!link_stat; | |
2042 | } | |
2043 | ||
2044 | static void hclge_update_link_status(struct hclge_dev *hdev) | |
2045 | { | |
2046 | struct hnae3_client *client = hdev->nic_client; | |
2047 | struct hnae3_handle *handle; | |
2048 | int state; | |
2049 | int i; | |
2050 | ||
2051 | if (!client) | |
2052 | return; | |
2053 | state = hclge_get_mac_phy_link(hdev); | |
2054 | if (state != hdev->hw.mac.link) { | |
2055 | for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { | |
2056 | handle = &hdev->vport[i].nic; | |
2057 | client->ops->link_status_change(handle, state); | |
2058 | } | |
2059 | hdev->hw.mac.link = state; | |
2060 | } | |
2061 | } | |
2062 | ||
2063 | static int hclge_update_speed_duplex(struct hclge_dev *hdev) | |
2064 | { | |
2065 | struct hclge_mac mac = hdev->hw.mac; | |
2066 | u8 duplex; | |
2067 | int speed; | |
2068 | int ret; | |
2069 | ||
2070 | /* get the speed and duplex as the autoneg result from the mac cmd when | 
2071 | * a phy doesn't exist. | 
2072 | */ | |
c040366b | 2073 | if (mac.phydev || !mac.autoneg) |
46a3df9f S |
2074 | return 0; |
2075 | ||
2076 | ret = hclge_query_mac_an_speed_dup(hdev, &speed, &duplex); | |
2077 | if (ret) { | |
2078 | dev_err(&hdev->pdev->dev, | |
2079 | "mac autoneg/speed/duplex query failed %d\n", ret); | |
2080 | return ret; | |
2081 | } | |
2082 | ||
2d03eacc YL |
2083 | ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex); |
2084 | if (ret) { | |
2085 | dev_err(&hdev->pdev->dev, | |
2086 | "mac speed/duplex config failed %d\n", ret); | |
2087 | return ret; | |
46a3df9f S |
2088 | } |
2089 | ||
2090 | return 0; | |
2091 | } | |
2092 | ||
2093 | static int hclge_update_speed_duplex_h(struct hnae3_handle *handle) | |
2094 | { | |
2095 | struct hclge_vport *vport = hclge_get_vport(handle); | |
2096 | struct hclge_dev *hdev = vport->back; | |
2097 | ||
2098 | return hclge_update_speed_duplex(hdev); | |
2099 | } | |
2100 | ||
2101 | static int hclge_get_status(struct hnae3_handle *handle) | |
2102 | { | |
2103 | struct hclge_vport *vport = hclge_get_vport(handle); | |
2104 | struct hclge_dev *hdev = vport->back; | |
2105 | ||
2106 | hclge_update_link_status(hdev); | |
2107 | ||
2108 | return hdev->hw.mac.link; | |
2109 | } | |
2110 | ||
d039ef68 | 2111 | static void hclge_service_timer(struct timer_list *t) |
46a3df9f | 2112 | { |
d039ef68 | 2113 | struct hclge_dev *hdev = from_timer(hdev, t, service_timer); |
46a3df9f | 2114 | |
d039ef68 | 2115 | mod_timer(&hdev->service_timer, jiffies + HZ); |
c5f65480 | 2116 | hdev->hw_stats.stats_timer++; |
46a3df9f S |
2117 | hclge_task_schedule(hdev); |
2118 | } | |
2119 | ||
2120 | static void hclge_service_complete(struct hclge_dev *hdev) | |
2121 | { | |
2122 | WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)); | |
2123 | ||
2124 | /* Flush memory before next watchdog */ | |
2125 | smp_mb__before_atomic(); | |
2126 | clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state); | |
2127 | } | |
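
/* The barrier above pairs with the test_and_set_bit() in
 * hclge_task_schedule(): smp_mb__before_atomic() makes this pass's memory
 * writes visible before HCLGE_STATE_SERVICE_SCHED is cleared, so a CPU
 * that schedules the next pass cannot see stale data from this one (a
 * sketch of the intent, per the "Flush memory" comment above).
 */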
2128 | ||
ca1d7669 SM |
2129 | static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) |
2130 | { | |
2131 | u32 rst_src_reg; | |
c1a81619 | 2132 | u32 cmdq_src_reg; |
ca1d7669 SM |
2133 | |
2134 | /* fetch the events from their corresponding regs */ | |
9ca8d1a7 | 2135 | rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS); |
c1a81619 SM |
2136 | cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG); |
2137 | ||
2138 | /* Assumption: If by any chance reset and mailbox events are reported | |
2139 | * together then we will only process reset event in this go and will | |
2140 | * defer the processing of the mailbox events. Since we would not have | 
2141 | * cleared the RX CMDQ event this time, we would receive another | 
2142 | * interrupt from H/W just for the mailbox. | 
2143 | */ | |
ca1d7669 SM |
2144 | |
2145 | /* check for vector0 reset event sources */ | |
2146 | if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) { | |
8d40854f | 2147 | set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); |
ca1d7669 SM |
2148 | set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending); |
2149 | *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B); | |
2150 | return HCLGE_VECTOR0_EVENT_RST; | |
2151 | } | |
2152 | ||
2153 | if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) { | |
8d40854f | 2154 | set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); |
ca1d7669 SM |
2155 | set_bit(HNAE3_CORE_RESET, &hdev->reset_pending); |
2156 | *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B); | |
2157 | return HCLGE_VECTOR0_EVENT_RST; | |
2158 | } | |
2159 | ||
2160 | if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) { | |
2161 | set_bit(HNAE3_IMP_RESET, &hdev->reset_pending); | |
2162 | *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B); | |
2163 | return HCLGE_VECTOR0_EVENT_RST; | |
2164 | } | |
2165 | ||
c1a81619 SM |
2166 | /* check for vector0 mailbox(=CMDQ RX) event source */ |
2167 | if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) { | |
2168 | cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B); | |
2169 | *clearval = cmdq_src_reg; | |
2170 | return HCLGE_VECTOR0_EVENT_MBX; | |
2171 | } | |
ca1d7669 SM |
2172 | |
2173 | return HCLGE_VECTOR0_EVENT_OTHER; | |
2174 | } | |
2175 | ||
2176 | static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type, | |
2177 | u32 regclr) | |
2178 | { | |
c1a81619 SM |
2179 | switch (event_type) { |
2180 | case HCLGE_VECTOR0_EVENT_RST: | |
ca1d7669 | 2181 | hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr); |
c1a81619 SM |
2182 | break; |
2183 | case HCLGE_VECTOR0_EVENT_MBX: | |
2184 | hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr); | |
2185 | break; | |
fa7a4bd5 JS |
2186 | default: |
2187 | break; | |
c1a81619 | 2188 | } |
ca1d7669 SM |
2189 | } |
2190 | ||
8e52a602 XW |
2191 | static void hclge_clear_all_event_cause(struct hclge_dev *hdev) |
2192 | { | |
2193 | hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST, | |
2194 | BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) | | |
2195 | BIT(HCLGE_VECTOR0_CORERESET_INT_B) | | |
2196 | BIT(HCLGE_VECTOR0_IMPRESET_INT_B)); | |
2197 | hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0); | |
2198 | } | |
2199 | ||
466b0c00 L |
2200 | static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable) |
2201 | { | |
2202 | writel(enable ? 1 : 0, vector->addr); | |
2203 | } | |
2204 | ||
2205 | static irqreturn_t hclge_misc_irq_handle(int irq, void *data) | |
2206 | { | |
2207 | struct hclge_dev *hdev = data; | |
ca1d7669 SM |
2208 | u32 event_cause; |
2209 | u32 clearval; | |
466b0c00 L |
2210 | |
2211 | hclge_enable_vector(&hdev->misc_vector, false); | |
ca1d7669 SM |
2212 | event_cause = hclge_check_event_cause(hdev, &clearval); |
2213 | ||
c1a81619 | 2214 | /* vector 0 interrupt is shared with reset and mailbox source events. */ | 
ca1d7669 SM |
2215 | switch (event_cause) { |
2216 | case HCLGE_VECTOR0_EVENT_RST: | |
cb1b9f77 | 2217 | hclge_reset_task_schedule(hdev); |
ca1d7669 | 2218 | break; |
c1a81619 SM |
2219 | case HCLGE_VECTOR0_EVENT_MBX: |
2220 | /* If we are here then, | |
2221 | * 1. Either we are not handling any mbx task and we are not | |
2222 | * scheduled as well | |
2223 | * OR | |
2224 | * 2. We could be handling a mbx task but nothing more is | |
2225 | * scheduled. | |
2226 | * In both cases, we should schedule mbx task as there are more | |
2227 | * mbx messages reported by this interrupt. | |
2228 | */ | |
2229 | hclge_mbx_task_schedule(hdev); | |
f0ad97ac | 2230 | break; |
ca1d7669 | 2231 | default: |
f0ad97ac YL |
2232 | dev_warn(&hdev->pdev->dev, |
2233 | "received unknown or unhandled event of vector0\n"); | |
ca1d7669 SM |
2234 | break; |
2235 | } | |
2236 | ||
cd8c5c26 YL |
2237 | /* clear the source of interrupt if it is not caused by reset */ | 
2238 | if (event_cause != HCLGE_VECTOR0_EVENT_RST) { | |
2239 | hclge_clear_event_cause(hdev, event_cause, clearval); | |
2240 | hclge_enable_vector(&hdev->misc_vector, true); | |
2241 | } | |
466b0c00 L |
2242 | |
2243 | return IRQ_HANDLED; | |
2244 | } | |
2245 | ||
2246 | static void hclge_free_vector(struct hclge_dev *hdev, int vector_id) | |
2247 | { | |
36cbbdf6 PL |
2248 | if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) { |
2249 | dev_warn(&hdev->pdev->dev, | |
2250 | "vector(vector_id %d) has been freed.\n", vector_id); | |
2251 | return; | |
2252 | } | |
2253 | ||
466b0c00 L |
2254 | hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT; |
2255 | hdev->num_msi_left += 1; | |
2256 | hdev->num_msi_used -= 1; | |
2257 | } | |
2258 | ||
2259 | static void hclge_get_misc_vector(struct hclge_dev *hdev) | |
2260 | { | |
2261 | struct hclge_misc_vector *vector = &hdev->misc_vector; | |
2262 | ||
2263 | vector->vector_irq = pci_irq_vector(hdev->pdev, 0); | |
2264 | ||
2265 | vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE; | |
2266 | hdev->vector_status[0] = 0; | |
2267 | ||
2268 | hdev->num_msi_left -= 1; | |
2269 | hdev->num_msi_used += 1; | |
2270 | } | |
2271 | ||
2272 | static int hclge_misc_irq_init(struct hclge_dev *hdev) | |
2273 | { | |
2274 | int ret; | |
2275 | ||
2276 | hclge_get_misc_vector(hdev); | |
2277 | ||
ca1d7669 SM |
2278 | /* this is explicitly freed in hclge_misc_irq_uninit() */ | 
2279 | ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle, | |
2280 | 0, "hclge_misc", hdev); | |
466b0c00 L |
2281 | if (ret) { |
2282 | hclge_free_vector(hdev, 0); | |
2283 | dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n", | |
2284 | hdev->misc_vector.vector_irq); | |
2285 | } | |
2286 | ||
2287 | return ret; | |
2288 | } | |
2289 | ||
ca1d7669 SM |
2290 | static void hclge_misc_irq_uninit(struct hclge_dev *hdev) |
2291 | { | |
2292 | free_irq(hdev->misc_vector.vector_irq, hdev); | |
2293 | hclge_free_vector(hdev, 0); | |
2294 | } | |
2295 | ||
4ed340ab L |
2296 | static int hclge_notify_client(struct hclge_dev *hdev, |
2297 | enum hnae3_reset_notify_type type) | |
2298 | { | |
2299 | struct hnae3_client *client = hdev->nic_client; | |
2300 | u16 i; | |
2301 | ||
2302 | if (!client->ops->reset_notify) | |
2303 | return -EOPNOTSUPP; | |
2304 | ||
2305 | for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { | |
2306 | struct hnae3_handle *handle = &hdev->vport[i].nic; | |
2307 | int ret; | |
2308 | ||
2309 | ret = client->ops->reset_notify(handle, type); | |
2310 | if (ret) | |
2311 | return ret; | |
2312 | } | |
2313 | ||
2314 | return 0; | |
2315 | } | |
2316 | ||
2317 | static int hclge_reset_wait(struct hclge_dev *hdev) | |
2318 | { | |
2319 | #define HCLGE_RESET_WAIT_MS 100 | 
2320 | #define HCLGE_RESET_WAIT_CNT 5 | |
2321 | u32 val, reg, reg_bit; | |
2322 | u32 cnt = 0; | |
2323 | ||
2324 | switch (hdev->reset_type) { | |
2325 | case HNAE3_GLOBAL_RESET: | |
2326 | reg = HCLGE_GLOBAL_RESET_REG; | |
2327 | reg_bit = HCLGE_GLOBAL_RESET_BIT; | |
2328 | break; | |
2329 | case HNAE3_CORE_RESET: | |
2330 | reg = HCLGE_GLOBAL_RESET_REG; | |
2331 | reg_bit = HCLGE_CORE_RESET_BIT; | |
2332 | break; | |
2333 | case HNAE3_FUNC_RESET: | |
2334 | reg = HCLGE_FUN_RST_ING; | |
2335 | reg_bit = HCLGE_FUN_RST_ING_B; | |
2336 | break; | |
2337 | default: | |
2338 | dev_err(&hdev->pdev->dev, | |
2339 | "Wait for unsupported reset type: %d\n", | |
2340 | hdev->reset_type); | |
2341 | return -EINVAL; | |
2342 | } | |
2343 | ||
2344 | val = hclge_read_dev(&hdev->hw, reg); | |
e4e87715 | 2345 | while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) { |
4ed340ab L |
2346 | msleep(HCLGE_RESET_WAIT_MS); | 
2347 | val = hclge_read_dev(&hdev->hw, reg); | |
2348 | cnt++; | |
2349 | } | |
2350 | ||
4ed340ab L |
2351 | if (cnt >= HCLGE_RESET_WAIT_CNT) { |
2352 | dev_warn(&hdev->pdev->dev, | |
2353 | "Wait for reset timeout: %d\n", hdev->reset_type); | |
2354 | return -EBUSY; | |
2355 | } | |
2356 | ||
2357 | return 0; | |
2358 | } | |
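
/* With the constants above, the loop polls the reset status register
 * every HCLGE_RESET_WAIT_MS (100 ms) for at most HCLGE_RESET_WAIT_CNT (5)
 * iterations, i.e. the hardware gets roughly half a second to finish a
 * reset before the function gives up with -EBUSY.
 */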
2359 | ||
2bfbd35d | 2360 | int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id) |
4ed340ab L |
2361 | { |
2362 | struct hclge_desc desc; | |
2363 | struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data; | |
2364 | int ret; | |
2365 | ||
2366 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false); | |
e4e87715 | 2367 | hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1); |
4ed340ab L |
2368 | req->fun_reset_vfid = func_id; |
2369 | ||
2370 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
2371 | if (ret) | |
2372 | dev_err(&hdev->pdev->dev, | |
2373 | "send function reset cmd fail, status =%d\n", ret); | |
2374 | ||
2375 | return ret; | |
2376 | } | |
2377 | ||
f2f432f2 | 2378 | static void hclge_do_reset(struct hclge_dev *hdev) |
4ed340ab L |
2379 | { |
2380 | struct pci_dev *pdev = hdev->pdev; | |
2381 | u32 val; | |
2382 | ||
f2f432f2 | 2383 | switch (hdev->reset_type) { |
4ed340ab L |
2384 | case HNAE3_GLOBAL_RESET: |
2385 | val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); | |
e4e87715 | 2386 | hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1); |
4ed340ab L |
2387 | hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val); |
2388 | dev_info(&pdev->dev, "Global Reset requested\n"); | |
2389 | break; | |
2390 | case HNAE3_CORE_RESET: | |
2391 | val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); | |
e4e87715 | 2392 | hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1); |
4ed340ab L |
2393 | hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val); |
2394 | dev_info(&pdev->dev, "Core Reset requested\n"); | |
2395 | break; | |
2396 | case HNAE3_FUNC_RESET: | |
2397 | dev_info(&pdev->dev, "PF Reset requested\n"); | |
2398 | hclge_func_reset_cmd(hdev, 0); | |
cb1b9f77 SM |
2399 | /* schedule again to check later */ |
2400 | set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending); | |
2401 | hclge_reset_task_schedule(hdev); | |
4ed340ab L |
2402 | break; |
2403 | default: | |
2404 | dev_warn(&pdev->dev, | |
f2f432f2 | 2405 | "Unsupported reset type: %d\n", hdev->reset_type); |
4ed340ab L |
2406 | break; |
2407 | } | |
2408 | } | |
2409 | ||
f2f432f2 SM |
2410 | static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev, |
2411 | unsigned long *addr) | |
2412 | { | |
2413 | enum hnae3_reset_type rst_level = HNAE3_NONE_RESET; | |
2414 | ||
2415 | /* return the highest priority reset level amongst all */ | |
2416 | if (test_bit(HNAE3_GLOBAL_RESET, addr)) | |
2417 | rst_level = HNAE3_GLOBAL_RESET; | |
2418 | else if (test_bit(HNAE3_CORE_RESET, addr)) | |
2419 | rst_level = HNAE3_CORE_RESET; | |
2420 | else if (test_bit(HNAE3_IMP_RESET, addr)) | |
2421 | rst_level = HNAE3_IMP_RESET; | |
2422 | else if (test_bit(HNAE3_FUNC_RESET, addr)) | |
2423 | rst_level = HNAE3_FUNC_RESET; | |
2424 | ||
2425 | /* now, clear all other resets */ | |
2426 | clear_bit(HNAE3_GLOBAL_RESET, addr); | |
2427 | clear_bit(HNAE3_CORE_RESET, addr); | |
2428 | clear_bit(HNAE3_IMP_RESET, addr); | |
2429 | clear_bit(HNAE3_FUNC_RESET, addr); | |
2430 | ||
2431 | return rst_level; | |
2432 | } | |
2433 | ||
cd8c5c26 YL |
2434 | static void hclge_clear_reset_cause(struct hclge_dev *hdev) |
2435 | { | |
2436 | u32 clearval = 0; | |
2437 | ||
2438 | switch (hdev->reset_type) { | |
2439 | case HNAE3_IMP_RESET: | |
2440 | clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B); | |
2441 | break; | |
2442 | case HNAE3_GLOBAL_RESET: | |
2443 | clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B); | |
2444 | break; | |
2445 | case HNAE3_CORE_RESET: | |
2446 | clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B); | |
2447 | break; | |
2448 | default: | |
cd8c5c26 YL |
2449 | break; |
2450 | } | |
2451 | ||
2452 | if (!clearval) | |
2453 | return; | |
2454 | ||
2455 | hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval); | |
2456 | hclge_enable_vector(&hdev->misc_vector, true); | |
2457 | } | |
2458 | ||
f2f432f2 SM |
2459 | static void hclge_reset(struct hclge_dev *hdev) |
2460 | { | |
6871af29 | 2461 | struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); |
9de0b86f HT |
2462 | struct hnae3_handle *handle; |
2463 | ||
6871af29 JS |
2464 | /* Initialize ae_dev reset status as well, in case enet layer wants to |
2465 | * know if device is undergoing reset | |
2466 | */ | |
2467 | ae_dev->reset_type = hdev->reset_type; | |
f2f432f2 | 2468 | /* perform reset of the stack & ae device for a client */ |
9de0b86f | 2469 | handle = &hdev->vport[0].nic; |
6d4fab39 | 2470 | rtnl_lock(); |
f2f432f2 SM |
2471 | hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); |
2472 | ||
2473 | if (!hclge_reset_wait(hdev)) { | |
f2f432f2 SM |
2474 | hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT); |
2475 | hclge_reset_ae_dev(hdev->ae_dev); | |
2476 | hclge_notify_client(hdev, HNAE3_INIT_CLIENT); | |
cd8c5c26 YL |
2477 | |
2478 | hclge_clear_reset_cause(hdev); | |
f2f432f2 SM |
2479 | } else { |
2480 | /* schedule again to check pending resets later */ | |
2481 | set_bit(hdev->reset_type, &hdev->reset_pending); | |
2482 | hclge_reset_task_schedule(hdev); | |
2483 | } | |
2484 | ||
2485 | hclge_notify_client(hdev, HNAE3_UP_CLIENT); | |
9de0b86f | 2486 | handle->last_reset_time = jiffies; |
6d4fab39 | 2487 | rtnl_unlock(); |
6871af29 | 2488 | ae_dev->reset_type = HNAE3_NONE_RESET; |
f2f432f2 SM |
2489 | } |
2490 | ||
6d4c3981 | 2491 | static void hclge_reset_event(struct hnae3_handle *handle) |
4ed340ab L |
2492 | { |
2493 | struct hclge_vport *vport = hclge_get_vport(handle); | |
2494 | struct hclge_dev *hdev = vport->back; | |
2495 | ||
6d4c3981 SM |
2496 | /* check if this is a new reset request and we are not here just because |
2497 | * last reset attempt did not succeed and watchdog hit us again. We will | |
2498 | * know this if the last reset request did not occur very recently (watchdog | 
2499 | * timer = 5*HZ, so let us check after a sufficiently large time, say 4*5*HZ). | 
2500 | * In case of a new request we reset the "reset level" to PF reset. | 
9de0b86f HT |
2501 | * And if it is a repeat reset request of the most recent one then we |
2502 | * want to make sure we throttle the reset request. Therefore, we will | |
2503 | * not allow it again before 3*HZ has elapsed. | 
6d4c3981 | 2504 | */ |
9de0b86f HT |
2505 | if (time_before(jiffies, (handle->last_reset_time + 3 * HZ))) |
2506 | return; | |
2507 | else if (time_after(jiffies, (handle->last_reset_time + 4 * 5 * HZ))) | |
6d4c3981 | 2508 | handle->reset_level = HNAE3_FUNC_RESET; |
4ed340ab | 2509 | |
6d4c3981 SM |
2510 | dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n", | 
2511 | handle->reset_level); | |
2512 | ||
2513 | /* request reset & schedule reset task */ | |
2514 | set_bit(handle->reset_level, &hdev->reset_request); | |
2515 | hclge_reset_task_schedule(hdev); | |
2516 | ||
2517 | if (handle->reset_level < HNAE3_GLOBAL_RESET) | |
2518 | handle->reset_level++; | |
4ed340ab L |
2519 | } |
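
/* Worked timeline for the throttling above: a request arriving within
 * 3*HZ (3 s) of the last one is dropped; one arriving after 4*5*HZ (20 s)
 * is treated as brand new and demoted to a PF (FUNC) reset. Every
 * accepted request schedules the reset task and then pre-escalates
 * handle->reset_level one step (capped at global reset), so a retry of a
 * failed reset automatically asks for a stronger one.
 */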
2520 | ||
2521 | static void hclge_reset_subtask(struct hclge_dev *hdev) | |
2522 | { | |
f2f432f2 SM |
2523 | /* check if there is any ongoing reset in the hardware. This status can |
2524 | * be checked from reset_pending. If there is, then we need to wait for | 
2525 | * the hardware to complete the reset. | 
2526 | * a. If we are able to figure out in reasonable time that the hardware | 
2527 | * has fully reset, then we can proceed with the driver and client | 
2528 | * reset. | 
2529 | * b. else, we can come back later to check this status so re-sched | |
2530 | * now. | |
2531 | */ | |
2532 | hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending); | |
2533 | if (hdev->reset_type != HNAE3_NONE_RESET) | |
2534 | hclge_reset(hdev); | |
4ed340ab | 2535 | |
f2f432f2 SM |
2536 | /* check if we got any *new* reset requests to be honored */ |
2537 | hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request); | |
2538 | if (hdev->reset_type != HNAE3_NONE_RESET) | |
2539 | hclge_do_reset(hdev); | |
4ed340ab | 2540 | |
4ed340ab L |
2541 | hdev->reset_type = HNAE3_NONE_RESET; |
2542 | } | |
2543 | ||
cb1b9f77 | 2544 | static void hclge_reset_service_task(struct work_struct *work) |
466b0c00 | 2545 | { |
cb1b9f77 SM |
2546 | struct hclge_dev *hdev = |
2547 | container_of(work, struct hclge_dev, rst_service_task); | |
2548 | ||
2549 | if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) | |
2550 | return; | |
2551 | ||
2552 | clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state); | |
2553 | ||
4ed340ab | 2554 | hclge_reset_subtask(hdev); |
cb1b9f77 SM |
2555 | |
2556 | clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); | |
466b0c00 L |
2557 | } |
2558 | ||
c1a81619 SM |
2559 | static void hclge_mailbox_service_task(struct work_struct *work) |
2560 | { | |
2561 | struct hclge_dev *hdev = | |
2562 | container_of(work, struct hclge_dev, mbx_service_task); | |
2563 | ||
2564 | if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state)) | |
2565 | return; | |
2566 | ||
2567 | clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state); | |
2568 | ||
2569 | hclge_mbx_handler(hdev); | |
2570 | ||
2571 | clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); | |
2572 | } | |
2573 | ||
46a3df9f S |
2574 | static void hclge_service_task(struct work_struct *work) |
2575 | { | |
2576 | struct hclge_dev *hdev = | |
2577 | container_of(work, struct hclge_dev, service_task); | |
2578 | ||
c5f65480 JS |
2579 | if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) { |
2580 | hclge_update_stats_for_all(hdev); | |
2581 | hdev->hw_stats.stats_timer = 0; | |
2582 | } | |
2583 | ||
46a3df9f S |
2584 | hclge_update_speed_duplex(hdev); |
2585 | hclge_update_link_status(hdev); | |
46a3df9f S |
2586 | hclge_service_complete(hdev); |
2587 | } | |
2588 | ||
46a3df9f S |
2589 | struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle) |
2590 | { | |
2591 | /* VF handle has no client */ | |
2592 | if (!handle->client) | |
2593 | return container_of(handle, struct hclge_vport, nic); | |
2594 | else if (handle->client->type == HNAE3_CLIENT_ROCE) | |
2595 | return container_of(handle, struct hclge_vport, roce); | |
2596 | else | |
2597 | return container_of(handle, struct hclge_vport, nic); | |
2598 | } | |
2599 | ||
2600 | static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num, | |
2601 | struct hnae3_vector_info *vector_info) | |
2602 | { | |
2603 | struct hclge_vport *vport = hclge_get_vport(handle); | |
2604 | struct hnae3_vector_info *vector = vector_info; | |
2605 | struct hclge_dev *hdev = vport->back; | |
2606 | int alloc = 0; | |
2607 | int i, j; | |
2608 | ||
2609 | vector_num = min(hdev->num_msi_left, vector_num); | |
2610 | ||
2611 | for (j = 0; j < vector_num; j++) { | |
2612 | for (i = 1; i < hdev->num_msi; i++) { | |
2613 | if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) { | |
2614 | vector->vector = pci_irq_vector(hdev->pdev, i); | |
2615 | vector->io_addr = hdev->hw.io_base + | |
2616 | HCLGE_VECTOR_REG_BASE + | |
2617 | (i - 1) * HCLGE_VECTOR_REG_OFFSET + | |
2618 | vport->vport_id * | |
2619 | HCLGE_VECTOR_VF_OFFSET; | |
2620 | hdev->vector_status[i] = vport->vport_id; | |
887c3820 | 2621 | hdev->vector_irq[i] = vector->vector; |
46a3df9f S |
2622 | |
2623 | vector++; | |
2624 | alloc++; | |
2625 | ||
2626 | break; | |
2627 | } | |
2628 | } | |
2629 | } | |
2630 | hdev->num_msi_left -= alloc; | |
2631 | hdev->num_msi_used += alloc; | |
2632 | ||
2633 | return alloc; | |
2634 | } | |
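
/* The per-vector doorbell address computed above follows a fixed layout
 * (the concrete register constants are hardware specific):
 *
 *   io_addr = io_base + HCLGE_VECTOR_REG_BASE
 *             + (i - 1) * HCLGE_VECTOR_REG_OFFSET
 *             + vport_id * HCLGE_VECTOR_VF_OFFSET
 *
 * The search starts at i = 1 because vector 0 is reserved for the
 * misc/mailbox/reset interrupt claimed in hclge_get_misc_vector().
 */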
2635 | ||
2636 | static int hclge_get_vector_index(struct hclge_dev *hdev, int vector) | |
2637 | { | |
2638 | int i; | |
2639 | ||
887c3820 SM |
2640 | for (i = 0; i < hdev->num_msi; i++) |
2641 | if (vector == hdev->vector_irq[i]) | |
2642 | return i; | |
2643 | ||
46a3df9f S |
2644 | return -EINVAL; |
2645 | } | |
2646 | ||
0d3e6631 YL |
2647 | static int hclge_put_vector(struct hnae3_handle *handle, int vector) |
2648 | { | |
2649 | struct hclge_vport *vport = hclge_get_vport(handle); | |
2650 | struct hclge_dev *hdev = vport->back; | |
2651 | int vector_id; | |
2652 | ||
2653 | vector_id = hclge_get_vector_index(hdev, vector); | |
2654 | if (vector_id < 0) { | |
2655 | dev_err(&hdev->pdev->dev, | |
2656 | "Get vector index fail. vector_id =%d\n", vector_id); | |
2657 | return vector_id; | |
2658 | } | |
2659 | ||
2660 | hclge_free_vector(hdev, vector_id); | |
2661 | ||
2662 | return 0; | |
2663 | } | |
2664 | ||
46a3df9f S |
2665 | static u32 hclge_get_rss_key_size(struct hnae3_handle *handle) |
2666 | { | |
2667 | return HCLGE_RSS_KEY_SIZE; | |
2668 | } | |
2669 | ||
2670 | static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle) | |
2671 | { | |
2672 | return HCLGE_RSS_IND_TBL_SIZE; | |
2673 | } | |
2674 | ||
46a3df9f S |
2675 | static int hclge_set_rss_algo_key(struct hclge_dev *hdev, |
2676 | const u8 hfunc, const u8 *key) | |
2677 | { | |
d44f9b63 | 2678 | struct hclge_rss_config_cmd *req; |
46a3df9f S |
2679 | struct hclge_desc desc; |
2680 | int key_offset; | |
2681 | int key_size; | |
2682 | int ret; | |
2683 | ||
d44f9b63 | 2684 | req = (struct hclge_rss_config_cmd *)desc.data; |
46a3df9f S |
2685 | |
2686 | for (key_offset = 0; key_offset < 3; key_offset++) { | |
2687 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG, | |
2688 | false); | |
2689 | ||
2690 | req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK); | |
2691 | req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B); | |
2692 | ||
2693 | if (key_offset == 2) | |
2694 | key_size = | |
2695 | HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2; | |
2696 | else | |
2697 | key_size = HCLGE_RSS_HASH_KEY_NUM; | |
2698 | ||
2699 | memcpy(req->hash_key, | |
2700 | key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size); | |
2701 | ||
2702 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
2703 | if (ret) { | |
2704 | dev_err(&hdev->pdev->dev, | |
2705 | "Configure RSS config fail, status = %d\n", | |
2706 | ret); | |
2707 | return ret; | |
2708 | } | |
2709 | } | |
2710 | return 0; | |
2711 | } | |
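
/* The RSS key is written in three chunks of HCLGE_RSS_HASH_KEY_NUM bytes,
 * the last chunk holding the remainder
 * (HCLGE_RSS_KEY_SIZE - 2 * HCLGE_RSS_HASH_KEY_NUM). For example, if the
 * key were 40 bytes and the chunk size 16 (assumed sizes, for
 * illustration), the three descriptors would carry 16 + 16 + 8 bytes,
 * with the chunk index encoded in hash_config via
 * HCLGE_RSS_HASH_KEY_OFFSET_B.
 */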
2712 | ||
89523cfa | 2713 | static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir) |
46a3df9f | 2714 | { |
d44f9b63 | 2715 | struct hclge_rss_indirection_table_cmd *req; |
46a3df9f S |
2716 | struct hclge_desc desc; |
2717 | int i, j; | |
2718 | int ret; | |
2719 | ||
d44f9b63 | 2720 | req = (struct hclge_rss_indirection_table_cmd *)desc.data; |
46a3df9f S |
2721 | |
2722 | for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) { | |
2723 | hclge_cmd_setup_basic_desc | |
2724 | (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false); | |
2725 | ||
a90bb9a5 YL |
2726 | req->start_table_index = |
2727 | cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE); | |
2728 | req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK); | |
46a3df9f S |
2729 | |
2730 | for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) | |
2731 | req->rss_result[j] = | |
2732 | indir[i * HCLGE_RSS_CFG_TBL_SIZE + j]; | |
2733 | ||
2734 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
2735 | if (ret) { | |
2736 | dev_err(&hdev->pdev->dev, | |
2737 | "Configure rss indir table fail,status = %d\n", | |
2738 | ret); | |
2739 | return ret; | |
2740 | } | |
2741 | } | |
2742 | return 0; | |
2743 | } | |
2744 | ||
2745 | static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid, | |
2746 | u16 *tc_size, u16 *tc_offset) | |
2747 | { | |
d44f9b63 | 2748 | struct hclge_rss_tc_mode_cmd *req; |
46a3df9f S |
2749 | struct hclge_desc desc; |
2750 | int ret; | |
2751 | int i; | |
2752 | ||
2753 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false); | |
d44f9b63 | 2754 | req = (struct hclge_rss_tc_mode_cmd *)desc.data; |
46a3df9f S |
2755 | |
2756 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { | |
a90bb9a5 YL |
2757 | u16 mode = 0; |
2758 | ||
e4e87715 PL |
2759 | hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1)); |
2760 | hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M, | |
2761 | HCLGE_RSS_TC_SIZE_S, tc_size[i]); | |
2762 | hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M, | |
2763 | HCLGE_RSS_TC_OFFSET_S, tc_offset[i]); | |
a90bb9a5 YL |
2764 | |
2765 | req->rss_tc_mode[i] = cpu_to_le16(mode); | |
46a3df9f S |
2766 | } |
2767 | ||
2768 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
3f639907 | 2769 | if (ret) |
46a3df9f S |
2770 | dev_err(&hdev->pdev->dev, |
2771 | "Configure rss tc mode fail, status = %d\n", ret); | |
46a3df9f | 2772 | |
3f639907 | 2773 | return ret; |
46a3df9f S |
2774 | } |
2775 | ||
2776 | static int hclge_set_rss_input_tuple(struct hclge_dev *hdev) | |
2777 | { | |
d44f9b63 | 2778 | struct hclge_rss_input_tuple_cmd *req; |
46a3df9f S |
2779 | struct hclge_desc desc; |
2780 | int ret; | |
2781 | ||
2782 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false); | |
2783 | ||
d44f9b63 | 2784 | req = (struct hclge_rss_input_tuple_cmd *)desc.data; |
6f2af429 YL |
2785 | |
2786 | /* Get the tuple cfg from pf */ | |
2787 | req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en; | |
2788 | req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en; | |
2789 | req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en; | |
2790 | req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en; | |
2791 | req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en; | |
2792 | req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en; | |
2793 | req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en; | |
2794 | req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en; | |
46a3df9f | 2795 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); |
3f639907 | 2796 | if (ret) |
46a3df9f S |
2797 | dev_err(&hdev->pdev->dev, |
2798 | "Configure rss input fail, status = %d\n", ret); | |
3f639907 | 2799 | return ret; |
46a3df9f S |
2800 | } |
2801 | ||
2802 | static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir, | |
2803 | u8 *key, u8 *hfunc) | |
2804 | { | |
2805 | struct hclge_vport *vport = hclge_get_vport(handle); | |
46a3df9f S |
2806 | int i; |
2807 | ||
2808 | /* Get hash algorithm */ | |
2809 | if (hfunc) | |
89523cfa | 2810 | *hfunc = vport->rss_algo; |
46a3df9f S |
2811 | |
2812 | /* Get the RSS Key required by the user */ | |
2813 | if (key) | |
2814 | memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE); | |
2815 | ||
2816 | /* Get indirect table */ | |
2817 | if (indir) | |
2818 | for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) | |
2819 | indir[i] = vport->rss_indirection_tbl[i]; | |
2820 | ||
2821 | return 0; | |
2822 | } | |
2823 | ||
2824 | static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir, | |
2825 | const u8 *key, const u8 hfunc) | |
2826 | { | |
2827 | struct hclge_vport *vport = hclge_get_vport(handle); | |
2828 | struct hclge_dev *hdev = vport->back; | |
2829 | u8 hash_algo; | |
2830 | int ret, i; | |
2831 | ||
2832 | /* Set the RSS Hash Key if specified by the user */ | 
2833 | if (key) { | |
46a3df9f S |
2834 | |
2835 | if (hfunc == ETH_RSS_HASH_TOP || | |
2836 | hfunc == ETH_RSS_HASH_NO_CHANGE) | |
2837 | hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ; | |
2838 | else | |
2839 | return -EINVAL; | |
2840 | ret = hclge_set_rss_algo_key(hdev, hash_algo, key); | |
2841 | if (ret) | |
2842 | return ret; | |
89523cfa YL |
2843 | |
2844 | /* Update the shadow RSS key with the user specified key */ | 
2845 | memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE); | |
2846 | vport->rss_algo = hash_algo; | |
46a3df9f S |
2847 | } |
2848 | ||
2849 | /* Update the shadow RSS table with user specified qids */ | |
2850 | for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) | |
2851 | vport->rss_indirection_tbl[i] = indir[i]; | |
2852 | ||
2853 | /* Update the hardware */ | |
89523cfa | 2854 | return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl); |
46a3df9f S |
2855 | } |
2856 | ||
f7db940a L |
2857 | static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc) |
2858 | { | |
2859 | u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0; | |
2860 | ||
2861 | if (nfc->data & RXH_L4_B_2_3) | |
2862 | hash_sets |= HCLGE_D_PORT_BIT; | |
2863 | else | |
2864 | hash_sets &= ~HCLGE_D_PORT_BIT; | |
2865 | ||
2866 | if (nfc->data & RXH_IP_SRC) | |
2867 | hash_sets |= HCLGE_S_IP_BIT; | |
2868 | else | |
2869 | hash_sets &= ~HCLGE_S_IP_BIT; | |
2870 | ||
2871 | if (nfc->data & RXH_IP_DST) | |
2872 | hash_sets |= HCLGE_D_IP_BIT; | |
2873 | else | |
2874 | hash_sets &= ~HCLGE_D_IP_BIT; | |
2875 | ||
2876 | if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW) | |
2877 | hash_sets |= HCLGE_V_TAG_BIT; | |
2878 | ||
2879 | return hash_sets; | |
2880 | } | |
2881 | ||
2882 | static int hclge_set_rss_tuple(struct hnae3_handle *handle, | |
2883 | struct ethtool_rxnfc *nfc) | |
2884 | { | |
2885 | struct hclge_vport *vport = hclge_get_vport(handle); | |
2886 | struct hclge_dev *hdev = vport->back; | |
2887 | struct hclge_rss_input_tuple_cmd *req; | |
2888 | struct hclge_desc desc; | |
2889 | u8 tuple_sets; | |
2890 | int ret; | |
2891 | ||
2892 | if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | | |
2893 | RXH_L4_B_0_1 | RXH_L4_B_2_3)) | |
2894 | return -EINVAL; | |
2895 | ||
2896 | req = (struct hclge_rss_input_tuple_cmd *)desc.data; | |
6f2af429 | 2897 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false); |
f7db940a | 2898 | |
6f2af429 YL |
2899 | req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en; |
2900 | req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en; | |
2901 | req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en; | |
2902 | req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en; | |
2903 | req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en; | |
2904 | req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en; | |
2905 | req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en; | |
2906 | req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en; | |
f7db940a L |
2907 | |
2908 | tuple_sets = hclge_get_rss_hash_bits(nfc); | |
2909 | switch (nfc->flow_type) { | |
2910 | case TCP_V4_FLOW: | |
2911 | req->ipv4_tcp_en = tuple_sets; | |
2912 | break; | |
2913 | case TCP_V6_FLOW: | |
2914 | req->ipv6_tcp_en = tuple_sets; | |
2915 | break; | |
2916 | case UDP_V4_FLOW: | |
2917 | req->ipv4_udp_en = tuple_sets; | |
2918 | break; | |
2919 | case UDP_V6_FLOW: | |
2920 | req->ipv6_udp_en = tuple_sets; | |
2921 | break; | |
2922 | case SCTP_V4_FLOW: | |
2923 | req->ipv4_sctp_en = tuple_sets; | |
2924 | break; | |
2925 | case SCTP_V6_FLOW: | |
2926 | if ((nfc->data & RXH_L4_B_0_1) || | |
2927 | (nfc->data & RXH_L4_B_2_3)) | |
2928 | return -EINVAL; | |
2929 | ||
2930 | req->ipv6_sctp_en = tuple_sets; | |
2931 | break; | |
2932 | case IPV4_FLOW: | |
2933 | req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER; | |
2934 | break; | |
2935 | case IPV6_FLOW: | |
2936 | req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER; | |
2937 | break; | |
2938 | default: | |
2939 | return -EINVAL; | |
2940 | } | |
2941 | ||
2942 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
6f2af429 | 2943 | if (ret) { |
f7db940a L |
2944 | dev_err(&hdev->pdev->dev, |
2945 | "Set rss tuple fail, status = %d\n", ret); | |
6f2af429 YL |
2946 | return ret; |
2947 | } | |
f7db940a | 2948 | |
6f2af429 YL |
2949 | vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en; |
2950 | vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en; | |
2951 | vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en; | |
2952 | vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en; | |
2953 | vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en; | |
2954 | vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en; | |
2955 | vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en; | |
2956 | vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en; | |
2957 | return 0; | |
f7db940a L |
2958 | } |
2959 | ||
07d29954 L |
2960 | static int hclge_get_rss_tuple(struct hnae3_handle *handle, |
2961 | struct ethtool_rxnfc *nfc) | |
2962 | { | |
2963 | struct hclge_vport *vport = hclge_get_vport(handle); | |
07d29954 | 2964 | u8 tuple_sets; |
07d29954 L |
2965 | |
2966 | nfc->data = 0; | |
2967 | ||
07d29954 L |
2968 | switch (nfc->flow_type) { |
2969 | case TCP_V4_FLOW: | |
6f2af429 | 2970 | tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en; |
07d29954 L |
2971 | break; |
2972 | case UDP_V4_FLOW: | |
6f2af429 | 2973 | tuple_sets = vport->rss_tuple_sets.ipv4_udp_en; |
07d29954 L |
2974 | break; |
2975 | case TCP_V6_FLOW: | |
6f2af429 | 2976 | tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en; |
07d29954 L |
2977 | break; |
2978 | case UDP_V6_FLOW: | |
6f2af429 | 2979 | tuple_sets = vport->rss_tuple_sets.ipv6_udp_en; |
07d29954 L |
2980 | break; |
2981 | case SCTP_V4_FLOW: | |
6f2af429 | 2982 | tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en; |
07d29954 L |
2983 | break; |
2984 | case SCTP_V6_FLOW: | |
6f2af429 | 2985 | tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en; |
07d29954 L |
2986 | break; |
2987 | case IPV4_FLOW: | |
2988 | case IPV6_FLOW: | |
2989 | tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT; | |
2990 | break; | |
2991 | default: | |
2992 | return -EINVAL; | |
2993 | } | |
2994 | ||
2995 | if (!tuple_sets) | |
2996 | return 0; | |
2997 | ||
2998 | if (tuple_sets & HCLGE_D_PORT_BIT) | |
2999 | nfc->data |= RXH_L4_B_2_3; | |
3000 | if (tuple_sets & HCLGE_S_PORT_BIT) | |
3001 | nfc->data |= RXH_L4_B_0_1; | |
3002 | if (tuple_sets & HCLGE_D_IP_BIT) | |
3003 | nfc->data |= RXH_IP_DST; | |
3004 | if (tuple_sets & HCLGE_S_IP_BIT) | |
3005 | nfc->data |= RXH_IP_SRC; | |
3006 | ||
3007 | return 0; | |
3008 | } | |
3009 | ||
46a3df9f S |
3010 | static int hclge_get_tc_size(struct hnae3_handle *handle) |
3011 | { | |
3012 | struct hclge_vport *vport = hclge_get_vport(handle); | |
3013 | struct hclge_dev *hdev = vport->back; | |
3014 | ||
3015 | return hdev->rss_size_max; | |
3016 | } | |
3017 | ||
77f255c1 | 3018 | int hclge_rss_init_hw(struct hclge_dev *hdev) |
46a3df9f | 3019 | { |
46a3df9f | 3020 | struct hclge_vport *vport = hdev->vport; |
268f5dfa YL |
3021 | u8 *rss_indir = vport[0].rss_indirection_tbl; |
3022 | u16 rss_size = vport[0].alloc_rss_size; | |
3023 | u8 *key = vport[0].rss_hash_key; | |
3024 | u8 hfunc = vport[0].rss_algo; | |
46a3df9f | 3025 | u16 tc_offset[HCLGE_MAX_TC_NUM]; |
46a3df9f S |
3026 | u16 tc_valid[HCLGE_MAX_TC_NUM]; |
3027 | u16 tc_size[HCLGE_MAX_TC_NUM]; | |
268f5dfa YL |
3028 | u16 roundup_size; |
3029 | int i, ret; | |
68ece54e | 3030 | |
46a3df9f S |
3031 | ret = hclge_set_rss_indir_table(hdev, rss_indir); |
3032 | if (ret) | |
268f5dfa | 3033 | return ret; |
46a3df9f | 3034 | |
46a3df9f S |
3035 | ret = hclge_set_rss_algo_key(hdev, hfunc, key); |
3036 | if (ret) | |
268f5dfa | 3037 | return ret; |
46a3df9f S |
3038 | |
3039 | ret = hclge_set_rss_input_tuple(hdev); | |
3040 | if (ret) | |
268f5dfa | 3041 | return ret; |
46a3df9f | 3042 | |
68ece54e YL |
3043 | /* Each TC has the same queue size, and the tc_size set to hardware is
3044 | * the log2 of rss_size rounded up to a power of two; the actual queue
3045 | * size is limited by the indirection table.
3046 | */ | |
3047 | if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) { | |
3048 | dev_err(&hdev->pdev->dev, | |
3049 | "Configure rss tc size failed, invalid TC_SIZE = %d\n", | |
3050 | rss_size); | |
268f5dfa | 3051 | return -EINVAL; |
68ece54e YL |
3052 | } |
3053 | ||
3054 | roundup_size = roundup_pow_of_two(rss_size); | |
3055 | roundup_size = ilog2(roundup_size); | |
3056 | ||
46a3df9f | 3057 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { |
68ece54e | 3058 | tc_valid[i] = 0; |
46a3df9f | 3059 | |
68ece54e YL |
3060 | if (!(hdev->hw_tc_map & BIT(i))) |
3061 | continue; | |
3062 | ||
3063 | tc_valid[i] = 1; | |
3064 | tc_size[i] = roundup_size; | |
3065 | tc_offset[i] = rss_size * i; | |
46a3df9f | 3066 | } |
68ece54e | 3067 | |
268f5dfa YL |
3068 | return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset); |
3069 | } | |
46a3df9f | 3070 | |
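To make the tc_size encoding above concrete: hardware receives ilog2(roundup_pow_of_two(rss_size)), so an rss_size of 24 is rounded up to 32 and encoded as 5, while tc_offset[i] keeps the unrounded rss_size * i. A small userspace sketch with stand-ins for the kernel's roundup_pow_of_two() and ilog2() helpers:

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-ins behaving like the kernel helpers of the same name. */
static uint32_t roundup_pow_of_two(uint32_t n)
{
	uint32_t p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

static uint32_t ilog2(uint32_t n)
{
	uint32_t l = 0;

	while (n >>= 1)
		l++;
	return l;
}

int main(void)
{
	uint32_t rss_size = 24;	/* example queue count per TC */
	uint32_t tc_size = ilog2(roundup_pow_of_two(rss_size));
	uint32_t i;

	printf("tc_size = %u\n", tc_size);	/* 24 -> 32 -> 5 */
	for (i = 0; i < 4; i++)			/* say, 4 enabled TCs */
		printf("tc_offset[%u] = %u\n", i, rss_size * i);
	return 0;
}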
268f5dfa YL |
3071 | void hclge_rss_indir_init_cfg(struct hclge_dev *hdev) |
3072 | { | |
3073 | struct hclge_vport *vport = hdev->vport; | |
3074 | int i, j; | |
46a3df9f | 3075 | |
268f5dfa YL |
3076 | for (j = 0; j < hdev->num_vmdq_vport + 1; j++) { |
3077 | for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) | |
3078 | vport[j].rss_indirection_tbl[i] = | |
3079 | i % vport[j].alloc_rss_size; | |
3080 | } | |
3081 | } | |
3082 | ||
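The indirection table initialized above is filled round-robin over the allocated RSS queues, so each queue appears an equal number of times (up to rounding) and receives an equal share of flows. A compact demonstration with an illustrative table size (the real HCLGE_RSS_IND_TBL_SIZE is defined in the driver header):

#include <stdio.h>

#define IND_TBL_SIZE 16	/* illustrative stand-in for HCLGE_RSS_IND_TBL_SIZE */

int main(void)
{
	unsigned char tbl[IND_TBL_SIZE];
	unsigned int alloc_rss_size = 4;
	unsigned int i;

	for (i = 0; i < IND_TBL_SIZE; i++)
		tbl[i] = i % alloc_rss_size;

	for (i = 0; i < IND_TBL_SIZE; i++)	/* prints 0 1 2 3 0 1 2 3 ... */
		printf("%u ", tbl[i]);
	printf("\n");
	return 0;
}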
3083 | static void hclge_rss_init_cfg(struct hclge_dev *hdev) | |
3084 | { | |
3085 | struct hclge_vport *vport = hdev->vport; | |
3086 | int i; | |
3087 | ||
268f5dfa YL |
3088 | for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { |
3089 | vport[i].rss_tuple_sets.ipv4_tcp_en = | |
3090 | HCLGE_RSS_INPUT_TUPLE_OTHER; | |
3091 | vport[i].rss_tuple_sets.ipv4_udp_en = | |
3092 | HCLGE_RSS_INPUT_TUPLE_OTHER; | |
3093 | vport[i].rss_tuple_sets.ipv4_sctp_en = | |
3094 | HCLGE_RSS_INPUT_TUPLE_SCTP; | |
3095 | vport[i].rss_tuple_sets.ipv4_fragment_en = | |
3096 | HCLGE_RSS_INPUT_TUPLE_OTHER; | |
3097 | vport[i].rss_tuple_sets.ipv6_tcp_en = | |
3098 | HCLGE_RSS_INPUT_TUPLE_OTHER; | |
3099 | vport[i].rss_tuple_sets.ipv6_udp_en = | |
3100 | HCLGE_RSS_INPUT_TUPLE_OTHER; | |
3101 | vport[i].rss_tuple_sets.ipv6_sctp_en = | |
3102 | HCLGE_RSS_INPUT_TUPLE_SCTP; | |
3103 | vport[i].rss_tuple_sets.ipv6_fragment_en = | |
3104 | HCLGE_RSS_INPUT_TUPLE_OTHER; | |
3105 | ||
3106 | vport[i].rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ; | |
ea739c90 FL |
3107 | |
3108 | netdev_rss_key_fill(vport[i].rss_hash_key, HCLGE_RSS_KEY_SIZE); | |
268f5dfa YL |
3109 | } |
3110 | ||
3111 | hclge_rss_indir_init_cfg(hdev); | |
46a3df9f S |
3112 | } |
3113 | ||
84e095d6 SM |
3114 | int hclge_bind_ring_with_vector(struct hclge_vport *vport, |
3115 | int vector_id, bool en, | |
3116 | struct hnae3_ring_chain_node *ring_chain) | |
46a3df9f S |
3117 | { |
3118 | struct hclge_dev *hdev = vport->back; | |
46a3df9f S |
3119 | struct hnae3_ring_chain_node *node; |
3120 | struct hclge_desc desc; | |
84e095d6 SM |
3121 | struct hclge_ctrl_vector_chain_cmd *req |
3122 | = (struct hclge_ctrl_vector_chain_cmd *)desc.data; | |
3123 | enum hclge_cmd_status status; | |
3124 | enum hclge_opcode_type op; | |
3125 | u16 tqp_type_and_id; | |
46a3df9f S |
3126 | int i; |
3127 | ||
84e095d6 SM |
3128 | op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR; |
3129 | hclge_cmd_setup_basic_desc(&desc, op, false); | |
46a3df9f S |
3130 | req->int_vector_id = vector_id; |
3131 | ||
3132 | i = 0; | |
3133 | for (node = ring_chain; node; node = node->next) { | |
84e095d6 | 3134 | tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]); |
e4e87715 PL |
3135 | hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M, |
3136 | HCLGE_INT_TYPE_S, | |
3137 | hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B)); | |
3138 | hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M, | |
3139 | HCLGE_TQP_ID_S, node->tqp_index); | |
3140 | hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M, | |
3141 | HCLGE_INT_GL_IDX_S, | |
3142 | hnae3_get_field(node->int_gl_idx, | |
3143 | HNAE3_RING_GL_IDX_M, | |
3144 | HNAE3_RING_GL_IDX_S)); | |
84e095d6 | 3145 | req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id); |
46a3df9f S |
3146 | if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) { |
3147 | req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD; | |
84e095d6 | 3148 | req->vfid = vport->vport_id; |
46a3df9f | 3149 | |
84e095d6 SM |
3150 | status = hclge_cmd_send(&hdev->hw, &desc, 1); |
3151 | if (status) { | |
46a3df9f S |
3152 | dev_err(&hdev->pdev->dev, |
3153 | "Map TQP fail, status is %d.\n", | |
84e095d6 SM |
3154 | status); |
3155 | return -EIO; | |
46a3df9f S |
3156 | } |
3157 | i = 0; | |
3158 | ||
3159 | hclge_cmd_setup_basic_desc(&desc, | |
84e095d6 | 3160 | op, |
46a3df9f S |
3161 | false); |
3162 | req->int_vector_id = vector_id; | |
3163 | } | |
3164 | } | |
3165 | ||
3166 | if (i > 0) { | |
3167 | req->int_cause_num = i; | |
84e095d6 SM |
3168 | req->vfid = vport->vport_id; |
3169 | status = hclge_cmd_send(&hdev->hw, &desc, 1); | |
3170 | if (status) { | |
46a3df9f | 3171 | dev_err(&hdev->pdev->dev, |
84e095d6 SM |
3172 | "Map TQP fail, status is %d.\n", status); |
3173 | return -EIO; | |
46a3df9f S |
3174 | } |
3175 | } | |
3176 | ||
3177 | return 0; | |
3178 | } | |
3179 | ||
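hclge_bind_ring_with_vector() batches the ring chain into firmware descriptors holding at most HCLGE_VECTOR_ELEMENTS_PER_CMD entries each: a full descriptor is flushed inside the loop, and one trailing partial descriptor is sent after it. The flush-on-full pattern in isolation, with an illustrative batch size and a stub in place of hclge_cmd_send():

#include <stdio.h>

#define ELEMS_PER_CMD 4	/* illustrative stand-in for HCLGE_VECTOR_ELEMENTS_PER_CMD */

static void send_cmd(const int *buf, int n)
{
	printf("send %d entries starting at ring %d\n", n, buf[0]);
}

int main(void)
{
	int buf[ELEMS_PER_CMD];
	int i = 0, ring;

	for (ring = 0; ring < 10; ring++) {	/* a chain of 10 rings */
		buf[i] = ring;
		if (++i >= ELEMS_PER_CMD) {	/* buffer full: flush it */
			send_cmd(buf, i);
			i = 0;
		}
	}
	if (i > 0)				/* trailing partial batch */
		send_cmd(buf, i);
	return 0;
}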
84e095d6 SM |
3180 | static int hclge_map_ring_to_vector(struct hnae3_handle *handle, |
3181 | int vector, | |
3182 | struct hnae3_ring_chain_node *ring_chain) | |
46a3df9f S |
3183 | { |
3184 | struct hclge_vport *vport = hclge_get_vport(handle); | |
3185 | struct hclge_dev *hdev = vport->back; | |
3186 | int vector_id; | |
3187 | ||
3188 | vector_id = hclge_get_vector_index(hdev, vector); | |
3189 | if (vector_id < 0) { | |
3190 | dev_err(&hdev->pdev->dev, | |
84e095d6 | 3191 | "Get vector index fail. vector_id =%d\n", vector_id); |
46a3df9f S |
3192 | return vector_id; |
3193 | } | |
3194 | ||
84e095d6 | 3195 | return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain); |
46a3df9f S |
3196 | } |
3197 | ||
84e095d6 SM |
3198 | static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, |
3199 | int vector, | |
3200 | struct hnae3_ring_chain_node *ring_chain) | |
46a3df9f S |
3201 | { |
3202 | struct hclge_vport *vport = hclge_get_vport(handle); | |
3203 | struct hclge_dev *hdev = vport->back; | |
84e095d6 | 3204 | int vector_id, ret; |
46a3df9f | 3205 | |
b50ae26c PL |
3206 | if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) |
3207 | return 0; | |
3208 | ||
46a3df9f S |
3209 | vector_id = hclge_get_vector_index(hdev, vector); |
3210 | if (vector_id < 0) { | |
3211 | dev_err(&handle->pdev->dev, | |
3212 | "Get vector index fail. ret =%d\n", vector_id); | |
3213 | return vector_id; | |
3214 | } | |
3215 | ||
84e095d6 | 3216 | ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain); |
0d3e6631 | 3217 | if (ret) |
84e095d6 SM |
3218 | dev_err(&handle->pdev->dev, |
3219 | "Unmap ring from vector fail. vectorid=%d, ret =%d\n", | |
3220 | vector_id, | |
3221 | ret); | |
46a3df9f | 3222 | |
0d3e6631 | 3223 | return ret; |
46a3df9f S |
3224 | } |
3225 | ||
3226 | int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, | |
3227 | struct hclge_promisc_param *param) | |
3228 | { | |
d44f9b63 | 3229 | struct hclge_promisc_cfg_cmd *req; |
46a3df9f S |
3230 | struct hclge_desc desc; |
3231 | int ret; | |
3232 | ||
3233 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false); | |
3234 | ||
d44f9b63 | 3235 | req = (struct hclge_promisc_cfg_cmd *)desc.data; |
46a3df9f | 3236 | req->vf_id = param->vf_id; |
96c0e861 PL |
3237 | |
3238 | /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
3239 | * pdev revision(0x20); newer revisions support them. Setting
3240 | * these two fields does not return an error when the driver
3241 | * sends the command to the firmware on revision(0x20).
3242 | */ | |
3243 | req->flag = (param->enable << HCLGE_PROMISC_EN_B) | | |
3244 | HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B; | |
46a3df9f S |
3245 | |
3246 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
3f639907 | 3247 | if (ret) |
46a3df9f S |
3248 | dev_err(&hdev->pdev->dev, |
3249 | "Set promisc mode fail, status is %d.\n", ret); | |
3f639907 JS |
3250 | |
3251 | return ret; | |
46a3df9f S |
3252 | } |
3253 | ||
3254 | void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc, | |
3255 | bool en_mc, bool en_bc, int vport_id) | |
3256 | { | |
3257 | if (!param) | |
3258 | return; | |
3259 | ||
3260 | memset(param, 0, sizeof(struct hclge_promisc_param)); | |
3261 | if (en_uc) | |
3262 | param->enable = HCLGE_PROMISC_EN_UC; | |
3263 | if (en_mc) | |
3264 | param->enable |= HCLGE_PROMISC_EN_MC; | |
3265 | if (en_bc) | |
3266 | param->enable |= HCLGE_PROMISC_EN_BC; | |
3267 | param->vf_id = vport_id; | |
3268 | } | |
3269 | ||
3b75c3df PL |
3270 | static void hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc, |
3271 | bool en_mc_pmc) | |
46a3df9f S |
3272 | { |
3273 | struct hclge_vport *vport = hclge_get_vport(handle); | |
3274 | struct hclge_dev *hdev = vport->back; | |
3275 | struct hclge_promisc_param param; | |
3276 | ||
3b75c3df PL |
3277 | hclge_promisc_param_init(¶m, en_uc_pmc, en_mc_pmc, true, |
3278 | vport->vport_id); | |
46a3df9f S |
3279 | hclge_cmd_set_promisc_mode(hdev, ¶m); |
3280 | } | |
3281 | ||
d695964d JS |
3282 | static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode) |
3283 | { | |
3284 | struct hclge_get_fd_mode_cmd *req; | |
3285 | struct hclge_desc desc; | |
3286 | int ret; | |
3287 | ||
3288 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true); | |
3289 | ||
3290 | req = (struct hclge_get_fd_mode_cmd *)desc.data; | |
3291 | ||
3292 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
3293 | if (ret) { | |
3294 | dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret); | |
3295 | return ret; | |
3296 | } | |
3297 | ||
3298 | *fd_mode = req->mode; | |
3299 | ||
3300 | return ret; | |
3301 | } | |
3302 | ||
3303 | static int hclge_get_fd_allocation(struct hclge_dev *hdev, | |
3304 | u32 *stage1_entry_num, | |
3305 | u32 *stage2_entry_num, | |
3306 | u16 *stage1_counter_num, | |
3307 | u16 *stage2_counter_num) | |
3308 | { | |
3309 | struct hclge_get_fd_allocation_cmd *req; | |
3310 | struct hclge_desc desc; | |
3311 | int ret; | |
3312 | ||
3313 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true); | |
3314 | ||
3315 | req = (struct hclge_get_fd_allocation_cmd *)desc.data; | |
3316 | ||
3317 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
3318 | if (ret) { | |
3319 | dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n", | |
3320 | ret); | |
3321 | return ret; | |
3322 | } | |
3323 | ||
3324 | *stage1_entry_num = le32_to_cpu(req->stage1_entry_num); | |
3325 | *stage2_entry_num = le32_to_cpu(req->stage2_entry_num); | |
3326 | *stage1_counter_num = le16_to_cpu(req->stage1_counter_num); | |
3327 | *stage2_counter_num = le16_to_cpu(req->stage2_counter_num); | |
3328 | ||
3329 | return ret; | |
3330 | } | |
3331 | ||
3332 | static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num) | |
3333 | { | |
3334 | struct hclge_set_fd_key_config_cmd *req; | |
3335 | struct hclge_fd_key_cfg *stage; | |
3336 | struct hclge_desc desc; | |
3337 | int ret; | |
3338 | ||
3339 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false); | |
3340 | ||
3341 | req = (struct hclge_set_fd_key_config_cmd *)desc.data; | |
3342 | stage = &hdev->fd_cfg.key_cfg[stage_num]; | |
3343 | req->stage = stage_num; | |
3344 | req->key_select = stage->key_sel; | |
3345 | req->inner_sipv6_word_en = stage->inner_sipv6_word_en; | |
3346 | req->inner_dipv6_word_en = stage->inner_dipv6_word_en; | |
3347 | req->outer_sipv6_word_en = stage->outer_sipv6_word_en; | |
3348 | req->outer_dipv6_word_en = stage->outer_dipv6_word_en; | |
3349 | req->tuple_mask = cpu_to_le32(~stage->tuple_active); | |
3350 | req->meta_data_mask = cpu_to_le32(~stage->meta_data_active); | |
3351 | ||
3352 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
3353 | if (ret) | |
3354 | dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret); | |
3355 | ||
3356 | return ret; | |
3357 | } | |
3358 | ||
3359 | static int hclge_init_fd_config(struct hclge_dev *hdev) | |
3360 | { | |
3361 | #define LOW_2_WORDS 0x03 | |
3362 | struct hclge_fd_key_cfg *key_cfg; | |
3363 | int ret; | |
3364 | ||
3365 | if (!hnae3_dev_fd_supported(hdev)) | |
3366 | return 0; | |
3367 | ||
3368 | ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode); | |
3369 | if (ret) | |
3370 | return ret; | |
3371 | ||
3372 | switch (hdev->fd_cfg.fd_mode) { | |
3373 | case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1: | |
3374 | hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH; | |
3375 | break; | |
3376 | case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1: | |
3377 | hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2; | |
3378 | break; | |
3379 | default: | |
3380 | dev_err(&hdev->pdev->dev, | |
3381 | "Unsupported flow director mode %d\n", | |
3382 | hdev->fd_cfg.fd_mode); | |
3383 | return -EOPNOTSUPP; | |
3384 | } | |
3385 | ||
3386 | hdev->fd_cfg.fd_en = true; | |
3387 | hdev->fd_cfg.proto_support = | |
3388 | TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW | | |
3389 | UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW; | |
3390 | key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1]; | |
3391 | key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
3392 | key_cfg->inner_sipv6_word_en = LOW_2_WORDS; | |
3393 | key_cfg->inner_dipv6_word_en = LOW_2_WORDS; | |
3394 | key_cfg->outer_sipv6_word_en = 0; | |
3395 | key_cfg->outer_dipv6_word_en = 0; | |
3396 | ||
3397 | key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) | | |
3398 | BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) | | |
3399 | BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | | |
3400 | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT); | |
3401 | ||
3402 | /* If the max 400-bit key is used, we can also support tuples for ether type */
3403 | if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) { | |
3404 | hdev->fd_cfg.proto_support |= ETHER_FLOW; | |
3405 | key_cfg->tuple_active |= | |
3406 | BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC); | |
3407 | } | |
3408 | ||
3409 | /* roce_type is used to filter RoCE frames;
3410 | * dst_vport is used to specify the destination vport of the rule
3411 | */ | |
3412 | key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT); | |
3413 | ||
3414 | ret = hclge_get_fd_allocation(hdev, | |
3415 | &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1], | |
3416 | &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2], | |
3417 | &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1], | |
3418 | &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]); | |
3419 | if (ret) | |
3420 | return ret; | |
3421 | ||
3422 | return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1); | |
3423 | } | |
3424 | ||
11732868 JS |
3425 | static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x, |
3426 | int loc, u8 *key, bool is_add) | |
3427 | { | |
3428 | struct hclge_fd_tcam_config_1_cmd *req1; | |
3429 | struct hclge_fd_tcam_config_2_cmd *req2; | |
3430 | struct hclge_fd_tcam_config_3_cmd *req3; | |
3431 | struct hclge_desc desc[3]; | |
3432 | int ret; | |
3433 | ||
3434 | hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false); | |
3435 | desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); | |
3436 | hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false); | |
3437 | desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); | |
3438 | hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false); | |
3439 | ||
3440 | req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data; | |
3441 | req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data; | |
3442 | req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data; | |
3443 | ||
3444 | req1->stage = stage; | |
3445 | req1->xy_sel = sel_x ? 1 : 0; | |
3446 | hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0); | |
3447 | req1->index = cpu_to_le32(loc); | |
3448 | req1->entry_vld = sel_x ? is_add : 0; | |
3449 | ||
3450 | if (key) { | |
3451 | memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data)); | |
3452 | memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)], | |
3453 | sizeof(req2->tcam_data)); | |
3454 | memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) + | |
3455 | sizeof(req2->tcam_data)], sizeof(req3->tcam_data)); | |
3456 | } | |
3457 | ||
3458 | ret = hclge_cmd_send(&hdev->hw, desc, 3); | |
3459 | if (ret) | |
3460 | dev_err(&hdev->pdev->dev, | |
3461 | "config tcam key fail, ret=%d\n", | |
3462 | ret); | |
3463 | ||
3464 | return ret; | |
3465 | } | |
3466 | ||
3467 | static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc, | |
3468 | struct hclge_fd_ad_data *action) | |
3469 | { | |
3470 | struct hclge_fd_ad_config_cmd *req; | |
3471 | struct hclge_desc desc; | |
3472 | u64 ad_data = 0; | |
3473 | int ret; | |
3474 | ||
3475 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false); | |
3476 | ||
3477 | req = (struct hclge_fd_ad_config_cmd *)desc.data; | |
3478 | req->index = cpu_to_le32(loc); | |
3479 | req->stage = stage; | |
3480 | ||
3481 | hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B, | |
3482 | action->write_rule_id_to_bd); | |
3483 | hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S, | |
3484 | action->rule_id); | |
3485 | ad_data <<= 32; | |
3486 | hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet); | |
3487 | hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B, | |
3488 | action->forward_to_direct_queue); | |
3489 | hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S, | |
3490 | action->queue_id); | |
3491 | hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter); | |
3492 | hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M, | |
3493 | HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id); | |
3494 | hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage); | |
3495 | hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S, | |
3496 | action->counter_id); | |
3497 | ||
3498 | req->ad_data = cpu_to_le64(ad_data); | |
3499 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
3500 | if (ret) | |
3501 | dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret); | |
3502 | ||
3503 | return ret; | |
3504 | } | |
3505 | ||
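Note the ordering in hclge_fd_ad_config(): the rule-id bookkeeping fields are written first and then shifted up by 32, so they land in the upper word of ad_data while the drop/queue/counter action bits occupy the lower word. A standalone sketch of that packing, with assumed (not verbatim) bit positions:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t ad_data = 0;
	uint16_t rule_id = 0x12;
	uint16_t queue_id = 0x3;

	ad_data |= 1;				/* write-rule-id-to-BD flag, assumed bit 0 */
	ad_data |= (uint64_t)rule_id << 1;	/* rule id field, assumed position */
	ad_data <<= 32;				/* now in bits 63..32 */

	ad_data |= 1 << 1;			/* forward-to-direct-queue, assumed bit */
	ad_data |= (uint64_t)queue_id << 2;	/* queue id, assumed position */

	printf("ad_data = 0x%016llx\n", (unsigned long long)ad_data);
	return 0;
}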
3506 | static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y, | |
3507 | struct hclge_fd_rule *rule) | |
3508 | { | |
3509 | u16 tmp_x_s, tmp_y_s; | |
3510 | u32 tmp_x_l, tmp_y_l; | |
3511 | int i; | |
3512 | ||
3513 | if (rule->unused_tuple & tuple_bit) | |
3514 | return true; | |
3515 | ||
3516 | switch (tuple_bit) { | |
3517 | case 0: | |
3518 | return false; | |
3519 | case BIT(INNER_DST_MAC): | |
3520 | for (i = 0; i < 6; i++) { | |
3521 | calc_x(key_x[5 - i], rule->tuples.dst_mac[i], | |
3522 | rule->tuples_mask.dst_mac[i]); | |
3523 | calc_y(key_y[5 - i], rule->tuples.dst_mac[i], | |
3524 | rule->tuples_mask.dst_mac[i]); | |
3525 | } | |
3526 | ||
3527 | return true; | |
3528 | case BIT(INNER_SRC_MAC): | |
3529 | for (i = 0; i < 6; i++) { | |
3530 | calc_x(key_x[5 - i], rule->tuples.src_mac[i],
3531 | rule->tuples_mask.src_mac[i]);
3532 | calc_y(key_y[5 - i], rule->tuples.src_mac[i],
3533 | rule->tuples_mask.src_mac[i]);
3534 | } | |
3535 | ||
3536 | return true; | |
3537 | case BIT(INNER_VLAN_TAG_FST): | |
3538 | calc_x(tmp_x_s, rule->tuples.vlan_tag1, | |
3539 | rule->tuples_mask.vlan_tag1); | |
3540 | calc_y(tmp_y_s, rule->tuples.vlan_tag1, | |
3541 | rule->tuples_mask.vlan_tag1); | |
3542 | *(__le16 *)key_x = cpu_to_le16(tmp_x_s); | |
3543 | *(__le16 *)key_y = cpu_to_le16(tmp_y_s); | |
3544 | ||
3545 | return true; | |
3546 | case BIT(INNER_ETH_TYPE): | |
3547 | calc_x(tmp_x_s, rule->tuples.ether_proto, | |
3548 | rule->tuples_mask.ether_proto); | |
3549 | calc_y(tmp_y_s, rule->tuples.ether_proto, | |
3550 | rule->tuples_mask.ether_proto); | |
3551 | *(__le16 *)key_x = cpu_to_le16(tmp_x_s); | |
3552 | *(__le16 *)key_y = cpu_to_le16(tmp_y_s); | |
3553 | ||
3554 | return true; | |
3555 | case BIT(INNER_IP_TOS): | |
3556 | calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos); | |
3557 | calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos); | |
3558 | ||
3559 | return true; | |
3560 | case BIT(INNER_IP_PROTO): | |
3561 | calc_x(*key_x, rule->tuples.ip_proto, | |
3562 | rule->tuples_mask.ip_proto); | |
3563 | calc_y(*key_y, rule->tuples.ip_proto, | |
3564 | rule->tuples_mask.ip_proto); | |
3565 | ||
3566 | return true; | |
3567 | case BIT(INNER_SRC_IP): | |
3568 | calc_x(tmp_x_l, rule->tuples.src_ip[3], | |
3569 | rule->tuples_mask.src_ip[3]); | |
3570 | calc_y(tmp_y_l, rule->tuples.src_ip[3], | |
3571 | rule->tuples_mask.src_ip[3]); | |
3572 | *(__le32 *)key_x = cpu_to_le32(tmp_x_l); | |
3573 | *(__le32 *)key_y = cpu_to_le32(tmp_y_l); | |
3574 | ||
3575 | return true; | |
3576 | case BIT(INNER_DST_IP): | |
3577 | calc_x(tmp_x_l, rule->tuples.dst_ip[3], | |
3578 | rule->tuples_mask.dst_ip[3]); | |
3579 | calc_y(tmp_y_l, rule->tuples.dst_ip[3], | |
3580 | rule->tuples_mask.dst_ip[3]); | |
3581 | *(__le32 *)key_x = cpu_to_le32(tmp_x_l); | |
3582 | *(__le32 *)key_y = cpu_to_le32(tmp_y_l); | |
3583 | ||
3584 | return true; | |
3585 | case BIT(INNER_SRC_PORT): | |
3586 | calc_x(tmp_x_s, rule->tuples.src_port, | |
3587 | rule->tuples_mask.src_port); | |
3588 | calc_y(tmp_y_s, rule->tuples.src_port, | |
3589 | rule->tuples_mask.src_port); | |
3590 | *(__le16 *)key_x = cpu_to_le16(tmp_x_s); | |
3591 | *(__le16 *)key_y = cpu_to_le16(tmp_y_s); | |
3592 | ||
3593 | return true; | |
3594 | case BIT(INNER_DST_PORT): | |
3595 | calc_x(tmp_x_s, rule->tuples.dst_port, | |
3596 | rule->tuples_mask.dst_port); | |
3597 | calc_y(tmp_y_s, rule->tuples.dst_port, | |
3598 | rule->tuples_mask.dst_port); | |
3599 | *(__le16 *)key_x = cpu_to_le16(tmp_x_s); | |
3600 | *(__le16 *)key_y = cpu_to_le16(tmp_y_s); | |
3601 | ||
3602 | return true; | |
3603 | default: | |
3604 | return false; | |
3605 | } | |
3606 | } | |
3607 | ||
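The calc_x()/calc_y() macros used above are defined outside this excerpt; they turn a value/mask pair into the two TCAM key halves. A common x/y encoding, which this sketch assumes rather than quotes, is key_x = ~value & mask and key_y = value & mask, so any bit the mask leaves uncovered reads back as don't-care:

#include <stdint.h>
#include <stdio.h>

/* Assumed convention; the driver's calc_x()/calc_y() macros are authoritative. */
static void calc_xy(uint16_t val, uint16_t mask, uint16_t *x, uint16_t *y)
{
	*x = (uint16_t)(~val & mask);	/* 1 where the rule requires a 0 bit */
	*y = (uint16_t)(val & mask);	/* 1 where the rule requires a 1 bit */
}

int main(void)
{
	uint16_t x, y;

	calc_xy(80, 0xffff, &x, &y);	/* match dst_port == 80 exactly */
	printf("exact:    x=0x%04x y=0x%04x\n", x, y);

	calc_xy(80, 0x0000, &x, &y);	/* mask 0: every bit (0,0), don't care */
	printf("wildcard: x=0x%04x y=0x%04x\n", x, y);
	return 0;
}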
3608 | static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id, | |
3609 | u8 vf_id, u8 network_port_id) | |
3610 | { | |
3611 | u32 port_number = 0; | |
3612 | ||
3613 | if (port_type == HOST_PORT) { | |
3614 | hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S, | |
3615 | pf_id); | |
3616 | hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S, | |
3617 | vf_id); | |
3618 | hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT); | |
3619 | } else { | |
3620 | hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M, | |
3621 | HCLGE_NETWORK_PORT_ID_S, network_port_id); | |
3622 | hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT); | |
3623 | } | |
3624 | ||
3625 | return port_number; | |
3626 | } | |
3627 | ||
3628 | static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg, | |
3629 | __le32 *key_x, __le32 *key_y, | |
3630 | struct hclge_fd_rule *rule) | |
3631 | { | |
3632 | u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number; | |
3633 | u8 cur_pos = 0, tuple_size, shift_bits; | |
3634 | int i; | |
3635 | ||
3636 | for (i = 0; i < MAX_META_DATA; i++) { | |
3637 | tuple_size = meta_data_key_info[i].key_length; | |
3638 | tuple_bit = key_cfg->meta_data_active & BIT(i); | |
3639 | ||
3640 | switch (tuple_bit) { | |
3641 | case BIT(ROCE_TYPE): | |
3642 | hnae3_set_bit(meta_data, cur_pos, NIC_PACKET); | |
3643 | cur_pos += tuple_size; | |
3644 | break; | |
3645 | case BIT(DST_VPORT): | |
3646 | port_number = hclge_get_port_number(HOST_PORT, 0, | |
3647 | rule->vf_id, 0); | |
3648 | hnae3_set_field(meta_data, | |
3649 | GENMASK(cur_pos + tuple_size - 1, cur_pos),
3650 | cur_pos, port_number); | |
3651 | cur_pos += tuple_size; | |
3652 | break; | |
3653 | default: | |
3654 | break; | |
3655 | } | |
3656 | } | |
3657 | ||
3658 | calc_x(tmp_x, meta_data, 0xFFFFFFFF); | |
3659 | calc_y(tmp_y, meta_data, 0xFFFFFFFF); | |
3660 | shift_bits = sizeof(meta_data) * 8 - cur_pos; | |
3661 | ||
3662 | *key_x = cpu_to_le32(tmp_x << shift_bits); | |
3663 | *key_y = cpu_to_le32(tmp_y << shift_bits); | |
3664 | } | |
3665 | ||
3666 | /* A complete key is the combination of a meta data key and a tuple key.
3667 | * The meta data key is stored in the MSB region, the tuple key in the
3668 | * LSB region, and unused bits are filled with 0.
3669 | */ | |
3670 | static int hclge_config_key(struct hclge_dev *hdev, u8 stage, | |
3671 | struct hclge_fd_rule *rule) | |
3672 | { | |
3673 | struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage]; | |
3674 | u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES]; | |
3675 | u8 *cur_key_x, *cur_key_y; | |
3676 | int i, ret, tuple_size; | |
3677 | u8 meta_data_region; | |
3678 | ||
3679 | memset(key_x, 0, sizeof(key_x)); | |
3680 | memset(key_y, 0, sizeof(key_y)); | |
3681 | cur_key_x = key_x; | |
3682 | cur_key_y = key_y; | |
3683 | ||
3684 | for (i = 0; i < MAX_TUPLE; i++) {
3685 | bool tuple_valid; | |
3686 | u32 check_tuple; | |
3687 | ||
3688 | tuple_size = tuple_key_info[i].key_length / 8; | |
3689 | check_tuple = key_cfg->tuple_active & BIT(i); | |
3690 | ||
3691 | tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x, | |
3692 | cur_key_y, rule); | |
3693 | if (tuple_valid) { | |
3694 | cur_key_x += tuple_size; | |
3695 | cur_key_y += tuple_size; | |
3696 | } | |
3697 | } | |
3698 | ||
3699 | meta_data_region = hdev->fd_cfg.max_key_length / 8 - | |
3700 | MAX_META_DATA_LENGTH / 8; | |
3701 | ||
3702 | hclge_fd_convert_meta_data(key_cfg, | |
3703 | (__le32 *)(key_x + meta_data_region), | |
3704 | (__le32 *)(key_y + meta_data_region), | |
3705 | rule); | |
3706 | ||
3707 | ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y, | |
3708 | true); | |
3709 | if (ret) { | |
3710 | dev_err(&hdev->pdev->dev, | |
3711 | "fd key_y config fail, loc=%d, ret=%d\n", | |
3712 | rule->location, ret);
3713 | return ret; | |
3714 | } | |
3715 | ||
3716 | ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x, | |
3717 | true); | |
3718 | if (ret) | |
3719 | dev_err(&hdev->pdev->dev, | |
3720 | "fd key_x config fail, loc=%d, ret=%d\n", | |
3721 | rule->location, ret);
3722 | return ret; | |
3723 | } | |
3724 | ||
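The meta-data offset used above is plain byte arithmetic: the key buffer is max_key_length bits wide and its last MAX_META_DATA_LENGTH bits are reserved for meta data. Assuming the 400-bit key mode and an illustrative 32-bit meta-data region (the real MAX_META_DATA_LENGTH is defined in the driver header), the offsets work out as:

#include <stdio.h>

int main(void)
{
	int max_key_length = 400;	/* bits, the 400-bit key mode */
	int meta_data_length = 32;	/* bits, assumed value for illustration */
	int meta_data_region = max_key_length / 8 - meta_data_length / 8;

	/* tuple key occupies bytes 0..45, meta data bytes 46..49 */
	printf("meta data starts at byte %d of %d\n",
	       meta_data_region, max_key_length / 8);
	return 0;
}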
3725 | static int hclge_config_action(struct hclge_dev *hdev, u8 stage, | |
3726 | struct hclge_fd_rule *rule) | |
3727 | { | |
3728 | struct hclge_fd_ad_data ad_data; | |
3729 | ||
3730 | ad_data.ad_id = rule->location; | |
3731 | ||
3732 | if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) { | |
3733 | ad_data.drop_packet = true; | |
3734 | ad_data.forward_to_direct_queue = false; | |
3735 | ad_data.queue_id = 0; | |
3736 | } else { | |
3737 | ad_data.drop_packet = false; | |
3738 | ad_data.forward_to_direct_queue = true; | |
3739 | ad_data.queue_id = rule->queue_id; | |
3740 | } | |
3741 | ||
3742 | ad_data.use_counter = false; | |
3743 | ad_data.counter_id = 0; | |
3744 | ||
3745 | ad_data.use_next_stage = false; | |
3746 | ad_data.next_input_key = 0; | |
3747 | ||
3748 | ad_data.write_rule_id_to_bd = true; | |
3749 | ad_data.rule_id = rule->location; | |
3750 | ||
3751 | return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data); | |
3752 | } | |
3753 | ||
dd74f815 JS |
3754 | static int hclge_fd_check_spec(struct hclge_dev *hdev, |
3755 | struct ethtool_rx_flow_spec *fs, u32 *unused) | |
3756 | { | |
3757 | struct ethtool_tcpip4_spec *tcp_ip4_spec; | |
3758 | struct ethtool_usrip4_spec *usr_ip4_spec; | |
3759 | struct ethtool_tcpip6_spec *tcp_ip6_spec; | |
3760 | struct ethtool_usrip6_spec *usr_ip6_spec; | |
3761 | struct ethhdr *ether_spec; | |
3762 | ||
3763 | if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) | |
3764 | return -EINVAL; | |
3765 | ||
3766 | if (!(fs->flow_type & hdev->fd_cfg.proto_support)) | |
3767 | return -EOPNOTSUPP; | |
3768 | ||
3769 | if ((fs->flow_type & FLOW_EXT) && | |
3770 | (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) { | |
3771 | dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n"); | |
3772 | return -EOPNOTSUPP; | |
3773 | } | |
3774 | ||
3775 | switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { | |
3776 | case SCTP_V4_FLOW: | |
3777 | case TCP_V4_FLOW: | |
3778 | case UDP_V4_FLOW: | |
3779 | tcp_ip4_spec = &fs->h_u.tcp_ip4_spec; | |
3780 | *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC); | |
3781 | ||
3782 | if (!tcp_ip4_spec->ip4src) | |
3783 | *unused |= BIT(INNER_SRC_IP); | |
3784 | ||
3785 | if (!tcp_ip4_spec->ip4dst) | |
3786 | *unused |= BIT(INNER_DST_IP); | |
3787 | ||
3788 | if (!tcp_ip4_spec->psrc) | |
3789 | *unused |= BIT(INNER_SRC_PORT); | |
3790 | ||
3791 | if (!tcp_ip4_spec->pdst) | |
3792 | *unused |= BIT(INNER_DST_PORT); | |
3793 | ||
3794 | if (!tcp_ip4_spec->tos) | |
3795 | *unused |= BIT(INNER_IP_TOS); | |
3796 | ||
3797 | break; | |
3798 | case IP_USER_FLOW: | |
3799 | usr_ip4_spec = &fs->h_u.usr_ip4_spec; | |
3800 | *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | | |
3801 | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT); | |
3802 | ||
3803 | if (!usr_ip4_spec->ip4src) | |
3804 | *unused |= BIT(INNER_SRC_IP); | |
3805 | ||
3806 | if (!usr_ip4_spec->ip4dst) | |
3807 | *unused |= BIT(INNER_DST_IP); | |
3808 | ||
3809 | if (!usr_ip4_spec->tos) | |
3810 | *unused |= BIT(INNER_IP_TOS); | |
3811 | ||
3812 | if (!usr_ip4_spec->proto) | |
3813 | *unused |= BIT(INNER_IP_PROTO); | |
3814 | ||
3815 | if (usr_ip4_spec->l4_4_bytes) | |
3816 | return -EOPNOTSUPP; | |
3817 | ||
3818 | if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4) | |
3819 | return -EOPNOTSUPP; | |
3820 | ||
3821 | break; | |
3822 | case SCTP_V6_FLOW: | |
3823 | case TCP_V6_FLOW: | |
3824 | case UDP_V6_FLOW: | |
3825 | tcp_ip6_spec = &fs->h_u.tcp_ip6_spec; | |
3826 | *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | | |
3827 | BIT(INNER_IP_TOS); | |
3828 | ||
3829 | if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] && | |
3830 | !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3]) | |
3831 | *unused |= BIT(INNER_SRC_IP); | |
3832 | ||
3833 | if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] && | |
3834 | !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3]) | |
3835 | *unused |= BIT(INNER_DST_IP); | |
3836 | ||
3837 | if (!tcp_ip6_spec->psrc) | |
3838 | *unused |= BIT(INNER_SRC_PORT); | |
3839 | ||
3840 | if (!tcp_ip6_spec->pdst) | |
3841 | *unused |= BIT(INNER_DST_PORT); | |
3842 | ||
3843 | if (tcp_ip6_spec->tclass) | |
3844 | return -EOPNOTSUPP; | |
3845 | ||
3846 | break; | |
3847 | case IPV6_USER_FLOW: | |
3848 | usr_ip6_spec = &fs->h_u.usr_ip6_spec; | |
3849 | *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | | |
3850 | BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | | |
3851 | BIT(INNER_DST_PORT); | |
3852 | ||
3853 | if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] && | |
3854 | !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3]) | |
3855 | *unused |= BIT(INNER_SRC_IP); | |
3856 | ||
3857 | if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] && | |
3858 | !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3]) | |
3859 | *unused |= BIT(INNER_DST_IP); | |
3860 | ||
3861 | if (!usr_ip6_spec->l4_proto) | |
3862 | *unused |= BIT(INNER_IP_PROTO); | |
3863 | ||
3864 | if (usr_ip6_spec->tclass) | |
3865 | return -EOPNOTSUPP; | |
3866 | ||
3867 | if (usr_ip6_spec->l4_4_bytes) | |
3868 | return -EOPNOTSUPP; | |
3869 | ||
3870 | break; | |
3871 | case ETHER_FLOW: | |
3872 | ether_spec = &fs->h_u.ether_spec; | |
3873 | *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | | |
3874 | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) | | |
3875 | BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO); | |
3876 | ||
3877 | if (is_zero_ether_addr(ether_spec->h_source)) | |
3878 | *unused |= BIT(INNER_SRC_MAC); | |
3879 | ||
3880 | if (is_zero_ether_addr(ether_spec->h_dest)) | |
3881 | *unused |= BIT(INNER_DST_MAC); | |
3882 | ||
3883 | if (!ether_spec->h_proto) | |
3884 | *unused |= BIT(INNER_ETH_TYPE); | |
3885 | ||
3886 | break; | |
3887 | default: | |
3888 | return -EOPNOTSUPP; | |
3889 | } | |
3890 | ||
3891 | if ((fs->flow_type & FLOW_EXT)) { | |
3892 | if (fs->h_ext.vlan_etype) | |
3893 | return -EOPNOTSUPP; | |
3894 | if (!fs->h_ext.vlan_tci) | |
3895 | *unused |= BIT(INNER_VLAN_TAG_FST); | |
3896 | ||
3897 | if (fs->m_ext.vlan_tci) { | |
3898 | if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) | |
3899 | return -EINVAL; | |
3900 | } | |
3901 | } else { | |
3902 | *unused |= BIT(INNER_VLAN_TAG_FST); | |
3903 | } | |
3904 | ||
3905 | if (fs->flow_type & FLOW_MAC_EXT) { | |
3906 | if (!(hdev->fd_cfg.proto_support & ETHER_FLOW)) | |
3907 | return -EOPNOTSUPP; | |
3908 | ||
3909 | if (is_zero_ether_addr(fs->h_ext.h_dest)) | |
3910 | *unused |= BIT(INNER_DST_MAC); | |
3911 | else | |
3912 | *unused &= ~(BIT(INNER_DST_MAC)); | |
3913 | } | |
3914 | ||
3915 | return 0; | |
3916 | } | |
3917 | ||
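The net result of all these checks is the unused bitmap: one bit per tuple the rule leaves wildcarded, which hclge_fd_convert_tuple() later consults to leave the corresponding key bytes zeroed. How the bitmap falls out for a tcp4 rule that matches only the destination port (tuple bit positions below are illustrative; the real enum lives in the driver header):

#include <stdint.h>
#include <stdio.h>

enum {	/* illustrative tuple bit positions */
	SRC_MAC, DST_MAC, IP_TOS, SRC_IP, DST_IP, SRC_PORT, DST_PORT,
};

int main(void)
{
	/* tcp4 spec with only the destination port filled in */
	uint32_t ip4src = 0, ip4dst = 0, psrc = 0, pdst = 80, tos = 0;
	uint32_t unused = (1u << SRC_MAC) | (1u << DST_MAC); /* never keyed for tcp4 */

	if (!ip4src)
		unused |= 1u << SRC_IP;
	if (!ip4dst)
		unused |= 1u << DST_IP;
	if (!psrc)
		unused |= 1u << SRC_PORT;
	if (!pdst)
		unused |= 1u << DST_PORT;
	if (!tos)
		unused |= 1u << IP_TOS;

	printf("unused = 0x%x\n", unused);	/* only the DST_PORT bit stays clear */
	return 0;
}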
3918 | static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location) | |
3919 | { | |
3920 | struct hclge_fd_rule *rule = NULL; | |
3921 | struct hlist_node *node2; | |
3922 | ||
3923 | hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) { | |
3924 | if (rule->location >= location) | |
3925 | break; | |
3926 | } | |
3927 | ||
3928 | return rule && rule->location == location; | |
3929 | } | |
3930 | ||
3931 | static int hclge_fd_update_rule_list(struct hclge_dev *hdev, | |
3932 | struct hclge_fd_rule *new_rule, | |
3933 | u16 location, | |
3934 | bool is_add) | |
3935 | { | |
3936 | struct hclge_fd_rule *rule = NULL, *parent = NULL; | |
3937 | struct hlist_node *node2; | |
3938 | ||
3939 | if (is_add && !new_rule) | |
3940 | return -EINVAL; | |
3941 | ||
3942 | hlist_for_each_entry_safe(rule, node2, | |
3943 | &hdev->fd_rule_list, rule_node) { | |
3944 | if (rule->location >= location) | |
3945 | break; | |
3946 | parent = rule; | |
3947 | } | |
3948 | ||
3949 | if (rule && rule->location == location) { | |
3950 | hlist_del(&rule->rule_node); | |
3951 | kfree(rule); | |
3952 | hdev->hclge_fd_rule_num--; | |
3953 | ||
3954 | if (!is_add) | |
3955 | return 0; | |
3956 | ||
3957 | } else if (!is_add) { | |
3958 | dev_err(&hdev->pdev->dev, | |
3959 | "delete fail, rule %d is inexistent\n", | |
3960 | location); | |
3961 | return -EINVAL; | |
3962 | } | |
3963 | ||
3964 | INIT_HLIST_NODE(&new_rule->rule_node); | |
3965 | ||
3966 | if (parent) | |
3967 | hlist_add_behind(&new_rule->rule_node, &parent->rule_node); | |
3968 | else | |
3969 | hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list); | |
3970 | ||
3971 | hdev->hclge_fd_rule_num++; | |
3972 | ||
3973 | return 0; | |
3974 | } | |
3975 | ||
3976 | static int hclge_fd_get_tuple(struct hclge_dev *hdev, | |
3977 | struct ethtool_rx_flow_spec *fs, | |
3978 | struct hclge_fd_rule *rule) | |
3979 | { | |
3980 | u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT); | |
3981 | ||
3982 | switch (flow_type) { | |
3983 | case SCTP_V4_FLOW: | |
3984 | case TCP_V4_FLOW: | |
3985 | case UDP_V4_FLOW: | |
3986 | rule->tuples.src_ip[3] = | |
3987 | be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src); | |
3988 | rule->tuples_mask.src_ip[3] = | |
3989 | be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src); | |
3990 | ||
3991 | rule->tuples.dst_ip[3] = | |
3992 | be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst); | |
3993 | rule->tuples_mask.dst_ip[3] = | |
3994 | be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst); | |
3995 | ||
3996 | rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc); | |
3997 | rule->tuples_mask.src_port = | |
3998 | be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc); | |
3999 | ||
4000 | rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst); | |
4001 | rule->tuples_mask.dst_port = | |
4002 | be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst); | |
4003 | ||
4004 | rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos; | |
4005 | rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos; | |
4006 | ||
4007 | rule->tuples.ether_proto = ETH_P_IP; | |
4008 | rule->tuples_mask.ether_proto = 0xFFFF; | |
4009 | ||
4010 | break; | |
4011 | case IP_USER_FLOW: | |
4012 | rule->tuples.src_ip[3] = | |
4013 | be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src); | |
4014 | rule->tuples_mask.src_ip[3] = | |
4015 | be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src); | |
4016 | ||
4017 | rule->tuples.dst_ip[3] = | |
4018 | be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst); | |
4019 | rule->tuples_mask.dst_ip[3] = | |
4020 | be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst); | |
4021 | ||
4022 | rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos; | |
4023 | rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos; | |
4024 | ||
4025 | rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto; | |
4026 | rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto; | |
4027 | ||
4028 | rule->tuples.ether_proto = ETH_P_IP; | |
4029 | rule->tuples_mask.ether_proto = 0xFFFF; | |
4030 | ||
4031 | break; | |
4032 | case SCTP_V6_FLOW: | |
4033 | case TCP_V6_FLOW: | |
4034 | case UDP_V6_FLOW: | |
4035 | be32_to_cpu_array(rule->tuples.src_ip, | |
4036 | fs->h_u.tcp_ip6_spec.ip6src, 4); | |
4037 | be32_to_cpu_array(rule->tuples_mask.src_ip, | |
4038 | fs->m_u.tcp_ip6_spec.ip6src, 4); | |
4039 | ||
4040 | be32_to_cpu_array(rule->tuples.dst_ip, | |
4041 | fs->h_u.tcp_ip6_spec.ip6dst, 4); | |
4042 | be32_to_cpu_array(rule->tuples_mask.dst_ip, | |
4043 | fs->m_u.tcp_ip6_spec.ip6dst, 4); | |
4044 | ||
4045 | rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc); | |
4046 | rule->tuples_mask.src_port = | |
4047 | be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc); | |
4048 | ||
4049 | rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst); | |
4050 | rule->tuples_mask.dst_port = | |
4051 | be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst); | |
4052 | ||
4053 | rule->tuples.ether_proto = ETH_P_IPV6; | |
4054 | rule->tuples_mask.ether_proto = 0xFFFF; | |
4055 | ||
4056 | break; | |
4057 | case IPV6_USER_FLOW: | |
4058 | be32_to_cpu_array(rule->tuples.src_ip, | |
4059 | fs->h_u.usr_ip6_spec.ip6src, 4); | |
4060 | be32_to_cpu_array(rule->tuples_mask.src_ip, | |
4061 | fs->m_u.usr_ip6_spec.ip6src, 4); | |
4062 | ||
4063 | be32_to_cpu_array(rule->tuples.dst_ip, | |
4064 | fs->h_u.usr_ip6_spec.ip6dst, 4); | |
4065 | be32_to_cpu_array(rule->tuples_mask.dst_ip, | |
4066 | fs->m_u.usr_ip6_spec.ip6dst, 4); | |
4067 | ||
4068 | rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto; | |
4069 | rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto; | |
4070 | ||
4071 | rule->tuples.ether_proto = ETH_P_IPV6; | |
4072 | rule->tuples_mask.ether_proto = 0xFFFF; | |
4073 | ||
4074 | break; | |
4075 | case ETHER_FLOW: | |
4076 | ether_addr_copy(rule->tuples.src_mac, | |
4077 | fs->h_u.ether_spec.h_source); | |
4078 | ether_addr_copy(rule->tuples_mask.src_mac, | |
4079 | fs->m_u.ether_spec.h_source); | |
4080 | ||
4081 | ether_addr_copy(rule->tuples.dst_mac, | |
4082 | fs->h_u.ether_spec.h_dest); | |
4083 | ether_addr_copy(rule->tuples_mask.dst_mac, | |
4084 | fs->m_u.ether_spec.h_dest); | |
4085 | ||
4086 | rule->tuples.ether_proto = | |
4087 | be16_to_cpu(fs->h_u.ether_spec.h_proto); | |
4088 | rule->tuples_mask.ether_proto = | |
4089 | be16_to_cpu(fs->m_u.ether_spec.h_proto); | |
4090 | ||
4091 | break; | |
4092 | default: | |
4093 | return -EOPNOTSUPP; | |
4094 | } | |
4095 | ||
4096 | switch (flow_type) { | |
4097 | case SCTP_V4_FLOW: | |
4098 | case SCTP_V6_FLOW: | |
4099 | rule->tuples.ip_proto = IPPROTO_SCTP; | |
4100 | rule->tuples_mask.ip_proto = 0xFF; | |
4101 | break; | |
4102 | case TCP_V4_FLOW: | |
4103 | case TCP_V6_FLOW: | |
4104 | rule->tuples.ip_proto = IPPROTO_TCP; | |
4105 | rule->tuples_mask.ip_proto = 0xFF; | |
4106 | break; | |
4107 | case UDP_V4_FLOW: | |
4108 | case UDP_V6_FLOW: | |
4109 | rule->tuples.ip_proto = IPPROTO_UDP; | |
4110 | rule->tuples_mask.ip_proto = 0xFF; | |
4111 | break; | |
4112 | default: | |
4113 | break; | |
4114 | } | |
4115 | ||
4116 | if ((fs->flow_type & FLOW_EXT)) { | |
4117 | rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci); | |
4118 | rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci); | |
4119 | } | |
4120 | ||
4121 | if (fs->flow_type & FLOW_MAC_EXT) { | |
4122 | ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest); | |
4123 | ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest); | |
4124 | } | |
4125 | ||
4126 | return 0; | |
4127 | } | |
4128 | ||
4129 | static int hclge_add_fd_entry(struct hnae3_handle *handle, | |
4130 | struct ethtool_rxnfc *cmd) | |
4131 | { | |
4132 | struct hclge_vport *vport = hclge_get_vport(handle); | |
4133 | struct hclge_dev *hdev = vport->back; | |
4134 | u16 dst_vport_id = 0, q_index = 0; | |
4135 | struct ethtool_rx_flow_spec *fs; | |
4136 | struct hclge_fd_rule *rule; | |
4137 | u32 unused = 0; | |
4138 | u8 action; | |
4139 | int ret; | |
4140 | ||
4141 | if (!hnae3_dev_fd_supported(hdev)) | |
4142 | return -EOPNOTSUPP; | |
4143 | ||
4144 | if (!hdev->fd_cfg.fd_en) { | |
4145 | dev_warn(&hdev->pdev->dev, | |
4146 | "Please enable flow director first\n"); | |
4147 | return -EOPNOTSUPP; | |
4148 | } | |
4149 | ||
4150 | fs = (struct ethtool_rx_flow_spec *)&cmd->fs; | |
4151 | ||
4152 | ret = hclge_fd_check_spec(hdev, fs, &unused); | |
4153 | if (ret) { | |
4154 | dev_err(&hdev->pdev->dev, "Check fd spec failed\n"); | |
4155 | return ret; | |
4156 | } | |
4157 | ||
4158 | if (fs->ring_cookie == RX_CLS_FLOW_DISC) { | |
4159 | action = HCLGE_FD_ACTION_DROP_PACKET; | |
4160 | } else { | |
4161 | u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie); | |
4162 | u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie); | |
4163 | u16 tqps; | |
4164 | ||
4165 | if (vf > hdev->num_req_vfs) {
4166 | dev_err(&hdev->pdev->dev,
4167 | "Error: vf id (%d) > max vf num (%d)\n",
4168 | vf, hdev->num_req_vfs);
4169 | return -EINVAL;
4170 | }
4171 |
4172 | dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
4173 | tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
4174 |
4175 | if (ring >= tqps) {
4176 | dev_err(&hdev->pdev->dev,
4177 | "Error: queue id (%d) > max tqp num (%d)\n",
4178 | ring, tqps - 1);
4179 | return -EINVAL;
4180 | }
4181 | ||
4182 | action = HCLGE_FD_ACTION_ACCEPT_PACKET; | |
4183 | q_index = ring; | |
4184 | } | |
4185 | ||
4186 | rule = kzalloc(sizeof(*rule), GFP_KERNEL); | |
4187 | if (!rule) | |
4188 | return -ENOMEM; | |
4189 | ||
4190 | ret = hclge_fd_get_tuple(hdev, fs, rule); | |
4191 | if (ret) | |
4192 | goto free_rule; | |
4193 | ||
4194 | rule->flow_type = fs->flow_type; | |
4195 | ||
4196 | rule->location = fs->location; | |
4197 | rule->unused_tuple = unused; | |
4198 | rule->vf_id = dst_vport_id; | |
4199 | rule->queue_id = q_index; | |
4200 | rule->action = action; | |
4201 | ||
4202 | ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule); | |
4203 | if (ret) | |
4204 | goto free_rule; | |
4205 | ||
4206 | ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule); | |
4207 | if (ret) | |
4208 | goto free_rule; | |
4209 | ||
4210 | ret = hclge_fd_update_rule_list(hdev, rule, fs->location, true); | |
4211 | if (ret) | |
4212 | goto free_rule; | |
4213 | ||
4214 | return ret; | |
4215 | ||
4216 | free_rule: | |
4217 | kfree(rule); | |
4218 | return ret; | |
4219 | } | |
4220 | ||
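The ring_cookie handed in by ethtool packs both the target VF and the queue index; the ethtool_get_flow_spec_ring() and ethtool_get_flow_spec_ring_vf() helpers used above just mask and shift with the uapi constants. Composing and decomposing a cookie for VF 2, queue 5:

#include <stdint.h>
#include <stdio.h>

/* From include/uapi/linux/ethtool.h */
#define ETHTOOL_RX_FLOW_SPEC_RING	0x00000000FFFFFFFFULL
#define ETHTOOL_RX_FLOW_SPEC_RING_VF	0x000000FF00000000ULL
#define ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF 32

int main(void)
{
	uint64_t cookie = (2ULL << ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF) | 5;

	printf("vf = %llu, ring = %llu\n",
	       (unsigned long long)((cookie & ETHTOOL_RX_FLOW_SPEC_RING_VF) >>
				    ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF),
	       (unsigned long long)(cookie & ETHTOOL_RX_FLOW_SPEC_RING));
	return 0;
}

From the command line, "ethtool -N <dev> flow-type tcp4 dst-port 80 action 5 loc 1" steers matching packets to queue 5, and "action -1" requests the RX_CLS_FLOW_DISC (drop) path handled above.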
4221 | static int hclge_del_fd_entry(struct hnae3_handle *handle, | |
4222 | struct ethtool_rxnfc *cmd) | |
4223 | { | |
4224 | struct hclge_vport *vport = hclge_get_vport(handle); | |
4225 | struct hclge_dev *hdev = vport->back; | |
4226 | struct ethtool_rx_flow_spec *fs; | |
4227 | int ret; | |
4228 | ||
4229 | if (!hnae3_dev_fd_supported(hdev)) | |
4230 | return -EOPNOTSUPP; | |
4231 | ||
4232 | fs = (struct ethtool_rx_flow_spec *)&cmd->fs; | |
4233 | ||
4234 | if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) | |
4235 | return -EINVAL; | |
4236 | ||
4237 | if (!hclge_fd_rule_exist(hdev, fs->location)) { | |
4238 | dev_err(&hdev->pdev->dev, | |
4239 | "Delete fail, rule %d is inexistent\n", | |
4240 | fs->location); | |
4241 | return -ENOENT; | |
4242 | } | |
4243 | ||
4244 | ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, | |
4245 | fs->location, NULL, false); | |
4246 | if (ret) | |
4247 | return ret; | |
4248 | ||
4249 | return hclge_fd_update_rule_list(hdev, NULL, fs->location, | |
4250 | false); | |
4251 | } | |
4252 | ||
6871af29 JS |
4253 | static void hclge_del_all_fd_entries(struct hnae3_handle *handle, |
4254 | bool clear_list) | |
4255 | { | |
4256 | struct hclge_vport *vport = hclge_get_vport(handle); | |
4257 | struct hclge_dev *hdev = vport->back; | |
4258 | struct hclge_fd_rule *rule; | |
4259 | struct hlist_node *node; | |
4260 | ||
4261 | if (!hnae3_dev_fd_supported(hdev)) | |
4262 | return; | |
4263 | ||
4264 | if (clear_list) { | |
4265 | hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, | |
4266 | rule_node) { | |
4267 | hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, | |
4268 | rule->location, NULL, false); | |
4269 | hlist_del(&rule->rule_node); | |
4270 | kfree(rule); | |
4271 | hdev->hclge_fd_rule_num--; | |
4272 | } | |
4273 | } else { | |
4274 | hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, | |
4275 | rule_node) | |
4276 | hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, | |
4277 | rule->location, NULL, false); | |
4278 | } | |
4279 | } | |
4280 | ||
4281 | static int hclge_restore_fd_entries(struct hnae3_handle *handle) | |
4282 | { | |
4283 | struct hclge_vport *vport = hclge_get_vport(handle); | |
4284 | struct hclge_dev *hdev = vport->back; | |
4285 | struct hclge_fd_rule *rule; | |
4286 | struct hlist_node *node; | |
4287 | int ret; | |
4288 | ||
4289 | if (!hnae3_dev_fd_supported(hdev)) | |
4290 | return -EOPNOTSUPP; | |
4291 | ||
4292 | hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { | |
4293 | ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule); | |
4294 | if (!ret) | |
4295 | ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule); | |
4296 | ||
4297 | if (ret) { | |
4298 | dev_warn(&hdev->pdev->dev, | |
4299 | "Restore rule %d failed, remove it\n", | |
4300 | rule->location); | |
4301 | hlist_del(&rule->rule_node); | |
4302 | kfree(rule); | |
4303 | hdev->hclge_fd_rule_num--; | |
4304 | } | |
4305 | } | |
4306 | return 0; | |
4307 | } | |
4308 | ||
05c2314f JS |
4309 | static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle, |
4310 | struct ethtool_rxnfc *cmd) | |
4311 | { | |
4312 | struct hclge_vport *vport = hclge_get_vport(handle); | |
4313 | struct hclge_dev *hdev = vport->back; | |
4314 | ||
4315 | if (!hnae3_dev_fd_supported(hdev)) | |
4316 | return -EOPNOTSUPP; | |
4317 | ||
4318 | cmd->rule_cnt = hdev->hclge_fd_rule_num; | |
4319 | cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]; | |
4320 | ||
4321 | return 0; | |
4322 | } | |
4323 | ||
4324 | static int hclge_get_fd_rule_info(struct hnae3_handle *handle, | |
4325 | struct ethtool_rxnfc *cmd) | |
4326 | { | |
4327 | struct hclge_vport *vport = hclge_get_vport(handle); | |
4328 | struct hclge_fd_rule *rule = NULL; | |
4329 | struct hclge_dev *hdev = vport->back; | |
4330 | struct ethtool_rx_flow_spec *fs; | |
4331 | struct hlist_node *node2; | |
4332 | ||
4333 | if (!hnae3_dev_fd_supported(hdev)) | |
4334 | return -EOPNOTSUPP; | |
4335 | ||
4336 | fs = (struct ethtool_rx_flow_spec *)&cmd->fs; | |
4337 | ||
4338 | hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) { | |
4339 | if (rule->location >= fs->location) | |
4340 | break; | |
4341 | } | |
4342 | ||
4343 | if (!rule || fs->location != rule->location) | |
4344 | return -ENOENT; | |
4345 | ||
4346 | fs->flow_type = rule->flow_type; | |
4347 | switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { | |
4348 | case SCTP_V4_FLOW: | |
4349 | case TCP_V4_FLOW: | |
4350 | case UDP_V4_FLOW: | |
4351 | fs->h_u.tcp_ip4_spec.ip4src = | |
4352 | cpu_to_be32(rule->tuples.src_ip[3]); | |
4353 | fs->m_u.tcp_ip4_spec.ip4src = | |
4354 | rule->unused_tuple & BIT(INNER_SRC_IP) ? | |
4355 | 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]); | |
4356 | ||
4357 | fs->h_u.tcp_ip4_spec.ip4dst = | |
4358 | cpu_to_be32(rule->tuples.dst_ip[3]); | |
4359 | fs->m_u.tcp_ip4_spec.ip4dst = | |
4360 | rule->unused_tuple & BIT(INNER_DST_IP) ? | |
4361 | 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]); | |
4362 | ||
4363 | fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port); | |
4364 | fs->m_u.tcp_ip4_spec.psrc = | |
4365 | rule->unused_tuple & BIT(INNER_SRC_PORT) ? | |
4366 | 0 : cpu_to_be16(rule->tuples_mask.src_port); | |
4367 | ||
4368 | fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port); | |
4369 | fs->m_u.tcp_ip4_spec.pdst = | |
4370 | rule->unused_tuple & BIT(INNER_DST_PORT) ? | |
4371 | 0 : cpu_to_be16(rule->tuples_mask.dst_port); | |
4372 | ||
4373 | fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos; | |
4374 | fs->m_u.tcp_ip4_spec.tos = | |
4375 | rule->unused_tuple & BIT(INNER_IP_TOS) ? | |
4376 | 0 : rule->tuples_mask.ip_tos; | |
4377 | ||
4378 | break; | |
4379 | case IP_USER_FLOW: | |
4380 | fs->h_u.usr_ip4_spec.ip4src = | |
4381 | cpu_to_be32(rule->tuples.src_ip[3]); | |
4382 | fs->m_u.usr_ip4_spec.ip4src =
4383 | rule->unused_tuple & BIT(INNER_SRC_IP) ? | |
4384 | 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]); | |
4385 | ||
4386 | fs->h_u.usr_ip4_spec.ip4dst = | |
4387 | cpu_to_be32(rule->tuples.dst_ip[3]); | |
4388 | fs->m_u.usr_ip4_spec.ip4dst = | |
4389 | rule->unused_tuple & BIT(INNER_DST_IP) ? | |
4390 | 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]); | |
4391 | ||
4392 | fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos; | |
4393 | fs->m_u.usr_ip4_spec.tos = | |
4394 | rule->unused_tuple & BIT(INNER_IP_TOS) ? | |
4395 | 0 : rule->tuples_mask.ip_tos; | |
4396 | ||
4397 | fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto; | |
4398 | fs->m_u.usr_ip4_spec.proto = | |
4399 | rule->unused_tuple & BIT(INNER_IP_PROTO) ? | |
4400 | 0 : rule->tuples_mask.ip_proto; | |
4401 | ||
4402 | fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; | |
4403 | ||
4404 | break; | |
4405 | case SCTP_V6_FLOW: | |
4406 | case TCP_V6_FLOW: | |
4407 | case UDP_V6_FLOW: | |
4408 | cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src, | |
4409 | rule->tuples.src_ip, 4); | |
4410 | if (rule->unused_tuple & BIT(INNER_SRC_IP)) | |
4411 | memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4); | |
4412 | else | |
4413 | cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src, | |
4414 | rule->tuples_mask.src_ip, 4); | |
4415 | ||
4416 | cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst, | |
4417 | rule->tuples.dst_ip, 4); | |
4418 | if (rule->unused_tuple & BIT(INNER_DST_IP)) | |
4419 | memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4); | |
4420 | else | |
4421 | cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst, | |
4422 | rule->tuples_mask.dst_ip, 4); | |
4423 | ||
4424 | fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port); | |
4425 | fs->m_u.tcp_ip6_spec.psrc = | |
4426 | rule->unused_tuple & BIT(INNER_SRC_PORT) ? | |
4427 | 0 : cpu_to_be16(rule->tuples_mask.src_port); | |
4428 | ||
4429 | fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port); | |
4430 | fs->m_u.tcp_ip6_spec.pdst = | |
4431 | rule->unused_tuple & BIT(INNER_DST_PORT) ? | |
4432 | 0 : cpu_to_be16(rule->tuples_mask.dst_port); | |
4433 | ||
4434 | break; | |
4435 | case IPV6_USER_FLOW: | |
4436 | cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src, | |
4437 | rule->tuples.src_ip, 4); | |
4438 | if (rule->unused_tuple & BIT(INNER_SRC_IP)) | |
4439 | memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4); | |
4440 | else | |
4441 | cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src, | |
4442 | rule->tuples_mask.src_ip, 4); | |
4443 | ||
4444 | cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst, | |
4445 | rule->tuples.dst_ip, 4); | |
4446 | if (rule->unused_tuple & BIT(INNER_DST_IP)) | |
4447 | memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4); | |
4448 | else | |
4449 | cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst, | |
4450 | rule->tuples_mask.dst_ip, 4); | |
4451 | ||
4452 | fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto; | |
4453 | fs->m_u.usr_ip6_spec.l4_proto = | |
4454 | rule->unused_tuple & BIT(INNER_IP_PROTO) ? | |
4455 | 0 : rule->tuples_mask.ip_proto; | |
4456 | ||
4457 | break; | |
4458 | case ETHER_FLOW: | |
4459 | ether_addr_copy(fs->h_u.ether_spec.h_source, | |
4460 | rule->tuples.src_mac); | |
4461 | if (rule->unused_tuple & BIT(INNER_SRC_MAC)) | |
4462 | eth_zero_addr(fs->m_u.ether_spec.h_source); | |
4463 | else | |
4464 | ether_addr_copy(fs->m_u.ether_spec.h_source, | |
4465 | rule->tuples_mask.src_mac); | |
4466 | ||
4467 | ether_addr_copy(fs->h_u.ether_spec.h_dest, | |
4468 | rule->tuples.dst_mac); | |
4469 | if (rule->unused_tuple & BIT(INNER_DST_MAC)) | |
4470 | eth_zero_addr(fs->m_u.ether_spec.h_dest); | |
4471 | else | |
4472 | ether_addr_copy(fs->m_u.ether_spec.h_dest, | |
4473 | rule->tuples_mask.dst_mac); | |
4474 | ||
4475 | fs->h_u.ether_spec.h_proto = | |
4476 | cpu_to_be16(rule->tuples.ether_proto); | |
4477 | fs->m_u.ether_spec.h_proto = | |
4478 | rule->unused_tuple & BIT(INNER_ETH_TYPE) ? | |
4479 | 0 : cpu_to_be16(rule->tuples_mask.ether_proto); | |
4480 | ||
4481 | break; | |
4482 | default: | |
4483 | return -EOPNOTSUPP; | |
4484 | } | |
4485 | ||
4486 | if (fs->flow_type & FLOW_EXT) { | |
4487 | fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1); | |
4488 | fs->m_ext.vlan_tci = | |
4489 | rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ? | |
4490 | cpu_to_be16(VLAN_VID_MASK) : | |
4491 | cpu_to_be16(rule->tuples_mask.vlan_tag1); | |
4492 | } | |
4493 | ||
4494 | if (fs->flow_type & FLOW_MAC_EXT) { | |
4495 | ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac); | |
4496 | if (rule->unused_tuple & BIT(INNER_DST_MAC)) | |
4497 | eth_zero_addr(fs->m_ext.h_dest);
4498 | else
4499 | ether_addr_copy(fs->m_ext.h_dest,
4500 | rule->tuples_mask.dst_mac); | |
4501 | } | |
4502 | ||
4503 | if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) { | |
4504 | fs->ring_cookie = RX_CLS_FLOW_DISC; | |
4505 | } else { | |
4506 | u64 vf_id; | |
4507 | ||
4508 | fs->ring_cookie = rule->queue_id; | |
4509 | vf_id = rule->vf_id; | |
4510 | vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; | |
4511 | fs->ring_cookie |= vf_id; | |
4512 | } | |
4513 | ||
4514 | return 0; | |
4515 | } | |
4516 | ||
4517 | static int hclge_get_all_rules(struct hnae3_handle *handle, | |
4518 | struct ethtool_rxnfc *cmd, u32 *rule_locs) | |
4519 | { | |
4520 | struct hclge_vport *vport = hclge_get_vport(handle); | |
4521 | struct hclge_dev *hdev = vport->back; | |
4522 | struct hclge_fd_rule *rule; | |
4523 | struct hlist_node *node2; | |
4524 | int cnt = 0; | |
4525 | ||
4526 | if (!hnae3_dev_fd_supported(hdev)) | |
4527 | return -EOPNOTSUPP; | |
4528 | ||
4529 | cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]; | |
4530 | ||
4531 | hlist_for_each_entry_safe(rule, node2, | |
4532 | &hdev->fd_rule_list, rule_node) { | |
4533 | if (cnt == cmd->rule_cnt) | |
4534 | return -EMSGSIZE; | |
4535 | ||
4536 | rule_locs[cnt] = rule->location; | |
4537 | cnt++; | |
4538 | } | |
4539 | ||
4540 | cmd->rule_cnt = cnt; | |
4541 | ||
4542 | return 0; | |
4543 | } | |
4544 | ||
c17852a8 JS |
4545 | static void hclge_enable_fd(struct hnae3_handle *handle, bool enable) |
4546 | { | |
4547 | struct hclge_vport *vport = hclge_get_vport(handle); | |
4548 | struct hclge_dev *hdev = vport->back; | |
4549 | ||
4550 | hdev->fd_cfg.fd_en = enable; | |
4551 | if (!enable) | |
4552 | hclge_del_all_fd_entries(handle, false); | |
4553 | else | |
4554 | hclge_restore_fd_entries(handle); | |
4555 | } | |
4556 | ||
46a3df9f S |
4557 | static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) |
4558 | { | |
4559 | struct hclge_desc desc; | |
d44f9b63 YL |
4560 | struct hclge_config_mac_mode_cmd *req = |
4561 | (struct hclge_config_mac_mode_cmd *)desc.data; | |
a90bb9a5 | 4562 | u32 loop_en = 0; |
46a3df9f S |
4563 | int ret; |
4564 | ||
4565 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false); | |
e4e87715 PL |
4566 | hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable); |
4567 | hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable); | |
4568 | hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable); | |
4569 | hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable); | |
4570 | hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0); | |
4571 | hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0); | |
4572 | hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0); | |
4573 | hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0); | |
4574 | hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable); | |
4575 | hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable); | |
4576 | hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable); | |
4577 | hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable); | |
4578 | hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable); | |
4579 | hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable); | |
a90bb9a5 | 4580 | req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); |
46a3df9f S |
4581 | |
4582 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
4583 | if (ret) | |
4584 | dev_err(&hdev->pdev->dev, | |
4585 | "mac enable fail, ret =%d.\n", ret); | |
4586 | } | |
4587 | ||
eb66d503 | 4588 | static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en) |
c39c4d98 | 4589 | { |
c39c4d98 | 4590 | struct hclge_config_mac_mode_cmd *req; |
c39c4d98 YL |
4591 | struct hclge_desc desc; |
4592 | u32 loop_en; | |
4593 | int ret; | |
4594 | ||
e4d68dae YL |
4595 | req = (struct hclge_config_mac_mode_cmd *)&desc.data[0]; |
4596 | /* 1 Read out the MAC mode config first */ | 
4597 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true); | |
4598 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
4599 | if (ret) { | |
4600 | dev_err(&hdev->pdev->dev, | |
4601 | "mac loopback get fail, ret =%d.\n", ret); | |
4602 | return ret; | |
4603 | } | |
c39c4d98 | 4604 | |
e4d68dae YL |
4605 | /* 2 Then setup the loopback flag */ |
4606 | loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en); | |
e4e87715 | 4607 | hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0); |
0f29fc23 YL |
4608 | hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0); |
4609 | hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0); | |
e4d68dae YL |
4610 | |
4611 | req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); | |
c39c4d98 | 4612 | |
e4d68dae YL |
4613 | /* 3 Config mac work mode with loopback flag |
4614 | * and the original configuration parameters | 
4615 | */ | |
4616 | hclge_cmd_reuse_desc(&desc, false); | |
4617 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
4618 | if (ret) | |
4619 | dev_err(&hdev->pdev->dev, | |
4620 | "mac loopback set fail, ret =%d.\n", ret); | |
4621 | return ret; | |
4622 | } | |
c39c4d98 | 4623 | |
4dc13b96 FL |
4624 | static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en, |
4625 | enum hnae3_loop loop_mode) | |
5fd50ac3 PL |
4626 | { |
4627 | #define HCLGE_SERDES_RETRY_MS 10 | |
4628 | #define HCLGE_SERDES_RETRY_NUM 100 | |
4629 | struct hclge_serdes_lb_cmd *req; | |
4630 | struct hclge_desc desc; | |
4631 | int ret, i = 0; | |
4dc13b96 | 4632 | u8 loop_mode_b; |
5fd50ac3 | 4633 | |
d0d72bac | 4634 | req = (struct hclge_serdes_lb_cmd *)desc.data; |
5fd50ac3 PL |
4635 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false); |
4636 | ||
4dc13b96 FL |
4637 | switch (loop_mode) { |
4638 | case HNAE3_LOOP_SERIAL_SERDES: | |
4639 | loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B; | |
4640 | break; | |
4641 | case HNAE3_LOOP_PARALLEL_SERDES: | |
4642 | loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B; | |
4643 | break; | |
4644 | default: | |
4645 | dev_err(&hdev->pdev->dev, | |
4646 | "unsupported serdes loopback mode %d\n", loop_mode); | |
4647 | return -ENOTSUPP; | |
4648 | } | |
4649 | ||
5fd50ac3 | 4650 | if (en) { |
4dc13b96 FL |
4651 | req->enable = loop_mode_b; |
4652 | req->mask = loop_mode_b; | |
5fd50ac3 | 4653 | } else { |
4dc13b96 | 4654 | req->mask = loop_mode_b; |
5fd50ac3 PL |
4655 | } |
4656 | ||
4657 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
4658 | if (ret) { | |
4659 | dev_err(&hdev->pdev->dev, | |
4660 | "serdes loopback set fail, ret = %d\n", ret); | |
4661 | return ret; | |
4662 | } | |
4663 | ||
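 | /* Poll the firmware until it reports the loopback config done, | 
 | * up to HCLGE_SERDES_RETRY_NUM attempts. | 
 | */ | 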
4664 | do { | |
4665 | msleep(HCLGE_SERDES_RETRY_MS); | |
4666 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, | |
4667 | true); | |
4668 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
4669 | if (ret) { | |
4670 | dev_err(&hdev->pdev->dev, | |
4671 | "serdes loopback get, ret = %d\n", ret); | |
4672 | return ret; | |
4673 | } | |
4674 | } while (++i < HCLGE_SERDES_RETRY_NUM && | |
4675 | !(req->result & HCLGE_CMD_SERDES_DONE_B)); | |
4676 | ||
4677 | if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) { | |
4678 | dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n"); | |
4679 | return -EBUSY; | |
4680 | } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) { | |
4681 | dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n"); | |
4682 | return -EIO; | |
4683 | } | |
4684 | ||
0f29fc23 | 4685 | hclge_cfg_mac_mode(hdev, en); |
5fd50ac3 PL |
4686 | return 0; |
4687 | } | |
4688 | ||
0f29fc23 YL |
4689 | static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id, |
4690 | int stream_id, bool enable) | |
4691 | { | |
4692 | struct hclge_desc desc; | |
4693 | struct hclge_cfg_com_tqp_queue_cmd *req = | |
4694 | (struct hclge_cfg_com_tqp_queue_cmd *)desc.data; | |
4695 | int ret; | |
4696 | ||
4697 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false); | |
4698 | req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK); | |
4699 | req->stream_id = cpu_to_le16(stream_id); | |
4700 | req->enable |= enable << HCLGE_TQP_ENABLE_B; | |
4701 | ||
4702 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
4703 | if (ret) | |
4704 | dev_err(&hdev->pdev->dev, | |
4705 | "Tqp enable fail, status =%d.\n", ret); | |
4706 | return ret; | |
4707 | } | |
4708 | ||
e4d68dae YL |
4709 | static int hclge_set_loopback(struct hnae3_handle *handle, |
4710 | enum hnae3_loop loop_mode, bool en) | |
4711 | { | |
4712 | struct hclge_vport *vport = hclge_get_vport(handle); | |
4713 | struct hclge_dev *hdev = vport->back; | |
0f29fc23 | 4714 | int i, ret; |
e4d68dae YL |
4715 | |
4716 | switch (loop_mode) { | |
eb66d503 FL |
4717 | case HNAE3_LOOP_APP: |
4718 | ret = hclge_set_app_loopback(hdev, en); | |
c39c4d98 | 4719 | break; |
4dc13b96 FL |
4720 | case HNAE3_LOOP_SERIAL_SERDES: |
4721 | case HNAE3_LOOP_PARALLEL_SERDES: | |
4722 | ret = hclge_set_serdes_loopback(hdev, en, loop_mode); | |
5fd50ac3 | 4723 | break; |
c39c4d98 YL |
4724 | default: |
4725 | ret = -ENOTSUPP; | |
4726 | dev_err(&hdev->pdev->dev, | |
4727 | "loop_mode %d is not supported\n", loop_mode); | |
4728 | break; | |
4729 | } | |
4730 | ||
 | /* Do not enable the queues if loopback configuration failed */ | 
 | if (ret) | 
 | return ret; | 
 | ||
0f29fc23 YL |
4731 | for (i = 0; i < vport->alloc_tqps; i++) { |
4732 | ret = hclge_tqp_enable(hdev, i, 0, en); | |
4733 | if (ret) | |
4734 | return ret; | |
4735 | } | |
46a3df9f | 4736 | |
0f29fc23 | 4737 | return 0; |
46a3df9f S |
4738 | } |
4739 | ||
4740 | static void hclge_reset_tqp_stats(struct hnae3_handle *handle) | |
4741 | { | |
4742 | struct hclge_vport *vport = hclge_get_vport(handle); | |
4743 | struct hnae3_queue *queue; | |
4744 | struct hclge_tqp *tqp; | |
4745 | int i; | |
4746 | ||
4747 | for (i = 0; i < vport->alloc_tqps; i++) { | |
4748 | queue = handle->kinfo.tqp[i]; | |
4749 | tqp = container_of(queue, struct hclge_tqp, q); | |
4750 | memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats)); | |
4751 | } | |
4752 | } | |
4753 | ||
4754 | static int hclge_ae_start(struct hnae3_handle *handle) | |
4755 | { | |
4756 | struct hclge_vport *vport = hclge_get_vport(handle); | |
4757 | struct hclge_dev *hdev = vport->back; | |
b01b7cf1 | 4758 | int i; |
46a3df9f | 4759 | |
814e0274 PL |
4760 | for (i = 0; i < vport->alloc_tqps; i++) |
4761 | hclge_tqp_enable(hdev, i, 0, true); | |
46a3df9f | 4762 | |
46a3df9f S |
4763 | /* mac enable */ |
4764 | hclge_cfg_mac_mode(hdev, true); | |
4765 | clear_bit(HCLGE_STATE_DOWN, &hdev->state); | |
d039ef68 | 4766 | mod_timer(&hdev->service_timer, jiffies + HZ); |
be8d8cdb | 4767 | hdev->hw.mac.link = 0; |
46a3df9f | 4768 | |
b50ae26c PL |
4769 | /* reset tqp stats */ |
4770 | hclge_reset_tqp_stats(handle); | |
4771 | ||
b01b7cf1 | 4772 | hclge_mac_start_phy(hdev); |
46a3df9f | 4773 | |
46a3df9f S |
4774 | return 0; |
4775 | } | |
4776 | ||
4777 | static void hclge_ae_stop(struct hnae3_handle *handle) | |
4778 | { | |
4779 | struct hclge_vport *vport = hclge_get_vport(handle); | |
4780 | struct hclge_dev *hdev = vport->back; | |
814e0274 | 4781 | int i; |
46a3df9f | 4782 | |
2f7e4896 FL |
4783 | set_bit(HCLGE_STATE_DOWN, &hdev->state); |
4784 | ||
b50ae26c PL |
4785 | del_timer_sync(&hdev->service_timer); |
4786 | cancel_work_sync(&hdev->service_task); | |
f5be7967 | 4787 | clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state); |
b50ae26c | 4788 | |
9617f668 YL |
4789 | if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) { |
4790 | hclge_mac_stop_phy(hdev); | |
b50ae26c | 4791 | return; |
9617f668 | 4792 | } |
b50ae26c | 4793 | |
814e0274 PL |
4794 | for (i = 0; i < vport->alloc_tqps; i++) |
4795 | hclge_tqp_enable(hdev, i, 0, false); | |
46a3df9f | 4796 | |
46a3df9f S |
4797 | /* Mac disable */ |
4798 | hclge_cfg_mac_mode(hdev, false); | |
4799 | ||
4800 | hclge_mac_stop_phy(hdev); | |
4801 | ||
4802 | /* reset tqp stats */ | |
4803 | hclge_reset_tqp_stats(handle); | |
f30dfddc FL |
4804 | del_timer_sync(&hdev->service_timer); |
4805 | cancel_work_sync(&hdev->service_task); | |
4806 | hclge_update_link_status(hdev); | |
46a3df9f S |
4807 | } |
4808 | ||
4809 | static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport, | |
4810 | u16 cmdq_resp, u8 resp_code, | |
4811 | enum hclge_mac_vlan_tbl_opcode op) | |
4812 | { | |
4813 | struct hclge_dev *hdev = vport->back; | |
4814 | int return_status = -EIO; | |
4815 | ||
4816 | if (cmdq_resp) { | |
4817 | dev_err(&hdev->pdev->dev, | |
4818 | "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n", | |
4819 | cmdq_resp); | |
4820 | return -EIO; | |
4821 | } | |
4822 | ||
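 | /* Decode the firmware response code: for ADD, 0 or 1 means success, | 
 | * 2 means unicast overflow and 3 means multicast overflow; for | 
 | * REMOVE and LKUP, 1 means the entry was not found. | 
 | */ | 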
4823 | if (op == HCLGE_MAC_VLAN_ADD) { | |
4824 | if ((!resp_code) || (resp_code == 1)) { | |
4825 | return_status = 0; | |
4826 | } else if (resp_code == 2) { | |
eefd00a5 | 4827 | return_status = -ENOSPC; |
46a3df9f S |
4828 | dev_err(&hdev->pdev->dev, |
4829 | "add mac addr failed for uc_overflow.\n"); | |
4830 | } else if (resp_code == 3) { | |
eefd00a5 | 4831 | return_status = -ENOSPC; |
46a3df9f S |
4832 | dev_err(&hdev->pdev->dev, |
4833 | "add mac addr failed for mc_overflow.\n"); | |
4834 | } else { | |
4835 | dev_err(&hdev->pdev->dev, | |
4836 | "add mac addr failed for undefined, code=%d.\n", | |
4837 | resp_code); | |
4838 | } | |
4839 | } else if (op == HCLGE_MAC_VLAN_REMOVE) { | |
4840 | if (!resp_code) { | |
4841 | return_status = 0; | |
4842 | } else if (resp_code == 1) { | |
eefd00a5 | 4843 | return_status = -ENOENT; |
46a3df9f S |
4844 | dev_dbg(&hdev->pdev->dev, |
4845 | "remove mac addr failed for miss.\n"); | |
4846 | } else { | |
4847 | dev_err(&hdev->pdev->dev, | |
4848 | "remove mac addr failed for undefined, code=%d.\n", | |
4849 | resp_code); | |
4850 | } | |
4851 | } else if (op == HCLGE_MAC_VLAN_LKUP) { | |
4852 | if (!resp_code) { | |
4853 | return_status = 0; | |
4854 | } else if (resp_code == 1) { | |
eefd00a5 | 4855 | return_status = -ENOENT; |
46a3df9f S |
4856 | dev_dbg(&hdev->pdev->dev, |
4857 | "lookup mac addr failed for miss.\n"); | |
4858 | } else { | |
4859 | dev_err(&hdev->pdev->dev, | |
4860 | "lookup mac addr failed for undefined, code=%d.\n", | |
4861 | resp_code); | |
4862 | } | |
4863 | } else { | |
eefd00a5 | 4864 | return_status = -EINVAL; |
46a3df9f S |
4865 | dev_err(&hdev->pdev->dev, |
4866 | "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n", | |
4867 | op); | |
4868 | } | |
4869 | ||
4870 | return return_status; | |
4871 | } | |
4872 | ||
4873 | static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr) | |
4874 | { | |
4875 | int word_num; | |
4876 | int bit_num; | |
4877 | ||
4878 | if (vfid > 255 || vfid < 0) | |
4879 | return -EIO; | |
4880 | ||
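 | /* The function id bitmap spans two descriptors: desc[1] holds | 
 | * vfid 0-191 (six 32-bit words) and desc[2] holds vfid 192-255. | 
 | */ | 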
4881 | if (vfid >= 0 && vfid <= 191) { | |
4882 | word_num = vfid / 32; | |
4883 | bit_num = vfid % 32; | |
4884 | if (clr) | |
a90bb9a5 | 4885 | desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num)); |
46a3df9f | 4886 | else |
a90bb9a5 | 4887 | desc[1].data[word_num] |= cpu_to_le32(1 << bit_num); |
46a3df9f S |
4888 | } else { |
4889 | word_num = (vfid - 192) / 32; | |
4890 | bit_num = vfid % 32; | |
4891 | if (clr) | |
a90bb9a5 | 4892 | desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num)); |
46a3df9f | 4893 | else |
a90bb9a5 | 4894 | desc[2].data[word_num] |= cpu_to_le32(1 << bit_num); |
46a3df9f S |
4895 | } |
4896 | ||
4897 | return 0; | |
4898 | } | |
4899 | ||
4900 | static bool hclge_is_all_function_id_zero(struct hclge_desc *desc) | |
4901 | { | |
4902 | #define HCLGE_DESC_NUMBER 3 | |
4903 | #define HCLGE_FUNC_NUMBER_PER_DESC 6 | |
4904 | int i, j; | |
4905 | ||
6c39d527 | 4906 | for (i = 1; i < HCLGE_DESC_NUMBER; i++) |
46a3df9f S |
4907 | for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++) |
4908 | if (desc[i].data[j]) | |
4909 | return false; | |
4910 | ||
4911 | return true; | |
4912 | } | |
4913 | ||
d44f9b63 | 4914 | static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req, |
46a3df9f S |
4915 | const u8 *addr) |
4916 | { | |
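 | /* Pack the 6-byte MAC into the little-endian table entry: bytes | 
 | * 0-3 form mac_addr_hi32 (byte 0 in the lowest byte) and bytes | 
 | * 4-5 form mac_addr_lo16. | 
 | */ | 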
4917 | const unsigned char *mac_addr = addr; | |
4918 | u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) | | |
4919 | (mac_addr[0]) | (mac_addr[1] << 8); | |
4920 | u32 low_val = mac_addr[4] | (mac_addr[5] << 8); | |
4921 | ||
4922 | new_req->mac_addr_hi32 = cpu_to_le32(high_val); | |
4923 | new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff); | |
4924 | } | |
4925 | ||
46a3df9f | 4926 | static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport, |
d44f9b63 | 4927 | struct hclge_mac_vlan_tbl_entry_cmd *req) |
46a3df9f S |
4928 | { |
4929 | struct hclge_dev *hdev = vport->back; | |
4930 | struct hclge_desc desc; | |
4931 | u8 resp_code; | |
a90bb9a5 | 4932 | u16 retval; |
46a3df9f S |
4933 | int ret; |
4934 | ||
4935 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false); | |
4936 | ||
d44f9b63 | 4937 | memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); |
46a3df9f S |
4938 | |
4939 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
4940 | if (ret) { | |
4941 | dev_err(&hdev->pdev->dev, | |
4942 | "del mac addr failed for cmd_send, ret =%d.\n", | |
4943 | ret); | |
4944 | return ret; | |
4945 | } | |
a90bb9a5 YL |
4946 | resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; |
4947 | retval = le16_to_cpu(desc.retval); | |
46a3df9f | 4948 | |
a90bb9a5 | 4949 | return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code, |
46a3df9f S |
4950 | HCLGE_MAC_VLAN_REMOVE); |
4951 | } | |
4952 | ||
4953 | static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport, | |
d44f9b63 | 4954 | struct hclge_mac_vlan_tbl_entry_cmd *req, |
46a3df9f S |
4955 | struct hclge_desc *desc, |
4956 | bool is_mc) | |
4957 | { | |
4958 | struct hclge_dev *hdev = vport->back; | |
4959 | u8 resp_code; | |
a90bb9a5 | 4960 | u16 retval; |
46a3df9f S |
4961 | int ret; |
4962 | ||
4963 | hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true); | |
4964 | if (is_mc) { | |
4965 | desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); | |
4966 | memcpy(desc[0].data, | |
4967 | req, | |
d44f9b63 | 4968 | sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); |
46a3df9f S |
4969 | hclge_cmd_setup_basic_desc(&desc[1], |
4970 | HCLGE_OPC_MAC_VLAN_ADD, | |
4971 | true); | |
4972 | desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); | |
4973 | hclge_cmd_setup_basic_desc(&desc[2], | |
4974 | HCLGE_OPC_MAC_VLAN_ADD, | |
4975 | true); | |
4976 | ret = hclge_cmd_send(&hdev->hw, desc, 3); | |
4977 | } else { | |
4978 | memcpy(desc[0].data, | |
4979 | req, | |
d44f9b63 | 4980 | sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); |
46a3df9f S |
4981 | ret = hclge_cmd_send(&hdev->hw, desc, 1); |
4982 | } | |
4983 | if (ret) { | |
4984 | dev_err(&hdev->pdev->dev, | |
4985 | "lookup mac addr failed for cmd_send, ret =%d.\n", | |
4986 | ret); | |
4987 | return ret; | |
4988 | } | |
a90bb9a5 YL |
4989 | resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff; |
4990 | retval = le16_to_cpu(desc[0].retval); | |
46a3df9f | 4991 | |
a90bb9a5 | 4992 | return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code, |
46a3df9f S |
4993 | HCLGE_MAC_VLAN_LKUP); |
4994 | } | |
4995 | ||
4996 | static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport, | |
d44f9b63 | 4997 | struct hclge_mac_vlan_tbl_entry_cmd *req, |
46a3df9f S |
4998 | struct hclge_desc *mc_desc) |
4999 | { | |
5000 | struct hclge_dev *hdev = vport->back; | |
5001 | int cfg_status; | |
5002 | u8 resp_code; | |
a90bb9a5 | 5003 | u16 retval; |
46a3df9f S |
5004 | int ret; |
5005 | ||
5006 | if (!mc_desc) { | |
5007 | struct hclge_desc desc; | |
5008 | ||
5009 | hclge_cmd_setup_basic_desc(&desc, | |
5010 | HCLGE_OPC_MAC_VLAN_ADD, | |
5011 | false); | |
d44f9b63 YL |
5012 | memcpy(desc.data, req, |
5013 | sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); | |
46a3df9f | 5014 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); |
a90bb9a5 YL |
5015 | resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; |
5016 | retval = le16_to_cpu(desc.retval); | |
5017 | ||
5018 | cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval, | |
46a3df9f S |
5019 | resp_code, |
5020 | HCLGE_MAC_VLAN_ADD); | |
5021 | } else { | |
c3b6f755 | 5022 | hclge_cmd_reuse_desc(&mc_desc[0], false); |
46a3df9f | 5023 | mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); |
c3b6f755 | 5024 | hclge_cmd_reuse_desc(&mc_desc[1], false); |
46a3df9f | 5025 | mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); |
c3b6f755 | 5026 | hclge_cmd_reuse_desc(&mc_desc[2], false); |
46a3df9f S |
5027 | mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT); |
5028 | memcpy(mc_desc[0].data, req, | |
d44f9b63 | 5029 | sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); |
46a3df9f | 5030 | ret = hclge_cmd_send(&hdev->hw, mc_desc, 3); |
a90bb9a5 YL |
5031 | resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff; |
5032 | retval = le16_to_cpu(mc_desc[0].retval); | |
5033 | ||
5034 | cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval, | |
46a3df9f S |
5035 | resp_code, |
5036 | HCLGE_MAC_VLAN_ADD); | |
5037 | } | |
5038 | ||
5039 | if (ret) { | |
5040 | dev_err(&hdev->pdev->dev, | |
5041 | "add mac addr failed for cmd_send, ret =%d.\n", | |
5042 | ret); | |
5043 | return ret; | |
5044 | } | |
5045 | ||
5046 | return cfg_status; | |
5047 | } | |
5048 | ||
39932473 JS |
5049 | static int hclge_init_umv_space(struct hclge_dev *hdev) |
5050 | { | |
5051 | u16 allocated_size = 0; | |
5052 | int ret; | |
5053 | ||
5054 | ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size, | |
5055 | true); | |
5056 | if (ret) | |
5057 | return ret; | |
5058 | ||
5059 | if (allocated_size < hdev->wanted_umv_size) | |
5060 | dev_warn(&hdev->pdev->dev, | |
5061 | "Alloc umv space failed, want %d, get %d\n", | |
5062 | hdev->wanted_umv_size, allocated_size); | |
5063 | ||
5064 | mutex_init(&hdev->umv_mutex); | |
5065 | hdev->max_umv_size = allocated_size; | |
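 | /* Split the space into (num_req_vfs + 2) equal shares: one private | 
 | * share per function, with the leftover share and the remainder | 
 | * pooled as shared space. | 
 | */ | 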
5066 | hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2); | |
5067 | hdev->share_umv_size = hdev->priv_umv_size + | |
5068 | hdev->max_umv_size % (hdev->num_req_vfs + 2); | |
5069 | ||
5070 | return 0; | |
5071 | } | |
5072 | ||
5073 | static int hclge_uninit_umv_space(struct hclge_dev *hdev) | |
5074 | { | |
5075 | int ret; | |
5076 | ||
5077 | if (hdev->max_umv_size > 0) { | |
5078 | ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL, | |
5079 | false); | |
5080 | if (ret) | |
5081 | return ret; | |
5082 | hdev->max_umv_size = 0; | |
5083 | } | |
5084 | mutex_destroy(&hdev->umv_mutex); | |
5085 | ||
5086 | return 0; | |
5087 | } | |
5088 | ||
5089 | static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size, | |
5090 | u16 *allocated_size, bool is_alloc) | |
5091 | { | |
5092 | struct hclge_umv_spc_alc_cmd *req; | |
5093 | struct hclge_desc desc; | |
5094 | int ret; | |
5095 | ||
5096 | req = (struct hclge_umv_spc_alc_cmd *)desc.data; | |
5097 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false); | |
5098 | hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc); | |
5099 | req->space_size = cpu_to_le32(space_size); | |
5100 | ||
5101 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
5102 | if (ret) { | |
5103 | dev_err(&hdev->pdev->dev, | |
5104 | "%s umv space failed for cmd_send, ret =%d\n", | |
5105 | is_alloc ? "allocate" : "free", ret); | |
5106 | return ret; | |
5107 | } | |
5108 | ||
5109 | if (is_alloc && allocated_size) | |
5110 | *allocated_size = le32_to_cpu(desc.data[1]); | |
5111 | ||
5112 | return 0; | |
5113 | } | |
5114 | ||
5115 | static void hclge_reset_umv_space(struct hclge_dev *hdev) | |
5116 | { | |
5117 | struct hclge_vport *vport; | |
5118 | int i; | |
5119 | ||
5120 | for (i = 0; i < hdev->num_alloc_vport; i++) { | |
5121 | vport = &hdev->vport[i]; | |
5122 | vport->used_umv_num = 0; | |
5123 | } | |
5124 | ||
5125 | mutex_lock(&hdev->umv_mutex); | |
5126 | hdev->share_umv_size = hdev->priv_umv_size + | |
5127 | hdev->max_umv_size % (hdev->num_req_vfs + 2); | |
5128 | mutex_unlock(&hdev->umv_mutex); | |
5129 | } | |
5130 | ||
5131 | static bool hclge_is_umv_space_full(struct hclge_vport *vport) | |
5132 | { | |
5133 | struct hclge_dev *hdev = vport->back; | |
5134 | bool is_full; | |
5135 | ||
5136 | mutex_lock(&hdev->umv_mutex); | |
5137 | is_full = (vport->used_umv_num >= hdev->priv_umv_size && | |
5138 | hdev->share_umv_size == 0); | |
5139 | mutex_unlock(&hdev->umv_mutex); | |
5140 | ||
5141 | return is_full; | |
5142 | } | |
5143 | ||
5144 | static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free) | |
5145 | { | |
5146 | struct hclge_dev *hdev = vport->back; | |
5147 | ||
5148 | mutex_lock(&hdev->umv_mutex); | |
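 | /* Entries beyond a vport's private quota are accounted against | 
 | * the shared pool. | 
 | */ | 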
5149 | if (is_free) { | |
5150 | if (vport->used_umv_num > hdev->priv_umv_size) | |
5151 | hdev->share_umv_size++; | |
5152 | vport->used_umv_num--; | |
5153 | } else { | |
5154 | if (vport->used_umv_num >= hdev->priv_umv_size) | |
5155 | hdev->share_umv_size--; | |
5156 | vport->used_umv_num++; | |
5157 | } | |
5158 | mutex_unlock(&hdev->umv_mutex); | |
5159 | } | |
5160 | ||
46a3df9f S |
5161 | static int hclge_add_uc_addr(struct hnae3_handle *handle, |
5162 | const unsigned char *addr) | |
5163 | { | |
5164 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5165 | ||
5166 | return hclge_add_uc_addr_common(vport, addr); | |
5167 | } | |
5168 | ||
5169 | int hclge_add_uc_addr_common(struct hclge_vport *vport, | |
5170 | const unsigned char *addr) | |
5171 | { | |
5172 | struct hclge_dev *hdev = vport->back; | |
d44f9b63 | 5173 | struct hclge_mac_vlan_tbl_entry_cmd req; |
d07b6bb4 | 5174 | struct hclge_desc desc; |
a90bb9a5 | 5175 | u16 egress_port = 0; |
aa7a795e | 5176 | int ret; |
46a3df9f S |
5177 | |
5178 | /* mac addr check */ | |
5179 | if (is_zero_ether_addr(addr) || | |
5180 | is_broadcast_ether_addr(addr) || | |
5181 | is_multicast_ether_addr(addr)) { | |
5182 | dev_err(&hdev->pdev->dev, | |
5183 | "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n", | |
5184 | addr, | |
5185 | is_zero_ether_addr(addr), | |
5186 | is_broadcast_ether_addr(addr), | |
5187 | is_multicast_ether_addr(addr)); | |
5188 | return -EINVAL; | |
5189 | } | |
5190 | ||
5191 | memset(&req, 0, sizeof(req)); | |
e4e87715 | 5192 | hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); |
a90bb9a5 | 5193 | |
e4e87715 PL |
5194 | hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M, |
5195 | HCLGE_MAC_EPORT_VFID_S, vport->vport_id); | |
a90bb9a5 YL |
5196 | |
5197 | req.egress_port = cpu_to_le16(egress_port); | |
46a3df9f S |
5198 | |
5199 | hclge_prepare_mac_addr(&req, addr); | |
5200 | ||
d07b6bb4 JS |
5201 | /* Lookup the mac address in the mac_vlan table, and add | 
5202 | * it if the entry does not exist. Duplicate unicast entries | 
5203 | * are not allowed in the mac vlan table. | 
5204 | */ | |
5205 | ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false); | |
39932473 JS |
5206 | if (ret == -ENOENT) { |
5207 | if (!hclge_is_umv_space_full(vport)) { | |
5208 | ret = hclge_add_mac_vlan_tbl(vport, &req, NULL); | |
5209 | if (!ret) | |
5210 | hclge_update_umv_space(vport, false); | |
5211 | return ret; | |
5212 | } | |
5213 | ||
5214 | dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n", | |
5215 | hdev->priv_umv_size); | |
5216 | ||
5217 | return -ENOSPC; | |
5218 | } | |
d07b6bb4 JS |
5219 | |
5220 | /* check if we just hit the duplicate */ | |
5221 | if (!ret) | |
5222 | ret = -EINVAL; | |
5223 | ||
5224 | dev_err(&hdev->pdev->dev, | |
5225 | "PF failed to add unicast entry(%pM) in the MAC table\n", | |
5226 | addr); | |
46a3df9f | 5227 | |
aa7a795e | 5228 | return ret; |
46a3df9f S |
5229 | } |
5230 | ||
5231 | static int hclge_rm_uc_addr(struct hnae3_handle *handle, | |
5232 | const unsigned char *addr) | |
5233 | { | |
5234 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5235 | ||
5236 | return hclge_rm_uc_addr_common(vport, addr); | |
5237 | } | |
5238 | ||
5239 | int hclge_rm_uc_addr_common(struct hclge_vport *vport, | |
5240 | const unsigned char *addr) | |
5241 | { | |
5242 | struct hclge_dev *hdev = vport->back; | |
d44f9b63 | 5243 | struct hclge_mac_vlan_tbl_entry_cmd req; |
aa7a795e | 5244 | int ret; |
46a3df9f S |
5245 | |
5246 | /* mac addr check */ | |
5247 | if (is_zero_ether_addr(addr) || | |
5248 | is_broadcast_ether_addr(addr) || | |
5249 | is_multicast_ether_addr(addr)) { | |
5250 | dev_dbg(&hdev->pdev->dev, | |
5251 | "Remove mac err! invalid mac:%pM.\n", | |
5252 | addr); | |
5253 | return -EINVAL; | |
5254 | } | |
5255 | ||
5256 | memset(&req, 0, sizeof(req)); | |
e4e87715 PL |
5257 | hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); |
5258 | hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); | |
46a3df9f | 5259 | hclge_prepare_mac_addr(&req, addr); |
aa7a795e | 5260 | ret = hclge_remove_mac_vlan_tbl(vport, &req); |
39932473 JS |
5261 | if (!ret) |
5262 | hclge_update_umv_space(vport, true); | |
46a3df9f | 5263 | |
aa7a795e | 5264 | return ret; |
46a3df9f S |
5265 | } |
5266 | ||
5267 | static int hclge_add_mc_addr(struct hnae3_handle *handle, | |
5268 | const unsigned char *addr) | |
5269 | { | |
5270 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5271 | ||
a10829c4 | 5272 | return hclge_add_mc_addr_common(vport, addr); |
46a3df9f S |
5273 | } |
5274 | ||
5275 | int hclge_add_mc_addr_common(struct hclge_vport *vport, | |
5276 | const unsigned char *addr) | |
5277 | { | |
5278 | struct hclge_dev *hdev = vport->back; | |
d44f9b63 | 5279 | struct hclge_mac_vlan_tbl_entry_cmd req; |
46a3df9f | 5280 | struct hclge_desc desc[3]; |
46a3df9f S |
5281 | int status; |
5282 | ||
5283 | /* mac addr check */ | |
5284 | if (!is_multicast_ether_addr(addr)) { | |
5285 | dev_err(&hdev->pdev->dev, | |
5286 | "Add mc mac err! invalid mac:%pM.\n", | |
5287 | addr); | |
5288 | return -EINVAL; | |
5289 | } | |
5290 | memset(&req, 0, sizeof(req)); | |
e4e87715 PL |
5291 | hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); |
5292 | hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); | |
5293 | hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); | |
fd5f9da3 | 5294 | hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1); |
46a3df9f S |
5295 | hclge_prepare_mac_addr(&req, addr); |
5296 | status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); | |
5297 | if (!status) { | |
5298 | /* This mac addr exists, update VFID for it */ | 
5299 | hclge_update_desc_vfid(desc, vport->vport_id, false); | |
5300 | status = hclge_add_mac_vlan_tbl(vport, &req, desc); | |
5301 | } else { | |
5302 | /* This mac addr does not exist, add a new entry for it */ | 
5303 | memset(desc[0].data, 0, sizeof(desc[0].data)); | |
5304 | memset(desc[1].data, 0, sizeof(desc[1].data)); | 
5305 | memset(desc[2].data, 0, sizeof(desc[2].data)); | 
5306 | hclge_update_desc_vfid(desc, vport->vport_id, false); | |
5307 | status = hclge_add_mac_vlan_tbl(vport, &req, desc); | |
5308 | } | |
5309 | ||
1f6db589 JS |
5310 | if (status == -ENOSPC) |
5311 | dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n"); | |
46a3df9f S |
5312 | |
5313 | return status; | |
5314 | } | |
5315 | ||
5316 | static int hclge_rm_mc_addr(struct hnae3_handle *handle, | |
5317 | const unsigned char *addr) | |
5318 | { | |
5319 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5320 | ||
5321 | return hclge_rm_mc_addr_common(vport, addr); | |
5322 | } | |
5323 | ||
5324 | int hclge_rm_mc_addr_common(struct hclge_vport *vport, | |
5325 | const unsigned char *addr) | |
5326 | { | |
5327 | struct hclge_dev *hdev = vport->back; | |
d44f9b63 | 5328 | struct hclge_mac_vlan_tbl_entry_cmd req; |
46a3df9f S |
5329 | enum hclge_cmd_status status; |
5330 | struct hclge_desc desc[3]; | |
46a3df9f S |
5331 | |
5332 | /* mac addr check */ | |
5333 | if (!is_multicast_ether_addr(addr)) { | |
5334 | dev_dbg(&hdev->pdev->dev, | |
5335 | "Remove mc mac err! invalid mac:%pM.\n", | |
5336 | addr); | |
5337 | return -EINVAL; | |
5338 | } | |
5339 | ||
5340 | memset(&req, 0, sizeof(req)); | |
e4e87715 PL |
5341 | hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); |
5342 | hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); | |
5343 | hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); | |
fd5f9da3 | 5344 | hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1); |
46a3df9f S |
5345 | hclge_prepare_mac_addr(&req, addr); |
5346 | status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); | |
5347 | if (!status) { | |
5348 | /* This mac addr exists, remove this handle's VFID for it */ | 
5349 | hclge_update_desc_vfid(desc, vport->vport_id, true); | |
5350 | ||
5351 | if (hclge_is_all_function_id_zero(desc)) | |
5352 | /* All the vfids are zero, so delete this entry */ | 
5353 | status = hclge_remove_mac_vlan_tbl(vport, &req); | |
5354 | else | |
5355 | /* Not all the vfids are zero, just update the vfid */ | 
5356 | status = hclge_add_mac_vlan_tbl(vport, &req, desc); | |
5357 | ||
5358 | } else { | |
40cca1c5 XW |
5359 | /* This mac address may be in the mta table, but it cannot be | 
5360 | * deleted here because an mta entry represents an address | 
5361 | * range rather than a specific address. The delete action for | 
5362 | * all entries will take effect in update_mta_status, called by | 
5363 | * hns3_nic_set_rx_mode. | 
5364 | */ | |
5365 | status = 0; | |
46a3df9f S |
5366 | } |
5367 | ||
46a3df9f S |
5368 | return status; |
5369 | } | |
5370 | ||
f5aac71c FL |
5371 | static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev, |
5372 | u16 cmdq_resp, u8 resp_code) | |
5373 | { | |
5374 | #define HCLGE_ETHERTYPE_SUCCESS_ADD 0 | |
5375 | #define HCLGE_ETHERTYPE_ALREADY_ADD 1 | |
5376 | #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2 | |
5377 | #define HCLGE_ETHERTYPE_KEY_CONFLICT 3 | |
5378 | ||
5379 | int return_status; | |
5380 | ||
5381 | if (cmdq_resp) { | |
5382 | dev_err(&hdev->pdev->dev, | |
5383 | "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n", | |
5384 | cmdq_resp); | |
5385 | return -EIO; | |
5386 | } | |
5387 | ||
5388 | switch (resp_code) { | |
5389 | case HCLGE_ETHERTYPE_SUCCESS_ADD: | |
5390 | case HCLGE_ETHERTYPE_ALREADY_ADD: | |
5391 | return_status = 0; | |
5392 | break; | |
5393 | case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW: | |
5394 | dev_err(&hdev->pdev->dev, | |
5395 | "add mac ethertype failed for manager table overflow.\n"); | |
5396 | return_status = -EIO; | |
5397 | break; | |
5398 | case HCLGE_ETHERTYPE_KEY_CONFLICT: | |
5399 | dev_err(&hdev->pdev->dev, | |
5400 | "add mac ethertype failed for key conflict.\n"); | |
5401 | return_status = -EIO; | |
5402 | break; | |
5403 | default: | |
5404 | dev_err(&hdev->pdev->dev, | |
5405 | "add mac ethertype failed for undefined, code=%d.\n", | |
5406 | resp_code); | |
5407 | return_status = -EIO; | |
5408 | } | |
5409 | ||
5410 | return return_status; | |
5411 | } | |
5412 | ||
5413 | static int hclge_add_mgr_tbl(struct hclge_dev *hdev, | |
5414 | const struct hclge_mac_mgr_tbl_entry_cmd *req) | |
5415 | { | |
5416 | struct hclge_desc desc; | |
5417 | u8 resp_code; | |
5418 | u16 retval; | |
5419 | int ret; | |
5420 | ||
5421 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false); | |
5422 | memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd)); | |
5423 | ||
5424 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
5425 | if (ret) { | |
5426 | dev_err(&hdev->pdev->dev, | |
5427 | "add mac ethertype failed for cmd_send, ret =%d.\n", | |
5428 | ret); | |
5429 | return ret; | |
5430 | } | |
5431 | ||
5432 | resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; | |
5433 | retval = le16_to_cpu(desc.retval); | |
5434 | ||
5435 | return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code); | |
5436 | } | |
5437 | ||
5438 | static int init_mgr_tbl(struct hclge_dev *hdev) | |
5439 | { | |
5440 | int ret; | |
5441 | int i; | |
5442 | ||
5443 | for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) { | |
5444 | ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]); | |
5445 | if (ret) { | |
5446 | dev_err(&hdev->pdev->dev, | |
5447 | "add mac ethertype failed, ret =%d.\n", | |
5448 | ret); | |
5449 | return ret; | |
5450 | } | |
5451 | } | |
5452 | ||
5453 | return 0; | |
5454 | } | |
5455 | ||
46a3df9f S |
5456 | static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p) |
5457 | { | |
5458 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5459 | struct hclge_dev *hdev = vport->back; | |
5460 | ||
5461 | ether_addr_copy(p, hdev->hw.mac.mac_addr); | |
5462 | } | |
5463 | ||
59098055 FL |
5464 | static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p, |
5465 | bool is_first) | |
46a3df9f S |
5466 | { |
5467 | const unsigned char *new_addr = (const unsigned char *)p; | |
5468 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5469 | struct hclge_dev *hdev = vport->back; | |
18838d0c | 5470 | int ret; |
46a3df9f S |
5471 | |
5472 | /* mac addr check */ | |
5473 | if (is_zero_ether_addr(new_addr) || | |
5474 | is_broadcast_ether_addr(new_addr) || | |
5475 | is_multicast_ether_addr(new_addr)) { | |
5476 | dev_err(&hdev->pdev->dev, | |
5477 | "Change uc mac err! invalid mac:%p.\n", | |
5478 | new_addr); | |
5479 | return -EINVAL; | |
5480 | } | |
5481 | ||
59098055 | 5482 | if (!is_first && hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr)) |
18838d0c | 5483 | dev_warn(&hdev->pdev->dev, |
59098055 | 5484 | "remove old uc mac address fail.\n"); |
46a3df9f | 5485 | |
18838d0c FL |
5486 | ret = hclge_add_uc_addr(handle, new_addr); |
5487 | if (ret) { | |
5488 | dev_err(&hdev->pdev->dev, | |
5489 | "add uc mac address fail, ret =%d.\n", | |
5490 | ret); | |
5491 | ||
59098055 FL |
5492 | if (!is_first && |
5493 | hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr)) | |
18838d0c | 5494 | dev_err(&hdev->pdev->dev, |
59098055 | 5495 | "restore uc mac address fail.\n"); |
18838d0c FL |
5496 | |
5497 | return -EIO; | |
46a3df9f S |
5498 | } |
5499 | ||
e98d7183 | 5500 | ret = hclge_pause_addr_cfg(hdev, new_addr); |
18838d0c FL |
5501 | if (ret) { |
5502 | dev_err(&hdev->pdev->dev, | |
5503 | "configure mac pause address fail, ret =%d.\n", | |
5504 | ret); | |
5505 | return -EIO; | |
5506 | } | |
5507 | ||
5508 | ether_addr_copy(hdev->hw.mac.mac_addr, new_addr); | |
5509 | ||
5510 | return 0; | |
46a3df9f S |
5511 | } |
5512 | ||
26483246 XW |
5513 | static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr, |
5514 | int cmd) | |
5515 | { | |
5516 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5517 | struct hclge_dev *hdev = vport->back; | |
5518 | ||
5519 | if (!hdev->hw.mac.phydev) | |
5520 | return -EOPNOTSUPP; | |
5521 | ||
5522 | return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd); | |
5523 | } | |
5524 | ||
46a3df9f | 5525 | static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, |
64d114f0 | 5526 | u8 fe_type, bool filter_en) |
46a3df9f | 5527 | { |
d44f9b63 | 5528 | struct hclge_vlan_filter_ctrl_cmd *req; |
46a3df9f S |
5529 | struct hclge_desc desc; |
5530 | int ret; | |
5531 | ||
5532 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false); | |
5533 | ||
d44f9b63 | 5534 | req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data; |
46a3df9f | 5535 | req->vlan_type = vlan_type; |
64d114f0 | 5536 | req->vlan_fe = filter_en ? fe_type : 0; |
46a3df9f S |
5537 | |
5538 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
3f639907 | 5539 | if (ret) |
46a3df9f S |
5540 | dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n", |
5541 | ret); | |
46a3df9f | 5542 | |
3f639907 | 5543 | return ret; |
46a3df9f S |
5544 | } |
5545 | ||
391b5e93 JS |
5546 | #define HCLGE_FILTER_TYPE_VF 0 |
5547 | #define HCLGE_FILTER_TYPE_PORT 1 | |
64d114f0 ZL |
5548 | #define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0) |
5549 | #define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0) | |
5550 | #define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1) | |
5551 | #define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2) | |
5552 | #define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3) | |
5553 | #define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \ | |
5554 | | HCLGE_FILTER_FE_ROCE_EGRESS_B) | |
5555 | #define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \ | |
5556 | | HCLGE_FILTER_FE_ROCE_INGRESS_B) | |
391b5e93 JS |
5557 | |
5558 | static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable) | |
5559 | { | |
5560 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5561 | struct hclge_dev *hdev = vport->back; | |
5562 | ||
64d114f0 ZL |
5563 | if (hdev->pdev->revision >= 0x21) { |
5564 | hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, | |
5565 | HCLGE_FILTER_FE_EGRESS, enable); | |
5566 | hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, | |
5567 | HCLGE_FILTER_FE_INGRESS, enable); | |
5568 | } else { | |
5569 | hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, | |
5570 | HCLGE_FILTER_FE_EGRESS_V1_B, enable); | |
5571 | } | |
391b5e93 JS |
5572 | } |
5573 | ||
dc8131d8 YL |
5574 | static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid, |
5575 | bool is_kill, u16 vlan, u8 qos, | |
5576 | __be16 proto) | |
46a3df9f S |
5577 | { |
5578 | #define HCLGE_MAX_VF_BYTES 16 | |
d44f9b63 YL |
5579 | struct hclge_vlan_filter_vf_cfg_cmd *req0; |
5580 | struct hclge_vlan_filter_vf_cfg_cmd *req1; | |
46a3df9f S |
5581 | struct hclge_desc desc[2]; |
5582 | u8 vf_byte_val; | |
5583 | u8 vf_byte_off; | |
5584 | int ret; | |
5585 | ||
5586 | hclge_cmd_setup_basic_desc(&desc[0], | |
5587 | HCLGE_OPC_VLAN_FILTER_VF_CFG, false); | |
5588 | hclge_cmd_setup_basic_desc(&desc[1], | |
5589 | HCLGE_OPC_VLAN_FILTER_VF_CFG, false); | |
5590 | ||
5591 | desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); | |
5592 | ||
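 | /* The VF bitmap spans the two descriptors (HCLGE_MAX_VF_BYTES | 
 | * each); locate the byte and bit for this vfid. | 
 | */ | 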
5593 | vf_byte_off = vfid / 8; | |
5594 | vf_byte_val = 1 << (vfid % 8); | |
5595 | ||
d44f9b63 YL |
5596 | req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data; |
5597 | req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data; | |
46a3df9f | 5598 | |
a90bb9a5 | 5599 | req0->vlan_id = cpu_to_le16(vlan); |
46a3df9f S |
5600 | req0->vlan_cfg = is_kill; |
5601 | ||
5602 | if (vf_byte_off < HCLGE_MAX_VF_BYTES) | |
5603 | req0->vf_bitmap[vf_byte_off] = vf_byte_val; | |
5604 | else | |
5605 | req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val; | |
5606 | ||
5607 | ret = hclge_cmd_send(&hdev->hw, desc, 2); | |
5608 | if (ret) { | |
5609 | dev_err(&hdev->pdev->dev, | |
5610 | "Send vf vlan command fail, ret =%d.\n", | |
5611 | ret); | |
5612 | return ret; | |
5613 | } | |
5614 | ||
5615 | if (!is_kill) { | |
6c251711 | 5616 | #define HCLGE_VF_VLAN_NO_ENTRY 2 |
46a3df9f S |
5617 | if (!req0->resp_code || req0->resp_code == 1) |
5618 | return 0; | |
5619 | ||
6c251711 YL |
5620 | if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) { |
5621 | dev_warn(&hdev->pdev->dev, | |
5622 | "vf vlan table is full, vf vlan filter is disabled\n"); | |
5623 | return 0; | |
5624 | } | |
5625 | ||
46a3df9f S |
5626 | dev_err(&hdev->pdev->dev, |
5627 | "Add vf vlan filter fail, ret =%d.\n", | |
5628 | req0->resp_code); | |
5629 | } else { | |
41dafea2 | 5630 | #define HCLGE_VF_VLAN_DEL_NO_FOUND 1 |
46a3df9f S |
5631 | if (!req0->resp_code) |
5632 | return 0; | |
5633 | ||
41dafea2 YL |
5634 | if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) { |
5635 | dev_warn(&hdev->pdev->dev, | |
5636 | "vlan %d filter is not in vf vlan table\n", | |
5637 | vlan); | |
5638 | return 0; | |
5639 | } | |
5640 | ||
46a3df9f S |
5641 | dev_err(&hdev->pdev->dev, |
5642 | "Kill vf vlan filter fail, ret =%d.\n", | |
5643 | req0->resp_code); | |
5644 | } | |
5645 | ||
5646 | return -EIO; | |
5647 | } | |
5648 | ||
dc8131d8 YL |
5649 | static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto, |
5650 | u16 vlan_id, bool is_kill) | |
46a3df9f | 5651 | { |
d44f9b63 | 5652 | struct hclge_vlan_filter_pf_cfg_cmd *req; |
46a3df9f S |
5653 | struct hclge_desc desc; |
5654 | u8 vlan_offset_byte_val; | |
5655 | u8 vlan_offset_byte; | |
5656 | u8 vlan_offset_160; | |
5657 | int ret; | |
5658 | ||
5659 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false); | |
5660 | ||
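 | /* The port vlan table is programmed in 160-vlan chunks: select | 
 | * the chunk, then the byte and bit for this vlan_id. | 
 | */ | 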
5661 | vlan_offset_160 = vlan_id / 160; | |
5662 | vlan_offset_byte = (vlan_id % 160) / 8; | |
5663 | vlan_offset_byte_val = 1 << (vlan_id % 8); | |
5664 | ||
d44f9b63 | 5665 | req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data; |
46a3df9f S |
5666 | req->vlan_offset = vlan_offset_160; |
5667 | req->vlan_cfg = is_kill; | |
5668 | req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val; | |
5669 | ||
5670 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
dc8131d8 YL |
5671 | if (ret) |
5672 | dev_err(&hdev->pdev->dev, | |
5673 | "port vlan command, send fail, ret =%d.\n", ret); | |
5674 | return ret; | |
5675 | } | |
5676 | ||
5677 | static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto, | |
5678 | u16 vport_id, u16 vlan_id, u8 qos, | |
5679 | bool is_kill) | |
5680 | { | |
5681 | u16 vport_idx, vport_num = 0; | |
5682 | int ret; | |
5683 | ||
daaa8521 YL |
5684 | if (is_kill && !vlan_id) |
5685 | return 0; | |
5686 | ||
dc8131d8 YL |
5687 | ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id, |
5688 | 0, proto); | |
46a3df9f S |
5689 | if (ret) { |
5690 | dev_err(&hdev->pdev->dev, | |
dc8131d8 YL |
5691 | "Set %d vport vlan filter config fail, ret =%d.\n", |
5692 | vport_id, ret); | |
46a3df9f S |
5693 | return ret; |
5694 | } | |
5695 | ||
dc8131d8 YL |
5696 | /* vlan 0 may be added twice when 8021q module is enabled */ |
5697 | if (!is_kill && !vlan_id && | |
5698 | test_bit(vport_id, hdev->vlan_table[vlan_id])) | |
5699 | return 0; | |
5700 | ||
5701 | if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) { | |
46a3df9f | 5702 | dev_err(&hdev->pdev->dev, |
dc8131d8 YL |
5703 | "Add port vlan failed, vport %d is already in vlan %d\n", |
5704 | vport_id, vlan_id); | |
5705 | return -EINVAL; | |
46a3df9f S |
5706 | } |
5707 | ||
dc8131d8 YL |
5708 | if (is_kill && |
5709 | !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) { | |
5710 | dev_err(&hdev->pdev->dev, | |
5711 | "Delete port vlan failed, vport %d is not in vlan %d\n", | |
5712 | vport_id, vlan_id); | |
5713 | return -EINVAL; | |
5714 | } | |
5715 | ||
54e97d11 | 5716 | for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM) |
dc8131d8 YL |
5717 | vport_num++; |
5718 | ||
5719 | if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1)) | |
5720 | ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id, | |
5721 | is_kill); | |
5722 | ||
5723 | return ret; | |
5724 | } | |
5725 | ||
5726 | int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto, | |
5727 | u16 vlan_id, bool is_kill) | |
5728 | { | |
5729 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5730 | struct hclge_dev *hdev = vport->back; | |
5731 | ||
5732 | return hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, vlan_id, | |
5733 | 0, is_kill); | |
46a3df9f S |
5734 | } |
5735 | ||
5736 | static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid, | |
5737 | u16 vlan, u8 qos, __be16 proto) | |
5738 | { | |
5739 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5740 | struct hclge_dev *hdev = vport->back; | |
5741 | ||
5742 | if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7)) | |
5743 | return -EINVAL; | |
5744 | if (proto != htons(ETH_P_8021Q)) | |
5745 | return -EPROTONOSUPPORT; | |
5746 | ||
dc8131d8 | 5747 | return hclge_set_vlan_filter_hw(hdev, proto, vfid, vlan, qos, false); |
46a3df9f S |
5748 | } |
5749 | ||
5f6ea83f PL |
5750 | static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport) |
5751 | { | |
5752 | struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg; | |
5753 | struct hclge_vport_vtag_tx_cfg_cmd *req; | |
5754 | struct hclge_dev *hdev = vport->back; | |
5755 | struct hclge_desc desc; | |
5756 | int status; | |
5757 | ||
5758 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false); | |
5759 | ||
5760 | req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data; | |
5761 | req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1); | |
5762 | req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2); | |
e4e87715 PL |
5763 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B, |
5764 | vcfg->accept_tag1 ? 1 : 0); | |
5765 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B, | |
5766 | vcfg->accept_untag1 ? 1 : 0); | |
5767 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B, | |
5768 | vcfg->accept_tag2 ? 1 : 0); | |
5769 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B, | |
5770 | vcfg->accept_untag2 ? 1 : 0); | |
5771 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B, | |
5772 | vcfg->insert_tag1_en ? 1 : 0); | |
5773 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B, | |
5774 | vcfg->insert_tag2_en ? 1 : 0); | |
5775 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0); | |
5f6ea83f PL |
5776 | |
5777 | req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; | |
5778 | req->vf_bitmap[req->vf_offset] = | |
5779 | 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE); | |
5780 | ||
5781 | status = hclge_cmd_send(&hdev->hw, &desc, 1); | |
5782 | if (status) | |
5783 | dev_err(&hdev->pdev->dev, | |
5784 | "Send port txvlan cfg command fail, ret =%d\n", | |
5785 | status); | |
5786 | ||
5787 | return status; | |
5788 | } | |
5789 | ||
5790 | static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport) | |
5791 | { | |
5792 | struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg; | |
5793 | struct hclge_vport_vtag_rx_cfg_cmd *req; | |
5794 | struct hclge_dev *hdev = vport->back; | |
5795 | struct hclge_desc desc; | |
5796 | int status; | |
5797 | ||
5798 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false); | |
5799 | ||
5800 | req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data; | |
e4e87715 PL |
5801 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B, |
5802 | vcfg->strip_tag1_en ? 1 : 0); | |
5803 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B, | |
5804 | vcfg->strip_tag2_en ? 1 : 0); | |
5805 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B, | |
5806 | vcfg->vlan1_vlan_prionly ? 1 : 0); | |
5807 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B, | |
5808 | vcfg->vlan2_vlan_prionly ? 1 : 0); | |
5f6ea83f PL |
5809 | |
5810 | req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; | |
5811 | req->vf_bitmap[req->vf_offset] = | |
5812 | 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE); | |
5813 | ||
5814 | status = hclge_cmd_send(&hdev->hw, &desc, 1); | |
5815 | if (status) | |
5816 | dev_err(&hdev->pdev->dev, | |
5817 | "Send port rxvlan cfg command fail, ret =%d\n", | |
5818 | status); | |
5819 | ||
5820 | return status; | |
5821 | } | |
5822 | ||
5823 | static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev) | |
5824 | { | |
5825 | struct hclge_rx_vlan_type_cfg_cmd *rx_req; | |
5826 | struct hclge_tx_vlan_type_cfg_cmd *tx_req; | |
5827 | struct hclge_desc desc; | |
5828 | int status; | |
5829 | ||
5830 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false); | |
5831 | rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data; | |
5832 | rx_req->ot_fst_vlan_type = | |
5833 | cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type); | |
5834 | rx_req->ot_sec_vlan_type = | |
5835 | cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type); | |
5836 | rx_req->in_fst_vlan_type = | |
5837 | cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type); | |
5838 | rx_req->in_sec_vlan_type = | |
5839 | cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type); | |
5840 | ||
5841 | status = hclge_cmd_send(&hdev->hw, &desc, 1); | |
5842 | if (status) { | |
5843 | dev_err(&hdev->pdev->dev, | |
5844 | "Send rxvlan protocol type command fail, ret =%d\n", | |
5845 | status); | |
5846 | return status; | |
5847 | } | |
5848 | ||
5849 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false); | |
5850 | ||
d0d72bac | 5851 | tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data; |
5f6ea83f PL |
5852 | tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type); |
5853 | tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type); | |
5854 | ||
5855 | status = hclge_cmd_send(&hdev->hw, &desc, 1); | |
5856 | if (status) | |
5857 | dev_err(&hdev->pdev->dev, | |
5858 | "Send txvlan protocol type command fail, ret =%d\n", | |
5859 | status); | |
5860 | ||
5861 | return status; | |
5862 | } | |
5863 | ||
46a3df9f S |
5864 | static int hclge_init_vlan_config(struct hclge_dev *hdev) |
5865 | { | |
5f6ea83f PL |
5866 | #define HCLGE_DEF_VLAN_TYPE 0x8100 |
5867 | ||
5e43aef8 | 5868 | struct hnae3_handle *handle; |
5f6ea83f | 5869 | struct hclge_vport *vport; |
46a3df9f | 5870 | int ret; |
5f6ea83f PL |
5871 | int i; |
5872 | ||
64d114f0 ZL |
5873 | if (hdev->pdev->revision >= 0x21) { |
5874 | ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, | |
5875 | HCLGE_FILTER_FE_EGRESS, true); | |
5876 | if (ret) | |
5877 | return ret; | |
46a3df9f | 5878 | |
64d114f0 ZL |
5879 | ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, |
5880 | HCLGE_FILTER_FE_INGRESS, true); | |
5881 | if (ret) | |
5882 | return ret; | |
5883 | } else { | |
5884 | ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, | |
5885 | HCLGE_FILTER_FE_EGRESS_V1_B, | |
5886 | true); | |
5887 | if (ret) | |
5888 | return ret; | |
5889 | } | |
46a3df9f | 5890 | |
5f6ea83f PL |
5891 | hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE; |
5892 | hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE; | |
5893 | hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE; | |
5894 | hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE; | |
5895 | hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE; | |
5896 | hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE; | |
5897 | ||
5898 | ret = hclge_set_vlan_protocol_type(hdev); | |
5e43aef8 L |
5899 | if (ret) |
5900 | return ret; | |
46a3df9f | 5901 | |
5f6ea83f PL |
5902 | for (i = 0; i < hdev->num_alloc_vport; i++) { |
5903 | vport = &hdev->vport[i]; | |
dcb35cce PL |
5904 | vport->txvlan_cfg.accept_tag1 = true; |
5905 | vport->txvlan_cfg.accept_untag1 = true; | |
5906 | ||
5907 | /* accept_tag2 and accept_untag2 are not supported on | 
5908 | * pdev revision(0x20); newer revisions support them. Setting | 
5909 | * these two fields does not return an error when the driver | 
5910 | * sends the command to the firmware on revision(0x20). | 
5911 | * These two fields cannot be configured by the user. | 
5912 | */ | |
5913 | vport->txvlan_cfg.accept_tag2 = true; | |
5914 | vport->txvlan_cfg.accept_untag2 = true; | |
5915 | ||
5f6ea83f PL |
5916 | vport->txvlan_cfg.insert_tag1_en = false; |
5917 | vport->txvlan_cfg.insert_tag2_en = false; | |
5918 | vport->txvlan_cfg.default_tag1 = 0; | |
5919 | vport->txvlan_cfg.default_tag2 = 0; | |
5920 | ||
5921 | ret = hclge_set_vlan_tx_offload_cfg(vport); | |
5922 | if (ret) | |
5923 | return ret; | |
5924 | ||
5925 | vport->rxvlan_cfg.strip_tag1_en = false; | |
5926 | vport->rxvlan_cfg.strip_tag2_en = true; | |
5927 | vport->rxvlan_cfg.vlan1_vlan_prionly = false; | |
5928 | vport->rxvlan_cfg.vlan2_vlan_prionly = false; | |
5929 | ||
5930 | ret = hclge_set_vlan_rx_offload_cfg(vport); | |
5931 | if (ret) | |
5932 | return ret; | |
5933 | } | |
5934 | ||
5e43aef8 | 5935 | handle = &hdev->vport[0].nic; |
dc8131d8 | 5936 | return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false); |
46a3df9f S |
5937 | } |
5938 | ||
b2641e2a | 5939 | int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) |
052ece6d PL |
5940 | { |
5941 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5942 | ||
5943 | vport->rxvlan_cfg.strip_tag1_en = false; | |
5944 | vport->rxvlan_cfg.strip_tag2_en = enable; | |
5945 | vport->rxvlan_cfg.vlan1_vlan_prionly = false; | |
5946 | vport->rxvlan_cfg.vlan2_vlan_prionly = false; | |
5947 | ||
5948 | return hclge_set_vlan_rx_offload_cfg(vport); | |
5949 | } | |
5950 | ||
dd72140c | 5951 | static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mtu) |
46a3df9f | 5952 | { |
d44f9b63 | 5953 | struct hclge_config_max_frm_size_cmd *req; |
46a3df9f | 5954 | struct hclge_desc desc; |
2866ccb2 | 5955 | int max_frm_size; |
46a3df9f S |
5956 | int ret; |
5957 | ||
2866ccb2 FL |
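 | /* Convert the requested MTU into a max frame size: payload plus | 
 | * the Ethernet header, FCS and room for one VLAN tag. | 
 | */ | 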
5958 | max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; |
5959 | ||
5960 | if (max_frm_size < HCLGE_MAC_MIN_FRAME || | |
5961 | max_frm_size > HCLGE_MAC_MAX_FRAME) | |
46a3df9f S |
5962 | return -EINVAL; |
5963 | ||
2866ccb2 FL |
5964 | max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME); |
5965 | ||
46a3df9f S |
5966 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false); |
5967 | ||
d44f9b63 | 5968 | req = (struct hclge_config_max_frm_size_cmd *)desc.data; |
2866ccb2 | 5969 | req->max_frm_size = cpu_to_le16(max_frm_size); |
8fc7346c | 5970 | req->min_frm_size = HCLGE_MAC_MIN_FRAME; |
46a3df9f S |
5971 | |
5972 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
3f639907 | 5973 | if (ret) |
46a3df9f | 5974 | dev_err(&hdev->pdev->dev, "set mtu fail, ret =%d.\n", ret); |
3f639907 JS |
5975 | else |
5976 | hdev->mps = max_frm_size; | |
2866ccb2 | 5977 | |
3f639907 | 5978 | return ret; |
46a3df9f S |
5979 | } |
5980 | ||
dd72140c FL |
5981 | static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu) |
5982 | { | |
5983 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5984 | struct hclge_dev *hdev = vport->back; | |
5985 | int ret; | |
5986 | ||
5987 | ret = hclge_set_mac_mtu(hdev, new_mtu); | |
5988 | if (ret) { | |
5989 | dev_err(&hdev->pdev->dev, | |
5990 | "Change mtu fail, ret =%d\n", ret); | |
5991 | return ret; | |
5992 | } | |
5993 | ||
5994 | ret = hclge_buffer_alloc(hdev); | |
5995 | if (ret) | |
5996 | dev_err(&hdev->pdev->dev, | |
5997 | "Allocate buffer fail, ret =%d\n", ret); | |
5998 | ||
5999 | return ret; | |
6000 | } | |
6001 | ||
46a3df9f S |
6002 | static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id, |
6003 | bool enable) | |
6004 | { | |
d44f9b63 | 6005 | struct hclge_reset_tqp_queue_cmd *req; |
46a3df9f S |
6006 | struct hclge_desc desc; |
6007 | int ret; | |
6008 | ||
6009 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false); | |
6010 | ||
d44f9b63 | 6011 | req = (struct hclge_reset_tqp_queue_cmd *)desc.data; |
46a3df9f | 6012 | req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK); |
e4e87715 | 6013 | hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable); |
46a3df9f S |
6014 | |
6015 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
6016 | if (ret) { | |
6017 | dev_err(&hdev->pdev->dev, | |
6018 | "Send tqp reset cmd error, ret = %d\n", ret); | |
6019 | return ret; | |
6020 | } | |
6021 | ||
6022 | return 0; | |
6023 | } | |
6024 | ||
6025 | static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id) | |
6026 | { | |
d44f9b63 | 6027 | struct hclge_reset_tqp_queue_cmd *req; |
46a3df9f S |
6028 | struct hclge_desc desc; |
6029 | int ret; | |
6030 | ||
6031 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true); | |
6032 | ||
d44f9b63 | 6033 | req = (struct hclge_reset_tqp_queue_cmd *)desc.data; |
46a3df9f S |
6034 | req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK); |
6035 | ||
6036 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
6037 | if (ret) { | |
6038 | dev_err(&hdev->pdev->dev, | |
6039 | "Get reset status error, ret = %d\n", ret); | |
6040 | return ret; | |
6041 | } | |
6042 | ||
e4e87715 | 6043 | return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B); |
46a3df9f S |
6044 | } |
6045 | ||
814e0274 PL |
6046 | static u16 hclge_convert_handle_qid_global(struct hnae3_handle *handle, | |
6047 | u16 queue_id) | |
6048 | { | |
6049 | struct hnae3_queue *queue; | |
6050 | struct hclge_tqp *tqp; | |
6051 | ||
6052 | queue = handle->kinfo.tqp[queue_id]; | |
6053 | tqp = container_of(queue, struct hclge_tqp, q); | |
6054 | ||
6055 | return tqp->index; | |
6056 | } | |
6057 | ||
84e095d6 | 6058 | void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id) |
46a3df9f S |
6059 | { |
6060 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6061 | struct hclge_dev *hdev = vport->back; | |
6062 | int reset_try_times = 0; | |
6063 | int reset_status; | |
814e0274 | 6064 | u16 queue_gid; |
46a3df9f S |
6065 | int ret; |
6066 | ||
b50ae26c PL |
6067 | if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) |
6068 | return; | |
6069 | ||
814e0274 PL |
6070 | queue_gid = hclge_convert_handle_qid_global(handle, queue_id); | |
6071 | ||
46a3df9f S |
6072 | ret = hclge_tqp_enable(hdev, queue_id, 0, false); |
6073 | if (ret) { | |
6074 | dev_warn(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret); | |
6075 | return; | |
6076 | } | |
6077 | ||
814e0274 | 6078 | ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true); |
46a3df9f S |
6079 | if (ret) { |
6080 | dev_warn(&hdev->pdev->dev, | |
6081 | "Send reset tqp cmd fail, ret = %d\n", ret); | |
6082 | return; | |
6083 | } | |
6084 | ||
6085 | reset_try_times = 0; | |
6086 | while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) { | |
6087 | /* Wait for tqp hw reset */ | |
6088 | msleep(20); | |
814e0274 | 6089 | reset_status = hclge_get_reset_status(hdev, queue_gid); |
46a3df9f S |
6090 | if (reset_status) |
6091 | break; | |
6092 | } | |
6093 | ||
6094 | if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) { | |
6095 | dev_warn(&hdev->pdev->dev, "Reset TQP fail\n"); | |
6096 | return; | |
6097 | } | |
6098 | ||
814e0274 | 6099 | ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false); |
46a3df9f S |
6100 | if (ret) { |
6101 | dev_warn(&hdev->pdev->dev, | |
6102 | "Deassert the soft reset fail, ret = %d\n", ret); | |
6103 | return; | |
6104 | } | |
6105 | } | |
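/* Editor's note: the TQP reset above is polled, not event driven. With one
 * hclge_get_reset_status() query per iteration and msleep(20) between
 * queries, the worst case before the "Reset TQP fail" warning is at least
 * 20 ms * HCLGE_TQP_RESET_TRY_TIMES (the constant's value lives in the
 * driver headers, and msleep() may sleep longer than requested).
 */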
6106 | ||
1a426f8b PL |
6107 | void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id) |
6108 | { | |
6109 | struct hclge_dev *hdev = vport->back; | |
6110 | int reset_try_times = 0; | |
6111 | int reset_status; | |
6112 | u16 queue_gid; | |
6113 | int ret; | |
6114 | ||
6115 | queue_gid = hclge_convert_handle_qid_global(&vport->nic, queue_id); | |
6116 | ||
6117 | ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true); | |
6118 | if (ret) { | |
6119 | dev_warn(&hdev->pdev->dev, | |
6120 | "Send reset tqp cmd fail, ret = %d\n", ret); | |
6121 | return; | |
6122 | } | |
6123 | ||
6124 | reset_try_times = 0; | |
6125 | while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) { | |
6126 | /* Wait for tqp hw reset */ | |
6127 | msleep(20); | |
6128 | reset_status = hclge_get_reset_status(hdev, queue_gid); | |
6129 | if (reset_status) | |
6130 | break; | |
6131 | } | |
6132 | ||
6133 | if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) { | |
6134 | dev_warn(&hdev->pdev->dev, "Reset TQP fail\n"); | |
6135 | return; | |
6136 | } | |
6137 | ||
6138 | ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false); | |
6139 | if (ret) | |
6140 | dev_warn(&hdev->pdev->dev, | |
6141 | "Deassert the soft reset fail, ret = %d\n", ret); | |
6142 | } | |
6143 | ||
46a3df9f S |
6144 | static u32 hclge_get_fw_version(struct hnae3_handle *handle) |
6145 | { | |
6146 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6147 | struct hclge_dev *hdev = vport->back; | |
6148 | ||
6149 | return hdev->fw_version; | |
6150 | } | |
6151 | ||
61387774 PL |
6152 | static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) |
6153 | { | |
6154 | struct phy_device *phydev = hdev->hw.mac.phydev; | |
6155 | ||
6156 | if (!phydev) | |
6157 | return; | |
6158 | ||
70814e81 | 6159 | phy_set_asym_pause(phydev, rx_en, tx_en); |
61387774 PL |
6160 | } |
6161 | ||
6162 | static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) | |
6163 | { | |
61387774 PL |
6164 | int ret; |
6165 | ||
6166 | if (rx_en && tx_en) | |
40173a2e | 6167 | hdev->fc_mode_last_time = HCLGE_FC_FULL; |
61387774 | 6168 | else if (rx_en && !tx_en) |
40173a2e | 6169 | hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE; |
61387774 | 6170 | else if (!rx_en && tx_en) |
40173a2e | 6171 | hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE; |
61387774 | 6172 | else |
40173a2e | 6173 | hdev->fc_mode_last_time = HCLGE_FC_NONE; |
61387774 | 6174 | |
40173a2e | 6175 | if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) |
61387774 | 6176 | return 0; |
61387774 PL |
6177 | |
6178 | ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en); | |
6179 | if (ret) { | |
6180 | dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n", | |
6181 | ret); | |
6182 | return ret; | |
6183 | } | |
6184 | ||
40173a2e | 6185 | hdev->tm_info.fc_mode = hdev->fc_mode_last_time; |
61387774 PL |
6186 | |
6187 | return 0; | |
6188 | } | |
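/* Editor's note: the rx_en/tx_en pair above maps onto the recorded flow
 * control mode as follows:
 *
 *	rx_en tx_en -> fc_mode_last_time
 *	  1     1      HCLGE_FC_FULL
 *	  1     0      HCLGE_FC_RX_PAUSE
 *	  0     1      HCLGE_FC_TX_PAUSE
 *	  0     0      HCLGE_FC_NONE
 *
 * When PFC is active (HCLGE_FC_PFC) the request is only recorded, not
 * applied, since link-level pause and per-priority pause are exclusive.
 */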
6189 | ||
1770a7a3 PL |
6190 | int hclge_cfg_flowctrl(struct hclge_dev *hdev) |
6191 | { | |
6192 | struct phy_device *phydev = hdev->hw.mac.phydev; | |
6193 | u16 remote_advertising = 0; | |
6194 | u16 local_advertising = 0; | |
6195 | u32 rx_pause, tx_pause; | |
6196 | u8 flowctl; | |
6197 | ||
6198 | if (!phydev->link || !phydev->autoneg) | |
6199 | return 0; | |
6200 | ||
5f991f7b | 6201 | local_advertising = ethtool_adv_to_lcl_adv_t(phydev->advertising); |
1770a7a3 PL |
6202 | |
6203 | if (phydev->pause) | |
6204 | remote_advertising = LPA_PAUSE_CAP; | |
6205 | ||
6206 | if (phydev->asym_pause) | |
6207 | remote_advertising |= LPA_PAUSE_ASYM; | |
6208 | ||
6209 | flowctl = mii_resolve_flowctrl_fdx(local_advertising, | |
6210 | remote_advertising); | |
6211 | tx_pause = flowctl & FLOW_CTRL_TX; | |
6212 | rx_pause = flowctl & FLOW_CTRL_RX; | |
6213 | ||
6214 | if (phydev->duplex == HCLGE_MAC_HALF) { | |
6215 | tx_pause = 0; | |
6216 | rx_pause = 0; | |
6217 | } | |
6218 | ||
6219 | return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause); | |
6220 | } | |
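/* Editor's note: worked example of the IEEE 802.3 pause resolution done by
 * mii_resolve_flowctrl_fdx() above. If both partners advertise symmetric
 * pause (local_advertising has ADVERTISE_PAUSE_CAP, remote_advertising has
 * LPA_PAUSE_CAP), the result is FLOW_CTRL_TX | FLOW_CTRL_RX: pause frames
 * are both sent and honoured. Asymmetric combinations resolve to a single
 * direction, and half duplex is forced to no pause by the check above.
 */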
6221 | ||
46a3df9f S |
6222 | static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg, |
6223 | u32 *rx_en, u32 *tx_en) | |
6224 | { | |
6225 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6226 | struct hclge_dev *hdev = vport->back; | |
6227 | ||
6228 | *auto_neg = hclge_get_autoneg(handle); | |
6229 | ||
6230 | if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { | |
6231 | *rx_en = 0; | |
6232 | *tx_en = 0; | |
6233 | return; | |
6234 | } | |
6235 | ||
6236 | if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) { | |
6237 | *rx_en = 1; | |
6238 | *tx_en = 0; | |
6239 | } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) { | |
6240 | *tx_en = 1; | |
6241 | *rx_en = 0; | |
6242 | } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) { | |
6243 | *rx_en = 1; | |
6244 | *tx_en = 1; | |
6245 | } else { | |
6246 | *rx_en = 0; | |
6247 | *tx_en = 0; | |
6248 | } | |
6249 | } | |
6250 | ||
61387774 PL |
6251 | static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg, |
6252 | u32 rx_en, u32 tx_en) | |
6253 | { | |
6254 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6255 | struct hclge_dev *hdev = vport->back; | |
6256 | struct phy_device *phydev = hdev->hw.mac.phydev; | |
6257 | u32 fc_autoneg; | |
6258 | ||
61387774 PL |
6259 | fc_autoneg = hclge_get_autoneg(handle); |
6260 | if (auto_neg != fc_autoneg) { | |
6261 | dev_info(&hdev->pdev->dev, | |
6262 | "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n"); | |
6263 | return -EOPNOTSUPP; | |
6264 | } | |
6265 | ||
6266 | if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { | |
6267 | dev_info(&hdev->pdev->dev, | |
6268 | "Priority flow control enabled. Cannot set link flow control.\n"); | |
6269 | return -EOPNOTSUPP; | |
6270 | } | |
6271 | ||
6272 | hclge_set_flowctrl_adv(hdev, rx_en, tx_en); | |
6273 | ||
6274 | if (!fc_autoneg) | |
6275 | return hclge_cfg_pauseparam(hdev, rx_en, tx_en); | |
6276 | ||
0c963e8c FL |
6277 | /* Only support flow control negotiation for netdev with |
6278 | * phy attached for now. | |
6279 | */ | |
6280 | if (!phydev) | |
6281 | return -EOPNOTSUPP; | |
6282 | ||
61387774 PL |
6283 | return phy_start_aneg(phydev); |
6284 | } | |
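/* Editor's note: this handler backs the ethtool pause interface; a command
 * such as "ethtool -A <dev> rx on tx off" should land here through the
 * hnae3 ops table (.set_pauseparam below). With flow control autoneg off
 * the setting is applied directly via hclge_cfg_pauseparam(); with autoneg
 * on it only restarts autonegotiation, and only for PHY-attached ports.
 */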
6285 | ||
46a3df9f S |
6286 | static void hclge_get_ksettings_an_result(struct hnae3_handle *handle, |
6287 | u8 *auto_neg, u32 *speed, u8 *duplex) | |
6288 | { | |
6289 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6290 | struct hclge_dev *hdev = vport->back; | |
6291 | ||
6292 | if (speed) | |
6293 | *speed = hdev->hw.mac.speed; | |
6294 | if (duplex) | |
6295 | *duplex = hdev->hw.mac.duplex; | |
6296 | if (auto_neg) | |
6297 | *auto_neg = hdev->hw.mac.autoneg; | |
6298 | } | |
6299 | ||
6300 | static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type) | |
6301 | { | |
6302 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6303 | struct hclge_dev *hdev = vport->back; | |
6304 | ||
6305 | if (media_type) | |
6306 | *media_type = hdev->hw.mac.media_type; | |
6307 | } | |
6308 | ||
6309 | static void hclge_get_mdix_mode(struct hnae3_handle *handle, | |
6310 | u8 *tp_mdix_ctrl, u8 *tp_mdix) | |
6311 | { | |
6312 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6313 | struct hclge_dev *hdev = vport->back; | |
6314 | struct phy_device *phydev = hdev->hw.mac.phydev; | |
6315 | int mdix_ctrl, mdix, retval, is_resolved; | |
6316 | ||
6317 | if (!phydev) { | |
6318 | *tp_mdix_ctrl = ETH_TP_MDI_INVALID; | |
6319 | *tp_mdix = ETH_TP_MDI_INVALID; | |
6320 | return; | |
6321 | } | |
6322 | ||
6323 | phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX); | |
6324 | ||
6325 | retval = phy_read(phydev, HCLGE_PHY_CSC_REG); | |
e4e87715 PL |
6326 | mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M, |
6327 | HCLGE_PHY_MDIX_CTRL_S); | |
46a3df9f S |
6328 | |
6329 | retval = phy_read(phydev, HCLGE_PHY_CSS_REG); | |
e4e87715 PL |
6330 | mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B); |
6331 | is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B); | |
46a3df9f S |
6332 | |
6333 | phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER); | |
6334 | ||
6335 | switch (mdix_ctrl) { | |
6336 | case 0x0: | |
6337 | *tp_mdix_ctrl = ETH_TP_MDI; | |
6338 | break; | |
6339 | case 0x1: | |
6340 | *tp_mdix_ctrl = ETH_TP_MDI_X; | |
6341 | break; | |
6342 | case 0x3: | |
6343 | *tp_mdix_ctrl = ETH_TP_MDI_AUTO; | |
6344 | break; | |
6345 | default: | |
6346 | *tp_mdix_ctrl = ETH_TP_MDI_INVALID; | |
6347 | break; | |
6348 | } | |
6349 | ||
6350 | if (!is_resolved) | |
6351 | *tp_mdix = ETH_TP_MDI_INVALID; | |
6352 | else if (mdix) | |
6353 | *tp_mdix = ETH_TP_MDI_X; | |
6354 | else | |
6355 | *tp_mdix = ETH_TP_MDI; | |
6356 | } | |
6357 | ||
b01b7cf1 FL |
6358 | static int hclge_init_instance_hw(struct hclge_dev *hdev) |
6359 | { | |
6360 | return hclge_mac_connect_phy(hdev); | |
6361 | } | |
6362 | ||
6363 | static void hclge_uninit_instance_hw(struct hclge_dev *hdev) | |
6364 | { | |
6365 | hclge_mac_disconnect_phy(hdev); | |
6366 | } | |
6367 | ||
46a3df9f S |
6368 | static int hclge_init_client_instance(struct hnae3_client *client, |
6369 | struct hnae3_ae_dev *ae_dev) | |
6370 | { | |
6371 | struct hclge_dev *hdev = ae_dev->priv; | |
6372 | struct hclge_vport *vport; | |
6373 | int i, ret; | |
6374 | ||
6375 | for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { | |
6376 | vport = &hdev->vport[i]; | |
6377 | ||
6378 | switch (client->type) { | |
6379 | case HNAE3_CLIENT_KNIC: | |
6380 | ||
6381 | hdev->nic_client = client; | |
6382 | vport->nic.client = client; | |
6383 | ret = client->ops->init_instance(&vport->nic); | |
6384 | if (ret) | |
49dd8054 | 6385 | goto clear_nic; |
46a3df9f | 6386 | |
b01b7cf1 FL |
6387 | ret = hclge_init_instance_hw(hdev); |
6388 | if (ret) { | |
6389 | client->ops->uninit_instance(&vport->nic, | |
6390 | 0); | |
49dd8054 | 6391 | goto clear_nic; |
b01b7cf1 FL |
6392 | } |
6393 | ||
d9f28fc2 JS |
6394 | hnae3_set_client_init_flag(client, ae_dev, 1); |
6395 | ||
46a3df9f | 6396 | if (hdev->roce_client && |
e92a0843 | 6397 | hnae3_dev_roce_supported(hdev)) { |
46a3df9f S |
6398 | struct hnae3_client *rc = hdev->roce_client; |
6399 | ||
6400 | ret = hclge_init_roce_base_info(vport); | |
6401 | if (ret) | |
49dd8054 | 6402 | goto clear_roce; |
46a3df9f S |
6403 | |
6404 | ret = rc->ops->init_instance(&vport->roce); | |
6405 | if (ret) | |
49dd8054 | 6406 | goto clear_roce; |
d9f28fc2 JS |
6407 | |
6408 | hnae3_set_client_init_flag(hdev->roce_client, | |
6409 | ae_dev, 1); | |
46a3df9f S |
6410 | } |
6411 | ||
6412 | break; | |
6413 | case HNAE3_CLIENT_UNIC: | |
6414 | hdev->nic_client = client; | |
6415 | vport->nic.client = client; | |
6416 | ||
6417 | ret = client->ops->init_instance(&vport->nic); | |
6418 | if (ret) | |
49dd8054 | 6419 | goto clear_nic; |
46a3df9f | 6420 | |
d9f28fc2 JS |
6421 | hnae3_set_client_init_flag(client, ae_dev, 1); |
6422 | ||
46a3df9f S |
6423 | break; |
6424 | case HNAE3_CLIENT_ROCE: | |
e92a0843 | 6425 | if (hnae3_dev_roce_supported(hdev)) { |
46a3df9f S |
6426 | hdev->roce_client = client; |
6427 | vport->roce.client = client; | |
6428 | } | |
6429 | ||
3a46f34d | 6430 | if (hdev->roce_client && hdev->nic_client) { |
46a3df9f S |
6431 | ret = hclge_init_roce_base_info(vport); |
6432 | if (ret) | |
49dd8054 | 6433 | goto clear_roce; |
46a3df9f S |
6434 | |
6435 | ret = client->ops->init_instance(&vport->roce); | |
6436 | if (ret) | |
49dd8054 | 6437 | goto clear_roce; |
d9f28fc2 JS |
6438 | |
6439 | hnae3_set_client_init_flag(client, ae_dev, 1); | |
46a3df9f | 6440 | } |
fa7a4bd5 JS |
6441 | |
6442 | break; | |
6443 | default: | |
6444 | return -EINVAL; | |
46a3df9f S |
6445 | } |
6446 | } | |
6447 | ||
6448 | return 0; | |
49dd8054 JS |
6449 | |
6450 | clear_nic: | |
6451 | hdev->nic_client = NULL; | |
6452 | vport->nic.client = NULL; | |
6453 | return ret; | |
6454 | clear_roce: | |
6455 | hdev->roce_client = NULL; | |
6456 | vport->roce.client = NULL; | |
6457 | return ret; | |
46a3df9f S |
6458 | } |
6459 | ||
6460 | static void hclge_uninit_client_instance(struct hnae3_client *client, | |
6461 | struct hnae3_ae_dev *ae_dev) | |
6462 | { | |
6463 | struct hclge_dev *hdev = ae_dev->priv; | |
6464 | struct hclge_vport *vport; | |
6465 | int i; | |
6466 | ||
6467 | for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { | |
6468 | vport = &hdev->vport[i]; | |
a17dcf3f | 6469 | if (hdev->roce_client) { |
46a3df9f S |
6470 | hdev->roce_client->ops->uninit_instance(&vport->roce, |
6471 | 0); | |
a17dcf3f L |
6472 | hdev->roce_client = NULL; |
6473 | vport->roce.client = NULL; | |
6474 | } | |
46a3df9f S |
6475 | if (client->type == HNAE3_CLIENT_ROCE) |
6476 | return; | |
49dd8054 | 6477 | if (hdev->nic_client && client->ops->uninit_instance) { |
b01b7cf1 | 6478 | hclge_uninit_instance_hw(hdev); |
46a3df9f | 6479 | client->ops->uninit_instance(&vport->nic, 0); |
a17dcf3f L |
6480 | hdev->nic_client = NULL; |
6481 | vport->nic.client = NULL; | |
6482 | } | |
46a3df9f S |
6483 | } |
6484 | } | |
6485 | ||
6486 | static int hclge_pci_init(struct hclge_dev *hdev) | |
6487 | { | |
6488 | struct pci_dev *pdev = hdev->pdev; | |
6489 | struct hclge_hw *hw; | |
6490 | int ret; | |
6491 | ||
6492 | ret = pci_enable_device(pdev); | |
6493 | if (ret) { | |
6494 | dev_err(&pdev->dev, "failed to enable PCI device\n"); | |
3e249d3b | 6495 | return ret; |
46a3df9f S |
6496 | } |
6497 | ||
6498 | ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); | |
6499 | if (ret) { | |
6500 | ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); | |
6501 | if (ret) { | |
6502 | dev_err(&pdev->dev, | |
6503 | "can't set consistent PCI DMA mask\n"); | |
6504 | goto err_disable_device; | |
6505 | } | |
6506 | dev_warn(&pdev->dev, "set DMA mask to 32 bits\n"); | |
6507 | } | |
6508 | ||
6509 | ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME); | |
6510 | if (ret) { | |
6511 | dev_err(&pdev->dev, "PCI request regions failed %d\n", ret); | |
6512 | goto err_disable_device; | |
6513 | } | |
6514 | ||
6515 | pci_set_master(pdev); | |
6516 | hw = &hdev->hw; | |
46a3df9f S |
6517 | hw->io_base = pcim_iomap(pdev, 2, 0); |
6518 | if (!hw->io_base) { | |
6519 | dev_err(&pdev->dev, "Can't map configuration register space\n"); | |
6520 | ret = -ENOMEM; | |
6521 | goto err_clr_master; | |
6522 | } | |
6523 | ||
709eb41a L |
6524 | hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev); |
6525 | ||
46a3df9f S |
6526 | return 0; |
6527 | err_clr_master: | |
6528 | pci_clear_master(pdev); | |
6529 | pci_release_regions(pdev); | |
6530 | err_disable_device: | |
6531 | pci_disable_device(pdev); | |
46a3df9f S |
6532 | |
6533 | return ret; | |
6534 | } | |
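/* Editor's note: the DMA setup above is the usual 64-bit-first fallback; a
 * condensed sketch of the same idiom (error handling elided):
 *
 *	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
 *		return -EIO;	/* neither mask usable */
 *
 * Falling back to a 32-bit mask keeps the device usable on hosts whose
 * addressing cannot cover the full 64-bit range.
 */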
6535 | ||
6536 | static void hclge_pci_uninit(struct hclge_dev *hdev) | |
6537 | { | |
6538 | struct pci_dev *pdev = hdev->pdev; | |
6539 | ||
6a814413 | 6540 | pcim_iounmap(pdev, hdev->hw.io_base); |
887c3820 | 6541 | pci_free_irq_vectors(pdev); |
46a3df9f S |
6542 | pci_clear_master(pdev); |
6543 | pci_release_mem_regions(pdev); | |
6544 | pci_disable_device(pdev); | |
6545 | } | |
6546 | ||
48569cda PL |
6547 | static void hclge_state_init(struct hclge_dev *hdev) |
6548 | { | |
6549 | set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state); | |
6550 | set_bit(HCLGE_STATE_DOWN, &hdev->state); | |
6551 | clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state); | |
6552 | clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); | |
6553 | clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state); | |
6554 | clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); | |
6555 | } | |
6556 | ||
6557 | static void hclge_state_uninit(struct hclge_dev *hdev) | |
6558 | { | |
6559 | set_bit(HCLGE_STATE_DOWN, &hdev->state); | |
6560 | ||
6561 | if (hdev->service_timer.function) | |
6562 | del_timer_sync(&hdev->service_timer); | |
6563 | if (hdev->service_task.func) | |
6564 | cancel_work_sync(&hdev->service_task); | |
6565 | if (hdev->rst_service_task.func) | |
6566 | cancel_work_sync(&hdev->rst_service_task); | |
6567 | if (hdev->mbx_service_task.func) | |
6568 | cancel_work_sync(&hdev->mbx_service_task); | |
6569 | } | |
6570 | ||
46a3df9f S |
6571 | static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) |
6572 | { | |
6573 | struct pci_dev *pdev = ae_dev->pdev; | |
46a3df9f S |
6574 | struct hclge_dev *hdev; |
6575 | int ret; | |
6576 | ||
6577 | hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); | |
6578 | if (!hdev) { | |
6579 | ret = -ENOMEM; | |
ffd5656e | 6580 | goto out; |
46a3df9f S |
6581 | } |
6582 | ||
46a3df9f S |
6583 | hdev->pdev = pdev; |
6584 | hdev->ae_dev = ae_dev; | |
4ed340ab | 6585 | hdev->reset_type = HNAE3_NONE_RESET; |
46a3df9f S |
6586 | ae_dev->priv = hdev; |
6587 | ||
46a3df9f S |
6588 | ret = hclge_pci_init(hdev); |
6589 | if (ret) { | |
6590 | dev_err(&pdev->dev, "PCI init failed\n"); | |
ffd5656e | 6591 | goto out; |
46a3df9f S |
6592 | } |
6593 | ||
3efb960f L |
6594 | /* Firmware command queue initialize */ |
6595 | ret = hclge_cmd_queue_init(hdev); | |
6596 | if (ret) { | |
6597 | dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret); | |
ffd5656e | 6598 | goto err_pci_uninit; |
3efb960f L |
6599 | } |
6600 | ||
6601 | /* Firmware command initialize */ | |
46a3df9f S |
6602 | ret = hclge_cmd_init(hdev); |
6603 | if (ret) | |
ffd5656e | 6604 | goto err_cmd_uninit; |
46a3df9f S |
6605 | |
6606 | ret = hclge_get_cap(hdev); | |
6607 | if (ret) { | |
e00e2197 CIK |
6608 | dev_err(&pdev->dev, "get hw capability error, ret = %d.\n", |
6609 | ret); | |
ffd5656e | 6610 | goto err_cmd_uninit; |
46a3df9f S |
6611 | } |
6612 | ||
6613 | ret = hclge_configure(hdev); | |
6614 | if (ret) { | |
6615 | dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret); | |
ffd5656e | 6616 | goto err_cmd_uninit; |
46a3df9f S |
6617 | } |
6618 | ||
887c3820 | 6619 | ret = hclge_init_msi(hdev); |
46a3df9f | 6620 | if (ret) { |
887c3820 | 6621 | dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret); |
ffd5656e | 6622 | goto err_cmd_uninit; |
46a3df9f S |
6623 | } |
6624 | ||
466b0c00 L |
6625 | ret = hclge_misc_irq_init(hdev); |
6626 | if (ret) { | |
6627 | dev_err(&pdev->dev, | |
6628 | "Misc IRQ(vector0) init error, ret = %d.\n", | |
6629 | ret); | |
ffd5656e | 6630 | goto err_msi_uninit; |
466b0c00 L |
6631 | } |
6632 | ||
46a3df9f S |
6633 | ret = hclge_alloc_tqps(hdev); |
6634 | if (ret) { | |
6635 | dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret); | |
ffd5656e | 6636 | goto err_msi_irq_uninit; |
46a3df9f S |
6637 | } |
6638 | ||
6639 | ret = hclge_alloc_vport(hdev); | |
6640 | if (ret) { | |
6641 | dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret); | |
ffd5656e | 6642 | goto err_msi_irq_uninit; |
46a3df9f S |
6643 | } |
6644 | ||
7df7dad6 L |
6645 | ret = hclge_map_tqp(hdev); |
6646 | if (ret) { | |
6647 | dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret); | |
2312e050 | 6648 | goto err_msi_irq_uninit; |
7df7dad6 L |
6649 | } |
6650 | ||
c5ef83cb HT |
6651 | if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) { |
6652 | ret = hclge_mac_mdio_config(hdev); | |
6653 | if (ret) { | |
6654 | dev_err(&hdev->pdev->dev, | |
6655 | "mdio config fail ret=%d\n", ret); | |
2312e050 | 6656 | goto err_msi_irq_uninit; |
c5ef83cb | 6657 | } |
cf9cca2d | 6658 | } |
6659 | ||
39932473 JS |
6660 | ret = hclge_init_umv_space(hdev); |
6661 | if (ret) { | |
6662 | dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret); | |
6663 | goto err_msi_irq_uninit; | |
6664 | } | |
6665 | ||
46a3df9f S |
6666 | ret = hclge_mac_init(hdev); |
6667 | if (ret) { | |
6668 | dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); | |
ffd5656e | 6669 | goto err_mdiobus_unreg; |
46a3df9f | 6670 | } |
46a3df9f S |
6671 | |
6672 | ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); | |
6673 | if (ret) { | |
6674 | dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret); | |
ffd5656e | 6675 | goto err_mdiobus_unreg; |
46a3df9f S |
6676 | } |
6677 | ||
46a3df9f S |
6678 | ret = hclge_init_vlan_config(hdev); |
6679 | if (ret) { | |
6680 | dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); | |
ffd5656e | 6681 | goto err_mdiobus_unreg; |
46a3df9f S |
6682 | } |
6683 | ||
6684 | ret = hclge_tm_schd_init(hdev); | |
6685 | if (ret) { | |
6686 | dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret); | |
ffd5656e | 6687 | goto err_mdiobus_unreg; |
68ece54e YL |
6688 | } |
6689 | ||
268f5dfa | 6690 | hclge_rss_init_cfg(hdev); |
68ece54e YL |
6691 | ret = hclge_rss_init_hw(hdev); |
6692 | if (ret) { | |
6693 | dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret); | |
ffd5656e | 6694 | goto err_mdiobus_unreg; |
46a3df9f S |
6695 | } |
6696 | ||
f5aac71c FL |
6697 | ret = init_mgr_tbl(hdev); |
6698 | if (ret) { | |
6699 | dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret); | |
ffd5656e | 6700 | goto err_mdiobus_unreg; |
f5aac71c FL |
6701 | } |
6702 | ||
d695964d JS |
6703 | ret = hclge_init_fd_config(hdev); |
6704 | if (ret) { | |
6705 | dev_err(&pdev->dev, | |
6706 | "fd table init fail, ret=%d\n", ret); | |
6707 | goto err_mdiobus_unreg; | |
6708 | } | |
6709 | ||
cacde272 YL |
6710 | hclge_dcb_ops_set(hdev); |
6711 | ||
d039ef68 | 6712 | timer_setup(&hdev->service_timer, hclge_service_timer, 0); |
46a3df9f | 6713 | INIT_WORK(&hdev->service_task, hclge_service_task); |
cb1b9f77 | 6714 | INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task); |
c1a81619 | 6715 | INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task); |
46a3df9f | 6716 | |
8e52a602 XW |
6717 | hclge_clear_all_event_cause(hdev); |
6718 | ||
466b0c00 L |
6719 | /* Enable MISC vector(vector0) */ |
6720 | hclge_enable_vector(&hdev->misc_vector, true); | |
6721 | ||
48569cda | 6722 | hclge_state_init(hdev); |
46a3df9f S |
6723 | |
6724 | pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME); | |
6725 | return 0; | |
6726 | ||
ffd5656e HT |
6727 | err_mdiobus_unreg: |
6728 | if (hdev->hw.mac.phydev) | |
6729 | mdiobus_unregister(hdev->hw.mac.mdio_bus); | |
ffd5656e HT |
6730 | err_msi_irq_uninit: |
6731 | hclge_misc_irq_uninit(hdev); | |
6732 | err_msi_uninit: | |
6733 | pci_free_irq_vectors(pdev); | |
6734 | err_cmd_uninit: | |
6735 | hclge_destroy_cmd_queue(&hdev->hw); | |
6736 | err_pci_uninit: | |
6a814413 | 6737 | pcim_iounmap(pdev, hdev->hw.io_base); |
ffd5656e | 6738 | pci_clear_master(pdev); |
46a3df9f | 6739 | pci_release_regions(pdev); |
ffd5656e | 6740 | pci_disable_device(pdev); |
ffd5656e | 6741 | out: |
46a3df9f S |
6742 | return ret; |
6743 | } | |
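/* Editor's note: the error labels above unwind in strict reverse order of
 * initialization (MDIO bus -> misc IRQ -> MSI vectors -> command queue ->
 * PCI), so a new init step added to hclge_init_ae_dev() needs a matching
 * label at the corresponding depth of the ladder.
 */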
6744 | ||
c6dc5213 | 6745 | static void hclge_stats_clear(struct hclge_dev *hdev) |
6746 | { | |
6747 | memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats)); | |
6748 | } | |
6749 | ||
4ed340ab L |
6750 | static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) |
6751 | { | |
6752 | struct hclge_dev *hdev = ae_dev->priv; | |
6753 | struct pci_dev *pdev = ae_dev->pdev; | |
6754 | int ret; | |
6755 | ||
6756 | set_bit(HCLGE_STATE_DOWN, &hdev->state); | |
6757 | ||
c6dc5213 | 6758 | hclge_stats_clear(hdev); |
dc8131d8 | 6759 | memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table)); |
c6dc5213 | 6760 | |
4ed340ab L |
6761 | ret = hclge_cmd_init(hdev); |
6762 | if (ret) { | |
6763 | dev_err(&pdev->dev, "Cmd queue init failed\n"); | |
6764 | return ret; | |
6765 | } | |
6766 | ||
6767 | ret = hclge_get_cap(hdev); | |
6768 | if (ret) { | |
6769 | dev_err(&pdev->dev, "get hw capability error, ret = %d.\n", | |
6770 | ret); | |
6771 | return ret; | |
6772 | } | |
6773 | ||
6774 | ret = hclge_configure(hdev); | |
6775 | if (ret) { | |
6776 | dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret); | |
6777 | return ret; | |
6778 | } | |
6779 | ||
6780 | ret = hclge_map_tqp(hdev); | |
6781 | if (ret) { | |
6782 | dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret); | |
6783 | return ret; | |
6784 | } | |
6785 | ||
39932473 JS |
6786 | hclge_reset_umv_space(hdev); |
6787 | ||
4ed340ab L |
6788 | ret = hclge_mac_init(hdev); |
6789 | if (ret) { | |
6790 | dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); | |
6791 | return ret; | |
6792 | } | |
6793 | ||
4ed340ab L |
6794 | ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); |
6795 | if (ret) { | |
6796 | dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret); | |
6797 | return ret; | |
6798 | } | |
6799 | ||
6800 | ret = hclge_init_vlan_config(hdev); | |
6801 | if (ret) { | |
6802 | dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); | |
6803 | return ret; | |
6804 | } | |
6805 | ||
f31c1ba6 | 6806 | ret = hclge_tm_init_hw(hdev); |
4ed340ab | 6807 | if (ret) { |
f31c1ba6 | 6808 | dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret); |
4ed340ab L |
6809 | return ret; |
6810 | } | |
6811 | ||
6812 | ret = hclge_rss_init_hw(hdev); | |
6813 | if (ret) { | |
6814 | dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret); | |
6815 | return ret; | |
6816 | } | |
6817 | ||
d695964d JS |
6818 | ret = hclge_init_fd_config(hdev); |
6819 | if (ret) { | |
6820 | dev_err(&pdev->dev, | |
6821 | "fd table init fail, ret=%d\n", ret); | |
6822 | return ret; | |
6823 | } | |
6824 | ||
4ed340ab L |
6825 | dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n", |
6826 | HCLGE_DRIVER_NAME); | |
6827 | ||
6828 | return 0; | |
6829 | } | |
6830 | ||
46a3df9f S |
6831 | static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) |
6832 | { | |
6833 | struct hclge_dev *hdev = ae_dev->priv; | |
6834 | struct hclge_mac *mac = &hdev->hw.mac; | |
6835 | ||
48569cda | 6836 | hclge_state_uninit(hdev); |
46a3df9f S |
6837 | |
6838 | if (mac->phydev) | |
6839 | mdiobus_unregister(mac->mdio_bus); | |
6840 | ||
39932473 JS |
6841 | hclge_uninit_umv_space(hdev); |
6842 | ||
466b0c00 L |
6843 | /* Disable MISC vector(vector0) */ |
6844 | hclge_enable_vector(&hdev->misc_vector, false); | |
8e52a602 XW |
6845 | synchronize_irq(hdev->misc_vector.vector_irq); |
6846 | ||
46a3df9f | 6847 | hclge_destroy_cmd_queue(&hdev->hw); |
ca1d7669 | 6848 | hclge_misc_irq_uninit(hdev); |
46a3df9f S |
6849 | hclge_pci_uninit(hdev); |
6850 | ae_dev->priv = NULL; | |
6851 | } | |
6852 | ||
482d2e9c PL |
6853 | static u32 hclge_get_max_channels(struct hnae3_handle *handle) |
6854 | { | |
6855 | struct hnae3_knic_private_info *kinfo = &handle->kinfo; | |
6856 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6857 | struct hclge_dev *hdev = vport->back; | |
6858 | ||
6859 | return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps); | |
6860 | } | |
6861 | ||
6862 | static void hclge_get_channels(struct hnae3_handle *handle, | |
6863 | struct ethtool_channels *ch) | |
6864 | { | |
6865 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6866 | ||
6867 | ch->max_combined = hclge_get_max_channels(handle); | |
6868 | ch->other_count = 1; | |
6869 | ch->max_other = 1; | |
6870 | ch->combined_count = vport->alloc_tqps; | |
6871 | } | |
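/* Editor's note: these values surface through "ethtool -l <dev>". The
 * combined channel count reported is the vport's allocated TQPs, capped by
 * min(rss_size_max * num_tc, num_tqps) computed above, plus exactly one
 * non-combined ("other") channel.
 */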
6872 | ||
09f2af64 | 6873 | static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle, |
0d43bf45 | 6874 | u16 *alloc_tqps, u16 *max_rss_size) |
09f2af64 PL |
6875 | { |
6876 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6877 | struct hclge_dev *hdev = vport->back; | |
09f2af64 | 6878 | |
0d43bf45 | 6879 | *alloc_tqps = vport->alloc_tqps; |
09f2af64 PL |
6880 | *max_rss_size = hdev->rss_size_max; |
6881 | } | |
6882 | ||
6883 | static void hclge_release_tqp(struct hclge_vport *vport) | |
6884 | { | |
6885 | struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; | |
6886 | struct hclge_dev *hdev = vport->back; | |
6887 | int i; | |
6888 | ||
6889 | for (i = 0; i < kinfo->num_tqps; i++) { | |
6890 | struct hclge_tqp *tqp = | |
6891 | container_of(kinfo->tqp[i], struct hclge_tqp, q); | |
6892 | ||
6893 | tqp->q.handle = NULL; | |
6894 | tqp->q.tqp_index = 0; | |
6895 | tqp->alloced = false; | |
6896 | } | |
6897 | ||
6898 | devm_kfree(&hdev->pdev->dev, kinfo->tqp); | |
6899 | kinfo->tqp = NULL; | |
6900 | } | |
6901 | ||
6902 | static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num) | |
6903 | { | |
6904 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6905 | struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; | |
6906 | struct hclge_dev *hdev = vport->back; | |
6907 | int cur_rss_size = kinfo->rss_size; | |
6908 | int cur_tqps = kinfo->num_tqps; | |
6909 | u16 tc_offset[HCLGE_MAX_TC_NUM]; | |
6910 | u16 tc_valid[HCLGE_MAX_TC_NUM]; | |
6911 | u16 tc_size[HCLGE_MAX_TC_NUM]; | |
6912 | u16 roundup_size; | |
6913 | u32 *rss_indir; | |
6914 | int ret, i; | |
6915 | ||
fdace1bc | 6916 | /* Free old tqps, and reallocate with new tqp number when nic setup */ |
09f2af64 PL |
6917 | hclge_release_tqp(vport); |
6918 | ||
128b900d | 6919 | ret = hclge_knic_setup(vport, new_tqps_num, kinfo->num_desc); |
09f2af64 PL |
6920 | if (ret) { |
6921 | dev_err(&hdev->pdev->dev, "setup nic fail, ret =%d\n", ret); | |
6922 | return ret; | |
6923 | } | |
6924 | ||
6925 | ret = hclge_map_tqp_to_vport(hdev, vport); | |
6926 | if (ret) { | |
6927 | dev_err(&hdev->pdev->dev, "map vport tqp fail, ret =%d\n", ret); | |
6928 | return ret; | |
6929 | } | |
6930 | ||
6931 | ret = hclge_tm_schd_init(hdev); | |
6932 | if (ret) { | |
6933 | dev_err(&hdev->pdev->dev, "tm schd init fail, ret =%d\n", ret); | |
6934 | return ret; | |
6935 | } | |
6936 | ||
6937 | roundup_size = roundup_pow_of_two(kinfo->rss_size); | |
6938 | roundup_size = ilog2(roundup_size); | |
6939 | /* Set the RSS TC mode according to the new RSS size */ | |
6940 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { | |
6941 | tc_valid[i] = 0; | |
6942 | ||
6943 | if (!(hdev->hw_tc_map & BIT(i))) | |
6944 | continue; | |
6945 | ||
6946 | tc_valid[i] = 1; | |
6947 | tc_size[i] = roundup_size; | |
6948 | tc_offset[i] = kinfo->rss_size * i; | |
6949 | } | |
6950 | ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset); | |
6951 | if (ret) | |
6952 | return ret; | |
6953 | ||
6954 | /* Reinitializes the rss indirect table according to the new RSS size */ | |
6955 | rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL); | |
6956 | if (!rss_indir) | |
6957 | return -ENOMEM; | |
6958 | ||
6959 | for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) | |
6960 | rss_indir[i] = i % kinfo->rss_size; | |
6961 | ||
6962 | ret = hclge_set_rss(handle, rss_indir, NULL, 0); | |
6963 | if (ret) | |
6964 | dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n", | |
6965 | ret); | |
6966 | ||
6967 | kfree(rss_indir); | |
6968 | ||
6969 | if (!ret) | |
6970 | dev_info(&hdev->pdev->dev, | |
6971 | "Channels changed, rss_size from %d to %d, tqps from %d to %d\n", | |
6972 | cur_rss_size, kinfo->rss_size, | |
6973 | cur_tqps, kinfo->rss_size * kinfo->num_tc); | |
6974 | ||
6975 | return ret; | |
6976 | } | |
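/* Editor's note: worked example of the RSS TC sizing above. For a new
 * rss_size of 6:
 *
 *	roundup_pow_of_two(6) = 8, ilog2(8) = 3
 *
 * so each enabled TC gets tc_size = 3 (a 2^3 queue window) with
 * tc_offset = 6 * i, and the indirection table is refilled with i % 6 so
 * traffic spreads over the real queue count, not the rounded one.
 */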
6977 | ||
77b34110 FL |
6978 | static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit, |
6979 | u32 *regs_num_64_bit) | |
6980 | { | |
6981 | struct hclge_desc desc; | |
6982 | u32 total_num; | |
6983 | int ret; | |
6984 | ||
6985 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true); | |
6986 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
6987 | if (ret) { | |
6988 | dev_err(&hdev->pdev->dev, | |
6989 | "Query register number cmd failed, ret = %d.\n", ret); | |
6990 | return ret; | |
6991 | } | |
6992 | ||
6993 | *regs_num_32_bit = le32_to_cpu(desc.data[0]); | |
6994 | *regs_num_64_bit = le32_to_cpu(desc.data[1]); | |
6995 | ||
6996 | total_num = *regs_num_32_bit + *regs_num_64_bit; | |
6997 | if (!total_num) | |
6998 | return -EINVAL; | |
6999 | ||
7000 | return 0; | |
7001 | } | |
7002 | ||
7003 | static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num, | |
7004 | void *data) | |
7005 | { | |
7006 | #define HCLGE_32_BIT_REG_RTN_DATANUM 8 | |
7007 | ||
7008 | struct hclge_desc *desc; | |
7009 | u32 *reg_val = data; | |
7010 | __le32 *desc_data; | |
7011 | int cmd_num; | |
7012 | int i, k, n; | |
7013 | int ret; | |
7014 | ||
7015 | if (regs_num == 0) | |
7016 | return 0; | |
7017 | ||
7018 | cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM); | |
7019 | desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL); | |
7020 | if (!desc) | |
7021 | return -ENOMEM; | |
7022 | ||
7023 | hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true); | |
7024 | ret = hclge_cmd_send(&hdev->hw, desc, cmd_num); | |
7025 | if (ret) { | |
7026 | dev_err(&hdev->pdev->dev, | |
7027 | "Query 32 bit register cmd failed, ret = %d.\n", ret); | |
7028 | kfree(desc); | |
7029 | return ret; | |
7030 | } | |
7031 | ||
7032 | for (i = 0; i < cmd_num; i++) { | |
7033 | if (i == 0) { | |
7034 | desc_data = (__le32 *)(&desc[i].data[0]); | |
7035 | n = HCLGE_32_BIT_REG_RTN_DATANUM - 2; | |
7036 | } else { | |
7037 | desc_data = (__le32 *)(&desc[i]); | |
7038 | n = HCLGE_32_BIT_REG_RTN_DATANUM; | |
7039 | } | |
7040 | for (k = 0; k < n; k++) { | |
7041 | *reg_val++ = le32_to_cpu(*desc_data++); | |
7042 | ||
7043 | regs_num--; | |
7044 | if (!regs_num) | |
7045 | break; | |
7046 | } | |
7047 | } | |
7048 | ||
7049 | kfree(desc); | |
7050 | return 0; | |
7051 | } | |
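/* Editor's note: worked example of the descriptor math above. Each command
 * descriptor carries HCLGE_32_BIT_REG_RTN_DATANUM (8) 32-bit words, with
 * the first two words of the first descriptor reserved for header data, so
 * for regs_num = 100:
 *
 *	cmd_num = DIV_ROUND_UP(100 + 2, 8) = 13 descriptors
 *
 * and the copy loop takes 6 words from the first descriptor and up to 8
 * from each later one until all 100 values are written out.
 */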
7052 | ||
7053 | static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num, | |
7054 | void *data) | |
7055 | { | |
7056 | #define HCLGE_64_BIT_REG_RTN_DATANUM 4 | |
7057 | ||
7058 | struct hclge_desc *desc; | |
7059 | u64 *reg_val = data; | |
7060 | __le64 *desc_data; | |
7061 | int cmd_num; | |
7062 | int i, k, n; | |
7063 | int ret; | |
7064 | ||
7065 | if (regs_num == 0) | |
7066 | return 0; | |
7067 | ||
7068 | cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM); | |
7069 | desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL); | |
7070 | if (!desc) | |
7071 | return -ENOMEM; | |
7072 | ||
7073 | hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true); | |
7074 | ret = hclge_cmd_send(&hdev->hw, desc, cmd_num); | |
7075 | if (ret) { | |
7076 | dev_err(&hdev->pdev->dev, | |
7077 | "Query 64 bit register cmd failed, ret = %d.\n", ret); | |
7078 | kfree(desc); | |
7079 | return ret; | |
7080 | } | |
7081 | ||
7082 | for (i = 0; i < cmd_num; i++) { | |
7083 | if (i == 0) { | |
7084 | desc_data = (__le64 *)(&desc[i].data[0]); | |
7085 | n = HCLGE_64_BIT_REG_RTN_DATANUM - 1; | |
7086 | } else { | |
7087 | desc_data = (__le64 *)(&desc[i]); | |
7088 | n = HCLGE_64_BIT_REG_RTN_DATANUM; | |
7089 | } | |
7090 | for (k = 0; k < n; k++) { | |
7091 | *reg_val++ = le64_to_cpu(*desc_data++); | |
7092 | ||
7093 | regs_num--; | |
7094 | if (!regs_num) | |
7095 | break; | |
7096 | } | |
7097 | } | |
7098 | ||
7099 | kfree(desc); | |
7100 | return 0; | |
7101 | } | |
7102 | ||
7103 | static int hclge_get_regs_len(struct hnae3_handle *handle) | |
7104 | { | |
7105 | struct hclge_vport *vport = hclge_get_vport(handle); | |
7106 | struct hclge_dev *hdev = vport->back; | |
7107 | u32 regs_num_32_bit, regs_num_64_bit; | |
7108 | int ret; | |
7109 | ||
7110 | ret = hclge_get_regs_num(hdev, ®s_num_32_bit, ®s_num_64_bit); | |
7111 | if (ret) { | |
7112 | dev_err(&hdev->pdev->dev, | |
7113 | "Get register number failed, ret = %d.\n", ret); | |
7114 | return -EOPNOTSUPP; | |
7115 | } | |
7116 | ||
7117 | return regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64); | |
7118 | } | |
7119 | ||
7120 | static void hclge_get_regs(struct hnae3_handle *handle, u32 *version, | |
7121 | void *data) | |
7122 | { | |
7123 | struct hclge_vport *vport = hclge_get_vport(handle); | |
7124 | struct hclge_dev *hdev = vport->back; | |
7125 | u32 regs_num_32_bit, regs_num_64_bit; | |
7126 | int ret; | |
7127 | ||
7128 | *version = hdev->fw_version; | |
7129 | ||
7130 | ret = hclge_get_regs_num(hdev, ®s_num_32_bit, ®s_num_64_bit); | |
7131 | if (ret) { | |
7132 | dev_err(&hdev->pdev->dev, | |
7133 | "Get register number failed, ret = %d.\n", ret); | |
7134 | return; | |
7135 | } | |
7136 | ||
7137 | ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, data); | |
7138 | if (ret) { | |
7139 | dev_err(&hdev->pdev->dev, | |
7140 | "Get 32 bit register failed, ret = %d.\n", ret); | |
7141 | return; | |
7142 | } | |
7143 | ||
7144 | data = (u32 *)data + regs_num_32_bit; | |
7145 | ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, | |
7146 | data); | |
7147 | if (ret) | |
7148 | dev_err(&hdev->pdev->dev, | |
7149 | "Get 64 bit register failed, ret = %d.\n", ret); | |
7150 | } | |
7151 | ||
f6f75abc | 7152 | static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status) |
07f8e940 JS |
7153 | { |
7154 | struct hclge_set_led_state_cmd *req; | |
7155 | struct hclge_desc desc; | |
7156 | int ret; | |
7157 | ||
7158 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false); | |
7159 | ||
7160 | req = (struct hclge_set_led_state_cmd *)desc.data; | |
e4e87715 PL |
7161 | hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M, |
7162 | HCLGE_LED_LOCATE_STATE_S, locate_led_status); | |
07f8e940 JS |
7163 | |
7164 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
7165 | if (ret) | |
7166 | dev_err(&hdev->pdev->dev, | |
7167 | "Send set led state cmd error, ret =%d\n", ret); | |
7168 | ||
7169 | return ret; | |
7170 | } | |
7171 | ||
7172 | enum hclge_led_status { | |
7173 | HCLGE_LED_OFF, | |
7174 | HCLGE_LED_ON, | |
7175 | HCLGE_LED_NO_CHANGE = 0xFF, | |
7176 | }; | |
7177 | ||
7178 | static int hclge_set_led_id(struct hnae3_handle *handle, | |
7179 | enum ethtool_phys_id_state status) | |
7180 | { | |
07f8e940 JS |
7181 | struct hclge_vport *vport = hclge_get_vport(handle); |
7182 | struct hclge_dev *hdev = vport->back; | |
07f8e940 JS |
7183 | |
7184 | switch (status) { | |
7185 | case ETHTOOL_ID_ACTIVE: | |
f6f75abc | 7186 | return hclge_set_led_status(hdev, HCLGE_LED_ON); |
07f8e940 | 7187 | case ETHTOOL_ID_INACTIVE: |
f6f75abc | 7188 | return hclge_set_led_status(hdev, HCLGE_LED_OFF); |
07f8e940 | 7189 | default: |
f6f75abc | 7190 | return -EINVAL; |
07f8e940 | 7191 | } |
07f8e940 JS |
7192 | } |
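/* Editor's note: this backs the ethtool identify operation
 * ("ethtool -p <dev>") via the hnae3 ops table (.set_led_id below);
 * ETHTOOL_ID_ACTIVE/ETHTOOL_ID_INACTIVE simply translate into the locate
 * LED on/off firmware command above, and how the LED behaves while active
 * is presumably left to the firmware.
 */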
7193 | ||
0979aa0b FL |
7194 | static void hclge_get_link_mode(struct hnae3_handle *handle, |
7195 | unsigned long *supported, | |
7196 | unsigned long *advertising) | |
7197 | { | |
7198 | unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS); | |
7199 | struct hclge_vport *vport = hclge_get_vport(handle); | |
7200 | struct hclge_dev *hdev = vport->back; | |
7201 | unsigned int idx = 0; | |
7202 | ||
7203 | for (; idx < size; idx++) { | |
7204 | supported[idx] = hdev->hw.mac.supported[idx]; | |
7205 | advertising[idx] = hdev->hw.mac.advertising[idx]; | |
7206 | } | |
7207 | } | |
7208 | ||
46a3df9f S |
7209 | static const struct hnae3_ae_ops hclge_ops = { |
7210 | .init_ae_dev = hclge_init_ae_dev, | |
7211 | .uninit_ae_dev = hclge_uninit_ae_dev, | |
7212 | .init_client_instance = hclge_init_client_instance, | |
7213 | .uninit_client_instance = hclge_uninit_client_instance, | |
84e095d6 SM |
7214 | .map_ring_to_vector = hclge_map_ring_to_vector, |
7215 | .unmap_ring_from_vector = hclge_unmap_ring_frm_vector, | |
46a3df9f | 7216 | .get_vector = hclge_get_vector, |
0d3e6631 | 7217 | .put_vector = hclge_put_vector, |
46a3df9f | 7218 | .set_promisc_mode = hclge_set_promisc_mode, |
c39c4d98 | 7219 | .set_loopback = hclge_set_loopback, |
46a3df9f S |
7220 | .start = hclge_ae_start, |
7221 | .stop = hclge_ae_stop, | |
7222 | .get_status = hclge_get_status, | |
7223 | .get_ksettings_an_result = hclge_get_ksettings_an_result, | |
7224 | .update_speed_duplex_h = hclge_update_speed_duplex_h, | |
7225 | .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h, | |
7226 | .get_media_type = hclge_get_media_type, | |
7227 | .get_rss_key_size = hclge_get_rss_key_size, | |
7228 | .get_rss_indir_size = hclge_get_rss_indir_size, | |
7229 | .get_rss = hclge_get_rss, | |
7230 | .set_rss = hclge_set_rss, | |
f7db940a | 7231 | .set_rss_tuple = hclge_set_rss_tuple, |
07d29954 | 7232 | .get_rss_tuple = hclge_get_rss_tuple, |
46a3df9f S |
7233 | .get_tc_size = hclge_get_tc_size, |
7234 | .get_mac_addr = hclge_get_mac_addr, | |
7235 | .set_mac_addr = hclge_set_mac_addr, | |
26483246 | 7236 | .do_ioctl = hclge_do_ioctl, |
46a3df9f S |
7237 | .add_uc_addr = hclge_add_uc_addr, |
7238 | .rm_uc_addr = hclge_rm_uc_addr, | |
7239 | .add_mc_addr = hclge_add_mc_addr, | |
7240 | .rm_mc_addr = hclge_rm_mc_addr, | |
7241 | .set_autoneg = hclge_set_autoneg, | |
7242 | .get_autoneg = hclge_get_autoneg, | |
7243 | .get_pauseparam = hclge_get_pauseparam, | |
61387774 | 7244 | .set_pauseparam = hclge_set_pauseparam, |
46a3df9f S |
7245 | .set_mtu = hclge_set_mtu, |
7246 | .reset_queue = hclge_reset_tqp, | |
7247 | .get_stats = hclge_get_stats, | |
7248 | .update_stats = hclge_update_stats, | |
7249 | .get_strings = hclge_get_strings, | |
7250 | .get_sset_count = hclge_get_sset_count, | |
7251 | .get_fw_version = hclge_get_fw_version, | |
7252 | .get_mdix_mode = hclge_get_mdix_mode, | |
391b5e93 | 7253 | .enable_vlan_filter = hclge_enable_vlan_filter, |
dc8131d8 | 7254 | .set_vlan_filter = hclge_set_vlan_filter, |
46a3df9f | 7255 | .set_vf_vlan_filter = hclge_set_vf_vlan_filter, |
052ece6d | 7256 | .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag, |
4ed340ab | 7257 | .reset_event = hclge_reset_event, |
09f2af64 PL |
7258 | .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info, |
7259 | .set_channels = hclge_set_channels, | |
482d2e9c | 7260 | .get_channels = hclge_get_channels, |
77b34110 FL |
7261 | .get_regs_len = hclge_get_regs_len, |
7262 | .get_regs = hclge_get_regs, | |
07f8e940 | 7263 | .set_led_id = hclge_set_led_id, |
0979aa0b | 7264 | .get_link_mode = hclge_get_link_mode, |
dd74f815 JS |
7265 | .add_fd_entry = hclge_add_fd_entry, |
7266 | .del_fd_entry = hclge_del_fd_entry, | |
6871af29 | 7267 | .del_all_fd_entries = hclge_del_all_fd_entries, |
05c2314f JS |
7268 | .get_fd_rule_cnt = hclge_get_fd_rule_cnt, |
7269 | .get_fd_rule_info = hclge_get_fd_rule_info, | |
7270 | .get_fd_all_rules = hclge_get_all_rules, | |
6871af29 | 7271 | .restore_fd_rules = hclge_restore_fd_entries, |
c17852a8 | 7272 | .enable_fd = hclge_enable_fd, |
46a3df9f S |
7273 | }; |
7274 | ||
7275 | static struct hnae3_ae_algo ae_algo = { | |
7276 | .ops = &hclge_ops, | |
46a3df9f S |
7277 | .pdev_id_table = ae_algo_pci_tbl, |
7278 | }; | |
7279 | ||
7280 | static int hclge_init(void) | |
7281 | { | |
7282 | pr_info("%s is initializing\n", HCLGE_NAME); | |
7283 | ||
854cf33a FL |
7284 | hnae3_register_ae_algo(&ae_algo); |
7285 | ||
7286 | return 0; | |
46a3df9f S |
7287 | } |
7288 | ||
7289 | static void hclge_exit(void) | |
7290 | { | |
7291 | hnae3_unregister_ae_algo(&ae_algo); | |
7292 | } | |
7293 | module_init(hclge_init); | |
7294 | module_exit(hclge_exit); | |
7295 | ||
7296 | MODULE_LICENSE("GPL"); | |
7297 | MODULE_AUTHOR("Huawei Tech. Co., Ltd."); | |
7298 | MODULE_DESCRIPTION("HCLGE Driver"); | |
7299 | MODULE_VERSION(HCLGE_MOD_VERSION); |