Commit | Line | Data |
---|---|---|
dde1a86e SM |
1 | // SPDX-License-Identifier: GPL-2.0+ |
2 | // Copyright (c) 2016-2017 Hisilicon Limited. | |
3 | ||
4 | #include "hclge_main.h" | |
5 | #include "hclge_mbx.h" | |
6 | #include "hnae3.h" | |
7347255e | 7 | #include "hclge_comm_rss.h" |
dde1a86e | 8 | |
d8355240 YM |
9 | #define CREATE_TRACE_POINTS |
10 | #include "hclge_trace.h" | |
11 | ||
/* Convert a standard errno into the u16 status the PF-to-VF mailbox carries. */
static u16 hclge_errno_to_resp(int errno)
{
	int resp = abs(errno);

	/* The status for pf to vf msg cmd is u16, constrained by HW.
	 * We need to keep the same type with it.
	 * The input errno is a standard error code, so it is safe to
	 * use a u16 to store the abs(errno).
	 */
	return (u16)resp;
}
23 | ||
/* hclge_gen_resp_to_vf: used to generate a synchronous response to VF when PF
 * receives a mailbox message from VF.
 * @vport: pointer to struct hclge_vport
 * @vf_to_pf_req: pointer to hclge_mbx_vf_to_pf_cmd of the original mailbox
 *		  message
 * @resp_msg: response payload; resp_msg->status tells the VF whether its
 *	      request succeeded (0) or failed (an errno value).
 */
static int hclge_gen_resp_to_vf(struct hclge_vport *vport,
				struct hclge_mbx_vf_to_pf_cmd *vf_to_pf_req,
				struct hclge_respond_to_vf_msg *resp_msg)
{
	struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf;
	struct hclge_dev *hdev = vport->back;
	enum hclge_comm_cmd_status status;
	struct hclge_desc desc;
	u16 resp;

	resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data;

	if (resp_msg->len > HCLGE_MBX_MAX_RESP_DATA_SIZE) {
		dev_err(&hdev->pdev->dev,
			"PF fail to gen resp to VF len %u exceeds max len %u\n",
			resp_msg->len,
			HCLGE_MBX_MAX_RESP_DATA_SIZE);
		/* If resp_msg->len is too long, set the value to max length
		 * and return the msg to VF
		 */
		resp_msg->len = HCLGE_MBX_MAX_RESP_DATA_SIZE;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false);

	resp_pf_to_vf->dest_vfid = vf_to_pf_req->mbx_src_vfid;
	resp_pf_to_vf->msg_len = vf_to_pf_req->msg_len;
	/* echo match_id so the VF can pair this response with its request */
	resp_pf_to_vf->match_id = vf_to_pf_req->match_id;

	resp_pf_to_vf->msg.code = cpu_to_le16(HCLGE_MBX_PF_VF_RESP);
	resp_pf_to_vf->msg.vf_mbx_msg_code =
				cpu_to_le16(vf_to_pf_req->msg.code);
	resp_pf_to_vf->msg.vf_mbx_msg_subcode =
				cpu_to_le16(vf_to_pf_req->msg.subcode);
	resp = hclge_errno_to_resp(resp_msg->status);
	if (resp < SHRT_MAX) {
		resp_pf_to_vf->msg.resp_status = cpu_to_le16(resp);
	} else {
		/* status field cannot represent this value; report EIO */
		dev_warn(&hdev->pdev->dev,
			 "failed to send response to VF, response status %u is out-of-bound\n",
			 resp);
		resp_pf_to_vf->msg.resp_status = cpu_to_le16(EIO);
	}

	if (resp_msg->len > 0)
		memcpy(resp_pf_to_vf->msg.resp_data, resp_msg->data,
		       resp_msg->len);

	trace_hclge_pf_mbx_send(hdev, resp_pf_to_vf);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"failed to send response to VF, status: %d, vfid: %u, code: %u, subcode: %u.\n",
			status, vf_to_pf_req->mbx_src_vfid,
			vf_to_pf_req->msg.code, vf_to_pf_req->msg.subcode);

	return status;
}
90 | ||
/* Send an asynchronous mailbox message from the PF to one VF.
 * @msg/@msg_len: payload copied into the mailbox descriptor
 * @mbx_opcode: mailbox message code understood by the VF
 * @dest_vfid: id of the destination VF
 * Returns 0 on success or a negative command status.
 */
static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
			      u16 mbx_opcode, u8 dest_vfid)
{
	struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf;
	struct hclge_dev *hdev = vport->back;
	enum hclge_comm_cmd_status status;
	struct hclge_desc desc;

	/* payload must fit into the fixed-size mailbox descriptor */
	if (msg_len > HCLGE_MBX_MAX_MSG_SIZE) {
		dev_err(&hdev->pdev->dev,
			"msg data length(=%u) exceeds maximum(=%u)\n",
			msg_len, HCLGE_MBX_MAX_MSG_SIZE);
		return -EMSGSIZE;
	}

	resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false);

	resp_pf_to_vf->dest_vfid = dest_vfid;
	resp_pf_to_vf->msg_len = msg_len;
	resp_pf_to_vf->msg.code = cpu_to_le16(mbx_opcode);

	memcpy(resp_pf_to_vf->msg.msg_data, msg, msg_len);

	trace_hclge_pf_mbx_send(hdev, resp_pf_to_vf);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"failed to send mailbox to VF, status: %d, vfid: %u, opcode: %u\n",
			status, dest_vfid, mbx_opcode);

	return status;
}
126 | ||
8a45c4f9 | 127 | int hclge_inform_vf_reset(struct hclge_vport *vport, u16 reset_type) |
fec73521 JS |
128 | { |
129 | __le16 msg_data; | |
130 | u8 dest_vfid; | |
131 | ||
132 | dest_vfid = (u8)vport->vport_id; | |
133 | msg_data = cpu_to_le16(reset_type); | |
134 | ||
135 | /* send this requested info to VF */ | |
136 | return hclge_send_mbx_msg(vport, (u8 *)&msg_data, sizeof(msg_data), | |
137 | HCLGE_MBX_ASSERTING_RESET, dest_vfid); | |
138 | } | |
139 | ||
dea846e8 | 140 | int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport) |
2bfbd35d | 141 | { |
aa5c4f17 | 142 | struct hclge_dev *hdev = vport->back; |
7061867b | 143 | u16 reset_type; |
2bfbd35d | 144 | |
7061867b HT |
145 | BUILD_BUG_ON(HNAE3_MAX_RESET > U16_MAX); |
146 | ||
aa5c4f17 HT |
147 | if (hdev->reset_type == HNAE3_FUNC_RESET) |
148 | reset_type = HNAE3_VF_PF_FUNC_RESET; | |
6b9a97ee HT |
149 | else if (hdev->reset_type == HNAE3_FLR_RESET) |
150 | reset_type = HNAE3_VF_FULL_RESET; | |
aa5c4f17 | 151 | else |
0cd86182 | 152 | reset_type = HNAE3_VF_FUNC_RESET; |
aa5c4f17 | 153 | |
fec73521 | 154 | return hclge_inform_vf_reset(vport, reset_type); |
2bfbd35d SM |
155 | } |
156 | ||
/* Free the dynamically allocated tail of a ring chain list.
 * The head node itself is caller-owned (typically on the stack) and is
 * intentionally not freed here.
 */
static void hclge_free_vector_ring_chain(struct hnae3_ring_chain_node *head)
{
	struct hnae3_ring_chain_node *chain_tmp, *chain;

	chain = head->next;

	while (chain) {
		chain_tmp = chain->next;
		kfree_sensitive(chain);
		chain = chain_tmp;
	}
}
169 | ||
/* hclge_get_ring_chain_from_mbx: get ring type & tqp id & int_gl idx
 * from mailbox message
 * msg[0]: opcode
 * msg[1]: <not relevant to this function>
 * msg[2]: ring_num
 * msg[3]: first ring type (TX|RX)
 * msg[4]: first tqp id
 * msg[5]: first int_gl idx
 * msg[6] ~ msg[14]: other ring type, tqp id and int_gl idx
 *
 * The first entry is written into the caller-provided @ring_chain head;
 * any further entries are kzalloc'ed and linked, and must be released with
 * hclge_free_vector_ring_chain(). Returns 0, -EINVAL on bad VF input, or
 * -ENOMEM on allocation failure (partially built chain is freed here).
 */
static int hclge_get_ring_chain_from_mbx(
			struct hclge_mbx_vf_to_pf_cmd *req,
			struct hnae3_ring_chain_node *ring_chain,
			struct hclge_vport *vport)
{
	struct hnae3_ring_chain_node *cur_chain, *new_chain;
	struct hclge_dev *hdev = vport->back;
	int ring_num;
	int i;

	ring_num = req->msg.ring_num;

	/* cap comes from the VF: never index param[] past its bound */
	if (ring_num > HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM)
		return -EINVAL;

	/* validate every tqp index before touching the tqp array */
	for (i = 0; i < ring_num; i++) {
		if (req->msg.param[i].tqp_index >= vport->nic.kinfo.rss_size) {
			dev_err(&hdev->pdev->dev, "tqp index(%u) is out of range(0-%u)\n",
				req->msg.param[i].tqp_index,
				vport->nic.kinfo.rss_size - 1U);
			return -EINVAL;
		}
	}

	hnae3_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B,
		      req->msg.param[0].ring_type);
	ring_chain->tqp_index =
			hclge_get_queue_id(vport->nic.kinfo.tqp
					   [req->msg.param[0].tqp_index]);
	hnae3_set_field(ring_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
			HNAE3_RING_GL_IDX_S, req->msg.param[0].int_gl_index);

	cur_chain = ring_chain;

	for (i = 1; i < ring_num; i++) {
		new_chain = kzalloc(sizeof(*new_chain), GFP_KERNEL);
		if (!new_chain)
			goto err;

		hnae3_set_bit(new_chain->flag, HNAE3_RING_TYPE_B,
			      req->msg.param[i].ring_type);

		new_chain->tqp_index =
			hclge_get_queue_id(vport->nic.kinfo.tqp
				[req->msg.param[i].tqp_index]);

		hnae3_set_field(new_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
				HNAE3_RING_GL_IDX_S,
				req->msg.param[i].int_gl_index);

		cur_chain->next = new_chain;
		cur_chain = new_chain;
	}

	return 0;
err:
	hclge_free_vector_ring_chain(ring_chain);
	return -ENOMEM;
}
239 | ||
/* Map (@en true) or unmap (@en false) the rings described in the mailbox
 * request to/from the VF interrupt vector named in the request.
 */
static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en,
					     struct hclge_mbx_vf_to_pf_cmd *req)
{
	struct hnae3_ring_chain_node ring_chain;
	int vector_id = req->msg.vector_id;
	int ret;

	memset(&ring_chain, 0, sizeof(ring_chain));
	/* chain tail nodes are heap-allocated; freed below in all cases */
	ret = hclge_get_ring_chain_from_mbx(req, &ring_chain, vport);
	if (ret)
		return ret;

	ret = hclge_bind_ring_with_vector(vport, vector_id, en, &ring_chain);

	hclge_free_vector_ring_chain(&ring_chain);

	return ret;
}
258 | ||
/* Query HW for the vector map of the first ring in @ring_chain; the raw
 * reply is left in @desc for the caller to parse.
 */
static int hclge_query_ring_vector_map(struct hclge_vport *vport,
				       struct hnae3_ring_chain_node *ring_chain,
				       struct hclge_desc *desc)
{
	struct hclge_ctrl_vector_chain_cmd *req =
		(struct hclge_ctrl_vector_chain_cmd *)desc->data;
	struct hclge_dev *hdev = vport->back;
	u16 tqp_type_and_id;
	int status;

	/* read variant (last arg true) of the add-ring-to-vector opcode */
	hclge_cmd_setup_basic_desc(desc, HCLGE_OPC_ADD_RING_TO_VECTOR, true);

	tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[0]);
	hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M, HCLGE_INT_TYPE_S,
			hnae3_get_bit(ring_chain->flag, HNAE3_RING_TYPE_B));
	hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M, HCLGE_TQP_ID_S,
			ring_chain->tqp_index);
	req->tqp_type_and_id[0] = cpu_to_le16(tqp_type_and_id);
	req->vfid = vport->vport_id;

	status = hclge_cmd_send(&hdev->hw, desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Get VF ring vector map info fail, status is %d.\n",
			status);

	return status;
}
287 | ||
/* Answer a VF query for the ring-to-vector mapping of one ring.
 * Only the first ring of the request is considered (ring_num is forced to
 * 1); the result is packed byte-wise into resp->data at the offsets below.
 */
static int hclge_get_vf_ring_vector_map(struct hclge_vport *vport,
					struct hclge_mbx_vf_to_pf_cmd *req,
					struct hclge_respond_to_vf_msg *resp)
{
#define HCLGE_LIMIT_RING_NUM			1
#define HCLGE_RING_TYPE_OFFSET			0
#define HCLGE_TQP_INDEX_OFFSET			1
#define HCLGE_INT_GL_INDEX_OFFSET		2
#define HCLGE_VECTOR_ID_OFFSET			3
#define HCLGE_RING_VECTOR_MAP_INFO_LEN		4
	struct hnae3_ring_chain_node ring_chain;
	struct hclge_desc desc;
	struct hclge_ctrl_vector_chain_cmd *data =
		(struct hclge_ctrl_vector_chain_cmd *)desc.data;
	u16 tqp_type_and_id;
	u8 int_gl_index;
	int ret;

	/* clamp the request to a single ring before building the chain */
	req->msg.ring_num = HCLGE_LIMIT_RING_NUM;

	memset(&ring_chain, 0, sizeof(ring_chain));
	ret = hclge_get_ring_chain_from_mbx(req, &ring_chain, vport);
	if (ret)
		return ret;

	ret = hclge_query_ring_vector_map(vport, &ring_chain, &desc);
	if (ret) {
		hclge_free_vector_ring_chain(&ring_chain);
		return ret;
	}

	tqp_type_and_id = le16_to_cpu(data->tqp_type_and_id[0]);
	int_gl_index = hnae3_get_field(tqp_type_and_id,
				       HCLGE_INT_GL_IDX_M, HCLGE_INT_GL_IDX_S);

	resp->data[HCLGE_RING_TYPE_OFFSET] = req->msg.param[0].ring_type;
	resp->data[HCLGE_TQP_INDEX_OFFSET] = req->msg.param[0].tqp_index;
	resp->data[HCLGE_INT_GL_INDEX_OFFSET] = int_gl_index;
	resp->data[HCLGE_VECTOR_ID_OFFSET] = data->int_vector_id_l;
	resp->len = HCLGE_RING_VECTOR_MAP_INFO_LEN;

	hclge_free_vector_ring_chain(&ring_chain);

	return ret;
}
333 | ||
1e6e7610 JS |
334 | static void hclge_set_vf_promisc_mode(struct hclge_vport *vport, |
335 | struct hclge_mbx_vf_to_pf_cmd *req) | |
dde1a86e | 336 | { |
5e7414cd | 337 | struct hnae3_handle *handle = &vport->nic; |
1e6e7610 | 338 | struct hclge_dev *hdev = vport->back; |
e196ec75 | 339 | |
1e6e7610 JS |
340 | vport->vf_info.request_uc_en = req->msg.en_uc; |
341 | vport->vf_info.request_mc_en = req->msg.en_mc; | |
342 | vport->vf_info.request_bc_en = req->msg.en_bc; | |
e196ec75 | 343 | |
5e7414cd JS |
344 | if (req->msg.en_limit_promisc) |
345 | set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags); | |
346 | else | |
347 | clear_bit(HNAE3_PFLAG_LIMIT_PROMISC, | |
348 | &handle->priv_flags); | |
349 | ||
1e6e7610 JS |
350 | set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state); |
351 | hclge_task_schedule(hdev, 0); | |
dde1a86e SM |
352 | } |
353 | ||
/* Handle a VF unicast MAC request: modify the device address, or add or
 * remove a unicast entry, selected by the mailbox subcode.
 * Returns 0 on success, -EPERM if the host pinned a different MAC,
 * -EINVAL for a malformed address, -EIO for an unknown subcode.
 */
static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
				    struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
#define HCLGE_MBX_VF_OLD_MAC_ADDR_OFFSET	6

	const u8 *mac_addr = (const u8 *)(mbx_req->msg.data);
	struct hclge_dev *hdev = vport->back;
	int status;

	if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_UC_MODIFY) {
		/* old address follows the new one in the message payload */
		const u8 *old_addr = (const u8 *)
		(&mbx_req->msg.data[HCLGE_MBX_VF_OLD_MAC_ADDR_OFFSET]);

		/* If VF MAC has been configured by the host then it
		 * cannot be overridden by the MAC specified by the VM.
		 */
		if (!is_zero_ether_addr(vport->vf_info.mac) &&
		    !ether_addr_equal(mac_addr, vport->vf_info.mac))
			return -EPERM;

		if (!is_valid_ether_addr(mac_addr))
			return -EINVAL;

		spin_lock_bh(&vport->mac_list_lock);
		status = hclge_update_mac_node_for_dev_addr(vport, old_addr,
							    mac_addr);
		spin_unlock_bh(&vport->mac_list_lock);
		/* service task syncs the updated list to hardware */
		hclge_task_schedule(hdev, 0);
	} else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_UC_ADD) {
		status = hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD,
					       HCLGE_MAC_ADDR_UC, mac_addr);
	} else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_UC_REMOVE) {
		status = hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL,
					       HCLGE_MAC_ADDR_UC, mac_addr);
	} else {
		dev_err(&hdev->pdev->dev,
			"failed to set unicast mac addr, unknown subcode %u\n",
			mbx_req->msg.subcode);
		return -EIO;
	}

	return status;
}
397 | ||
398 | static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport, | |
bb5790b7 | 399 | struct hclge_mbx_vf_to_pf_cmd *mbx_req) |
dde1a86e | 400 | { |
d3410018 | 401 | const u8 *mac_addr = (const u8 *)(mbx_req->msg.data); |
dde1a86e | 402 | struct hclge_dev *hdev = vport->back; |
dde1a86e | 403 | |
d3410018 | 404 | if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_MC_ADD) { |
ee4bcd3b JS |
405 | hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, |
406 | HCLGE_MAC_ADDR_MC, mac_addr); | |
d3410018 | 407 | } else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_MC_REMOVE) { |
ee4bcd3b JS |
408 | hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, |
409 | HCLGE_MAC_ADDR_MC, mac_addr); | |
dde1a86e SM |
410 | } else { |
411 | dev_err(&hdev->pdev->dev, | |
adcf738b | 412 | "failed to set mcast mac addr, unknown subcode %u\n", |
d3410018 | 413 | mbx_req->msg.subcode); |
dde1a86e SM |
414 | return -EIO; |
415 | } | |
416 | ||
ee4bcd3b | 417 | return 0; |
dde1a86e SM |
418 | } |
419 | ||
92f11ea1 | 420 | int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid, |
f2dbf0ed JS |
421 | u16 state, |
422 | struct hclge_vlan_info *vlan_info) | |
92f11ea1 | 423 | { |
767975e5 | 424 | struct hclge_mbx_port_base_vlan base_vlan; |
92f11ea1 | 425 | |
767975e5 JW |
426 | base_vlan.state = cpu_to_le16(state); |
427 | base_vlan.vlan_proto = cpu_to_le16(vlan_info->vlan_proto); | |
428 | base_vlan.qos = cpu_to_le16(vlan_info->qos); | |
429 | base_vlan.vlan_tag = cpu_to_le16(vlan_info->vlan_tag); | |
92f11ea1 | 430 | |
767975e5 | 431 | return hclge_send_mbx_msg(vport, (u8 *)&base_vlan, sizeof(base_vlan), |
4803d010 | 432 | HCLGE_MBX_PUSH_VLAN_INFO, vfid); |
92f11ea1 JS |
433 | } |
434 | ||
/* Dispatch VF VLAN mailbox subcodes: set/kill a VLAN filter, toggle RX
 * VLAN-tag stripping, query the port based VLAN state, or enable the vport
 * VLAN filter. Unknown subcodes return 0 silently.
 */
static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
				 struct hclge_mbx_vf_to_pf_cmd *mbx_req,
				 struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_MBX_VLAN_STATE_OFFSET	0
#define HCLGE_MBX_VLAN_INFO_OFFSET	2

	struct hnae3_handle *handle = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	struct hclge_vf_vlan_cfg *msg_cmd;
	__be16 proto;
	u16 vlan_id;

	msg_cmd = (struct hclge_vf_vlan_cfg *)&mbx_req->msg;
	switch (msg_cmd->subcode) {
	case HCLGE_MBX_VLAN_FILTER:
		/* mailbox carries little-endian; the filter API wants
		 * the protocol in network byte order
		 */
		proto = cpu_to_be16(le16_to_cpu(msg_cmd->proto));
		vlan_id = le16_to_cpu(msg_cmd->vlan);
		return hclge_set_vlan_filter(handle, proto, vlan_id,
					     msg_cmd->is_kill);
	case HCLGE_MBX_VLAN_RX_OFF_CFG:
		return hclge_en_hw_strip_rxvtag(handle, msg_cmd->enable);
	case HCLGE_MBX_GET_PORT_BASE_VLAN_STATE:
		/* vf does not need to know about the port based VLAN state
		 * on device HNAE3_DEVICE_VERSION_V3. So always return disable
		 * on device HNAE3_DEVICE_VERSION_V3 if vf queries the port
		 * based VLAN state.
		 */
		resp_msg->data[0] =
			hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3 ?
			HNAE3_PORT_BASE_VLAN_DISABLE :
			vport->port_base_vlan_cfg.state;
		resp_msg->len = sizeof(u8);
		return 0;
	case HCLGE_MBX_ENABLE_VLAN_FILTER:
		return hclge_enable_vport_vlan_filter(vport, msg_cmd->enable);
	default:
		return 0;
	}
}
475 | ||
a6d818e3 | 476 | static int hclge_set_vf_alive(struct hclge_vport *vport, |
bb5790b7 | 477 | struct hclge_mbx_vf_to_pf_cmd *mbx_req) |
a6d818e3 | 478 | { |
d3410018 | 479 | bool alive = !!mbx_req->msg.data[0]; |
a6d818e3 YL |
480 | int ret = 0; |
481 | ||
482 | if (alive) | |
483 | ret = hclge_vport_start(vport); | |
484 | else | |
485 | hclge_vport_stop(vport); | |
486 | ||
487 | return ret; | |
488 | } | |
489 | ||
/* Fill resp_msg with basic PF info for a VF: the enabled-TC bitmap and the
 * PF capability flags.
 */
static void hclge_get_basic_info(struct hclge_vport *vport,
				 struct hclge_respond_to_vf_msg *resp_msg)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hnae3_ae_dev *ae_dev = vport->back->ae_dev;
	struct hclge_basic_info *basic_info;
	unsigned int i;
	u32 pf_caps;

	basic_info = (struct hclge_basic_info *)resp_msg->data;
	/* one bit per enabled traffic class */
	for (i = 0; i < kinfo->tc_info.num_tc; i++)
		basic_info->hw_tc_map |= BIT(i);

	pf_caps = le32_to_cpu(basic_info->pf_caps);
	if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
		hnae3_set_bit(pf_caps, HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B, 1);

	basic_info->pf_caps = cpu_to_le32(pf_caps);
	resp_msg->len = HCLGE_MBX_MAX_RESP_DATA_SIZE;
}
510 | ||
bb5790b7 HT |
511 | static void hclge_get_vf_queue_info(struct hclge_vport *vport, |
512 | struct hclge_respond_to_vf_msg *resp_msg) | |
dde1a86e | 513 | { |
c0425944 | 514 | #define HCLGE_TQPS_RSS_INFO_LEN 6 |
bb5790b7 | 515 | |
416eedb6 | 516 | struct hclge_mbx_vf_queue_info *queue_info; |
dde1a86e SM |
517 | struct hclge_dev *hdev = vport->back; |
518 | ||
519 | /* get the queue related info */ | |
416eedb6 JW |
520 | queue_info = (struct hclge_mbx_vf_queue_info *)resp_msg->data; |
521 | queue_info->num_tqps = cpu_to_le16(vport->alloc_tqps); | |
522 | queue_info->rss_size = cpu_to_le16(vport->nic.kinfo.rss_size); | |
523 | queue_info->rx_buf_len = cpu_to_le16(hdev->rx_buf_len); | |
bb5790b7 | 524 | resp_msg->len = HCLGE_TQPS_RSS_INFO_LEN; |
dde1a86e SM |
525 | } |
526 | ||
/* Return the host-configured MAC address of this VF in the response. */
static void hclge_get_vf_mac_addr(struct hclge_vport *vport,
				  struct hclge_respond_to_vf_msg *resp_msg)
{
	ether_addr_copy(resp_msg->data, vport->vf_info.mac);
	resp_msg->len = ETH_ALEN;
}
533 | ||
/* Report the TX and RX descriptor ring depths to the VF. */
static void hclge_get_vf_queue_depth(struct hclge_vport *vport,
				     struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_TQPS_DEPTH_INFO_LEN	4

	struct hclge_mbx_vf_queue_depth *queue_depth;
	struct hclge_dev *hdev = vport->back;

	/* get the queue depth info */
	queue_depth = (struct hclge_mbx_vf_queue_depth *)resp_msg->data;
	queue_depth->num_tx_desc = cpu_to_le16(hdev->num_tx_desc);
	queue_depth->num_rx_desc = cpu_to_le16(hdev->num_rx_desc);

	resp_msg->len = HCLGE_TQPS_DEPTH_INFO_LEN;
}
549 | ||
/* Report the PF MAC media type and SFP module type to the VF. */
static void hclge_get_vf_media_type(struct hclge_vport *vport,
				    struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_VF_MEDIA_TYPE_OFFSET	0
#define HCLGE_VF_MODULE_TYPE_OFFSET	1
#define HCLGE_VF_MEDIA_TYPE_LENGTH	2

	struct hclge_dev *hdev = vport->back;

	resp_msg->data[HCLGE_VF_MEDIA_TYPE_OFFSET] =
		hdev->hw.mac.media_type;
	resp_msg->data[HCLGE_VF_MODULE_TYPE_OFFSET] =
		hdev->hw.mac.module_type;
	resp_msg->len = HCLGE_VF_MEDIA_TYPE_LENGTH;
}
565 | ||
/* Push link status, speed and duplex to a VF, honouring any link state
 * forced administratively via ndo_set_vf_link_state.
 */
int hclge_push_vf_link_status(struct hclge_vport *vport)
{
#define HCLGE_VF_LINK_STATE_UP		1U
#define HCLGE_VF_LINK_STATE_DOWN	0U

	struct hclge_mbx_link_status link_info;
	struct hclge_dev *hdev = vport->back;
	u16 link_status;

	/* mac.link can only be 0 or 1 */
	switch (vport->vf_info.link_state) {
	case IFLA_VF_LINK_STATE_ENABLE:
		link_status = HCLGE_VF_LINK_STATE_UP;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		link_status = HCLGE_VF_LINK_STATE_DOWN;
		break;
	case IFLA_VF_LINK_STATE_AUTO:
	default:
		/* follow the real PF link state */
		link_status = (u16)hdev->hw.mac.link;
		break;
	}

	link_info.link_status = cpu_to_le16(link_status);
	link_info.speed = cpu_to_le32(hdev->hw.mac.speed);
	link_info.duplex = cpu_to_le16(hdev->hw.mac.duplex);
	link_info.flag = HCLGE_MBX_PUSH_LINK_STATUS_EN;

	/* send this requested info to VF */
	return hclge_send_mbx_msg(vport, (u8 *)&link_info, sizeof(link_info),
				  HCLGE_MBX_LINK_STAT_CHANGE, vport->vport_id);
}
598 | ||
/* Send the requested link-mode word (supported or advertising, selected by
 * msg.data[0]) back to the requesting VF.
 * NOTE(review): the hclge_send_mbx_msg() result is discarded, so a send
 * failure here is silent — presumably acceptable for this best-effort push;
 * confirm before relying on delivery.
 */
static void hclge_get_link_mode(struct hclge_vport *vport,
				struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
#define HCLGE_SUPPORTED	1
	struct hclge_mbx_link_mode link_mode;
	struct hclge_dev *hdev = vport->back;
	unsigned long advertising;
	unsigned long supported;
	unsigned long send_data;
	u8 dest_vfid;

	advertising = hdev->hw.mac.advertising[0];
	supported = hdev->hw.mac.supported[0];
	dest_vfid = mbx_req->mbx_src_vfid;
	send_data = mbx_req->msg.data[0] == HCLGE_SUPPORTED ? supported :
							      advertising;
	link_mode.idx = cpu_to_le16((u16)mbx_req->msg.data[0]);
	link_mode.link_mode = cpu_to_le64(send_data);

	hclge_send_mbx_msg(vport, (u8 *)&link_mode, sizeof(link_mode),
			   HCLGE_MBX_LINK_STAT_MODE, dest_vfid);
}
621 | ||
/* Handle a VF queue-reset request. The PF resets all of the VF's queues in
 * one operation, so only queue_id 0 triggers a reset; any other id simply
 * reports completion.
 */
static int hclge_mbx_reset_vf_queue(struct hclge_vport *vport,
				    struct hclge_mbx_vf_to_pf_cmd *mbx_req,
				    struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_RESET_ALL_QUEUE_DONE	1U
	struct hnae3_handle *handle = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	u16 queue_id;
	int ret;

	queue_id = le16_to_cpu(*(__le16 *)mbx_req->msg.data);
	resp_msg->data[0] = HCLGE_RESET_ALL_QUEUE_DONE;
	resp_msg->len = sizeof(u8);

	/* pf will reset vf's all queues at a time. So it is unnecessary
	 * to reset queues if queue_id > 0, just return success.
	 */
	if (queue_id > 0)
		return 0;

	ret = hclge_reset_tqp(handle);
	if (ret)
		dev_err(&hdev->pdev->dev, "failed to reset vf %u queue, ret = %d\n",
			vport->vport_id - HCLGE_VF_VPORT_START_NUM, ret);

	return ret;
}
649 | ||
bb5790b7 | 650 | static int hclge_reset_vf(struct hclge_vport *vport) |
2bfbd35d SM |
651 | { |
652 | struct hclge_dev *hdev = vport->back; | |
2bfbd35d | 653 | |
adcf738b | 654 | dev_warn(&hdev->pdev->dev, "PF received VF reset request from VF %u!", |
311c0aaa | 655 | vport->vport_id - HCLGE_VF_VPORT_START_NUM); |
2bfbd35d | 656 | |
bb5790b7 | 657 | return hclge_func_reset_cmd(hdev, vport->vport_id); |
2bfbd35d SM |
658 | } |
659 | ||
/* Push configuration a VF may have missed while down: the current link
 * status, a pending reset notification, and (pre-V3 devices) the port
 * based VLAN info.
 */
static void hclge_notify_vf_config(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclge_port_base_vlan_config *vlan_cfg;
	int ret;

	hclge_push_vf_link_status(vport);
	if (test_bit(HCLGE_VPORT_NEED_NOTIFY_RESET, &vport->need_notify)) {
		ret = hclge_inform_vf_reset(vport, HNAE3_VF_PF_FUNC_RESET);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"failed to inform VF %u reset!",
				vport->vport_id - HCLGE_VF_VPORT_START_NUM);
			return;
		}
		/* a reset notification supersedes all other pending ones */
		vport->need_notify = 0;
		return;
	}

	if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
	    test_bit(HCLGE_VPORT_NEED_NOTIFY_VF_VLAN, &vport->need_notify)) {
		vlan_cfg = &vport->port_base_vlan_cfg;
		ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
							vport->vport_id,
							vlan_cfg->state,
							&vlan_cfg->vlan_info);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"failed to inform VF %u port base vlan!",
				vport->vport_id - HCLGE_VF_VPORT_START_NUM);
			return;
		}
		clear_bit(HCLGE_VPORT_NEED_NOTIFY_VF_VLAN, &vport->need_notify);
	}
}
696 | ||
/* Refresh a VF's keep-alive timestamp; if the VF was marked not-alive,
 * revive it and re-push any configuration it may have missed.
 */
static void hclge_vf_keep_alive(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;

	vport->last_active_jiffies = jiffies;

	if (test_bit(HCLGE_VPORT_STATE_INITED, &vport->state) &&
	    !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
		set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
		dev_info(&hdev->pdev->dev, "VF %u is alive!",
			 vport->vport_id - HCLGE_VF_VPORT_START_NUM);
		hclge_notify_vf_config(vport);
	}
}
711 | ||
818f1675 YL |
712 | static int hclge_set_vf_mtu(struct hclge_vport *vport, |
713 | struct hclge_mbx_vf_to_pf_cmd *mbx_req) | |
714 | { | |
416eedb6 | 715 | struct hclge_mbx_mtu_info *mtu_info; |
818f1675 YL |
716 | u32 mtu; |
717 | ||
416eedb6 JW |
718 | mtu_info = (struct hclge_mbx_mtu_info *)mbx_req->msg.data; |
719 | mtu = le32_to_cpu(mtu_info->mtu); | |
818f1675 | 720 | |
bb5790b7 | 721 | return hclge_set_vport_mtu(vport, mtu); |
818f1675 YL |
722 | } |
723 | ||
/* Translate a VF-local queue id into the PF global queue id and return it
 * in the response. Rejects out-of-range ids coming from the VF.
 */
static int hclge_get_queue_id_in_pf(struct hclge_vport *vport,
				    struct hclge_mbx_vf_to_pf_cmd *mbx_req,
				    struct hclge_respond_to_vf_msg *resp_msg)
{
	struct hnae3_handle *handle = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	u16 queue_id, qid_in_pf;

	queue_id = le16_to_cpu(*(__le16 *)mbx_req->msg.data);
	/* validate untrusted VF input before the lookup */
	if (queue_id >= handle->kinfo.num_tqps) {
		dev_err(&hdev->pdev->dev, "Invalid queue id(%u) from VF %u\n",
			queue_id, mbx_req->mbx_src_vfid);
		return -EINVAL;
	}

	qid_in_pf = hclge_covert_handle_qid_global(&vport->nic, queue_id);
	*(__le16 *)resp_msg->data = cpu_to_le16(qid_in_pf);
	resp_msg->len = sizeof(qid_in_pf);
	return 0;
}
744 | ||
/* Copy one 8-byte chunk of the RSS hash key, selected by the index in the
 * request, into the response. The index is bounds-checked against the key
 * size since it comes from the VF.
 */
static int hclge_get_rss_key(struct hclge_vport *vport,
			     struct hclge_mbx_vf_to_pf_cmd *mbx_req,
			     struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_RSS_MBX_RESP_LEN	8
	struct hclge_dev *hdev = vport->back;
	struct hclge_comm_rss_cfg *rss_cfg;
	u8 index;

	index = mbx_req->msg.data[0];
	rss_cfg = &hdev->rss_cfg;

	/* Check the query index of rss_hash_key from VF, make sure no
	 * more than the size of rss_hash_key.
	 */
	if (((index + 1) * HCLGE_RSS_MBX_RESP_LEN) >
	      sizeof(rss_cfg->rss_hash_key)) {
		dev_warn(&hdev->pdev->dev,
			 "failed to get the rss hash key, the index(%u) invalid !\n",
			 index);
		return -EINVAL;
	}

	memcpy(resp_msg->data,
	       &rss_cfg->rss_hash_key[index * HCLGE_RSS_MBX_RESP_LEN],
	       HCLGE_RSS_MBX_RESP_LEN);
	resp_msg->len = HCLGE_RSS_MBX_RESP_LEN;
	return 0;
}
774 | ||
ed8fb4b2 JS |
775 | static void hclge_link_fail_parse(struct hclge_dev *hdev, u8 link_fail_code) |
776 | { | |
777 | switch (link_fail_code) { | |
778 | case HCLGE_LF_REF_CLOCK_LOST: | |
779 | dev_warn(&hdev->pdev->dev, "Reference clock lost!\n"); | |
780 | break; | |
781 | case HCLGE_LF_XSFP_TX_DISABLE: | |
782 | dev_warn(&hdev->pdev->dev, "SFP tx is disabled!\n"); | |
783 | break; | |
784 | case HCLGE_LF_XSFP_ABSENT: | |
785 | dev_warn(&hdev->pdev->dev, "SFP is absent!\n"); | |
786 | break; | |
787 | default: | |
788 | break; | |
789 | } | |
790 | } | |
791 | ||
792 | static void hclge_handle_link_change_event(struct hclge_dev *hdev, | |
793 | struct hclge_mbx_vf_to_pf_cmd *req) | |
794 | { | |
ed8fb4b2 JS |
795 | hclge_task_schedule(hdev, 0); |
796 | ||
d3410018 YM |
797 | if (!req->msg.subcode) |
798 | hclge_link_fail_parse(hdev, req->msg.data[0]); | |
ed8fb4b2 JS |
799 | } |
800 | ||
7c4bfcb0 XW |
801 | static bool hclge_cmd_crq_empty(struct hclge_hw *hw) |
802 | { | |
8e2288ca | 803 | u32 tail = hclge_read_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG); |
7c4bfcb0 | 804 | |
eaa5607d | 805 | return tail == hw->hw.cmq.crq.next_to_use; |
7c4bfcb0 XW |
806 | } |
807 | ||
b18bf305 HT |
808 | static void hclge_handle_ncsi_error(struct hclge_dev *hdev) |
809 | { | |
810 | struct hnae3_ae_dev *ae_dev = hdev->ae_dev; | |
811 | ||
812 | ae_dev->ops->set_default_reset_request(ae_dev, HNAE3_GLOBAL_RESET); | |
813 | dev_warn(&hdev->pdev->dev, "requesting reset due to NCSI error\n"); | |
814 | ae_dev->ops->reset_event(hdev->pdev, NULL); | |
815 | } | |
816 | ||
039ba863 JS |
817 | static void hclge_handle_vf_tbl(struct hclge_vport *vport, |
818 | struct hclge_mbx_vf_to_pf_cmd *mbx_req) | |
819 | { | |
820 | struct hclge_dev *hdev = vport->back; | |
821 | struct hclge_vf_vlan_cfg *msg_cmd; | |
822 | ||
823 | msg_cmd = (struct hclge_vf_vlan_cfg *)&mbx_req->msg; | |
824 | if (msg_cmd->subcode == HCLGE_MBX_VPORT_LIST_CLEAR) { | |
825 | hclge_rm_vport_all_mac_table(vport, true, HCLGE_MAC_ADDR_UC); | |
826 | hclge_rm_vport_all_mac_table(vport, true, HCLGE_MAC_ADDR_MC); | |
827 | hclge_rm_vport_all_vlan_table(vport, true); | |
828 | } else { | |
829 | dev_warn(&hdev->pdev->dev, "Invalid cmd(%u)\n", | |
830 | msg_cmd->subcode); | |
831 | } | |
832 | } | |
833 | ||
09431ed8 HL |
834 | static int |
835 | hclge_mbx_map_ring_to_vector_handler(struct hclge_mbx_ops_param *param) | |
836 | { | |
837 | return hclge_map_unmap_ring_to_vf_vector(param->vport, true, | |
838 | param->req); | |
839 | } | |
840 | ||
841 | static int | |
842 | hclge_mbx_unmap_ring_to_vector_handler(struct hclge_mbx_ops_param *param) | |
843 | { | |
844 | return hclge_map_unmap_ring_to_vf_vector(param->vport, false, | |
845 | param->req); | |
846 | } | |
847 | ||
848 | static int | |
849 | hclge_mbx_get_ring_vector_map_handler(struct hclge_mbx_ops_param *param) | |
850 | { | |
851 | int ret; | |
852 | ||
853 | ret = hclge_get_vf_ring_vector_map(param->vport, param->req, | |
854 | param->resp_msg); | |
855 | if (ret) | |
856 | dev_err(¶m->vport->back->pdev->dev, | |
857 | "PF fail(%d) to get VF ring vector map\n", | |
858 | ret); | |
859 | return ret; | |
860 | } | |
861 | ||
862 | static int hclge_mbx_set_promisc_mode_handler(struct hclge_mbx_ops_param *param) | |
863 | { | |
864 | hclge_set_vf_promisc_mode(param->vport, param->req); | |
865 | return 0; | |
866 | } | |
867 | ||
868 | static int hclge_mbx_set_unicast_handler(struct hclge_mbx_ops_param *param) | |
869 | { | |
870 | int ret; | |
871 | ||
872 | ret = hclge_set_vf_uc_mac_addr(param->vport, param->req); | |
873 | if (ret) | |
874 | dev_err(¶m->vport->back->pdev->dev, | |
875 | "PF fail(%d) to set VF UC MAC Addr\n", | |
876 | ret); | |
877 | return ret; | |
878 | } | |
879 | ||
880 | static int hclge_mbx_set_multicast_handler(struct hclge_mbx_ops_param *param) | |
881 | { | |
882 | int ret; | |
883 | ||
884 | ret = hclge_set_vf_mc_mac_addr(param->vport, param->req); | |
885 | if (ret) | |
886 | dev_err(¶m->vport->back->pdev->dev, | |
887 | "PF fail(%d) to set VF MC MAC Addr\n", | |
888 | ret); | |
889 | return ret; | |
890 | } | |
891 | ||
892 | static int hclge_mbx_set_vlan_handler(struct hclge_mbx_ops_param *param) | |
893 | { | |
894 | int ret; | |
895 | ||
896 | ret = hclge_set_vf_vlan_cfg(param->vport, param->req, param->resp_msg); | |
897 | if (ret) | |
898 | dev_err(¶m->vport->back->pdev->dev, | |
899 | "PF failed(%d) to config VF's VLAN\n", | |
900 | ret); | |
901 | return ret; | |
902 | } | |
903 | ||
904 | static int hclge_mbx_set_alive_handler(struct hclge_mbx_ops_param *param) | |
905 | { | |
906 | int ret; | |
907 | ||
908 | ret = hclge_set_vf_alive(param->vport, param->req); | |
909 | if (ret) | |
910 | dev_err(¶m->vport->back->pdev->dev, | |
911 | "PF failed(%d) to set VF's ALIVE\n", | |
912 | ret); | |
913 | return ret; | |
914 | } | |
915 | ||
916 | static int hclge_mbx_get_qinfo_handler(struct hclge_mbx_ops_param *param) | |
917 | { | |
918 | hclge_get_vf_queue_info(param->vport, param->resp_msg); | |
919 | return 0; | |
920 | } | |
921 | ||
922 | static int hclge_mbx_get_qdepth_handler(struct hclge_mbx_ops_param *param) | |
923 | { | |
924 | hclge_get_vf_queue_depth(param->vport, param->resp_msg); | |
925 | return 0; | |
926 | } | |
927 | ||
928 | static int hclge_mbx_get_basic_info_handler(struct hclge_mbx_ops_param *param) | |
929 | { | |
930 | hclge_get_basic_info(param->vport, param->resp_msg); | |
931 | return 0; | |
932 | } | |
933 | ||
934 | static int hclge_mbx_get_link_status_handler(struct hclge_mbx_ops_param *param) | |
935 | { | |
936 | int ret; | |
937 | ||
938 | ret = hclge_push_vf_link_status(param->vport); | |
939 | if (ret) | |
940 | dev_err(¶m->vport->back->pdev->dev, | |
941 | "failed to inform link stat to VF, ret = %d\n", | |
942 | ret); | |
943 | return ret; | |
944 | } | |
945 | ||
946 | static int hclge_mbx_queue_reset_handler(struct hclge_mbx_ops_param *param) | |
947 | { | |
948 | return hclge_mbx_reset_vf_queue(param->vport, param->req, | |
949 | param->resp_msg); | |
950 | } | |
951 | ||
952 | static int hclge_mbx_reset_handler(struct hclge_mbx_ops_param *param) | |
953 | { | |
954 | return hclge_reset_vf(param->vport); | |
955 | } | |
956 | ||
957 | static int hclge_mbx_keep_alive_handler(struct hclge_mbx_ops_param *param) | |
958 | { | |
959 | hclge_vf_keep_alive(param->vport); | |
960 | return 0; | |
961 | } | |
962 | ||
963 | static int hclge_mbx_set_mtu_handler(struct hclge_mbx_ops_param *param) | |
964 | { | |
965 | int ret; | |
966 | ||
967 | ret = hclge_set_vf_mtu(param->vport, param->req); | |
968 | if (ret) | |
969 | dev_err(¶m->vport->back->pdev->dev, | |
970 | "VF fail(%d) to set mtu\n", ret); | |
971 | return ret; | |
972 | } | |
973 | ||
974 | static int hclge_mbx_get_qid_in_pf_handler(struct hclge_mbx_ops_param *param) | |
975 | { | |
976 | return hclge_get_queue_id_in_pf(param->vport, param->req, | |
977 | param->resp_msg); | |
978 | } | |
979 | ||
980 | static int hclge_mbx_get_rss_key_handler(struct hclge_mbx_ops_param *param) | |
981 | { | |
982 | return hclge_get_rss_key(param->vport, param->req, param->resp_msg); | |
983 | } | |
984 | ||
985 | static int hclge_mbx_get_link_mode_handler(struct hclge_mbx_ops_param *param) | |
986 | { | |
987 | hclge_get_link_mode(param->vport, param->req); | |
988 | return 0; | |
989 | } | |
990 | ||
991 | static int | |
992 | hclge_mbx_get_vf_flr_status_handler(struct hclge_mbx_ops_param *param) | |
993 | { | |
994 | hclge_rm_vport_all_mac_table(param->vport, false, | |
995 | HCLGE_MAC_ADDR_UC); | |
996 | hclge_rm_vport_all_mac_table(param->vport, false, | |
997 | HCLGE_MAC_ADDR_MC); | |
998 | hclge_rm_vport_all_vlan_table(param->vport, false); | |
999 | return 0; | |
1000 | } | |
1001 | ||
1002 | static int hclge_mbx_vf_uninit_handler(struct hclge_mbx_ops_param *param) | |
1003 | { | |
1004 | hclge_rm_vport_all_mac_table(param->vport, true, | |
1005 | HCLGE_MAC_ADDR_UC); | |
1006 | hclge_rm_vport_all_mac_table(param->vport, true, | |
1007 | HCLGE_MAC_ADDR_MC); | |
1008 | hclge_rm_vport_all_vlan_table(param->vport, true); | |
fec73521 | 1009 | param->vport->mps = 0; |
09431ed8 HL |
1010 | return 0; |
1011 | } | |
1012 | ||
1013 | static int hclge_mbx_get_media_type_handler(struct hclge_mbx_ops_param *param) | |
1014 | { | |
1015 | hclge_get_vf_media_type(param->vport, param->resp_msg); | |
1016 | return 0; | |
1017 | } | |
1018 | ||
1019 | static int hclge_mbx_push_link_status_handler(struct hclge_mbx_ops_param *param) | |
1020 | { | |
1021 | hclge_handle_link_change_event(param->vport->back, param->req); | |
1022 | return 0; | |
1023 | } | |
1024 | ||
1025 | static int hclge_mbx_get_mac_addr_handler(struct hclge_mbx_ops_param *param) | |
1026 | { | |
1027 | hclge_get_vf_mac_addr(param->vport, param->resp_msg); | |
1028 | return 0; | |
1029 | } | |
1030 | ||
1031 | static int hclge_mbx_ncsi_error_handler(struct hclge_mbx_ops_param *param) | |
1032 | { | |
1033 | hclge_handle_ncsi_error(param->vport->back); | |
1034 | return 0; | |
1035 | } | |
1036 | ||
1037 | static int hclge_mbx_handle_vf_tbl_handler(struct hclge_mbx_ops_param *param) | |
1038 | { | |
1039 | hclge_handle_vf_tbl(param->vport, param->req); | |
1040 | return 0; | |
1041 | } | |
1042 | ||
/* Dispatch table for VF->PF mailbox requests, indexed by mailbox opcode.
 * A NULL slot means the opcode is unsupported and is rejected by the
 * dispatcher with an error log.
 */
static const hclge_mbx_ops_fn hclge_mbx_ops_list[HCLGE_MBX_OPCODE_MAX] = {
	[HCLGE_MBX_RESET] = hclge_mbx_reset_handler,
	[HCLGE_MBX_SET_UNICAST] = hclge_mbx_set_unicast_handler,
	[HCLGE_MBX_SET_MULTICAST] = hclge_mbx_set_multicast_handler,
	[HCLGE_MBX_SET_VLAN] = hclge_mbx_set_vlan_handler,
	[HCLGE_MBX_MAP_RING_TO_VECTOR] = hclge_mbx_map_ring_to_vector_handler,
	[HCLGE_MBX_UNMAP_RING_TO_VECTOR] = hclge_mbx_unmap_ring_to_vector_handler,
	[HCLGE_MBX_SET_PROMISC_MODE] = hclge_mbx_set_promisc_mode_handler,
	[HCLGE_MBX_GET_QINFO] = hclge_mbx_get_qinfo_handler,
	[HCLGE_MBX_GET_QDEPTH] = hclge_mbx_get_qdepth_handler,
	[HCLGE_MBX_GET_BASIC_INFO] = hclge_mbx_get_basic_info_handler,
	[HCLGE_MBX_GET_RSS_KEY] = hclge_mbx_get_rss_key_handler,
	[HCLGE_MBX_GET_MAC_ADDR] = hclge_mbx_get_mac_addr_handler,
	[HCLGE_MBX_GET_LINK_STATUS] = hclge_mbx_get_link_status_handler,
	[HCLGE_MBX_QUEUE_RESET] = hclge_mbx_queue_reset_handler,
	[HCLGE_MBX_KEEP_ALIVE] = hclge_mbx_keep_alive_handler,
	[HCLGE_MBX_SET_ALIVE] = hclge_mbx_set_alive_handler,
	[HCLGE_MBX_SET_MTU] = hclge_mbx_set_mtu_handler,
	[HCLGE_MBX_GET_QID_IN_PF] = hclge_mbx_get_qid_in_pf_handler,
	[HCLGE_MBX_GET_LINK_MODE] = hclge_mbx_get_link_mode_handler,
	[HCLGE_MBX_GET_MEDIA_TYPE] = hclge_mbx_get_media_type_handler,
	[HCLGE_MBX_VF_UNINIT] = hclge_mbx_vf_uninit_handler,
	[HCLGE_MBX_HANDLE_VF_TBL] = hclge_mbx_handle_vf_tbl_handler,
	[HCLGE_MBX_GET_RING_VECTOR_MAP] = hclge_mbx_get_ring_vector_map_handler,
	[HCLGE_MBX_GET_VF_FLR_STATUS] = hclge_mbx_get_vf_flr_status_handler,
	[HCLGE_MBX_PUSH_LINK_STATUS] = hclge_mbx_push_link_status_handler,
	[HCLGE_MBX_NCSI_ERROR] = hclge_mbx_ncsi_error_handler,
};
1071 | ||
1072 | static void hclge_mbx_request_handling(struct hclge_mbx_ops_param *param) | |
1073 | { | |
1074 | hclge_mbx_ops_fn cmd_func = NULL; | |
1075 | struct hclge_dev *hdev; | |
1076 | int ret = 0; | |
1077 | ||
1078 | hdev = param->vport->back; | |
1079 | cmd_func = hclge_mbx_ops_list[param->req->msg.code]; | |
669554c5 | 1080 | if (!cmd_func) { |
09431ed8 HL |
1081 | dev_err(&hdev->pdev->dev, |
1082 | "un-supported mailbox message, code = %u\n", | |
1083 | param->req->msg.code); | |
669554c5 JS |
1084 | return; |
1085 | } | |
1086 | ret = cmd_func(param); | |
09431ed8 HL |
1087 | |
1088 | /* PF driver should not reply IMP */ | |
1089 | if (hnae3_get_bit(param->req->mbx_need_resp, HCLGE_MBX_NEED_RESP_B) && | |
1090 | param->req->msg.code < HCLGE_MBX_GET_VF_FLR_STATUS) { | |
1091 | param->resp_msg->status = ret; | |
1092 | if (time_is_before_jiffies(hdev->last_mbx_scheduled + | |
1093 | HCLGE_MBX_SCHED_TIMEOUT)) | |
1094 | dev_warn(&hdev->pdev->dev, | |
1095 | "resp vport%u mbx(%u,%u) late\n", | |
1096 | param->req->mbx_src_vfid, | |
1097 | param->req->msg.code, | |
1098 | param->req->msg.subcode); | |
1099 | ||
1100 | hclge_gen_resp_to_vf(param->vport, param->req, param->resp_msg); | |
1101 | } | |
1102 | } | |
1103 | ||
dde1a86e SM |
/* hclge_mbx_handler - drain the PF's command receive queue (CRQ) of VF->PF
 * mailbox messages, dispatching each valid one, then write the consumer
 * index back so firmware knows how far the PF has read.
 */
void hclge_mbx_handler(struct hclge_dev *hdev)
{
	struct hclge_comm_cmq_ring *crq = &hdev->hw.hw.cmq.crq;
	struct hclge_respond_to_vf_msg resp_msg;
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclge_mbx_ops_param param;
	struct hclge_desc *desc;
	unsigned int flag;

	param.resp_msg = &resp_msg;
	/* handle all the mailbox requests in the queue */
	while (!hclge_cmd_crq_empty(&hdev->hw)) {
		/* bail out without touching the ring if the command queue is
		 * being torn down / re-initialized
		 */
		if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE,
			     &hdev->hw.hw.comm_state)) {
			dev_warn(&hdev->pdev->dev,
				 "command queue needs re-initializing\n");
			return;
		}

		desc = &crq->desc[crq->next_to_use];
		req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data;

		flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
		/* descriptor must be marked valid by HW and carry a vfid
		 * that fits the vport array (0..num_req_vfs)
		 */
		if (unlikely(!hnae3_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B) ||
			     req->mbx_src_vfid > hdev->num_req_vfs)) {
			dev_warn(&hdev->pdev->dev,
				 "dropped invalid mailbox message, code = %u, vfid = %u\n",
				 req->msg.code, req->mbx_src_vfid);

			/* dropping/not processing this invalid message */
			crq->desc[crq->next_to_use].flag = 0;
			hclge_mbx_ring_ptr_move_crq(crq);
			continue;
		}

		trace_hclge_pf_mbx_get(hdev, req);

		/* clear the resp_msg before processing every mailbox message */
		memset(&resp_msg, 0, sizeof(resp_msg));
		param.vport = &hdev->vport[req->mbx_src_vfid];
		param.req = req;
		hclge_mbx_request_handling(&param);

		/* mark the slot consumed and advance the ring pointer */
		crq->desc[crq->next_to_use].flag = 0;
		hclge_mbx_ring_ptr_move_crq(crq);
	}

	/* Write back CMDQ_RQ header pointer, M7 need this pointer */
	hclge_write_dev(&hdev->hw, HCLGE_COMM_NIC_CRQ_HEAD_REG,
			crq->next_to_use);
}