Commit | Line | Data |
---|---|---|
fe56b9e6 | 1 | /* QLogic qed NIC Driver |
e8f1cb50 | 2 | * Copyright (c) 2015-2017 QLogic Corporation |
fe56b9e6 | 3 | * |
e8f1cb50 MY |
4 | * This software is available to you under a choice of one of two |
5 | * licenses. You may choose to be licensed under the terms of the GNU | |
6 | * General Public License (GPL) Version 2, available from the file | |
7 | * COPYING in the main directory of this source tree, or the | |
8 | * OpenIB.org BSD license below: | |
9 | * | |
10 | * Redistribution and use in source and binary forms, with or | |
11 | * without modification, are permitted provided that the following | |
12 | * conditions are met: | |
13 | * | |
14 | * - Redistributions of source code must retain the above | |
15 | * copyright notice, this list of conditions and the following | |
16 | * disclaimer. | |
17 | * | |
18 | * - Redistributions in binary form must reproduce the above | |
19 | * copyright notice, this list of conditions and the following | |
20 | * disclaimer in the documentation and /or other materials | |
21 | * provided with the distribution. | |
22 | * | |
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
30 | * SOFTWARE. | |
fe56b9e6 YM |
31 | */ |
32 | ||
33 | #include <linux/types.h> | |
34 | #include <asm/byteorder.h> | |
35 | #include <linux/delay.h> | |
36 | #include <linux/errno.h> | |
37 | #include <linux/kernel.h> | |
fe56b9e6 | 38 | #include <linux/slab.h> |
5529bad9 | 39 | #include <linux/spinlock.h> |
fe56b9e6 | 40 | #include <linux/string.h> |
0fefbfba | 41 | #include <linux/etherdevice.h> |
fe56b9e6 | 42 | #include "qed.h" |
39651abd | 43 | #include "qed_dcbx.h" |
fe56b9e6 YM |
44 | #include "qed_hsi.h" |
45 | #include "qed_hw.h" | |
46 | #include "qed_mcp.h" | |
47 | #include "qed_reg_addr.h" | |
1408cc1f YM |
48 | #include "qed_sriov.h" |
49 | ||
fe56b9e6 YM |
50 | #define CHIP_MCP_RESP_ITER_US 10 |
51 | ||
52 | #define QED_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */ | |
53 | #define QED_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */ | |
54 | ||
55 | #define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \ | |
56 | qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \ | |
57 | _val) | |
58 | ||
59 | #define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \ | |
60 | qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset)) | |
61 | ||
62 | #define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \ | |
63 | DRV_INNER_WR(p_hwfn, _p_ptt, drv_mb_addr, \ | |
64 | offsetof(struct public_drv_mb, _field), _val) | |
65 | ||
66 | #define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \ | |
67 | DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \ | |
68 | offsetof(struct public_drv_mb, _field)) | |
69 | ||
70 | #define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \ | |
71 | DRV_ID_PDA_COMP_VER_SHIFT) | |
72 | ||
73 | #define MCP_BYTES_PER_MBIT_SHIFT 17 | |
74 | ||
75 | bool qed_mcp_is_init(struct qed_hwfn *p_hwfn) | |
76 | { | |
77 | if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base) | |
78 | return false; | |
79 | return true; | |
80 | } | |
81 | ||
/* Locate and cache the address of this function's port section inside the
 * MCP public data area, so later port-level reads (e.g. link status,
 * transceiver data) can address it directly.
 */
void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	/* Read the offset/size descriptor of the PUBLIC_PORT section */
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PORT);
	u32 mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, addr);

	/* Index the section by this function's port number */
	p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
						   MFW_PORT(p_hwfn));
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "port_addr = 0x%x, port_id 0x%02x\n",
		   p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
}
94 | ||
/* Copy the current MFW mailbox contents from shared memory into the
 * driver's mfw_mb_cur shadow buffer, converting each dword from the
 * MFW's big-endian representation to CPU byte order.
 */
void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
	u32 tmp, i;

	/* Nothing to read if the MCP public area was never discovered */
	if (!p_hwfn->mcp_info->public_base)
		return;

	for (i = 0; i < length; i++) {
		/* '+ sizeof(u32)' skips the leading length field of the
		 * mailbox section.
		 */
		tmp = qed_rd(p_hwfn, p_ptt,
			     p_hwfn->mcp_info->mfw_mb_addr +
			     (i << 2) + sizeof(u32));

		/* The MB data is actually BE; Need to force it to cpu */
		((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
			be32_to_cpu((__force __be32)tmp);
	}
}
113 | ||
114 | int qed_mcp_free(struct qed_hwfn *p_hwfn) | |
115 | { | |
116 | if (p_hwfn->mcp_info) { | |
117 | kfree(p_hwfn->mcp_info->mfw_mb_cur); | |
118 | kfree(p_hwfn->mcp_info->mfw_mb_shadow); | |
119 | } | |
120 | kfree(p_hwfn->mcp_info); | |
121 | ||
122 | return 0; | |
123 | } | |
124 | ||
/* Discover the MCP shared-memory layout: locate the public data area and
 * the driver/MFW mailboxes, then sample the current mailbox sequence,
 * FW pulse sequence and MCP reset-history counter.
 * Returns 0 even when shared memory is not set up (public_base reads as
 * zero); callers detect that case via qed_mcp_is_init().
 */
static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_info *p_info = p_hwfn->mcp_info;
	u32 drv_mb_offsize, mfw_mb_offsize;
	u32 mcp_pf_id = MCP_PF_ID(p_hwfn);

	p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
	if (!p_info->public_base)
		return 0;

	p_info->public_base |= GRCBASE_MCP;

	/* Calculate the driver and MFW mailbox address */
	drv_mb_offsize = qed_rd(p_hwfn, p_ptt,
				SECTION_OFFSIZE_ADDR(p_info->public_base,
						     PUBLIC_DRV_MB));
	p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
		   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);

	/* Set the MFW MB address */
	mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
				SECTION_OFFSIZE_ADDR(p_info->public_base,
						     PUBLIC_MFW_MB));
	p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
	p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt, p_info->mfw_mb_addr);

	/* Get the current driver mailbox sequence before sending
	 * the first command
	 */
	p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
			     DRV_MSG_SEQ_NUMBER_MASK;

	/* Get current FW pulse sequence */
	p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
				DRV_PULSE_SEQ_MASK;

	/* NOTE(review): the register read is 32-bit but the value is
	 * truncated to u16 here, while qed_do_mcp_cmd() compares mcp_hist
	 * against the full register value - confirm the mcp_hist field
	 * width in qed_mcp.h.
	 */
	p_info->mcp_hist = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	return 0;
}
167 | ||
1a635e48 | 168 | int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) |
fe56b9e6 YM |
169 | { |
170 | struct qed_mcp_info *p_info; | |
171 | u32 size; | |
172 | ||
173 | /* Allocate mcp_info structure */ | |
60fffb3b | 174 | p_hwfn->mcp_info = kzalloc(sizeof(*p_hwfn->mcp_info), GFP_KERNEL); |
fe56b9e6 YM |
175 | if (!p_hwfn->mcp_info) |
176 | goto err; | |
177 | p_info = p_hwfn->mcp_info; | |
178 | ||
179 | if (qed_load_mcp_offsets(p_hwfn, p_ptt) != 0) { | |
180 | DP_NOTICE(p_hwfn, "MCP is not initialized\n"); | |
181 | /* Do not free mcp_info here, since public_base indicate that | |
182 | * the MCP is not initialized | |
183 | */ | |
184 | return 0; | |
185 | } | |
186 | ||
187 | size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32); | |
60fffb3b | 188 | p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL); |
83aeb933 | 189 | p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL); |
fe56b9e6 YM |
190 | if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr) |
191 | goto err; | |
192 | ||
5529bad9 TT |
193 | /* Initialize the MFW spinlock */ |
194 | spin_lock_init(&p_info->lock); | |
fe56b9e6 YM |
195 | |
196 | return 0; | |
197 | ||
198 | err: | |
fe56b9e6 YM |
199 | qed_mcp_free(p_hwfn); |
200 | return -ENOMEM; | |
201 | } | |
202 | ||
5529bad9 TT |
/* Locks the MFW mailbox of a PF to ensure a single access.
 * The lock is achieved in most cases by holding a spinlock, causing other
 * threads to wait till a previous access is done.
 * In some cases (currently when a [UN]LOAD_REQ commands are sent), the single
 * access is achieved by setting a blocking flag, which will fail other
 * competing contexts to send their mailboxes.
 *
 * Returns 0 with the spinlock held (or, for [UN]LOAD_REQ, with only the
 * blocking flag set and the spinlock already released), or -EBUSY if a
 * [UN]LOAD_REQ is currently in flight. Must be paired with
 * qed_mcp_mb_unlock() using the same cmd.
 */
static int qed_mcp_mb_lock(struct qed_hwfn *p_hwfn, u32 cmd)
{
	spin_lock_bh(&p_hwfn->mcp_info->lock);

	/* The spinlock shouldn't be acquired when the mailbox command is
	 * [UN]LOAD_REQ, since the engine is locked by the MFW, and a parallel
	 * pending [UN]LOAD_REQ command of another PF together with a spinlock
	 * (i.e. interrupts are disabled) - can lead to a deadlock.
	 * It is assumed that for a single PF, no other mailbox commands can be
	 * sent from another context while sending LOAD_REQ, and that any
	 * parallel commands to UNLOAD_REQ can be cancelled.
	 */
	if (cmd == DRV_MSG_CODE_LOAD_DONE || cmd == DRV_MSG_CODE_UNLOAD_DONE)
		p_hwfn->mcp_info->block_mb_sending = false;

	if (p_hwfn->mcp_info->block_mb_sending) {
		DP_NOTICE(p_hwfn,
			  "Trying to send a MFW mailbox command [0x%x] in parallel to [UN]LOAD_REQ. Aborting.\n",
			  cmd);
		spin_unlock_bh(&p_hwfn->mcp_info->lock);
		return -EBUSY;
	}

	/* For [UN]LOAD_REQ the blocking flag alone guards the mailbox;
	 * release the spinlock before the (long) MFW transaction.
	 */
	if (cmd == DRV_MSG_CODE_LOAD_REQ || cmd == DRV_MSG_CODE_UNLOAD_REQ) {
		p_hwfn->mcp_info->block_mb_sending = true;
		spin_unlock_bh(&p_hwfn->mcp_info->lock);
	}

	return 0;
}
240 | ||
1a635e48 | 241 | static void qed_mcp_mb_unlock(struct qed_hwfn *p_hwfn, u32 cmd) |
5529bad9 TT |
242 | { |
243 | if (cmd != DRV_MSG_CODE_LOAD_REQ && cmd != DRV_MSG_CODE_UNLOAD_REQ) | |
244 | spin_unlock_bh(&p_hwfn->mcp_info->lock); | |
245 | } | |
246 | ||
/* Request the MFW to reset itself and wait for the reset to take effect,
 * observed as a change in the MISCS_REG_GENERIC_POR_0 history register.
 * Returns 0 on success, -EAGAIN if the MFW did not reset within the
 * polling budget, or -EBUSY if the mailbox is blocked by a [UN]LOAD_REQ.
 */
int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
	u8 delay = CHIP_MCP_RESP_ITER_US;
	u32 org_mcp_reset_seq, cnt = 0;
	int rc = 0;

	/* Ensure that only a single thread is accessing the mailbox at a
	 * certain time.
	 */
	rc = qed_mcp_mb_lock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
	if (rc != 0)
		return rc;

	/* Set drv command along with the updated sequence */
	org_mcp_reset_seq = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header,
		  (DRV_MSG_CODE_MCP_RESET | seq));

	do {
		/* Wait for MFW response */
		udelay(delay);
		/* Give the FW up to 500 msec (50*1000 iterations of 10usec) */
	} while ((org_mcp_reset_seq == qed_rd(p_hwfn, p_ptt,
					      MISCS_REG_GENERIC_POR_0)) &&
		 (cnt++ < QED_MCP_RESET_RETRIES));

	if (org_mcp_reset_seq !=
	    qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "MCP was reset after %d usec\n", cnt * delay);
	} else {
		DP_ERR(p_hwfn, "Failed to reset MCP\n");
		rc = -EAGAIN;
	}

	qed_mcp_mb_unlock(p_hwfn, DRV_MSG_CODE_MCP_RESET);

	return rc;
}
287 | ||
/* Low-level mailbox transaction: write 'param' and 'cmd' (tagged with an
 * incremented sequence number) to the driver mailbox, then poll the FW
 * mailbox header until the FW echoes the sequence or the retry budget is
 * exhausted. On success *o_mcp_resp holds the FW response code and
 * *o_mcp_param the FW parameter; on timeout returns -EAGAIN.
 * Caller must hold the mailbox lock (qed_mcp_mb_lock()).
 */
static int qed_do_mcp_cmd(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  u32 cmd,
			  u32 param,
			  u32 *o_mcp_resp,
			  u32 *o_mcp_param)
{
	u8 delay = CHIP_MCP_RESP_ITER_US;
	u32 seq, cnt = 1, actual_mb_seq;
	int rc = 0;

	/* Get actual driver mailbox sequence */
	/* NOTE(review): actual_mb_seq is read but not used afterwards in
	 * this function.
	 */
	actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
			DRV_MSG_SEQ_NUMBER_MASK;

	/* Use MCP history register to check if MCP reset occurred between
	 * init time and now.
	 */
	if (p_hwfn->mcp_info->mcp_hist !=
	    qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
		DP_VERBOSE(p_hwfn, QED_MSG_SP, "Rereading MCP offsets\n");
		qed_load_mcp_offsets(p_hwfn, p_ptt);
		qed_mcp_cmd_port_init(p_hwfn, p_ptt);
	}
	seq = ++p_hwfn->mcp_info->drv_mb_seq;

	/* Set drv param */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param);

	/* Set drv command along with the updated sequence */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq));

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "wrote command (%x) to MFW MB param 0x%08x\n",
		   (cmd | seq), param);

	do {
		/* Wait for MFW response */
		udelay(delay);
		*o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);

		/* Give the FW up to 5 sec (500*1000 iterations of 10usec) */
	} while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) &&
		 (cnt++ < QED_DRV_MB_MAX_RETRIES));

	/* NOTE(review): cnt * delay is in usec, although the message below
	 * says "ms".
	 */
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "[after %d ms] read (%x) seq is (%x) from FW MB\n",
		   cnt * delay, *o_mcp_resp, seq);

	/* Is this a reply to our command? */
	if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) {
		*o_mcp_resp &= FW_MSG_CODE_MASK;
		/* Get the MCP param */
		*o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
	} else {
		/* FW BUG! */
		DP_ERR(p_hwfn, "MFW failed to respond [cmd 0x%x param 0x%x]\n",
		       cmd, param);
		*o_mcp_resp = 0;
		rc = -EAGAIN;
	}
	return rc;
}
351 | ||
5529bad9 TT |
/* Perform a locked mailbox transaction, optionally copying request data
 * into the shmem union area beforehand (p_mb_params->p_data_src) and
 * copying response data out of it afterwards (p_mb_params->p_data_dst).
 * Returns 0 on success, -EBUSY if the MCP is not initialized or the
 * mailbox is blocked, or the error from qed_do_mcp_cmd().
 */
static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_mcp_mb_params *p_mb_params)
{
	u32 union_data_addr;

	int rc;

	/* MCP not initialized */
	if (!qed_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
		return -EBUSY;
	}

	union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
			  offsetof(struct public_drv_mb, union_data);

	/* Ensure that only a single thread is accessing the mailbox at a
	 * certain time.
	 */
	rc = qed_mcp_mb_lock(p_hwfn, p_mb_params->cmd);
	if (rc)
		return rc;

	/* Copy the full request union into shmem before the command */
	if (p_mb_params->p_data_src != NULL)
		qed_memcpy_to(p_hwfn, p_ptt, union_data_addr,
			      p_mb_params->p_data_src,
			      sizeof(*p_mb_params->p_data_src));

	rc = qed_do_mcp_cmd(p_hwfn, p_ptt, p_mb_params->cmd,
			    p_mb_params->param, &p_mb_params->mcp_resp,
			    &p_mb_params->mcp_param);

	/* Read back the full response union even if the command failed */
	if (p_mb_params->p_data_dst != NULL)
		qed_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
				union_data_addr,
				sizeof(*p_mb_params->p_data_dst));

	qed_mcp_mb_unlock(p_hwfn, p_mb_params->cmd);

	return rc;
}
394 | ||
5529bad9 TT |
/* Send a simple mailbox command and return the FW response code and
 * parameter. For UNLOAD_DONE with WoL enabled, the WoL MAC is passed to
 * the MFW through the union data.
 */
int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
		struct qed_ptt *p_ptt,
		u32 cmd,
		u32 param,
		u32 *o_mcp_resp,
		u32 *o_mcp_param)
{
	struct qed_mcp_mb_params mb_params;
	union drv_union_data data_src;
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	memset(&data_src, 0, sizeof(data_src));
	mb_params.cmd = cmd;
	mb_params.param = param;

	/* In case of UNLOAD_DONE, set the primary MAC */
	if ((cmd == DRV_MSG_CODE_UNLOAD_DONE) &&
	    (p_hwfn->cdev->wol_config == QED_OV_WOL_ENABLED)) {
		u8 *p_mac = p_hwfn->cdev->wol_mac;

		/* Pack the 6 MAC bytes into the upper/lower words */
		data_src.wol_mac.mac_upper = p_mac[0] << 8 | p_mac[1];
		data_src.wol_mac.mac_lower = p_mac[2] << 24 | p_mac[3] << 16 |
					     p_mac[4] << 8 | p_mac[5];

		DP_VERBOSE(p_hwfn,
			   (QED_MSG_SP | NETIF_MSG_IFDOWN),
			   "Setting WoL MAC: %pM --> [%08x,%08x]\n",
			   p_mac, data_src.wol_mac.mac_upper,
			   data_src.wol_mac.mac_lower);

		mb_params.p_data_src = &data_src;
	}

	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	return 0;
}
438 | ||
4102426f TT |
/* Issue a mailbox command whose response carries a data payload (e.g.
 * NVM reads): run the command, then copy the response data from the
 * shmem union into o_buf. The transfer size is taken from the FW's
 * mcp_param and also reported via *o_txn_size.
 * NOTE(review): *o_txn_size comes straight from the FW and is used as
 * the memcpy length - the caller must size o_buf for the maximum union
 * payload; confirm the upper bound against callers.
 */
int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       u32 cmd,
		       u32 param,
		       u32 *o_mcp_resp,
		       u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf)
{
	struct qed_mcp_mb_params mb_params;
	union drv_union_data union_data;
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	mb_params.p_data_dst = &union_data;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	*o_txn_size = *o_mcp_param;
	memcpy(o_buf, &union_data.raw_data, *o_txn_size);

	return 0;
}
466 | ||
/* Send LOAD_REQ to the MFW, carrying the driver version string and
 * compatibility information, and return the MFW's load response code via
 * p_load_code. Returns 0 on success, a negative errno if the mailbox
 * failed, or -EBUSY if the MFW refused the load request.
 */
int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt, u32 *p_load_code)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	struct qed_mcp_mb_params mb_params;
	union drv_union_data union_data;
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	/* Load Request */
	mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
	mb_params.param = PDA_COMP | DRV_ID_MCP_HSI_VER_CURRENT |
			  cdev->drv_type;
	/* NOTE(review): only ver_str is initialized here; the rest of
	 * union_data is uninitialized stack memory that gets written to
	 * shmem - confirm the MFW ignores it for LOAD_REQ.
	 */
	memcpy(&union_data.ver_str, cdev->ver_str, MCP_DRV_VER_STR_SIZE);
	mb_params.p_data_src = &union_data;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);

	/* if mcp fails to respond we must abort */
	if (rc) {
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
		return rc;
	}

	*p_load_code = mb_params.mcp_resp;

	/* If MFW refused (e.g. other port is in diagnostic mode) we
	 * must abort. This can happen in the following cases:
	 * - Other port is in diagnostic mode
	 * - Previously loaded function on the engine is not compliant with
	 *   the requester.
	 * - MFW cannot cope with the requester's DRV_MFW_HSI_VERSION.
	 */
	if (!(*p_load_code) ||
	    ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI) ||
	    ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_PDA) ||
	    ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG)) {
		DP_ERR(p_hwfn, "MCP refused load request, aborting\n");
		return -EBUSY;
	}

	return 0;
}
510 | ||
0b55e27d YM |
/* MFW notification handler: read the per-path bitmap of VFs the MFW has
 * disabled (FLR-ed) and, if any VF is newly marked, schedule the IOV
 * workqueue to run the FLR flow.
 */
static void qed_mcp_handle_vf_flr(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PATH);
	u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
	u32 path_addr = SECTION_ADDR(mfw_path_offsize,
				     QED_PATH_ID(p_hwfn));
	u32 disabled_vfs[VF_MAX_STATIC / 32];
	int i;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "Reading Disabled VF information from [offset %08x], path_addr %08x\n",
		   mfw_path_offsize, path_addr);

	/* Read the bitmap one dword (32 VFs) at a time */
	for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
		disabled_vfs[i] = qed_rd(p_hwfn, p_ptt,
					 path_addr +
					 offsetof(struct public_path,
						  mcp_vf_disabled) +
					 sizeof(u32) * i);
		DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
			   "FLR-ed VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
	}

	if (qed_iov_mark_vf_flr(p_hwfn, disabled_vfs))
		qed_schedule_iov(p_hwfn, QED_IOV_WQ_FLR_FLAG);
}
541 | ||
/* Tell the MFW the driver finished handling FLR for the VFs set in
 * 'vfs_to_ack' (a VF_MAX_STATIC-bit bitmap), then clear the
 * corresponding ack bits in this function's shmem area.
 * Returns 0 on success or -EBUSY if the mailbox command failed.
 */
int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt, u32 *vfs_to_ack)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_func_offsize = qed_rd(p_hwfn, p_ptt, addr);
	u32 func_addr = SECTION_ADDR(mfw_func_offsize,
				     MCP_PF_ID(p_hwfn));
	struct qed_mcp_mb_params mb_params;
	union drv_union_data union_data;
	int rc;
	int i;

	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
			   "Acking VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
	/* NOTE(review): only ack_vf_disabled is initialized; the rest of
	 * union_data is uninitialized stack memory written to shmem -
	 * confirm the MFW ignores it for this command.
	 */
	memcpy(&union_data.ack_vf_disabled, vfs_to_ack, VF_MAX_STATIC / 8);
	mb_params.p_data_src = &union_data;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to pass ACK for VF flr to MFW\n");
		return -EBUSY;
	}

	/* Clear the ACK bits */
	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		qed_wr(p_hwfn, p_ptt,
		       func_addr +
		       offsetof(struct public_func, drv_ack_vf_disabled) +
		       i * sizeof(u32), 0);

	return rc;
}
579 | ||
334c03b5 ZN |
/* MFW notification handler: read the transceiver state from the port's
 * shmem area and log whether a module is present or unplugged.
 */
static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn,
					      struct qed_ptt *p_ptt)
{
	u32 transceiver_state;

	transceiver_state = qed_rd(p_hwfn, p_ptt,
				   p_hwfn->mcp_info->port_addr +
				   offsetof(struct public_port,
					    transceiver_data));

	DP_VERBOSE(p_hwfn,
		   (NETIF_MSG_HW | QED_MSG_SP),
		   "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n",
		   transceiver_state,
		   (u32)(p_hwfn->mcp_info->port_addr +
			 offsetof(struct public_port, transceiver_data)));

	/* Extract just the state field from the raw dword */
	transceiver_state = GET_FIELD(transceiver_state,
				      ETH_TRANSCEIVER_STATE);

	if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
		DP_NOTICE(p_hwfn, "Transceiver is present.\n");
	else
		DP_NOTICE(p_hwfn, "Transceiver is unplugged.\n");
}
605 | ||
/* Process a link-status change notification, or reset the cached link
 * state when b_reset is set: read link_status from the port shmem,
 * decode speed/duplex, re-apply min/max bandwidth configuration, decode
 * link-partner abilities and flow control, then notify the upper layer
 * via qed_link_update().
 */
static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt, bool b_reset)
{
	struct qed_mcp_link_state *p_link;
	u8 max_bw, min_bw;
	u32 status = 0;

	p_link = &p_hwfn->mcp_info->link_output;
	memset(p_link, 0, sizeof(*p_link));
	if (!b_reset) {
		status = qed_rd(p_hwfn, p_ptt,
				p_hwfn->mcp_info->port_addr +
				offsetof(struct public_port, link_status));
		DP_VERBOSE(p_hwfn, (NETIF_MSG_LINK | QED_MSG_SP),
			   "Received link update [0x%08x] from mfw [Addr 0x%x]\n",
			   status,
			   (u32)(p_hwfn->mcp_info->port_addr +
				 offsetof(struct public_port, link_status)));
	} else {
		/* Reset case: the memset above already cleared the state */
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Resetting link indications\n");
		return;
	}

	/* Report link-down until the driver has requested link-up */
	if (p_hwfn->b_drv_link_init)
		p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
	else
		p_link->link_up = false;

	p_link->full_duplex = true;
	switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
	case LINK_STATUS_SPEED_AND_DUPLEX_100G:
		p_link->speed = 100000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_50G:
		p_link->speed = 50000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_40G:
		p_link->speed = 40000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_25G:
		p_link->speed = 25000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_20G:
		p_link->speed = 20000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_10G:
		p_link->speed = 10000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
		p_link->full_duplex = false;
		/* Fall-through */
	case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
		p_link->speed = 1000;
		break;
	default:
		p_link->speed = 0;
	}

	/* line_speed is the operational rate: zero while link is down */
	if (p_link->link_up && p_link->speed)
		p_link->line_speed = p_link->speed;
	else
		p_link->line_speed = 0;

	max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
	min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;

	/* Max bandwidth configuration */
	__qed_configure_pf_max_bandwidth(p_hwfn, p_ptt, p_link, max_bw);

	/* Min bandwidth configuration */
	__qed_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw);
	qed_configure_vp_wfq_on_link_change(p_hwfn->cdev, p_link->min_pf_rate);

	p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
	p_link->an_complete = !!(status &
				 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
	p_link->parallel_detection = !!(status &
					LINK_STATUS_PARALLEL_DETECTION_USED);
	p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);

	/* Translate the link partner's advertised speeds */
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_1G_FD : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_1G_HD : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_10G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_20G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_25G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_40G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_50G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_100G : 0;

	p_link->partner_tx_flow_ctrl_en =
		!!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
	p_link->partner_rx_flow_ctrl_en =
		!!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);

	switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
	case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
		p_link->partner_adv_pause = QED_LINK_PARTNER_SYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
		p_link->partner_adv_pause = QED_LINK_PARTNER_ASYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
		p_link->partner_adv_pause = QED_LINK_PARTNER_BOTH_PAUSE;
		break;
	default:
		p_link->partner_adv_pause = 0;
	}

	p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);

	qed_link_update(p_hwfn);
}
735 | ||
/* Configure the PHY via the MFW: when b_up, build an eth_phy_cfg from
 * the cached link_input parameters and send INIT_PHY; otherwise send
 * LINK_RESET and clear the local link state.
 * Returns 0 on success or the mailbox error.
 */
int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
{
	struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
	struct qed_mcp_mb_params mb_params;
	union drv_union_data union_data;
	struct eth_phy_cfg *phy_cfg;
	int rc = 0;
	u32 cmd;

	/* Set the shmem configuration according to params */
	phy_cfg = &union_data.drv_phy_cfg;
	memset(phy_cfg, 0, sizeof(*phy_cfg));
	cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
	/* A forced speed is only meaningful when autoneg is off */
	if (!params->speed.autoneg)
		phy_cfg->speed = params->speed.forced_speed;
	phy_cfg->pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
	phy_cfg->pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
	phy_cfg->pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
	phy_cfg->adv_speed = params->speed.advertised_speeds;
	phy_cfg->loopback_mode = params->loopback_mode;

	/* Record whether the driver requested link-up; link notifications
	 * are reported as 'down' while this is false (see
	 * qed_mcp_handle_link_change()).
	 */
	p_hwfn->b_drv_link_init = b_up;

	if (b_up) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, features 0x%08x\n",
			   phy_cfg->speed,
			   phy_cfg->pause,
			   phy_cfg->adv_speed,
			   phy_cfg->loopback_mode,
			   phy_cfg->feature_config_flags);
	} else {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Resetting link\n");
	}

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.p_data_src = &union_data;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);

	/* if mcp fails to respond we must abort */
	if (rc) {
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
		return rc;
	}

	/* Reset the link status if needed */
	if (!b_up)
		qed_mcp_handle_link_change(p_hwfn, p_ptt, true);

	return 0;
}
789 | ||
6c754246 SRK |
/* MFW notification handler: the MFW requested protocol statistics of a
 * given type; collect them from the driver and send them back via the
 * GET_STATS mailbox command (best effort - the mailbox result is not
 * checked).
 */
static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt,
					enum MFW_DRV_MSG_TYPE type)
{
	enum qed_mcp_protocol_type stats_type;
	union qed_mcp_protocol_stats stats;
	struct qed_mcp_mb_params mb_params;
	union drv_union_data union_data;
	u32 hsi_param;

	/* Map the MFW message type to a driver stats type + HSI parameter */
	switch (type) {
	case MFW_DRV_MSG_GET_LAN_STATS:
		stats_type = QED_MCP_LAN_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
		break;
	case MFW_DRV_MSG_GET_FCOE_STATS:
		stats_type = QED_MCP_FCOE_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_FCOE;
		break;
	case MFW_DRV_MSG_GET_ISCSI_STATS:
		stats_type = QED_MCP_ISCSI_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_ISCSI;
		break;
	case MFW_DRV_MSG_GET_RDMA_STATS:
		stats_type = QED_MCP_RDMA_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_RDMA;
		break;
	default:
		DP_NOTICE(p_hwfn, "Invalid protocol type %d\n", type);
		return;
	}

	qed_get_protocol_stats(p_hwfn->cdev, stats_type, &stats);

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_GET_STATS;
	mb_params.param = hsi_param;
	memcpy(&union_data, &stats, sizeof(stats));
	mb_params.p_data_src = &union_data;
	qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}
831 | ||
4b01e519 MC |
832 | static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn, |
833 | struct public_func *p_shmem_info) | |
834 | { | |
835 | struct qed_mcp_function_info *p_info; | |
836 | ||
837 | p_info = &p_hwfn->mcp_info->func_info; | |
838 | ||
839 | p_info->bandwidth_min = (p_shmem_info->config & | |
840 | FUNC_MF_CFG_MIN_BW_MASK) >> | |
841 | FUNC_MF_CFG_MIN_BW_SHIFT; | |
842 | if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) { | |
843 | DP_INFO(p_hwfn, | |
844 | "bandwidth minimum out of bounds [%02x]. Set to 1\n", | |
845 | p_info->bandwidth_min); | |
846 | p_info->bandwidth_min = 1; | |
847 | } | |
848 | ||
849 | p_info->bandwidth_max = (p_shmem_info->config & | |
850 | FUNC_MF_CFG_MAX_BW_MASK) >> | |
851 | FUNC_MF_CFG_MAX_BW_SHIFT; | |
852 | if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) { | |
853 | DP_INFO(p_hwfn, | |
854 | "bandwidth maximum out of bounds [%02x]. Set to 100\n", | |
855 | p_info->bandwidth_max); | |
856 | p_info->bandwidth_max = 100; | |
857 | } | |
858 | } | |
859 | ||
860 | static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn, | |
861 | struct qed_ptt *p_ptt, | |
1a635e48 | 862 | struct public_func *p_data, int pfid) |
4b01e519 MC |
863 | { |
864 | u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, | |
865 | PUBLIC_FUNC); | |
866 | u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr); | |
867 | u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid); | |
868 | u32 i, size; | |
869 | ||
870 | memset(p_data, 0, sizeof(*p_data)); | |
871 | ||
1a635e48 | 872 | size = min_t(u32, sizeof(*p_data), QED_SECTION_SIZE(mfw_path_offsize)); |
4b01e519 MC |
873 | for (i = 0; i < size / sizeof(u32); i++) |
874 | ((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt, | |
875 | func_addr + (i << 2)); | |
876 | return size; | |
877 | } | |
878 | ||
1a635e48 | 879 | static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) |
4b01e519 MC |
880 | { |
881 | struct qed_mcp_function_info *p_info; | |
882 | struct public_func shmem_info; | |
883 | u32 resp = 0, param = 0; | |
884 | ||
1a635e48 | 885 | qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn)); |
4b01e519 MC |
886 | |
887 | qed_read_pf_bandwidth(p_hwfn, &shmem_info); | |
888 | ||
889 | p_info = &p_hwfn->mcp_info->func_info; | |
890 | ||
a64b02d5 | 891 | qed_configure_pf_min_bandwidth(p_hwfn->cdev, p_info->bandwidth_min); |
4b01e519 MC |
892 | qed_configure_pf_max_bandwidth(p_hwfn->cdev, p_info->bandwidth_max); |
893 | ||
894 | /* Acknowledge the MFW */ | |
895 | qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp, | |
896 | ¶m); | |
897 | } | |
898 | ||
cc875c2e YM |
/* Process pending MFW -> driver notifications.
 *
 * Reads the MFW mailbox, dispatches a handler for every message word that
 * changed relative to the shadow copy, ACKs all message words back to the
 * MFW (in big-endian, as the MFW expects), and finally refreshes the
 * shadow so the next invocation only sees new changes.
 *
 * Returns 0 on success; -EINVAL if an unimplemented message was seen or
 * if the indication arrived with no new message at all.
 */
int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt)
{
	struct qed_mcp_info *info = p_hwfn->mcp_info;
	int rc = 0;
	bool found = false;
	u16 i;

	DP_VERBOSE(p_hwfn, QED_MSG_SP, "Received message from MFW\n");

	/* Read Messages from MFW */
	qed_mcp_read_mb(p_hwfn, p_ptt);

	/* Compare current messages to old ones */
	for (i = 0; i < info->mfw_mb_length; i++) {
		if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
			continue;

		found = true;

		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
			   i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);

		/* The word's index inside the mailbox identifies the event */
		switch (i) {
		case MFW_DRV_MSG_LINK_CHANGE:
			qed_mcp_handle_link_change(p_hwfn, p_ptt, false);
			break;
		case MFW_DRV_MSG_VF_DISABLED:
			qed_mcp_handle_vf_flr(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_LLDP_DATA_UPDATED:
			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
						  QED_DCBX_REMOTE_LLDP_MIB);
			break;
		case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
						  QED_DCBX_REMOTE_MIB);
			break;
		case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
						  QED_DCBX_OPERATIONAL_MIB);
			break;
		case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
			qed_mcp_handle_transceiver_change(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_GET_LAN_STATS:
		case MFW_DRV_MSG_GET_FCOE_STATS:
		case MFW_DRV_MSG_GET_ISCSI_STATS:
		case MFW_DRV_MSG_GET_RDMA_STATS:
			qed_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
			break;
		case MFW_DRV_MSG_BW_UPDATE:
			qed_mcp_update_bw(p_hwfn, p_ptt);
			break;
		default:
			DP_NOTICE(p_hwfn, "Unimplemented MFW message %d\n", i);
			rc = -EINVAL;
		}
	}

	/* ACK everything - the ACK area presumably sits right after the
	 * message area in shmem (header dword + message dwords), hence the
	 * address arithmetic below; confirm against the MFW shmem layout.
	 */
	for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
		__be32 val = cpu_to_be32(((u32 *)info->mfw_mb_cur)[i]);

		/* MFW expect answer in BE, so we force write in that format */
		qed_wr(p_hwfn, p_ptt,
		       info->mfw_mb_addr + sizeof(u32) +
		       MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
		       sizeof(u32) + i * sizeof(u32),
		       (__force u32)val);
	}

	if (!found) {
		DP_NOTICE(p_hwfn,
			  "Received an MFW message indication but no new message!\n");
		rc = -EINVAL;
	}

	/* Copy the new mfw messages into the shadow */
	memcpy(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);

	return rc;
}
983 | ||
1408cc1f YM |
/* Retrieve the management-FW version and, optionally, the running bundle id.
 *
 * VF path: the version is taken from the ACQUIRE response cached in
 * vf_iov_info, so it is only available after ACQUIRE has completed;
 * p_running_bundle_id is NOT filled in on this path.
 * PF path: both values are read from the PUBLIC_GLOBAL section of the
 * MFW's shmem.
 *
 * Returns 0 on success, -EINVAL for a VF that has not yet acquired.
 */
int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
			struct qed_ptt *p_ptt,
			u32 *p_mfw_ver, u32 *p_running_bundle_id)
{
	u32 global_offsize;

	if (IS_VF(p_hwfn->cdev)) {
		if (p_hwfn->vf_iov_info) {
			struct pfvf_acquire_resp_tlv *p_resp;

			p_resp = &p_hwfn->vf_iov_info->acquire_resp;
			*p_mfw_ver = p_resp->pfdev_info.mfw_ver;
			return 0;
		} else {
			DP_VERBOSE(p_hwfn,
				   QED_MSG_IOV,
				   "VF requested MFW version prior to ACQUIRE\n");
			return -EINVAL;
		}
	}

	/* Locate the global section inside the MFW's public shmem */
	global_offsize = qed_rd(p_hwfn, p_ptt,
				SECTION_OFFSIZE_ADDR(p_hwfn->
						     mcp_info->public_base,
						     PUBLIC_GLOBAL));
	*p_mfw_ver =
	    qed_rd(p_hwfn, p_ptt,
		   SECTION_ADDR(global_offsize,
				0) + offsetof(struct public_global, mfw_ver));

	if (p_running_bundle_id != NULL) {
		*p_running_bundle_id = qed_rd(p_hwfn, p_ptt,
					      SECTION_ADDR(global_offsize, 0) +
					      offsetof(struct public_global,
						       running_bundle_id));
	}

	return 0;
}
1023 | ||
1a635e48 | 1024 | int qed_mcp_get_media_type(struct qed_dev *cdev, u32 *p_media_type) |
cc875c2e YM |
1025 | { |
1026 | struct qed_hwfn *p_hwfn = &cdev->hwfns[0]; | |
1027 | struct qed_ptt *p_ptt; | |
1028 | ||
1408cc1f YM |
1029 | if (IS_VF(cdev)) |
1030 | return -EINVAL; | |
1031 | ||
cc875c2e | 1032 | if (!qed_mcp_is_init(p_hwfn)) { |
525ef5c0 | 1033 | DP_NOTICE(p_hwfn, "MFW is not initialized!\n"); |
cc875c2e YM |
1034 | return -EBUSY; |
1035 | } | |
1036 | ||
1037 | *p_media_type = MEDIA_UNSPECIFIED; | |
1038 | ||
1039 | p_ptt = qed_ptt_acquire(p_hwfn); | |
1040 | if (!p_ptt) | |
1041 | return -EBUSY; | |
1042 | ||
1043 | *p_media_type = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr + | |
1044 | offsetof(struct public_port, media_type)); | |
1045 | ||
1046 | qed_ptt_release(p_hwfn, p_ptt); | |
1047 | ||
1048 | return 0; | |
1049 | } | |
1050 | ||
6927e826 MY |
1051 | /* Old MFW has a global configuration for all PFs regarding RDMA support */ |
1052 | static void | |
1053 | qed_mcp_get_shmem_proto_legacy(struct qed_hwfn *p_hwfn, | |
1054 | enum qed_pci_personality *p_proto) | |
1055 | { | |
1056 | /* There wasn't ever a legacy MFW that published iwarp. | |
1057 | * So at this point, this is either plain l2 or RoCE. | |
1058 | */ | |
1059 | if (test_bit(QED_DEV_CAP_ROCE, &p_hwfn->hw_info.device_capabilities)) | |
1060 | *p_proto = QED_PCI_ETH_ROCE; | |
1061 | else | |
1062 | *p_proto = QED_PCI_ETH; | |
1063 | ||
1064 | DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, | |
1065 | "According to Legacy capabilities, L2 personality is %08x\n", | |
1066 | (u32) *p_proto); | |
1067 | } | |
1068 | ||
/* Query the MFW for this PF's RDMA personality via the
 * GET_PF_RDMA_PROTOCOL mailbox command.
 *
 * Returns 0 and sets *p_proto on success; -EINVAL when the MFW doesn't
 * support the command (old MFW) or answers with a protocol the driver
 * can't use (e.g. iWARP) - callers fall back to the legacy derivation
 * in that case.
 */
static int
qed_mcp_get_shmem_proto_mfw(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    enum qed_pci_personality *p_proto)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt,
			 DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL, 0, &resp, &param);
	if (rc)
		return rc;
	if (resp != FW_MSG_CODE_OK) {
		/* Old MFWs simply don't know this command */
		DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
			   "MFW lacks support for command; Returns %08x\n",
			   resp);
		return -EINVAL;
	}

	/* The protocol is returned in the mailbox param */
	switch (param) {
	case FW_MB_PARAM_GET_PF_RDMA_NONE:
		*p_proto = QED_PCI_ETH;
		break;
	case FW_MB_PARAM_GET_PF_RDMA_ROCE:
		*p_proto = QED_PCI_ETH_ROCE;
		break;
	case FW_MB_PARAM_GET_PF_RDMA_BOTH:
		DP_NOTICE(p_hwfn,
			  "Current day drivers don't support RoCE & iWARP. Default to RoCE-only\n");
		*p_proto = QED_PCI_ETH_ROCE;
		break;
	case FW_MB_PARAM_GET_PF_RDMA_IWARP:
	default:
		DP_NOTICE(p_hwfn,
			  "MFW answers GET_PF_RDMA_PROTOCOL but param is %08x\n",
			  param);
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn,
		   NETIF_MSG_IFUP,
		   "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
		   (u32) *p_proto, resp, param);
	return 0;
}
1114 | ||
fe56b9e6 YM |
1115 | static int |
1116 | qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn, | |
1117 | struct public_func *p_info, | |
6927e826 | 1118 | struct qed_ptt *p_ptt, |
fe56b9e6 YM |
1119 | enum qed_pci_personality *p_proto) |
1120 | { | |
1121 | int rc = 0; | |
1122 | ||
1123 | switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) { | |
1124 | case FUNC_MF_CFG_PROTOCOL_ETHERNET: | |
1fe582ec RA |
1125 | if (!IS_ENABLED(CONFIG_QED_RDMA)) |
1126 | *p_proto = QED_PCI_ETH; | |
1127 | else if (qed_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto)) | |
6927e826 | 1128 | qed_mcp_get_shmem_proto_legacy(p_hwfn, p_proto); |
c5ac9319 YM |
1129 | break; |
1130 | case FUNC_MF_CFG_PROTOCOL_ISCSI: | |
1131 | *p_proto = QED_PCI_ISCSI; | |
1132 | break; | |
1e128c81 AE |
1133 | case FUNC_MF_CFG_PROTOCOL_FCOE: |
1134 | *p_proto = QED_PCI_FCOE; | |
1135 | break; | |
c5ac9319 YM |
1136 | case FUNC_MF_CFG_PROTOCOL_ROCE: |
1137 | DP_NOTICE(p_hwfn, "RoCE personality is not a valid value!\n"); | |
6927e826 | 1138 | /* Fallthrough */ |
fe56b9e6 YM |
1139 | default: |
1140 | rc = -EINVAL; | |
1141 | } | |
1142 | ||
1143 | return rc; | |
1144 | } | |
1145 | ||
/* Populate p_hwfn->mcp_info->func_info from this PF's shmem function
 * entry: pause policy, personality, bandwidth limits, MAC address, FCoE
 * WWNs, outer vlan, MTU and WoL capability.
 *
 * Returns 0 on success; -EINVAL for an unknown personality; a mailbox
 * error code if the WoL capability query fails.
 */
int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt)
{
	struct qed_mcp_function_info *info;
	struct public_func shmem_info;

	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
	info = &p_hwfn->mcp_info->func_info;

	info->pause_on_host = (shmem_info.config &
			       FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;

	if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
				    &info->protocol)) {
		DP_ERR(p_hwfn, "Unknown personality %08x\n",
		       (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
		return -EINVAL;
	}

	qed_read_pf_bandwidth(p_hwfn, &shmem_info);

	/* The MAC is split across two shmem words; an all-zero value means
	 * the MFW didn't provide one.
	 */
	if (shmem_info.mac_upper || shmem_info.mac_lower) {
		info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
		info->mac[1] = (u8)(shmem_info.mac_upper);
		info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
		info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
		info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
		info->mac[5] = (u8)(shmem_info.mac_lower);

		/* Store primary MAC for later possible WoL */
		memcpy(&p_hwfn->cdev->wol_mac, info->mac, ETH_ALEN);
	} else {
		DP_NOTICE(p_hwfn, "MAC is 0 in shmem\n");
	}

	/* FCoE world-wide names, each split across two 32-bit words */
	info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_upper |
			 (((u64)shmem_info.fcoe_wwn_port_name_lower) << 32);
	info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_upper |
			 (((u64)shmem_info.fcoe_wwn_node_name_lower) << 32);

	info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);

	info->mtu = (u16)shmem_info.mtu_size;

	/* Assume no WoL until the MFW confirms support below */
	p_hwfn->hw_info.b_wol_support = QED_WOL_SUPPORT_NONE;
	p_hwfn->cdev->wol_config = (u8)QED_OV_WOL_DEFAULT;
	if (qed_mcp_is_init(p_hwfn)) {
		u32 resp = 0, param = 0;
		int rc;

		rc = qed_mcp_cmd(p_hwfn, p_ptt,
				 DRV_MSG_CODE_OS_WOL, 0, &resp, &param);
		if (rc)
			return rc;
		if (resp == FW_MSG_CODE_OS_WOL_SUPPORTED)
			p_hwfn->hw_info.b_wol_support = QED_WOL_SUPPORT_PME;
	}

	DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_IFUP),
		   "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x wol %02x\n",
		   info->pause_on_host, info->protocol,
		   info->bandwidth_min, info->bandwidth_max,
		   info->mac[0], info->mac[1], info->mac[2],
		   info->mac[3], info->mac[4], info->mac[5],
		   info->wwn_port, info->wwn_node,
		   info->ovlan, (u8)p_hwfn->hw_info.b_wol_support);

	return 0;
}
1215 | ||
cc875c2e YM |
1216 | struct qed_mcp_link_params |
1217 | *qed_mcp_get_link_params(struct qed_hwfn *p_hwfn) | |
1218 | { | |
1219 | if (!p_hwfn || !p_hwfn->mcp_info) | |
1220 | return NULL; | |
1221 | return &p_hwfn->mcp_info->link_input; | |
1222 | } | |
1223 | ||
1224 | struct qed_mcp_link_state | |
1225 | *qed_mcp_get_link_state(struct qed_hwfn *p_hwfn) | |
1226 | { | |
1227 | if (!p_hwfn || !p_hwfn->mcp_info) | |
1228 | return NULL; | |
1229 | return &p_hwfn->mcp_info->link_output; | |
1230 | } | |
1231 | ||
1232 | struct qed_mcp_link_capabilities | |
1233 | *qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn) | |
1234 | { | |
1235 | if (!p_hwfn || !p_hwfn->mcp_info) | |
1236 | return NULL; | |
1237 | return &p_hwfn->mcp_info->link_capabilities; | |
1238 | } | |
1239 | ||
/* Ask the MFW to drain the NIG, then wait for the drain to finish.
 * Note the sleep happens unconditionally, even if the mailbox command
 * itself failed.
 */
int qed_mcp_drain(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	int rc;

	/* Param 1000 - presumably the drain duration handed to the MFW;
	 * the 1020 sleep below then over-covers it - TODO confirm units
	 * against the MFW interface definition.
	 */
	rc = qed_mcp_cmd(p_hwfn, p_ptt,
			 DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param);

	/* Wait for the drain to complete before returning */
	msleep(1020);

	return rc;
}
1253 | ||
cee4d264 | 1254 | int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn, |
1a635e48 | 1255 | struct qed_ptt *p_ptt, u32 *p_flash_size) |
cee4d264 MC |
1256 | { |
1257 | u32 flash_size; | |
1258 | ||
1408cc1f YM |
1259 | if (IS_VF(p_hwfn->cdev)) |
1260 | return -EINVAL; | |
1261 | ||
cee4d264 MC |
1262 | flash_size = qed_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4); |
1263 | flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >> | |
1264 | MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT; | |
1265 | flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT)); | |
1266 | ||
1267 | *p_flash_size = flash_size; | |
1268 | ||
1269 | return 0; | |
1270 | } | |
1271 | ||
1408cc1f YM |
1272 | int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn, |
1273 | struct qed_ptt *p_ptt, u8 vf_id, u8 num) | |
1274 | { | |
1275 | u32 resp = 0, param = 0, rc_param = 0; | |
1276 | int rc; | |
1277 | ||
1278 | /* Only Leader can configure MSIX, and need to take CMT into account */ | |
1279 | if (!IS_LEAD_HWFN(p_hwfn)) | |
1280 | return 0; | |
1281 | num *= p_hwfn->cdev->num_hwfns; | |
1282 | ||
1283 | param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) & | |
1284 | DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK; | |
1285 | param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) & | |
1286 | DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK; | |
1287 | ||
1288 | rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param, | |
1289 | &resp, &rc_param); | |
1290 | ||
1291 | if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) { | |
1292 | DP_NOTICE(p_hwfn, "VF[%d]: MFW failed to set MSI-X\n", vf_id); | |
1293 | rc = -EINVAL; | |
1294 | } else { | |
1295 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, | |
1296 | "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n", | |
1297 | num, vf_id); | |
1298 | } | |
1299 | ||
1300 | return rc; | |
1301 | } | |
1302 | ||
fe56b9e6 YM |
/* Send the driver version (number + name string) to the MFW via the
 * SET_VERSION mailbox command.
 */
int
qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 struct qed_mcp_drv_version *p_ver)
{
	struct drv_version_stc *p_drv_version;
	struct qed_mcp_mb_params mb_params;
	union drv_union_data union_data;
	__be32 val;
	u32 i;
	int rc;

	p_drv_version = &union_data.drv_version;
	p_drv_version->version = p_ver->version;

	/* Copy the name one dword at a time, swapped to big-endian as the
	 * MFW expects. Only MCP_DRV_VER_STR_SIZE - 4 bytes are copied -
	 * presumably the last dword is reserved/terminator space; TODO
	 * confirm against the MFW HSI.
	 */
	for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / sizeof(u32); i++) {
		val = cpu_to_be32(*((u32 *)&p_ver->name[i * sizeof(u32)]));
		*(__be32 *)&p_drv_version->name[i * sizeof(u32)] = val;
	}

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
	mb_params.p_data_src = &union_data;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}
91420b83 | 1332 | |
4102426f TT |
1333 | int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) |
1334 | { | |
1335 | u32 resp = 0, param = 0; | |
1336 | int rc; | |
1337 | ||
1338 | rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp, | |
1339 | ¶m); | |
1340 | if (rc) | |
1341 | DP_ERR(p_hwfn, "MCP response failure, aborting\n"); | |
1342 | ||
1343 | return rc; | |
1344 | } | |
1345 | ||
1346 | int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) | |
1347 | { | |
1348 | u32 value, cpu_mode; | |
1349 | ||
1350 | qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff); | |
1351 | ||
1352 | value = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE); | |
1353 | value &= ~MCP_REG_CPU_MODE_SOFT_HALT; | |
1354 | qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value); | |
1355 | cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE); | |
1356 | ||
1357 | return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? -EAGAIN : 0; | |
1358 | } | |
1359 | ||
0fefbfba SK |
1360 | int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn, |
1361 | struct qed_ptt *p_ptt, | |
1362 | enum qed_ov_client client) | |
1363 | { | |
1364 | u32 resp = 0, param = 0; | |
1365 | u32 drv_mb_param; | |
1366 | int rc; | |
1367 | ||
1368 | switch (client) { | |
1369 | case QED_OV_CLIENT_DRV: | |
1370 | drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS; | |
1371 | break; | |
1372 | case QED_OV_CLIENT_USER: | |
1373 | drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER; | |
1374 | break; | |
1375 | case QED_OV_CLIENT_VENDOR_SPEC: | |
1376 | drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC; | |
1377 | break; | |
1378 | default: | |
1379 | DP_NOTICE(p_hwfn, "Invalid client type %d\n", client); | |
1380 | return -EINVAL; | |
1381 | } | |
1382 | ||
1383 | rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG, | |
1384 | drv_mb_param, &resp, ¶m); | |
1385 | if (rc) | |
1386 | DP_ERR(p_hwfn, "MCP response failure, aborting\n"); | |
1387 | ||
1388 | return rc; | |
1389 | } | |
1390 | ||
1391 | int qed_mcp_ov_update_driver_state(struct qed_hwfn *p_hwfn, | |
1392 | struct qed_ptt *p_ptt, | |
1393 | enum qed_ov_driver_state drv_state) | |
1394 | { | |
1395 | u32 resp = 0, param = 0; | |
1396 | u32 drv_mb_param; | |
1397 | int rc; | |
1398 | ||
1399 | switch (drv_state) { | |
1400 | case QED_OV_DRIVER_STATE_NOT_LOADED: | |
1401 | drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED; | |
1402 | break; | |
1403 | case QED_OV_DRIVER_STATE_DISABLED: | |
1404 | drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED; | |
1405 | break; | |
1406 | case QED_OV_DRIVER_STATE_ACTIVE: | |
1407 | drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE; | |
1408 | break; | |
1409 | default: | |
1410 | DP_NOTICE(p_hwfn, "Invalid driver state %d\n", drv_state); | |
1411 | return -EINVAL; | |
1412 | } | |
1413 | ||
1414 | rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE, | |
1415 | drv_mb_param, &resp, ¶m); | |
1416 | if (rc) | |
1417 | DP_ERR(p_hwfn, "Failed to send driver state\n"); | |
1418 | ||
1419 | return rc; | |
1420 | } | |
1421 | ||
1422 | int qed_mcp_ov_update_mtu(struct qed_hwfn *p_hwfn, | |
1423 | struct qed_ptt *p_ptt, u16 mtu) | |
1424 | { | |
1425 | u32 resp = 0, param = 0; | |
1426 | u32 drv_mb_param; | |
1427 | int rc; | |
1428 | ||
1429 | drv_mb_param = (u32)mtu << DRV_MB_PARAM_OV_MTU_SIZE_SHIFT; | |
1430 | rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_MTU, | |
1431 | drv_mb_param, &resp, ¶m); | |
1432 | if (rc) | |
1433 | DP_ERR(p_hwfn, "Failed to send mtu value, rc = %d\n", rc); | |
1434 | ||
1435 | return rc; | |
1436 | } | |
1437 | ||
1438 | int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn, | |
1439 | struct qed_ptt *p_ptt, u8 *mac) | |
1440 | { | |
1441 | struct qed_mcp_mb_params mb_params; | |
1442 | union drv_union_data union_data; | |
1443 | int rc; | |
1444 | ||
1445 | memset(&mb_params, 0, sizeof(mb_params)); | |
1446 | mb_params.cmd = DRV_MSG_CODE_SET_VMAC; | |
1447 | mb_params.param = DRV_MSG_CODE_VMAC_TYPE_MAC << | |
1448 | DRV_MSG_CODE_VMAC_TYPE_SHIFT; | |
1449 | mb_params.param |= MCP_PF_ID(p_hwfn); | |
1450 | ether_addr_copy(&union_data.raw_data[0], mac); | |
1451 | mb_params.p_data_src = &union_data; | |
1452 | rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); | |
1453 | if (rc) | |
1454 | DP_ERR(p_hwfn, "Failed to send mac address, rc = %d\n", rc); | |
1455 | ||
14d39648 MY |
1456 | /* Store primary MAC for later possible WoL */ |
1457 | memcpy(p_hwfn->cdev->wol_mac, mac, ETH_ALEN); | |
1458 | ||
0fefbfba SK |
1459 | return rc; |
1460 | } | |
1461 | ||
1462 | int qed_mcp_ov_update_wol(struct qed_hwfn *p_hwfn, | |
1463 | struct qed_ptt *p_ptt, enum qed_ov_wol wol) | |
1464 | { | |
1465 | u32 resp = 0, param = 0; | |
1466 | u32 drv_mb_param; | |
1467 | int rc; | |
1468 | ||
14d39648 MY |
1469 | if (p_hwfn->hw_info.b_wol_support == QED_WOL_SUPPORT_NONE) { |
1470 | DP_VERBOSE(p_hwfn, QED_MSG_SP, | |
1471 | "Can't change WoL configuration when WoL isn't supported\n"); | |
1472 | return -EINVAL; | |
1473 | } | |
1474 | ||
0fefbfba SK |
1475 | switch (wol) { |
1476 | case QED_OV_WOL_DEFAULT: | |
1477 | drv_mb_param = DRV_MB_PARAM_WOL_DEFAULT; | |
1478 | break; | |
1479 | case QED_OV_WOL_DISABLED: | |
1480 | drv_mb_param = DRV_MB_PARAM_WOL_DISABLED; | |
1481 | break; | |
1482 | case QED_OV_WOL_ENABLED: | |
1483 | drv_mb_param = DRV_MB_PARAM_WOL_ENABLED; | |
1484 | break; | |
1485 | default: | |
1486 | DP_ERR(p_hwfn, "Invalid wol state %d\n", wol); | |
1487 | return -EINVAL; | |
1488 | } | |
1489 | ||
1490 | rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_WOL, | |
1491 | drv_mb_param, &resp, ¶m); | |
1492 | if (rc) | |
1493 | DP_ERR(p_hwfn, "Failed to send wol mode, rc = %d\n", rc); | |
1494 | ||
14d39648 MY |
1495 | /* Store the WoL update for a future unload */ |
1496 | p_hwfn->cdev->wol_config = (u8)wol; | |
1497 | ||
0fefbfba SK |
1498 | return rc; |
1499 | } | |
1500 | ||
1501 | int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn, | |
1502 | struct qed_ptt *p_ptt, | |
1503 | enum qed_ov_eswitch eswitch) | |
1504 | { | |
1505 | u32 resp = 0, param = 0; | |
1506 | u32 drv_mb_param; | |
1507 | int rc; | |
1508 | ||
1509 | switch (eswitch) { | |
1510 | case QED_OV_ESWITCH_NONE: | |
1511 | drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_NONE; | |
1512 | break; | |
1513 | case QED_OV_ESWITCH_VEB: | |
1514 | drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEB; | |
1515 | break; | |
1516 | case QED_OV_ESWITCH_VEPA: | |
1517 | drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEPA; | |
1518 | break; | |
1519 | default: | |
1520 | DP_ERR(p_hwfn, "Invalid eswitch mode %d\n", eswitch); | |
1521 | return -EINVAL; | |
1522 | } | |
1523 | ||
1524 | rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE, | |
1525 | drv_mb_param, &resp, ¶m); | |
1526 | if (rc) | |
1527 | DP_ERR(p_hwfn, "Failed to send eswitch mode, rc = %d\n", rc); | |
1528 | ||
1529 | return rc; | |
1530 | } | |
1531 | ||
1a635e48 YM |
1532 | int qed_mcp_set_led(struct qed_hwfn *p_hwfn, |
1533 | struct qed_ptt *p_ptt, enum qed_led_mode mode) | |
91420b83 SK |
1534 | { |
1535 | u32 resp = 0, param = 0, drv_mb_param; | |
1536 | int rc; | |
1537 | ||
1538 | switch (mode) { | |
1539 | case QED_LED_MODE_ON: | |
1540 | drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON; | |
1541 | break; | |
1542 | case QED_LED_MODE_OFF: | |
1543 | drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF; | |
1544 | break; | |
1545 | case QED_LED_MODE_RESTORE: | |
1546 | drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER; | |
1547 | break; | |
1548 | default: | |
1549 | DP_NOTICE(p_hwfn, "Invalid LED mode %d\n", mode); | |
1550 | return -EINVAL; | |
1551 | } | |
1552 | ||
1553 | rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE, | |
1554 | drv_mb_param, &resp, ¶m); | |
1555 | ||
1556 | return rc; | |
1557 | } | |
03dc76ca | 1558 | |
4102426f TT |
1559 | int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn, |
1560 | struct qed_ptt *p_ptt, u32 mask_parities) | |
1561 | { | |
1562 | u32 resp = 0, param = 0; | |
1563 | int rc; | |
1564 | ||
1565 | rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES, | |
1566 | mask_parities, &resp, ¶m); | |
1567 | ||
1568 | if (rc) { | |
1569 | DP_ERR(p_hwfn, | |
1570 | "MCP response failure for mask parities, aborting\n"); | |
1571 | } else if (resp != FW_MSG_CODE_OK) { | |
1572 | DP_ERR(p_hwfn, | |
1573 | "MCP did not acknowledge mask parity request. Old MFW?\n"); | |
1574 | rc = -EINVAL; | |
1575 | } | |
1576 | ||
1577 | return rc; | |
1578 | } | |
1579 | ||
7a4b21b7 MY |
1580 | int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len) |
1581 | { | |
1582 | u32 bytes_left = len, offset = 0, bytes_to_copy, read_len = 0; | |
1583 | struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); | |
1584 | u32 resp = 0, resp_param = 0; | |
1585 | struct qed_ptt *p_ptt; | |
1586 | int rc = 0; | |
1587 | ||
1588 | p_ptt = qed_ptt_acquire(p_hwfn); | |
1589 | if (!p_ptt) | |
1590 | return -EBUSY; | |
1591 | ||
1592 | while (bytes_left > 0) { | |
1593 | bytes_to_copy = min_t(u32, bytes_left, MCP_DRV_NVM_BUF_LEN); | |
1594 | ||
1595 | rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, | |
1596 | DRV_MSG_CODE_NVM_READ_NVRAM, | |
1597 | addr + offset + | |
1598 | (bytes_to_copy << | |
1599 | DRV_MB_PARAM_NVM_LEN_SHIFT), | |
1600 | &resp, &resp_param, | |
1601 | &read_len, | |
1602 | (u32 *)(p_buf + offset)); | |
1603 | ||
1604 | if (rc || (resp != FW_MSG_CODE_NVM_OK)) { | |
1605 | DP_NOTICE(cdev, "MCP command rc = %d\n", rc); | |
1606 | break; | |
1607 | } | |
1608 | ||
1609 | /* This can be a lengthy process, and it's possible scheduler | |
1610 | * isn't preemptable. Sleep a bit to prevent CPU hogging. | |
1611 | */ | |
1612 | if (bytes_left % 0x1000 < | |
1613 | (bytes_left - read_len) % 0x1000) | |
1614 | usleep_range(1000, 2000); | |
1615 | ||
1616 | offset += read_len; | |
1617 | bytes_left -= read_len; | |
1618 | } | |
1619 | ||
1620 | cdev->mcp_nvm_resp = resp; | |
1621 | qed_ptt_release(p_hwfn, p_ptt); | |
1622 | ||
1623 | return rc; | |
1624 | } | |
1625 | ||
03dc76ca SRK |
1626 | int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) |
1627 | { | |
1628 | u32 drv_mb_param = 0, rsp, param; | |
1629 | int rc = 0; | |
1630 | ||
1631 | drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST << | |
1632 | DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT); | |
1633 | ||
1634 | rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST, | |
1635 | drv_mb_param, &rsp, ¶m); | |
1636 | ||
1637 | if (rc) | |
1638 | return rc; | |
1639 | ||
1640 | if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) || | |
1641 | (param != DRV_MB_PARAM_BIST_RC_PASSED)) | |
1642 | rc = -EAGAIN; | |
1643 | ||
1644 | return rc; | |
1645 | } | |
1646 | ||
1647 | int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) | |
1648 | { | |
1649 | u32 drv_mb_param, rsp, param; | |
1650 | int rc = 0; | |
1651 | ||
1652 | drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST << | |
1653 | DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT); | |
1654 | ||
1655 | rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST, | |
1656 | drv_mb_param, &rsp, ¶m); | |
1657 | ||
1658 | if (rc) | |
1659 | return rc; | |
1660 | ||
1661 | if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) || | |
1662 | (param != DRV_MB_PARAM_BIST_RC_PASSED)) | |
1663 | rc = -EAGAIN; | |
1664 | ||
1665 | return rc; | |
1666 | } | |
7a4b21b7 MY |
1667 | |
1668 | int qed_mcp_bist_nvm_test_get_num_images(struct qed_hwfn *p_hwfn, | |
1669 | struct qed_ptt *p_ptt, | |
1670 | u32 *num_images) | |
1671 | { | |
1672 | u32 drv_mb_param = 0, rsp; | |
1673 | int rc = 0; | |
1674 | ||
1675 | drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES << | |
1676 | DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT); | |
1677 | ||
1678 | rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST, | |
1679 | drv_mb_param, &rsp, num_images); | |
1680 | if (rc) | |
1681 | return rc; | |
1682 | ||
1683 | if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK)) | |
1684 | rc = -EINVAL; | |
1685 | ||
1686 | return rc; | |
1687 | } | |
1688 | ||
1689 | int qed_mcp_bist_nvm_test_get_image_att(struct qed_hwfn *p_hwfn, | |
1690 | struct qed_ptt *p_ptt, | |
1691 | struct bist_nvm_image_att *p_image_att, | |
1692 | u32 image_index) | |
1693 | { | |
1694 | u32 buf_size = 0, param, resp = 0, resp_param = 0; | |
1695 | int rc; | |
1696 | ||
1697 | param = DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX << | |
1698 | DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT; | |
1699 | param |= image_index << DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT; | |
1700 | ||
1701 | rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, | |
1702 | DRV_MSG_CODE_BIST_TEST, param, | |
1703 | &resp, &resp_param, | |
1704 | &buf_size, | |
1705 | (u32 *)p_image_att); | |
1706 | if (rc) | |
1707 | return rc; | |
1708 | ||
1709 | if (((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) || | |
1710 | (p_image_att->return_code != 1)) | |
1711 | rc = -EINVAL; | |
1712 | ||
1713 | return rc; | |
1714 | } | |
2edbff8d TT |
1715 | |
1716 | #define QED_RESC_ALLOC_VERSION_MAJOR 1 | |
1717 | #define QED_RESC_ALLOC_VERSION_MINOR 0 | |
1718 | #define QED_RESC_ALLOC_VERSION \ | |
1719 | ((QED_RESC_ALLOC_VERSION_MAJOR << \ | |
1720 | DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) | \ | |
1721 | (QED_RESC_ALLOC_VERSION_MINOR << \ | |
1722 | DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT)) | |
1723 | int qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn, | |
1724 | struct qed_ptt *p_ptt, | |
1725 | struct resource_info *p_resc_info, | |
1726 | u32 *p_mcp_resp, u32 *p_mcp_param) | |
1727 | { | |
1728 | struct qed_mcp_mb_params mb_params; | |
bb480242 | 1729 | union drv_union_data union_data; |
2edbff8d TT |
1730 | int rc; |
1731 | ||
1732 | memset(&mb_params, 0, sizeof(mb_params)); | |
bb480242 | 1733 | memset(&union_data, 0, sizeof(union_data)); |
2edbff8d TT |
1734 | mb_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG; |
1735 | mb_params.param = QED_RESC_ALLOC_VERSION; | |
bb480242 MY |
1736 | |
1737 | /* Need to have a sufficient large struct, as the cmd_and_union | |
1738 | * is going to do memcpy from and to it. | |
1739 | */ | |
1740 | memcpy(&union_data.resource, p_resc_info, sizeof(*p_resc_info)); | |
1741 | ||
1742 | mb_params.p_data_src = &union_data; | |
1743 | mb_params.p_data_dst = &union_data; | |
2edbff8d TT |
1744 | rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); |
1745 | if (rc) | |
1746 | return rc; | |
1747 | ||
bb480242 MY |
1748 | /* Copy the data back */ |
1749 | memcpy(p_resc_info, &union_data.resource, sizeof(*p_resc_info)); | |
2edbff8d TT |
1750 | *p_mcp_resp = mb_params.mcp_resp; |
1751 | *p_mcp_param = mb_params.mcp_param; | |
1752 | ||
1753 | DP_VERBOSE(p_hwfn, | |
1754 | QED_MSG_SP, | |
1755 | "MFW resource_info: version 0x%x, res_id 0x%x, size 0x%x, offset 0x%x, vf_size 0x%x, vf_offset 0x%x, flags 0x%x\n", | |
1756 | *p_mcp_param, | |
1757 | p_resc_info->res_id, | |
1758 | p_resc_info->size, | |
1759 | p_resc_info->offset, | |
1760 | p_resc_info->vf_size, | |
1761 | p_resc_info->vf_offset, p_resc_info->flags); | |
1762 | ||
1763 | return 0; | |
1764 | } |