drivers/net/ethernet/intel/ice/ice_common.c
7ec59eea 1// SPDX-License-Identifier: GPL-2.0
ba1124f5 2/* Copyright (c) 2018-2023, Intel Corporation. */
3
4#include "ice_common.h"
9c20346b 5#include "ice_sched.h"
7ec59eea 6#include "ice_adminq_cmd.h"
c90ed40c 7#include "ice_flow.h"
272ad794 8#include "ice_ptp_hw.h"
7ec59eea 9
71245072 10#define ICE_PF_RESET_WAIT_COUNT 300
8a3a565f 11#define ICE_MAX_NETLIST_SIZE 10
f31e4b6f 12
13static const char * const ice_link_mode_str_low[] = {
14 [0] = "100BASE_TX",
15 [1] = "100M_SGMII",
16 [2] = "1000BASE_T",
17 [3] = "1000BASE_SX",
18 [4] = "1000BASE_LX",
19 [5] = "1000BASE_KX",
20 [6] = "1G_SGMII",
21 [7] = "2500BASE_T",
22 [8] = "2500BASE_X",
23 [9] = "2500BASE_KX",
24 [10] = "5GBASE_T",
25 [11] = "5GBASE_KR",
26 [12] = "10GBASE_T",
27 [13] = "10G_SFI_DA",
28 [14] = "10GBASE_SR",
29 [15] = "10GBASE_LR",
30 [16] = "10GBASE_KR_CR1",
31 [17] = "10G_SFI_AOC_ACC",
32 [18] = "10G_SFI_C2C",
33 [19] = "25GBASE_T",
34 [20] = "25GBASE_CR",
35 [21] = "25GBASE_CR_S",
36 [22] = "25GBASE_CR1",
37 [23] = "25GBASE_SR",
38 [24] = "25GBASE_LR",
39 [25] = "25GBASE_KR",
40 [26] = "25GBASE_KR_S",
41 [27] = "25GBASE_KR1",
42 [28] = "25G_AUI_AOC_ACC",
43 [29] = "25G_AUI_C2C",
44 [30] = "40GBASE_CR4",
45 [31] = "40GBASE_SR4",
46 [32] = "40GBASE_LR4",
47 [33] = "40GBASE_KR4",
48 [34] = "40G_XLAUI_AOC_ACC",
49 [35] = "40G_XLAUI",
50 [36] = "50GBASE_CR2",
51 [37] = "50GBASE_SR2",
52 [38] = "50GBASE_LR2",
53 [39] = "50GBASE_KR2",
54 [40] = "50G_LAUI2_AOC_ACC",
55 [41] = "50G_LAUI2",
56 [42] = "50G_AUI2_AOC_ACC",
57 [43] = "50G_AUI2",
58 [44] = "50GBASE_CP",
59 [45] = "50GBASE_SR",
60 [46] = "50GBASE_FR",
61 [47] = "50GBASE_LR",
62 [48] = "50GBASE_KR_PAM4",
63 [49] = "50G_AUI1_AOC_ACC",
64 [50] = "50G_AUI1",
65 [51] = "100GBASE_CR4",
66 [52] = "100GBASE_SR4",
67 [53] = "100GBASE_LR4",
68 [54] = "100GBASE_KR4",
69 [55] = "100G_CAUI4_AOC_ACC",
70 [56] = "100G_CAUI4",
71 [57] = "100G_AUI4_AOC_ACC",
72 [58] = "100G_AUI4",
73 [59] = "100GBASE_CR_PAM4",
74 [60] = "100GBASE_KR_PAM4",
75 [61] = "100GBASE_CP2",
76 [62] = "100GBASE_SR2",
77 [63] = "100GBASE_DR",
78};
79
80static const char * const ice_link_mode_str_high[] = {
81 [0] = "100GBASE_KR2_PAM4",
82 [1] = "100G_CAUI2_AOC_ACC",
83 [2] = "100G_CAUI2",
84 [3] = "100G_AUI2_AOC_ACC",
85 [4] = "100G_AUI2",
86};
87
88/**
89 * ice_dump_phy_type - helper function to dump phy_type
90 * @hw: pointer to the HW structure
91 * @low: 64 bit value for phy_type_low
92 * @high: 64 bit value for phy_type_high
93 * @prefix: prefix string to differentiate multiple dumps
94 */
95static void
96ice_dump_phy_type(struct ice_hw *hw, u64 low, u64 high, const char *prefix)
97{
98 ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_low: 0x%016llx\n", prefix, low);
99
100 for (u32 i = 0; i < BITS_PER_TYPE(typeof(low)); i++) {
101 if (low & BIT_ULL(i))
102 ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n",
103 prefix, i, ice_link_mode_str_low[i]);
104 }
105
106 ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_high: 0x%016llx\n", prefix, high);
107
108 for (u32 i = 0; i < BITS_PER_TYPE(typeof(high)); i++) {
109 if (high & BIT_ULL(i))
110 ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n",
111 prefix, i, ice_link_mode_str_high[i]);
112 }
113}
114
115/**
116 * ice_set_mac_type - Sets MAC type
117 * @hw: pointer to the HW structure
118 *
119 * This function sets the MAC type of the adapter based on the
f9867df6 120 * vendor ID and device ID stored in the HW structure.
f31e4b6f 121 */
5e24d598 122static int ice_set_mac_type(struct ice_hw *hw)
123{
124 if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
d54699e2 125 return -ENODEV;
f31e4b6f 126
127 switch (hw->device_id) {
128 case ICE_DEV_ID_E810C_BACKPLANE:
129 case ICE_DEV_ID_E810C_QSFP:
130 case ICE_DEV_ID_E810C_SFP:
131 case ICE_DEV_ID_E810_XXV_BACKPLANE:
132 case ICE_DEV_ID_E810_XXV_QSFP:
133 case ICE_DEV_ID_E810_XXV_SFP:
134 hw->mac_type = ICE_MAC_E810;
135 break;
136 case ICE_DEV_ID_E823C_10G_BASE_T:
137 case ICE_DEV_ID_E823C_BACKPLANE:
138 case ICE_DEV_ID_E823C_QSFP:
139 case ICE_DEV_ID_E823C_SFP:
140 case ICE_DEV_ID_E823C_SGMII:
141 case ICE_DEV_ID_E822C_10G_BASE_T:
142 case ICE_DEV_ID_E822C_BACKPLANE:
143 case ICE_DEV_ID_E822C_QSFP:
144 case ICE_DEV_ID_E822C_SFP:
145 case ICE_DEV_ID_E822C_SGMII:
146 case ICE_DEV_ID_E822L_10G_BASE_T:
147 case ICE_DEV_ID_E822L_BACKPLANE:
148 case ICE_DEV_ID_E822L_SFP:
149 case ICE_DEV_ID_E822L_SGMII:
150 case ICE_DEV_ID_E823L_10G_BASE_T:
151 case ICE_DEV_ID_E823L_1GBE:
152 case ICE_DEV_ID_E823L_BACKPLANE:
153 case ICE_DEV_ID_E823L_QSFP:
154 case ICE_DEV_ID_E823L_SFP:
155 hw->mac_type = ICE_MAC_GENERIC;
156 break;
157 case ICE_DEV_ID_E825C_BACKPLANE:
158 case ICE_DEV_ID_E825C_QSFP:
159 case ICE_DEV_ID_E825C_SFP:
160 case ICE_DEV_ID_E825C_SGMII:
161 hw->mac_type = ICE_MAC_GENERIC_3K_E825;
162 break;
163 case ICE_DEV_ID_E830CC_BACKPLANE:
164 case ICE_DEV_ID_E830CC_QSFP56:
165 case ICE_DEV_ID_E830CC_SFP:
166 case ICE_DEV_ID_E830CC_SFP_DD:
167 case ICE_DEV_ID_E830C_BACKPLANE:
168 case ICE_DEV_ID_E830_XXV_BACKPLANE:
169 case ICE_DEV_ID_E830C_QSFP:
170 case ICE_DEV_ID_E830_XXV_QSFP:
171 case ICE_DEV_ID_E830C_SFP:
172 case ICE_DEV_ID_E830_XXV_SFP:
173 hw->mac_type = ICE_MAC_E830;
174 break;
175 default:
176 hw->mac_type = ICE_MAC_UNKNOWN;
177 break;
178 }
179
180 ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
181 return 0;
182}
183
184/**
185 * ice_is_generic_mac - check if device's mac_type is generic
186 * @hw: pointer to the hardware structure
187 *
188 * Return: true if mac_type is generic (with SBQ support), false if not
189 */
190bool ice_is_generic_mac(struct ice_hw *hw)
191{
192 return (hw->mac_type == ICE_MAC_GENERIC ||
193 hw->mac_type == ICE_MAC_GENERIC_3K_E825);
194}
195
196/**
 197 * ice_is_e810 - Check if a device is E810 based
 198 * @hw: pointer to the hardware structure
 199 *
 200 * Return: true if the device is E810 based, false if not.
201 */
202bool ice_is_e810(struct ice_hw *hw)
203{
204 return hw->mac_type == ICE_MAC_E810;
205}
206
207/**
 208 * ice_is_e810t - Check if a device is E810T based
 209 * @hw: pointer to the hardware structure
 210 *
 211 * Return: true if the device is E810T based, false if not.
212 */
213bool ice_is_e810t(struct ice_hw *hw)
214{
215 switch (hw->device_id) {
216 case ICE_DEV_ID_E810C_SFP:
217 switch (hw->subsystem_device_id) {
218 case ICE_SUBDEV_ID_E810T:
219 case ICE_SUBDEV_ID_E810T2:
220 case ICE_SUBDEV_ID_E810T3:
221 case ICE_SUBDEV_ID_E810T4:
222 case ICE_SUBDEV_ID_E810T6:
223 case ICE_SUBDEV_ID_E810T7:
885fe693 224 return true;
225 }
226 break;
227 case ICE_DEV_ID_E810C_QSFP:
228 switch (hw->subsystem_device_id) {
229 case ICE_SUBDEV_ID_E810T2:
230 case ICE_SUBDEV_ID_E810T3:
231 case ICE_SUBDEV_ID_E810T5:
232 return true;
233 }
234 break;
235 default:
236 break;
237 }
238
239 return false;
240}
241
242/**
243 * ice_is_e822 - Check if a device is E822 family device
244 * @hw: pointer to the hardware structure
245 *
246 * Return: true if the device is E822 based, false if not.
247 */
248bool ice_is_e822(struct ice_hw *hw)
249{
250 switch (hw->device_id) {
251 case ICE_DEV_ID_E822C_BACKPLANE:
252 case ICE_DEV_ID_E822C_QSFP:
253 case ICE_DEV_ID_E822C_SFP:
254 case ICE_DEV_ID_E822C_10G_BASE_T:
255 case ICE_DEV_ID_E822C_SGMII:
256 case ICE_DEV_ID_E822L_BACKPLANE:
257 case ICE_DEV_ID_E822L_SFP:
258 case ICE_DEV_ID_E822L_10G_BASE_T:
259 case ICE_DEV_ID_E822L_SGMII:
260 return true;
261 default:
262 return false;
263 }
264}
265
266/**
 267 * ice_is_e823 - Check if a device is E823-L or E823-C based
 268 * @hw: pointer to the hardware structure
 269 *
 270 * Return: true if the device is E823-L or E823-C based, false if not.
271 */
272bool ice_is_e823(struct ice_hw *hw)
273{
274 switch (hw->device_id) {
275 case ICE_DEV_ID_E823L_BACKPLANE:
276 case ICE_DEV_ID_E823L_SFP:
277 case ICE_DEV_ID_E823L_10G_BASE_T:
278 case ICE_DEV_ID_E823L_1GBE:
279 case ICE_DEV_ID_E823L_QSFP:
280 case ICE_DEV_ID_E823C_BACKPLANE:
281 case ICE_DEV_ID_E823C_QSFP:
282 case ICE_DEV_ID_E823C_SFP:
283 case ICE_DEV_ID_E823C_10G_BASE_T:
284 case ICE_DEV_ID_E823C_SGMII:
285 return true;
286 default:
287 return false;
288 }
289}
290
291/**
292 * ice_is_e825c - Check if a device is E825C family device
293 * @hw: pointer to the hardware structure
294 *
295 * Return: true if the device is E825-C based, false if not.
296 */
297bool ice_is_e825c(struct ice_hw *hw)
298{
299 switch (hw->device_id) {
300 case ICE_DEV_ID_E825C_BACKPLANE:
301 case ICE_DEV_ID_E825C_QSFP:
302 case ICE_DEV_ID_E825C_SFP:
303 case ICE_DEV_ID_E825C_SGMII:
304 return true;
305 default:
306 return false;
307 }
308}
309
310/**
311 * ice_clear_pf_cfg - Clear PF configuration
312 * @hw: pointer to the hardware structure
313 *
314 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
315 * configuration, flow director filters, etc.).
f31e4b6f 316 */
5e24d598 317int ice_clear_pf_cfg(struct ice_hw *hw)
318{
319 struct ice_aq_desc desc;
320
321 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);
322
323 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
324}
325
326/**
327 * ice_aq_manage_mac_read - manage MAC address read command
f9867df6 328 * @hw: pointer to the HW struct
329 * @buf: a virtual buffer to hold the manage MAC read response
330 * @buf_size: Size of the virtual buffer
331 * @cd: pointer to command details structure or NULL
332 *
 333 * This function is used to return the per-PF station MAC address (0x0107).
 334 * NOTE: Upon successful completion of this command, MAC address information
 335 * is returned in the user-specified buffer, which should be interpreted as a
 336 * "manage_mac_read" response.
 337 * The returned MAC addresses are also stored in the HW struct (port.mac).
 338 * ice_discover_dev_caps is expected to be called before this function is
 339 * called.
dc49c772 340 */
5e24d598 341static int
342ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
343 struct ice_sq_cd *cd)
344{
345 struct ice_aqc_manage_mac_read_resp *resp;
346 struct ice_aqc_manage_mac_read *cmd;
347 struct ice_aq_desc desc;
5e24d598 348 int status;
dc49c772 349 u16 flags;
d6fef10c 350 u8 i;
351
352 cmd = &desc.params.mac_read;
353
354 if (buf_size < sizeof(*resp))
d54699e2 355 return -EINVAL;
356
357 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);
358
359 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
360 if (status)
361 return status;
362
7a63dae0 363 resp = buf;
364 flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;
365
366 if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
367 ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
d54699e2 368 return -EIO;
369 }
370
371 /* A single port can report up to two (LAN and WoL) addresses */
372 for (i = 0; i < cmd->num_addr; i++)
373 if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
374 ether_addr_copy(hw->port_info->mac.lan_addr,
375 resp[i].mac_addr);
376 ether_addr_copy(hw->port_info->mac.perm_addr,
377 resp[i].mac_addr);
378 break;
379 }
380
381 return 0;
382}
383
384/**
385 * ice_aq_get_phy_caps - returns PHY capabilities
386 * @pi: port information structure
387 * @qual_mods: report qualified modules
388 * @report_mode: report mode capabilities
389 * @pcaps: structure for PHY capabilities to be filled
390 * @cd: pointer to command details structure or NULL
391 *
392 * Returns the various PHY capabilities supported on the Port (0x0600)
393 */
5e24d598 394int
395ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
396 struct ice_aqc_get_phy_caps_data *pcaps,
397 struct ice_sq_cd *cd)
398{
399 struct ice_aqc_get_phy_caps *cmd;
400 u16 pcaps_size = sizeof(*pcaps);
401 struct ice_aq_desc desc;
f8c74ca6 402 const char *prefix;
55df52a0 403 struct ice_hw *hw;
5518ac2a 404 int status;
405
406 cmd = &desc.params.get_phy;
407
408 if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
d54699e2 409 return -EINVAL;
55df52a0 410 hw = pi->hw;
dc49c772 411
412 if (report_mode == ICE_AQC_REPORT_DFLT_CFG &&
413 !ice_fw_supports_report_dflt_cfg(hw))
d54699e2 414 return -EINVAL;
0a02944f 415
416 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);
417
418 if (qual_mods)
419 cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);
420
421 cmd->param0 |= cpu_to_le16(report_mode);
422 status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);
423
424 ice_debug(hw, ICE_DBG_LINK, "get phy caps dump\n");
425
426 switch (report_mode) {
427 case ICE_AQC_REPORT_TOPO_CAP_MEDIA:
428 prefix = "phy_caps_media";
429 break;
430 case ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA:
431 prefix = "phy_caps_no_media";
432 break;
433 case ICE_AQC_REPORT_ACTIVE_CFG:
434 prefix = "phy_caps_active";
435 break;
436 case ICE_AQC_REPORT_DFLT_CFG:
437 prefix = "phy_caps_default";
438 break;
439 default:
440 prefix = "phy_caps_invalid";
441 }
442
443 ice_dump_phy_type(hw, le64_to_cpu(pcaps->phy_type_low),
444 le64_to_cpu(pcaps->phy_type_high), prefix);
445
446 ice_debug(hw, ICE_DBG_LINK, "%s: report_mode = 0x%x\n",
447 prefix, report_mode);
448 ice_debug(hw, ICE_DBG_LINK, "%s: caps = 0x%x\n", prefix, pcaps->caps);
449 ice_debug(hw, ICE_DBG_LINK, "%s: low_power_ctrl_an = 0x%x\n", prefix,
bdeff971 450 pcaps->low_power_ctrl_an);
451 ice_debug(hw, ICE_DBG_LINK, "%s: eee_cap = 0x%x\n", prefix,
452 pcaps->eee_cap);
453 ice_debug(hw, ICE_DBG_LINK, "%s: eeer_value = 0x%x\n", prefix,
55df52a0 454 pcaps->eeer_value);
f8c74ca6 455 ice_debug(hw, ICE_DBG_LINK, "%s: link_fec_options = 0x%x\n", prefix,
55df52a0 456 pcaps->link_fec_options);
457 ice_debug(hw, ICE_DBG_LINK, "%s: module_compliance_enforcement = 0x%x\n",
458 prefix, pcaps->module_compliance_enforcement);
459 ice_debug(hw, ICE_DBG_LINK, "%s: extended_compliance_code = 0x%x\n",
460 prefix, pcaps->extended_compliance_code);
461 ice_debug(hw, ICE_DBG_LINK, "%s: module_type[0] = 0x%x\n", prefix,
55df52a0 462 pcaps->module_type[0]);
f8c74ca6 463 ice_debug(hw, ICE_DBG_LINK, "%s: module_type[1] = 0x%x\n", prefix,
55df52a0 464 pcaps->module_type[1]);
f8c74ca6 465 ice_debug(hw, ICE_DBG_LINK, "%s: module_type[2] = 0x%x\n", prefix,
55df52a0 466 pcaps->module_type[2]);
dc49c772 467
d6730a87 468 if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
dc49c772 469 pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
aef74145 470 pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
471 memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
472 sizeof(pi->phy.link_info.module_type));
aef74145 473 }
474
475 return status;
476}
477
478/**
479 * ice_aq_get_link_topo_handle - get link topology node return status
480 * @pi: port information structure
481 * @node_type: requested node type
482 * @cd: pointer to command details structure or NULL
483 *
484 * Get link topology node return status for specified node type (0x06E0)
485 *
 486 * The cage node type can be used to determine whether a media cage is
 487 * present. If the AQC returns an error (ENOENT), no cage is present and the
 488 * connection type is backplane or BASE-T.
489 */
5e24d598 490static int
491ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
492 struct ice_sq_cd *cd)
493{
494 struct ice_aqc_get_link_topo *cmd;
495 struct ice_aq_desc desc;
496
497 cmd = &desc.params.get_link_topo;
498
499 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
500
501 cmd->addr.topo_params.node_type_ctx =
502 (ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
503 ICE_AQC_LINK_TOPO_NODE_CTX_S);
504
505 /* set node type */
506 cmd->addr.topo_params.node_type_ctx |=
507 (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);
508
509 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
510}
511
512/**
513 * ice_aq_get_netlist_node
514 * @hw: pointer to the hw struct
515 * @cmd: get_link_topo AQ structure
516 * @node_part_number: output node part number if node found
517 * @node_handle: output node handle parameter if node found
518 *
519 * Get netlist node handle.
520 */
521int
522ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
523 u8 *node_part_number, u16 *node_handle)
524{
525 struct ice_aq_desc desc;
526
527 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
528 desc.params.get_link_topo = *cmd;
529
530 if (ice_aq_send_cmd(hw, &desc, NULL, 0, NULL))
531 return -EINTR;
532
533 if (node_handle)
534 *node_handle =
535 le16_to_cpu(desc.params.get_link_topo.addr.handle);
536 if (node_part_number)
537 *node_part_number = desc.params.get_link_topo.node_part_num;
538
539 return 0;
540}
541
542/**
543 * ice_find_netlist_node
544 * @hw: pointer to the hw struct
545 * @node_type_ctx: type of netlist node to look for
546 * @node_part_number: node part number to look for
547 * @node_handle: output parameter if node found - optional
548 *
549 * Scan the netlist for a node handle of the given node type and part number.
550 *
551 * If node_handle is non-NULL it will be modified on function exit. It is only
552 * valid if the function returns zero, and should be ignored on any non-zero
553 * return value.
554 *
555 * Returns: 0 if the node is found, -ENOENT if no handle was found, and
556 * a negative error code on failure to access the AQ.
8a3a565f 557 */
558static int ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx,
559 u8 node_part_number, u16 *node_handle)
8a3a565f 560{
561 u8 idx;
562
563 for (idx = 0; idx < ICE_MAX_NETLIST_SIZE; idx++) {
564 struct ice_aqc_get_link_topo cmd = {};
565 u8 rec_node_part_number;
566 int status;
567
8a3a565f 568 cmd.addr.topo_params.node_type_ctx =
569 FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M,
570 node_type_ctx);
571 cmd.addr.topo_params.index = idx;
572
573 status = ice_aq_get_netlist_node(hw, &cmd,
574 &rec_node_part_number,
640a65f8 575 node_handle);
576 if (status)
577 return status;
578
640a65f8 579 if (rec_node_part_number == node_part_number)
8a3a565f 580 return 0;
581 }
582
640a65f8 583 return -ENOENT;
584}
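/* Illustrative sketch (not part of the upstream file): a minimal wrapper
 * showing how a caller might use ice_find_netlist_node() above to check
 * whether a node of a given type and part number exists in the netlist.
 * The node_type and part_num values are caller-supplied here; real callers
 * would pass the ICE_AQC_LINK_TOPO_NODE_TYPE_* and part-number defines from
 * ice_adminq_cmd.h. The helper name is hypothetical.
 */
static bool __maybe_unused ice_example_netlist_node_present(struct ice_hw *hw,
							    u8 node_type,
							    u8 part_num)
{
	u16 handle;

	/* 0 means found; -ENOENT or another error means not found / AQ failure */
	return !ice_find_netlist_node(hw, node_type, part_num, &handle);
}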
585
586/**
587 * ice_is_media_cage_present
588 * @pi: port information structure
589 *
590 * Returns true if media cage is present, else false. If no cage, then
591 * media type is backplane or BASE-T.
592 */
593static bool ice_is_media_cage_present(struct ice_port_info *pi)
594{
595 /* Node type cage can be used to determine if cage is present. If AQC
596 * returns error (ENOENT), then no cage present. If no cage present then
597 * connection type is backplane or BASE-T.
598 */
599 return !ice_aq_get_link_topo_handle(pi,
600 ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
601 NULL);
602}
603
604/**
605 * ice_get_media_type - Gets media type
606 * @pi: port information structure
607 */
608static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
609{
610 struct ice_link_status *hw_link_info;
611
612 if (!pi)
613 return ICE_MEDIA_UNKNOWN;
614
615 hw_link_info = &pi->phy.link_info;
616 if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
617 /* If more than one media type is selected, report unknown */
618 return ICE_MEDIA_UNKNOWN;
619
620 if (hw_link_info->phy_type_low) {
621 /* 1G SGMII is a special case where some DA cable PHYs
622 * may show this as an option when it really shouldn't
623 * be since SGMII is meant to be between a MAC and a PHY
624 * in a backplane. Try to detect this case and handle it
625 */
626 if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
627 (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
628 ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
629 hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
630 ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
631 return ICE_MEDIA_DA;
632
633 switch (hw_link_info->phy_type_low) {
634 case ICE_PHY_TYPE_LOW_1000BASE_SX:
635 case ICE_PHY_TYPE_LOW_1000BASE_LX:
636 case ICE_PHY_TYPE_LOW_10GBASE_SR:
637 case ICE_PHY_TYPE_LOW_10GBASE_LR:
638 case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
639 case ICE_PHY_TYPE_LOW_25GBASE_SR:
640 case ICE_PHY_TYPE_LOW_25GBASE_LR:
641 case ICE_PHY_TYPE_LOW_40GBASE_SR4:
642 case ICE_PHY_TYPE_LOW_40GBASE_LR4:
643 case ICE_PHY_TYPE_LOW_50GBASE_SR2:
644 case ICE_PHY_TYPE_LOW_50GBASE_LR2:
645 case ICE_PHY_TYPE_LOW_50GBASE_SR:
646 case ICE_PHY_TYPE_LOW_50GBASE_FR:
647 case ICE_PHY_TYPE_LOW_50GBASE_LR:
648 case ICE_PHY_TYPE_LOW_100GBASE_SR4:
649 case ICE_PHY_TYPE_LOW_100GBASE_LR4:
650 case ICE_PHY_TYPE_LOW_100GBASE_SR2:
651 case ICE_PHY_TYPE_LOW_100GBASE_DR:
652 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
653 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
654 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
655 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
656 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
657 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
658 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
659 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
660 return ICE_MEDIA_FIBER;
661 case ICE_PHY_TYPE_LOW_100BASE_TX:
662 case ICE_PHY_TYPE_LOW_1000BASE_T:
663 case ICE_PHY_TYPE_LOW_2500BASE_T:
664 case ICE_PHY_TYPE_LOW_5GBASE_T:
665 case ICE_PHY_TYPE_LOW_10GBASE_T:
666 case ICE_PHY_TYPE_LOW_25GBASE_T:
667 return ICE_MEDIA_BASET;
668 case ICE_PHY_TYPE_LOW_10G_SFI_DA:
669 case ICE_PHY_TYPE_LOW_25GBASE_CR:
670 case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
671 case ICE_PHY_TYPE_LOW_25GBASE_CR1:
672 case ICE_PHY_TYPE_LOW_40GBASE_CR4:
673 case ICE_PHY_TYPE_LOW_50GBASE_CR2:
674 case ICE_PHY_TYPE_LOW_50GBASE_CP:
675 case ICE_PHY_TYPE_LOW_100GBASE_CR4:
676 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
677 case ICE_PHY_TYPE_LOW_100GBASE_CP2:
dc49c772 678 return ICE_MEDIA_DA;
679 case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
680 case ICE_PHY_TYPE_LOW_40G_XLAUI:
681 case ICE_PHY_TYPE_LOW_50G_LAUI2:
682 case ICE_PHY_TYPE_LOW_50G_AUI2:
683 case ICE_PHY_TYPE_LOW_50G_AUI1:
684 case ICE_PHY_TYPE_LOW_100G_AUI4:
685 case ICE_PHY_TYPE_LOW_100G_CAUI4:
686 if (ice_is_media_cage_present(pi))
687 return ICE_MEDIA_DA;
688 fallthrough;
689 case ICE_PHY_TYPE_LOW_1000BASE_KX:
690 case ICE_PHY_TYPE_LOW_2500BASE_KX:
691 case ICE_PHY_TYPE_LOW_2500BASE_X:
692 case ICE_PHY_TYPE_LOW_5GBASE_KR:
693 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
694 case ICE_PHY_TYPE_LOW_25GBASE_KR:
695 case ICE_PHY_TYPE_LOW_25GBASE_KR1:
696 case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
697 case ICE_PHY_TYPE_LOW_40GBASE_KR4:
698 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
699 case ICE_PHY_TYPE_LOW_50GBASE_KR2:
700 case ICE_PHY_TYPE_LOW_100GBASE_KR4:
701 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
702 return ICE_MEDIA_BACKPLANE;
703 }
704 } else {
705 switch (hw_link_info->phy_type_high) {
706 case ICE_PHY_TYPE_HIGH_100G_AUI2:
707 case ICE_PHY_TYPE_HIGH_100G_CAUI2:
708 if (ice_is_media_cage_present(pi))
709 return ICE_MEDIA_DA;
710 fallthrough;
aef74145 711 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
dc49c772 712 return ICE_MEDIA_BACKPLANE;
713 case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
714 case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
715 return ICE_MEDIA_FIBER;
716 }
717 }
718 return ICE_MEDIA_UNKNOWN;
719}
720
721/**
722 * ice_get_link_status_datalen
723 * @hw: pointer to the HW struct
724 *
 725 * Returns the data length for the Get Link Status AQ command, which is
 726 * larger for the newer adapter families handled by the ice driver.
727 */
728static u16 ice_get_link_status_datalen(struct ice_hw *hw)
729{
730 switch (hw->mac_type) {
731 case ICE_MAC_E830:
732 return ICE_AQC_LS_DATA_SIZE_V2;
733 case ICE_MAC_E810:
734 default:
735 return ICE_AQC_LS_DATA_SIZE_V1;
736 }
737}
738
739/**
740 * ice_aq_get_link_info
741 * @pi: port information structure
742 * @ena_lse: enable/disable LinkStatusEvent reporting
743 * @link: pointer to link status structure - optional
744 * @cd: pointer to command details structure or NULL
745 *
746 * Get Link Status (0x607). Returns the link status of the adapter.
747 */
5e24d598 748int
749ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
750 struct ice_link_status *link, struct ice_sq_cd *cd)
751{
752 struct ice_aqc_get_link_status_data link_data = { 0 };
753 struct ice_aqc_get_link_status *resp;
dc67039b 754 struct ice_link_status *li_old, *li;
755 enum ice_media_type *hw_media_type;
756 struct ice_fc_info *hw_fc_info;
757 bool tx_pause, rx_pause;
758 struct ice_aq_desc desc;
dc67039b 759 struct ice_hw *hw;
dc49c772 760 u16 cmd_flags;
5518ac2a 761 int status;
762
763 if (!pi)
d54699e2 764 return -EINVAL;
765 hw = pi->hw;
766 li_old = &pi->phy.link_info_old;
dc49c772 767 hw_media_type = &pi->phy.media_type;
dc67039b 768 li = &pi->phy.link_info;
769 hw_fc_info = &pi->fc;
770
771 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
772 cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
773 resp = &desc.params.get_link_status;
774 resp->cmd_flags = cpu_to_le16(cmd_flags);
775 resp->lport_num = pi->lport;
776
777 status = ice_aq_send_cmd(hw, &desc, &link_data,
778 ice_get_link_status_datalen(hw), cd);
779 if (status)
780 return status;
781
782 /* save off old link status information */
dc67039b 783 *li_old = *li;
784
785 /* update current link status information */
786 li->link_speed = le16_to_cpu(link_data.link_speed);
787 li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
788 li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
dc49c772 789 *hw_media_type = ice_get_media_type(pi);
dc67039b 790 li->link_info = link_data.link_info;
c77849f5 791 li->link_cfg_err = link_data.link_cfg_err;
792 li->an_info = link_data.an_info;
793 li->ext_info = link_data.ext_info;
794 li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
795 li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
796 li->topo_media_conflict = link_data.topo_media_conflict;
797 li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
798 ICE_AQ_CFG_PACING_TYPE_M);
799
800 /* update fc info */
801 tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
802 rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
803 if (tx_pause && rx_pause)
804 hw_fc_info->current_mode = ICE_FC_FULL;
805 else if (tx_pause)
806 hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
807 else if (rx_pause)
808 hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
809 else
810 hw_fc_info->current_mode = ICE_FC_NONE;
811
812 li->lse_ena = !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));
813
814 ice_debug(hw, ICE_DBG_LINK, "get link info\n");
815 ice_debug(hw, ICE_DBG_LINK, " link_speed = 0x%x\n", li->link_speed);
816 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
dc67039b 817 (unsigned long long)li->phy_type_low);
55df52a0 818 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
dc67039b 819 (unsigned long long)li->phy_type_high);
820 ice_debug(hw, ICE_DBG_LINK, " media_type = 0x%x\n", *hw_media_type);
821 ice_debug(hw, ICE_DBG_LINK, " link_info = 0x%x\n", li->link_info);
c77849f5 822 ice_debug(hw, ICE_DBG_LINK, " link_cfg_err = 0x%x\n", li->link_cfg_err);
823 ice_debug(hw, ICE_DBG_LINK, " an_info = 0x%x\n", li->an_info);
824 ice_debug(hw, ICE_DBG_LINK, " ext_info = 0x%x\n", li->ext_info);
825 ice_debug(hw, ICE_DBG_LINK, " fec_info = 0x%x\n", li->fec_info);
826 ice_debug(hw, ICE_DBG_LINK, " lse_ena = 0x%x\n", li->lse_ena);
827 ice_debug(hw, ICE_DBG_LINK, " max_frame = 0x%x\n",
828 li->max_frame_size);
829 ice_debug(hw, ICE_DBG_LINK, " pacing = 0x%x\n", li->pacing);
830
831 /* save link status information */
832 if (link)
dc67039b 833 *link = *li;
834
835 /* flag cleared so calling functions don't call AQ again */
836 pi->phy.get_link_info = false;
837
1b5c19c7 838 return 0;
839}
840
841/**
842 * ice_fill_tx_timer_and_fc_thresh
843 * @hw: pointer to the HW struct
844 * @cmd: pointer to MAC cfg structure
845 *
846 * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
847 * descriptor
848 */
849static void
850ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
851 struct ice_aqc_set_mac_cfg *cmd)
852{
ba1124f5 853 u32 val, fc_thres_m;
854
855 /* We read back the transmit timer and FC threshold value of
856 * LFC. Thus, we will use index =
857 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
858 *
859 * Also, because we are operating on transmit timer and FC
860 * threshold of LFC, we don't turn on any bit in tx_tmr_priority
861 */
862#define E800_IDX_OF_LFC E800_PRTMAC_HSEC_CTL_TX_PS_QNT_MAX
863#define E800_REFRESH_TMR E800_PRTMAC_HSEC_CTL_TX_PS_RFSH_TMR
864
865 if (hw->mac_type == ICE_MAC_E830) {
866 /* Retrieve the transmit timer */
867 val = rd32(hw, E830_PRTMAC_CL01_PS_QNT);
868 cmd->tx_tmr_value =
869 le16_encode_bits(val, E830_PRTMAC_CL01_PS_QNT_CL0_M);
870
871 /* Retrieve the fc threshold */
872 val = rd32(hw, E830_PRTMAC_CL01_QNT_THR);
873 fc_thres_m = E830_PRTMAC_CL01_QNT_THR_CL0_M;
874 } else {
875 /* Retrieve the transmit timer */
876 val = rd32(hw,
877 E800_PRTMAC_HSEC_CTL_TX_PS_QNT(E800_IDX_OF_LFC));
878 cmd->tx_tmr_value =
879 le16_encode_bits(val,
880 E800_PRTMAC_HSEC_CTL_TX_PS_QNT_M);
881
882 /* Retrieve the fc threshold */
883 val = rd32(hw,
884 E800_REFRESH_TMR(E800_IDX_OF_LFC));
885 fc_thres_m = E800_PRTMAC_HSEC_CTL_TX_PS_RFSH_TMR_M;
886 }
887 cmd->fc_refresh_threshold = le16_encode_bits(val, fc_thres_m);
888}
889
890/**
891 * ice_aq_set_mac_cfg
892 * @hw: pointer to the HW struct
893 * @max_frame_size: Maximum Frame Size to be supported
894 * @cd: pointer to command details structure or NULL
895 *
896 * Set MAC configuration (0x0603)
897 */
5e24d598 898int
899ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
900{
901 struct ice_aqc_set_mac_cfg *cmd;
902 struct ice_aq_desc desc;
903
904 cmd = &desc.params.set_mac_cfg;
905
906 if (max_frame_size == 0)
d54699e2 907 return -EINVAL;
908
909 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);
910
911 cmd->max_frame_size = cpu_to_le16(max_frame_size);
912
913 ice_fill_tx_timer_and_fc_thresh(hw, cmd);
914
915 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
916}
917
918/**
919 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
f9867df6 920 * @hw: pointer to the HW struct
9daf8208 921 */
5e24d598 922static int ice_init_fltr_mgmt_struct(struct ice_hw *hw)
923{
924 struct ice_switch_info *sw;
5e24d598 925 int status;
926
927 hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
928 sizeof(*hw->switch_info), GFP_KERNEL);
929 sw = hw->switch_info;
930
931 if (!sw)
d54699e2 932 return -ENOMEM;
933
934 INIT_LIST_HEAD(&sw->vsi_list_map_head);
0f94570d 935 sw->prof_res_bm_init = 0;
9daf8208 936
937 /* Initialize recipe count with default recipes read from NVM */
938 sw->recp_cnt = ICE_SW_LKUP_LAST;
939
940 status = ice_init_def_sw_recp(hw);
941 if (status) {
942 devm_kfree(ice_hw_to_dev(hw), hw->switch_info);
943 return status;
944 }
945 return 0;
946}
947
948/**
949 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
f9867df6 950 * @hw: pointer to the HW struct
951 */
952static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
953{
954 struct ice_switch_info *sw = hw->switch_info;
955 struct ice_vsi_list_map_info *v_pos_map;
956 struct ice_vsi_list_map_info *v_tmp_map;
957 struct ice_sw_recipe *recps;
958 u8 i;
959
960 list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
961 list_entry) {
962 list_del(&v_pos_map->list_entry);
963 devm_kfree(ice_hw_to_dev(hw), v_pos_map);
964 }
965 recps = sw->recp_list;
966 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
80d144c9 967 recps[i].root_rid = i;
968
969 if (recps[i].adv_rule) {
970 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
971 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
972
973 mutex_destroy(&recps[i].filt_rule_lock);
974 list_for_each_entry_safe(lst_itr, tmp_entry,
975 &recps[i].filt_rules,
976 list_entry) {
977 list_del(&lst_itr->list_entry);
978 devm_kfree(ice_hw_to_dev(hw), lst_itr->lkups);
979 devm_kfree(ice_hw_to_dev(hw), lst_itr);
980 }
981 } else {
982 struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;
983
984 mutex_destroy(&recps[i].filt_rule_lock);
985 list_for_each_entry_safe(lst_itr, tmp_entry,
986 &recps[i].filt_rules,
987 list_entry) {
988 list_del(&lst_itr->list_entry);
989 devm_kfree(ice_hw_to_dev(hw), lst_itr);
990 }
991 }
80d144c9 992 }
334cb062 993 ice_rm_all_sw_replay_rule_info(hw);
80d144c9 994 devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
995 devm_kfree(ice_hw_to_dev(hw), sw);
996}
997
9e4ab4c2 998/**
4ee656bb 999 * ice_get_itr_intrl_gran
f9867df6 1000 * @hw: pointer to the HW struct
9e4ab4c2 1001 *
4ee656bb 1002 * Determines the ITR/INTRL granularities based on the maximum aggregate
1003 * bandwidth according to the device's configuration during power-on.
1004 */
fe7219fa 1005static void ice_get_itr_intrl_gran(struct ice_hw *hw)
9e4ab4c2 1006{
1007 u8 max_agg_bw = FIELD_GET(GL_PWR_MODE_CTL_CAR_MAX_BW_M,
1008 rd32(hw, GL_PWR_MODE_CTL));
1009
1010 switch (max_agg_bw) {
1011 case ICE_MAX_AGG_BW_200G:
1012 case ICE_MAX_AGG_BW_100G:
1013 case ICE_MAX_AGG_BW_50G:
1014 hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
1015 hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
1016 break;
1017 case ICE_MAX_AGG_BW_25G:
1018 hw->itr_gran = ICE_ITR_GRAN_MAX_25;
1019 hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
1020 break;
9e4ab4c2 1021 }
1022}
1023
1024/**
1025 * ice_init_hw - main hardware initialization routine
1026 * @hw: pointer to the hardware structure
1027 */
5e24d598 1028int ice_init_hw(struct ice_hw *hw)
f31e4b6f 1029{
1030 struct ice_aqc_get_phy_caps_data *pcaps __free(kfree) = NULL;
1031 void *mac_buf __free(kfree) = NULL;
dc49c772 1032 u16 mac_buf_len;
5518ac2a 1033 int status;
1034
1035 /* Set MAC type based on DeviceID */
1036 status = ice_set_mac_type(hw);
1037 if (status)
1038 return status;
1039
5a259f8e 1040 hw->pf_id = FIELD_GET(PF_FUNC_RID_FUNC_NUM_M, rd32(hw, PF_FUNC_RID));
1041
1042 status = ice_reset(hw, ICE_RESET_PFR);
1043 if (status)
1044 return status;
1045
fe7219fa 1046 ice_get_itr_intrl_gran(hw);
940b61af 1047
5c91ecfd 1048 status = ice_create_all_ctrlq(hw);
1049 if (status)
1050 goto err_unroll_cqinit;
1051
1052 status = ice_fwlog_init(hw);
1053 if (status)
1054 ice_debug(hw, ICE_DBG_FW_LOG, "Error initializing FW logging: %d\n",
1055 status);
1056
1057 status = ice_clear_pf_cfg(hw);
1058 if (status)
1059 goto err_unroll_cqinit;
1060
1061 /* Set bit to enable Flow Director filters */
1062 wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
1063 INIT_LIST_HEAD(&hw->fdir_list_head);
1064
1065 ice_clear_pxe_mode(hw);
1066
1067 status = ice_init_nvm(hw);
1068 if (status)
1069 goto err_unroll_cqinit;
1070
1071 status = ice_get_caps(hw);
1072 if (status)
1073 goto err_unroll_cqinit;
1074
1075 if (!hw->port_info)
1076 hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
1077 sizeof(*hw->port_info),
1078 GFP_KERNEL);
9c20346b 1079 if (!hw->port_info) {
d54699e2 1080 status = -ENOMEM;
1081 goto err_unroll_cqinit;
1082 }
1083
a59618b9 1084 hw->port_info->local_fwd_mode = ICE_LOCAL_FWD_MODE_ENABLED;
f9867df6 1085 /* set the back pointer to HW */
1086 hw->port_info->hw = hw;
1087
1088 /* Initialize port_info struct with switch configuration data */
1089 status = ice_get_initial_sw_cfg(hw);
1090 if (status)
1091 goto err_unroll_alloc;
1092
1093 hw->evb_veb = true;
1094
1095 /* init xarray for identifying scheduling nodes uniquely */
1096 xa_init_flags(&hw->port_info->sched_node_ids, XA_FLAGS_ALLOC);
1097
d337f2af 1098 /* Query the allocated resources for Tx scheduler */
1099 status = ice_sched_query_res_alloc(hw);
1100 if (status) {
9228d8b2 1101 ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n");
1102 goto err_unroll_alloc;
1103 }
4f8a1497 1104 ice_sched_get_psm_clk_freq(hw);
9c20346b 1105
1106 /* Initialize port_info struct with scheduler data */
1107 status = ice_sched_init_port(hw->port_info);
1108 if (status)
1109 goto err_unroll_sched;
1110
90f821d7 1111 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
dc49c772 1112 if (!pcaps) {
d54699e2 1113 status = -ENOMEM;
1114 goto err_unroll_sched;
1115 }
1116
1117 /* Initialize port_info struct with PHY capabilities */
1118 status = ice_aq_get_phy_caps(hw->port_info, false,
1119 ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps,
1120 NULL);
dc49c772 1121 if (status)
1122 dev_warn(ice_hw_to_dev(hw), "Get PHY capabilities failed status = %d, continuing anyway\n",
1123 status);
1124
1125 /* Initialize port_info struct with link information */
1126 status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
1127 if (status)
1128 goto err_unroll_sched;
1129
1130 /* need a valid SW entry point to build a Tx tree */
1131 if (!hw->sw_entry_point_layer) {
1132 ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
d54699e2 1133 status = -EIO;
1134 goto err_unroll_sched;
1135 }
9be1d6f8 1136 INIT_LIST_HEAD(&hw->agg_list);
1137 /* Initialize max burst size */
1138 if (!hw->max_burst_size)
1139 ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);
b36c598c 1140
1141 status = ice_init_fltr_mgmt_struct(hw);
1142 if (status)
1143 goto err_unroll_sched;
1144
1145 /* Get MAC information */
1146 /* A single port can report up to two (LAN and WoL) addresses */
1147 mac_buf = kcalloc(2, sizeof(struct ice_aqc_manage_mac_read_resp),
1148 GFP_KERNEL);
63bb4e1e 1149 if (!mac_buf) {
d54699e2 1150 status = -ENOMEM;
9daf8208 1151 goto err_unroll_fltr_mgmt_struct;
63bb4e1e 1152 }
dc49c772 1153
90f821d7 1154 mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
dc49c772 1155 status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
dc49c772 1156
1157 if (status)
1158 goto err_unroll_fltr_mgmt_struct;
1159 /* enable jumbo frame support at MAC level */
1160 status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
1161 if (status)
1162 goto err_unroll_fltr_mgmt_struct;
1163 /* Obtain counter base index which would be used by flow director */
1164 status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base);
dc49c772 1165 if (status)
9daf8208 1166 goto err_unroll_fltr_mgmt_struct;
1167 status = ice_init_hw_tbls(hw);
1168 if (status)
1169 goto err_unroll_fltr_mgmt_struct;
a4e82a81 1170 mutex_init(&hw->tnl_lock);
1171 ice_init_chk_recipe_reuse_support(hw);
1172
1173 return 0;
1174
1175err_unroll_fltr_mgmt_struct:
1176 ice_cleanup_fltr_mgmt_struct(hw);
1177err_unroll_sched:
1178 ice_sched_cleanup_all(hw);
1179err_unroll_alloc:
1180 devm_kfree(ice_hw_to_dev(hw), hw->port_info);
f31e4b6f 1181err_unroll_cqinit:
5c91ecfd 1182 ice_destroy_all_ctrlq(hw);
1183 return status;
1184}
1185
1186/**
1187 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
1188 * @hw: pointer to the hardware structure
1189 *
1190 * This should be called only during nominal operation, not as a result of
1191 * ice_init_hw() failing since ice_init_hw() will take care of unrolling
1192 * applicable initializations if it fails for any reason.
1193 */
1194void ice_deinit_hw(struct ice_hw *hw)
1195{
148beb61 1196 ice_free_fd_res_cntr(hw, hw->fd_ctr_base);
1197 ice_cleanup_fltr_mgmt_struct(hw);
1198
9c20346b 1199 ice_sched_cleanup_all(hw);
9be1d6f8 1200 ice_sched_clear_agg(hw);
c7648810 1201 ice_free_seg(hw);
32d63fa1 1202 ice_free_hw_tbls(hw);
a4e82a81 1203 mutex_destroy(&hw->tnl_lock);
dc49c772 1204
96a9a934 1205 ice_fwlog_deinit(hw);
5c91ecfd 1206 ice_destroy_all_ctrlq(hw);
1207
1208 /* Clear VSI contexts if not already cleared */
1209 ice_clear_all_vsi_ctx(hw);
1210}
1211
1212/**
1213 * ice_check_reset - Check to see if a global reset is complete
1214 * @hw: pointer to the hardware structure
1215 */
5e24d598 1216int ice_check_reset(struct ice_hw *hw)
f31e4b6f 1217{
585cdabd 1218 u32 cnt, reg = 0, grst_timeout, uld_mask;
1219
1220 /* Poll for Device Active state in case a recent CORER, GLOBR,
1221 * or EMPR has occurred. The grst delay value is in 100ms units.
1222 * Add 1sec for outstanding AQ commands that can take a long time.
1223 */
1224 grst_timeout = FIELD_GET(GLGEN_RSTCTL_GRSTDEL_M,
1225 rd32(hw, GLGEN_RSTCTL)) + 10;
f31e4b6f 1226
585cdabd 1227 for (cnt = 0; cnt < grst_timeout; cnt++) {
1228 mdelay(100);
1229 reg = rd32(hw, GLGEN_RSTAT);
1230 if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
1231 break;
1232 }
1233
585cdabd 1234 if (cnt == grst_timeout) {
9228d8b2 1235 ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n");
d54699e2 1236 return -EIO;
1237 }
1238
1239#define ICE_RESET_DONE_MASK (GLNVM_ULD_PCIER_DONE_M |\
1240 GLNVM_ULD_PCIER_DONE_1_M |\
1241 GLNVM_ULD_CORER_DONE_M |\
1242 GLNVM_ULD_GLOBR_DONE_M |\
1243 GLNVM_ULD_POR_DONE_M |\
1244 GLNVM_ULD_POR_DONE_1_M |\
1245 GLNVM_ULD_PCIER_DONE_2_M)
1246
1247 uld_mask = ICE_RESET_DONE_MASK | (hw->func_caps.common_cap.rdma ?
1248 GLNVM_ULD_PE_DONE_M : 0);
1249
1250 /* Device is Active; check Global Reset processes are done */
1251 for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
1252 reg = rd32(hw, GLNVM_ULD) & uld_mask;
1253 if (reg == uld_mask) {
9228d8b2 1254 ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt);
1255 break;
1256 }
1257 mdelay(10);
1258 }
1259
1260 if (cnt == ICE_PF_RESET_WAIT_COUNT) {
9228d8b2 1261 ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
f31e4b6f 1262 reg);
d54699e2 1263 return -EIO;
1264 }
1265
1266 return 0;
1267}
1268
1269/**
1270 * ice_pf_reset - Reset the PF
1271 * @hw: pointer to the hardware structure
1272 *
1273 * If a global reset has been triggered, this function checks
1274 * for its completion and then issues the PF reset
1275 */
5e24d598 1276static int ice_pf_reset(struct ice_hw *hw)
1277{
1278 u32 cnt, reg;
1279
1280 /* If at function entry a global reset was already in progress, i.e.
1281 * state is not 'device active' or any of the reset done bits are not
1282 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
1283 * global reset is done.
1284 */
1285 if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
1286 (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
1287 /* poll on global reset currently in progress until done */
1288 if (ice_check_reset(hw))
d54699e2 1289 return -EIO;
1290
1291 return 0;
1292 }
1293
1294 /* Reset the PF */
1295 reg = rd32(hw, PFGEN_CTRL);
1296
1297 wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));
1298
1299 /* Wait for the PFR to complete. The wait time is the global config lock
1300 * timeout plus the PFR timeout which will account for a possible reset
1301 * that is occurring during a download package operation.
1302 */
1303 for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
1304 ICE_PF_RESET_WAIT_COUNT; cnt++) {
1305 reg = rd32(hw, PFGEN_CTRL);
1306 if (!(reg & PFGEN_CTRL_PFSWR_M))
1307 break;
1308
1309 mdelay(1);
1310 }
1311
1312 if (cnt == ICE_PF_RESET_WAIT_COUNT) {
9228d8b2 1313 ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n");
d54699e2 1314 return -EIO;
1315 }
1316
1317 return 0;
1318}
1319
1320/**
1321 * ice_reset - Perform different types of reset
1322 * @hw: pointer to the hardware structure
1323 * @req: reset request
1324 *
1325 * This function triggers a reset as specified by the req parameter.
1326 *
1327 * Note:
1328 * If anything other than a PF reset is triggered, PXE mode is restored.
1329 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
1330 * interface has been restored in the rebuild flow.
1331 */
5e24d598 1332int ice_reset(struct ice_hw *hw, enum ice_reset_req req)
1333{
1334 u32 val = 0;
1335
1336 switch (req) {
1337 case ICE_RESET_PFR:
1338 return ice_pf_reset(hw);
1339 case ICE_RESET_CORER:
1340 ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
1341 val = GLGEN_RTRIG_CORER_M;
1342 break;
1343 case ICE_RESET_GLOBR:
1344 ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
1345 val = GLGEN_RTRIG_GLOBR_M;
1346 break;
0f9d5027 1347 default:
d54699e2 1348 return -EINVAL;
1349 }
1350
1351 val |= rd32(hw, GLGEN_RTRIG);
1352 wr32(hw, GLGEN_RTRIG, val);
1353 ice_flush(hw);
1354
1355 /* wait for the FW to be ready */
1356 return ice_check_reset(hw);
1357}
1358
1359/**
1360 * ice_copy_rxq_ctx_to_hw
1361 * @hw: pointer to the hardware structure
1362 * @ice_rxq_ctx: pointer to the rxq context
d337f2af 1363 * @rxq_index: the index of the Rx queue
cdedef59 1364 *
f9867df6 1365 * Copies rxq context from dense structure to HW register space
cdedef59 1366 */
5e24d598 1367static int
1368ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
1369{
1370 u8 i;
1371
1372 if (!ice_rxq_ctx)
d54699e2 1373 return -EINVAL;
1374
1375 if (rxq_index > QRX_CTRL_MAX_INDEX)
d54699e2 1376 return -EINVAL;
cdedef59 1377
f9867df6 1378 /* Copy each dword separately to HW */
1379 for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
1380 wr32(hw, QRX_CONTEXT(i, rxq_index),
1381 *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
1382
1383 ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
1384 *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
1385 }
1386
1387 return 0;
1388}
1389
1390/* LAN Rx Queue Context */
1391static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
1392 /* Field Width LSB */
1393 ICE_CTX_STORE(ice_rlan_ctx, head, 13, 0),
1394 ICE_CTX_STORE(ice_rlan_ctx, cpuid, 8, 13),
1395 ICE_CTX_STORE(ice_rlan_ctx, base, 57, 32),
1396 ICE_CTX_STORE(ice_rlan_ctx, qlen, 13, 89),
1397 ICE_CTX_STORE(ice_rlan_ctx, dbuf, 7, 102),
1398 ICE_CTX_STORE(ice_rlan_ctx, hbuf, 5, 109),
1399 ICE_CTX_STORE(ice_rlan_ctx, dtype, 2, 114),
1400 ICE_CTX_STORE(ice_rlan_ctx, dsize, 1, 116),
1401 ICE_CTX_STORE(ice_rlan_ctx, crcstrip, 1, 117),
1402 ICE_CTX_STORE(ice_rlan_ctx, l2tsel, 1, 119),
1403 ICE_CTX_STORE(ice_rlan_ctx, hsplit_0, 4, 120),
1404 ICE_CTX_STORE(ice_rlan_ctx, hsplit_1, 2, 124),
1405 ICE_CTX_STORE(ice_rlan_ctx, showiv, 1, 127),
1406 ICE_CTX_STORE(ice_rlan_ctx, rxmax, 14, 174),
1407 ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena, 1, 193),
1408 ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena, 1, 194),
1409 ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena, 1, 195),
1410 ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena, 1, 196),
1411 ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh, 3, 198),
c31a5c25 1412 ICE_CTX_STORE(ice_rlan_ctx, prefena, 1, 201),
1413 { 0 }
1414};
1415
1416/**
1417 * ice_write_rxq_ctx
1418 * @hw: pointer to the hardware structure
1419 * @rlan_ctx: pointer to the rxq context
d337f2af 1420 * @rxq_index: the index of the Rx queue
1421 *
 1422 * Converts the rxq context from the sparse to the dense structure, writes it
 1423 * to HW register space, and enables the hardware to prefetch descriptors
 1424 * instead of only fetching them on demand.
cdedef59 1425 */
1426int ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
1427 u32 rxq_index)
1428{
1429 u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
1430
c31a5c25 1431 if (!rlan_ctx)
d54699e2 1432 return -EINVAL;
1433
1434 rlan_ctx->prefena = 1;
1435
7e34786a 1436 ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
1437 return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
1438}
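/* Illustrative sketch (not part of the upstream file): how a queue setup path
 * might fill a minimal LAN Rx queue context and program it through
 * ice_write_rxq_ctx() above. The unit conversions (128-byte granularity for
 * the descriptor base and buffer size) and the example values are assumptions
 * for illustration only; the real settings come from the Rx ring setup code.
 * The helper name is hypothetical.
 */
static int __maybe_unused ice_example_write_rxq_ctx(struct ice_hw *hw,
						    u64 ring_dma, u16 num_desc,
						    u32 rxq_index)
{
	struct ice_rlan_ctx rlan_ctx = { 0 };

	rlan_ctx.base = ring_dma >> 7;	/* descriptor base, 128-byte units (assumed) */
	rlan_ctx.qlen = num_desc;	/* ring length in descriptors */
	rlan_ctx.dbuf = 2048 >> 7;	/* Rx buffer size, 128-byte units (assumed) */
	rlan_ctx.rxmax = 1522;		/* max frame size accepted on this queue */

	/* prefena is forced on by ice_write_rxq_ctx() itself */
	return ice_write_rxq_ctx(hw, &rlan_ctx, rxq_index);
}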
1439
1440/* LAN Tx Queue Context */
1441const struct ice_ctx_ele ice_tlan_ctx_info[] = {
1442 /* Field Width LSB */
1443 ICE_CTX_STORE(ice_tlan_ctx, base, 57, 0),
1444 ICE_CTX_STORE(ice_tlan_ctx, port_num, 3, 57),
1445 ICE_CTX_STORE(ice_tlan_ctx, cgd_num, 5, 60),
1446 ICE_CTX_STORE(ice_tlan_ctx, pf_num, 3, 65),
1447 ICE_CTX_STORE(ice_tlan_ctx, vmvf_num, 10, 68),
1448 ICE_CTX_STORE(ice_tlan_ctx, vmvf_type, 2, 78),
1449 ICE_CTX_STORE(ice_tlan_ctx, src_vsi, 10, 80),
1450 ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena, 1, 90),
201beeb7 1451 ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag, 1, 91),
1452 ICE_CTX_STORE(ice_tlan_ctx, alt_vlan, 1, 92),
1453 ICE_CTX_STORE(ice_tlan_ctx, cpuid, 8, 93),
1454 ICE_CTX_STORE(ice_tlan_ctx, wb_mode, 1, 101),
1455 ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc, 1, 102),
1456 ICE_CTX_STORE(ice_tlan_ctx, tphrd, 1, 103),
1457 ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc, 1, 104),
1458 ICE_CTX_STORE(ice_tlan_ctx, cmpq_id, 9, 105),
1459 ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func, 14, 114),
1460 ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode, 1, 128),
1461 ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id, 6, 129),
1462 ICE_CTX_STORE(ice_tlan_ctx, qlen, 13, 135),
1463 ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx, 4, 148),
1464 ICE_CTX_STORE(ice_tlan_ctx, tso_ena, 1, 152),
1465 ICE_CTX_STORE(ice_tlan_ctx, tso_qnum, 11, 153),
1466 ICE_CTX_STORE(ice_tlan_ctx, legacy_int, 1, 164),
1467 ICE_CTX_STORE(ice_tlan_ctx, drop_ena, 1, 165),
1468 ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx, 2, 166),
1469 ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx, 3, 168),
201beeb7 1470 ICE_CTX_STORE(ice_tlan_ctx, int_q_state, 122, 171),
1471 { 0 }
1472};
1473
1474/* Sideband Queue command wrappers */
1475
1476/**
1477 * ice_sbq_send_cmd - send Sideband Queue command to Sideband Queue
1478 * @hw: pointer to the HW struct
1479 * @desc: descriptor describing the command
1480 * @buf: buffer to use for indirect commands (NULL for direct commands)
1481 * @buf_size: size of buffer for indirect commands (0 for direct commands)
1482 * @cd: pointer to command details structure
1483 */
1484static int
1485ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
1486 void *buf, u16 buf_size, struct ice_sq_cd *cd)
1487{
1488 return ice_sq_send_cmd(hw, ice_get_sbq(hw),
1489 (struct ice_aq_desc *)desc, buf, buf_size, cd);
1490}
1491
1492/**
1493 * ice_sbq_rw_reg - Fill Sideband Queue command
1494 * @hw: pointer to the HW struct
1495 * @in: message info to be filled in descriptor
a317f873 1496 * @flags: control queue descriptor flags
8f5ee3c4 1497 */
a317f873 1498int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in, u16 flags)
1499{
1500 struct ice_sbq_cmd_desc desc = {0};
1501 struct ice_sbq_msg_req msg = {0};
1502 u16 msg_len;
1503 int status;
1504
1505 msg_len = sizeof(msg);
1506
1507 msg.dest_dev = in->dest_dev;
1508 msg.opcode = in->opcode;
1509 msg.flags = ICE_SBQ_MSG_FLAGS;
1510 msg.sbe_fbe = ICE_SBQ_MSG_SBE_FBE;
1511 msg.msg_addr_low = cpu_to_le16(in->msg_addr_low);
1512 msg.msg_addr_high = cpu_to_le32(in->msg_addr_high);
1513
1514 if (in->opcode)
1515 msg.data = cpu_to_le32(in->data);
1516 else
1517 /* data read comes back in completion, so shorten the struct by
1518 * sizeof(msg.data)
1519 */
1520 msg_len -= sizeof(msg.data);
1521
a317f873 1522 desc.flags = cpu_to_le16(flags);
1523 desc.opcode = cpu_to_le16(ice_sbq_opc_neigh_dev_req);
1524 desc.param0.cmd_len = cpu_to_le16(msg_len);
1525 status = ice_sbq_send_cmd(hw, &desc, &msg, msg_len, NULL);
1526 if (!status && !in->opcode)
1527 in->data = le32_to_cpu
1528 (((struct ice_sbq_msg_cmpl *)&msg)->data);
1529 return status;
1530}
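/* Illustrative sketch (not part of the upstream file): reading one 32-bit
 * register from a sideband device through ice_sbq_rw_reg() above. Leaving
 * in.opcode at zero selects the read path (the data comes back in the
 * completion); real callers use the opcode and flag defines from
 * ice_sbq_cmd.h / ice_adminq_cmd.h. The dest_dev, addr and flags values are
 * caller-supplied assumptions here, and the helper name is hypothetical.
 */
static int __maybe_unused ice_example_sbq_read(struct ice_hw *hw, u8 dest_dev,
					       u32 addr, u16 flags, u32 *val)
{
	struct ice_sbq_msg_input in = {};
	int err;

	in.dest_dev = dest_dev;
	in.msg_addr_low = addr & 0xffff;	/* low 16 bits of register address */
	in.msg_addr_high = addr >> 16;		/* upper bits of register address */

	err = ice_sbq_rw_reg(hw, &in, flags);
	if (!err)
		*val = in.data;			/* filled in by the completion */
	return err;
}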
1531
1532/* FW Admin Queue command wrappers */
1533
1534/* Software lock/mutex that is meant to be held while the Global Config Lock
1535 * in firmware is acquired by the software to prevent most (but not all) types
1536 * of AQ commands from being sent to FW
1537 */
1538DEFINE_MUTEX(ice_global_cfg_lock_sw);
1539
1540/**
1541 * ice_should_retry_sq_send_cmd
1542 * @opcode: AQ opcode
1543 *
1544 * Decide if we should retry the send command routine for the ATQ, depending
1545 * on the opcode.
1546 */
1547static bool ice_should_retry_sq_send_cmd(u16 opcode)
1548{
1549 switch (opcode) {
1550 case ice_aqc_opc_get_link_topo:
1551 case ice_aqc_opc_lldp_stop:
1552 case ice_aqc_opc_lldp_start:
1553 case ice_aqc_opc_lldp_filter_ctrl:
1554 return true;
1555 }
1556
1557 return false;
1558}
1559
1560/**
1561 * ice_sq_send_cmd_retry - send command to Control Queue (ATQ)
1562 * @hw: pointer to the HW struct
1563 * @cq: pointer to the specific Control queue
1564 * @desc: prefilled descriptor describing the command
1565 * @buf: buffer to use for indirect commands (or NULL for direct commands)
1566 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
1567 * @cd: pointer to command details structure
1568 *
1569 * Retry sending the FW Admin Queue command, multiple times, to the FW Admin
1570 * Queue if the EBUSY AQ error is returned.
1571 */
5e24d598 1572static int
1573ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
1574 struct ice_aq_desc *desc, void *buf, u16 buf_size,
1575 struct ice_sq_cd *cd)
1576{
1577 struct ice_aq_desc desc_cpy;
3056df93 1578 bool is_cmd_for_retry;
1579 u8 idx = 0;
1580 u16 opcode;
5518ac2a 1581 int status;
3056df93
CC
1582
1583 opcode = le16_to_cpu(desc->opcode);
1584 is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode);
1585 memset(&desc_cpy, 0, sizeof(desc_cpy));
1586
1587 if (is_cmd_for_retry) {
1588 /* All retryable cmds are direct, without buf. */
1589 WARN_ON(buf);
1590
1591 memcpy(&desc_cpy, desc, sizeof(desc_cpy));
1592 }
1593
1594 do {
1595 status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd);
1596
1597 if (!is_cmd_for_retry || !status ||
1598 hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY)
1599 break;
1600
1601 memcpy(desc, &desc_cpy, sizeof(desc_cpy));
1602
b488ae52 1603 msleep(ICE_SQ_SEND_DELAY_TIME_MS);
1604
1605 } while (++idx < ICE_SQ_SEND_MAX_EXECUTE);
1606
1607 return status;
1608}
1609
1610/**
1611 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
f9867df6 1612 * @hw: pointer to the HW struct
7ec59eea
AV
1613 * @desc: descriptor describing the command
1614 * @buf: buffer to use for indirect commands (NULL for direct commands)
1615 * @buf_size: size of buffer for indirect commands (0 for direct commands)
1616 * @cd: pointer to command details structure
1617 *
1618 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
1619 */
5e24d598 1620int
1621ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
1622 u16 buf_size, struct ice_sq_cd *cd)
1623{
1624 struct ice_aqc_req_res *cmd = &desc->params.res_owner;
1625 bool lock_acquired = false;
5e24d598 1626 int status;
1627
1628 /* When a package download is in process (i.e. when the firmware's
1629 * Global Configuration Lock resource is held), only the Download
1630 * Package, Get Version, Get Package Info List, Upload Section,
1631 * Update Package, Set Port Parameters, Get/Set VLAN Mode Parameters,
1632 * Add Recipe, Set Recipes to Profile Association, Get Recipe, and Get
1633 * Recipes to Profile Association, and Release Resource (with resource
1634 * ID set to Global Config Lock) AdminQ commands are allowed; all others
1635 * must block until the package download completes and the Global Config
1636 * Lock is released. See also ice_acquire_global_cfg_lock().
1637 */
1638 switch (le16_to_cpu(desc->opcode)) {
1639 case ice_aqc_opc_download_pkg:
1640 case ice_aqc_opc_get_pkg_info_list:
1641 case ice_aqc_opc_get_ver:
1642 case ice_aqc_opc_upload_section:
1643 case ice_aqc_opc_update_pkg:
1644 case ice_aqc_opc_set_port_params:
1645 case ice_aqc_opc_get_vlan_mode_parameters:
1646 case ice_aqc_opc_set_vlan_mode_parameters:
1647 case ice_aqc_opc_set_tx_topo:
1648 case ice_aqc_opc_get_tx_topo:
1649 case ice_aqc_opc_add_recipe:
1650 case ice_aqc_opc_recipe_to_profile:
1651 case ice_aqc_opc_get_recipe:
1652 case ice_aqc_opc_get_recipe_to_profile:
1653 break;
1654 case ice_aqc_opc_release_res:
1655 if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK)
1656 break;
4e83fc93 1657 fallthrough;
c7648810
TN
1658 default:
1659 mutex_lock(&ice_global_cfg_lock_sw);
1660 lock_acquired = true;
1661 break;
1662 }
1663
3056df93 1664 status = ice_sq_send_cmd_retry(hw, &hw->adminq, desc, buf, buf_size, cd);
c7648810
TN
1665 if (lock_acquired)
1666 mutex_unlock(&ice_global_cfg_lock_sw);
1667
1668 return status;
7ec59eea
AV
1669}
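
/* Illustrative sketch (editorial example, not part of the driver): a typical
 * direct (buffer-less) command is built with ice_fill_dflt_direct_cmd_desc()
 * and then handed to ice_aq_send_cmd(), mirroring ice_aq_get_fw_ver() below.
 *
 *	struct ice_aq_desc desc;
 *	int err;
 *
 *	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
 *	err = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
 *	if (err)
 *		return err;
 *
 * Indirect commands pass a DMA-safe buffer and its size instead of NULL/0.
 */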
1670
1671/**
1672 * ice_aq_get_fw_ver
f9867df6 1673 * @hw: pointer to the HW struct
7ec59eea
AV
1674 * @cd: pointer to command details structure or NULL
1675 *
1676 * Get the firmware version (0x0001) from the admin queue commands
1677 */
5e24d598 1678int ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
7ec59eea
AV
1679{
1680 struct ice_aqc_get_ver *resp;
1681 struct ice_aq_desc desc;
5e24d598 1682 int status;
7ec59eea
AV
1683
1684 resp = &desc.params.get_ver;
1685
1686 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
1687
1688 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1689
1690 if (!status) {
1691 hw->fw_branch = resp->fw_branch;
1692 hw->fw_maj_ver = resp->fw_major;
1693 hw->fw_min_ver = resp->fw_minor;
1694 hw->fw_patch = resp->fw_patch;
1695 hw->fw_build = le32_to_cpu(resp->fw_build);
1696 hw->api_branch = resp->api_branch;
1697 hw->api_maj_ver = resp->api_major;
1698 hw->api_min_ver = resp->api_minor;
1699 hw->api_patch = resp->api_patch;
1700 }
1701
1702 return status;
1703}
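
/* Illustrative sketch (editorial example, not part of the driver): after a
 * successful call the version fields are cached on the hw struct and can be
 * logged or compared directly.
 *
 *	if (!ice_aq_get_fw_ver(hw, NULL))
 *		dev_info(ice_hw_to_dev(hw), "FW %u.%u.%u, API %u.%u\n",
 *			 hw->fw_maj_ver, hw->fw_min_ver, hw->fw_patch,
 *			 hw->api_maj_ver, hw->api_min_ver);
 */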
1704
e3710a01
PSJ
1705/**
1706 * ice_aq_send_driver_ver
1707 * @hw: pointer to the HW struct
1708 * @dv: driver's major, minor version
1709 * @cd: pointer to command details structure or NULL
1710 *
1711 * Send the driver version (0x0002) to the firmware
1712 */
5e24d598 1713int
e3710a01
PSJ
1714ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
1715 struct ice_sq_cd *cd)
1716{
1717 struct ice_aqc_driver_ver *cmd;
1718 struct ice_aq_desc desc;
1719 u16 len;
1720
1721 cmd = &desc.params.driver_ver;
1722
1723 if (!dv)
d54699e2 1724 return -EINVAL;
e3710a01
PSJ
1725
1726 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);
1727
1728 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1729 cmd->major_ver = dv->major_ver;
1730 cmd->minor_ver = dv->minor_ver;
1731 cmd->build_ver = dv->build_ver;
1732 cmd->subbuild_ver = dv->subbuild_ver;
1733
1734 len = 0;
1735 while (len < sizeof(dv->driver_string) &&
1736 isascii(dv->driver_string[len]) && dv->driver_string[len])
1737 len++;
1738
1739 return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
1740}
1741
7ec59eea
AV
1742/**
1743 * ice_aq_q_shutdown
f9867df6 1744 * @hw: pointer to the HW struct
7ec59eea
AV
1745 * @unloading: is the driver unloading itself
1746 *
1747 * Tell the Firmware that we're shutting down the AdminQ and whether
1748 * or not the driver is unloading as well (0x0003).
1749 */
5e24d598 1750int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
7ec59eea
AV
1751{
1752 struct ice_aqc_q_shutdown *cmd;
1753 struct ice_aq_desc desc;
1754
1755 cmd = &desc.params.q_shutdown;
1756
1757 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);
1758
1759 if (unloading)
7404e84a 1760 cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;
7ec59eea
AV
1761
1762 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1763}
f31e4b6f
AV
1764
1765/**
1766 * ice_aq_req_res
f9867df6
AV
1767 * @hw: pointer to the HW struct
1768 * @res: resource ID
f31e4b6f
AV
1769 * @access: access type
1770 * @sdp_number: resource number
1771 * @timeout: the maximum time in ms that the driver may hold the resource
1772 * @cd: pointer to command details structure or NULL
1773 *
ff2b1321
DN
1774 * Requests common resource using the admin queue commands (0x0008).
1775 * When attempting to acquire the Global Config Lock, the driver can
1776 * learn of three states:
d54699e2
TN
 1777	 * 1) 0 - acquired the lock, and can perform the package download
1778 * 2) -EIO - did not get lock, driver should fail to load
1779 * 3) -EALREADY - did not get lock, but another driver has
1780 * successfully downloaded the package; the driver does
1781 * not have to download the package and can continue
1782 * loading
ff2b1321
DN
1783 *
 1784	 * Note that if the caller is in the middle of an acquire-lock, perform-action,
 1785	 * release-lock sequence, it is possible that the FW may detect a timeout and issue
1786 * a CORER. In this case, the driver will receive a CORER interrupt and will
1787 * have to determine its cause. The calling thread that is handling this flow
1788 * will likely get an error propagated back to it indicating the Download
1789 * Package, Update Package or the Release Resource AQ commands timed out.
f31e4b6f 1790 */
5e24d598 1791static int
f31e4b6f
AV
1792ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1793 enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
1794 struct ice_sq_cd *cd)
1795{
1796 struct ice_aqc_req_res *cmd_resp;
1797 struct ice_aq_desc desc;
5e24d598 1798 int status;
f31e4b6f
AV
1799
1800 cmd_resp = &desc.params.res_owner;
1801
1802 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);
1803
1804 cmd_resp->res_id = cpu_to_le16(res);
1805 cmd_resp->access_type = cpu_to_le16(access);
1806 cmd_resp->res_number = cpu_to_le32(sdp_number);
ff2b1321
DN
1807 cmd_resp->timeout = cpu_to_le32(*timeout);
1808 *timeout = 0;
f31e4b6f
AV
1809
1810 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
ff2b1321 1811
f31e4b6f
AV
1812 /* The completion specifies the maximum time in ms that the driver
1813 * may hold the resource in the Timeout field.
ff2b1321
DN
1814 */
1815
1816 /* Global config lock response utilizes an additional status field.
1817 *
1818 * If the Global config lock resource is held by some other driver, the
1819 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
1820 * and the timeout field indicates the maximum time the current owner
1821 * of the resource has to free it.
1822 */
1823 if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
1824 if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
1825 *timeout = le32_to_cpu(cmd_resp->timeout);
1826 return 0;
1827 } else if (le16_to_cpu(cmd_resp->status) ==
1828 ICE_AQ_RES_GLBL_IN_PROG) {
1829 *timeout = le32_to_cpu(cmd_resp->timeout);
d54699e2 1830 return -EIO;
ff2b1321
DN
1831 } else if (le16_to_cpu(cmd_resp->status) ==
1832 ICE_AQ_RES_GLBL_DONE) {
d54699e2 1833 return -EALREADY;
ff2b1321
DN
1834 }
1835
1836 /* invalid FW response, force a timeout immediately */
1837 *timeout = 0;
d54699e2 1838 return -EIO;
ff2b1321
DN
1839 }
1840
1841 /* If the resource is held by some other driver, the command completes
1842 * with a busy return value and the timeout field indicates the maximum
1843 * time the current owner of the resource has to free it.
f31e4b6f
AV
1844 */
1845 if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
1846 *timeout = le32_to_cpu(cmd_resp->timeout);
1847
1848 return status;
1849}
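
/* Illustrative sketch (editorial example, not part of the driver): how a
 * package-download caller might interpret the three Global Config Lock
 * states documented above, via ice_acquire_res(). The timeout constant
 * ICE_GLOBAL_CFG_LOCK_TIMEOUT is assumed here to stand for whatever timeout
 * the caller chooses.
 *
 *	int err;
 *
 *	err = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, ICE_RES_WRITE,
 *			      ICE_GLOBAL_CFG_LOCK_TIMEOUT);
 *	if (!err) {
 *		// got the lock: download the package, then release the lock
 *	} else if (err == -EALREADY) {
 *		// another PF already downloaded the package: skip the download
 *	} else {
 *		// -EIO: lock not obtained, fail the load
 *	}
 */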
1850
1851/**
1852 * ice_aq_release_res
f9867df6
AV
1853 * @hw: pointer to the HW struct
1854 * @res: resource ID
f31e4b6f
AV
1855 * @sdp_number: resource number
1856 * @cd: pointer to command details structure or NULL
1857 *
 1858	 * Release a common resource using the admin queue commands (0x0009)
1859 */
5e24d598 1860static int
f31e4b6f
AV
1861ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
1862 struct ice_sq_cd *cd)
1863{
1864 struct ice_aqc_req_res *cmd;
1865 struct ice_aq_desc desc;
1866
1867 cmd = &desc.params.res_owner;
1868
1869 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);
1870
1871 cmd->res_id = cpu_to_le16(res);
1872 cmd->res_number = cpu_to_le32(sdp_number);
1873
1874 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1875}
1876
1877/**
1878 * ice_acquire_res
1879 * @hw: pointer to the HW structure
f9867df6 1880 * @res: resource ID
f31e4b6f 1881 * @access: access type (read or write)
ff2b1321 1882 * @timeout: timeout in milliseconds
f31e4b6f
AV
1883 *
1884 * This function will attempt to acquire the ownership of a resource.
1885 */
5e24d598 1886int
f31e4b6f 1887ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
ff2b1321 1888 enum ice_aq_res_access_type access, u32 timeout)
f31e4b6f
AV
1889{
1890#define ICE_RES_POLLING_DELAY_MS 10
1891 u32 delay = ICE_RES_POLLING_DELAY_MS;
ff2b1321 1892 u32 time_left = timeout;
5e24d598 1893 int status;
f31e4b6f
AV
1894
1895 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1896
d54699e2 1897 /* A return code of -EALREADY means that another driver has
ff2b1321
DN
1898 * previously acquired the resource and performed any necessary updates;
1899 * in this case the caller does not obtain the resource and has no
1900 * further work to do.
f31e4b6f 1901 */
d54699e2 1902 if (status == -EALREADY)
f31e4b6f 1903 goto ice_acquire_res_exit;
f31e4b6f
AV
1904
1905 if (status)
9228d8b2 1906 ice_debug(hw, ICE_DBG_RES, "resource %d acquire type %d failed.\n", res, access);
f31e4b6f
AV
1907
 1908	 /* If necessary, poll until the current lock owner times out */
1909 timeout = time_left;
1910 while (status && timeout && time_left) {
1911 mdelay(delay);
1912 timeout = (timeout > delay) ? timeout - delay : 0;
1913 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1914
d54699e2 1915 if (status == -EALREADY)
f31e4b6f 1916 /* lock free, but no work to do */
f31e4b6f 1917 break;
f31e4b6f
AV
1918
1919 if (!status)
1920 /* lock acquired */
1921 break;
1922 }
d54699e2 1923 if (status && status != -EALREADY)
f31e4b6f
AV
1924 ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");
1925
1926ice_acquire_res_exit:
d54699e2 1927 if (status == -EALREADY) {
f31e4b6f 1928 if (access == ICE_RES_WRITE)
9228d8b2 1929 ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n");
f31e4b6f 1930 else
d54699e2 1931 ice_debug(hw, ICE_DBG_RES, "Warning: -EALREADY not expected\n");
f31e4b6f
AV
1932 }
1933 return status;
1934}
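
/* Illustrative sketch (editorial example, not part of the driver): the usual
 * acquire / do work / release pattern around a shared resource. The resource
 * ID and timeout used here (ICE_NVM_RES_ID, ICE_NVM_TIMEOUT) are assumed
 * names from the driver's headers; any ice_aq_res_ids value follows the same
 * pattern.
 *
 *	int err;
 *
 *	err = ice_acquire_res(hw, ICE_NVM_RES_ID, ICE_RES_READ, ICE_NVM_TIMEOUT);
 *	if (err)
 *		return err;
 *
 *	// ... access the NVM while holding the resource ...
 *
 *	ice_release_res(hw, ICE_NVM_RES_ID);
 *
 * ice_acquire_res() polls every ICE_RES_POLLING_DELAY_MS until either the
 * lock is obtained or the current owner's timeout expires.
 */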
1935
1936/**
1937 * ice_release_res
1938 * @hw: pointer to the HW structure
f9867df6 1939 * @res: resource ID
f31e4b6f
AV
1940 *
1941 * This function will release a resource using the proper Admin Command.
1942 */
1943void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
1944{
f86d6f9c 1945 unsigned long timeout;
5518ac2a 1946 int status;
f31e4b6f 1947
f31e4b6f 1948	 /* There are some rare cases where trying to release the resource
f9867df6 1949	  * results in an admin queue timeout; handle them by retrying the release
f31e4b6f 1950 */
f86d6f9c
MS
1951 timeout = jiffies + 10 * ICE_CTL_Q_SQ_CMD_TIMEOUT;
1952 do {
f31e4b6f 1953 status = ice_aq_release_res(hw, res, 0, NULL);
f86d6f9c
MS
1954 if (status != -EIO)
1955 break;
1956 usleep_range(1000, 2000);
1957 } while (time_before(jiffies, timeout));
f31e4b6f
AV
1958}
1959
31ad4e4e
TN
1960/**
1961 * ice_aq_alloc_free_res - command to allocate/free resources
1962 * @hw: pointer to the HW struct
31ad4e4e
TN
1963 * @buf: Indirect buffer to hold data parameters and response
1964 * @buf_size: size of buffer for indirect commands
1965 * @opc: pass in the command opcode
31ad4e4e
TN
1966 *
1967 * Helper function to allocate/free resources using the admin queue commands
1968 */
52da2fb2
PK
1969int ice_aq_alloc_free_res(struct ice_hw *hw,
1970 struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
1971 enum ice_adminq_opc opc)
31ad4e4e
TN
1972{
1973 struct ice_aqc_alloc_free_res_cmd *cmd;
1974 struct ice_aq_desc desc;
1975
1976 cmd = &desc.params.sw_res_ctrl;
1977
52da2fb2 1978 if (!buf || buf_size < flex_array_size(buf, elem, 1))
d54699e2 1979 return -EINVAL;
31ad4e4e
TN
1980
1981 ice_fill_dflt_direct_cmd_desc(&desc, opc);
1982
1983 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1984
52da2fb2 1985 cmd->num_entries = cpu_to_le16(1);
31ad4e4e 1986
52da2fb2 1987 return ice_aq_send_cmd(hw, &desc, buf, buf_size, NULL);
31ad4e4e
TN
1988}
1989
1990/**
1991 * ice_alloc_hw_res - allocate resource
1992 * @hw: pointer to the HW struct
1993 * @type: type of resource
1994 * @num: number of resources to allocate
1995 * @btm: allocate from bottom
1996 * @res: pointer to array that will receive the resources
1997 */
5e24d598 1998int
31ad4e4e
TN
1999ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
2000{
2001 struct ice_aqc_alloc_free_res_elem *buf;
31ad4e4e 2002 u16 buf_len;
5518ac2a 2003 int status;
31ad4e4e 2004
66486d89 2005 buf_len = struct_size(buf, elem, num);
31ad4e4e
TN
2006 buf = kzalloc(buf_len, GFP_KERNEL);
2007 if (!buf)
d54699e2 2008 return -ENOMEM;
31ad4e4e
TN
2009
2010 /* Prepare buffer to allocate resource. */
2011 buf->num_elems = cpu_to_le16(num);
2012 buf->res_type = cpu_to_le16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED |
2013 ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX);
2014 if (btm)
2015 buf->res_type |= cpu_to_le16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);
2016
52da2fb2 2017 status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_alloc_res);
31ad4e4e
TN
2018 if (status)
2019 goto ice_alloc_res_exit;
2020
66486d89 2021 memcpy(res, buf->elem, sizeof(*buf->elem) * num);
31ad4e4e
TN
2022
2023ice_alloc_res_exit:
2024 kfree(buf);
2025 return status;
2026}
2027
451f2c44
TN
2028/**
2029 * ice_free_hw_res - free allocated HW resource
2030 * @hw: pointer to the HW struct
2031 * @type: type of resource to free
2032 * @num: number of resources
2033 * @res: pointer to array that contains the resources to free
2034 */
5e24d598 2035int ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
451f2c44
TN
2036{
2037 struct ice_aqc_alloc_free_res_elem *buf;
451f2c44 2038 u16 buf_len;
5518ac2a 2039 int status;
451f2c44 2040
66486d89 2041 buf_len = struct_size(buf, elem, num);
451f2c44
TN
2042 buf = kzalloc(buf_len, GFP_KERNEL);
2043 if (!buf)
d54699e2 2044 return -ENOMEM;
451f2c44
TN
2045
2046 /* Prepare buffer to free resource. */
2047 buf->num_elems = cpu_to_le16(num);
2048 buf->res_type = cpu_to_le16(type);
66486d89 2049 memcpy(buf->elem, res, sizeof(*buf->elem) * num);
451f2c44 2050
52da2fb2 2051 status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_free_res);
451f2c44
TN
2052 if (status)
2053 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
2054
2055 kfree(buf);
2056 return status;
2057}
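
/* Illustrative sketch (editorial example, not part of the driver): resources
 * allocated with ice_alloc_hw_res() are returned as IDs in the caller's array
 * and later handed back with ice_free_hw_res(). The resource type (res_type
 * below) is a placeholder for one of the ICE_AQC_RES_TYPE_* values from
 * ice_adminq_cmd.h.
 *
 *	u16 res_ids[2];
 *	int err;
 *
 *	err = ice_alloc_hw_res(hw, res_type, 2, false, res_ids);
 *	if (err)
 *		return err;
 *
 *	// ... use res_ids[0] and res_ids[1] ...
 *
 *	ice_free_hw_res(hw, res_type, 2, res_ids);
 */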
2058
995c90f2 2059/**
7a1f7111 2060 * ice_get_num_per_func - determine number of resources per PF
f9867df6 2061 * @hw: pointer to the HW structure
7a1f7111 2062 * @max: value to be evenly split between each PF
995c90f2
AV
2063 *
2064 * Determine the number of valid functions by going through the bitmap returned
7a1f7111
BC
2065 * from parsing capabilities and use this to calculate the number of resources
2066 * per PF based on the max value passed in.
995c90f2 2067 */
7a1f7111 2068static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
995c90f2
AV
2069{
2070 u8 funcs;
2071
2072#define ICE_CAPS_VALID_FUNCS_M 0xFF
2073 funcs = hweight8(hw->dev_caps.common_cap.valid_functions &
2074 ICE_CAPS_VALID_FUNCS_M);
2075
2076 if (!funcs)
2077 return 0;
2078
7a1f7111 2079 return max / funcs;
995c90f2
AV
2080}
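
/* Worked example (editorial, numbers are hypothetical): if the
 * valid_functions bitmap reported in the device capabilities is 0x0F,
 * hweight8() yields 4 enabled PFs, so ice_get_num_per_func(hw, 2048)
 * would report 2048 / 4 = 512 resources per PF.
 */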
2081
9c20346b 2082/**
595b13e2 2083 * ice_parse_common_caps - parse common device/function capabilities
f9867df6 2084 * @hw: pointer to the HW struct
595b13e2
JK
2085 * @caps: pointer to common capabilities structure
2086 * @elem: the capability element to parse
2087 * @prefix: message prefix for tracing capabilities
9c20346b 2088 *
595b13e2
JK
2089 * Given a capability element, extract relevant details into the common
2090 * capability structure.
2091 *
2092 * Returns: true if the capability matches one of the common capability ids,
2093 * false otherwise.
2094 */
2095static bool
2096ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
2097 struct ice_aqc_list_caps_elem *elem, const char *prefix)
2098{
2099 u32 logical_id = le32_to_cpu(elem->logical_id);
2100 u32 phys_id = le32_to_cpu(elem->phys_id);
2101 u32 number = le32_to_cpu(elem->number);
2102 u16 cap = le16_to_cpu(elem->cap);
2103 bool found = true;
2104
2105 switch (cap) {
2106 case ICE_AQC_CAPS_VALID_FUNCTIONS:
2107 caps->valid_functions = number;
9228d8b2 2108 ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix,
595b13e2
JK
2109 caps->valid_functions);
2110 break;
2111 case ICE_AQC_CAPS_SRIOV:
2112 caps->sr_iov_1_1 = (number == 1);
9228d8b2 2113 ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %d\n", prefix,
595b13e2
JK
2114 caps->sr_iov_1_1);
2115 break;
2116 case ICE_AQC_CAPS_DCB:
2117 caps->dcb = (number == 1);
2118 caps->active_tc_bitmap = logical_id;
2119 caps->maxtc = phys_id;
9228d8b2
JK
2120 ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %d\n", prefix, caps->dcb);
2121 ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = %d\n", prefix,
595b13e2 2122 caps->active_tc_bitmap);
9228d8b2 2123 ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc);
595b13e2
JK
2124 break;
2125 case ICE_AQC_CAPS_RSS:
2126 caps->rss_table_size = number;
2127 caps->rss_table_entry_width = logical_id;
9228d8b2 2128 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix,
595b13e2 2129 caps->rss_table_size);
9228d8b2 2130 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix,
595b13e2
JK
2131 caps->rss_table_entry_width);
2132 break;
2133 case ICE_AQC_CAPS_RXQS:
2134 caps->num_rxq = number;
2135 caps->rxq_first_id = phys_id;
9228d8b2 2136 ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix,
595b13e2 2137 caps->num_rxq);
9228d8b2 2138 ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix,
595b13e2
JK
2139 caps->rxq_first_id);
2140 break;
2141 case ICE_AQC_CAPS_TXQS:
2142 caps->num_txq = number;
2143 caps->txq_first_id = phys_id;
9228d8b2 2144 ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix,
595b13e2 2145 caps->num_txq);
9228d8b2 2146 ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix,
595b13e2
JK
2147 caps->txq_first_id);
2148 break;
2149 case ICE_AQC_CAPS_MSIX:
2150 caps->num_msix_vectors = number;
2151 caps->msix_vector_first_id = phys_id;
9228d8b2 2152 ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix,
595b13e2 2153 caps->num_msix_vectors);
9228d8b2 2154 ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix,
595b13e2
JK
2155 caps->msix_vector_first_id);
2156 break;
2ab560a7
JK
2157 case ICE_AQC_CAPS_PENDING_NVM_VER:
2158 caps->nvm_update_pending_nvm = true;
2159 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_nvm\n", prefix);
2160 break;
2161 case ICE_AQC_CAPS_PENDING_OROM_VER:
2162 caps->nvm_update_pending_orom = true;
2163 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_orom\n", prefix);
2164 break;
2165 case ICE_AQC_CAPS_PENDING_NET_VER:
2166 caps->nvm_update_pending_netlist = true;
2167 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_netlist\n", prefix);
2168 break;
de9b277e
JN
2169 case ICE_AQC_CAPS_NVM_MGMT:
2170 caps->nvm_unified_update =
2171 (number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
2172 true : false;
2173 ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix,
2174 caps->nvm_unified_update);
2175 break;
d25a0fc4
DE
2176 case ICE_AQC_CAPS_RDMA:
2177 caps->rdma = (number == 1);
2178 ice_debug(hw, ICE_DBG_INIT, "%s: rdma = %d\n", prefix, caps->rdma);
2179 break;
595b13e2
JK
2180 case ICE_AQC_CAPS_MAX_MTU:
2181 caps->max_mtu = number;
2182 ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
2183 prefix, caps->max_mtu);
2184 break;
399e27db
JK
2185 case ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE:
2186 caps->pcie_reset_avoidance = (number > 0);
2187 ice_debug(hw, ICE_DBG_INIT,
2188 "%s: pcie_reset_avoidance = %d\n", prefix,
2189 caps->pcie_reset_avoidance);
2190 break;
2191 case ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT:
2192 caps->reset_restrict_support = (number == 1);
2193 ice_debug(hw, ICE_DBG_INIT,
2194 "%s: reset_restrict_support = %d\n", prefix,
2195 caps->reset_restrict_support);
2196 break;
bb52f42a
DE
2197 case ICE_AQC_CAPS_FW_LAG_SUPPORT:
2198 caps->roce_lag = !!(number & ICE_AQC_BIT_ROCEV2_LAG);
2199 ice_debug(hw, ICE_DBG_INIT, "%s: roce_lag = %u\n",
2200 prefix, caps->roce_lag);
2201 caps->sriov_lag = !!(number & ICE_AQC_BIT_SRIOV_LAG);
2202 ice_debug(hw, ICE_DBG_INIT, "%s: sriov_lag = %u\n",
2203 prefix, caps->sriov_lag);
2204 break;
91427e6d
RV
2205 case ICE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE:
2206 caps->tx_sched_topo_comp_mode_en = (number == 1);
2207 break;
595b13e2
JK
2208 default:
2209 /* Not one of the recognized common capabilities */
2210 found = false;
2211 }
2212
2213 return found;
2214}
2215
2216/**
2217 * ice_recalc_port_limited_caps - Recalculate port limited capabilities
2218 * @hw: pointer to the HW structure
2219 * @caps: pointer to capabilities structure to fix
2220 *
2221 * Re-calculate the capabilities that are dependent on the number of physical
2222 * ports; i.e. some features are not supported or function differently on
2223 * devices with more than 4 ports.
9c20346b
AV
2224 */
2225static void
595b13e2
JK
2226ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps)
2227{
2228 /* This assumes device capabilities are always scanned before function
2229 * capabilities during the initialization flow.
2230 */
2231 if (hw->dev_caps.num_funcs > 4) {
2232 /* Max 4 TCs per port */
2233 caps->maxtc = 4;
9228d8b2 2234 ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n",
595b13e2 2235 caps->maxtc);
d25a0fc4
DE
2236 if (caps->rdma) {
2237 ice_debug(hw, ICE_DBG_INIT, "forcing RDMA off\n");
2238 caps->rdma = 0;
2239 }
2240
2241 /* print message only when processing device capabilities
2242 * during initialization.
2243 */
2244 if (caps == &hw->dev_caps.common_cap)
2245 dev_info(ice_hw_to_dev(hw), "RDMA functionality is not available with the current device configuration.\n");
595b13e2
JK
2246 }
2247}
2248
2249/**
2250 * ice_parse_vf_func_caps - Parse ICE_AQC_CAPS_VF function caps
2251 * @hw: pointer to the HW struct
2252 * @func_p: pointer to function capabilities structure
2253 * @cap: pointer to the capability element to parse
2254 *
2255 * Extract function capabilities for ICE_AQC_CAPS_VF.
2256 */
2257static void
2258ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2259 struct ice_aqc_list_caps_elem *cap)
2260{
2261 u32 logical_id = le32_to_cpu(cap->logical_id);
2262 u32 number = le32_to_cpu(cap->number);
2263
2264 func_p->num_allocd_vfs = number;
2265 func_p->vf_base_id = logical_id;
2266 ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %d\n",
2267 func_p->num_allocd_vfs);
2268 ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %d\n",
2269 func_p->vf_base_id);
2270}
2271
2272/**
2273 * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps
2274 * @hw: pointer to the HW struct
2275 * @func_p: pointer to function capabilities structure
2276 * @cap: pointer to the capability element to parse
2277 *
2278 * Extract function capabilities for ICE_AQC_CAPS_VSI.
2279 */
2280static void
2281ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2282 struct ice_aqc_list_caps_elem *cap)
2283{
2284 func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI);
2285 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n",
2286 le32_to_cpu(cap->number));
2287 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n",
2288 func_p->guar_num_vsi);
2289}
2290
9733cc94
JK
2291/**
2292 * ice_parse_1588_func_caps - Parse ICE_AQC_CAPS_1588 function caps
2293 * @hw: pointer to the HW struct
2294 * @func_p: pointer to function capabilities structure
2295 * @cap: pointer to the capability element to parse
2296 *
2297 * Extract function capabilities for ICE_AQC_CAPS_1588.
2298 */
2299static void
2300ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2301 struct ice_aqc_list_caps_elem *cap)
2302{
2303 struct ice_ts_func_info *info = &func_p->ts_func_info;
2304 u32 number = le32_to_cpu(cap->number);
2305
2306 info->ena = ((number & ICE_TS_FUNC_ENA_M) != 0);
2307 func_p->common_cap.ieee_1588 = info->ena;
2308
2309 info->src_tmr_owned = ((number & ICE_TS_SRC_TMR_OWND_M) != 0);
2310 info->tmr_ena = ((number & ICE_TS_TMR_ENA_M) != 0);
2311 info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0);
2312 info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0);
2313
713dcad2
MM
2314 if (!ice_is_e825c(hw)) {
2315 info->clk_freq = FIELD_GET(ICE_TS_CLK_FREQ_M, number);
2316 info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0);
2317 } else {
2318 info->clk_freq = ICE_TIME_REF_FREQ_156_250;
2319 info->clk_src = ICE_CLK_SRC_TCXO;
2320 }
9733cc94 2321
405efa49
JK
2322 if (info->clk_freq < NUM_ICE_TIME_REF_FREQ) {
2323 info->time_ref = (enum ice_time_ref_freq)info->clk_freq;
2324 } else {
2325 /* Unknown clock frequency, so assume a (probably incorrect)
 2326	 * default to avoid out-of-bounds lookups of frequency-related
 2327	 * information.
2328 */
2329 ice_debug(hw, ICE_DBG_INIT, "1588 func caps: unknown clock frequency %u\n",
2330 info->clk_freq);
2331 info->time_ref = ICE_TIME_REF_FREQ_25_000;
2332 }
2333
9733cc94
JK
2334 ice_debug(hw, ICE_DBG_INIT, "func caps: ieee_1588 = %u\n",
2335 func_p->common_cap.ieee_1588);
2336 ice_debug(hw, ICE_DBG_INIT, "func caps: src_tmr_owned = %u\n",
2337 info->src_tmr_owned);
2338 ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_ena = %u\n",
2339 info->tmr_ena);
2340 ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_owned = %u\n",
2341 info->tmr_index_owned);
2342 ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_assoc = %u\n",
2343 info->tmr_index_assoc);
2344 ice_debug(hw, ICE_DBG_INIT, "func caps: clk_freq = %u\n",
2345 info->clk_freq);
2346 ice_debug(hw, ICE_DBG_INIT, "func caps: clk_src = %u\n",
2347 info->clk_src);
2348}
2349
595b13e2
JK
2350/**
2351 * ice_parse_fdir_func_caps - Parse ICE_AQC_CAPS_FD function caps
2352 * @hw: pointer to the HW struct
2353 * @func_p: pointer to function capabilities structure
2354 *
2355 * Extract function capabilities for ICE_AQC_CAPS_FD.
2356 */
2357static void
2358ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p)
2359{
ba1124f5 2360 u32 reg_val, gsize, bsize;
595b13e2
JK
2361
2362 reg_val = rd32(hw, GLQF_FD_SIZE);
ba1124f5
PG
2363 switch (hw->mac_type) {
2364 case ICE_MAC_E830:
2365 gsize = FIELD_GET(E830_GLQF_FD_SIZE_FD_GSIZE_M, reg_val);
2366 bsize = FIELD_GET(E830_GLQF_FD_SIZE_FD_BSIZE_M, reg_val);
2367 break;
2368 case ICE_MAC_E810:
2369 default:
2370 gsize = FIELD_GET(E800_GLQF_FD_SIZE_FD_GSIZE_M, reg_val);
2371 bsize = FIELD_GET(E800_GLQF_FD_SIZE_FD_BSIZE_M, reg_val);
2372 }
2373 func_p->fd_fltr_guar = ice_get_num_per_func(hw, gsize);
2374 func_p->fd_fltr_best_effort = bsize;
595b13e2 2375
9228d8b2 2376 ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_guar = %d\n",
595b13e2 2377 func_p->fd_fltr_guar);
9228d8b2 2378 ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_best_effort = %d\n",
595b13e2
JK
2379 func_p->fd_fltr_best_effort);
2380}
2381
2382/**
2383 * ice_parse_func_caps - Parse function capabilities
2384 * @hw: pointer to the HW struct
2385 * @func_p: pointer to function capabilities structure
2386 * @buf: buffer containing the function capability records
2387 * @cap_count: the number of capabilities
2388 *
2389 * Helper function to parse function (0x000A) capabilities list. For
2390 * capabilities shared between device and function, this relies on
2391 * ice_parse_common_caps.
2392 *
2393 * Loop through the list of provided capabilities and extract the relevant
 2394	 * data into the function capabilities structure.
2395 */
2396static void
2397ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2398 void *buf, u32 cap_count)
9c20346b
AV
2399{
2400 struct ice_aqc_list_caps_elem *cap_resp;
9c20346b
AV
2401 u32 i;
2402
7a63dae0 2403 cap_resp = buf;
9c20346b 2404
595b13e2 2405 memset(func_p, 0, sizeof(*func_p));
9c20346b 2406
595b13e2
JK
2407 for (i = 0; i < cap_count; i++) {
2408 u16 cap = le16_to_cpu(cap_resp[i].cap);
2409 bool found;
9c20346b 2410
595b13e2
JK
2411 found = ice_parse_common_caps(hw, &func_p->common_cap,
2412 &cap_resp[i], "func caps");
eae1bbb2 2413
595b13e2 2414 switch (cap) {
75d2b253 2415 case ICE_AQC_CAPS_VF:
595b13e2 2416 ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
75d2b253 2417 break;
9c20346b 2418 case ICE_AQC_CAPS_VSI:
595b13e2 2419 ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
a257f188 2420 break;
9733cc94
JK
2421 case ICE_AQC_CAPS_1588:
2422 ice_parse_1588_func_caps(hw, func_p, &cap_resp[i]);
2423 break;
595b13e2
JK
2424 case ICE_AQC_CAPS_FD:
2425 ice_parse_fdir_func_caps(hw, func_p);
9c20346b 2426 break;
595b13e2
JK
2427 default:
2428 /* Don't list common capabilities as unknown */
2429 if (!found)
9228d8b2 2430 ice_debug(hw, ICE_DBG_INIT, "func caps: unknown capability[%d]: 0x%x\n",
595b13e2 2431 i, cap);
9c20346b 2432 break;
595b13e2
JK
2433 }
2434 }
2435
2436 ice_recalc_port_limited_caps(hw, &func_p->common_cap);
2437}
2438
2439/**
2440 * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps
2441 * @hw: pointer to the HW struct
2442 * @dev_p: pointer to device capabilities structure
2443 * @cap: capability element to parse
2444 *
2445 * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities.
2446 */
2447static void
2448ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2449 struct ice_aqc_list_caps_elem *cap)
2450{
2451 u32 number = le32_to_cpu(cap->number);
2452
2453 dev_p->num_funcs = hweight32(number);
2454 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n",
2455 dev_p->num_funcs);
2456}
2457
2458/**
2459 * ice_parse_vf_dev_caps - Parse ICE_AQC_CAPS_VF device caps
2460 * @hw: pointer to the HW struct
2461 * @dev_p: pointer to device capabilities structure
2462 * @cap: capability element to parse
2463 *
2464 * Parse ICE_AQC_CAPS_VF for device capabilities.
2465 */
2466static void
2467ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2468 struct ice_aqc_list_caps_elem *cap)
2469{
2470 u32 number = le32_to_cpu(cap->number);
2471
2472 dev_p->num_vfs_exposed = number;
2473 ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %d\n",
2474 dev_p->num_vfs_exposed);
2475}
2476
2477/**
2478 * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps
2479 * @hw: pointer to the HW struct
2480 * @dev_p: pointer to device capabilities structure
2481 * @cap: capability element to parse
2482 *
2483 * Parse ICE_AQC_CAPS_VSI for device capabilities.
2484 */
2485static void
2486ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2487 struct ice_aqc_list_caps_elem *cap)
2488{
2489 u32 number = le32_to_cpu(cap->number);
2490
2491 dev_p->num_vsi_allocd_to_host = number;
2492 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n",
2493 dev_p->num_vsi_allocd_to_host);
2494}
2495
9733cc94
JK
2496/**
2497 * ice_parse_1588_dev_caps - Parse ICE_AQC_CAPS_1588 device caps
2498 * @hw: pointer to the HW struct
2499 * @dev_p: pointer to device capabilities structure
2500 * @cap: capability element to parse
2501 *
2502 * Parse ICE_AQC_CAPS_1588 for device capabilities.
2503 */
2504static void
2505ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2506 struct ice_aqc_list_caps_elem *cap)
2507{
2508 struct ice_ts_dev_info *info = &dev_p->ts_dev_info;
2509 u32 logical_id = le32_to_cpu(cap->logical_id);
2510 u32 phys_id = le32_to_cpu(cap->phys_id);
2511 u32 number = le32_to_cpu(cap->number);
2512
2513 info->ena = ((number & ICE_TS_DEV_ENA_M) != 0);
2514 dev_p->common_cap.ieee_1588 = info->ena;
2515
2516 info->tmr0_owner = number & ICE_TS_TMR0_OWNR_M;
2517 info->tmr0_owned = ((number & ICE_TS_TMR0_OWND_M) != 0);
2518 info->tmr0_ena = ((number & ICE_TS_TMR0_ENA_M) != 0);
2519
5a259f8e 2520 info->tmr1_owner = FIELD_GET(ICE_TS_TMR1_OWNR_M, number);
9733cc94
JK
2521 info->tmr1_owned = ((number & ICE_TS_TMR1_OWND_M) != 0);
2522 info->tmr1_ena = ((number & ICE_TS_TMR1_ENA_M) != 0);
2523
1229b339 2524 info->ts_ll_read = ((number & ICE_TS_LL_TX_TS_READ_M) != 0);
82e71b22 2525 info->ts_ll_int_read = ((number & ICE_TS_LL_TX_TS_INT_READ_M) != 0);
1229b339 2526
9733cc94
JK
2527 info->ena_ports = logical_id;
2528 info->tmr_own_map = phys_id;
2529
2530 ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 = %u\n",
2531 dev_p->common_cap.ieee_1588);
2532 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owner = %u\n",
2533 info->tmr0_owner);
2534 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owned = %u\n",
2535 info->tmr0_owned);
2536 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_ena = %u\n",
2537 info->tmr0_ena);
2538 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owner = %u\n",
2539 info->tmr1_owner);
2540 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owned = %u\n",
2541 info->tmr1_owned);
2542 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_ena = %u\n",
2543 info->tmr1_ena);
1229b339
KK
2544 ice_debug(hw, ICE_DBG_INIT, "dev caps: ts_ll_read = %u\n",
2545 info->ts_ll_read);
82e71b22
KK
2546 ice_debug(hw, ICE_DBG_INIT, "dev caps: ts_ll_int_read = %u\n",
2547 info->ts_ll_int_read);
9733cc94
JK
2548 ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n",
2549 info->ena_ports);
2550 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr_own_map = %u\n",
2551 info->tmr_own_map);
2552}
2553
595b13e2
JK
2554/**
2555 * ice_parse_fdir_dev_caps - Parse ICE_AQC_CAPS_FD device caps
2556 * @hw: pointer to the HW struct
2557 * @dev_p: pointer to device capabilities structure
2558 * @cap: capability element to parse
2559 *
2560 * Parse ICE_AQC_CAPS_FD for device capabilities.
2561 */
2562static void
2563ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2564 struct ice_aqc_list_caps_elem *cap)
2565{
2566 u32 number = le32_to_cpu(cap->number);
2567
2568 dev_p->num_flow_director_fltr = number;
2569 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_flow_director_fltr = %d\n",
2570 dev_p->num_flow_director_fltr);
2571}
2572
4da71a77
KK
2573/**
2574 * ice_parse_sensor_reading_cap - Parse ICE_AQC_CAPS_SENSOR_READING cap
2575 * @hw: pointer to the HW struct
2576 * @dev_p: pointer to device capabilities structure
2577 * @cap: capability element to parse
2578 *
2579 * Parse ICE_AQC_CAPS_SENSOR_READING for device capability for reading
2580 * enabled sensors.
2581 */
2582static void
2583ice_parse_sensor_reading_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2584 struct ice_aqc_list_caps_elem *cap)
2585{
2586 dev_p->supported_sensors = le32_to_cpu(cap->number);
2587
2588 ice_debug(hw, ICE_DBG_INIT,
2589 "dev caps: supported sensors (bitmap) = 0x%x\n",
2590 dev_p->supported_sensors);
2591}
2592
5f847eed
GN
2593/**
2594 * ice_parse_nac_topo_dev_caps - Parse ICE_AQC_CAPS_NAC_TOPOLOGY cap
2595 * @hw: pointer to the HW struct
2596 * @dev_p: pointer to device capabilities structure
2597 * @cap: capability element to parse
2598 *
2599 * Parse ICE_AQC_CAPS_NAC_TOPOLOGY for device capabilities.
2600 */
2601static void ice_parse_nac_topo_dev_caps(struct ice_hw *hw,
2602 struct ice_hw_dev_caps *dev_p,
2603 struct ice_aqc_list_caps_elem *cap)
2604{
2605 dev_p->nac_topo.mode = le32_to_cpu(cap->number);
2606 dev_p->nac_topo.id = le32_to_cpu(cap->phys_id) & ICE_NAC_TOPO_ID_M;
2607
2608 dev_info(ice_hw_to_dev(hw),
2609 "PF is configured in %s mode with IP instance ID %d\n",
2610 (dev_p->nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M) ?
2611 "primary" : "secondary", dev_p->nac_topo.id);
2612
2613 ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology is_primary = %d\n",
2614 !!(dev_p->nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M));
2615 ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology is_dual = %d\n",
2616 !!(dev_p->nac_topo.mode & ICE_NAC_TOPO_DUAL_M));
2617 ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology id = %d\n",
2618 dev_p->nac_topo.id);
2619}
2620
595b13e2
JK
2621/**
2622 * ice_parse_dev_caps - Parse device capabilities
2623 * @hw: pointer to the HW struct
2624 * @dev_p: pointer to device capabilities structure
2625 * @buf: buffer containing the device capability records
2626 * @cap_count: the number of capabilities
2627 *
 2628	 * Helper function to parse device (0x000B) capabilities list. For
7dbc63f0 2629 * capabilities shared between device and function, this relies on
595b13e2
JK
2630 * ice_parse_common_caps.
2631 *
2632 * Loop through the list of provided capabilities and extract the relevant
 2633	 * data into the device capabilities structure.
2634 */
2635static void
2636ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2637 void *buf, u32 cap_count)
2638{
2639 struct ice_aqc_list_caps_elem *cap_resp;
2640 u32 i;
2641
7a63dae0 2642 cap_resp = buf;
595b13e2
JK
2643
2644 memset(dev_p, 0, sizeof(*dev_p));
2645
2646 for (i = 0; i < cap_count; i++) {
2647 u16 cap = le16_to_cpu(cap_resp[i].cap);
2648 bool found;
2649
2650 found = ice_parse_common_caps(hw, &dev_p->common_cap,
2651 &cap_resp[i], "dev caps");
2652
2653 switch (cap) {
2654 case ICE_AQC_CAPS_VALID_FUNCTIONS:
2655 ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]);
9c20346b 2656 break;
595b13e2
JK
2657 case ICE_AQC_CAPS_VF:
2658 ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
9c20346b 2659 break;
595b13e2
JK
2660 case ICE_AQC_CAPS_VSI:
2661 ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
148beb61 2662 break;
9733cc94
JK
2663 case ICE_AQC_CAPS_1588:
2664 ice_parse_1588_dev_caps(hw, dev_p, &cap_resp[i]);
2665 break;
4da71a77 2666 case ICE_AQC_CAPS_FD:
595b13e2 2667 ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
9c20346b 2668 break;
4da71a77
KK
2669 case ICE_AQC_CAPS_SENSOR_READING:
2670 ice_parse_sensor_reading_cap(hw, dev_p, &cap_resp[i]);
5f847eed
GN
2671 break;
2672 case ICE_AQC_CAPS_NAC_TOPOLOGY:
2673 ice_parse_nac_topo_dev_caps(hw, dev_p, &cap_resp[i]);
4da71a77 2674 break;
9c20346b 2675 default:
595b13e2
JK
2676 /* Don't list common capabilities as unknown */
2677 if (!found)
9228d8b2 2678 ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%d]: 0x%x\n",
595b13e2 2679 i, cap);
9c20346b
AV
2680 break;
2681 }
2682 }
9164f761 2683
595b13e2
JK
2684 ice_recalc_port_limited_caps(hw, &dev_p->common_cap);
2685}
2686
272ad794
KK
2687/**
 2688	 * ice_is_pf_c827 - check if the PF contains a C827 PHY
2689 * @hw: pointer to the hw struct
2690 */
2691bool ice_is_pf_c827(struct ice_hw *hw)
2692{
2693 struct ice_aqc_get_link_topo cmd = {};
2694 u8 node_part_number;
2695 u16 node_handle;
2696 int status;
2697
2698 if (hw->mac_type != ICE_MAC_E810)
2699 return false;
2700
2701 if (hw->device_id != ICE_DEV_ID_E810C_QSFP)
2702 return true;
2703
2704 cmd.addr.topo_params.node_type_ctx =
2705 FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY) |
2706 FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, ICE_AQC_LINK_TOPO_NODE_CTX_PORT);
2707 cmd.addr.topo_params.index = 0;
2708
2709 status = ice_aq_get_netlist_node(hw, &cmd, &node_part_number,
2710 &node_handle);
2711
2712 if (status || node_part_number != ICE_AQC_GET_LINK_TOPO_NODE_NR_C827)
2713 return false;
2714
2715 if (node_handle == E810C_QSFP_C827_0_HANDLE || node_handle == E810C_QSFP_C827_1_HANDLE)
2716 return true;
2717
2718 return false;
2719}
2720
91e43ca0
JK
2721/**
2722 * ice_is_phy_rclk_in_netlist
2723 * @hw: pointer to the hw struct
2724 *
2725 * Check if the PHY Recovered Clock device is present in the netlist
2726 */
2727bool ice_is_phy_rclk_in_netlist(struct ice_hw *hw)
2728{
2729 if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL,
2730 ICE_AQC_GET_LINK_TOPO_NODE_NR_C827, NULL) &&
2731 ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL,
2732 ICE_AQC_GET_LINK_TOPO_NODE_NR_E822_PHY, NULL))
2733 return false;
2734
2735 return true;
2736}
2737
2738/**
2739 * ice_is_clock_mux_in_netlist
2740 * @hw: pointer to the hw struct
2741 *
2742 * Check if the Clock Multiplexer device is present in the netlist
2743 */
2744bool ice_is_clock_mux_in_netlist(struct ice_hw *hw)
2745{
2746 if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_MUX,
2747 ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_CLK_MUX,
2748 NULL))
2749 return false;
2750
2751 return true;
2752}
2753
2754/**
2755 * ice_is_cgu_in_netlist - check for CGU presence
2756 * @hw: pointer to the hw struct
2757 *
2758 * Check if the Clock Generation Unit (CGU) device is present in the netlist.
2759 * Save the CGU part number in the hw structure for later use.
2760 * Return:
2761 * * true - cgu is present
2762 * * false - cgu is not present
2763 */
2764bool ice_is_cgu_in_netlist(struct ice_hw *hw)
2765{
2766 if (!ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL,
2767 ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032,
2768 NULL)) {
2769 hw->cgu_part_number = ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032;
2770 return true;
2771 } else if (!ice_find_netlist_node(hw,
2772 ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL,
2773 ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384,
2774 NULL)) {
2775 hw->cgu_part_number = ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384;
2776 return true;
2777 }
2778
2779 return false;
2780}
2781
89776a6a
JK
2782/**
2783 * ice_is_gps_in_netlist
2784 * @hw: pointer to the hw struct
2785 *
2786 * Check if the GPS generic device is present in the netlist
2787 */
2788bool ice_is_gps_in_netlist(struct ice_hw *hw)
2789{
2790 if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_GPS,
2791 ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_GPS, NULL))
2792 return false;
2793
2794 return true;
2795}
2796
9c20346b 2797/**
8d7aab35 2798 * ice_aq_list_caps - query function/device capabilities
f9867df6 2799 * @hw: pointer to the HW struct
8d7aab35
JK
2800 * @buf: a buffer to hold the capabilities
2801 * @buf_size: size of the buffer
2802 * @cap_count: if not NULL, set to the number of capabilities reported
2803 * @opc: capabilities type to discover, device or function
9c20346b
AV
2804 * @cd: pointer to command details structure or NULL
2805 *
8d7aab35
JK
2806 * Get the function (0x000A) or device (0x000B) capabilities description from
2807 * firmware and store it in the buffer.
2808 *
2809 * If the cap_count pointer is not NULL, then it is set to the number of
2810 * capabilities firmware will report. Note that if the buffer size is too
2811 * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The
2812 * cap_count will still be updated in this case. It is recommended that the
2813 * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that
2814 * firmware could return) to avoid this.
9c20346b 2815 */
5e24d598 2816int
8d7aab35
JK
2817ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
2818 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
9c20346b
AV
2819{
2820 struct ice_aqc_list_caps *cmd;
2821 struct ice_aq_desc desc;
5e24d598 2822 int status;
9c20346b
AV
2823
2824 cmd = &desc.params.get_cap;
2825
2826 if (opc != ice_aqc_opc_list_func_caps &&
2827 opc != ice_aqc_opc_list_dev_caps)
d54699e2 2828 return -EINVAL;
9c20346b
AV
2829
2830 ice_fill_dflt_direct_cmd_desc(&desc, opc);
9c20346b 2831 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
8d7aab35
JK
2832
2833 if (cap_count)
2834 *cap_count = le32_to_cpu(cmd->count);
2835
2836 return status;
2837}
2838
2839/**
81aed647
JK
2840 * ice_discover_dev_caps - Read and extract device capabilities
2841 * @hw: pointer to the hardware structure
2842 * @dev_caps: pointer to device capabilities structure
8d7aab35 2843 *
81aed647
JK
2844 * Read the device capabilities and extract them into the dev_caps structure
2845 * for later use.
8d7aab35 2846 */
5e24d598 2847int
81aed647 2848ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
8d7aab35 2849{
81aed647
JK
2850 u32 cap_count = 0;
2851 void *cbuf;
5518ac2a 2852 int status;
8d7aab35 2853
81aed647
JK
2854 cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
2855 if (!cbuf)
d54699e2 2856 return -ENOMEM;
81aed647
JK
2857
2858 /* Although the driver doesn't know the number of capabilities the
2859 * device will return, we can simply send a 4KB buffer, the maximum
2860 * possible size that firmware can return.
2861 */
2862 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2863
2864 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2865 ice_aqc_opc_list_dev_caps, NULL);
9c20346b 2866 if (!status)
81aed647
JK
2867 ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
2868 kfree(cbuf);
8d7aab35 2869
9c20346b
AV
2870 return status;
2871}
2872
2873/**
81aed647 2874 * ice_discover_func_caps - Read and extract function capabilities
9c20346b 2875 * @hw: pointer to the hardware structure
81aed647
JK
2876 * @func_caps: pointer to function capabilities structure
2877 *
2878 * Read the function capabilities and extract them into the func_caps structure
2879 * for later use.
9c20346b 2880 */
5e24d598 2881static int
81aed647 2882ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps)
9c20346b 2883{
81aed647 2884 u32 cap_count = 0;
1082b360 2885 void *cbuf;
5518ac2a 2886 int status;
9c20346b 2887
1082b360
JK
2888 cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
2889 if (!cbuf)
d54699e2 2890 return -ENOMEM;
9c20346b 2891
1082b360
JK
2892 /* Although the driver doesn't know the number of capabilities the
2893 * device will return, we can simply send a 4KB buffer, the maximum
2894 * possible size that firmware can return.
2895 */
2896 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
9c20346b 2897
81aed647
JK
2898 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2899 ice_aqc_opc_list_func_caps, NULL);
2900 if (!status)
2901 ice_parse_func_caps(hw, func_caps, cbuf, cap_count);
1082b360 2902 kfree(cbuf);
9c20346b
AV
2903
2904 return status;
2905}
2906
462acf6a
TN
2907/**
2908 * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode
2909 * @hw: pointer to the hardware structure
2910 */
2911void ice_set_safe_mode_caps(struct ice_hw *hw)
2912{
2913 struct ice_hw_func_caps *func_caps = &hw->func_caps;
2914 struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
be49b1ad 2915 struct ice_hw_common_caps cached_caps;
eae1bbb2 2916 u32 num_funcs;
462acf6a
TN
2917
2918 /* cache some func_caps values that should be restored after memset */
be49b1ad 2919 cached_caps = func_caps->common_cap;
462acf6a
TN
2920
2921 /* unset func capabilities */
2922 memset(func_caps, 0, sizeof(*func_caps));
2923
be49b1ad
JK
2924#define ICE_RESTORE_FUNC_CAP(name) \
2925 func_caps->common_cap.name = cached_caps.name
2926
462acf6a 2927 /* restore cached values */
be49b1ad
JK
2928 ICE_RESTORE_FUNC_CAP(valid_functions);
2929 ICE_RESTORE_FUNC_CAP(txq_first_id);
2930 ICE_RESTORE_FUNC_CAP(rxq_first_id);
2931 ICE_RESTORE_FUNC_CAP(msix_vector_first_id);
2932 ICE_RESTORE_FUNC_CAP(max_mtu);
2933 ICE_RESTORE_FUNC_CAP(nvm_unified_update);
2934 ICE_RESTORE_FUNC_CAP(nvm_update_pending_nvm);
2935 ICE_RESTORE_FUNC_CAP(nvm_update_pending_orom);
2936 ICE_RESTORE_FUNC_CAP(nvm_update_pending_netlist);
462acf6a
TN
2937
2938 /* one Tx and one Rx queue in safe mode */
2939 func_caps->common_cap.num_rxq = 1;
2940 func_caps->common_cap.num_txq = 1;
2941
2942 /* two MSIX vectors, one for traffic and one for misc causes */
2943 func_caps->common_cap.num_msix_vectors = 2;
2944 func_caps->guar_num_vsi = 1;
2945
2946 /* cache some dev_caps values that should be restored after memset */
be49b1ad 2947 cached_caps = dev_caps->common_cap;
eae1bbb2 2948 num_funcs = dev_caps->num_funcs;
462acf6a
TN
2949
2950 /* unset dev capabilities */
2951 memset(dev_caps, 0, sizeof(*dev_caps));
2952
be49b1ad
JK
2953#define ICE_RESTORE_DEV_CAP(name) \
2954 dev_caps->common_cap.name = cached_caps.name
2955
462acf6a 2956 /* restore cached values */
be49b1ad
JK
2957 ICE_RESTORE_DEV_CAP(valid_functions);
2958 ICE_RESTORE_DEV_CAP(txq_first_id);
2959 ICE_RESTORE_DEV_CAP(rxq_first_id);
2960 ICE_RESTORE_DEV_CAP(msix_vector_first_id);
2961 ICE_RESTORE_DEV_CAP(max_mtu);
2962 ICE_RESTORE_DEV_CAP(nvm_unified_update);
2963 ICE_RESTORE_DEV_CAP(nvm_update_pending_nvm);
2964 ICE_RESTORE_DEV_CAP(nvm_update_pending_orom);
2965 ICE_RESTORE_DEV_CAP(nvm_update_pending_netlist);
eae1bbb2 2966 dev_caps->num_funcs = num_funcs;
462acf6a
TN
2967
2968 /* one Tx and one Rx queue per function in safe mode */
eae1bbb2
BA
2969 dev_caps->common_cap.num_rxq = num_funcs;
2970 dev_caps->common_cap.num_txq = num_funcs;
462acf6a
TN
2971
2972 /* two MSIX vectors per function */
eae1bbb2 2973 dev_caps->common_cap.num_msix_vectors = 2 * num_funcs;
462acf6a
TN
2974}
2975
7d86cf38
AV
2976/**
2977 * ice_get_caps - get info about the HW
2978 * @hw: pointer to the hardware structure
2979 */
5e24d598 2980int ice_get_caps(struct ice_hw *hw)
7d86cf38 2981{
5e24d598 2982 int status;
7d86cf38 2983
81aed647
JK
2984 status = ice_discover_dev_caps(hw, &hw->dev_caps);
2985 if (status)
2986 return status;
7d86cf38 2987
81aed647 2988 return ice_discover_func_caps(hw, &hw->func_caps);
7d86cf38
AV
2989}
2990
e94d4478
AV
2991/**
2992 * ice_aq_manage_mac_write - manage MAC address write command
f9867df6 2993 * @hw: pointer to the HW struct
e94d4478
AV
2994 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
2995 * @flags: flags to control write behavior
2996 * @cd: pointer to command details structure or NULL
2997 *
2998 * This function is used to write MAC address to the NVM (0x0108).
2999 */
5e24d598 3000int
d671e3e0 3001ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
e94d4478
AV
3002 struct ice_sq_cd *cd)
3003{
3004 struct ice_aqc_manage_mac_write *cmd;
3005 struct ice_aq_desc desc;
3006
3007 cmd = &desc.params.mac_write;
3008 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
3009
3010 cmd->flags = flags;
5df42c82 3011 ether_addr_copy(cmd->mac_addr, mac_addr);
e94d4478
AV
3012
3013 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3014}
3015
f31e4b6f
AV
3016/**
3017 * ice_aq_clear_pxe_mode
f9867df6 3018 * @hw: pointer to the HW struct
f31e4b6f
AV
3019 *
3020 * Tell the firmware that the driver is taking over from PXE (0x0110).
3021 */
5e24d598 3022static int ice_aq_clear_pxe_mode(struct ice_hw *hw)
f31e4b6f
AV
3023{
3024 struct ice_aq_desc desc;
3025
3026 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
3027 desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
3028
3029 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
3030}
3031
3032/**
3033 * ice_clear_pxe_mode - clear pxe operations mode
f9867df6 3034 * @hw: pointer to the HW struct
f31e4b6f
AV
3035 *
3036 * Make sure all PXE mode settings are cleared, including things
3037 * like descriptor fetch/write-back mode.
3038 */
3039void ice_clear_pxe_mode(struct ice_hw *hw)
3040{
3041 if (ice_check_sq_alive(hw, &hw->adminq))
3042 ice_aq_clear_pxe_mode(hw);
3043}
cdedef59 3044
a1ffafb0
BC
3045/**
3046 * ice_aq_set_port_params - set physical port parameters.
3047 * @pi: pointer to the port info struct
3048 * @double_vlan: if set double VLAN is enabled
3049 * @cd: pointer to command details structure or NULL
3050 *
3051 * Set Physical port parameters (0x0203)
3052 */
3053int
3054ice_aq_set_port_params(struct ice_port_info *pi, bool double_vlan,
3055 struct ice_sq_cd *cd)
3056
3057{
3058 struct ice_aqc_set_port_params *cmd;
3059 struct ice_hw *hw = pi->hw;
3060 struct ice_aq_desc desc;
3061 u16 cmd_flags = 0;
3062
3063 cmd = &desc.params.set_port_params;
3064
3065 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params);
3066 if (double_vlan)
3067 cmd_flags |= ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA;
3068 cmd->cmd_flags = cpu_to_le16(cmd_flags);
3069
a59618b9
PK
3070 cmd->local_fwd_mode = pi->local_fwd_mode |
3071 ICE_AQC_SET_P_PARAMS_LOCAL_FWD_MODE_VALID;
3072
a1ffafb0
BC
3073 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3074}
3075
39ed02a4
AV
3076/**
3077 * ice_is_100m_speed_supported
3078 * @hw: pointer to the HW struct
3079 *
3080 * returns true if 100M speeds are supported by the device,
3081 * false otherwise.
3082 */
3083bool ice_is_100m_speed_supported(struct ice_hw *hw)
3084{
3085 switch (hw->device_id) {
3086 case ICE_DEV_ID_E822C_SGMII:
3087 case ICE_DEV_ID_E822L_SGMII:
3088 case ICE_DEV_ID_E823L_1GBE:
3089 case ICE_DEV_ID_E823C_SGMII:
3090 return true;
3091 default:
3092 return false;
3093 }
3094}
3095
48cb27f2
CC
3096/**
3097 * ice_get_link_speed_based_on_phy_type - returns link speed
3098 * @phy_type_low: lower part of phy_type
aef74145 3099 * @phy_type_high: higher part of phy_type
48cb27f2 3100 *
f9867df6 3101 * This helper function will convert an entry in PHY type structure
aef74145
AV
3102 * [phy_type_low, phy_type_high] to its corresponding link speed.
 3103	 * Note: Exactly one bit should be set across [phy_type_low, phy_type_high],
f9867df6 3104	 * as this function converts a single PHY type to its
48cb27f2 3105	 * speed.
7cab44f1
ST
3106 *
3107 * Return:
3108 * * PHY speed for recognized PHY type
3109 * * If no bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned
3110 * * If more than one bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned
48cb27f2 3111 */
7cab44f1 3112u16 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
48cb27f2 3113{
aef74145 3114 u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
48cb27f2
CC
3115 u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
3116
3117 switch (phy_type_low) {
3118 case ICE_PHY_TYPE_LOW_100BASE_TX:
3119 case ICE_PHY_TYPE_LOW_100M_SGMII:
3120 speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
3121 break;
3122 case ICE_PHY_TYPE_LOW_1000BASE_T:
3123 case ICE_PHY_TYPE_LOW_1000BASE_SX:
3124 case ICE_PHY_TYPE_LOW_1000BASE_LX:
3125 case ICE_PHY_TYPE_LOW_1000BASE_KX:
3126 case ICE_PHY_TYPE_LOW_1G_SGMII:
3127 speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
3128 break;
3129 case ICE_PHY_TYPE_LOW_2500BASE_T:
3130 case ICE_PHY_TYPE_LOW_2500BASE_X:
3131 case ICE_PHY_TYPE_LOW_2500BASE_KX:
3132 speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
3133 break;
3134 case ICE_PHY_TYPE_LOW_5GBASE_T:
3135 case ICE_PHY_TYPE_LOW_5GBASE_KR:
3136 speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
3137 break;
3138 case ICE_PHY_TYPE_LOW_10GBASE_T:
3139 case ICE_PHY_TYPE_LOW_10G_SFI_DA:
3140 case ICE_PHY_TYPE_LOW_10GBASE_SR:
3141 case ICE_PHY_TYPE_LOW_10GBASE_LR:
3142 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
3143 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
3144 case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
3145 speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
3146 break;
3147 case ICE_PHY_TYPE_LOW_25GBASE_T:
3148 case ICE_PHY_TYPE_LOW_25GBASE_CR:
3149 case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
3150 case ICE_PHY_TYPE_LOW_25GBASE_CR1:
3151 case ICE_PHY_TYPE_LOW_25GBASE_SR:
3152 case ICE_PHY_TYPE_LOW_25GBASE_LR:
3153 case ICE_PHY_TYPE_LOW_25GBASE_KR:
3154 case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
3155 case ICE_PHY_TYPE_LOW_25GBASE_KR1:
3156 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
3157 case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
3158 speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
3159 break;
3160 case ICE_PHY_TYPE_LOW_40GBASE_CR4:
3161 case ICE_PHY_TYPE_LOW_40GBASE_SR4:
3162 case ICE_PHY_TYPE_LOW_40GBASE_LR4:
3163 case ICE_PHY_TYPE_LOW_40GBASE_KR4:
3164 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
3165 case ICE_PHY_TYPE_LOW_40G_XLAUI:
3166 speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
3167 break;
aef74145
AV
3168 case ICE_PHY_TYPE_LOW_50GBASE_CR2:
3169 case ICE_PHY_TYPE_LOW_50GBASE_SR2:
3170 case ICE_PHY_TYPE_LOW_50GBASE_LR2:
3171 case ICE_PHY_TYPE_LOW_50GBASE_KR2:
3172 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
3173 case ICE_PHY_TYPE_LOW_50G_LAUI2:
3174 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
3175 case ICE_PHY_TYPE_LOW_50G_AUI2:
3176 case ICE_PHY_TYPE_LOW_50GBASE_CP:
3177 case ICE_PHY_TYPE_LOW_50GBASE_SR:
3178 case ICE_PHY_TYPE_LOW_50GBASE_FR:
3179 case ICE_PHY_TYPE_LOW_50GBASE_LR:
3180 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
3181 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
3182 case ICE_PHY_TYPE_LOW_50G_AUI1:
3183 speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
3184 break;
3185 case ICE_PHY_TYPE_LOW_100GBASE_CR4:
3186 case ICE_PHY_TYPE_LOW_100GBASE_SR4:
3187 case ICE_PHY_TYPE_LOW_100GBASE_LR4:
3188 case ICE_PHY_TYPE_LOW_100GBASE_KR4:
3189 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
3190 case ICE_PHY_TYPE_LOW_100G_CAUI4:
3191 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
3192 case ICE_PHY_TYPE_LOW_100G_AUI4:
3193 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
3194 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
3195 case ICE_PHY_TYPE_LOW_100GBASE_CP2:
3196 case ICE_PHY_TYPE_LOW_100GBASE_SR2:
3197 case ICE_PHY_TYPE_LOW_100GBASE_DR:
3198 speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
3199 break;
48cb27f2
CC
3200 default:
3201 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
3202 break;
3203 }
3204
aef74145
AV
3205 switch (phy_type_high) {
3206 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
3207 case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
3208 case ICE_PHY_TYPE_HIGH_100G_CAUI2:
3209 case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
3210 case ICE_PHY_TYPE_HIGH_100G_AUI2:
3211 speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
3212 break;
2a6d8f2d
PG
3213 case ICE_PHY_TYPE_HIGH_200G_CR4_PAM4:
3214 case ICE_PHY_TYPE_HIGH_200G_SR4:
3215 case ICE_PHY_TYPE_HIGH_200G_FR4:
3216 case ICE_PHY_TYPE_HIGH_200G_LR4:
3217 case ICE_PHY_TYPE_HIGH_200G_DR4:
3218 case ICE_PHY_TYPE_HIGH_200G_KR4_PAM4:
3219 case ICE_PHY_TYPE_HIGH_200G_AUI4_AOC_ACC:
3220 case ICE_PHY_TYPE_HIGH_200G_AUI4:
3221 speed_phy_type_high = ICE_AQ_LINK_SPEED_200GB;
3222 break;
aef74145
AV
3223 default:
3224 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
3225 break;
3226 }
3227
3228 if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
3229 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
3230 return ICE_AQ_LINK_SPEED_UNKNOWN;
3231 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
3232 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
3233 return ICE_AQ_LINK_SPEED_UNKNOWN;
3234 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
3235 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
3236 return speed_phy_type_low;
3237 else
3238 return speed_phy_type_high;
48cb27f2
CC
3239}
3240
3241/**
3242 * ice_update_phy_type
3243 * @phy_type_low: pointer to the lower part of phy_type
aef74145 3244 * @phy_type_high: pointer to the higher part of phy_type
3245 * @link_speeds_bitmap: targeted link speeds bitmap
3246 *
3247 * Note: For the link_speeds_bitmap layout, see
3248 * [ice_aqc_get_link_status->link_speed]. The caller may pass a
3249 * link_speeds_bitmap that includes multiple speeds.
3250 *
3251 * Each bit in the [phy_type_low, phy_type_high] pair represents a
3252 * certain link speed. This helper function turns on the bits in
3253 * [phy_type_low, phy_type_high] that correspond to the speeds set in the
3254 * link_speeds_bitmap input parameter.
3255 */
aef74145
AV
3256void
3257ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
3258 u16 link_speeds_bitmap)
48cb27f2 3259{
aef74145 3260 u64 pt_high;
48cb27f2
CC
3261 u64 pt_low;
3262 int index;
207e3721 3263 u16 speed;
48cb27f2
CC
3264
3265 /* We first check with low part of phy_type */
3266 for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
3267 pt_low = BIT_ULL(index);
aef74145 3268 speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);
48cb27f2
CC
3269
3270 if (link_speeds_bitmap & speed)
3271 *phy_type_low |= BIT_ULL(index);
3272 }
aef74145
AV
3273
3274 /* We then check with high part of phy_type */
3275 for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
3276 pt_high = BIT_ULL(index);
3277 speed = ice_get_link_speed_based_on_phy_type(0, pt_high);
3278
3279 if (link_speeds_bitmap & speed)
3280 *phy_type_high |= BIT_ULL(index);
3281 }
48cb27f2
CC
3282}
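
/* Usage sketch (illustrative only, not called from this file): a caller
 * that wants to advertise only 10G and 25G could build the PHY type masks
 * from the ICE_AQ_LINK_SPEED_* defines like so:
 *
 *	u64 phy_low = 0, phy_high = 0;
 *	u16 speeds = ICE_AQ_LINK_SPEED_10GB | ICE_AQ_LINK_SPEED_25GB;
 *
 *	ice_update_phy_type(&phy_low, &phy_high, speeds);
 *	cfg.phy_type_low = cpu_to_le64(phy_low);
 *	cfg.phy_type_high = cpu_to_le64(phy_high);
 */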
3283
fcea6f3d
AV
3284/**
3285 * ice_aq_set_phy_cfg
f9867df6 3286 * @hw: pointer to the HW struct
1a3571b5 3287 * @pi: port info structure of the logical port of interest
3288 * @cfg: structure with PHY configuration data to be set
3289 * @cd: pointer to command details structure or NULL
3290 *
3291 * Set the various PHY configuration parameters supported on the Port.
3292 * One or more of the Set PHY config parameters may be ignored in an MFP
3293 * mode as the PF may not have the privilege to set some of the PHY Config
3294 * parameters. This status will be indicated by the command response (0x0601).
3295 */
5e24d598 3296int
1a3571b5 3297ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
fcea6f3d
AV
3298 struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
3299{
fcea6f3d 3300 struct ice_aq_desc desc;
5e24d598 3301 int status;
fcea6f3d
AV
3302
3303 if (!cfg)
d54699e2 3304 return -EINVAL;
fcea6f3d 3305
d8df260a
CC
3306 /* Ensure that only valid bits of cfg->caps can be turned on. */
3307 if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
9228d8b2 3308 ice_debug(hw, ICE_DBG_PHY, "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
d8df260a
CC
3309 cfg->caps);
3310
3311 cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
3312 }
3313
fcea6f3d 3314 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
1a3571b5 3315 desc.params.set_phy.lport_num = pi->lport;
48cb27f2 3316 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
fcea6f3d 3317
55df52a0
PG
3318 ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n");
3319 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
dc67039b 3320 (unsigned long long)le64_to_cpu(cfg->phy_type_low));
55df52a0 3321 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
dc67039b 3322 (unsigned long long)le64_to_cpu(cfg->phy_type_high));
55df52a0 3323 ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", cfg->caps);
bdeff971
LF
3324 ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n",
3325 cfg->low_power_ctrl_an);
55df52a0
PG
3326 ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", cfg->eee_cap);
3327 ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n", cfg->eeer_value);
3328 ice_debug(hw, ICE_DBG_LINK, " link_fec_opt = 0x%x\n",
3329 cfg->link_fec_opt);
dc67039b 3330
b5e19a64
CC
3331 status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
3332 if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
3333 status = 0;
3334
1a3571b5
PG
3335 if (!status)
3336 pi->phy.curr_user_phy_cfg = *cfg;
3337
b5e19a64 3338 return status;
fcea6f3d
AV
3339}
3340
3341/**
3342 * ice_update_link_info - update status of the HW network link
3343 * @pi: port info structure of the logical port of interest
3344 */
5e24d598 3345int ice_update_link_info(struct ice_port_info *pi)
fcea6f3d 3346{
092a33d4 3347 struct ice_link_status *li;
5e24d598 3348 int status;
fcea6f3d
AV
3349
3350 if (!pi)
d54699e2 3351 return -EINVAL;
fcea6f3d 3352
092a33d4 3353 li = &pi->phy.link_info;
fcea6f3d 3354
fcea6f3d
AV
3355 status = ice_aq_get_link_info(pi, true, NULL, NULL);
3356 if (status)
092a33d4
BA
3357 return status;
3358
3359 if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
90ca6956 3360 struct ice_aqc_get_phy_caps_data *pcaps __free(kfree) = NULL;
092a33d4 3361
90f821d7 3362 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
092a33d4 3363 if (!pcaps)
d54699e2 3364 return -ENOMEM;
fcea6f3d 3365
d6730a87 3366 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
fcea6f3d 3367 pcaps, NULL);
fcea6f3d 3368 }
092a33d4 3369
fcea6f3d
AV
3370 return status;
3371}
3372
70838938
AS
3373/**
3374 * ice_aq_get_phy_equalization - function to read serdes equaliser
3375 * value from firmware using admin queue command.
3376 * @hw: pointer to the HW struct
3377 * @data_in: represents the serdes equalization parameter requested
3378 * @op_code: represents the Tx or Rx equalization selection flag
3379 * @serdes_num: represents the serdes number
3380 * @output: pointer to the caller-supplied buffer to return serdes equaliser
3381 *
3382 * Return: non-zero status on error and 0 on success.
3383 */
3384int ice_aq_get_phy_equalization(struct ice_hw *hw, u16 data_in, u16 op_code,
3385 u8 serdes_num, int *output)
3386{
3387 struct ice_aqc_dnl_call_command *cmd;
3388 struct ice_aqc_dnl_call buf = {};
3389 struct ice_aq_desc desc;
3390 int err;
3391
3392 buf.sto.txrx_equa_reqs.data_in = cpu_to_le16(data_in);
3393 buf.sto.txrx_equa_reqs.op_code_serdes_sel =
3394 cpu_to_le16(op_code | (serdes_num & 0xF));
3395 cmd = &desc.params.dnl_call;
3396 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dnl_call);
3397 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_BUF |
3398 ICE_AQ_FLAG_RD |
3399 ICE_AQ_FLAG_SI);
3400 desc.datalen = cpu_to_le16(sizeof(struct ice_aqc_dnl_call));
3401 cmd->activity_id = cpu_to_le16(ICE_AQC_ACT_ID_DNL);
3402
3403 err = ice_aq_send_cmd(hw, &desc, &buf, sizeof(struct ice_aqc_dnl_call),
3404 NULL);
3405 *output = err ? 0 : buf.sto.txrx_equa_resp.val;
3406
3407 return err;
3408}
3409
ac21add2
AS
3410#define FEC_REG_PORT(port) { \
3411 FEC_CORR_LOW_REG_PORT##port, \
3412 FEC_CORR_HIGH_REG_PORT##port, \
3413 FEC_UNCORR_LOW_REG_PORT##port, \
3414 FEC_UNCORR_HIGH_REG_PORT##port, \
3415}
3416
3417static const u32 fec_reg[][ICE_FEC_MAX] = {
3418 FEC_REG_PORT(0),
3419 FEC_REG_PORT(1),
3420 FEC_REG_PORT(2),
3421 FEC_REG_PORT(3)
3422};
3423
3424/**
3425 * ice_aq_get_fec_stats - reads FEC stats from the PHY
3426 * @hw: pointer to the HW struct
3427 * @pcs_quad: represents the PCS quad of the user input serdes
3428 * @pcs_port: represents the PCS port number within the above PCS quad
3429 * @fec_type: represents FEC stats type
3430 * @output: pointer to the caller-supplied buffer to return requested fec stats
3431 *
3432 * Return: non-zero status on error and 0 on success.
3433 */
3434int ice_aq_get_fec_stats(struct ice_hw *hw, u16 pcs_quad, u16 pcs_port,
3435 enum ice_fec_stats_types fec_type, u32 *output)
3436{
3437 u16 flag = (ICE_AQ_FLAG_RD | ICE_AQ_FLAG_BUF | ICE_AQ_FLAG_SI);
3438 struct ice_sbq_msg_input msg = {};
3439 u32 receiver_id, reg_offset;
3440 int err;
3441
3442 if (pcs_port > 3)
3443 return -EINVAL;
3444
3445 reg_offset = fec_reg[pcs_port][fec_type];
3446
3447 if (pcs_quad == 0)
3448 receiver_id = FEC_RECEIVER_ID_PCS0;
3449 else if (pcs_quad == 1)
3450 receiver_id = FEC_RECEIVER_ID_PCS1;
3451 else
3452 return -EINVAL;
3453
3454 msg.msg_addr_low = lower_16_bits(reg_offset);
3455 msg.msg_addr_high = receiver_id;
3456 msg.opcode = ice_sbq_msg_rd;
3457 msg.dest_dev = rmn_0;
3458
3459 err = ice_sbq_rw_reg(hw, &msg, flag);
3460 if (err)
3461 return err;
3462
3463 *output = msg.data;
3464 return 0;
3465}
3466
1a3571b5
PG
3467/**
3468 * ice_cache_phy_user_req
3469 * @pi: port information structure
3470 * @cache_data: PHY data to cache
3471 * @cache_mode: selects which user setting (FC, FEC, or speed) is cached
3472 *
3473 * Cache the user request (FC, FEC, speed) for later use.
3474 */
3475static void
3476ice_cache_phy_user_req(struct ice_port_info *pi,
3477 struct ice_phy_cache_mode_data cache_data,
3478 enum ice_phy_cache_mode cache_mode)
3479{
3480 if (!pi)
3481 return;
3482
3483 switch (cache_mode) {
3484 case ICE_FC_MODE:
3485 pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req;
3486 break;
3487 case ICE_SPEED_MODE:
3488 pi->phy.curr_user_speed_req =
3489 cache_data.data.curr_user_speed_req;
3490 break;
3491 case ICE_FEC_MODE:
3492 pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req;
3493 break;
3494 default:
3495 break;
3496 }
3497}
3498
3499/**
3500 * ice_caps_to_fc_mode
3501 * @caps: PHY capabilities
3502 *
3503 * Convert PHY FC capabilities to ice FC mode
3504 */
3505enum ice_fc_mode ice_caps_to_fc_mode(u8 caps)
3506{
3507 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE &&
3508 caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
3509 return ICE_FC_FULL;
3510
3511 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
3512 return ICE_FC_TX_PAUSE;
3513
3514 if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
3515 return ICE_FC_RX_PAUSE;
3516
3517 return ICE_FC_NONE;
3518}
3519
3520/**
3521 * ice_caps_to_fec_mode
3522 * @caps: PHY capabilities
3523 * @fec_options: Link FEC options
3524 *
3525 * Convert PHY FEC capabilities to ice FEC mode
3526 */
3527enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
3528{
3529 if (caps & ICE_AQC_PHY_EN_AUTO_FEC)
3530 return ICE_FEC_AUTO;
3531
3532 if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
3533 ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
3534 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN |
3535 ICE_AQC_PHY_FEC_25G_KR_REQ))
3536 return ICE_FEC_BASER;
3537
3538 if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ |
3539 ICE_AQC_PHY_FEC_25G_RS_544_REQ |
3540 ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN))
3541 return ICE_FEC_RS;
3542
3543 return ICE_FEC_NONE;
3544}
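
/* Illustrative sketch (hypothetical caller): after a successful
 * ice_aq_get_phy_caps() call, the two helpers above translate the raw
 * capability bytes into driver enums, e.g.:
 *
 *	enum ice_fc_mode fc = ice_caps_to_fc_mode(pcaps->caps);
 *	enum ice_fec_mode fec = ice_caps_to_fec_mode(pcaps->caps,
 *						     pcaps->link_fec_options);
 */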
3545
fcea6f3d 3546/**
2ffb6085 3547 * ice_cfg_phy_fc - Configure PHY FC data based on FC mode
1a3571b5 3548 * @pi: port information structure
3549 * @cfg: PHY configuration data to set FC mode
3550 * @req_mode: FC mode to configure
fcea6f3d 3551 */
5e24d598 3552int
1a3571b5
PG
3553ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
3554 enum ice_fc_mode req_mode)
fcea6f3d 3555{
1a3571b5 3556 struct ice_phy_cache_mode_data cache_data;
fcea6f3d 3557 u8 pause_mask = 0x0;
fcea6f3d 3558
1a3571b5 3559 if (!pi || !cfg)
d54699e2 3560 return -EINVAL;
fcea6f3d 3561
2ffb6085 3562 switch (req_mode) {
fcea6f3d
AV
3563 case ICE_FC_FULL:
3564 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
3565 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
3566 break;
3567 case ICE_FC_RX_PAUSE:
3568 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
3569 break;
3570 case ICE_FC_TX_PAUSE:
3571 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
3572 break;
3573 default:
3574 break;
3575 }
3576
2ffb6085
PG
3577 /* clear the old pause settings */
3578 cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
3579 ICE_AQC_PHY_EN_RX_LINK_PAUSE);
3580
3581 /* set the new capabilities */
3582 cfg->caps |= pause_mask;
3583
1a3571b5
PG
3584 /* Cache user FC request */
3585 cache_data.data.curr_user_fc_req = req_mode;
3586 ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);
3587
2ffb6085
PG
3588 return 0;
3589}
3590
3591/**
3592 * ice_set_fc
3593 * @pi: port information structure
3594 * @aq_failures: pointer to status code, specific to ice_set_fc routine
3595 * @ena_auto_link_update: enable automatic link update
3596 *
3597 * Set the requested flow control mode.
3598 */
5e24d598 3599int
2ffb6085
PG
3600ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
3601{
90ca6956 3602 struct ice_aqc_get_phy_caps_data *pcaps __free(kfree) = NULL;
1a3571b5 3603 struct ice_aqc_set_phy_cfg_data cfg = { 0 };
2ffb6085 3604 struct ice_hw *hw;
5518ac2a 3605 int status;
2ffb6085 3606
1a3571b5 3607 if (!pi || !aq_failures)
d54699e2 3608 return -EINVAL;
2ffb6085
PG
3609
3610 *aq_failures = 0;
3611 hw = pi->hw;
3612
90f821d7 3613 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
fcea6f3d 3614 if (!pcaps)
d54699e2 3615 return -ENOMEM;
fcea6f3d 3616
f9867df6 3617 /* Get the current PHY config */
d6730a87
AV
3618 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
3619 pcaps, NULL);
fcea6f3d
AV
3620 if (status) {
3621 *aq_failures = ICE_SET_FC_AQ_FAIL_GET;
3622 goto out;
3623 }
3624
ea78ce4d 3625 ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
d8df260a 3626
2ffb6085 3627 /* Configure the set PHY data */
1a3571b5 3628 status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode);
2ffb6085
PG
3629 if (status)
3630 goto out;
d8df260a 3631
fcea6f3d
AV
3632 /* If the capabilities have changed, then set the new config */
3633 if (cfg.caps != pcaps->caps) {
3634 int retry_count, retry_max = 10;
3635
3636 /* Auto restart link so settings take effect */
48cb27f2
CC
3637 if (ena_auto_link_update)
3638 cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
fcea6f3d 3639
1a3571b5 3640 status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
fcea6f3d
AV
3641 if (status) {
3642 *aq_failures = ICE_SET_FC_AQ_FAIL_SET;
3643 goto out;
3644 }
3645
3646 /* Update the link info
3647 * It sometimes takes a really long time for link to
3648 * come back from the atomic reset. Thus, we wait a
3649 * little bit.
3650 */
3651 for (retry_count = 0; retry_count < retry_max; retry_count++) {
3652 status = ice_update_link_info(pi);
3653
3654 if (!status)
3655 break;
3656
3657 mdelay(100);
3658 }
3659
3660 if (status)
3661 *aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
3662 }
3663
3664out:
fcea6f3d
AV
3665 return status;
3666}
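
/* Usage sketch (illustrative, hypothetical ethtool-style caller): request
 * Rx-only pause and let aq_failures report which step of the sequence
 * failed:
 *
 *	u8 aq_failures = 0;
 *
 *	pi->fc.req_mode = ICE_FC_RX_PAUSE;
 *	err = ice_set_fc(pi, &aq_failures, true);
 *	if (err && aq_failures == ICE_SET_FC_AQ_FAIL_SET)
 *		(handle the Set PHY config failure)
 */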
3667
1a3571b5
PG
3668/**
3669 * ice_phy_caps_equals_cfg
3670 * @phy_caps: PHY capabilities
3671 * @phy_cfg: PHY configuration
3672 *
3673 * Helper function to determine if PHY capabilities matches PHY
3674 * configuration
3675 */
3676bool
3677ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
3678 struct ice_aqc_set_phy_cfg_data *phy_cfg)
3679{
3680 u8 caps_mask, cfg_mask;
3681
3682 if (!phy_caps || !phy_cfg)
3683 return false;
3684
3685 /* These bits are not common between capabilities and configuration.
3686 * Do not use them to determine equality.
3687 */
3688 caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
3689 ICE_AQC_GET_PHY_EN_MOD_QUAL);
3690 cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3691
3692 if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
3693 phy_caps->phy_type_high != phy_cfg->phy_type_high ||
3694 ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
bdeff971 3695 phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
1a3571b5
PG
3696 phy_caps->eee_cap != phy_cfg->eee_cap ||
3697 phy_caps->eeer_value != phy_cfg->eeer_value ||
3698 phy_caps->link_fec_options != phy_cfg->link_fec_opt)
3699 return false;
3700
3701 return true;
3702}
3703
f776b3ac
PG
3704/**
3705 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
ea78ce4d 3706 * @pi: port information structure
3707 * @caps: PHY ability structure to copy data from
3708 * @cfg: PHY configuration structure to copy data to
3709 *
3710 * Helper function to copy AQC PHY get ability data to PHY set configuration
3711 * data structure
3712 */
3713void
ea78ce4d
PG
3714ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
3715 struct ice_aqc_get_phy_caps_data *caps,
f776b3ac
PG
3716 struct ice_aqc_set_phy_cfg_data *cfg)
3717{
ea78ce4d 3718 if (!pi || !caps || !cfg)
f776b3ac
PG
3719 return;
3720
2ffb6085 3721 memset(cfg, 0, sizeof(*cfg));
f776b3ac
PG
3722 cfg->phy_type_low = caps->phy_type_low;
3723 cfg->phy_type_high = caps->phy_type_high;
3724 cfg->caps = caps->caps;
bdeff971 3725 cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
f776b3ac
PG
3726 cfg->eee_cap = caps->eee_cap;
3727 cfg->eeer_value = caps->eeer_value;
3728 cfg->link_fec_opt = caps->link_fec_options;
ea78ce4d
PG
3729 cfg->module_compliance_enforcement =
3730 caps->module_compliance_enforcement;
f776b3ac
PG
3731}
3732
3733/**
3734 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
61cf42e7 3735 * @pi: port information structure
3736 * @cfg: PHY configuration data to set FEC mode
3737 * @fec: FEC mode to configure
f776b3ac 3738 */
5e24d598 3739int
61cf42e7
PG
3740ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
3741 enum ice_fec_mode fec)
f776b3ac 3742{
90ca6956 3743 struct ice_aqc_get_phy_caps_data *pcaps __free(kfree) = NULL;
0a02944f 3744 struct ice_hw *hw;
5518ac2a 3745 int status;
61cf42e7
PG
3746
3747 if (!pi || !cfg)
d54699e2 3748 return -EINVAL;
61cf42e7 3749
0a02944f
AV
3750 hw = pi->hw;
3751
61cf42e7
PG
3752 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
3753 if (!pcaps)
d54699e2 3754 return -ENOMEM;
61cf42e7 3755
0a02944f
AV
3756 status = ice_aq_get_phy_caps(pi, false,
3757 (ice_fw_supports_report_dflt_cfg(hw) ?
3758 ICE_AQC_REPORT_DFLT_CFG :
3759 ICE_AQC_REPORT_TOPO_CAP_MEDIA), pcaps, NULL);
61cf42e7
PG
3760 if (status)
3761 goto out;
3762
3763 cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
3764 cfg->link_fec_opt = pcaps->link_fec_options;
3765
f776b3ac
PG
3766 switch (fec) {
3767 case ICE_FEC_BASER:
3747f031 3768 /* Clear RS bits, and AND BASE-R ability
f776b3ac
PG
3769 * bits and OR request bits.
3770 */
f776b3ac 3771 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
61cf42e7 3772 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
f776b3ac 3773 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
61cf42e7 3774 ICE_AQC_PHY_FEC_25G_KR_REQ;
f776b3ac
PG
3775 break;
3776 case ICE_FEC_RS:
3747f031 3777 /* Clear BASE-R bits, and AND RS ability
f776b3ac
PG
3778 * bits and OR request bits.
3779 */
f776b3ac
PG
3780 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
3781 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
61cf42e7 3782 ICE_AQC_PHY_FEC_25G_RS_544_REQ;
f776b3ac
PG
3783 break;
3784 case ICE_FEC_NONE:
3747f031 3785 /* Clear all FEC option bits. */
f776b3ac
PG
3786 cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
3787 break;
3788 case ICE_FEC_AUTO:
3789 /* AND auto FEC bit, and all caps bits. */
3790 cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
61cf42e7
PG
3791 cfg->link_fec_opt |= pcaps->link_fec_options;
3792 break;
3793 default:
d54699e2 3794 status = -EINVAL;
f776b3ac
PG
3795 break;
3796 }
61cf42e7 3797
75751c80
JC
3798 if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(hw) &&
3799 !ice_fw_supports_report_dflt_cfg(hw)) {
5950bdc8 3800 struct ice_link_default_override_tlv tlv = { 0 };
ea78ce4d 3801
21338d58
DC
3802 status = ice_get_link_default_override(&tlv, pi);
3803 if (status)
ea78ce4d
PG
3804 goto out;
3805
3806 if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
3807 (tlv.options & ICE_LINK_OVERRIDE_EN))
3808 cfg->link_fec_opt = tlv.fec_options;
3809 }
3810
61cf42e7 3811out:
61cf42e7 3812 return status;
f776b3ac
PG
3813}
3814
0b28b702
AV
3815/**
3816 * ice_get_link_status - get status of the HW network link
3817 * @pi: port information structure
3818 * @link_up: pointer to bool (true/false = linkup/linkdown)
3819 *
3820 * The variable link_up is true if the link is up and false if it is
3821 * down; its value is invalid if the returned status is non-zero. As a
3822 * result of this call, link status reporting becomes enabled.
3823 */
5e24d598 3824int ice_get_link_status(struct ice_port_info *pi, bool *link_up)
0b28b702
AV
3825{
3826 struct ice_phy_info *phy_info;
5e24d598 3827 int status = 0;
0b28b702 3828
c7f2c42b 3829 if (!pi || !link_up)
d54699e2 3830 return -EINVAL;
0b28b702
AV
3831
3832 phy_info = &pi->phy;
3833
3834 if (phy_info->get_link_info) {
3835 status = ice_update_link_info(pi);
3836
3837 if (status)
9228d8b2 3838 ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n",
0b28b702
AV
3839 status);
3840 }
3841
3842 *link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;
3843
3844 return status;
3845}
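
/* Illustrative sketch (hypothetical watchdog-style caller): poll the link
 * state and only trust link_up when the call succeeds:
 *
 *	bool link_up;
 *
 *	if (!ice_get_link_status(pi, &link_up) && link_up)
 *		(link is up; pi->phy.link_info has been refreshed)
 */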
3846
fcea6f3d
AV
3847/**
3848 * ice_aq_set_link_restart_an
3849 * @pi: pointer to the port information structure
3850 * @ena_link: if true: enable link, if false: disable link
3851 * @cd: pointer to command details structure or NULL
3852 *
3853 * Sets up the link and restarts the Auto-Negotiation over the link.
3854 */
5e24d598 3855int
fcea6f3d
AV
3856ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
3857 struct ice_sq_cd *cd)
3858{
3859 struct ice_aqc_restart_an *cmd;
3860 struct ice_aq_desc desc;
3861
3862 cmd = &desc.params.restart_an;
3863
3864 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);
3865
3866 cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
3867 cmd->lport_num = pi->lport;
3868 if (ena_link)
3869 cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
3870 else
3871 cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;
3872
3873 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
3874}
3875
250c3b3e
BC
3876/**
3877 * ice_aq_set_event_mask
3878 * @hw: pointer to the HW struct
3879 * @port_num: port number of the physical function
3880 * @mask: event mask to be set
3881 * @cd: pointer to command details structure or NULL
3882 *
3883 * Set event mask (0x0613)
3884 */
5e24d598 3885int
250c3b3e
BC
3886ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
3887 struct ice_sq_cd *cd)
3888{
3889 struct ice_aqc_set_event_mask *cmd;
3890 struct ice_aq_desc desc;
3891
3892 cmd = &desc.params.set_event_mask;
3893
3894 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
3895
3896 cmd->lport_num = port_num;
3897
3898 cmd->event_mask = cpu_to_le16(mask);
3899 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3900}
3901
0e674aeb
AV
3902/**
3903 * ice_aq_set_mac_loopback
3904 * @hw: pointer to the HW struct
3905 * @ena_lpbk: Enable or Disable loopback
3906 * @cd: pointer to command details structure or NULL
3907 *
3908 * Enable/disable loopback on a given port
3909 */
5e24d598 3910int
0e674aeb
AV
3911ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
3912{
3913 struct ice_aqc_set_mac_lb *cmd;
3914 struct ice_aq_desc desc;
3915
3916 cmd = &desc.params.set_mac_lb;
3917
3918 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
3919 if (ena_lpbk)
3920 cmd->lb_mode = ICE_AQ_MAC_LB_EN;
3921
3922 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3923}
3924
8e151d50
AV
3925/**
3926 * ice_aq_set_port_id_led
3927 * @pi: pointer to the port information
3928 * @is_orig_mode: is this LED set to original mode (by the net-list)
3929 * @cd: pointer to command details structure or NULL
3930 *
3931 * Set LED value for the given port (0x06e9)
3932 */
5e24d598 3933int
8e151d50
AV
3934ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
3935 struct ice_sq_cd *cd)
3936{
3937 struct ice_aqc_set_port_id_led *cmd;
3938 struct ice_hw *hw = pi->hw;
3939 struct ice_aq_desc desc;
3940
3941 cmd = &desc.params.set_port_id_led;
3942
3943 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);
3944
3945 if (is_orig_mode)
3946 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
3947 else
3948 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;
3949
3950 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3951}
3952
781f15ea
AG
3953/**
3954 * ice_aq_get_port_options
3955 * @hw: pointer to the HW struct
3956 * @options: buffer for the resultant port options
3957 * @option_count: input - size of the buffer in port options structures,
3958 * output - number of returned port options
3959 * @lport: logical port to call the command with (optional)
3960 * @lport_valid: when false, FW uses the port owned by the PF instead of
3961 * lport; it must be true when the PF owns more than one port
3962 * @active_option_idx: index of active port option in returned buffer
3963 * @active_option_valid: active option in returned buffer is valid
3964 * @pending_option_idx: index of pending port option in returned buffer
3965 * @pending_option_valid: pending option in returned buffer is valid
3966 *
3967 * Calls Get Port Options AQC (0x06ea) and verifies result.
3968 */
3969int
3970ice_aq_get_port_options(struct ice_hw *hw,
3971 struct ice_aqc_get_port_options_elem *options,
3972 u8 *option_count, u8 lport, bool lport_valid,
3973 u8 *active_option_idx, bool *active_option_valid,
3974 u8 *pending_option_idx, bool *pending_option_valid)
3975{
3976 struct ice_aqc_get_port_options *cmd;
3977 struct ice_aq_desc desc;
3978 int status;
3979 u8 i;
3980
3981 /* options buffer shall be able to hold max returned options */
3982 if (*option_count < ICE_AQC_PORT_OPT_COUNT_M)
3983 return -EINVAL;
3984
3985 cmd = &desc.params.get_port_options;
3986 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_port_options);
3987
3988 if (lport_valid)
3989 cmd->lport_num = lport;
3990 cmd->lport_num_valid = lport_valid;
3991
3992 status = ice_aq_send_cmd(hw, &desc, options,
3993 *option_count * sizeof(*options), NULL);
3994 if (status)
3995 return status;
3996
3997 /* verify direct FW response & set output parameters */
3998 *option_count = FIELD_GET(ICE_AQC_PORT_OPT_COUNT_M,
3999 cmd->port_options_count);
4000 ice_debug(hw, ICE_DBG_PHY, "options: %x\n", *option_count);
4001 *active_option_valid = FIELD_GET(ICE_AQC_PORT_OPT_VALID,
4002 cmd->port_options);
4003 if (*active_option_valid) {
4004 *active_option_idx = FIELD_GET(ICE_AQC_PORT_OPT_ACTIVE_M,
4005 cmd->port_options);
4006 if (*active_option_idx > (*option_count - 1))
4007 return -EIO;
4008 ice_debug(hw, ICE_DBG_PHY, "active idx: %x\n",
4009 *active_option_idx);
4010 }
4011
4012 *pending_option_valid = FIELD_GET(ICE_AQC_PENDING_PORT_OPT_VALID,
4013 cmd->pending_port_option_status);
4014 if (*pending_option_valid) {
4015 *pending_option_idx = FIELD_GET(ICE_AQC_PENDING_PORT_OPT_IDX_M,
4016 cmd->pending_port_option_status);
4017 if (*pending_option_idx > (*option_count - 1))
4018 return -EIO;
4019 ice_debug(hw, ICE_DBG_PHY, "pending idx: %x\n",
4020 *pending_option_idx);
4021 }
4022
4023 /* mask output options fields */
4024 for (i = 0; i < *option_count; i++) {
4025 options[i].pmd = FIELD_GET(ICE_AQC_PORT_OPT_PMD_COUNT_M,
4026 options[i].pmd);
4027 options[i].max_lane_speed = FIELD_GET(ICE_AQC_PORT_OPT_MAX_LANE_M,
4028 options[i].max_lane_speed);
4029 ice_debug(hw, ICE_DBG_PHY, "pmds: %x max speed: %x\n",
4030 options[i].pmd, options[i].max_lane_speed);
4031 }
4032
4033 return 0;
4034}
4035
4036/**
4037 * ice_aq_set_port_option
4038 * @hw: pointer to the HW struct
4039 * @lport: logical port to call the command with
4040 * @lport_valid: when false, FW uses the port owned by the PF instead of
4041 * lport; it must be true when the PF owns more than one port
4042 * @new_option: new port option to be written
4043 *
4044 * Calls Set Port Options AQC (0x06eb).
4045 */
4046int
4047ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
4048 u8 new_option)
4049{
4050 struct ice_aqc_set_port_option *cmd;
4051 struct ice_aq_desc desc;
4052
4053 if (new_option > ICE_AQC_PORT_OPT_COUNT_M)
4054 return -EINVAL;
4055
4056 cmd = &desc.params.set_port_option;
4057 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_option);
4058
4059 if (lport_valid)
4060 cmd->lport_num = lport;
4061
4062 cmd->lport_num_valid = lport_valid;
4063 cmd->selected_port_option = new_option;
4064
4065 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
4066}
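
/* Illustrative sketch of the two port-option wrappers above (hypothetical
 * caller on a single-port PF, so lport_valid is false; the array-size
 * macro is assumed to match the AQ definition):
 *
 *	struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX];
 *	u8 cnt = ICE_AQC_PORT_OPT_MAX, active, pending;
 *	bool active_valid, pending_valid;
 *
 *	err = ice_aq_get_port_options(hw, options, &cnt, 0, false,
 *				      &active, &active_valid,
 *				      &pending, &pending_valid);
 *	if (!err && active_valid)
 *		err = ice_aq_set_port_option(hw, 0, false, active);
 */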
4067
a012dca9
ST
4068/**
4069 * ice_aq_sff_eeprom
4070 * @hw: pointer to the HW struct
4071 * @lport: bits [7:0] = logical port, bit [8] = logical port valid
4072 * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default)
4073 * @mem_addr: I2C offset. lower 8 bits for address, 8 upper bits zero padding.
4074 * @page: QSFP page
4075 * @set_page: set or ignore the page
4076 * @data: pointer to data buffer to be read/written to the I2C device.
4077 * @length: 1-16 for read, 1 for write.
4079 * @write: false for a read, true for a write
4079 * @cd: pointer to command details structure or NULL
4080 *
4081 * Read/Write SFF EEPROM (0x06EE)
4082 */
5e24d598 4083int
a012dca9
ST
4084ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
4085 u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
4086 bool write, struct ice_sq_cd *cd)
4087{
4088 struct ice_aqc_sff_eeprom *cmd;
4089 struct ice_aq_desc desc;
23eca34e 4090 u16 i2c_bus_addr;
5e24d598 4091 int status;
a012dca9
ST
4092
4093 if (!data || (mem_addr & 0xff00))
d54699e2 4094 return -EINVAL;
a012dca9
ST
4095
4096 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
4097 cmd = &desc.params.read_write_sff_param;
800c1443 4098 desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
a012dca9
ST
4099 cmd->lport_num = (u8)(lport & 0xff);
4100 cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
23eca34e
JB
4101 i2c_bus_addr = FIELD_PREP(ICE_AQC_SFF_I2CBUS_7BIT_M, bus_addr >> 1) |
4102 FIELD_PREP(ICE_AQC_SFF_SET_EEPROM_PAGE_M, set_page);
a012dca9 4103 if (write)
23eca34e
JB
4104 i2c_bus_addr |= ICE_AQC_SFF_IS_WRITE;
4105 cmd->i2c_bus_addr = cpu_to_le16(i2c_bus_addr);
4106 cmd->i2c_mem_addr = cpu_to_le16(mem_addr & 0xff);
4107 cmd->eeprom_page = le16_encode_bits(page, ICE_AQC_SFF_EEPROM_PAGE_M);
a012dca9
ST
4108
4109 status = ice_aq_send_cmd(hw, &desc, data, length, cd);
4110 return status;
4111}
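
/* Illustrative sketch (hypothetical caller): read the first byte of the
 * module EEPROM (the SFF-8024 identifier) from I2C address 0xA0, page 0,
 * on the port owned by this PF:
 *
 *	u8 id;
 *
 *	err = ice_aq_sff_eeprom(hw, 0, 0xA0, 0x00, 0, 0, &id, 1,
 *				false, NULL);
 */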
4112
b6143c9b
PK
4113static enum ice_lut_size ice_lut_type_to_size(enum ice_lut_type type)
4114{
4115 switch (type) {
4116 case ICE_LUT_VSI:
4117 return ICE_LUT_VSI_SIZE;
4118 case ICE_LUT_GLOBAL:
4119 return ICE_LUT_GLOBAL_SIZE;
4120 case ICE_LUT_PF:
4121 return ICE_LUT_PF_SIZE;
4122 }
4123 WARN_ONCE(1, "incorrect type passed");
4124 return ICE_LUT_VSI_SIZE;
4125}
4126
4127static enum ice_aqc_lut_flags ice_lut_size_to_flag(enum ice_lut_size size)
4128{
4129 switch (size) {
4130 case ICE_LUT_VSI_SIZE:
4131 return ICE_AQC_LUT_SIZE_SMALL;
4132 case ICE_LUT_GLOBAL_SIZE:
4133 return ICE_AQC_LUT_SIZE_512;
4134 case ICE_LUT_PF_SIZE:
4135 return ICE_AQC_LUT_SIZE_2K;
4136 }
4137 WARN_ONCE(1, "incorrect size passed");
4138 return 0;
4139}
4140
d76a60ba
AV
4141/**
4142 * __ice_aq_get_set_rss_lut
4143 * @hw: pointer to the hardware structure
e3c53928 4144 * @params: RSS LUT parameters
4145 * @set: set true to set the table, false to get the table
4146 *
4147 * Internal function to get (0x0B05) or set (0x0B03) RSS look up table
4148 */
5e24d598 4149static int
b6143c9b
PK
4150__ice_aq_get_set_rss_lut(struct ice_hw *hw,
4151 struct ice_aq_get_set_rss_lut_params *params, bool set)
d76a60ba 4152{
b6143c9b
PK
4153 u16 opcode, vsi_id, vsi_handle = params->vsi_handle, glob_lut_idx = 0;
4154 enum ice_lut_type lut_type = params->lut_type;
4155 struct ice_aqc_get_set_rss_lut *desc_params;
4156 enum ice_aqc_lut_flags flags;
4157 enum ice_lut_size lut_size;
d76a60ba 4158 struct ice_aq_desc desc;
b6143c9b 4159 u8 *lut = params->lut;
e3c53928 4160
b6143c9b 4162 if (!lut || !ice_is_vsi_valid(hw, vsi_handle))
d54699e2 4163 return -EINVAL;
e3c53928 4164
b6143c9b
PK
4165 lut_size = ice_lut_type_to_size(lut_type);
4166 if (lut_size > params->lut_size)
4167 return -EINVAL;
4168 else if (set && lut_size != params->lut_size)
4169 return -EINVAL;
d76a60ba 4170
b6143c9b
PK
4171 opcode = set ? ice_aqc_opc_set_rss_lut : ice_aqc_opc_get_rss_lut;
4172 ice_fill_dflt_direct_cmd_desc(&desc, opcode);
4173 if (set)
d76a60ba 4174 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
d76a60ba 4175
b6143c9b
PK
4176 desc_params = &desc.params.get_set_rss_lut;
4177 vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4178 desc_params->vsi_id = cpu_to_le16(vsi_id | ICE_AQC_RSS_VSI_VALID);
d76a60ba 4179
b6143c9b
PK
4180 if (lut_type == ICE_LUT_GLOBAL)
4181 glob_lut_idx = FIELD_PREP(ICE_AQC_LUT_GLOBAL_IDX,
4182 params->global_lut_id);
d76a60ba 4183
b6143c9b
PK
4184 flags = lut_type | glob_lut_idx | ice_lut_size_to_flag(lut_size);
4185 desc_params->flags = cpu_to_le16(flags);
d76a60ba 4186
b6143c9b 4187 return ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
d76a60ba
AV
4188}
4189
4190/**
4191 * ice_aq_get_rss_lut
4192 * @hw: pointer to the hardware structure
e3c53928 4193 * @get_params: RSS LUT parameters used to specify which RSS LUT to get
4194 *
4195 * get the RSS lookup table, PF or VSI type
4196 */
5e24d598 4197int
e3c53928 4198ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params)
d76a60ba 4199{
e3c53928 4200 return __ice_aq_get_set_rss_lut(hw, get_params, false);
d76a60ba
AV
4201}
4202
4203/**
4204 * ice_aq_set_rss_lut
4205 * @hw: pointer to the hardware structure
e3c53928 4206 * @set_params: RSS LUT parameters used to specify how to set the RSS LUT
4207 *
4208 * set the RSS lookup table, PF or VSI type
4209 */
5e24d598 4210int
e3c53928 4211ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params)
d76a60ba 4212{
e3c53928 4213 return __ice_aq_get_set_rss_lut(hw, set_params, true);
d76a60ba
AV
4214}
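
/* Illustrative sketch (hypothetical caller): program a PF-scoped LUT. The
 * fields mirror those consumed by __ice_aq_get_set_rss_lut() above:
 *
 *	struct ice_aq_get_set_rss_lut_params params = {
 *		.vsi_handle = vsi_handle,
 *		.lut_type = ICE_LUT_PF,
 *		.lut = lut_buf,
 *		.lut_size = ICE_LUT_PF_SIZE,
 *	};
 *
 *	err = ice_aq_set_rss_lut(hw, &params);
 */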
4215
4216/**
4217 * __ice_aq_get_set_rss_key
f9867df6 4218 * @hw: pointer to the HW struct
4219 * @vsi_id: VSI FW index
4220 * @key: pointer to key info struct
4221 * @set: set true to set the key, false to get the key
4222 *
4223 * get (0x0B04) or set (0x0B02) the RSS key per VSI
4224 */
5518ac2a
TN
4225static int
4226__ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
4227 struct ice_aqc_get_set_rss_keys *key, bool set)
d76a60ba 4228{
b6143c9b 4229 struct ice_aqc_get_set_rss_key *desc_params;
d76a60ba
AV
4230 u16 key_size = sizeof(*key);
4231 struct ice_aq_desc desc;
4232
d76a60ba
AV
4233 if (set) {
4234 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
4235 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
4236 } else {
4237 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
4238 }
4239
b6143c9b
PK
4240 desc_params = &desc.params.get_set_rss_key;
4241 desc_params->vsi_id = cpu_to_le16(vsi_id | ICE_AQC_RSS_VSI_VALID);
d76a60ba
AV
4242
4243 return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
4244}
4245
4246/**
4247 * ice_aq_get_rss_key
f9867df6 4248 * @hw: pointer to the HW struct
4fb33f31 4249 * @vsi_handle: software VSI handle
4250 * @key: pointer to key info struct
4251 *
4252 * get the RSS key per VSI
4253 */
5e24d598 4254int
4fb33f31 4255ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
d76a60ba
AV
4256 struct ice_aqc_get_set_rss_keys *key)
4257{
4fb33f31 4258 if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
d54699e2 4259 return -EINVAL;
4fb33f31
AV
4260
4261 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
4262 key, false);
d76a60ba
AV
4263}
4264
4265/**
4266 * ice_aq_set_rss_key
f9867df6 4267 * @hw: pointer to the HW struct
4fb33f31 4268 * @vsi_handle: software VSI handle
4269 * @keys: pointer to key info struct
4270 *
4271 * set the RSS key per VSI
4272 */
5e24d598 4273int
4fb33f31 4274ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
d76a60ba
AV
4275 struct ice_aqc_get_set_rss_keys *keys)
4276{
4fb33f31 4277 if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
d54699e2 4278 return -EINVAL;
4fb33f31
AV
4279
4280 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
4281 keys, true);
d76a60ba
AV
4282}
4283
cdedef59
AV
4284/**
4285 * ice_aq_add_lan_txq
4286 * @hw: pointer to the hardware structure
4287 * @num_qgrps: Number of added queue groups
4288 * @qg_list: list of queue groups to be added
4289 * @buf_size: size of buffer for indirect command
4290 * @cd: pointer to command details structure or NULL
4291 *
4292 * Add Tx LAN queue (0x0C30)
4293 *
4294 * NOTE:
4295 * Prior to calling add Tx LAN queue:
4296 * Initialize the following as part of the Tx queue context:
4297 * Completion queue ID if the queue uses Completion queue, Quanta profile,
4298 * Cache profile and Packet shaper profile.
4299 *
4300 * After add Tx LAN queue AQ command is completed:
4301 * Interrupts should be associated with specific queues,
4302 * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue
4303 * flow.
4304 */
5e24d598 4305static int
cdedef59
AV
4306ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
4307 struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
4308 struct ice_sq_cd *cd)
4309{
cdedef59
AV
4310 struct ice_aqc_add_tx_qgrp *list;
4311 struct ice_aqc_add_txqs *cmd;
4312 struct ice_aq_desc desc;
66486d89 4313 u16 i, sum_size = 0;
cdedef59
AV
4314
4315 cmd = &desc.params.add_txqs;
4316
4317 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
4318
4319 if (!qg_list)
d54699e2 4320 return -EINVAL;
cdedef59
AV
4321
4322 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
d54699e2 4323 return -EINVAL;
cdedef59 4324
66486d89
BA
4325 for (i = 0, list = qg_list; i < num_qgrps; i++) {
4326 sum_size += struct_size(list, txqs, list->num_txqs);
4327 list = (struct ice_aqc_add_tx_qgrp *)(list->txqs +
4328 list->num_txqs);
cdedef59
AV
4329 }
4330
66486d89 4331 if (buf_size != sum_size)
d54699e2 4332 return -EINVAL;
cdedef59
AV
4333
4334 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
4335
4336 cmd->num_qgrps = num_qgrps;
4337
4338 return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
4339}
4340
4341/**
4342 * ice_aq_dis_lan_txq
4343 * @hw: pointer to the hardware structure
4344 * @num_qgrps: number of groups in the list
4345 * @qg_list: the list of groups to disable
4346 * @buf_size: the total size of the qg_list buffer in bytes
94c4441b 4347 * @rst_src: if called due to reset, specifies the reset source
ddf30f7f 4348 * @vmvf_num: the relative VM or VF number that is undergoing the reset
4349 * @cd: pointer to command details structure or NULL
4350 *
4351 * Disable LAN Tx queue (0x0C31)
4352 */
5e24d598 4353static int
cdedef59
AV
4354ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
4355 struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
ddf30f7f 4356 enum ice_disq_rst_src rst_src, u16 vmvf_num,
cdedef59
AV
4357 struct ice_sq_cd *cd)
4358{
66486d89 4359 struct ice_aqc_dis_txq_item *item;
cdedef59
AV
4360 struct ice_aqc_dis_txqs *cmd;
4361 struct ice_aq_desc desc;
23eca34e 4362 u16 vmvf_and_timeout;
cdedef59 4363 u16 i, sz = 0;
5518ac2a 4364 int status;
cdedef59
AV
4365
4366 cmd = &desc.params.dis_txqs;
4367 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
4368
ddf30f7f
AV
4369 /* qg_list can be NULL only in VM/VF reset flow */
4370 if (!qg_list && !rst_src)
d54699e2 4371 return -EINVAL;
cdedef59
AV
4372
4373 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
d54699e2 4374 return -EINVAL;
ddf30f7f 4375
cdedef59
AV
4376 cmd->num_entries = num_qgrps;
4377
23eca34e 4378 vmvf_and_timeout = FIELD_PREP(ICE_AQC_Q_DIS_TIMEOUT_M, 5);
ddf30f7f
AV
4379
4380 switch (rst_src) {
4381 case ICE_VM_RESET:
4382 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
23eca34e 4383 vmvf_and_timeout |= vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M;
ddf30f7f
AV
4384 break;
4385 case ICE_VF_RESET:
4386 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
f9867df6 4387 /* In this case, FW expects vmvf_num to be absolute VF ID */
23eca34e
JB
4388 vmvf_and_timeout |= (vmvf_num + hw->func_caps.vf_base_id) &
4389 ICE_AQC_Q_DIS_VMVF_NUM_M;
ddf30f7f
AV
4390 break;
4391 case ICE_NO_RESET:
4392 default:
4393 break;
4394 }
4395
23eca34e
JB
4396 cmd->vmvf_and_timeout = cpu_to_le16(vmvf_and_timeout);
4397
6e9650d5
VR
4398 /* flush pipe on time out */
4399 cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
ddf30f7f
AV
4400 /* If no queue group info, we are in a reset flow. Issue the AQ */
4401 if (!qg_list)
4402 goto do_aq;
4403
4404 /* set RD bit to indicate that command buffer is provided by the driver
4405 * and it needs to be read by the firmware
4406 */
4407 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
4408
66486d89
BA
4409 for (i = 0, item = qg_list; i < num_qgrps; i++) {
4410 u16 item_size = struct_size(item, q_id, item->num_qs);
cdedef59
AV
4411
4412 /* If the num of queues is even, add 2 bytes of padding */
66486d89
BA
4413 if ((item->num_qs % 2) == 0)
4414 item_size += 2;
4415
4416 sz += item_size;
4417
4418 item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size);
cdedef59
AV
4419 }
4420
4421 if (buf_size != sz)
d54699e2 4422 return -EINVAL;
cdedef59 4423
ddf30f7f 4424do_aq:
6e9650d5
VR
4425 status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
4426 if (status) {
4427 if (!qg_list)
4428 ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
4429 vmvf_num, hw->adminq.sq_last_status);
4430 else
2f2da36e 4431 ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
6e9650d5
VR
4432 le16_to_cpu(qg_list[0].q_id[0]),
4433 hw->adminq.sq_last_status);
4434 }
4435 return status;
cdedef59
AV
4436}
4437
23ccae5c
DE
4438/**
4439 * ice_aq_cfg_lan_txq
4440 * @hw: pointer to the hardware structure
4441 * @buf: buffer for command
4442 * @buf_size: size of buffer in bytes
4443 * @num_qs: number of queues being configured
4444 * @oldport: origination lport
4445 * @newport: destination lport
4446 * @cd: pointer to command details structure or NULL
4447 *
4448 * Move/Configure LAN Tx queue (0x0C32)
4449 *
4450 * There is a better AQ command for moving nodes, so this wrapper only
4451 * implements configuring the node.
4452 */
4453int
4454ice_aq_cfg_lan_txq(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *buf,
4455 u16 buf_size, u16 num_qs, u8 oldport, u8 newport,
4456 struct ice_sq_cd *cd)
4457{
4458 struct ice_aqc_cfg_txqs *cmd;
4459 struct ice_aq_desc desc;
4460 int status;
4461
4462 cmd = &desc.params.cfg_txqs;
4463 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_cfg_txqs);
4464 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
4465
4466 if (!buf)
4467 return -EINVAL;
4468
4469 cmd->cmd_type = ICE_AQC_Q_CFG_TC_CHNG;
4470 cmd->num_qs = num_qs;
4471 cmd->port_num_chng = (oldport & ICE_AQC_Q_CFG_SRC_PRT_M);
23eca34e
JB
4472 cmd->port_num_chng |= FIELD_PREP(ICE_AQC_Q_CFG_DST_PRT_M, newport);
4473 cmd->time_out = FIELD_PREP(ICE_AQC_Q_CFG_TIMEOUT_M, 5);
23ccae5c
DE
4474 cmd->blocked_cgds = 0;
4475
4476 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
4477 if (status)
4478 ice_debug(hw, ICE_DBG_SCHED, "Failed to reconfigure nodes %d\n",
4479 hw->adminq.sq_last_status);
4480 return status;
4481}
4482
348048e7
DE
4483/**
4484 * ice_aq_add_rdma_qsets
4485 * @hw: pointer to the hardware structure
4486 * @num_qset_grps: Number of RDMA Qset groups
4487 * @qset_list: list of Qset groups to be added
4488 * @buf_size: size of buffer for indirect command
4489 * @cd: pointer to command details structure or NULL
4490 *
4491 * Add Tx RDMA Qsets (0x0C33)
4492 */
4493static int
4494ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,
4495 struct ice_aqc_add_rdma_qset_data *qset_list,
4496 u16 buf_size, struct ice_sq_cd *cd)
4497{
4498 struct ice_aqc_add_rdma_qset_data *list;
4499 struct ice_aqc_add_rdma_qset *cmd;
4500 struct ice_aq_desc desc;
4501 u16 i, sum_size = 0;
4502
4503 cmd = &desc.params.add_rdma_qset;
4504
4505 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_rdma_qset);
4506
4507 if (num_qset_grps > ICE_LAN_TXQ_MAX_QGRPS)
4508 return -EINVAL;
4509
4510 for (i = 0, list = qset_list; i < num_qset_grps; i++) {
4511 u16 num_qsets = le16_to_cpu(list->num_qsets);
4512
4513 sum_size += struct_size(list, rdma_qsets, num_qsets);
4514 list = (struct ice_aqc_add_rdma_qset_data *)(list->rdma_qsets +
4515 num_qsets);
4516 }
4517
4518 if (buf_size != sum_size)
4519 return -EINVAL;
4520
4521 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
4522
4523 cmd->num_qset_grps = num_qset_grps;
4524
d54699e2 4525 return ice_aq_send_cmd(hw, &desc, qset_list, buf_size, cd);
348048e7
DE
4526}
4527
cdedef59
AV
4528/* End of FW Admin Queue command wrappers */
4529
4530/**
4531 * ice_pack_ctx_byte - write a byte to a packed context structure
4532 * @src_ctx: unpacked source context structure
4533 * @dest_ctx: packed destination context data
4534 * @ce_info: context element description
cdedef59 4535 */
1260b45d
JK
4536static void ice_pack_ctx_byte(u8 *src_ctx, u8 *dest_ctx,
4537 const struct ice_ctx_ele *ce_info)
cdedef59
AV
4538{
4539 u8 src_byte, dest_byte, mask;
4540 u8 *from, *dest;
4541 u16 shift_width;
4542
4543 /* copy from the next struct field */
4544 from = src_ctx + ce_info->offset;
4545
4546 /* prepare the bits and mask */
4547 shift_width = ce_info->lsb % 8;
a45d1bf5 4548 mask = GENMASK(ce_info->width - 1 + shift_width, shift_width);
cdedef59
AV
4549
4550 src_byte = *from;
cdedef59 4551 src_byte <<= shift_width;
a45d1bf5 4552 src_byte &= mask;
cdedef59
AV
4553
4554 /* get the current bits from the target bit string */
4555 dest = dest_ctx + (ce_info->lsb / 8);
4556
4557 memcpy(&dest_byte, dest, sizeof(dest_byte));
4558
4559 dest_byte &= ~mask; /* get the bits not changing */
4560 dest_byte |= src_byte; /* add in the new bits */
4561
4562 /* put it all back */
4563 memcpy(dest, &dest_byte, sizeof(dest_byte));
4564}
4565
4566/**
4567 * ice_pack_ctx_word - write a word to a packed context structure
4568 * @src_ctx: unpacked source context structure
4569 * @dest_ctx: packed destination context data
4570 * @ce_info: context element description
cdedef59 4571 */
1260b45d
JK
4572static void ice_pack_ctx_word(u8 *src_ctx, u8 *dest_ctx,
4573 const struct ice_ctx_ele *ce_info)
cdedef59
AV
4574{
4575 u16 src_word, mask;
4576 __le16 dest_word;
4577 u8 *from, *dest;
4578 u16 shift_width;
4579
4580 /* copy from the next struct field */
4581 from = src_ctx + ce_info->offset;
4582
4583 /* prepare the bits and mask */
4584 shift_width = ce_info->lsb % 8;
a45d1bf5 4585 mask = GENMASK(ce_info->width - 1 + shift_width, shift_width);
cdedef59
AV
4586
4587 /* don't swizzle the bits until after the mask because the mask bits
4588 * will be in a different bit position on big endian machines
4589 */
4590 src_word = *(u16 *)from;
cdedef59 4591 src_word <<= shift_width;
a45d1bf5 4592 src_word &= mask;
cdedef59
AV
4593
4594 /* get the current bits from the target bit string */
4595 dest = dest_ctx + (ce_info->lsb / 8);
4596
4597 memcpy(&dest_word, dest, sizeof(dest_word));
4598
4599 dest_word &= ~(cpu_to_le16(mask)); /* get the bits not changing */
4600 dest_word |= cpu_to_le16(src_word); /* add in the new bits */
4601
4602 /* put it all back */
4603 memcpy(dest, &dest_word, sizeof(dest_word));
4604}
4605
4606/**
4607 * ice_pack_ctx_dword - write a dword to a packed context structure
4608 * @src_ctx: unpacked source context structure
4609 * @dest_ctx: packed destination context data
4610 * @ce_info: context element description
cdedef59 4611 */
1260b45d
JK
4612static void ice_pack_ctx_dword(u8 *src_ctx, u8 *dest_ctx,
4613 const struct ice_ctx_ele *ce_info)
cdedef59
AV
4614{
4615 u32 src_dword, mask;
4616 __le32 dest_dword;
4617 u8 *from, *dest;
4618 u16 shift_width;
4619
4620 /* copy from the next struct field */
4621 from = src_ctx + ce_info->offset;
4622
4623 /* prepare the bits and mask */
4624 shift_width = ce_info->lsb % 8;
a45d1bf5 4625 mask = GENMASK(ce_info->width - 1 + shift_width, shift_width);
cdedef59
AV
4626
4627 /* don't swizzle the bits until after the mask because the mask bits
4628 * will be in a different bit position on big endian machines
4629 */
4630 src_dword = *(u32 *)from;
cdedef59 4631 src_dword <<= shift_width;
a45d1bf5 4632 src_dword &= mask;
cdedef59
AV
4633
4634 /* get the current bits from the target bit string */
4635 dest = dest_ctx + (ce_info->lsb / 8);
4636
4637 memcpy(&dest_dword, dest, sizeof(dest_dword));
4638
4639 dest_dword &= ~(cpu_to_le32(mask)); /* get the bits not changing */
4640 dest_dword |= cpu_to_le32(src_dword); /* add in the new bits */
4641
4642 /* put it all back */
4643 memcpy(dest, &dest_dword, sizeof(dest_dword));
4644}
4645
4646/**
4647 * ice_pack_ctx_qword - write a qword to a packed context structure
4648 * @src_ctx: unpacked source context structure
4649 * @dest_ctx: packed destination context data
4650 * @ce_info: context element description
cdedef59 4651 */
1260b45d
JK
4652static void ice_pack_ctx_qword(u8 *src_ctx, u8 *dest_ctx,
4653 const struct ice_ctx_ele *ce_info)
cdedef59
AV
4654{
4655 u64 src_qword, mask;
4656 __le64 dest_qword;
4657 u8 *from, *dest;
4658 u16 shift_width;
4659
4660 /* copy from the next struct field */
4661 from = src_ctx + ce_info->offset;
4662
4663 /* prepare the bits and mask */
4664 shift_width = ce_info->lsb % 8;
a45d1bf5 4665 mask = GENMASK_ULL(ce_info->width - 1 + shift_width, shift_width);
cdedef59
AV
4666
4667 /* don't swizzle the bits until after the mask because the mask bits
4668 * will be in a different bit position on big endian machines
4669 */
4670 src_qword = *(u64 *)from;
cdedef59 4671 src_qword <<= shift_width;
a45d1bf5 4672 src_qword &= mask;
cdedef59
AV
4673
4674 /* get the current bits from the target bit string */
4675 dest = dest_ctx + (ce_info->lsb / 8);
4676
4677 memcpy(&dest_qword, dest, sizeof(dest_qword));
4678
4679 dest_qword &= ~(cpu_to_le64(mask)); /* get the bits not changing */
4680 dest_qword |= cpu_to_le64(src_qword); /* add in the new bits */
4681
4682 /* put it all back */
4683 memcpy(dest, &dest_qword, sizeof(dest_qword));
4684}
4685
4686/**
4687 * ice_set_ctx - set context bits in packed structure
7e34786a 4688 * @hw: pointer to the hardware structure
4689 * @src_ctx: pointer to a generic non-packed context structure
4690 * @dest_ctx: pointer to memory for the packed structure
979c2c04 4691 * @ce_info: List of Rx context elements
cdedef59 4692 */
979c2c04
JK
4693int ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
4694 const struct ice_ctx_ele *ce_info)
cdedef59
AV
4695{
4696 int f;
4697
4698 for (f = 0; ce_info[f].width; f++) {
4699 /* We have to deal with each element of the FW response
4700 * using the correct size so that we are correct regardless
4701 * of the endianness of the machine.
4702 */
7e34786a 4703 if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) {
9228d8b2 4704 ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
7e34786a
BA
4705 f, ce_info[f].width, ce_info[f].size_of);
4706 continue;
4707 }
cdedef59
AV
4708 switch (ce_info[f].size_of) {
4709 case sizeof(u8):
1260b45d 4710 ice_pack_ctx_byte(src_ctx, dest_ctx, &ce_info[f]);
cdedef59
AV
4711 break;
4712 case sizeof(u16):
1260b45d 4713 ice_pack_ctx_word(src_ctx, dest_ctx, &ce_info[f]);
cdedef59
AV
4714 break;
4715 case sizeof(u32):
1260b45d 4716 ice_pack_ctx_dword(src_ctx, dest_ctx, &ce_info[f]);
cdedef59
AV
4717 break;
4718 case sizeof(u64):
1260b45d 4719 ice_pack_ctx_qword(src_ctx, dest_ctx, &ce_info[f]);
cdedef59
AV
4720 break;
4721 default:
d54699e2 4722 return -EINVAL;
cdedef59
AV
4723 }
4724 }
4725
4726 return 0;
4727}
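
/* Illustrative sketch: ice_set_ctx() is driven by a per-structure table of
 * struct ice_ctx_ele entries (offset/size_of/width/lsb). A hypothetical
 * caller packing a Tx LAN queue context might look like the following
 * (buffer size macro and table name assumed to match the ones declared
 * elsewhere in the driver):
 *
 *	u8 ctx_buf[ICE_TXQ_CTX_SZ] = {};
 *
 *	err = ice_set_ctx(hw, (u8 *)&tlan_ctx, ctx_buf, ice_tlan_ctx_info);
 */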
4728
bb87ee0e
AV
4729/**
4730 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
4731 * @hw: pointer to the HW struct
4732 * @vsi_handle: software VSI handle
4733 * @tc: TC number
4734 * @q_handle: software queue handle
4735 */
1ddef455 4736struct ice_q_ctx *
bb87ee0e
AV
4737ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
4738{
4739 struct ice_vsi_ctx *vsi;
4740 struct ice_q_ctx *q_ctx;
4741
4742 vsi = ice_get_vsi_ctx(hw, vsi_handle);
4743 if (!vsi)
4744 return NULL;
4745 if (q_handle >= vsi->num_lan_q_entries[tc])
4746 return NULL;
4747 if (!vsi->lan_q_ctx[tc])
4748 return NULL;
4749 q_ctx = vsi->lan_q_ctx[tc];
4750 return &q_ctx[q_handle];
4751}
4752
cdedef59
AV
4753/**
4754 * ice_ena_vsi_txq
4755 * @pi: port information structure
4fb33f31 4756 * @vsi_handle: software VSI handle
f9867df6 4757 * @tc: TC number
bb87ee0e 4758 * @q_handle: software queue handle
4759 * @num_qgrps: Number of added queue groups
4760 * @buf: list of queue groups to be added
4761 * @buf_size: size of buffer for indirect command
4762 * @cd: pointer to command details structure or NULL
4763 *
f9867df6 4764 * This function adds one LAN queue
cdedef59 4765 */
5e24d598 4766int
bb87ee0e
AV
4767ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
4768 u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
cdedef59
AV
4769 struct ice_sq_cd *cd)
4770{
4771 struct ice_aqc_txsched_elem_data node = { 0 };
4772 struct ice_sched_node *parent;
bb87ee0e 4773 struct ice_q_ctx *q_ctx;
cdedef59 4774 struct ice_hw *hw;
5518ac2a 4775 int status;
cdedef59
AV
4776
4777 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
d54699e2 4778 return -EIO;
cdedef59
AV
4779
4780 if (num_qgrps > 1 || buf->num_txqs > 1)
d54699e2 4781 return -ENOSPC;
cdedef59
AV
4782
4783 hw = pi->hw;
4784
4fb33f31 4785 if (!ice_is_vsi_valid(hw, vsi_handle))
d54699e2 4786 return -EINVAL;
4fb33f31 4787
cdedef59
AV
4788 mutex_lock(&pi->sched_lock);
4789
bb87ee0e
AV
4790 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
4791 if (!q_ctx) {
4792 ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
4793 q_handle);
d54699e2 4794 status = -EINVAL;
bb87ee0e
AV
4795 goto ena_txq_exit;
4796 }
4797
cdedef59 4798 /* find a parent node */
4fb33f31 4799 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
cdedef59
AV
4800 ICE_SCHED_NODE_OWNER_LAN);
4801 if (!parent) {
d54699e2 4802 status = -EINVAL;
cdedef59
AV
4803 goto ena_txq_exit;
4804 }
4fb33f31 4805
cdedef59
AV
4806 buf->parent_teid = parent->info.node_teid;
4807 node.parent_teid = parent->info.node_teid;
4808 /* Mark the values in the "generic" section as valid. The default
4809 * value in the "generic" section is zero. This means that :
4810 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
4811 * - 0 priority among siblings, indicated by Bit 1-3.
4812 * - WFQ, indicated by Bit 4.
4813 * - 0 Adjustment value is used in PSM credit update flow, indicated by
4814 * Bit 5-6.
4815 * - Bit 7 is reserved.
4816 * Without setting the generic section as valid in valid_sections, the
f9867df6 4817 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
cdedef59 4818 */
984824a2
TS
4819 buf->txqs[0].info.valid_sections =
4820 ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
4821 ICE_AQC_ELEM_VALID_EIR;
4822 buf->txqs[0].info.generic = 0;
4823 buf->txqs[0].info.cir_bw.bw_profile_idx =
4824 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
4825 buf->txqs[0].info.cir_bw.bw_alloc =
4826 cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
4827 buf->txqs[0].info.eir_bw.bw_profile_idx =
4828 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
4829 buf->txqs[0].info.eir_bw.bw_alloc =
4830 cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
cdedef59 4831
f9867df6 4832 /* add the LAN queue */
cdedef59 4833 status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
6e9650d5 4834 if (status) {
bb87ee0e 4835 ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
6e9650d5
VR
4836 le16_to_cpu(buf->txqs[0].txq_id),
4837 hw->adminq.sq_last_status);
cdedef59 4838 goto ena_txq_exit;
6e9650d5 4839 }
cdedef59
AV
4840
4841 node.node_teid = buf->txqs[0].q_teid;
4842 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
bb87ee0e 4843 q_ctx->q_handle = q_handle;
1ddef455 4844 q_ctx->q_teid = le32_to_cpu(node.node_teid);
cdedef59 4845
1ddef455 4846 /* add a leaf node into scheduler tree queue layer */
bdf96d96 4847 status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node, NULL);
1ddef455
UK
4848 if (!status)
4849 status = ice_sched_replay_q_bw(pi, q_ctx);
cdedef59
AV
4850
4851ena_txq_exit:
4852 mutex_unlock(&pi->sched_lock);
4853 return status;
4854}
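
/* Illustrative sketch (hypothetical caller, single queue group): the LAN
 * enable path builds an ice_aqc_add_tx_qgrp buffer with the packed queue
 * context already written into its per-queue context area, then calls:
 *
 *	err = ice_ena_vsi_txq(pi, vsi_handle, tc, q_handle, 1, qg_buf,
 *			      qg_buf_size, NULL);
 *	if (!err)
 *		q_teid = le32_to_cpu(qg_buf->txqs[0].q_teid);
 */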
4855
4856/**
4857 * ice_dis_vsi_txq
4858 * @pi: port information structure
4859 * @vsi_handle: software VSI handle
4860 * @tc: TC number
cdedef59 4861 * @num_queues: number of queues
bb87ee0e 4862 * @q_handles: pointer to software queue handle array
4863 * @q_ids: pointer to the q_id array
4864 * @q_teids: pointer to queue node teids
94c4441b 4865 * @rst_src: if called due to reset, specifies the reset source
ddf30f7f 4866 * @vmvf_num: the relative VM or VF number that is undergoing the reset
4867 * @cd: pointer to command details structure or NULL
4868 *
4869 * This function removes queues and their corresponding nodes in SW DB
4870 */
5e24d598 4871int
bb87ee0e
AV
4872ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
4873 u16 *q_handles, u16 *q_ids, u32 *q_teids,
4874 enum ice_disq_rst_src rst_src, u16 vmvf_num,
ddf30f7f 4875 struct ice_sq_cd *cd)
cdedef59 4876{
d8e45f29 4877 DEFINE_RAW_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1);
11dee3d6 4878 u16 i, buf_size = __struct_size(qg_list);
bb87ee0e 4879 struct ice_q_ctx *q_ctx;
5518ac2a 4880 int status = -ENOENT;
66486d89 4881 struct ice_hw *hw;
cdedef59
AV
4882
4883 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
d54699e2 4884 return -EIO;
cdedef59 4885
66486d89
BA
4886 hw = pi->hw;
4887
85796d6e
AA
4888 if (!num_queues) {
4889 /* if the queue is already disabled but the disable queue command
4890 * still has to be sent to complete the VF reset, then call
4891 * ice_aq_dis_lan_txq without any queue information
4892 */
4893 if (rst_src)
66486d89 4894 return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
85796d6e 4895 vmvf_num, NULL);
d54699e2 4896 return -EIO;
85796d6e 4897 }
ddf30f7f 4898
cdedef59
AV
4899 mutex_lock(&pi->sched_lock);
4900
4901 for (i = 0; i < num_queues; i++) {
4902 struct ice_sched_node *node;
4903
4904 node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
4905 if (!node)
4906 continue;
66486d89 4907 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]);
bb87ee0e 4908 if (!q_ctx) {
66486d89 4909 ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle %d\n",
bb87ee0e
AV
4910 q_handles[i]);
4911 continue;
4912 }
4913 if (q_ctx->q_handle != q_handles[i]) {
66486d89 4914 ice_debug(hw, ICE_DBG_SCHED, "Err:handles %d %d\n",
bb87ee0e
AV
4915 q_ctx->q_handle, q_handles[i]);
4916 continue;
4917 }
66486d89
BA
4918 qg_list->parent_teid = node->info.parent_teid;
4919 qg_list->num_qs = 1;
4920 qg_list->q_id[0] = cpu_to_le16(q_ids[i]);
4921 status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src,
4922 vmvf_num, cd);
cdedef59
AV
4923
4924 if (status)
4925 break;
4926 ice_free_sched_node(pi, node);
bb87ee0e 4927 q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
f3fbda33 4928 q_ctx->q_teid = ICE_INVAL_TEID;
cdedef59
AV
4929 }
4930 mutex_unlock(&pi->sched_lock);
4931 return status;
4932}
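/* Usage sketch (hypothetical caller, not part of the driver): tear down a
 * single LAN Tx queue that was previously enabled on @tc. The handle, ID
 * and TEID are assumed to have been saved by the caller when the queue was
 * added; ICE_NO_RESET marks this as a normal teardown rather than a reset.
 */
static int __maybe_unused
ice_example_dis_one_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
			u16 q_handle, u16 q_id, u32 q_teid)
{
	return ice_dis_vsi_txq(pi, vsi_handle, tc, 1, &q_handle, &q_id,
			       &q_teid, ICE_NO_RESET, 0, NULL);
}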
5513b920
AV
4933
4934/**
94c4441b 4935 * ice_cfg_vsi_qs - configure the new/existing VSI queues
5513b920 4936 * @pi: port information structure
4fb33f31 4937 * @vsi_handle: software VSI handle
5513b920
AV
4938 * @tc_bitmap: TC bitmap
4939 * @maxqs: max queues array per TC
f9867df6 4940 * @owner: LAN or RDMA
5513b920
AV
4941 *
4942 * This function adds/updates the VSI queues per TC.
4943 */
5e24d598 4944static int
4fb33f31 4945ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
5513b920
AV
4946 u16 *maxqs, u8 owner)
4947{
5e24d598 4948 int status = 0;
5513b920
AV
4949 u8 i;
4950
4951 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
d54699e2 4952 return -EIO;
5513b920 4953
4fb33f31 4954 if (!ice_is_vsi_valid(pi->hw, vsi_handle))
d54699e2 4955 return -EINVAL;
4fb33f31 4956
5513b920
AV
4957 mutex_lock(&pi->sched_lock);
4958
2bdc97be 4959 ice_for_each_traffic_class(i) {
5513b920
AV
4960 /* configuration is possible only if TC node is present */
4961 if (!ice_sched_get_tc_node(pi, i))
4962 continue;
4963
4fb33f31 4964 status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
5513b920
AV
4965 ice_is_tc_ena(tc_bitmap, i));
4966 if (status)
4967 break;
4968 }
4969
4970 mutex_unlock(&pi->sched_lock);
4971 return status;
4972}
4973
4974/**
f9867df6 4975 * ice_cfg_vsi_lan - configure VSI LAN queues
5513b920 4976 * @pi: port information structure
4fb33f31 4977 * @vsi_handle: software VSI handle
5513b920 4978 * @tc_bitmap: TC bitmap
f9867df6 4979 * @max_lanqs: max LAN queues array per TC
5513b920 4980 *
f9867df6 4981 * This function adds/updates the VSI LAN queues per TC.
5513b920 4982 */
5e24d598 4983int
4fb33f31 4984ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
5513b920
AV
4985 u16 *max_lanqs)
4986{
4fb33f31 4987 return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
5513b920
AV
4988 ICE_SCHED_NODE_OWNER_LAN);
4989}
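/* Usage sketch (illustrative only): give every enabled TC the same number
 * of LAN queues. ICE_MAX_TRAFFIC_CLASS is assumed to bound the per-TC
 * array; ice_for_each_traffic_class() and ice_is_tc_ena() are the helpers
 * already used by ice_cfg_vsi_qs() above.
 */
static int __maybe_unused
ice_example_cfg_lan_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
		       u16 qs_per_tc)
{
	u16 max_lanqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
	u8 tc;

	ice_for_each_traffic_class(tc) {
		if (ice_is_tc_ena(tc_bitmap, tc))
			max_lanqs[tc] = qs_per_tc;
	}

	return ice_cfg_vsi_lan(pi, vsi_handle, tc_bitmap, max_lanqs);
}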
45d3d428 4990
348048e7
DE
4991/**
4992 * ice_cfg_vsi_rdma - configure the VSI RDMA queues
4993 * @pi: port information structure
4994 * @vsi_handle: software VSI handle
4995 * @tc_bitmap: TC bitmap
4996 * @max_rdmaqs: max RDMA queues array per TC
4997 *
4998 * This function adds/updates the VSI RDMA queues per TC.
4999 */
5000int
5001ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
5002 u16 *max_rdmaqs)
5003{
d54699e2
TN
5004 return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_rdmaqs,
5005 ICE_SCHED_NODE_OWNER_RDMA);
348048e7
DE
5006}
5007
5008/**
5009 * ice_ena_vsi_rdma_qset
5010 * @pi: port information structure
5011 * @vsi_handle: software VSI handle
5012 * @tc: TC number
5013 * @rdma_qset: pointer to RDMA Qset
5014 * @num_qsets: number of RDMA Qsets
5015 * @qset_teid: pointer to Qset node TEIDs
5016 *
5017 * This function adds RDMA Qsets
5018 */
5019int
5020ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
5021 u16 *rdma_qset, u16 num_qsets, u32 *qset_teid)
5022{
5023 struct ice_aqc_txsched_elem_data node = { 0 };
5024 struct ice_aqc_add_rdma_qset_data *buf;
5025 struct ice_sched_node *parent;
348048e7
DE
5026 struct ice_hw *hw;
5027 u16 i, buf_size;
5028 int ret;
5029
5030 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
5031 return -EIO;
5032 hw = pi->hw;
5033
5034 if (!ice_is_vsi_valid(hw, vsi_handle))
5035 return -EINVAL;
5036
5037 buf_size = struct_size(buf, rdma_qsets, num_qsets);
5038 buf = kzalloc(buf_size, GFP_KERNEL);
5039 if (!buf)
5040 return -ENOMEM;
5041 mutex_lock(&pi->sched_lock);
5042
5043 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
5044 ICE_SCHED_NODE_OWNER_RDMA);
5045 if (!parent) {
5046 ret = -EINVAL;
5047 goto rdma_error_exit;
5048 }
5049 buf->parent_teid = parent->info.node_teid;
5050 node.parent_teid = parent->info.node_teid;
5051
5052 buf->num_qsets = cpu_to_le16(num_qsets);
5053 for (i = 0; i < num_qsets; i++) {
5054 buf->rdma_qsets[i].tx_qset_id = cpu_to_le16(rdma_qset[i]);
5055 buf->rdma_qsets[i].info.valid_sections =
5056 ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
5057 ICE_AQC_ELEM_VALID_EIR;
5058 buf->rdma_qsets[i].info.generic = 0;
5059 buf->rdma_qsets[i].info.cir_bw.bw_profile_idx =
5060 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
5061 buf->rdma_qsets[i].info.cir_bw.bw_alloc =
5062 cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
5063 buf->rdma_qsets[i].info.eir_bw.bw_profile_idx =
5064 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
5065 buf->rdma_qsets[i].info.eir_bw.bw_alloc =
5066 cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
5067 }
5068 ret = ice_aq_add_rdma_qsets(hw, 1, buf, buf_size, NULL);
5069 if (ret) {
5070 ice_debug(hw, ICE_DBG_RDMA, "add RDMA qset failed\n");
5071 goto rdma_error_exit;
5072 }
5073 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
5074 for (i = 0; i < num_qsets; i++) {
5075 node.node_teid = buf->rdma_qsets[i].qset_teid;
2ccc1c1c 5076 ret = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1,
bdf96d96 5077 &node, NULL);
2ccc1c1c 5078 if (ret)
348048e7 5079 break;
348048e7
DE
5080 qset_teid[i] = le32_to_cpu(node.node_teid);
5081 }
5082rdma_error_exit:
5083 mutex_unlock(&pi->sched_lock);
5084 kfree(buf);
5085 return ret;
5086}
5087
5088/**
5089 * ice_dis_vsi_rdma_qset - free RDMA resources
5090 * @pi: port_info struct
5091 * @count: number of RDMA Qsets to free
5092 * @qset_teid: TEID of Qset node
5093 * @q_id: list of queue IDs being disabled
5094 */
5095int
5096ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
5097 u16 *q_id)
5098{
d8e45f29 5099 DEFINE_RAW_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1);
11dee3d6 5100 u16 qg_size = __struct_size(qg_list);
348048e7 5101 struct ice_hw *hw;
5518ac2a 5102 int status = 0;
348048e7
DE
5103 int i;
5104
5105 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
5106 return -EIO;
5107
5108 hw = pi->hw;
5109
348048e7
DE
5110 mutex_lock(&pi->sched_lock);
5111
5112 for (i = 0; i < count; i++) {
5113 struct ice_sched_node *node;
5114
5115 node = ice_sched_find_node_by_teid(pi->root, qset_teid[i]);
5116 if (!node)
5117 continue;
5118
5119 qg_list->parent_teid = node->info.parent_teid;
5120 qg_list->num_qs = 1;
5121 qg_list->q_id[0] =
5122 cpu_to_le16(q_id[i] |
5123 ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET);
5124
5125 status = ice_aq_dis_lan_txq(hw, 1, qg_list, qg_size,
5126 ICE_NO_RESET, 0, NULL);
5127 if (status)
5128 break;
5129
5130 ice_free_sched_node(pi, node);
5131 }
5132
5133 mutex_unlock(&pi->sched_lock);
d54699e2 5134 return status;
348048e7
DE
5135}
5136
8a3a565f
AK
5137/**
5138 * ice_aq_get_cgu_abilities - get cgu abilities
5139 * @hw: pointer to the HW struct
5140 * @abilities: CGU abilities
5141 *
5142 * Get CGU abilities (0x0C61)
5143 * Return: 0 on success or negative value on failure.
5144 */
5145int
5146ice_aq_get_cgu_abilities(struct ice_hw *hw,
5147 struct ice_aqc_get_cgu_abilities *abilities)
5148{
5149 struct ice_aq_desc desc;
5150
5151 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_abilities);
5152 return ice_aq_send_cmd(hw, &desc, abilities, sizeof(*abilities), NULL);
5153}
5154
5155/**
5156 * ice_aq_set_input_pin_cfg - set input pin config
5157 * @hw: pointer to the HW struct
5158 * @input_idx: Input index
5159 * @flags1: Input flags
5160 * @flags2: Input flags
5161 * @freq: Frequency in Hz
5162 * @phase_delay: Delay in ps
5163 *
5164 * Set CGU input config (0x0C62)
5165 * Return: 0 on success or negative value on failure.
5166 */
5167int
5168ice_aq_set_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 flags1, u8 flags2,
5169 u32 freq, s32 phase_delay)
5170{
5171 struct ice_aqc_set_cgu_input_config *cmd;
5172 struct ice_aq_desc desc;
5173
5174 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_input_config);
5175 cmd = &desc.params.set_cgu_input_config;
5176 cmd->input_idx = input_idx;
5177 cmd->flags1 = flags1;
5178 cmd->flags2 = flags2;
5179 cmd->freq = cpu_to_le32(freq);
5180 cmd->phase_delay = cpu_to_le32(phase_delay);
5181
5182 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
5183}
5184
5185/**
5186 * ice_aq_get_input_pin_cfg - get input pin config
5187 * @hw: pointer to the HW struct
5188 * @input_idx: Input index
5189 * @status: Pin status
5190 * @type: Pin type
5191 * @flags1: Input flags
5192 * @flags2: Input flags
5193 * @freq: Frequency in Hz
5194 * @phase_delay: Delay in ps
5195 *
5196 * Get CGU input config (0x0C63)
5197 * Return: 0 on success or negative value on failure.
5198 */
5199int
5200ice_aq_get_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 *status, u8 *type,
5201 u8 *flags1, u8 *flags2, u32 *freq, s32 *phase_delay)
5202{
5203 struct ice_aqc_get_cgu_input_config *cmd;
5204 struct ice_aq_desc desc;
5205 int ret;
5206
5207 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_input_config);
5208 cmd = &desc.params.get_cgu_input_config;
5209 cmd->input_idx = input_idx;
5210
5211 ret = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
5212 if (!ret) {
5213 if (status)
5214 *status = cmd->status;
5215 if (type)
5216 *type = cmd->type;
5217 if (flags1)
5218 *flags1 = cmd->flags1;
5219 if (flags2)
5220 *flags2 = cmd->flags2;
5221 if (freq)
5222 *freq = le32_to_cpu(cmd->freq);
5223 if (phase_delay)
5224 *phase_delay = le32_to_cpu(cmd->phase_delay);
5225 }
5226
5227 return ret;
5228}
5229
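/* Usage sketch (not part of the driver): query only the frequency and phase
 * delay of a CGU input pin. The getter above tolerates NULL for any output
 * the caller does not need, so the remaining fields are simply skipped.
 */
static int __maybe_unused
ice_example_get_pin_freq(struct ice_hw *hw, u8 pin, u32 *freq, s32 *phase)
{
	return ice_aq_get_input_pin_cfg(hw, pin, NULL, NULL, NULL, NULL,
					freq, phase);
}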
5230/**
5231 * ice_aq_set_output_pin_cfg - set output pin config
5232 * @hw: pointer to the HW struct
5233 * @output_idx: Output index
5234 * @flags: Output flags
5235 * @src_sel: Index of DPLL block
5236 * @freq: Output frequency
5237 * @phase_delay: Output phase compensation
5238 *
5239 * Set CGU output config (0x0C64)
5240 * Return: 0 on success or negative value on failure.
5241 */
5242int
5243ice_aq_set_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 flags,
5244 u8 src_sel, u32 freq, s32 phase_delay)
5245{
5246 struct ice_aqc_set_cgu_output_config *cmd;
5247 struct ice_aq_desc desc;
5248
5249 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_output_config);
5250 cmd = &desc.params.set_cgu_output_config;
5251 cmd->output_idx = output_idx;
5252 cmd->flags = flags;
5253 cmd->src_sel = src_sel;
5254 cmd->freq = cpu_to_le32(freq);
5255 cmd->phase_delay = cpu_to_le32(phase_delay);
5256
5257 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
5258}
5259
5260/**
5261 * ice_aq_get_output_pin_cfg - get output pin config
5262 * @hw: pointer to the HW struct
5263 * @output_idx: Output index
5264 * @flags: Output flags
5265 * @src_sel: Internal DPLL source
5266 * @freq: Output frequency
5267 * @src_freq: Source frequency
5268 *
5269 * Get CGU output config (0x0C65)
5270 * Return: 0 on success or negative value on failure.
5271 */
5272int
5273ice_aq_get_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 *flags,
5274 u8 *src_sel, u32 *freq, u32 *src_freq)
5275{
5276 struct ice_aqc_get_cgu_output_config *cmd;
5277 struct ice_aq_desc desc;
5278 int ret;
5279
5280 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_output_config);
5281 cmd = &desc.params.get_cgu_output_config;
5282 cmd->output_idx = output_idx;
5283
5284 ret = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
5285 if (!ret) {
5286 if (flags)
5287 *flags = cmd->flags;
5288 if (src_sel)
5289 *src_sel = cmd->src_sel;
5290 if (freq)
5291 *freq = le32_to_cpu(cmd->freq);
5292 if (src_freq)
5293 *src_freq = le32_to_cpu(cmd->src_freq);
5294 }
5295
5296 return ret;
5297}
5298
5299/**
5300 * ice_aq_get_cgu_dpll_status - get dpll status
5301 * @hw: pointer to the HW struct
5302 * @dpll_num: DPLL index
5303 * @ref_state: Reference clock state
5304 * @config: current DPLL config
5305 * @dpll_state: current DPLL state
5306 * @phase_offset: Phase offset in ns
5307 * @eec_mode: EEC mode
5308 *
5309 * Get CGU DPLL status (0x0C66)
5310 * Return: 0 on success or negative value on failure.
5311 */
5312int
5313ice_aq_get_cgu_dpll_status(struct ice_hw *hw, u8 dpll_num, u8 *ref_state,
5314 u8 *dpll_state, u8 *config, s64 *phase_offset,
5315 u8 *eec_mode)
5316{
5317 struct ice_aqc_get_cgu_dpll_status *cmd;
8a3a565f
AK
5318 struct ice_aq_desc desc;
5319 int status;
5320
5321 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_dpll_status);
5322 cmd = &desc.params.get_cgu_dpll_status;
5323 cmd->dpll_num = dpll_num;
5324
5325 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
5326 if (!status) {
5327 *ref_state = cmd->ref_state;
5328 *dpll_state = cmd->dpll_state;
5329 *config = cmd->config;
5330 *phase_offset = le32_to_cpu(cmd->phase_offset_h);
5331 *phase_offset <<= 32;
5332 *phase_offset += le32_to_cpu(cmd->phase_offset_l);
8278a6a4 5333 *phase_offset = sign_extend64(*phase_offset, 47);
8a3a565f
AK
5334 *eec_mode = cmd->eec_mode;
5335 }
5336
5337 return status;
5338}
5339
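/* Usage sketch (hypothetical caller): read one DPLL and log its state and
 * the phase offset. Unlike the pin config getters above, every output
 * pointer must be valid here because the helper dereferences them all.
 */
static int __maybe_unused
ice_example_log_dpll(struct ice_hw *hw, u8 dpll_num)
{
	u8 ref_state, dpll_state, config, eec_mode;
	s64 phase_offset;
	int err;

	err = ice_aq_get_cgu_dpll_status(hw, dpll_num, &ref_state,
					 &dpll_state, &config,
					 &phase_offset, &eec_mode);
	if (err)
		return err;

	/* phase_offset is a 48-bit two's complement quantity that the
	 * helper above has already sign-extended to 64 bits
	 */
	ice_debug(hw, ICE_DBG_INIT, "DPLL %u: state 0x%x, offset %lld ns\n",
		  dpll_num, dpll_state, phase_offset);
	return 0;
}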
5340/**
5341 * ice_aq_set_cgu_dpll_config - set dpll config
5342 * @hw: pointer to the HW struct
5343 * @dpll_num: DPLL index
5344 * @ref_state: Reference clock state
5345 * @config: DPLL config
5346 * @eec_mode: EEC mode
5347 *
5348 * Set CGU DPLL config (0x0C67)
5349 * Return: 0 on success or negative value on failure.
5350 */
5351int
5352ice_aq_set_cgu_dpll_config(struct ice_hw *hw, u8 dpll_num, u8 ref_state,
5353 u8 config, u8 eec_mode)
5354{
5355 struct ice_aqc_set_cgu_dpll_config *cmd;
5356 struct ice_aq_desc desc;
5357
5358 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_dpll_config);
5359 cmd = &desc.params.set_cgu_dpll_config;
5360 cmd->dpll_num = dpll_num;
5361 cmd->ref_state = ref_state;
5362 cmd->config = config;
5363 cmd->eec_mode = eec_mode;
5364
5365 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
5366}
5367
5368/**
5369 * ice_aq_set_cgu_ref_prio - set input reference priority
5370 * @hw: pointer to the HW struct
5371 * @dpll_num: DPLL index
5372 * @ref_idx: Reference pin index
5373 * @ref_priority: Reference input priority
5374 *
5375 * Set CGU reference priority (0x0C68)
5376 * Return: 0 on success or negative value on failure.
5377 */
5378int
5379ice_aq_set_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx,
5380 u8 ref_priority)
5381{
5382 struct ice_aqc_set_cgu_ref_prio *cmd;
5383 struct ice_aq_desc desc;
5384
5385 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_ref_prio);
5386 cmd = &desc.params.set_cgu_ref_prio;
5387 cmd->dpll_num = dpll_num;
5388 cmd->ref_idx = ref_idx;
5389 cmd->ref_priority = ref_priority;
5390
5391 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
5392}
5393
5394/**
5395 * ice_aq_get_cgu_ref_prio - get input reference priority
5396 * @hw: pointer to the HW struct
5397 * @dpll_num: DPLL index
5398 * @ref_idx: Reference pin index
5399 * @ref_prio: Reference input priority
5400 *
5401 * Get CGU reference priority (0x0C69)
5402 * Return: 0 on success or negative value on failure.
5403 */
5404int
5405ice_aq_get_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx,
5406 u8 *ref_prio)
5407{
5408 struct ice_aqc_get_cgu_ref_prio *cmd;
5409 struct ice_aq_desc desc;
5410 int status;
5411
5412 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_ref_prio);
5413 cmd = &desc.params.get_cgu_ref_prio;
5414 cmd->dpll_num = dpll_num;
5415 cmd->ref_idx = ref_idx;
5416
5417 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
5418 if (!status)
5419 *ref_prio = cmd->ref_priority;
5420
5421 return status;
5422}
5423
5424/**
5425 * ice_aq_get_cgu_info - get cgu info
5426 * @hw: pointer to the HW struct
5427 * @cgu_id: CGU ID
5428 * @cgu_cfg_ver: CGU config version
5429 * @cgu_fw_ver: CGU firmware version
5430 *
5431 * Get CGU info (0x0C6A)
5432 * Return: 0 on success or negative value on failure.
5433 */
5434int
5435ice_aq_get_cgu_info(struct ice_hw *hw, u32 *cgu_id, u32 *cgu_cfg_ver,
5436 u32 *cgu_fw_ver)
5437{
5438 struct ice_aqc_get_cgu_info *cmd;
5439 struct ice_aq_desc desc;
5440 int status;
5441
5442 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_info);
5443 cmd = &desc.params.get_cgu_info;
5444
5445 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
5446 if (!status) {
5447 *cgu_id = le32_to_cpu(cmd->cgu_id);
5448 *cgu_cfg_ver = le32_to_cpu(cmd->cgu_cfg_ver);
5449 *cgu_fw_ver = le32_to_cpu(cmd->cgu_fw_ver);
5450 }
5451
5452 return status;
5453}
5454
5455/**
5456 * ice_aq_set_phy_rec_clk_out - set RCLK phy out
5457 * @hw: pointer to the HW struct
5458 * @phy_output: PHY reference clock output pin
5459 * @enable: GPIO state to be applied
5460 * @freq: PHY output frequency
5461 *
5462 * Set phy recovered clock as reference (0x0630)
5463 * Return: 0 on success or negative value on failure.
5464 */
5465int
5466ice_aq_set_phy_rec_clk_out(struct ice_hw *hw, u8 phy_output, bool enable,
5467 u32 *freq)
5468{
5469 struct ice_aqc_set_phy_rec_clk_out *cmd;
5470 struct ice_aq_desc desc;
5471 int status;
5472
5473 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_rec_clk_out);
5474 cmd = &desc.params.set_phy_rec_clk_out;
5475 cmd->phy_output = phy_output;
5476 cmd->port_num = ICE_AQC_SET_PHY_REC_CLK_OUT_CURR_PORT;
5477 cmd->flags = enable & ICE_AQC_SET_PHY_REC_CLK_OUT_OUT_EN;
5478 cmd->freq = cpu_to_le32(*freq);
5479
5480 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
5481 if (!status)
5482 *freq = le32_to_cpu(cmd->freq);
5483
5484 return status;
5485}
5486
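/* Usage sketch (illustrative only): enable the recovered clock output for
 * one PHY pin of the current port. Note that @freq is an in/out argument:
 * the requested value is sent to firmware and then overwritten with the
 * frequency firmware reports back; the initial value below is a placeholder.
 */
static int __maybe_unused
ice_example_ena_rclk(struct ice_hw *hw, u8 phy_output)
{
	u32 freq = 0;
	int err;

	err = ice_aq_set_phy_rec_clk_out(hw, phy_output, true, &freq);
	if (!err)
		ice_debug(hw, ICE_DBG_INIT, "recovered clock pin %u: %u Hz\n",
			  phy_output, freq);
	return err;
}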
5487/**
5488 * ice_aq_get_phy_rec_clk_out - get phy recovered signal info
5489 * @hw: pointer to the HW struct
5490 * @phy_output: PHY reference clock output pin
5491 * @port_num: Port number
5492 * @flags: PHY flags
5493 * @node_handle: PHY node handle
5494 *
5495 * Get PHY recovered clock output info (0x0631)
5496 * Return: 0 on success or negative value on failure.
5497 */
5498int
5499ice_aq_get_phy_rec_clk_out(struct ice_hw *hw, u8 *phy_output, u8 *port_num,
5500 u8 *flags, u16 *node_handle)
5501{
5502 struct ice_aqc_get_phy_rec_clk_out *cmd;
5503 struct ice_aq_desc desc;
5504 int status;
5505
5506 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_rec_clk_out);
5507 cmd = &desc.params.get_phy_rec_clk_out;
5508 cmd->phy_output = *phy_output;
5509
5510 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
5511 if (!status) {
5512 *phy_output = cmd->phy_output;
5513 if (port_num)
5514 *port_num = cmd->port_num;
5515 if (flags)
5516 *flags = cmd->flags;
5517 if (node_handle)
5518 *node_handle = le16_to_cpu(cmd->node_handle);
5519 }
5520
5521 return status;
5522}
5523
4da71a77
KK
5524/**
5525 * ice_aq_get_sensor_reading
5526 * @hw: pointer to the HW struct
5527 * @data: pointer to data to be read from the sensor
5528 *
5529 * Get sensor reading (0x0632)
5530 */
5531int ice_aq_get_sensor_reading(struct ice_hw *hw,
5532 struct ice_aqc_get_sensor_reading_resp *data)
5533{
5534 struct ice_aqc_get_sensor_reading *cmd;
5535 struct ice_aq_desc desc;
5536 int status;
5537
5538 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sensor_reading);
5539 cmd = &desc.params.get_sensor_reading;
5540#define ICE_INTERNAL_TEMP_SENSOR_FORMAT 0
5541#define ICE_INTERNAL_TEMP_SENSOR 0
5542 cmd->sensor = ICE_INTERNAL_TEMP_SENSOR;
5543 cmd->format = ICE_INTERNAL_TEMP_SENSOR_FORMAT;
5544
5545 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
5546 if (!status)
5547 memcpy(data, &desc.params.get_sensor_reading_resp,
5548 sizeof(*data));
5549
5550 return status;
5551}
5552
334cb062
AV
5553/**
5554 * ice_replay_pre_init - replay pre initialization
f9867df6 5555 * @hw: pointer to the HW struct
334cb062
AV
5556 *
5557 * Initializes required config data for VSI, FD, ACL, and RSS before replay.
5558 */
5e24d598 5559static int ice_replay_pre_init(struct ice_hw *hw)
334cb062
AV
5560{
5561 struct ice_switch_info *sw = hw->switch_info;
5562 u8 i;
5563
5564 /* Delete old entries from the replay filter list head if there are any */
5565 ice_rm_all_sw_replay_rule_info(hw);
5566 /* At the start of replay, move entries into the replay_rules list;
5567 * this allows adding rule entries back to the filt_rules list,
5568 * which is the operational list.
5569 */
c36a2b97 5570 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
334cb062
AV
5571 list_replace_init(&sw->recp_list[i].filt_rules,
5572 &sw->recp_list[i].filt_replay_rules);
b126bd6b 5573 ice_sched_replay_agg_vsi_preinit(hw);
334cb062
AV
5574
5575 return 0;
5576}
5577
5578/**
5579 * ice_replay_vsi - replay VSI configuration
f9867df6 5580 * @hw: pointer to the HW struct
334cb062
AV
5581 * @vsi_handle: driver VSI handle
5582 *
5583 * Restore all VSI configuration after reset. It is required to call this
5584 * function with main VSI first.
5585 */
5e24d598 5586int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
334cb062 5587{
5e24d598 5588 int status;
334cb062
AV
5589
5590 if (!ice_is_vsi_valid(hw, vsi_handle))
d54699e2 5591 return -EINVAL;
334cb062
AV
5592
5593 /* Replay pre-initialization if there is any */
5594 if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
5595 status = ice_replay_pre_init(hw);
5596 if (status)
5597 return status;
5598 }
c90ed40c
TN
5599 /* Replay per VSI all RSS configurations */
5600 status = ice_replay_rss_cfg(hw, vsi_handle);
5601 if (status)
5602 return status;
334cb062
AV
5603 /* Replay per VSI all filters */
5604 status = ice_replay_vsi_all_fltr(hw, vsi_handle);
b126bd6b
KP
5605 if (!status)
5606 status = ice_replay_vsi_agg(hw, vsi_handle);
334cb062
AV
5607 return status;
5608}
5609
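/* Usage sketch (not part of the driver): replay order after a reset. The
 * main VSI must go first because only ICE_MAIN_VSI_HANDLE triggers the
 * pre-init step in ice_replay_vsi(). ICE_MAX_VSI is assumed here as the
 * upper bound on software VSI handles.
 */
static int __maybe_unused ice_example_replay_all_vsi(struct ice_hw *hw)
{
	int err;
	u16 i;

	err = ice_replay_vsi(hw, ICE_MAIN_VSI_HANDLE);
	if (err)
		return err;

	for (i = 0; i < ICE_MAX_VSI; i++) {
		if (i == ICE_MAIN_VSI_HANDLE || !ice_is_vsi_valid(hw, i))
			continue;

		err = ice_replay_vsi(hw, i);
		if (err)
			return err;
	}

	return 0;
}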
5610/**
5611 * ice_replay_post - post replay configuration cleanup
f9867df6 5612 * @hw: pointer to the HW struct
334cb062
AV
5613 *
5614 * Post replay cleanup.
5615 */
5616void ice_replay_post(struct ice_hw *hw)
5617{
5618 /* Delete old entries from replay filter list head */
5619 ice_rm_all_sw_replay_rule_info(hw);
b126bd6b 5620 ice_sched_replay_agg(hw);
334cb062
AV
5621}
5622
45d3d428
AV
5623/**
5624 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
5625 * @hw: ptr to the hardware info
36517fd3 5626 * @reg: offset of 64 bit HW register to read from
45d3d428
AV
5627 * @prev_stat_loaded: bool to specify if previous stats are loaded
5628 * @prev_stat: ptr to previous loaded stat value
5629 * @cur_stat: ptr to current stat value
5630 */
c8b7abdd 5631void
36517fd3
JK
5632ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
5633 u64 *prev_stat, u64 *cur_stat)
45d3d428 5634{
36517fd3 5635 u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);
45d3d428
AV
5636
5637 /* device stats are not reset at PFR, so they likely will not be zeroed
36517fd3
JK
5638 * when the driver starts. Thus, save the value from the first read
5639 * without adding to the statistic value so that we report stats which
5640 * count up from zero.
45d3d428 5641 */
36517fd3 5642 if (!prev_stat_loaded) {
45d3d428 5643 *prev_stat = new_data;
36517fd3
JK
5644 return;
5645 }
5646
5647 /* Calculate the difference between the new and old values, and then
5648 * add it to the software stat value.
5649 */
45d3d428 5650 if (new_data >= *prev_stat)
36517fd3 5651 *cur_stat += new_data - *prev_stat;
45d3d428
AV
5652 else
5653 /* to manage the potential roll-over */
36517fd3
JK
5654 *cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;
5655
5656 /* Update the previously stored value to prepare for next read */
5657 *prev_stat = new_data;
45d3d428
AV
5658}
5659
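/* Worked example (illustrative): how the 40-bit roll-over handling above
 * behaves when the counter wraps between two reads:
 *   *prev_stat = 0xFFFFFFFFF0 (raw value before the wrap)
 *   new_data   = 0x0000000010 (raw value after the wrap)
 * Since new_data < *prev_stat, the increment becomes
 *   (new_data + BIT_ULL(40)) - *prev_stat = 0x20,
 * i.e. the 0x10 counted before the wrap plus the 0x10 counted after it.
 * A typical caller keeps prev/cur in its own stats structure:
 */
static void __maybe_unused
ice_example_update_stat(struct ice_hw *hw, u32 reg, bool first_read,
			u64 *prev, u64 *cur)
{
	/* on the first read after reset pass prev_stat_loaded == false so
	 * the HW value only seeds the baseline and is not added to *cur
	 */
	ice_stat_update40(hw, reg, !first_read, prev, cur);
}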
5660/**
5661 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
5662 * @hw: ptr to the hardware info
36517fd3 5663 * @reg: offset of HW register to read from
45d3d428
AV
5664 * @prev_stat_loaded: bool to specify if previous stats are loaded
5665 * @prev_stat: ptr to previous loaded stat value
5666 * @cur_stat: ptr to current stat value
5667 */
c8b7abdd
BA
5668void
5669ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
5670 u64 *prev_stat, u64 *cur_stat)
45d3d428
AV
5671{
5672 u32 new_data;
5673
5674 new_data = rd32(hw, reg);
5675
5676 /* device stats are not reset at PFR, so they likely will not be zeroed
36517fd3
JK
5677 * when the driver starts. Thus, save the value from the first read
5678 * without adding to the statistic value so that we report stats which
5679 * count up from zero.
45d3d428 5680 */
36517fd3 5681 if (!prev_stat_loaded) {
45d3d428 5682 *prev_stat = new_data;
36517fd3
JK
5683 return;
5684 }
5685
5686 /* Calculate the difference between the new and old values, and then
5687 * add it to the software stat value.
5688 */
45d3d428 5689 if (new_data >= *prev_stat)
36517fd3 5690 *cur_stat += new_data - *prev_stat;
45d3d428
AV
5691 else
5692 /* to manage the potential roll-over */
36517fd3
JK
5693 *cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;
5694
5695 /* Update the previously stored value to prepare for next read */
5696 *prev_stat = new_data;
45d3d428 5697}
7b9ffc76
AV
5698
5699/**
5700 * ice_sched_query_elem - query element information from HW
5701 * @hw: pointer to the HW struct
5702 * @node_teid: node TEID to be queried
5703 * @buf: buffer to element information
5704 *
5705 * This function queries HW element information
5706 */
5e24d598 5707int
7b9ffc76 5708ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
b3c38904 5709 struct ice_aqc_txsched_elem_data *buf)
7b9ffc76
AV
5710{
5711 u16 buf_size, num_elem_ret = 0;
5e24d598 5712 int status;
7b9ffc76
AV
5713
5714 buf_size = sizeof(*buf);
5715 memset(buf, 0, buf_size);
b3c38904 5716 buf->node_teid = cpu_to_le32(node_teid);
7b9ffc76
AV
5717 status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
5718 NULL);
5719 if (status || num_elem_ret != 1)
5720 ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
5721 return status;
5722}
ea78ce4d 5723
43113ff7
KK
5724/**
5725 * ice_aq_read_i2c
5726 * @hw: pointer to the hw struct
5727 * @topo_addr: topology address for a device to communicate with
5728 * @bus_addr: 7-bit I2C bus address
5729 * @addr: I2C memory address (I2C offset) with up to 16 bits
5730 * @params: I2C parameters: bit [7] - Repeated start,
5731 * bits [6:5] data offset size,
5732 * bit [4] - I2C address type,
5733 * bits [3:0] - data size to read (0-16 bytes)
5734 * @data: pointer to data (0 to 16 bytes) to be read from the I2C device
5735 * @cd: pointer to command details structure or NULL
5736 *
5737 * Read I2C (0x06E2)
5738 */
5739int
5740ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
5741 u16 bus_addr, __le16 addr, u8 params, u8 *data,
5742 struct ice_sq_cd *cd)
5743{
5744 struct ice_aq_desc desc = { 0 };
5745 struct ice_aqc_i2c *cmd;
5746 u8 data_size;
5747 int status;
5748
5749 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_i2c);
fcf9b695 5750 cmd = &desc.params.read_write_i2c;
43113ff7
KK
5751
5752 if (!data)
5753 return -EINVAL;
5754
5755 data_size = FIELD_GET(ICE_AQC_I2C_DATA_SIZE_M, params);
5756
5757 cmd->i2c_bus_addr = cpu_to_le16(bus_addr);
5758 cmd->topo_addr = topo_addr;
5759 cmd->i2c_params = params;
5760 cmd->i2c_addr = addr;
5761
5762 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
5763 if (!status) {
5764 struct ice_aqc_read_i2c_resp *resp;
5765 u8 i;
5766
5767 resp = &desc.params.read_i2c_resp;
5768 for (i = 0; i < data_size; i++) {
5769 *data = resp->i2c_data[i];
5770 data++;
5771 }
5772 }
5773
5774 return status;
5775}
5776
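/* Usage sketch (illustrative, with placeholder values): read one byte from
 * an I2C device behind the link topology. Only ICE_AQC_I2C_DATA_SIZE_M is
 * taken from the real register layout; the zeroed topo_addr and the bus
 * address are placeholders a real caller would fill from its netlist node.
 */
static int __maybe_unused
ice_example_read_i2c_byte(struct ice_hw *hw, u16 bus_addr, u16 offset, u8 *val)
{
	struct ice_aqc_link_topo_addr topo_addr = {};
	u8 params = FIELD_PREP(ICE_AQC_I2C_DATA_SIZE_M, 1);

	return ice_aq_read_i2c(hw, topo_addr, bus_addr, cpu_to_le16(offset),
			       params, val, NULL);
}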
fcf9b695
KK
5777/**
5778 * ice_aq_write_i2c
5779 * @hw: pointer to the hw struct
5780 * @topo_addr: topology address for a device to communicate with
5781 * @bus_addr: 7-bit I2C bus address
5782 * @addr: I2C memory address (I2C offset) with up to 16 bits
5783 * @params: I2C parameters: bit [4] - I2C address type, bits [3:0] - data size to write (0-7 bytes)
5784 * @data: pointer to data (0 to 4 bytes) to be written to the I2C device
5785 * @cd: pointer to command details structure or NULL
5786 *
5787 * Write I2C (0x06E3)
5788 *
5789 * Return:
5790 * * 0 - Successful write to the i2c device
5791 * * -EINVAL - Data size greater than 4 bytes
5792 * * -EIO - FW error
5793 */
5794int
5795ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
bf15bb38 5796 u16 bus_addr, __le16 addr, u8 params, const u8 *data,
fcf9b695
KK
5797 struct ice_sq_cd *cd)
5798{
5799 struct ice_aq_desc desc = { 0 };
5800 struct ice_aqc_i2c *cmd;
5801 u8 data_size;
5802
5803 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_i2c);
5804 cmd = &desc.params.read_write_i2c;
5805
5806 data_size = FIELD_GET(ICE_AQC_I2C_DATA_SIZE_M, params);
5807
5808 /* data_size limited to 4 */
5809 if (data_size > 4)
5810 return -EINVAL;
5811
5812 cmd->i2c_bus_addr = cpu_to_le16(bus_addr);
5813 cmd->topo_addr = topo_addr;
5814 cmd->i2c_params = params;
5815 cmd->i2c_addr = addr;
5816
5817 memcpy(cmd->i2c_data, data, data_size);
5818
5819 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
5820}
5821
3bb6324b
MM
5822/**
5823 * ice_aq_set_gpio
5824 * @hw: pointer to the hw struct
5825 * @gpio_ctrl_handle: GPIO controller node handle
5826 * @pin_idx: IO Number of the GPIO that needs to be set
5827 * @value: SW-provided IO value to set in the LSB
5828 * @cd: pointer to command details structure or NULL
5829 *
5830 * Sends 0x06EC AQ command to set the GPIO pin state that's part of the topology
5831 */
5832int
5833ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
5834 struct ice_sq_cd *cd)
5835{
5836 struct ice_aqc_gpio *cmd;
5837 struct ice_aq_desc desc;
5838
5839 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_gpio);
5840 cmd = &desc.params.read_write_gpio;
5841 cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle);
5842 cmd->gpio_num = pin_idx;
5843 cmd->gpio_val = value ? 1 : 0;
5844
d54699e2 5845 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3bb6324b
MM
5846}
5847
5848/**
5849 * ice_aq_get_gpio
5850 * @hw: pointer to the hw struct
5851 * @gpio_ctrl_handle: GPIO controller node handle
5852 * @pin_idx: IO number of the GPIO to read
5853 * @value: IO value read
5854 * @cd: pointer to command details structure or NULL
5855 *
5856 * Sends 0x06ED AQ command to get the value of a GPIO signal which is part of
5857 * the topology
5858 */
5859int
5860ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
5861 bool *value, struct ice_sq_cd *cd)
5862{
5863 struct ice_aqc_gpio *cmd;
5864 struct ice_aq_desc desc;
5e24d598 5865 int status;
3bb6324b
MM
5866
5867 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio);
5868 cmd = &desc.params.read_write_gpio;
5869 cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle);
5870 cmd->gpio_num = pin_idx;
5871
5872 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
5873 if (status)
d54699e2 5874 return status;
3bb6324b
MM
5875
5876 *value = !!cmd->gpio_val;
5877 return 0;
5878}
5879
ea78ce4d 5880/**
1bd50f2d 5881 * ice_is_fw_api_min_ver
ea78ce4d 5882 * @hw: pointer to the hardware structure
1bd50f2d
PG
5883 * @maj: major version
5884 * @min: minor version
5885 * @patch: patch version
ea78ce4d 5886 *
1bd50f2d 5887 * Checks if the firmware API is minimum version
ea78ce4d 5888 */
1bd50f2d 5889static bool ice_is_fw_api_min_ver(struct ice_hw *hw, u8 maj, u8 min, u8 patch)
ea78ce4d 5890{
1bd50f2d
PG
5891 if (hw->api_maj_ver == maj) {
5892 if (hw->api_min_ver > min)
ea78ce4d 5893 return true;
1bd50f2d 5894 if (hw->api_min_ver == min && hw->api_patch >= patch)
ea78ce4d 5895 return true;
1bd50f2d 5896 } else if (hw->api_maj_ver > maj) {
ea78ce4d
PG
5897 return true;
5898 }
5899
5900 return false;
5901}
5902
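/* Worked example (illustrative): with the comparison above, a check against
 * a minimum API version of 1.7.2 evaluates as follows:
 *   running FW API 1.8.0 -> true  (minor above the minimum)
 *   running FW API 1.7.2 -> true  (minor equal, patch meets the minimum)
 *   running FW API 1.7.1 -> false (patch below the minimum)
 *   running FW API 2.0.0 -> true  (major above the minimum)
 * A new feature gate would follow the same pattern as the wrappers below;
 * the ICE_FW_API_FOO_* macros here are hypothetical and shown only to
 * illustrate the shape of such a check.
 */
#if 0	/* illustration only - these macros do not exist */
static bool ice_fw_supports_foo(struct ice_hw *hw)
{
	return ice_is_fw_api_min_ver(hw, ICE_FW_API_FOO_MAJ,
				     ICE_FW_API_FOO_MIN,
				     ICE_FW_API_FOO_PATCH);
}
#endif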
1bd50f2d
PG
5903/**
5904 * ice_fw_supports_link_override
5905 * @hw: pointer to the hardware structure
5906 *
5907 * Checks if the firmware supports link override
5908 */
5909bool ice_fw_supports_link_override(struct ice_hw *hw)
5910{
5911 return ice_is_fw_api_min_ver(hw, ICE_FW_API_LINK_OVERRIDE_MAJ,
5912 ICE_FW_API_LINK_OVERRIDE_MIN,
5913 ICE_FW_API_LINK_OVERRIDE_PATCH);
5914}
5915
ea78ce4d
PG
5916/**
5917 * ice_get_link_default_override
5918 * @ldo: pointer to the link default override struct
5919 * @pi: pointer to the port info struct
5920 *
5921 * Gets the link default override for a port
5922 */
5e24d598 5923int
ea78ce4d
PG
5924ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
5925 struct ice_port_info *pi)
5926{
5927 u16 i, tlv, tlv_len, tlv_start, buf, offset;
5928 struct ice_hw *hw = pi->hw;
5e24d598 5929 int status;
ea78ce4d
PG
5930
5931 status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
5932 ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
5933 if (status) {
9228d8b2 5934 ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n");
ea78ce4d
PG
5935 return status;
5936 }
5937
5938 /* Each port has its own config; calculate for our port */
5939 tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
5940 ICE_SR_PFA_LINK_OVERRIDE_OFFSET;
5941
5942 /* link options first */
5943 status = ice_read_sr_word(hw, tlv_start, &buf);
5944 if (status) {
9228d8b2 5945 ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
ea78ce4d
PG
5946 return status;
5947 }
5a259f8e 5948 ldo->options = FIELD_GET(ICE_LINK_OVERRIDE_OPT_M, buf);
ea78ce4d
PG
5949 ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
5950 ICE_LINK_OVERRIDE_PHY_CFG_S;
5951
5952 /* link PHY config */
5953 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
5954 status = ice_read_sr_word(hw, offset, &buf);
5955 if (status) {
9228d8b2 5956 ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy config.\n");
ea78ce4d
PG
5957 return status;
5958 }
5959 ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;
5960
5961 /* PHY types low */
5962 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
5963 for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
5964 status = ice_read_sr_word(hw, (offset + i), &buf);
5965 if (status) {
9228d8b2 5966 ice_debug(hw, ICE_DBG_INIT, "Failed to read override PHY types.\n");
ea78ce4d
PG
5967 return status;
5968 }
5969 /* shift 16 bits at a time to fill 64 bits */
5970 ldo->phy_type_low |= ((u64)buf << (i * 16));
5971 }
5972
5973 /* PHY types high */
5974 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
5975 ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
5976 for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
5977 status = ice_read_sr_word(hw, (offset + i), &buf);
5978 if (status) {
9228d8b2 5979 ice_debug(hw, ICE_DBG_INIT, "Failed to read override PHY types.\n");
ea78ce4d
PG
5980 return status;
5981 }
5982 /* shift 16 bits at a time to fill 64 bits */
5983 ldo->phy_type_high |= ((u64)buf << (i * 16));
5984 }
5985
5986 return status;
5987}
5ee30564
PG
5988
5989/**
5990 * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled
5991 * @caps: get PHY capability data
5992 */
5993bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
5994{
5995 if (caps->caps & ICE_AQC_PHY_AN_MODE ||
bdeff971
LF
5996 caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
5997 ICE_AQC_PHY_AN_EN_CLAUSE73 |
5998 ICE_AQC_PHY_AN_EN_CLAUSE37))
5ee30564
PG
5999 return true;
6000
6001 return false;
6002}
7d9c9b79
DE
6003
6004/**
6005 * ice_aq_set_lldp_mib - Set the LLDP MIB
6006 * @hw: pointer to the HW struct
6007 * @mib_type: Local, Remote or both Local and Remote MIBs
6008 * @buf: pointer to the caller-supplied buffer to store the MIB block
6009 * @buf_size: size of the buffer (in bytes)
6010 * @cd: pointer to command details structure or NULL
6011 *
6012 * Set the LLDP MIB. (0x0A08)
6013 */
5e24d598 6014int
7d9c9b79
DE
6015ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
6016 struct ice_sq_cd *cd)
6017{
6018 struct ice_aqc_lldp_set_local_mib *cmd;
6019 struct ice_aq_desc desc;
6020
6021 cmd = &desc.params.lldp_set_mib;
6022
6023 if (buf_size == 0 || !buf)
d54699e2 6024 return -EINVAL;
7d9c9b79
DE
6025
6026 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);
6027
6028 desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD);
6029 desc.datalen = cpu_to_le16(buf_size);
6030
6031 cmd->type = mib_type;
6032 cmd->length = cpu_to_le16(buf_size);
6033
6034 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
6035}
34295a36
DE
6036
6037/**
ef860480 6038 * ice_fw_supports_lldp_fltr_ctrl - check NVM version supports lldp_fltr_ctrl
34295a36
DE
6039 * @hw: pointer to HW struct
6040 */
6041bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
6042{
6043 if (hw->mac_type != ICE_MAC_E810)
6044 return false;
6045
1bd50f2d
PG
6046 return ice_is_fw_api_min_ver(hw, ICE_FW_API_LLDP_FLTR_MAJ,
6047 ICE_FW_API_LLDP_FLTR_MIN,
6048 ICE_FW_API_LLDP_FLTR_PATCH);
34295a36
DE
6049}
6050
6051/**
6052 * ice_lldp_fltr_add_remove - add or remove a LLDP Rx switch filter
6053 * @hw: pointer to HW struct
6054 * @vsi_num: absolute HW index for VSI
6055 * @add: boolean for if adding or removing a filter
6056 */
5e24d598 6057int
34295a36
DE
6058ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
6059{
6060 struct ice_aqc_lldp_filter_ctrl *cmd;
6061 struct ice_aq_desc desc;
6062
6063 cmd = &desc.params.lldp_filter_ctrl;
6064
6065 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl);
6066
6067 if (add)
6068 cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD;
6069 else
6070 cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE;
6071
6072 cmd->vsi_num = cpu_to_le16(vsi_num);
6073
6074 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
6075}
0a02944f 6076
a4f68f37
TC
6077/**
6078 * ice_lldp_execute_pending_mib - execute LLDP pending MIB request
6079 * @hw: pointer to HW struct
6080 */
6081int ice_lldp_execute_pending_mib(struct ice_hw *hw)
6082{
6083 struct ice_aq_desc desc;
6084
6085 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_execute_pending_mib);
6086
6087 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
6088}
6089
0a02944f
AV
6090/**
6091 * ice_fw_supports_report_dflt_cfg
6092 * @hw: pointer to the hardware structure
6093 *
6094 * Checks if the firmware supports report default configuration
6095 */
6096bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
6097{
1bd50f2d
PG
6098 return ice_is_fw_api_min_ver(hw, ICE_FW_API_REPORT_DFLT_CFG_MAJ,
6099 ICE_FW_API_REPORT_DFLT_CFG_MIN,
6100 ICE_FW_API_REPORT_DFLT_CFG_PATCH);
0a02944f 6101}
1d0e28a9
BC
6102
6103/* each of the indexes into the following array matches the speed of a return
6104 * value from the list of AQ returned speeds in the range
6105 * ICE_AQ_LINK_SPEED_10MB .. ICE_AQ_LINK_SPEED_100GB, excluding
6106 * ICE_AQ_LINK_SPEED_UNKNOWN which is BIT(15) and maps to BIT(14) in this
6107 * array. The array is sized for the 16 bit link_speed value
6108 * returned by the firmware and is indexed
6109 * by [fls(speed) - 1]
6110 */
b2dbde3a 6111static const u32 ice_aq_to_link_speed[] = {
1d0e28a9
BC
6112 SPEED_10, /* BIT(0) */
6113 SPEED_100,
6114 SPEED_1000,
6115 SPEED_2500,
6116 SPEED_5000,
6117 SPEED_10000,
6118 SPEED_20000,
6119 SPEED_25000,
6120 SPEED_40000,
6121 SPEED_50000,
6122 SPEED_100000, /* BIT(10) */
24407a01 6123 SPEED_200000,
1d0e28a9
BC
6124};
6125
6126/**
6127 * ice_get_link_speed - get integer speed from table
6128 * @index: array index from fls(aq speed) - 1
6129 *
6130 * Returns: u32 value containing integer speed
6131 */
6132u32 ice_get_link_speed(u16 index)
6133{
b2dbde3a
MS
6134 if (index >= ARRAY_SIZE(ice_aq_to_link_speed))
6135 return 0;
6136
1d0e28a9
BC
6137 return ice_aq_to_link_speed[index];
6138}
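/* Usage sketch (hypothetical caller): convert a single-bit AQ link speed
 * value to an integer Mbps value using the fls()-based indexing described
 * in the comment above the table.
 */
static u32 __maybe_unused ice_example_aq_speed_to_mbps(u16 aq_link_speed)
{
	if (!aq_link_speed)
		return 0;

	return ice_get_link_speed(fls(aq_link_speed) - 1);
}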