ice: Refactor VF reset
linux-2.6-block.git: drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"

/**
 * ice_validate_vf_id - helper to check if VF ID is valid
 * @pf: pointer to the PF structure
 * @vf_id: the ID of the VF to check
 */
static int ice_validate_vf_id(struct ice_pf *pf, u16 vf_id)
{
	/* vf_id range is only valid for 0-255, and should always be unsigned */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(ice_pf_to_dev(pf), "Invalid VF ID: %u\n", vf_id);
		return -EINVAL;
	}
	return 0;
}

/**
 * ice_check_vf_init - helper to check if VF init complete
 * @pf: pointer to the PF structure
 * @vf: the pointer to the VF to check
 */
static int ice_check_vf_init(struct ice_pf *pf, struct ice_vf *vf)
{
	if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
		dev_err(ice_pf_to_dev(pf), "VF ID: %u in reset. Try again.\n",
			vf->vf_id);
		return -EBUSY;
	}
	return 0;
}

/**
 * ice_err_to_virt_err - translate errors for VF return code
 * @ice_err: error return code
 */
static enum virtchnl_status_code ice_err_to_virt_err(enum ice_status ice_err)
{
	switch (ice_err) {
	case ICE_SUCCESS:
		return VIRTCHNL_STATUS_SUCCESS;
	case ICE_ERR_BAD_PTR:
	case ICE_ERR_INVAL_SIZE:
	case ICE_ERR_DEVICE_NOT_SUPPORTED:
	case ICE_ERR_PARAM:
	case ICE_ERR_CFG:
		return VIRTCHNL_STATUS_ERR_PARAM;
	case ICE_ERR_NO_MEMORY:
		return VIRTCHNL_STATUS_ERR_NO_MEMORY;
	case ICE_ERR_NOT_READY:
	case ICE_ERR_RESET_FAILED:
	case ICE_ERR_FW_API_VER:
	case ICE_ERR_AQ_ERROR:
	case ICE_ERR_AQ_TIMEOUT:
	case ICE_ERR_AQ_FULL:
	case ICE_ERR_AQ_NO_WORK:
	case ICE_ERR_AQ_EMPTY:
		return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
	default:
		return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
	}
}

/**
 * ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
 * @pf: pointer to the PF structure
 * @v_opcode: operation code
 * @v_retval: return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 */
static void
ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
		    enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
{
	struct ice_hw *hw = &pf->hw;
	unsigned int i;

	ice_for_each_vf(pf, i) {
		struct ice_vf *vf = &pf->vf[i];

		/* Not all VFs are enabled so skip the ones that are not */
		if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
		    !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
			continue;

		/* Ignore return value on purpose - a given VF may fail, but
		 * we need to keep going and send to all of them
		 */
		ice_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, msg,
				      msglen, NULL);
	}
}

/**
 * ice_set_pfe_link - Set the link speed/status of the virtchnl_pf_event
 * @vf: pointer to the VF structure
 * @pfe: pointer to the virtchnl_pf_event to set link speed/status for
 * @ice_link_speed: link speed specified by ICE_AQ_LINK_SPEED_*
 * @link_up: whether or not to set the link up/down
 */
static void
ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
		 int ice_link_speed, bool link_up)
{
	if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
		pfe->event_data.link_event_adv.link_status = link_up;
		/* Speed in Mbps */
		pfe->event_data.link_event_adv.link_speed =
			ice_conv_link_speed_to_virtchnl(true, ice_link_speed);
	} else {
		pfe->event_data.link_event.link_status = link_up;
		/* Legacy method for virtchnl link speeds */
		pfe->event_data.link_event.link_speed =
			(enum virtchnl_link_speed)
			ice_conv_link_speed_to_virtchnl(false, ice_link_speed);
	}
}

/**
 * ice_vf_has_no_qs_ena - check if the VF has any Rx or Tx queues enabled
 * @vf: the VF to check
 *
 * Returns true if the VF has no Rx and no Tx queues enabled and returns false
 * otherwise
 */
static bool ice_vf_has_no_qs_ena(struct ice_vf *vf)
{
	return (!bitmap_weight(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF) &&
		!bitmap_weight(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF));
}

/**
 * ice_is_vf_link_up - check if the VF's link is up
 * @vf: VF to check if link is up
 */
static bool ice_is_vf_link_up(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;

	if (ice_check_vf_init(pf, vf))
		return false;

	if (ice_vf_has_no_qs_ena(vf))
		return false;
	else if (vf->link_forced)
		return vf->link_up;
	else
		return pf->hw.port_info->phy.link_info.link_info &
			ICE_AQ_LINK_UP;
}

/**
 * ice_vc_notify_vf_link_state - Inform a VF of link status
 * @vf: pointer to the VF structure
 *
 * send a link status message to a single VF
 */
static void ice_vc_notify_vf_link_state(struct ice_vf *vf)
{
	struct virtchnl_pf_event pfe = { 0 };
	struct ice_hw *hw = &vf->pf->hw;

	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = PF_EVENT_SEVERITY_INFO;

	if (ice_is_vf_link_up(vf))
		ice_set_pfe_link(vf, &pfe,
				 hw->port_info->phy.link_info.link_speed, true);
	else
		ice_set_pfe_link(vf, &pfe, ICE_AQ_LINK_SPEED_UNKNOWN, false);

	ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
			      VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
			      sizeof(pfe), NULL);
}

/**
 * ice_free_vf_res - Free a VF's resources
 * @vf: pointer to the VF info
 */
static void ice_free_vf_res(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	int i, last_vector_idx;

	/* First, disable VF's configuration API to prevent OS from
	 * accessing the VF's VSI after it's freed or invalidated.
	 */
	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);

	/* free VSI and disconnect it from the parent uplink */
	if (vf->lan_vsi_idx) {
		ice_vsi_release(pf->vsi[vf->lan_vsi_idx]);
		vf->lan_vsi_idx = 0;
		vf->lan_vsi_num = 0;
		vf->num_mac = 0;
	}

	last_vector_idx = vf->first_vector_idx + pf->num_msix_per_vf - 1;

	/* clear VF MDD event information */
	memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
	memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));

	/* Disable interrupts so that VF starts in a known state */
	for (i = vf->first_vector_idx; i <= last_vector_idx; i++) {
		wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M);
		ice_flush(&pf->hw);
	}
	/* reset some of the state variables keeping track of the resources */
	clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
	clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
}

/**
 * ice_dis_vf_mappings
 * @vf: pointer to the VF structure
 */
static void ice_dis_vf_mappings(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	struct device *dev;
	int first, last, v;
	struct ice_hw *hw;

	hw = &pf->hw;
	vsi = pf->vsi[vf->lan_vsi_idx];

	dev = ice_pf_to_dev(pf);
	wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
	wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);

	first = vf->first_vector_idx;
	last = first + pf->num_msix_per_vf - 1;
	for (v = first; v <= last; v++) {
		u32 reg;

		reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) &
			GLINT_VECT2FUNC_IS_PF_M) |
		       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
			GLINT_VECT2FUNC_PF_NUM_M));
		wr32(hw, GLINT_VECT2FUNC(v), reg);
	}

	if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG)
		wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0);
	else
		dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");

	if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
	else
		dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
}

/**
 * ice_sriov_free_msix_res - Reset/free any used MSIX resources
 * @pf: pointer to the PF structure
 *
 * Since no MSIX entries are taken from the pf->irq_tracker, just clear
 * the pf->sriov_base_vector.
 *
 * Returns 0 on success, and -EINVAL on error.
 */
static int ice_sriov_free_msix_res(struct ice_pf *pf)
{
	struct ice_res_tracker *res;

	if (!pf)
		return -EINVAL;

	res = pf->irq_tracker;
	if (!res)
		return -EINVAL;

	/* give back irq_tracker resources used */
	WARN_ON(pf->sriov_base_vector < res->num_entries);

	pf->sriov_base_vector = 0;

	return 0;
}

/**
 * ice_set_vf_state_qs_dis - Set VF queues state to disabled
 * @vf: pointer to the VF structure
 */
void ice_set_vf_state_qs_dis(struct ice_vf *vf)
{
	/* Clear Rx/Tx enabled queues flag */
	bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF);
	bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
	clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
}

/**
 * ice_dis_vf_qs - Disable the VF queues
 * @vf: pointer to the VF structure
 */
static void ice_dis_vf_qs(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;

	vsi = pf->vsi[vf->lan_vsi_idx];

	ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
	ice_vsi_stop_all_rx_rings(vsi);
	ice_set_vf_state_qs_dis(vf);
}

/**
 * ice_free_vfs - Free all VFs
 * @pf: pointer to the PF structure
 */
void ice_free_vfs(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	unsigned int tmp, i;

	if (!pf->vf)
		return;

	while (test_and_set_bit(__ICE_VF_DIS, pf->state))
		usleep_range(1000, 2000);

	/* Disable IOV before freeing resources. This lets any VF drivers
	 * running in the host get themselves cleaned up before we yank
	 * the carpet out from underneath their feet.
	 */
	if (!pci_vfs_assigned(pf->pdev))
		pci_disable_sriov(pf->pdev);
	else
		dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");

	/* Avoid wait time by stopping all VFs at the same time */
	ice_for_each_vf(pf, i)
		if (test_bit(ICE_VF_STATE_QS_ENA, pf->vf[i].vf_states))
			ice_dis_vf_qs(&pf->vf[i]);

	tmp = pf->num_alloc_vfs;
	pf->num_qps_per_vf = 0;
	pf->num_alloc_vfs = 0;
	for (i = 0; i < tmp; i++) {
		if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) {
			/* disable VF qp mappings and set VF disable state */
			ice_dis_vf_mappings(&pf->vf[i]);
			set_bit(ICE_VF_STATE_DIS, pf->vf[i].vf_states);
			ice_free_vf_res(&pf->vf[i]);
		}
	}

	if (ice_sriov_free_msix_res(pf))
		dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");

	devm_kfree(dev, pf->vf);
	pf->vf = NULL;

	/* This check is for when the driver is unloaded while VFs are
	 * assigned. Setting the number of VFs to 0 through sysfs is caught
	 * before this function ever gets called.
	 */
	if (!pci_vfs_assigned(pf->pdev)) {
		unsigned int vf_id;

		/* Acknowledge VFLR for all VFs. Without this, VFs will fail to
		 * work correctly when SR-IOV gets re-enabled.
		 */
		for (vf_id = 0; vf_id < tmp; vf_id++) {
			u32 reg_idx, bit_idx;

			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
			wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
		}
	}
	clear_bit(__ICE_VF_DIS, pf->state);
	clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
}

/**
 * ice_trigger_vf_reset - Reset a VF on HW
 * @vf: pointer to the VF structure
 * @is_vflr: true if VFLR was issued, false if not
 * @is_pfr: true if the reset was triggered due to a previous PFR
 *
 * Trigger hardware to start a reset for a particular VF. Expects the caller
 * to wait the proper amount of time to allow hardware to reset the VF before
 * it cleans up and restores VF functionality.
 */
static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
{
	struct ice_pf *pf = vf->pf;
	u32 reg, reg_idx, bit_idx;
	unsigned int vf_abs_id, i;
	struct device *dev;
	struct ice_hw *hw;

	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;

	/* Inform VF that it is no longer active, as a warning */
	clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);

	/* Disable VF's configuration API during reset. The flag is re-enabled
	 * when it's safe again to access VF's VSI.
	 */
	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);

	/* VF_MBX_ARQLEN is cleared by PFR, so the driver needs to clear it
	 * in the case of VFR. If this is done for PFR, it can mess up VF
	 * resets because the VF driver may already have started cleanup
	 * by the time we get here.
	 */
	if (!is_pfr)
		wr32(hw, VF_MBX_ARQLEN(vf->vf_id), 0);

	/* In the case of a VFLR, the HW has already reset the VF and we
	 * just need to clean up, so don't hit the VFRTRIG register.
	 */
	if (!is_vflr) {
		/* reset VF using VPGEN_VFRTRIG reg */
		reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
		reg |= VPGEN_VFRTRIG_VFSWR_M;
		wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
	}
	/* clear the VFLR bit in GLGEN_VFLRSTAT */
	reg_idx = (vf_abs_id) / 32;
	bit_idx = (vf_abs_id) % 32;
	wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
	ice_flush(hw);

	wr32(hw, PF_PCI_CIAA,
	     VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S));
	for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) {
		reg = rd32(hw, PF_PCI_CIAD);
		/* no transactions pending so stop polling */
		if ((reg & VF_TRANS_PENDING_M) == 0)
			break;

		dev_err(dev, "VF %u PCI transactions stuck\n", vf->vf_id);
		udelay(ICE_PCI_CIAD_WAIT_DELAY_US);
	}
}

/**
 * ice_vsi_manage_pvid - Enable or disable port VLAN for VSI
 * @vsi: the VSI to update
 * @pvid_info: VLAN ID and QoS used to set the PVID VSI context field
 * @enable: true to enable the PVID, false to disable it
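 *
 * pvid_info is assumed to follow the standard VLAN TCI layout that the
 * callers of this function build (VLAN ID in bits 11:0, 802.1p priority in
 * the upper bits), so the value can be written into the PVID context field
 * unchanged.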
 */
static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 pvid_info, bool enable)
{
	struct ice_hw *hw = &vsi->back->hw;
	struct ice_aqc_vsi_props *info;
	struct ice_vsi_ctx *ctxt;
	enum ice_status status;
	int ret = 0;

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return -ENOMEM;

	ctxt->info = vsi->info;
	info = &ctxt->info;
	if (enable) {
		info->vlan_flags = ICE_AQ_VSI_VLAN_MODE_UNTAGGED |
			ICE_AQ_VSI_PVLAN_INSERT_PVID |
			ICE_AQ_VSI_VLAN_EMOD_STR;
		info->sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
	} else {
		info->vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING |
			ICE_AQ_VSI_VLAN_MODE_ALL;
		info->sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
	}

	info->pvid = cpu_to_le16(pvid_info);
	info->valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
					   ICE_AQ_VSI_PROP_SW_VALID);

	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
	if (status) {
		dev_info(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %s aq_err %s\n",
			 ice_stat_str(status),
			 ice_aq_str(hw->adminq.sq_last_status));
		ret = -EIO;
		goto out;
	}

	vsi->info.vlan_flags = info->vlan_flags;
	vsi->info.sw_flags2 = info->sw_flags2;
	vsi->info.pvid = info->pvid;
out:
	kfree(ctxt);
	return ret;
}

/**
 * ice_vf_vsi_setup - Set up a VF VSI
 * @pf: board private structure
 * @pi: pointer to the port_info instance
 * @vf_id: defines VF ID to which this VSI connects.
 *
 * Returns pointer to the successfully allocated VSI struct on success,
 * otherwise returns NULL on failure.
 */
static struct ice_vsi *
ice_vf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, u16 vf_id)
{
	return ice_vsi_setup(pf, pi, ICE_VSI_VF, vf_id);
}

/**
 * ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space
 * @pf: pointer to PF structure
 * @vf: pointer to VF that the first MSIX vector index is being calculated for
 *
 * This returns the first MSIX vector index in PF space that is used by this
 * VF. This index is used when accessing PF relative registers such as
 * GLINT_VECT2FUNC and GLINT_DYN_CTL.
 * This will always be the OICR index in the AVF driver so any functionality
 * using vf->first_vector_idx for queue configuration will have to increment by
 * 1 to avoid meddling with the OICR index.
 */
static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
{
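	/* Worked example (illustrative values, not tied to any one device):
	 * with sriov_base_vector = 200 and num_msix_per_vf = 17, VF 0's
	 * first PF-space vector is 200, VF 1's is 217, VF 2's is 234; each
	 * VF owns a contiguous block of num_msix_per_vf vectors.
	 */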
	return pf->sriov_base_vector + vf->vf_id * pf->num_msix_per_vf;
}

/**
 * ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN
 * @vf: VF to add VLAN filters for
 *
 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
 * always re-adds either a VLAN 0 or port VLAN based filter after reset.
 */
static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf)
{
	struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
	struct device *dev = ice_pf_to_dev(vf->pf);
	u16 vlan_id = 0;
	int err;

	if (vf->port_vlan_info) {
		err = ice_vsi_manage_pvid(vsi, vf->port_vlan_info, true);
		if (err) {
			dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n",
				vf->vf_id, err);
			return err;
		}

		vlan_id = vf->port_vlan_info & VLAN_VID_MASK;
	}

	/* vlan_id will either be 0 or the port VLAN number */
	err = ice_vsi_add_vlan(vsi, vlan_id, ICE_FWD_TO_VSI);
	if (err) {
		dev_err(dev, "failed to add %s VLAN %u filter for VF %u, error %d\n",
			vf->port_vlan_info ? "port" : "", vlan_id, vf->vf_id,
			err);
		return err;
	}

	return 0;
}

/**
 * ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's perm_addr/LAA
 * @vf: VF to add MAC filters for
 *
 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
 * always re-adds a broadcast filter and the VF's perm_addr/LAA after reset.
 */
static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
{
	struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
	struct device *dev = ice_pf_to_dev(vf->pf);
	enum ice_status status;
	u8 broadcast[ETH_ALEN];

	eth_broadcast_addr(broadcast);
	status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
	if (status) {
		dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %s\n",
			vf->vf_id, ice_stat_str(status));
		return ice_status_to_errno(status);
	}

	vf->num_mac++;

	if (is_valid_ether_addr(vf->dflt_lan_addr.addr)) {
		status = ice_fltr_add_mac(vsi, vf->dflt_lan_addr.addr,
					  ICE_FWD_TO_VSI);
		if (status) {
			dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %s\n",
				&vf->dflt_lan_addr.addr[0], vf->vf_id,
				ice_stat_str(status));
			return ice_status_to_errno(status);
		}
		vf->num_mac++;
	}

	return 0;
}

/**
 * ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value
 * @vf: VF to configure trust setting for
 */
static void ice_vf_set_host_trust_cfg(struct ice_vf *vf)
{
	if (vf->trusted)
		set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
	else
		clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
}

/**
 * ice_ena_vf_msix_mappings - enable VF MSIX mappings in hardware
 * @vf: VF to enable MSIX mappings for
 *
 * Some of the registers need to be indexed/configured using hardware global
 * device values and other registers need 0-based values, which represent PF
 * based values.
 */
static void ice_ena_vf_msix_mappings(struct ice_vf *vf)
{
	int device_based_first_msix, device_based_last_msix;
	int pf_based_first_msix, pf_based_last_msix, v;
	struct ice_pf *pf = vf->pf;
	int device_based_vf_id;
	struct ice_hw *hw;
	u32 reg;

	hw = &pf->hw;
	pf_based_first_msix = vf->first_vector_idx;
	pf_based_last_msix = (pf_based_first_msix + pf->num_msix_per_vf) - 1;

	device_based_first_msix = pf_based_first_msix +
		pf->hw.func_caps.common_cap.msix_vector_first_id;
	device_based_last_msix =
		(device_based_first_msix + pf->num_msix_per_vf) - 1;
	device_based_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

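	/* The same vectors carry two names: e.g. if the device reserves
	 * vector 0 for itself (msix_vector_first_id = 1, an illustrative
	 * value), PF-space vectors 200..216 for this VF are programmed below
	 * as device-space vectors 201..217.
	 */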
	reg = (((device_based_first_msix << VPINT_ALLOC_FIRST_S) &
		VPINT_ALLOC_FIRST_M) |
	       ((device_based_last_msix << VPINT_ALLOC_LAST_S) &
		VPINT_ALLOC_LAST_M) | VPINT_ALLOC_VALID_M);
	wr32(hw, VPINT_ALLOC(vf->vf_id), reg);

	reg = (((device_based_first_msix << VPINT_ALLOC_PCI_FIRST_S)
		& VPINT_ALLOC_PCI_FIRST_M) |
	       ((device_based_last_msix << VPINT_ALLOC_PCI_LAST_S) &
		VPINT_ALLOC_PCI_LAST_M) | VPINT_ALLOC_PCI_VALID_M);
	wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);

	/* map the interrupts to its functions */
	for (v = pf_based_first_msix; v <= pf_based_last_msix; v++) {
		reg = (((device_based_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
			GLINT_VECT2FUNC_VF_NUM_M) |
		       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
			GLINT_VECT2FUNC_PF_NUM_M));
		wr32(hw, GLINT_VECT2FUNC(v), reg);
	}

	/* Map mailbox interrupt to VF MSI-X vector 0 */
	wr32(hw, VPINT_MBX_CTL(device_based_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M);
}

/**
 * ice_ena_vf_q_mappings - enable Rx/Tx queue mappings for a VF
 * @vf: VF to enable the mappings for
 * @max_txq: max Tx queues allowed on the VF's VSI
 * @max_rxq: max Rx queues allowed on the VF's VSI
 */
static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq)
{
	struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_hw *hw = &vf->pf->hw;
	u32 reg;

	/* set regardless of mapping mode */
	wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);

	/* VF Tx queues allocation */
	if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) {
		/* set the VF PF Tx queue range
		 * VFNUMQ value should be set to (number of queues - 1). A value
		 * of 0 means 1 queue and a value of 255 means 256 queues
		 */
		reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
			VPLAN_TX_QBASE_VFFIRSTQ_M) |
		       (((max_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
			VPLAN_TX_QBASE_VFNUMQ_M));
		wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
	} else {
		dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
	}

	/* set regardless of mapping mode */
	wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id), VPLAN_RXQ_MAPENA_RX_ENA_M);

	/* VF Rx queues allocation */
	if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) {
		/* set the VF PF Rx queue range
		 * VFNUMQ value should be set to (number of queues - 1). A value
		 * of 0 means 1 queue and a value of 255 means 256 queues
		 */
		reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
			VPLAN_RX_QBASE_VFFIRSTQ_M) |
		       (((max_rxq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
			VPLAN_RX_QBASE_VFNUMQ_M));
		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
	} else {
		dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
	}
}

/**
 * ice_ena_vf_mappings - enable VF MSIX and queue mapping
 * @vf: pointer to the VF structure
 */
static void ice_ena_vf_mappings(struct ice_vf *vf)
{
	struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];

	ice_ena_vf_msix_mappings(vf);
	ice_ena_vf_q_mappings(vf, vsi->alloc_txq, vsi->alloc_rxq);
}

/**
 * ice_determine_res
 * @pf: pointer to the PF structure
 * @avail_res: available resources in the PF structure
 * @max_res: maximum resources that can be given per VF
 * @min_res: minimum resources that can be given per VF
 *
 * Returns a non-zero value if resources (queues/vectors) are available or
 * returns zero if the PF cannot accommodate all num_alloc_vfs.
 */
static int
ice_determine_res(struct ice_pf *pf, u16 avail_res, u16 max_res, u16 min_res)
{
	bool checked_min_res = false;
	int res;

	/* Start by checking if the PF can assign the max number of resources
	 * for all num_alloc_vfs: if yes, return that number per VF. If not,
	 * halve the request (rounding up) and check again. Repeat until even
	 * the minimum resources are not available; in that case return 0.
	 */
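	/* Worked example (illustrative numbers) with 40 VFs and a 256-entry
	 * pool: 16 per VF needs 640 (too many), 8 needs 320 (still too
	 * many), 4 needs 160, which fits, so 4 is returned.
	 */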
	res = max_res;
	while ((res >= min_res) && !checked_min_res) {
		int num_all_res;

		num_all_res = pf->num_alloc_vfs * res;
		if (num_all_res <= avail_res)
			return res;

		if (res == min_res)
			checked_min_res = true;

		res = DIV_ROUND_UP(res, 2);
	}
	return 0;
}

/**
 * ice_calc_vf_reg_idx - Calculate the VF's register index in the PF space
 * @vf: VF to calculate the register index for
 * @q_vector: a q_vector associated to the VF
 */
int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
{
	struct ice_pf *pf;

	if (!vf || !q_vector)
		return -EINVAL;

	pf = vf->pf;

	/* always add one to account for the OICR being the first MSIX */
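	/* Illustrative values: with sriov_base_vector = 200 and
	 * num_msix_per_vf = 17, VF 1's q_vector 0 lands on register index
	 * 200 + 17 * 1 + 0 + 1 = 218 (index 217 being that VF's OICR).
	 */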
	return pf->sriov_base_vector + pf->num_msix_per_vf * vf->vf_id +
		q_vector->v_idx + 1;
}

/**
 * ice_get_max_valid_res_idx - Get the max valid resource index
 * @res: pointer to the resource to find the max valid index for
 *
 * Start from the end of the ice_res_tracker and return right when we find the
 * first res->list entry with the ICE_RES_VALID_BIT set. This function is only
 * valid for SR-IOV because it is the only consumer that manipulates the
 * res->end and this is always called when res->end is set to res->num_entries.
 */
static int ice_get_max_valid_res_idx(struct ice_res_tracker *res)
{
	int i;

	if (!res)
		return -EINVAL;

	for (i = res->num_entries - 1; i >= 0; i--)
		if (res->list[i] & ICE_RES_VALID_BIT)
			return i;

	return 0;
}

/**
 * ice_sriov_set_msix_res - Set any used MSIX resources
 * @pf: pointer to PF structure
 * @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs
 *
 * This function allows SR-IOV resources to be taken from the end of the PF's
 * allowed HW MSIX vectors so that the irq_tracker will not be affected. We
 * just set the pf->sriov_base_vector and return success.
 *
 * If there are not enough resources available, return an error. This should
 * always be caught by ice_set_per_vf_res().
 *
 * Return 0 on success, and -EINVAL when there are not enough MSIX vectors
 * in the PF's space available for SR-IOV.
 */
static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
{
	u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
	int vectors_used = pf->irq_tracker->num_entries;
	int sriov_base_vector;

	sriov_base_vector = total_vectors - num_msix_needed;

	/* make sure we only grab irq_tracker entries from the list end and
	 * that we have enough available MSIX vectors
	 */
	if (sriov_base_vector < vectors_used)
		return -EINVAL;

	pf->sriov_base_vector = sriov_base_vector;

	return 0;
}

/**
 * ice_set_per_vf_res - check if vectors and queues are available
 * @pf: pointer to the PF structure
 *
 * First, determine HW interrupts from common pool. If we allocate fewer VFs, we
 * get more vectors and can enable more queues per VF. Note that this does not
 * grab any vectors from the SW pool already allocated. Also note that all
 * vector counts include one for each VF's miscellaneous interrupt vector
 * (i.e. OICR).
 *
 * Minimum VFs - 2 vectors, 1 queue pair
 * Small VFs - 5 vectors, 4 queue pairs
 * Medium VFs - 17 vectors, 16 queue pairs
 *
 * Second, determine number of queue pairs per VF by starting with a pre-defined
 * maximum each VF supports. If this is not possible, then we adjust based on
 * queue pairs available on the device.
 *
 * Lastly, set queue and MSI-X VF variables tracked by the PF so it can be used
 * by each VF during VF initialization and reset.
 */
static int ice_set_per_vf_res(struct ice_pf *pf)
{
	int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
	int msix_avail_per_vf, msix_avail_for_sriov;
	struct device *dev = ice_pf_to_dev(pf);
	u16 num_msix_per_vf, num_txq, num_rxq;

	if (!pf->num_alloc_vfs || max_valid_res_idx < 0)
		return -EINVAL;

	/* determine MSI-X resources per VF */
	msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors -
		pf->irq_tracker->num_entries;
	msix_avail_per_vf = msix_avail_for_sriov / pf->num_alloc_vfs;
	if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) {
		num_msix_per_vf = ICE_NUM_VF_MSIX_MED;
	} else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_SMALL) {
		num_msix_per_vf = ICE_NUM_VF_MSIX_SMALL;
	} else if (msix_avail_per_vf >= ICE_MIN_INTR_PER_VF) {
		num_msix_per_vf = ICE_MIN_INTR_PER_VF;
	} else {
		dev_err(dev, "Only %d MSI-X interrupts available for SR-IOV. Not enough to support minimum of %d MSI-X interrupts per VF for %d VFs\n",
			msix_avail_for_sriov, ICE_MIN_INTR_PER_VF,
			pf->num_alloc_vfs);
		return -EIO;
	}
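	/* Example (illustrative numbers): 480 vectors left for SR-IOV across
	 * 40 VFs is 12 per VF, which selects the small tier above: 5 vectors
	 * per VF (1 OICR + 4 data interrupts) and therefore at most 4 queue
	 * pairs per VF below.
	 */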

	/* determine queue resources per VF */
	num_txq = ice_determine_res(pf, ice_get_avail_txq_count(pf),
				    min_t(u16,
					  num_msix_per_vf - ICE_NONQ_VECS_VF,
					  ICE_MAX_RSS_QS_PER_VF),
				    ICE_MIN_QS_PER_VF);

	num_rxq = ice_determine_res(pf, ice_get_avail_rxq_count(pf),
				    min_t(u16,
					  num_msix_per_vf - ICE_NONQ_VECS_VF,
					  ICE_MAX_RSS_QS_PER_VF),
				    ICE_MIN_QS_PER_VF);

	if (!num_txq || !num_rxq) {
		dev_err(dev, "Not enough queues to support minimum of %d queue pairs per VF for %d VFs\n",
			ICE_MIN_QS_PER_VF, pf->num_alloc_vfs);
		return -EIO;
	}

	if (ice_sriov_set_msix_res(pf, num_msix_per_vf * pf->num_alloc_vfs)) {
		dev_err(dev, "Unable to set MSI-X resources for %d VFs\n",
			pf->num_alloc_vfs);
		return -EINVAL;
	}

	/* only allow equal Tx/Rx queue count (i.e. queue pairs) */
	pf->num_qps_per_vf = min_t(int, num_txq, num_rxq);
	pf->num_msix_per_vf = num_msix_per_vf;
	dev_info(dev, "Enabling %d VFs with %d vectors and %d queues per VF\n",
		 pf->num_alloc_vfs, pf->num_msix_per_vf, pf->num_qps_per_vf);

	return 0;
}

/**
 * ice_clear_vf_reset_trigger - enable VF to access hardware
 * @vf: VF to enable hardware access for
 */
static void ice_clear_vf_reset_trigger(struct ice_vf *vf)
{
	struct ice_hw *hw = &vf->pf->hw;
	u32 reg;

	reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
	reg &= ~VPGEN_VFRTRIG_VFSWR_M;
	wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
	ice_flush(hw);
}

/**
 * ice_vf_set_vsi_promisc - set given VF VSI to given promiscuous mode(s)
 * @vf: pointer to the VF info
 * @vsi: the VSI being configured
 * @promisc_m: mask of promiscuous config bits
 * @rm_promisc: promisc flag request from the VF to remove or add filter
 *
 * This function configures VF VSI promiscuous mode, based on the VF requests,
 * for Unicast, Multicast and VLAN
 */
static enum ice_status
ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m,
		       bool rm_promisc)
{
	struct ice_pf *pf = vf->pf;
	enum ice_status status = 0;
	struct ice_hw *hw;

	hw = &pf->hw;
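	/* promisc_m is a caller-supplied mask such as ICE_UCAST_PROMISC_BITS
	 * or ICE_UCAST_VLAN_PROMISC_BITS (see ice_reset_vf); the VLAN-aware
	 * variants pair with the VLAN/port-VLAN branches below.
	 */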
	if (vsi->num_vlan) {
		status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
						  rm_promisc);
	} else if (vf->port_vlan_info) {
		if (rm_promisc)
			status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
						       vf->port_vlan_info);
		else
			status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
						     vf->port_vlan_info);
	} else {
		if (rm_promisc)
			status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
						       0);
		else
			status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
						     0);
	}

	return status;
}

static void ice_vf_clear_counters(struct ice_vf *vf)
{
	struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];

	vf->num_mac = 0;
	vsi->num_vlan = 0;
	memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
	memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
}

/**
 * ice_vf_pre_vsi_rebuild - tasks to be done prior to VSI rebuild
 * @vf: VF to perform pre VSI rebuild tasks
 *
 * These tasks are items that don't need to be amortized since they are most
 * likely called in a for loop with all VF(s) in the reset_all_vfs() case.
 */
static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
{
	ice_vf_clear_counters(vf);
	ice_clear_vf_reset_trigger(vf);
}

/**
 * ice_vf_rebuild_host_cfg - host admin configuration is persistent across reset
 * @vf: VF to rebuild host configuration on
 */
static void ice_vf_rebuild_host_cfg(struct ice_vf *vf)
{
	struct device *dev = ice_pf_to_dev(vf->pf);

	ice_vf_set_host_trust_cfg(vf);

	if (ice_vf_rebuild_host_mac_cfg(vf))
		dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n",
			vf->vf_id);

	if (ice_vf_rebuild_host_vlan_cfg(vf))
		dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n",
			vf->vf_id);
}

/**
 * ice_vf_rebuild_vsi_with_release - release and setup the VF's VSI
 * @vf: VF to release and setup the VSI for
 *
 * This is only called when a single VF is being reset (i.e. VFR, VFLR, host VF
 * configuration change, etc.).
 */
static int ice_vf_rebuild_vsi_with_release(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;

	vsi = pf->vsi[vf->lan_vsi_idx];
	ice_vsi_release(vsi);
	vsi = ice_vf_vsi_setup(pf, pf->hw.port_info, vf->vf_id);
	if (!vsi) {
		dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n");
		return -ENOMEM;
	}

	vf->lan_vsi_idx = vsi->idx;
	vf->lan_vsi_num = vsi->vsi_num;

	return 0;
}

/**
 * ice_vf_rebuild_vsi - rebuild the VF's VSI
 * @vf: VF to rebuild the VSI for
 *
 * This is only called when all VF(s) are being reset (i.e. PCIe Reset on the
 * host, PFR, CORER, etc.).
 */
static int ice_vf_rebuild_vsi(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;

	vsi = pf->vsi[vf->lan_vsi_idx];

	if (ice_vsi_rebuild(vsi, true)) {
		dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n",
			vf->vf_id);
		return -EIO;
	}
	/* vsi->idx will remain the same in this case so don't update
	 * vf->lan_vsi_idx
	 */
	vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
	vf->lan_vsi_num = vsi->vsi_num;

	return 0;
}

12bb018c
BC
1091/**
1092 * ice_vf_set_initialized - VF is ready for VIRTCHNL communication
1093 * @vf: VF to set in initialized state
1094 *
1095 * After this function the VF will be ready to receive/handle the
1096 * VIRTCHNL_OP_GET_VF_RESOURCES message
1097 */
1098static void ice_vf_set_initialized(struct ice_vf *vf)
1099{
1100 ice_set_vf_state_qs_dis(vf);
1101 clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
1102 clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
1103 clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
1104 set_bit(ICE_VF_STATE_INIT, vf->vf_states);
1105}
1106
/**
 * ice_vf_post_vsi_rebuild - tasks to do after the VF's VSI has been rebuilt
 * @vf: VF to perform tasks on
 */
static void ice_vf_post_vsi_rebuild(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_hw *hw;

	hw = &pf->hw;

	ice_vf_rebuild_host_cfg(vf);

	ice_vf_set_initialized(vf);
	ice_ena_vf_mappings(vf);
	wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
}

/**
 * ice_reset_all_vfs - reset all allocated VFs in one go
 * @pf: pointer to the PF structure
 * @is_vflr: true if VFLR was issued, false if not
 *
 * First, tell the hardware to reset each VF, then do all the waiting in one
 * chunk, and finally finish restoring each VF after the wait. This is useful
 * during PF routines which need to reset all VFs, as otherwise it must perform
 * these resets in a serialized fashion.
 *
 * Returns true if any VFs were reset, and false otherwise.
 */
bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	int v, i;

	/* If we don't have any VFs, then there is nothing to reset */
	if (!pf->num_alloc_vfs)
		return false;

	/* If VFs have been disabled, there is no need to reset */
	if (test_and_set_bit(__ICE_VF_DIS, pf->state))
		return false;

	/* Begin reset on all VFs at once */
	ice_for_each_vf(pf, v)
		ice_trigger_vf_reset(&pf->vf[v], is_vflr, true);

	/* HW requires some time to make sure it can flush the FIFO for a VF
	 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
	 * sequence to make sure that it has completed. We'll keep track of
	 * the VFs using a simple iterator that increments once that VF has
	 * finished resetting.
	 */
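	/* Note the bound below: at most 10 failed polls in total (each one
	 * adding a 10-20 us sleep) before we give up and warn.
	 */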
	for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
		/* Check each VF in sequence */
		while (v < pf->num_alloc_vfs) {
			u32 reg;

			vf = &pf->vf[v];
			reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
			if (!(reg & VPGEN_VFRSTAT_VFRD_M)) {
				/* only delay if the check failed */
				usleep_range(10, 20);
				break;
			}

			/* If the current VF has finished resetting, move on
			 * to the next VF in sequence.
			 */
			v++;
		}
	}

	/* Display a warning if at least one VF didn't manage to reset in
	 * time, but continue on with the operation.
	 */
	if (v < pf->num_alloc_vfs)
		dev_warn(dev, "VF reset check timeout\n");

	/* free VF resources to begin resetting the VSI state */
	ice_for_each_vf(pf, v) {
		vf = &pf->vf[v];

		ice_vf_pre_vsi_rebuild(vf);
		ice_vf_rebuild_vsi(vf);
		ice_vf_post_vsi_rebuild(vf);
	}

	ice_flush(hw);
	clear_bit(__ICE_VF_DIS, pf->state);

	return true;
}

/**
 * ice_is_vf_disabled
 * @vf: pointer to the VF info
 *
 * Returns true if the PF or VF is disabled, false otherwise.
 */
static bool ice_is_vf_disabled(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;

	/* If the PF has been disabled, there is no need to reset the VF
	 * until the PF is active again. Similarly, if the VF has been
	 * disabled, this means something else is resetting the VF, so we
	 * shouldn't continue. Otherwise, set disable VF state bit for actual
	 * reset, and continue.
	 */
	return (test_bit(__ICE_VF_DIS, pf->state) ||
		test_bit(ICE_VF_STATE_DIS, vf->vf_states));
}

/**
 * ice_reset_vf - Reset a particular VF
 * @vf: pointer to the VF structure
 * @is_vflr: true if VFLR was issued, false if not
 *
 * Returns true if the VF is currently in reset, resets successfully, or resets
 * are disabled and false otherwise.
 */
bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	struct device *dev;
	struct ice_hw *hw;
	bool rsd = false;
	u8 promisc_m;
	u32 reg;
	int i;

	dev = ice_pf_to_dev(pf);

	if (test_bit(__ICE_VF_RESETS_DISABLED, pf->state)) {
		dev_dbg(dev, "Trying to reset VF %d, but all VF resets are disabled\n",
			vf->vf_id);
		return true;
	}

	if (ice_is_vf_disabled(vf)) {
		dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",
			vf->vf_id);
		return true;
	}

	/* Set VF disable bit state here, before triggering reset */
	set_bit(ICE_VF_STATE_DIS, vf->vf_states);
	ice_trigger_vf_reset(vf, is_vflr, false);

	vsi = pf->vsi[vf->lan_vsi_idx];

	if (test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states))
		ice_dis_vf_qs(vf);

	/* Call Disable LAN Tx queue AQ whether or not queues are
	 * enabled. This is needed for successful completion of VFR.
	 */
	ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
			NULL, ICE_VF_RESET, vf->vf_id, NULL);

	hw = &pf->hw;
	/* poll VPGEN_VFRSTAT reg to make sure
	 * that reset is complete
	 */
	for (i = 0; i < 10; i++) {
		/* VF reset requires driver to first reset the VF and then
		 * poll the status register to make sure that the reset
		 * completed successfully.
		 */
		reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
		if (reg & VPGEN_VFRSTAT_VFRD_M) {
			rsd = true;
			break;
		}

		/* only sleep if the reset is not done */
		usleep_range(10, 20);
	}

	/* Display a warning if VF didn't manage to reset in time, but need to
	 * continue on with the operation.
	 */
	if (!rsd)
		dev_warn(dev, "VF reset check timeout on VF %d\n", vf->vf_id);

	/* disable promiscuous modes in case they were enabled
	 * ignore any error if disabling process failed
	 */
	if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
	    test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
		if (vf->port_vlan_info || vsi->num_vlan)
			promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
		else
			promisc_m = ICE_UCAST_PROMISC_BITS;

		vsi = pf->vsi[vf->lan_vsi_idx];
		if (ice_vf_set_vsi_promisc(vf, vsi, promisc_m, true))
			dev_err(dev, "disabling promiscuous mode failed\n");
	}

	ice_vf_pre_vsi_rebuild(vf);
	ice_vf_rebuild_vsi_with_release(vf);
	ice_vf_post_vsi_rebuild(vf);

	return true;
}

/**
 * ice_vc_notify_link_state - Inform all VFs on a PF of link status
 * @pf: pointer to the PF structure
 */
void ice_vc_notify_link_state(struct ice_pf *pf)
{
	int i;

	ice_for_each_vf(pf, i)
		ice_vc_notify_vf_link_state(&pf->vf[i]);
}

/**
 * ice_vc_notify_reset - Send pending reset message to all VFs
 * @pf: pointer to the PF structure
 *
 * indicate a pending reset to all VFs on a given PF
 */
void ice_vc_notify_reset(struct ice_pf *pf)
{
	struct virtchnl_pf_event pfe;

	if (!pf->num_alloc_vfs)
		return;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, VIRTCHNL_STATUS_SUCCESS,
			    (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
}

/**
 * ice_vc_notify_vf_reset - Notify VF of a reset event
 * @vf: pointer to the VF structure
 */
static void ice_vc_notify_vf_reset(struct ice_vf *vf)
{
	struct virtchnl_pf_event pfe;
	struct ice_pf *pf;

	if (!vf)
		return;

	pf = vf->pf;
	if (ice_validate_vf_id(pf, vf->vf_id))
		return;

	/* Bail out if the VF is in the disabled state or is neither
	 * initialized nor active; otherwise proceed with the notification
	 */
	if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
	     !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) ||
	    test_bit(ICE_VF_STATE_DIS, vf->vf_states))
		return;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT,
			      VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
			      NULL);
}

/**
 * ice_init_vf_vsi_res - initialize/setup VF VSI resources
 * @vf: VF to initialize/setup the VSI for
 *
 * This function creates a VSI for the VF, adds a VLAN 0 filter, and sets up
 * the VF VSI's broadcast filter. It is only used during initial VF creation.
 */
static int ice_init_vf_vsi_res(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	u8 broadcast[ETH_ALEN];
	enum ice_status status;
	struct ice_vsi *vsi;
	struct device *dev;
	int err;

	vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);

	dev = ice_pf_to_dev(pf);
	vsi = ice_vf_vsi_setup(pf, pf->hw.port_info, vf->vf_id);
	if (!vsi) {
		dev_err(dev, "Failed to create VF VSI\n");
		return -ENOMEM;
	}

	vf->lan_vsi_idx = vsi->idx;
	vf->lan_vsi_num = vsi->vsi_num;

	err = ice_vsi_add_vlan(vsi, 0, ICE_FWD_TO_VSI);
	if (err) {
		dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n",
			 vf->vf_id);
		goto release_vsi;
	}

	eth_broadcast_addr(broadcast);
	status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
	if (status) {
		dev_err(dev, "Failed to add broadcast MAC filter for VF %d, status %s\n",
			vf->vf_id, ice_stat_str(status));
		err = ice_status_to_errno(status);
		goto release_vsi;
	}

	vf->num_mac = 1;

	return 0;

release_vsi:
	ice_vsi_release(vsi);
	return err;
}

/**
 * ice_start_vfs - start VFs so they are ready to be used by SR-IOV
 * @pf: PF the VFs are associated with
 */
static int ice_start_vfs(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	int retval, i;

	ice_for_each_vf(pf, i) {
		struct ice_vf *vf = &pf->vf[i];

		ice_clear_vf_reset_trigger(vf);

		retval = ice_init_vf_vsi_res(vf);
		if (retval) {
			dev_err(ice_pf_to_dev(pf), "Failed to initialize VSI resources for VF %d, error %d\n",
				vf->vf_id, retval);
			goto teardown;
		}

		set_bit(ICE_VF_STATE_INIT, vf->vf_states);
		ice_ena_vf_mappings(vf);
		wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
	}

	ice_flush(hw);
	return 0;

teardown:
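	/* unwind in reverse; only VFs before the one that failed have
	 * mappings and a VSI to undo
	 */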
	for (i = i - 1; i >= 0; i--) {
		struct ice_vf *vf = &pf->vf[i];

		ice_dis_vf_mappings(vf);
		ice_vsi_release(pf->vsi[vf->lan_vsi_idx]);
	}

	return retval;
}

/**
 * ice_set_dflt_settings_vfs - set VF defaults during initialization/creation
 * @pf: PF holding reference to all VFs for default configuration
 */
static void ice_set_dflt_settings_vfs(struct ice_pf *pf)
{
	int i;

	ice_for_each_vf(pf, i) {
		struct ice_vf *vf = &pf->vf[i];

		vf->pf = pf;
		vf->vf_id = i;
		vf->vf_sw_id = pf->first_sw;
		/* assign default capabilities */
		set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vf->vf_caps);
		vf->spoofchk = true;
		vf->num_vf_qs = pf->num_qps_per_vf;
	}
}

/**
 * ice_alloc_vfs - allocate num_vfs in the PF structure
 * @pf: PF to store the allocated VFs in
 * @num_vfs: number of VFs to allocate
 */
static int ice_alloc_vfs(struct ice_pf *pf, int num_vfs)
{
	struct ice_vf *vfs;

	vfs = devm_kcalloc(ice_pf_to_dev(pf), num_vfs, sizeof(*vfs),
			   GFP_KERNEL);
	if (!vfs)
		return -ENOMEM;

	pf->vf = vfs;
	pf->num_alloc_vfs = num_vfs;

	return 0;
}

/**
 * ice_ena_vfs - enable VFs so they are ready to be used
 * @pf: pointer to the PF structure
 * @num_vfs: number of VFs to enable
 */
static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	int ret;

	/* Disable global interrupt 0 so we don't try to handle the VFLR. */
	wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
	     ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
	set_bit(__ICE_OICR_INTR_DIS, pf->state);
	ice_flush(hw);

	ret = pci_enable_sriov(pf->pdev, num_vfs);
	if (ret) {
		pf->num_alloc_vfs = 0;
		goto err_unroll_intr;
	}

	ret = ice_alloc_vfs(pf, num_vfs);
	if (ret)
		goto err_pci_disable_sriov;

	if (ice_set_per_vf_res(pf)) {
		dev_err(dev, "Not enough resources for %d VFs, try with fewer VFs\n",
			num_vfs);
		ret = -ENOSPC;
		goto err_unroll_sriov;
	}

	ice_set_dflt_settings_vfs(pf);

	if (ice_start_vfs(pf)) {
		dev_err(dev, "Failed to start VF(s)\n");
		ret = -EAGAIN;
		goto err_unroll_sriov;
	}

	clear_bit(__ICE_VF_DIS, pf->state);
	return 0;

err_unroll_sriov:
	devm_kfree(dev, pf->vf);
	pf->vf = NULL;
	pf->num_alloc_vfs = 0;
err_pci_disable_sriov:
	pci_disable_sriov(pf->pdev);
err_unroll_intr:
	/* rearm interrupts here */
	ice_irq_dynamic_ena(hw, NULL, NULL);
	clear_bit(__ICE_OICR_INTR_DIS, pf->state);
	return ret;
}

/**
 * ice_pf_state_is_nominal - checks the PF for nominal state
 * @pf: pointer to PF to check
 *
 * Check the PF's state for a collection of bits that would indicate
 * the PF is in a state that would inhibit normal operation for
 * driver functionality.
 *
 * Returns true if PF is in a nominal state.
 * Returns false otherwise
 */
static bool ice_pf_state_is_nominal(struct ice_pf *pf)
{
	DECLARE_BITMAP(check_bits, __ICE_STATE_NBITS) = { 0 };

	if (!pf)
		return false;

	bitmap_set(check_bits, 0, __ICE_STATE_NOMINAL_CHECK_BITS);
	if (bitmap_intersects(pf->state, check_bits, __ICE_STATE_NBITS))
		return false;

	return true;
}

/**
 * ice_pci_sriov_ena - Enable or change number of VFs
 * @pf: pointer to the PF structure
 * @num_vfs: number of VFs to allocate
 *
 * Returns 0 on success and negative on failure
 */
static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
{
	int pre_existing_vfs = pci_num_vf(pf->pdev);
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
		ice_free_vfs(pf);
	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
		return 0;

	if (num_vfs > pf->num_vfs_supported) {
		dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
			num_vfs, pf->num_vfs_supported);
		return -EOPNOTSUPP;
	}

	dev_info(dev, "Enabling %d VFs\n", num_vfs);
	err = ice_ena_vfs(pf, num_vfs);
	if (err) {
		dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
		return err;
	}

	set_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
	return 0;
}

/**
 * ice_check_sriov_allowed - check if SR-IOV is allowed based on various checks
 * @pf: PF to enable SR-IOV on
 */
static int ice_check_sriov_allowed(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);

	if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
		dev_err(dev, "This device is not capable of SR-IOV\n");
		return -EOPNOTSUPP;
	}

	if (ice_is_safe_mode(pf)) {
		dev_err(dev, "SR-IOV cannot be configured - Device is in Safe Mode\n");
		return -EOPNOTSUPP;
	}

	if (!ice_pf_state_is_nominal(pf)) {
		dev_err(dev, "Cannot enable SR-IOV, device not ready\n");
		return -EBUSY;
	}

	return 0;
}

/**
 * ice_sriov_configure - Enable or change number of VFs via sysfs
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate or 0 to free VFs
 *
 * This function is called when the user updates the number of VFs in sysfs. On
 * success return whatever num_vfs was set to by the caller. Return negative on
 * failure.
 */
int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct ice_pf *pf = pci_get_drvdata(pdev);
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	err = ice_check_sriov_allowed(pf);
	if (err)
		return err;

	if (!num_vfs) {
		if (!pci_vfs_assigned(pdev)) {
			ice_free_vfs(pf);
			return 0;
		}

		dev_err(dev, "can't free VFs because some are assigned to VMs.\n");
		return -EBUSY;
	}

	err = ice_pci_sriov_ena(pf, num_vfs);
	if (err)
		return err;

	return num_vfs;
}
007676b4
AV
1692
1693/**
1694 * ice_process_vflr_event - Free VF resources via IRQ calls
1695 * @pf: pointer to the PF structure
1696 *
df17b7e0 1697 * called from the VFLR IRQ handler to
007676b4
AV
1698 * free up VF resources and state variables
1699 */
1700void ice_process_vflr_event(struct ice_pf *pf)
1701{
1702 struct ice_hw *hw = &pf->hw;
53bb6698 1703 unsigned int vf_id;
007676b4
AV
1704 u32 reg;
1705
8d7189d2 1706 if (!test_and_clear_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
007676b4
AV
1707 !pf->num_alloc_vfs)
1708 return;
1709
005881bc 1710 ice_for_each_vf(pf, vf_id) {
007676b4
AV
1711 struct ice_vf *vf = &pf->vf[vf_id];
1712 u32 reg_idx, bit_idx;
1713
1714 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
1715 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
1716 /* read GLGEN_VFLRSTAT register to find out the flr VFs */
1717 reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
1718 if (reg & BIT(bit_idx))
1719 /* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */
1720 ice_reset_vf(vf, true);
1721 }
1722}
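/* Worked example of the reg_idx/bit_idx math above, with assumed
 * values: for func_caps.vf_base_id = 64 and vf_id = 37 the absolute VF
 * index is 101, so the VFLR status bit is GLGEN_VFLRSTAT(101 / 32 = 3),
 * bit position 101 % 32 = 5.
 */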
7c710869
AV
1723
1724/**
ff010eca 1725 * ice_vc_reset_vf - Perform software reset on the VF after informing the AVF
7c710869 1726 * @vf: pointer to the VF info
7c710869 1727 */
ff010eca 1728static void ice_vc_reset_vf(struct ice_vf *vf)
7c710869
AV
1729{
1730 ice_vc_notify_vf_reset(vf);
1731 ice_reset_vf(vf, false);
1732}
1733
2309ae38
BC
1734/**
1735 * ice_get_vf_from_pfq - get the VF who owns the PF space queue passed in
1736 * @pf: PF used to index all VFs
1737 * @pfq: queue index relative to the PF's function space
1738 *
1739 * Return a pointer to the VF that owns the pfq, or NULL if no VF
1740 * owns it
1741 */
1742static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq)
1743{
53bb6698 1744 unsigned int vf_id;
2309ae38
BC
1745
1746 ice_for_each_vf(pf, vf_id) {
1747 struct ice_vf *vf = &pf->vf[vf_id];
1748 struct ice_vsi *vsi;
1749 u16 rxq_idx;
1750
1751 vsi = pf->vsi[vf->lan_vsi_idx];
1752
1753 ice_for_each_rxq(vsi, rxq_idx)
1754 if (vsi->rxq_map[rxq_idx] == pfq)
1755 return vf;
1756 }
1757
1758 return NULL;
1759}
1760
1761/**
1762 * ice_globalq_to_pfq - convert from global queue index to PF space queue index
1763 * @pf: PF used for conversion
1764 * @globalq: global queue index used to convert to PF space queue index
1765 */
1766static u32 ice_globalq_to_pfq(struct ice_pf *pf, u32 globalq)
1767{
1768 return globalq - pf->hw.func_caps.common_cap.rxq_first_id;
1769}
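/* Sketch: if this function's rxq_first_id were 512 and an event named
 * global Rx queue 517, ice_globalq_to_pfq() would yield PF queue
 * 517 - 512 = 5, and ice_get_vf_from_pfq() would walk each VF VSI's
 * rxq_map to find the VF that owns PF queue 5.
 */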
1770
1771/**
1772 * ice_vf_lan_overflow_event - handle LAN overflow event for a VF
1773 * @pf: PF that the LAN overflow event happened on
1774 * @event: structure holding the event information for the LAN overflow event
1775 *
1776 * Determine if the LAN overflow event was caused by a VF queue. If it was not
1777 * caused by a VF, do nothing. If a VF caused this LAN overflow event trigger a
1778 * reset on the offending VF.
1779 */
1780void
1781ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
1782{
1783 u32 gldcb_rtctq, queue;
1784 struct ice_vf *vf;
1785
1786 gldcb_rtctq = le32_to_cpu(event->desc.params.lan_overflow.prtdcb_ruptq);
1787 dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq);
1788
1789 /* event returns device global Rx queue number */
1790 queue = (gldcb_rtctq & GLDCB_RTCTQ_RXQNUM_M) >>
1791 GLDCB_RTCTQ_RXQNUM_S;
1792
1793 vf = ice_get_vf_from_pfq(pf, ice_globalq_to_pfq(pf, queue));
1794 if (!vf)
1795 return;
1796
1797 ice_vc_reset_vf(vf);
1798}
1799
1071a835
AV
1800/**
1801 * ice_vc_send_msg_to_vf - Send message to VF
1802 * @vf: pointer to the VF info
1803 * @v_opcode: virtual channel opcode
1804 * @v_retval: virtual channel return value
1805 * @msg: pointer to the msg buffer
1806 * @msglen: msg length
1807 *
1808 * send msg to VF
1809 */
c8b7abdd 1810static int
cf6c6e01
MW
1811ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
1812 enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
1071a835
AV
1813{
1814 enum ice_status aq_ret;
4015d11e 1815 struct device *dev;
1071a835
AV
1816 struct ice_pf *pf;
1817
4c66d227 1818 if (!vf)
1071a835
AV
1819 return -EINVAL;
1820
1821 pf = vf->pf;
4c66d227
JB
1822 if (ice_validate_vf_id(pf, vf->vf_id))
1823 return -EINVAL;
1071a835 1824
4015d11e
BC
1825 dev = ice_pf_to_dev(pf);
1826
1071a835
AV
1827 /* single place to detect unsuccessful return values */
1828 if (v_retval) {
1829 vf->num_inval_msgs++;
4015d11e
BC
1830 dev_info(dev, "VF %d failed opcode %d, retval: %d\n", vf->vf_id,
1831 v_opcode, v_retval);
1071a835 1832 if (vf->num_inval_msgs > ICE_DFLT_NUM_INVAL_MSGS_ALLOWED) {
19cce2c6 1833 dev_err(dev, "Number of invalid messages exceeded for VF %d\n",
1071a835 1834 vf->vf_id);
4015d11e 1835 dev_err(dev, "Use PF Control I/F to enable the VF\n");
1071a835
AV
1836 set_bit(ICE_VF_STATE_DIS, vf->vf_states);
1837 return -EIO;
1838 }
1839 } else {
1840 vf->num_valid_msgs++;
1841 /* reset the invalid counter if a valid message is received. */
1842 vf->num_inval_msgs = 0;
1843 }
1844
1845 aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
1846 msg, msglen, NULL);
90e47737 1847 if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) {
0fee3577
LY
1848 dev_info(dev, "Unable to send the message to VF %d ret %s aq_err %s\n",
1849 vf->vf_id, ice_stat_str(aq_ret),
1850 ice_aq_str(pf->hw.mailboxq.sq_last_status));
1071a835
AV
1851 return -EIO;
1852 }
1853
1854 return 0;
1855}
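/* Sketch of the counter behavior above, assuming
 * ICE_DFLT_NUM_INVAL_MSGS_ALLOWED is 10 (its value elsewhere in the
 * driver): ten consecutive failed opcodes are tolerated, the eleventh
 * sets ICE_VF_STATE_DIS and the VF must be re-enabled via the PF; one
 * successful message resets num_inval_msgs to 0.
 */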
1856
1857/**
1858 * ice_vc_get_ver_msg
1859 * @vf: pointer to the VF info
1860 * @msg: pointer to the msg buffer
1861 *
1862 * called from the VF to request the API version used by the PF
1863 */
1864static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
1865{
1866 struct virtchnl_version_info info = {
1867 VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
1868 };
1869
1870 vf->vf_ver = *(struct virtchnl_version_info *)msg;
1871 /* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
1872 if (VF_IS_V10(&vf->vf_ver))
1873 info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
1874
cf6c6e01
MW
1875 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
1876 VIRTCHNL_STATUS_SUCCESS, (u8 *)&info,
1071a835
AV
1877 sizeof(struct virtchnl_version_info));
1878}
1879
1880/**
1881 * ice_vc_get_vf_res_msg
1882 * @vf: pointer to the VF info
1883 * @msg: pointer to the msg buffer
1884 *
1885 * called from the VF to request its resources
1886 */
1887static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
1888{
cf6c6e01 1889 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1071a835 1890 struct virtchnl_vf_resource *vfres = NULL;
1071a835
AV
1891 struct ice_pf *pf = vf->pf;
1892 struct ice_vsi *vsi;
1893 int len = 0;
1894 int ret;
1895
4c66d227 1896 if (ice_check_vf_init(pf, vf)) {
cf6c6e01 1897 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
1898 goto err;
1899 }
1900
1901 len = sizeof(struct virtchnl_vf_resource);
1902
9efe35d0 1903 vfres = kzalloc(len, GFP_KERNEL);
1071a835 1904 if (!vfres) {
cf6c6e01 1905 v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
1071a835
AV
1906 len = 0;
1907 goto err;
1908 }
1909 if (VF_IS_V11(&vf->vf_ver))
1910 vf->driver_caps = *(u32 *)msg;
1911 else
1912 vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
1913 VIRTCHNL_VF_OFFLOAD_RSS_REG |
1914 VIRTCHNL_VF_OFFLOAD_VLAN;
1915
1916 vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
1917 vsi = pf->vsi[vf->lan_vsi_idx];
f1ef73f5 1918 if (!vsi) {
cf6c6e01 1919 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
f1ef73f5
AA
1920 goto err;
1921 }
1922
1071a835
AV
1923 if (!vsi->info.pvid)
1924 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
1925
1926 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
1927 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
1928 } else {
1929 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)
1930 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
1931 else
1932 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
1933 }
1934
1935 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
1936 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
1937
1938 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
1939 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
1940
1941 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)
1942 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
1943
1944 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING)
1945 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
1946
1947 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
1948 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
1949
1950 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
1951 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
1952
1953 if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
1954 vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
1955
1956 vfres->num_vsis = 1;
1957 /* Tx and Rx queues are equal for the VF */
1958 vfres->num_queue_pairs = vsi->num_txq;
46c276ce 1959 vfres->max_vectors = pf->num_msix_per_vf;
1071a835
AV
1960 vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE;
1961 vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
1962
1963 vfres->vsi_res[0].vsi_id = vf->lan_vsi_num;
1964 vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
1965 vfres->vsi_res[0].num_queue_pairs = vsi->num_txq;
1966 ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
1967 vf->dflt_lan_addr.addr);
1968
d4bc4e2d
BC
1969 /* match guest capabilities */
1970 vf->driver_caps = vfres->vf_cap_flags;
1971
1071a835
AV
1972 set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
1973
1974err:
1975 /* send the response back to the VF */
cf6c6e01 1976 ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, v_ret,
1071a835
AV
1977 (u8 *)vfres, len);
1978
9efe35d0 1979 kfree(vfres);
1071a835
AV
1980 return ret;
1981}
1982
1983/**
1984 * ice_vc_reset_vf_msg
1985 * @vf: pointer to the VF info
1986 *
1987 * called from the VF to reset itself;
1988 * unlike other virtchnl messages, the PF driver
1989 * doesn't send a response back to the VF
1990 */
1991static void ice_vc_reset_vf_msg(struct ice_vf *vf)
1992{
1993 if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
1994 ice_reset_vf(vf, false);
1995}
1996
1997/**
1998 * ice_find_vsi_from_id
2f2da36e 1999 * @pf: the PF structure to search for the VSI
f9867df6 2000 * @id: ID of the VSI it is searching for
1071a835 2001 *
f9867df6 2002 * searches for the VSI with the given ID
1071a835
AV
2003 */
2004static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id)
2005{
2006 int i;
2007
80ed404a 2008 ice_for_each_vsi(pf, i)
1071a835
AV
2009 if (pf->vsi[i] && pf->vsi[i]->vsi_num == id)
2010 return pf->vsi[i];
2011
2012 return NULL;
2013}
2014
2015/**
2016 * ice_vc_isvalid_vsi_id
2017 * @vf: pointer to the VF info
f9867df6 2018 * @vsi_id: VF relative VSI ID
1071a835 2019 *
f9867df6 2020 * check for the valid VSI ID
1071a835
AV
2021 */
2022static bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
2023{
2024 struct ice_pf *pf = vf->pf;
2025 struct ice_vsi *vsi;
2026
2027 vsi = ice_find_vsi_from_id(pf, vsi_id);
2028
2029 return (vsi && (vsi->vf_id == vf->vf_id));
2030}
2031
2032/**
2033 * ice_vc_isvalid_q_id
2034 * @vf: pointer to the VF info
f9867df6
AV
2035 * @vsi_id: VSI ID
2036 * @qid: VSI relative queue ID
1071a835 2037 *
f9867df6 2038 * check for the valid queue ID
1071a835
AV
2039 */
2040static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
2041{
2042 struct ice_vsi *vsi = ice_find_vsi_from_id(vf->pf, vsi_id);
2043 /* allocated Tx and Rx queues should always be equal for a VF VSI */
2044 return (vsi && (qid < vsi->alloc_txq));
2045}
2046
9c7dd756
MS
2047/**
2048 * ice_vc_isvalid_ring_len
2049 * @ring_len: length of ring
2050 *
2051 * check for a valid ring count; it should be a multiple of ICE_REQ_DESC_MULTIPLE
77ca27c4 2052 * or zero
9c7dd756
MS
2053 */
2054static bool ice_vc_isvalid_ring_len(u16 ring_len)
2055{
77ca27c4
PG
2056 return ring_len == 0 ||
2057 (ring_len >= ICE_MIN_NUM_DESC &&
9c7dd756
MS
2058 ring_len <= ICE_MAX_NUM_DESC &&
2059 !(ring_len % ICE_REQ_DESC_MULTIPLE));
2060}
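/* Examples, assuming ICE_MIN_NUM_DESC = 64, ICE_MAX_NUM_DESC = 8160 and
 * ICE_REQ_DESC_MULTIPLE = 32 (their values elsewhere in the driver):
 *
 *	ice_vc_isvalid_ring_len(0);	// true  - zero is explicitly allowed
 *	ice_vc_isvalid_ring_len(512);	// true  - in range, multiple of 32
 *	ice_vc_isvalid_ring_len(100);	// false - not a multiple of 32
 *	ice_vc_isvalid_ring_len(9000);	// false - above ICE_MAX_NUM_DESC
 */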
2061
1071a835
AV
2062/**
2063 * ice_vc_config_rss_key
2064 * @vf: pointer to the VF info
2065 * @msg: pointer to the msg buffer
2066 *
2067 * Configure the VF's RSS key
2068 */
2069static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
2070{
cf6c6e01 2071 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1071a835
AV
2072 struct virtchnl_rss_key *vrk =
2073 (struct virtchnl_rss_key *)msg;
f1ef73f5 2074 struct ice_pf *pf = vf->pf;
4c66d227 2075 struct ice_vsi *vsi;
1071a835
AV
2076
2077 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
cf6c6e01 2078 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2079 goto error_param;
2080 }
2081
2082 if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) {
cf6c6e01 2083 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2084 goto error_param;
2085 }
2086
3f416961 2087 if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
cf6c6e01 2088 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2089 goto error_param;
2090 }
2091
3f416961 2092 if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
cf6c6e01 2093 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2094 goto error_param;
2095 }
2096
3f416961
A
2097 vsi = pf->vsi[vf->lan_vsi_idx];
2098 if (!vsi) {
cf6c6e01 2099 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2100 goto error_param;
2101 }
2102
cf6c6e01
MW
2103 if (ice_set_rss(vsi, vrk->key, NULL, 0))
2104 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
1071a835 2105error_param:
cf6c6e01 2106 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, v_ret,
1071a835
AV
2107 NULL, 0);
2108}
2109
2110/**
2111 * ice_vc_config_rss_lut
2112 * @vf: pointer to the VF info
2113 * @msg: pointer to the msg buffer
2114 *
2115 * Configure the VF's RSS LUT
2116 */
2117static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
2118{
2119 struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
cf6c6e01 2120 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
f1ef73f5 2121 struct ice_pf *pf = vf->pf;
4c66d227 2122 struct ice_vsi *vsi;
1071a835
AV
2123
2124 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
cf6c6e01 2125 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2126 goto error_param;
2127 }
2128
2129 if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) {
cf6c6e01 2130 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2131 goto error_param;
2132 }
2133
3f416961 2134 if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) {
cf6c6e01 2135 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2136 goto error_param;
2137 }
2138
3f416961 2139 if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
cf6c6e01 2140 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2141 goto error_param;
2142 }
2143
3f416961
A
2144 vsi = pf->vsi[vf->lan_vsi_idx];
2145 if (!vsi) {
cf6c6e01 2146 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2147 goto error_param;
2148 }
2149
cf6c6e01
MW
2150 if (ice_set_rss(vsi, NULL, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE))
2151 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
1071a835 2152error_param:
cf6c6e01 2153 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret,
1071a835
AV
2154 NULL, 0);
2155}
2156
c54d209c
BC
2157/**
2158 * ice_wait_on_vf_reset - poll to make sure a given VF is ready after reset
2159 * @vf: the VF being reset
2160 *
2161 * The max poll time is ~800ms, which is about the maximum time it takes
2162 * for a VF to be reset and/or a VF driver to be removed.
2163 */
2164static void ice_wait_on_vf_reset(struct ice_vf *vf)
2165{
2166 int i;
2167
2168 for (i = 0; i < ICE_MAX_VF_RESET_TRIES; i++) {
2169 if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
2170 break;
2171 msleep(ICE_MAX_VF_RESET_SLEEP_MS);
2172 }
2173}
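/* Assuming ICE_MAX_VF_RESET_TRIES = 40 and ICE_MAX_VF_RESET_SLEEP_MS =
 * 20 (their values elsewhere in the driver), the worst-case wait is
 * 40 * 20 ms = 800 ms, matching the bound quoted in the comment above.
 */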
2174
2175/**
2176 * ice_check_vf_ready_for_cfg - check if VF is ready to be configured/queried
2177 * @vf: VF to check if it's ready to be configured/queried
2178 *
2179 * The purpose of this function is to make sure the VF is not in reset, not
2180 * disabled, and initialized so it can be configured and/or queried by a host
2181 * administrator.
2182 */
2183static int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
2184{
2185 struct ice_pf *pf;
2186
2187 ice_wait_on_vf_reset(vf);
2188
2189 if (ice_is_vf_disabled(vf))
2190 return -EINVAL;
2191
2192 pf = vf->pf;
2193 if (ice_check_vf_init(pf, vf))
2194 return -EBUSY;
2195
2196 return 0;
2197}
2198
cd6d6b83
BC
2199/**
2200 * ice_set_vf_spoofchk
2201 * @netdev: network interface device structure
2202 * @vf_id: VF identifier
2203 * @ena: flag to enable or disable feature
2204 *
2205 * Enable or disable VF spoof checking
2206 */
2207int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
2208{
2209 struct ice_netdev_priv *np = netdev_priv(netdev);
2210 struct ice_pf *pf = np->vsi->back;
2211 struct ice_vsi_ctx *ctx;
2212 struct ice_vsi *vf_vsi;
2213 enum ice_status status;
2214 struct device *dev;
2215 struct ice_vf *vf;
c54d209c 2216 int ret;
cd6d6b83
BC
2217
2218 dev = ice_pf_to_dev(pf);
2219 if (ice_validate_vf_id(pf, vf_id))
2220 return -EINVAL;
2221
2222 vf = &pf->vf[vf_id];
c54d209c
BC
2223 ret = ice_check_vf_ready_for_cfg(vf);
2224 if (ret)
2225 return ret;
cd6d6b83
BC
2226
2227 vf_vsi = pf->vsi[vf->lan_vsi_idx];
2228 if (!vf_vsi) {
2229 netdev_err(netdev, "VSI %d for VF %d is null\n",
2230 vf->lan_vsi_idx, vf->vf_id);
2231 return -EINVAL;
2232 }
2233
2234 if (vf_vsi->type != ICE_VSI_VF) {
19cce2c6 2235 netdev_err(netdev, "Type %d of VSI %d for VF %d is not ICE_VSI_VF\n",
cd6d6b83
BC
2236 vf_vsi->type, vf_vsi->vsi_num, vf->vf_id);
2237 return -ENODEV;
2238 }
2239
2240 if (ena == vf->spoofchk) {
2241 dev_dbg(dev, "VF spoofchk already %s\n", ena ? "ON" : "OFF");
2242 return 0;
2243 }
2244
2245 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
2246 if (!ctx)
2247 return -ENOMEM;
2248
2249 ctx->info.sec_flags = vf_vsi->info.sec_flags;
2250 ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
2251 if (ena) {
2252 ctx->info.sec_flags |=
2253 ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
2254 (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
2255 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
2256 } else {
2257 ctx->info.sec_flags &=
2258 ~(ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
2259 (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
2260 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S));
2261 }
2262
2263 status = ice_update_vsi(&pf->hw, vf_vsi->idx, ctx, NULL);
2264 if (status) {
0fee3577
LY
2265 dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d, error %s\n",
2266 ena ? "en" : "dis", vf->vf_id, vf_vsi->vsi_num,
2267 ice_stat_str(status));
cd6d6b83
BC
2268 ret = -EIO;
2269 goto out;
2270 }
2271
2272 /* only update spoofchk state and VSI context on success */
2273 vf_vsi->info.sec_flags = ctx->info.sec_flags;
2274 vf->spoofchk = ena;
2275
2276out:
2277 kfree(ctx);
2278 return ret;
2279}
2280
01b5e89a
BC
2281/**
2282 * ice_is_any_vf_in_promisc - check if any VF(s) are in promiscuous mode
2283 * @pf: PF structure for accessing VF(s)
2284 *
2285 * Return false if no VF(s) are in unicast and/or multicast promiscuous mode,
2286 * else return true
2287 */
2288bool ice_is_any_vf_in_promisc(struct ice_pf *pf)
2289{
2290 int vf_idx;
2291
2292 ice_for_each_vf(pf, vf_idx) {
2293 struct ice_vf *vf = &pf->vf[vf_idx];
2294
2295 /* found a VF that has promiscuous mode configured */
2296 if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
2297 test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
2298 return true;
2299 }
2300
2301 return false;
2302}
2303
2304/**
2305 * ice_vc_cfg_promiscuous_mode_msg
2306 * @vf: pointer to the VF info
2307 * @msg: pointer to the msg buffer
2308 *
2309 * called from the VF to configure VF VSIs promiscuous mode
2310 */
2311static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
2312{
2313 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2314 struct virtchnl_promisc_info *info =
2315 (struct virtchnl_promisc_info *)msg;
2316 struct ice_pf *pf = vf->pf;
2317 struct ice_vsi *vsi;
2318 struct device *dev;
2319 bool rm_promisc;
2320 int ret = 0;
2321
2322 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2323 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2324 goto error_param;
2325 }
2326
2327 if (!ice_vc_isvalid_vsi_id(vf, info->vsi_id)) {
2328 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2329 goto error_param;
2330 }
2331
2332 vsi = pf->vsi[vf->lan_vsi_idx];
2333 if (!vsi) {
2334 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2335 goto error_param;
2336 }
2337
2338 dev = ice_pf_to_dev(pf);
2339 if (!test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
2340 dev_err(dev, "Unprivileged VF %d is attempting to configure promiscuous mode\n",
2341 vf->vf_id);
2342 /* Leave v_ret alone, lie to the VF on purpose. */
2343 goto error_param;
2344 }
2345
2346 rm_promisc = !(info->flags & FLAG_VF_UNICAST_PROMISC) &&
2347 !(info->flags & FLAG_VF_MULTICAST_PROMISC);
2348
2349 if (vsi->num_vlan || vf->port_vlan_info) {
2350 struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);
2351 struct net_device *pf_netdev;
2352
2353 if (!pf_vsi) {
2354 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2355 goto error_param;
2356 }
2357
2358 pf_netdev = pf_vsi->netdev;
2359
2360 ret = ice_set_vf_spoofchk(pf_netdev, vf->vf_id, rm_promisc);
2361 if (ret) {
2362 dev_err(dev, "Failed to update spoofchk to %s for VF %d VSI %d when setting promiscuous mode\n",
2363 rm_promisc ? "ON" : "OFF", vf->vf_id,
2364 vsi->vsi_num);
2365 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2366 }
2367
2368 ret = ice_cfg_vlan_pruning(vsi, true, !rm_promisc);
2369 if (ret) {
2370 dev_err(dev, "Failed to configure VLAN pruning in promiscuous mode\n");
2371 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2372 goto error_param;
2373 }
2374 }
2375
2376 if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
2377 bool set_dflt_vsi = !!(info->flags & FLAG_VF_UNICAST_PROMISC);
2378
2379 if (set_dflt_vsi && !ice_is_dflt_vsi_in_use(pf->first_sw))
2380 /* only attempt to set the default forwarding VSI if
2381 * it's not currently set
2382 */
2383 ret = ice_set_dflt_vsi(pf->first_sw, vsi);
2384 else if (!set_dflt_vsi &&
2385 ice_is_vsi_dflt_vsi(pf->first_sw, vsi))
2386 /* only attempt to free the default forwarding VSI if we
2387 * are the owner
2388 */
2389 ret = ice_clear_dflt_vsi(pf->first_sw);
2390
2391 if (ret) {
2392 dev_err(dev, "%sable VF %d as the default VSI failed, error %d\n",
2393 set_dflt_vsi ? "en" : "dis", vf->vf_id, ret);
2394 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
2395 goto error_param;
2396 }
2397 } else {
2398 enum ice_status status;
2399 u8 promisc_m;
2400
2401 if (info->flags & FLAG_VF_UNICAST_PROMISC) {
2402 if (vf->port_vlan_info || vsi->num_vlan)
2403 promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
2404 else
2405 promisc_m = ICE_UCAST_PROMISC_BITS;
2406 } else if (info->flags & FLAG_VF_MULTICAST_PROMISC) {
2407 if (vf->port_vlan_info || vsi->num_vlan)
2408 promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
2409 else
2410 promisc_m = ICE_MCAST_PROMISC_BITS;
2411 } else {
2412 if (vf->port_vlan_info || vsi->num_vlan)
2413 promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
2414 else
2415 promisc_m = ICE_UCAST_PROMISC_BITS;
2416 }
2417
2418 /* Configure multicast/unicast with or without VLAN promiscuous
2419 * mode
2420 */
2421 status = ice_vf_set_vsi_promisc(vf, vsi, promisc_m, rm_promisc);
2422 if (status) {
0fee3577
LY
2423 dev_err(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d failed, error: %s\n",
2424 rm_promisc ? "dis" : "en", vf->vf_id,
2425 ice_stat_str(status));
01b5e89a
BC
2426 v_ret = ice_err_to_virt_err(status);
2427 goto error_param;
2428 } else {
2429 dev_dbg(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d succeeded\n",
2430 rm_promisc ? "dis" : "en", vf->vf_id);
2431 }
2432 }
2433
2434 if (info->flags & FLAG_VF_MULTICAST_PROMISC)
2435 set_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
2436 else
2437 clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
2438
2439 if (info->flags & FLAG_VF_UNICAST_PROMISC)
2440 set_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
2441 else
2442 clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
2443
2444error_param:
2445 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
2446 v_ret, NULL, 0);
2447}
2448
1071a835
AV
2449/**
2450 * ice_vc_get_stats_msg
2451 * @vf: pointer to the VF info
2452 * @msg: pointer to the msg buffer
2453 *
2454 * called from the VF to get VSI stats
2455 */
2456static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
2457{
cf6c6e01 2458 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1071a835
AV
2459 struct virtchnl_queue_select *vqs =
2460 (struct virtchnl_queue_select *)msg;
949375de 2461 struct ice_eth_stats stats = { 0 };
f1ef73f5 2462 struct ice_pf *pf = vf->pf;
1071a835
AV
2463 struct ice_vsi *vsi;
2464
2465 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
cf6c6e01 2466 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2467 goto error_param;
2468 }
2469
2470 if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
cf6c6e01 2471 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2472 goto error_param;
2473 }
2474
f1ef73f5 2475 vsi = pf->vsi[vf->lan_vsi_idx];
1071a835 2476 if (!vsi) {
cf6c6e01 2477 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2478 goto error_param;
2479 }
2480
1071a835
AV
2481 ice_update_eth_stats(vsi);
2482
2483 stats = vsi->eth_stats;
2484
2485error_param:
2486 /* send the response to the VF */
cf6c6e01 2487 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, v_ret,
1071a835
AV
2488 (u8 *)&stats, sizeof(stats));
2489}
2490
24e2e2a0
BC
2491/**
2492 * ice_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
2493 * @vqs: virtchnl_queue_select structure containing bitmaps to validate
2494 *
2495 * Return true on successful validation, else false
2496 */
2497static bool ice_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
2498{
2499 if ((!vqs->rx_queues && !vqs->tx_queues) ||
0ca469fb
MW
2500 vqs->rx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF) ||
2501 vqs->tx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF))
24e2e2a0
BC
2502 return false;
2503
2504 return true;
2505}
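/* Sketch, assuming ICE_MAX_RSS_QS_PER_VF = 16 so BIT(16) = 0x10000:
 *
 *	rx_queues = 0x0003, tx_queues = 0x0000;	// valid - Rx queues 0-1
 *	rx_queues = 0x0000, tx_queues = 0x0000;	// invalid - empty request
 *	tx_queues = 0x10000;			// invalid - bit 16 out of range
 */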
2506
4dc926d3
BC
2507/**
2508 * ice_vf_ena_txq_interrupt - enable Tx queue interrupt via QINT_TQCTL
2509 * @vsi: VSI of the VF to configure
2510 * @q_idx: VF queue index used to determine the queue in the PF's space
2511 */
2512static void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx)
2513{
2514 struct ice_hw *hw = &vsi->back->hw;
2515 u32 pfq = vsi->txq_map[q_idx];
2516 u32 reg;
2517
2518 reg = rd32(hw, QINT_TQCTL(pfq));
2519
2520 /* MSI-X index 0 in the VF's space is always for the OICR, which means
2521 * this is most likely a poll mode VF driver, so don't enable an
2522 * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
2523 */
2524 if (!(reg & QINT_TQCTL_MSIX_INDX_M))
2525 return;
2526
2527 wr32(hw, QINT_TQCTL(pfq), reg | QINT_TQCTL_CAUSE_ENA_M);
2528}
2529
2530/**
2531 * ice_vf_ena_rxq_interrupt - enable Rx queue interrupt via QINT_RQCTL
2532 * @vsi: VSI of the VF to configure
2533 * @q_idx: VF queue index used to determine the queue in the PF's space
2534 */
2535static void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx)
2536{
2537 struct ice_hw *hw = &vsi->back->hw;
2538 u32 pfq = vsi->rxq_map[q_idx];
2539 u32 reg;
2540
2541 reg = rd32(hw, QINT_RQCTL(pfq));
2542
2543 /* MSI-X index 0 in the VF's space is always for the OICR, which means
2544 * this is most likely a poll mode VF driver, so don't enable an
2545 * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
2546 */
2547 if (!(reg & QINT_RQCTL_MSIX_INDX_M))
2548 return;
2549
2550 wr32(hw, QINT_RQCTL(pfq), reg | QINT_RQCTL_CAUSE_ENA_M);
2551}
2552
1071a835
AV
2553/**
2554 * ice_vc_ena_qs_msg
2555 * @vf: pointer to the VF info
2556 * @msg: pointer to the msg buffer
2557 *
2558 * called from the VF to enable all or specific queue(s)
2559 */
2560static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
2561{
cf6c6e01 2562 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1071a835
AV
2563 struct virtchnl_queue_select *vqs =
2564 (struct virtchnl_queue_select *)msg;
f1ef73f5 2565 struct ice_pf *pf = vf->pf;
1071a835 2566 struct ice_vsi *vsi;
77ca27c4
PG
2567 unsigned long q_map;
2568 u16 vf_q_id;
1071a835
AV
2569
2570 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
cf6c6e01 2571 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2572 goto error_param;
2573 }
2574
2575 if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
cf6c6e01 2576 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2577 goto error_param;
2578 }
2579
24e2e2a0 2580 if (!ice_vc_validate_vqs_bitmaps(vqs)) {
3f416961
A
2581 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2582 goto error_param;
2583 }
2584
f1ef73f5 2585 vsi = pf->vsi[vf->lan_vsi_idx];
1071a835 2586 if (!vsi) {
cf6c6e01 2587 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2588 goto error_param;
2589 }
2590
2591 /* Enable only Rx rings, Tx rings were enabled by the FW when the
2592 * Tx queue group list was configured and the context bits were
2593 * programmed using ice_vsi_cfg_txqs
2594 */
77ca27c4 2595 q_map = vqs->rx_queues;
0ca469fb 2596 for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
77ca27c4
PG
2597 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
2598 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2599 goto error_param;
2600 }
2601
2602 /* Skip queue if enabled */
2603 if (test_bit(vf_q_id, vf->rxq_ena))
2604 continue;
2605
13a6233b 2606 if (ice_vsi_ctrl_one_rx_ring(vsi, true, vf_q_id, true)) {
19cce2c6 2607 dev_err(ice_pf_to_dev(vsi->back), "Failed to enable Rx ring %d on VSI %d\n",
77ca27c4
PG
2608 vf_q_id, vsi->vsi_num);
2609 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2610 goto error_param;
2611 }
2612
4dc926d3 2613 ice_vf_ena_rxq_interrupt(vsi, vf_q_id);
77ca27c4 2614 set_bit(vf_q_id, vf->rxq_ena);
77ca27c4
PG
2615 }
2616
2617 vsi = pf->vsi[vf->lan_vsi_idx];
2618 q_map = vqs->tx_queues;
0ca469fb 2619 for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
77ca27c4
PG
2620 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
2621 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2622 goto error_param;
2623 }
2624
2625 /* Skip queue if enabled */
2626 if (test_bit(vf_q_id, vf->txq_ena))
2627 continue;
2628
4dc926d3 2629 ice_vf_ena_txq_interrupt(vsi, vf_q_id);
77ca27c4 2630 set_bit(vf_q_id, vf->txq_ena);
77ca27c4 2631 }
1071a835
AV
2632
2633 /* Set flag to indicate that queues are enabled */
cf6c6e01 2634 if (v_ret == VIRTCHNL_STATUS_SUCCESS)
77ca27c4 2635 set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
1071a835
AV
2636
2637error_param:
2638 /* send the response to the VF */
cf6c6e01 2639 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret,
1071a835
AV
2640 NULL, 0);
2641}
2642
2643/**
2644 * ice_vc_dis_qs_msg
2645 * @vf: pointer to the VF info
2646 * @msg: pointer to the msg buffer
2647 *
2648 * called from the VF to disable all or specific
2649 * queue(s)
2650 */
2651static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
2652{
cf6c6e01 2653 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1071a835
AV
2654 struct virtchnl_queue_select *vqs =
2655 (struct virtchnl_queue_select *)msg;
f1ef73f5 2656 struct ice_pf *pf = vf->pf;
1071a835 2657 struct ice_vsi *vsi;
77ca27c4
PG
2658 unsigned long q_map;
2659 u16 vf_q_id;
1071a835
AV
2660
2661 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
77ca27c4 2662 !test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) {
cf6c6e01 2663 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2664 goto error_param;
2665 }
2666
2667 if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
cf6c6e01 2668 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2669 goto error_param;
2670 }
2671
24e2e2a0 2672 if (!ice_vc_validate_vqs_bitmaps(vqs)) {
cf6c6e01 2673 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2674 goto error_param;
2675 }
2676
f1ef73f5 2677 vsi = pf->vsi[vf->lan_vsi_idx];
1071a835 2678 if (!vsi) {
cf6c6e01 2679 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2680 goto error_param;
2681 }
2682
77ca27c4
PG
2683 if (vqs->tx_queues) {
2684 q_map = vqs->tx_queues;
2685
0ca469fb 2686 for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
77ca27c4
PG
2687 struct ice_ring *ring = vsi->tx_rings[vf_q_id];
2688 struct ice_txq_meta txq_meta = { 0 };
2689
2690 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
2691 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2692 goto error_param;
2693 }
2694
2695 /* Skip queue if not enabled */
2696 if (!test_bit(vf_q_id, vf->txq_ena))
2697 continue;
2698
2699 ice_fill_txq_meta(vsi, ring, &txq_meta);
2700
2701 if (ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id,
2702 ring, &txq_meta)) {
19cce2c6 2703 dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
77ca27c4
PG
2704 vf_q_id, vsi->vsi_num);
2705 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2706 goto error_param;
2707 }
2708
2709 /* Clear enabled queues flag */
2710 clear_bit(vf_q_id, vf->txq_ena);
77ca27c4 2711 }
1071a835
AV
2712 }
2713
e1fe6926
BC
2714 q_map = vqs->rx_queues;
2715 /* speed up Rx queue disable by batching them if possible */
2716 if (q_map &&
0ca469fb 2717 bitmap_equal(&q_map, vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF)) {
e1fe6926
BC
2718 if (ice_vsi_stop_all_rx_rings(vsi)) {
2719 dev_err(ice_pf_to_dev(vsi->back), "Failed to stop all Rx rings on VSI %d\n",
2720 vsi->vsi_num);
2721 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2722 goto error_param;
2723 }
77ca27c4 2724
0ca469fb 2725 bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
e1fe6926 2726 } else if (q_map) {
0ca469fb 2727 for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
77ca27c4
PG
2728 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
2729 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2730 goto error_param;
2731 }
2732
2733 /* Skip queue if not enabled */
2734 if (!test_bit(vf_q_id, vf->rxq_ena))
2735 continue;
2736
13a6233b
BC
2737 if (ice_vsi_ctrl_one_rx_ring(vsi, false, vf_q_id,
2738 true)) {
19cce2c6 2739 dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Rx ring %d on VSI %d\n",
77ca27c4
PG
2740 vf_q_id, vsi->vsi_num);
2741 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2742 goto error_param;
2743 }
2744
2745 /* Clear enabled queues flag */
2746 clear_bit(vf_q_id, vf->rxq_ena);
77ca27c4 2747 }
1071a835
AV
2748 }
2749
2750 /* Clear enabled queues flag */
e1fe6926 2751 if (v_ret == VIRTCHNL_STATUS_SUCCESS && ice_vf_has_no_qs_ena(vf))
77ca27c4 2752 clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
1071a835
AV
2753
2754error_param:
2755 /* send the response to the VF */
cf6c6e01 2756 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret,
1071a835
AV
2757 NULL, 0);
2758}
2759
0ca469fb
MW
2760/**
2761 * ice_cfg_interrupt
2762 * @vf: pointer to the VF info
2763 * @vsi: the VSI being configured
2764 * @vector_id: vector ID
2765 * @map: vector map for mapping vectors to queues
2766 * @q_vector: structure for interrupt vector
2767 * configure the IRQ to queue map
2768 */
2769static int
2770ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
2771 struct virtchnl_vector_map *map,
2772 struct ice_q_vector *q_vector)
2773{
2774 u16 vsi_q_id, vsi_q_id_idx;
2775 unsigned long qmap;
2776
2777 q_vector->num_ring_rx = 0;
2778 q_vector->num_ring_tx = 0;
2779
2780 qmap = map->rxq_map;
2781 for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
2782 vsi_q_id = vsi_q_id_idx;
2783
2784 if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
2785 return VIRTCHNL_STATUS_ERR_PARAM;
2786
2787 q_vector->num_ring_rx++;
2788 q_vector->rx.itr_idx = map->rxitr_idx;
2789 vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
2790 ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id,
2791 q_vector->rx.itr_idx);
2792 }
2793
2794 qmap = map->txq_map;
2795 for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
2796 vsi_q_id = vsi_q_id_idx;
2797
2798 if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
2799 return VIRTCHNL_STATUS_ERR_PARAM;
2800
2801 q_vector->num_ring_tx++;
2802 q_vector->tx.itr_idx = map->txitr_idx;
2803 vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
2804 ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id,
2805 q_vector->tx.itr_idx);
2806 }
2807
2808 return VIRTCHNL_STATUS_SUCCESS;
2809}
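/* Illustrative map: vector_id = 1 with rxq_map = 0x1 and txq_map = 0x3
 * ties VSI Rx queue 0 and Tx queues 0-1 to that vector; the loops above
 * would finish with num_ring_rx = 1 and num_ring_tx = 2, and program
 * QINT_RQCTL/QINT_TQCTL for those queues with vector 1 and the ITR
 * indices from the map.
 */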
2810
1071a835
AV
2811/**
2812 * ice_vc_cfg_irq_map_msg
2813 * @vf: pointer to the VF info
2814 * @msg: pointer to the msg buffer
2815 *
2816 * called from the VF to configure the IRQ to queue map
2817 */
2818static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
2819{
cf6c6e01 2820 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
0ca469fb 2821 u16 num_q_vectors_mapped, vsi_id, vector_id;
173e23c0 2822 struct virtchnl_irq_map_info *irqmap_info;
1071a835 2823 struct virtchnl_vector_map *map;
1071a835 2824 struct ice_pf *pf = vf->pf;
173e23c0 2825 struct ice_vsi *vsi;
1071a835
AV
2826 int i;
2827
173e23c0 2828 irqmap_info = (struct virtchnl_irq_map_info *)msg;
047e52c0
AV
2829 num_q_vectors_mapped = irqmap_info->num_vectors;
2830
047e52c0
AV
2831 /* Check to make sure number of VF vectors mapped is not greater than
2832 * number of VF vectors originally allocated, and check that
2833 * there is actually at least a single VF queue vector mapped
2834 */
ba0db585 2835 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
46c276ce 2836 pf->num_msix_per_vf < num_q_vectors_mapped ||
0ca469fb 2837 !num_q_vectors_mapped) {
cf6c6e01 2838 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2839 goto error_param;
2840 }
2841
3f416961
A
2842 vsi = pf->vsi[vf->lan_vsi_idx];
2843 if (!vsi) {
2844 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2845 goto error_param;
2846 }
2847
047e52c0
AV
2848 for (i = 0; i < num_q_vectors_mapped; i++) {
2849 struct ice_q_vector *q_vector;
ba0db585 2850
1071a835
AV
2851 map = &irqmap_info->vecmap[i];
2852
2853 vector_id = map->vector_id;
2854 vsi_id = map->vsi_id;
b791cdd5
BC
2855 /* vector_id is always 0-based for each VF, and can never be
2856 * larger than or equal to the max allowed interrupts per VF
2857 */
46c276ce 2858 if (!(vector_id < pf->num_msix_per_vf) ||
b791cdd5 2859 !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
047e52c0
AV
2860 (!vector_id && (map->rxq_map || map->txq_map))) {
2861 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2862 goto error_param;
2863 }
2864
2865 /* No need to map VF miscellaneous or rogue vector */
2866 if (!vector_id)
2867 continue;
2868
2869 /* Subtract the non-queue vector from the vector_id passed by the
2870 * VF to get the actual VSI queue vector array index
2871 */
2872 q_vector = vsi->q_vectors[vector_id - ICE_NONQ_VECS_VF];
2873 if (!q_vector) {
cf6c6e01 2874 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2875 goto error_param;
2876 }
2877
1071a835 2878 /* lookout for the invalid queue index */
0ca469fb
MW
2879 v_ret = (enum virtchnl_status_code)
2880 ice_cfg_interrupt(vf, vsi, vector_id, map, q_vector);
2881 if (v_ret)
2882 goto error_param;
1071a835
AV
2883 }
2884
1071a835
AV
2885error_param:
2886 /* send the response to the VF */
cf6c6e01 2887 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret,
1071a835
AV
2888 NULL, 0);
2889}
2890
2891/**
2892 * ice_vc_cfg_qs_msg
2893 * @vf: pointer to the VF info
2894 * @msg: pointer to the msg buffer
2895 *
2896 * called from the VF to configure the Rx/Tx queues
2897 */
2898static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
2899{
cf6c6e01 2900 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1071a835
AV
2901 struct virtchnl_vsi_queue_config_info *qci =
2902 (struct virtchnl_vsi_queue_config_info *)msg;
2903 struct virtchnl_queue_pair_info *qpi;
77ca27c4 2904 u16 num_rxq = 0, num_txq = 0;
5743020d 2905 struct ice_pf *pf = vf->pf;
1071a835
AV
2906 struct ice_vsi *vsi;
2907 int i;
2908
2909 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
cf6c6e01 2910 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2911 goto error_param;
2912 }
2913
2914 if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
cf6c6e01 2915 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2916 goto error_param;
2917 }
2918
9c7dd756
MS
2919 vsi = pf->vsi[vf->lan_vsi_idx];
2920 if (!vsi) {
cf6c6e01 2921 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
5743020d
AA
2922 goto error_param;
2923 }
2924
0ca469fb 2925 if (qci->num_queue_pairs > ICE_MAX_RSS_QS_PER_VF ||
9c7dd756 2926 qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
19cce2c6 2927 dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n",
9c7dd756 2928 vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
3f416961
A
2929 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2930 goto error_param;
2931 }
2932
1071a835
AV
2933 for (i = 0; i < qci->num_queue_pairs; i++) {
2934 qpi = &qci->qpair[i];
2935 if (qpi->txq.vsi_id != qci->vsi_id ||
2936 qpi->rxq.vsi_id != qci->vsi_id ||
2937 qpi->rxq.queue_id != qpi->txq.queue_id ||
f8af5bf5 2938 qpi->txq.headwb_enabled ||
9c7dd756
MS
2939 !ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
2940 !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
1071a835 2941 !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
cf6c6e01 2942 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2943 goto error_param;
2944 }
2945 /* copy Tx queue info from VF into VSI */
77ca27c4
PG
2946 if (qpi->txq.ring_len > 0) {
2947 num_txq++;
2948 vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
2949 vsi->tx_rings[i]->count = qpi->txq.ring_len;
1071a835 2950 }
77ca27c4
PG
2951
2952 /* copy Rx queue info from VF into VSI */
2953 if (qpi->rxq.ring_len > 0) {
2954 num_rxq++;
2955 vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
2956 vsi->rx_rings[i]->count = qpi->rxq.ring_len;
2957
2958 if (qpi->rxq.databuffer_size != 0 &&
2959 (qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
2960 qpi->rxq.databuffer_size < 1024)) {
2961 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2962 goto error_param;
2963 }
2964 vsi->rx_buf_len = qpi->rxq.databuffer_size;
2965 vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len;
2966 if (qpi->rxq.max_pkt_size >= (16 * 1024) ||
2967 qpi->rxq.max_pkt_size < 64) {
2968 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2969 goto error_param;
2970 }
1071a835 2971 }
77ca27c4 2972
1071a835
AV
2973 vsi->max_frame = qpi->rxq.max_pkt_size;
2974 }
2975
2976 /* VF can request to configure fewer queues than it was allocated,
2977 * or the default allocation, so update the VSI with the new number
2978 */
77ca27c4
PG
2979 vsi->num_txq = num_txq;
2980 vsi->num_rxq = num_rxq;
105e5bc2 2981 /* All queues of VF VSI are in TC 0 */
77ca27c4
PG
2982 vsi->tc_cfg.tc_info[0].qcount_tx = num_txq;
2983 vsi->tc_cfg.tc_info[0].qcount_rx = num_rxq;
1071a835 2984
cf6c6e01
MW
2985 if (ice_vsi_cfg_lan_txqs(vsi) || ice_vsi_cfg_rxqs(vsi))
2986 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
1071a835
AV
2987
2988error_param:
2989 /* send the response to the VF */
cf6c6e01 2990 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, v_ret,
1071a835
AV
2991 NULL, 0);
2992}
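/* Bounds implied by the checks above: databuffer_size must be 0 or in
 * [1024, 16 * 1024 - 128] bytes and max_pkt_size must be in
 * [64, 16 * 1024) bytes; e.g. a 2048-byte buffer with a 1518-byte max
 * frame passes, while a 512-byte buffer is rejected.
 */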
2993
2994/**
2995 * ice_is_vf_trusted
2996 * @vf: pointer to the VF info
2997 */
2998static bool ice_is_vf_trusted(struct ice_vf *vf)
2999{
3000 return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
3001}
3002
3003/**
3004 * ice_can_vf_change_mac
3005 * @vf: pointer to the VF info
3006 *
3007 * Return true if the VF is allowed to change its MAC filters, false otherwise
3008 */
3009static bool ice_can_vf_change_mac(struct ice_vf *vf)
3010{
3011 /* If the VF MAC address has been set administratively (via the
3012 * ndo_set_vf_mac command), then deny permission to the VF to
3013 * add/delete unicast MAC addresses, unless the VF is trusted
3014 */
3015 if (vf->pf_set_mac && !ice_is_vf_trusted(vf))
3016 return false;
3017
3018 return true;
3019}
3020
ed4c068d
BC
3021/**
3022 * ice_vc_add_mac_addr - attempt to add the MAC address passed in
3023 * @vf: pointer to the VF info
3024 * @vsi: pointer to the VF's VSI
3025 * @mac_addr: MAC address to add
3026 */
3027static int
3028ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
3029{
3030 struct device *dev = ice_pf_to_dev(vf->pf);
3031 enum ice_status status;
3032
3033 /* default unicast MAC already added */
3034 if (ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
3035 return 0;
3036
3037 if (is_unicast_ether_addr(mac_addr) && !ice_can_vf_change_mac(vf)) {
3038 dev_err(dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
3039 return -EPERM;
3040 }
3041
1b8f15b6 3042 status = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
ed4c068d
BC
3043 if (status == ICE_ERR_ALREADY_EXISTS) {
3044 dev_err(dev, "MAC %pM already exists for VF %d\n", mac_addr,
3045 vf->vf_id);
3046 return -EEXIST;
3047 } else if (status) {
0fee3577
LY
3048 dev_err(dev, "Failed to add MAC %pM for VF %d\n, error %s\n",
3049 mac_addr, vf->vf_id, ice_stat_str(status));
ed4c068d
BC
3050 return -EIO;
3051 }
3052
bf8987df
PG
3053 /* Set the default LAN address to the latest unicast MAC address added
3054 * by the VF. The default LAN address is reported by the PF via
3055 * ndo_get_vf_config.
3056 */
3057 if (is_unicast_ether_addr(mac_addr))
ed4c068d
BC
3058 ether_addr_copy(vf->dflt_lan_addr.addr, mac_addr);
3059
3060 vf->num_mac++;
3061
3062 return 0;
3063}
3064
3065/**
3066 * ice_vc_del_mac_addr - attempt to delete the MAC address passed in
3067 * @vf: pointer to the VF info
3068 * @vsi: pointer to the VF's VSI
3069 * @mac_addr: MAC address to delete
3070 */
3071static int
3072ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
3073{
3074 struct device *dev = ice_pf_to_dev(vf->pf);
3075 enum ice_status status;
3076
3077 if (!ice_can_vf_change_mac(vf) &&
3078 ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
3079 return 0;
3080
1b8f15b6 3081 status = ice_fltr_remove_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
ed4c068d
BC
3082 if (status == ICE_ERR_DOES_NOT_EXIST) {
3083 dev_err(dev, "MAC %pM does not exist for VF %d\n", mac_addr,
3084 vf->vf_id);
3085 return -ENOENT;
3086 } else if (status) {
0fee3577
LY
3087 dev_err(dev, "Failed to delete MAC %pM for VF %d, error %s\n",
3088 mac_addr, vf->vf_id, ice_stat_str(status));
ed4c068d
BC
3089 return -EIO;
3090 }
3091
3092 if (ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
3093 eth_zero_addr(vf->dflt_lan_addr.addr);
3094
3095 vf->num_mac--;
3096
3097 return 0;
3098}
3099
1071a835
AV
3100/**
3101 * ice_vc_handle_mac_addr_msg
3102 * @vf: pointer to the VF info
3103 * @msg: pointer to the msg buffer
f9867df6 3104 * @set: true if MAC filters are being set, false otherwise
1071a835 3105 *
df17b7e0 3106 * add or remove guest MAC address filters
1071a835
AV
3107 */
3108static int
3109ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
3110{
ed4c068d
BC
3111 int (*ice_vc_cfg_mac)
3112 (struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr);
cf6c6e01 3113 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1071a835
AV
3114 struct virtchnl_ether_addr_list *al =
3115 (struct virtchnl_ether_addr_list *)msg;
3116 struct ice_pf *pf = vf->pf;
3117 enum virtchnl_ops vc_op;
1071a835 3118 struct ice_vsi *vsi;
1071a835
AV
3119 int i;
3120
ed4c068d 3121 if (set) {
1071a835 3122 vc_op = VIRTCHNL_OP_ADD_ETH_ADDR;
ed4c068d
BC
3123 ice_vc_cfg_mac = ice_vc_add_mac_addr;
3124 } else {
1071a835 3125 vc_op = VIRTCHNL_OP_DEL_ETH_ADDR;
ed4c068d
BC
3126 ice_vc_cfg_mac = ice_vc_del_mac_addr;
3127 }
1071a835
AV
3128
3129 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
3130 !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
cf6c6e01 3131 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
3132 goto handle_mac_exit;
3133 }
3134
ed4c068d
BC
3135 /* If this VF is not privileged, then we can't add more than a
3136 * limited number of addresses. Check to make sure that the
3137 * additions do not push us over the limit.
3138 */
1071a835
AV
3139 if (set && !ice_is_vf_trusted(vf) &&
3140 (vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) {
19cce2c6 3141 dev_err(ice_pf_to_dev(pf), "Can't add more MAC addresses, because VF-%d is not trusted, switch the VF to trusted mode in order to add more functionalities\n",
d84b899a 3142 vf->vf_id);
cf6c6e01 3143 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
3144 goto handle_mac_exit;
3145 }
3146
3147 vsi = pf->vsi[vf->lan_vsi_idx];
f1ef73f5 3148 if (!vsi) {
cf6c6e01 3149 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
f1ef73f5
AA
3150 goto handle_mac_exit;
3151 }
1071a835
AV
3152
3153 for (i = 0; i < al->num_elements; i++) {
ed4c068d
BC
3154 u8 *mac_addr = al->list[i].addr;
3155 int result;
1071a835 3156
ed4c068d
BC
3157 if (is_broadcast_ether_addr(mac_addr) ||
3158 is_zero_ether_addr(mac_addr))
3159 continue;
1071a835 3160
ed4c068d
BC
3161 result = ice_vc_cfg_mac(vf, vsi, mac_addr);
3162 if (result == -EEXIST || result == -ENOENT) {
3163 continue;
3164 } else if (result) {
3165 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
1071a835
AV
3166 goto handle_mac_exit;
3167 }
1071a835
AV
3168 }
3169
1071a835 3170handle_mac_exit:
1071a835 3171 /* send the response to the VF */
cf6c6e01 3172 return ice_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0);
1071a835
AV
3173}
3174
3175/**
3176 * ice_vc_add_mac_addr_msg
3177 * @vf: pointer to the VF info
3178 * @msg: pointer to the msg buffer
3179 *
3180 * add guest MAC address filter
3181 */
3182static int ice_vc_add_mac_addr_msg(struct ice_vf *vf, u8 *msg)
3183{
3184 return ice_vc_handle_mac_addr_msg(vf, msg, true);
3185}
3186
3187/**
3188 * ice_vc_del_mac_addr_msg
3189 * @vf: pointer to the VF info
3190 * @msg: pointer to the msg buffer
3191 *
3192 * remove guest MAC address filter
3193 */
3194static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
3195{
3196 return ice_vc_handle_mac_addr_msg(vf, msg, false);
3197}
3198
3199/**
3200 * ice_vc_request_qs_msg
3201 * @vf: pointer to the VF info
3202 * @msg: pointer to the msg buffer
3203 *
3204 * VFs get a default number of queues but can use this message to request a
df17b7e0 3205 * different number. If the request is successful, the PF will reset the VF and
1071a835 3206 * return 0. If unsuccessful, the PF will send a message informing the VF of
f9867df6 3207 * the number of available queue pairs via the virtchnl message response.
1071a835
AV
3208 */
3209static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
3210{
cf6c6e01 3211 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1071a835
AV
3212 struct virtchnl_vf_res_request *vfres =
3213 (struct virtchnl_vf_res_request *)msg;
cbfe31b5 3214 u16 req_queues = vfres->num_queue_pairs;
1071a835 3215 struct ice_pf *pf = vf->pf;
cbfe31b5
PK
3216 u16 max_allowed_vf_queues;
3217 u16 tx_rx_queue_left;
4015d11e 3218 struct device *dev;
4ee656bb 3219 u16 cur_queues;
1071a835 3220
4015d11e 3221 dev = ice_pf_to_dev(pf);
1071a835 3222 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
cf6c6e01 3223 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
3224 goto error_param;
3225 }
3226
5743020d 3227 cur_queues = vf->num_vf_qs;
8c243700
AV
3228 tx_rx_queue_left = min_t(u16, ice_get_avail_txq_count(pf),
3229 ice_get_avail_rxq_count(pf));
5743020d 3230 max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
cbfe31b5 3231 if (!req_queues) {
4015d11e 3232 dev_err(dev, "VF %d tried to request 0 queues. Ignoring.\n",
cbfe31b5 3233 vf->vf_id);
0ca469fb 3234 } else if (req_queues > ICE_MAX_RSS_QS_PER_VF) {
4015d11e 3235 dev_err(dev, "VF %d tried to request more than %d queues.\n",
0ca469fb
MW
3236 vf->vf_id, ICE_MAX_RSS_QS_PER_VF);
3237 vfres->num_queue_pairs = ICE_MAX_RSS_QS_PER_VF;
cbfe31b5
PK
3238 } else if (req_queues > cur_queues &&
3239 req_queues - cur_queues > tx_rx_queue_left) {
19cce2c6 3240 dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n",
1071a835 3241 vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
cbfe31b5 3242 vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
0ca469fb 3243 ICE_MAX_RSS_QS_PER_VF);
1071a835
AV
3244 } else {
3245 /* request is successful, then reset VF */
3246 vf->num_req_qs = req_queues;
ff010eca 3247 ice_vc_reset_vf(vf);
4015d11e 3248 dev_info(dev, "VF %d granted request of %u queues.\n",
1071a835
AV
3249 vf->vf_id, req_queues);
3250 return 0;
3251 }
3252
3253error_param:
3254 /* send the response to the VF */
3255 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
cf6c6e01 3256 v_ret, (u8 *)vfres, sizeof(*vfres));
1071a835
AV
3257}
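/* Example outcomes, assuming ICE_MAX_RSS_QS_PER_VF = 16: a VF with 4
 * queues requesting 8 while enough PF queues remain free is granted 8
 * and reset; a request for 64 is answered with num_queue_pairs = 16; a
 * request for 0 is only logged. In the non-granted cases the virtchnl
 * response tells the VF how many queue pairs it may request instead.
 */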
3258
7c710869
AV
3259/**
3260 * ice_set_vf_port_vlan
3261 * @netdev: network interface device structure
3262 * @vf_id: VF identifier
f9867df6 3263 * @vlan_id: VLAN ID being set
7c710869
AV
3264 * @qos: priority setting
3265 * @vlan_proto: VLAN protocol
3266 *
f9867df6 3267 * program VF Port VLAN ID and/or QoS
7c710869
AV
3268 */
3269int
3270ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
3271 __be16 vlan_proto)
3272{
4c66d227 3273 struct ice_pf *pf = ice_netdev_to_pf(netdev);
7c710869 3274 struct ice_vsi *vsi;
4015d11e 3275 struct device *dev;
7c710869 3276 struct ice_vf *vf;
61c9ce86 3277 u16 vlanprio;
c54d209c 3278 int ret;
7c710869 3279
4015d11e 3280 dev = ice_pf_to_dev(pf);
4c66d227 3281 if (ice_validate_vf_id(pf, vf_id))
7c710869 3282 return -EINVAL;
7c710869 3283
61c9ce86
BC
3284 if (vlan_id >= VLAN_N_VID || qos > 7) {
3285 dev_err(dev, "Invalid Port VLAN parameters for VF %d, ID %d, QoS %d\n",
3286 vf_id, vlan_id, qos);
7c710869
AV
3287 return -EINVAL;
3288 }
3289
3290 if (vlan_proto != htons(ETH_P_8021Q)) {
4015d11e 3291 dev_err(dev, "VF VLAN protocol is not supported\n");
7c710869
AV
3292 return -EPROTONOSUPPORT;
3293 }
3294
3295 vf = &pf->vf[vf_id];
3296 vsi = pf->vsi[vf->lan_vsi_idx];
c54d209c
BC
3297
3298 ret = ice_check_vf_ready_for_cfg(vf);
3299 if (ret)
3300 return ret;
7c710869 3301
61c9ce86
BC
3302 vlanprio = vlan_id | (qos << VLAN_PRIO_SHIFT);
3303
3304 if (vf->port_vlan_info == vlanprio) {
7c710869 3305 /* duplicate request, so just return success */
4015d11e 3306 dev_dbg(dev, "Duplicate pvid %d request\n", vlanprio);
c54d209c 3307 return 0;
7c710869
AV
3308 }
3309
7c710869 3310 if (vlan_id || qos) {
72634bc2
BC
3311 /* remove VLAN 0 filter set by default when transitioning from
3312 * no port VLAN to a port VLAN. No change to old port VLAN on
3313 * failure.
3314 */
3315 ret = ice_vsi_kill_vlan(vsi, 0);
3316 if (ret)
3317 return ret;
77a7a84d 3318 ret = ice_vsi_manage_pvid(vsi, vlanprio, true);
7c710869 3319 if (ret)
72634bc2 3320 return ret;
7c710869 3321 } else {
72634bc2
BC
3322 /* add VLAN 0 filter back when transitioning from port VLAN to
3323 * no port VLAN. No change to old port VLAN on failure.
3324 */
1b8f15b6 3325 ret = ice_vsi_add_vlan(vsi, 0, ICE_FWD_TO_VSI);
72634bc2
BC
3326 if (ret)
3327 return ret;
b093841f
BC
3328 ret = ice_vsi_manage_pvid(vsi, 0, false);
3329 if (ret)
e65ee2fb 3330 return ret;
7c710869
AV
3331 }
3332
3333 if (vlan_id) {
4015d11e 3334 dev_info(dev, "Setting VLAN %d, QoS 0x%x on VF %d\n",
7c710869
AV
3335 vlan_id, qos, vf_id);
3336
72634bc2 3337 /* add VLAN filter for the port VLAN */
1b8f15b6 3338 ret = ice_vsi_add_vlan(vsi, vlan_id, ICE_FWD_TO_VSI);
7c710869 3339 if (ret)
c54d209c 3340 return ret;
7c710869 3341 }
72634bc2
BC
3342 /* remove old port VLAN filter with valid VLAN ID or QoS fields */
3343 if (vf->port_vlan_info)
3344 ice_vsi_kill_vlan(vsi, vf->port_vlan_info & VLAN_VID_MASK);
7c710869 3345
72634bc2 3346 /* keep port VLAN information persistent on resets */
b093841f 3347 vf->port_vlan_info = le16_to_cpu(vsi->info.pvid);
7c710869 3348
c54d209c 3349 return 0;
7c710869
AV
3350}
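/* Worked example: vlan_id = 100 and qos = 5 give vlanprio =
 * 100 | (5 << VLAN_PRIO_SHIFT) = 0x64 | 0xa000 = 0xa064, the TCI value
 * kept in vf->port_vlan_info and programmed as the port VLAN.
 */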
3351
d4bc4e2d
BC
3352/**
3353 * ice_vf_vlan_offload_ena - determine if capabilities support VLAN offloads
3354 * @caps: VF driver negotiated capabilities
3355 *
3356 * Return true if VIRTCHNL_VF_OFFLOAD_VLAN capability is set, else return false
3357 */
3358static bool ice_vf_vlan_offload_ena(u32 caps)
3359{
3360 return !!(caps & VIRTCHNL_VF_OFFLOAD_VLAN);
3361}
3362
1071a835
AV
3363/**
3364 * ice_vc_process_vlan_msg
3365 * @vf: pointer to the VF info
3366 * @msg: pointer to the msg buffer
3367 * @add_v: Add VLAN if true, otherwise delete VLAN
3368 *
f9867df6 3369 * Process virtchnl op to add or remove programmed guest VLAN ID
1071a835
AV
3370 */
3371static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
3372{
cf6c6e01 3373 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1071a835
AV
3374 struct virtchnl_vlan_filter_list *vfl =
3375 (struct virtchnl_vlan_filter_list *)msg;
1071a835 3376 struct ice_pf *pf = vf->pf;
5eda8afd 3377 bool vlan_promisc = false;
1071a835 3378 struct ice_vsi *vsi;
4015d11e 3379 struct device *dev;
5eda8afd
AA
3380 struct ice_hw *hw;
3381 int status = 0;
3382 u8 promisc_m;
1071a835
AV
3383 int i;
3384
4015d11e 3385 dev = ice_pf_to_dev(pf);
1071a835 3386 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
cf6c6e01 3387 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
3388 goto error_param;
3389 }
3390
d4bc4e2d
BC
3391 if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
3392 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3393 goto error_param;
3394 }
3395
1071a835 3396 if (!ice_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
cf6c6e01 3397 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
3398 goto error_param;
3399 }
3400
1071a835 3401 for (i = 0; i < vfl->num_elements; i++) {
61c9ce86 3402 if (vfl->vlan_id[i] >= VLAN_N_VID) {
cf6c6e01 3403 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
19cce2c6
AV
3404 dev_err(dev, "invalid VF VLAN id %d\n",
3405 vfl->vlan_id[i]);
1071a835
AV
3406 goto error_param;
3407 }
3408 }
3409
5eda8afd 3410 hw = &pf->hw;
f1ef73f5 3411 vsi = pf->vsi[vf->lan_vsi_idx];
1071a835 3412 if (!vsi) {
cf6c6e01 3413 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
3414 goto error_param;
3415 }
3416
cd6d6b83
BC
3417 if (add_v && !ice_is_vf_trusted(vf) &&
3418 vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
19cce2c6 3419 dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
cd6d6b83
BC
3420 vf->vf_id);
3421 		/* There is no need to let the VF know about being untrusted,
3422 		 * so we can just return a success message here
3423 		 */
3424 goto error_param;
3425 }
3426
1071a835 3427 if (vsi->info.pvid) {
cf6c6e01 3428 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
3429 goto error_param;
3430 }
3431
01b5e89a
BC
3432 if ((test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
3433 test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) &&
3434 test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags))
5eda8afd
AA
3435 vlan_promisc = true;
3436
1071a835
AV
3437 if (add_v) {
3438 for (i = 0; i < vfl->num_elements; i++) {
3439 u16 vid = vfl->vlan_id[i];
3440
5079b853 3441 if (!ice_is_vf_trusted(vf) &&
cd6d6b83 3442 vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
19cce2c6 3443 dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
5079b853
AA
3444 vf->vf_id);
3445 				/* There is no need to let the VF know about
3446 				 * being untrusted, so we can just return a
3447 				 * success message here as well.
3448 				 */
3449 goto error_param;
3450 }
3451
cd6d6b83
BC
3452 /* we add VLAN 0 by default for each VF so we can enable
3453 			 * Tx VLAN anti-spoof without triggering MDD events, so
3454 * we don't need to add it again here
3455 */
3456 if (!vid)
3457 continue;
3458
1b8f15b6 3459 status = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI);
cd6d6b83 3460 if (status) {
cf6c6e01 3461 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
5eda8afd
AA
3462 goto error_param;
3463 }
1071a835 3464
42f3efef
BC
3465 			/* Enable VLAN pruning when a non-zero VLAN is added */
3466 if (!vlan_promisc && vid &&
3467 !ice_vsi_is_vlan_pruning_ena(vsi)) {
5eda8afd
AA
3468 status = ice_cfg_vlan_pruning(vsi, true, false);
3469 if (status) {
cf6c6e01 3470 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
19cce2c6 3471 dev_err(dev, "Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
5eda8afd
AA
3472 vid, status);
3473 goto error_param;
3474 }
42f3efef 3475 } else if (vlan_promisc) {
5eda8afd
AA
3476 /* Enable Ucast/Mcast VLAN promiscuous mode */
3477 promisc_m = ICE_PROMISC_VLAN_TX |
3478 ICE_PROMISC_VLAN_RX;
3479
3480 status = ice_set_vsi_promisc(hw, vsi->idx,
3481 promisc_m, vid);
cf6c6e01
MW
3482 if (status) {
3483 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
19cce2c6 3484 dev_err(dev, "Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n",
5eda8afd 3485 vid, status);
cf6c6e01 3486 }
1071a835
AV
3487 }
3488 }
3489 } else {
bb877b22
AA
3490 		/* In case of a non-trusted VF, the number of VLAN elements
3491 		 * passed to the PF for removal might be greater than the
3492 		 * number of VLAN filters programmed for that VF. So, use the
3493 		 * actual number of VLANs added earlier with the add VLAN
3494 		 * opcode to avoid removing a VLAN that doesn't exist, which
3495 		 * would result in sending an erroneous failed message back to the VF
3496 		 */
3497 int num_vf_vlan;
3498
cd6d6b83 3499 num_vf_vlan = vsi->num_vlan;
bb877b22 3500 for (i = 0; i < vfl->num_elements && i < num_vf_vlan; i++) {
1071a835
AV
3501 u16 vid = vfl->vlan_id[i];
3502
cd6d6b83
BC
3503 /* we add VLAN 0 by default for each VF so we can enable
3504 			 * Tx VLAN anti-spoof without triggering MDD events, so
3505 * we don't want a VIRTCHNL request to remove it
3506 */
3507 if (!vid)
3508 continue;
3509
1071a835
AV
3510 /* Make sure ice_vsi_kill_vlan is successful before
3511 * updating VLAN information
3512 */
cd6d6b83
BC
3513 status = ice_vsi_kill_vlan(vsi, vid);
3514 if (status) {
cf6c6e01 3515 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
5eda8afd
AA
3516 goto error_param;
3517 }
3518
42f3efef
BC
3519 /* Disable VLAN pruning when only VLAN 0 is left */
3520 if (vsi->num_vlan == 1 &&
3521 ice_vsi_is_vlan_pruning_ena(vsi))
cd186e51 3522 ice_cfg_vlan_pruning(vsi, false, false);
5eda8afd
AA
3523
3524 /* Disable Unicast/Multicast VLAN promiscuous mode */
3525 if (vlan_promisc) {
3526 promisc_m = ICE_PROMISC_VLAN_TX |
3527 ICE_PROMISC_VLAN_RX;
1071a835 3528
5eda8afd
AA
3529 ice_clear_vsi_promisc(hw, vsi->idx,
3530 promisc_m, vid);
1071a835
AV
3531 }
3532 }
3533 }
3534
3535error_param:
3536 /* send the response to the VF */
3537 if (add_v)
cf6c6e01 3538 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, v_ret,
1071a835
AV
3539 NULL, 0);
3540 else
cf6c6e01 3541 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, v_ret,
1071a835
AV
3542 NULL, 0);
3543}
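
/* A minimal sketch of the message this handler parses. The VF sends a
 * variably sized struct virtchnl_vlan_filter_list; assuming the
 * one-element trailing array used by this virtchnl version, the VF
 * side builds it roughly like this (illustrative, not driver code):
 *
 *	int len = sizeof(struct virtchnl_vlan_filter_list) +
 *		  (count - 1) * sizeof(u16);
 *	struct virtchnl_vlan_filter_list *vfl = kzalloc(len, GFP_KERNEL);
 *	int i;
 *
 *	if (!vfl)
 *		return -ENOMEM;
 *	vfl->vsi_id = vsi_id;
 *	vfl->num_elements = count;
 *	for (i = 0; i < count; i++)
 *		vfl->vlan_id[i] = vids[i];
 *
 * and sends it with VIRTCHNL_OP_ADD_VLAN or VIRTCHNL_OP_DEL_VLAN, which
 * routes it to ice_vc_process_vlan_msg() via the wrappers below.
 */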
3544
3545/**
3546 * ice_vc_add_vlan_msg
3547 * @vf: pointer to the VF info
3548 * @msg: pointer to the msg buffer
3549 *
f9867df6 3550 * Add and program guest VLAN ID
1071a835
AV
3551 */
3552static int ice_vc_add_vlan_msg(struct ice_vf *vf, u8 *msg)
3553{
3554 return ice_vc_process_vlan_msg(vf, msg, true);
3555}
3556
3557/**
3558 * ice_vc_remove_vlan_msg
3559 * @vf: pointer to the VF info
3560 * @msg: pointer to the msg buffer
3561 *
f9867df6 3562 * Remove programmed guest VLAN ID
1071a835
AV
3563 */
3564static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg)
3565{
3566 return ice_vc_process_vlan_msg(vf, msg, false);
3567}
3568
3569/**
3570 * ice_vc_ena_vlan_stripping
3571 * @vf: pointer to the VF info
3572 *
3573 * Enable VLAN header stripping for a given VF
3574 */
3575static int ice_vc_ena_vlan_stripping(struct ice_vf *vf)
3576{
cf6c6e01 3577 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1071a835
AV
3578 struct ice_pf *pf = vf->pf;
3579 struct ice_vsi *vsi;
3580
3581 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
cf6c6e01 3582 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
3583 goto error_param;
3584 }
3585
d4bc4e2d
BC
3586 if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
3587 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3588 goto error_param;
3589 }
3590
1071a835
AV
3591 vsi = pf->vsi[vf->lan_vsi_idx];
3592 if (ice_vsi_manage_vlan_stripping(vsi, true))
cf6c6e01 3593 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
3594
3595error_param:
3596 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
cf6c6e01 3597 v_ret, NULL, 0);
1071a835
AV
3598}
3599
3600/**
3601 * ice_vc_dis_vlan_stripping
3602 * @vf: pointer to the VF info
3603 *
3604 * Disable VLAN header stripping for a given VF
3605 */
3606static int ice_vc_dis_vlan_stripping(struct ice_vf *vf)
3607{
cf6c6e01 3608 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1071a835
AV
3609 struct ice_pf *pf = vf->pf;
3610 struct ice_vsi *vsi;
3611
3612 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
cf6c6e01 3613 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
3614 goto error_param;
3615 }
3616
d4bc4e2d
BC
3617 if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
3618 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3619 goto error_param;
3620 }
3621
1071a835 3622 vsi = pf->vsi[vf->lan_vsi_idx];
f1ef73f5 3623 if (!vsi) {
cf6c6e01 3624 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
f1ef73f5
AA
3625 goto error_param;
3626 }
3627
1071a835 3628 if (ice_vsi_manage_vlan_stripping(vsi, false))
cf6c6e01 3629 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
3630
3631error_param:
3632 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
cf6c6e01 3633 v_ret, NULL, 0);
1071a835
AV
3634}
3635
2f9ec241
BC
3636/**
3637 * ice_vf_init_vlan_stripping - enable/disable VLAN stripping on initialization
3638 * @vf: VF to enable/disable VLAN stripping for on initialization
3639 *
3640 * If the VIRTCHNL_VF_OFFLOAD_VLAN flag is set, enable VLAN stripping; if
3641 * the flag is cleared, disable stripping. For example, the flag will be
3642 * cleared when port VLANs are configured by the administrator before
3643 * passing the VF to the guest, or if the AVF driver doesn't support VLAN
3644 * offloads.
3645 */
3646static int ice_vf_init_vlan_stripping(struct ice_vf *vf)
3647{
3648 struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
3649
3650 if (!vsi)
3651 return -EINVAL;
3652
3653 /* don't modify stripping if port VLAN is configured */
3654 if (vsi->info.pvid)
3655 return 0;
3656
3657 if (ice_vf_vlan_offload_ena(vf->driver_caps))
3658 return ice_vsi_manage_vlan_stripping(vsi, true);
3659 else
3660 return ice_vsi_manage_vlan_stripping(vsi, false);
3661}
3662
1071a835
AV
3663/**
3664 * ice_vc_process_vf_msg - Process request from VF
3665 * @pf: pointer to the PF structure
3666 * @event: pointer to the AQ event
3667 *
3668 * Called from the common ASQ/ARQ handler to
3669 * process a request from a VF
3670 */
3671void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
3672{
3673 u32 v_opcode = le32_to_cpu(event->desc.cookie_high);
3674 s16 vf_id = le16_to_cpu(event->desc.retval);
3675 u16 msglen = event->msg_len;
3676 u8 *msg = event->msg_buf;
3677 struct ice_vf *vf = NULL;
4015d11e 3678 struct device *dev;
1071a835
AV
3679 int err = 0;
3680
4015d11e 3681 dev = ice_pf_to_dev(pf);
4c66d227 3682 if (ice_validate_vf_id(pf, vf_id)) {
1071a835
AV
3683 err = -EINVAL;
3684 goto error_handler;
3685 }
3686
3687 vf = &pf->vf[vf_id];
3688
3689 /* Check if VF is disabled. */
3690 if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) {
3691 err = -EPERM;
3692 goto error_handler;
3693 }
3694
3695 /* Perform basic checks on the msg */
3696 err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
3697 if (err) {
cf6c6e01 3698 if (err == VIRTCHNL_STATUS_ERR_PARAM)
1071a835
AV
3699 err = -EPERM;
3700 else
3701 err = -EINVAL;
1071a835
AV
3702 }
3703
3704error_handler:
3705 if (err) {
cf6c6e01
MW
3706 ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM,
3707 NULL, 0);
4015d11e 3708 dev_err(dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
1071a835
AV
3709 vf_id, v_opcode, msglen, err);
3710 return;
3711 }
3712
3713 switch (v_opcode) {
3714 case VIRTCHNL_OP_VERSION:
3715 err = ice_vc_get_ver_msg(vf, msg);
3716 break;
3717 case VIRTCHNL_OP_GET_VF_RESOURCES:
3718 err = ice_vc_get_vf_res_msg(vf, msg);
2f9ec241 3719 if (ice_vf_init_vlan_stripping(vf))
19cce2c6 3720 dev_err(dev, "Failed to initialize VLAN stripping for VF %d\n",
2f9ec241 3721 vf->vf_id);
dfc62400 3722 ice_vc_notify_vf_link_state(vf);
1071a835
AV
3723 break;
3724 case VIRTCHNL_OP_RESET_VF:
3725 ice_vc_reset_vf_msg(vf);
3726 break;
3727 case VIRTCHNL_OP_ADD_ETH_ADDR:
3728 err = ice_vc_add_mac_addr_msg(vf, msg);
3729 break;
3730 case VIRTCHNL_OP_DEL_ETH_ADDR:
3731 err = ice_vc_del_mac_addr_msg(vf, msg);
3732 break;
3733 case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
3734 err = ice_vc_cfg_qs_msg(vf, msg);
3735 break;
3736 case VIRTCHNL_OP_ENABLE_QUEUES:
3737 err = ice_vc_ena_qs_msg(vf, msg);
3738 ice_vc_notify_vf_link_state(vf);
3739 break;
3740 case VIRTCHNL_OP_DISABLE_QUEUES:
3741 err = ice_vc_dis_qs_msg(vf, msg);
3742 break;
3743 case VIRTCHNL_OP_REQUEST_QUEUES:
3744 err = ice_vc_request_qs_msg(vf, msg);
3745 break;
3746 case VIRTCHNL_OP_CONFIG_IRQ_MAP:
3747 err = ice_vc_cfg_irq_map_msg(vf, msg);
3748 break;
3749 case VIRTCHNL_OP_CONFIG_RSS_KEY:
3750 err = ice_vc_config_rss_key(vf, msg);
3751 break;
3752 case VIRTCHNL_OP_CONFIG_RSS_LUT:
3753 err = ice_vc_config_rss_lut(vf, msg);
3754 break;
3755 case VIRTCHNL_OP_GET_STATS:
3756 err = ice_vc_get_stats_msg(vf, msg);
3757 break;
01b5e89a
BC
3758 case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
3759 err = ice_vc_cfg_promiscuous_mode_msg(vf, msg);
3760 break;
1071a835
AV
3761 case VIRTCHNL_OP_ADD_VLAN:
3762 err = ice_vc_add_vlan_msg(vf, msg);
3763 break;
3764 case VIRTCHNL_OP_DEL_VLAN:
3765 err = ice_vc_remove_vlan_msg(vf, msg);
3766 break;
3767 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
3768 err = ice_vc_ena_vlan_stripping(vf);
3769 break;
3770 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
3771 err = ice_vc_dis_vlan_stripping(vf);
3772 break;
3773 case VIRTCHNL_OP_UNKNOWN:
3774 default:
4015d11e
BC
3775 dev_err(dev, "Unsupported opcode %d from VF %d\n", v_opcode,
3776 vf_id);
cf6c6e01
MW
3777 err = ice_vc_send_msg_to_vf(vf, v_opcode,
3778 VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
1071a835
AV
3779 NULL, 0);
3780 break;
3781 }
3782 if (err) {
3783 		/* The helper function cares less about error return values
3784 		 * here, as it is busy with pending work.
3785 		 */
4015d11e 3786 dev_info(dev, "PF failed to honor VF %d, opcode %d, error %d\n",
1071a835
AV
3787 vf_id, v_opcode, err);
3788 }
3789}
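
/* A minimal sketch of how a new opcode would slot into the dispatch
 * switch above (hypothetical handler name, for illustration only):
 *
 *	case VIRTCHNL_OP_FOO:
 *		err = ice_vc_foo_msg(vf, msg);
 *		break;
 *
 * Each handler replies to the VF itself via ice_vc_send_msg_to_vf()
 * with a virtchnl_status_code; the err value collected here is only
 * logged.
 */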
3790
7c710869
AV
3791/**
3792 * ice_get_vf_cfg
3793 * @netdev: network interface device structure
3794 * @vf_id: VF identifier
3795 * @ivi: VF configuration structure
3796 *
3797 * Return the VF configuration
3798 */
c8b7abdd
BA
3799int
3800ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
7c710869 3801{
4c66d227 3802 struct ice_pf *pf = ice_netdev_to_pf(netdev);
7c710869
AV
3803 struct ice_vf *vf;
3804
4c66d227 3805 if (ice_validate_vf_id(pf, vf_id))
7c710869 3806 return -EINVAL;
7c710869
AV
3807
3808 vf = &pf->vf[vf_id];
7c710869 3809
4c66d227 3810 if (ice_check_vf_init(pf, vf))
7c710869 3811 return -EBUSY;
7c710869
AV
3812
3813 ivi->vf = vf_id;
3814 ether_addr_copy(ivi->mac, vf->dflt_lan_addr.addr);
3815
3816 /* VF configuration for VLAN and applicable QoS */
61c9ce86
BC
3817 ivi->vlan = vf->port_vlan_info & VLAN_VID_MASK;
3818 ivi->qos = (vf->port_vlan_info & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
7c710869
AV
3819
3820 ivi->trusted = vf->trusted;
3821 ivi->spoofchk = vf->spoofchk;
3822 if (!vf->link_forced)
3823 ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
3824 else if (vf->link_up)
3825 ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
3826 else
3827 ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
3828 ivi->max_tx_rate = vf->tx_rate;
3829 ivi->min_tx_rate = 0;
3830 return 0;
3831}
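
/* A usage sketch, assuming the standard SR-IOV wiring in ice_main.c:
 * these callbacks are exposed to the stack through net_device_ops,
 * e.g.:
 *
 *	.ndo_get_vf_config = ice_get_vf_cfg,
 *	.ndo_set_vf_mac = ice_set_vf_mac,
 *	.ndo_set_vf_trust = ice_set_vf_trust,
 *	.ndo_set_vf_link_state = ice_set_vf_link_state,
 *	.ndo_get_vf_stats = ice_get_vf_stats,
 *
 * which is what "ip link show <pf>" and "ip link set <pf> vf N ..."
 * ultimately call into.
 */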
3832
47ebc7b0
BC
3833/**
3834 * ice_unicast_mac_exists - check if the unicast MAC exists on the PF's switch
3835 * @pf: PF used to reference the switch's rules
3836 * @umac: unicast MAC to compare against existing switch rules
3837 *
3838 * Return true on the first/any match, else return false
3839 */
3840static bool ice_unicast_mac_exists(struct ice_pf *pf, u8 *umac)
3841{
3842 struct ice_sw_recipe *mac_recipe_list =
3843 &pf->hw.switch_info->recp_list[ICE_SW_LKUP_MAC];
3844 struct ice_fltr_mgmt_list_entry *list_itr;
3845 struct list_head *rule_head;
3846 struct mutex *rule_lock; /* protect MAC filter list access */
3847
3848 rule_head = &mac_recipe_list->filt_rules;
3849 rule_lock = &mac_recipe_list->filt_rule_lock;
3850
3851 mutex_lock(rule_lock);
3852 list_for_each_entry(list_itr, rule_head, list_entry) {
3853 u8 *existing_mac = &list_itr->fltr_info.l_data.mac.mac_addr[0];
3854
3855 if (ether_addr_equal(existing_mac, umac)) {
3856 mutex_unlock(rule_lock);
3857 return true;
3858 }
3859 }
3860
3861 mutex_unlock(rule_lock);
3862
3863 return false;
3864}
3865
7c710869
AV
3866/**
3867 * ice_set_vf_mac
3868 * @netdev: network interface device structure
3869 * @vf_id: VF identifier
f9867df6 3870 * @mac: MAC address
7c710869 3871 *
f9867df6 3872 * Program the VF's MAC address
7c710869
AV
3873 */
3874int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
3875{
4c66d227 3876 struct ice_pf *pf = ice_netdev_to_pf(netdev);
7c710869 3877 struct ice_vf *vf;
c54d209c 3878 int ret;
7c710869 3879
4c66d227 3880 if (ice_validate_vf_id(pf, vf_id))
7c710869 3881 return -EINVAL;
7c710869 3882
7c710869
AV
3883 if (is_zero_ether_addr(mac) || is_multicast_ether_addr(mac)) {
3884 netdev_err(netdev, "%pM not a valid unicast address\n", mac);
3885 return -EINVAL;
3886 }
3887
c54d209c 3888 vf = &pf->vf[vf_id];
47ebc7b0
BC
3889 /* nothing left to do, unicast MAC already set */
3890 if (ether_addr_equal(vf->dflt_lan_addr.addr, mac))
3891 return 0;
3892
c54d209c
BC
3893 ret = ice_check_vf_ready_for_cfg(vf);
3894 if (ret)
3895 return ret;
3896
47ebc7b0
BC
3897 if (ice_unicast_mac_exists(pf, mac)) {
3898 netdev_err(netdev, "Unicast MAC %pM already exists on this PF. Preventing setting VF %u unicast MAC address to %pM\n",
3899 mac, vf_id, mac);
3900 return -EINVAL;
3901 }
3902
f9867df6 3903 /* copy MAC into dflt_lan_addr and trigger a VF reset. The reset
7c710869
AV
3904 * flow will use the updated dflt_lan_addr and add a MAC filter
3905 * using ice_add_mac. Also set pf_set_mac to indicate that the PF has
3906 * set the MAC address for this VF.
3907 */
3908 ether_addr_copy(vf->dflt_lan_addr.addr, mac);
3909 vf->pf_set_mac = true;
19cce2c6 3910 netdev_info(netdev, "MAC on VF %d set to %pM. VF driver will be reinitialized\n",
7c710869
AV
3911 vf_id, mac);
3912
ff010eca 3913 ice_vc_reset_vf(vf);
c54d209c 3914 return 0;
7c710869
AV
3915}
3916
3917/**
3918 * ice_set_vf_trust
3919 * @netdev: network interface device structure
3920 * @vf_id: VF identifier
3921 * @trusted: Boolean value to enable/disable trusted VF
3922 *
3923 * Enable or disable a given VF as trusted
3924 */
3925int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
3926{
4c66d227 3927 struct ice_pf *pf = ice_netdev_to_pf(netdev);
7c710869 3928 struct ice_vf *vf;
c54d209c 3929 int ret;
7c710869 3930
4c66d227 3931 if (ice_validate_vf_id(pf, vf_id))
7c710869 3932 return -EINVAL;
7c710869
AV
3933
3934 vf = &pf->vf[vf_id];
c54d209c
BC
3935 ret = ice_check_vf_ready_for_cfg(vf);
3936 if (ret)
3937 return ret;
7c710869
AV
3938
3939 /* Check if already trusted */
3940 if (trusted == vf->trusted)
3941 return 0;
3942
3943 vf->trusted = trusted;
ff010eca 3944 ice_vc_reset_vf(vf);
19cce2c6 3945 dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n",
7c710869
AV
3946 vf_id, trusted ? "" : "un");
3947
3948 return 0;
3949}
3950
3951/**
3952 * ice_set_vf_link_state
3953 * @netdev: network interface device structure
3954 * @vf_id: VF identifier
3955 * @link_state: required link state
3956 *
3957 * Set VF's link state, irrespective of the physical link status
3958 */
3959int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
3960{
4c66d227 3961 struct ice_pf *pf = ice_netdev_to_pf(netdev);
7c710869 3962 struct ice_vf *vf;
c54d209c 3963 int ret;
7c710869 3964
4c66d227 3965 if (ice_validate_vf_id(pf, vf_id))
7c710869 3966 return -EINVAL;
7c710869
AV
3967
3968 vf = &pf->vf[vf_id];
c54d209c
BC
3969 ret = ice_check_vf_ready_for_cfg(vf);
3970 if (ret)
3971 return ret;
7c710869 3972
7c710869
AV
3973 switch (link_state) {
3974 case IFLA_VF_LINK_STATE_AUTO:
3975 vf->link_forced = false;
7c710869
AV
3976 break;
3977 case IFLA_VF_LINK_STATE_ENABLE:
3978 vf->link_forced = true;
3979 vf->link_up = true;
3980 break;
3981 case IFLA_VF_LINK_STATE_DISABLE:
3982 vf->link_forced = true;
3983 vf->link_up = false;
3984 break;
3985 default:
3986 return -EINVAL;
3987 }
3988
26a91525 3989 ice_vc_notify_vf_link_state(vf);
7c710869
AV
3990
3991 return 0;
3992}
730fdea4
JB
3993
3994/**
3995 * ice_get_vf_stats - populate some stats for the VF
3996 * @netdev: the netdev of the PF
3997 * @vf_id: the host OS identifier (0-255)
3998 * @vf_stats: pointer to the OS memory to be initialized
3999 */
4000int ice_get_vf_stats(struct net_device *netdev, int vf_id,
4001 struct ifla_vf_stats *vf_stats)
4002{
4003 struct ice_pf *pf = ice_netdev_to_pf(netdev);
4004 struct ice_eth_stats *stats;
4005 struct ice_vsi *vsi;
4006 struct ice_vf *vf;
c54d209c 4007 int ret;
730fdea4
JB
4008
4009 if (ice_validate_vf_id(pf, vf_id))
4010 return -EINVAL;
4011
4012 vf = &pf->vf[vf_id];
c54d209c
BC
4013 ret = ice_check_vf_ready_for_cfg(vf);
4014 if (ret)
4015 return ret;
730fdea4
JB
4016
4017 vsi = pf->vsi[vf->lan_vsi_idx];
4018 if (!vsi)
4019 return -EINVAL;
4020
4021 ice_update_eth_stats(vsi);
4022 stats = &vsi->eth_stats;
4023
4024 memset(vf_stats, 0, sizeof(*vf_stats));
4025
4026 vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
4027 stats->rx_multicast;
4028 vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
4029 stats->tx_multicast;
4030 vf_stats->rx_bytes = stats->rx_bytes;
4031 vf_stats->tx_bytes = stats->tx_bytes;
4032 vf_stats->broadcast = stats->rx_broadcast;
4033 vf_stats->multicast = stats->rx_multicast;
4034 vf_stats->rx_dropped = stats->rx_discards;
4035 vf_stats->tx_dropped = stats->tx_discards;
4036
4037 return 0;
4038}
9d5c5a52 4039
7438a3b0
PG
4040/**
4041 * ice_print_vf_rx_mdd_event - print VF Rx malicious driver detect event
4042 * @vf: pointer to the VF structure
4043 */
4044void ice_print_vf_rx_mdd_event(struct ice_vf *vf)
4045{
4046 struct ice_pf *pf = vf->pf;
4047 struct device *dev;
4048
4049 dev = ice_pf_to_dev(pf);
4050
4051 dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
4052 vf->mdd_rx_events.count, pf->hw.pf_id, vf->vf_id,
4053 vf->dflt_lan_addr.addr,
4054 test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
4055 ? "on" : "off");
4056}
4057
9d5c5a52
PG
4058/**
4059 * ice_print_vfs_mdd_events - print VFs malicious driver detect events
4060 * @pf: pointer to the PF structure
4061 *
4062 * Called from ice_handle_mdd_event to rate limit and print VFs MDD events.
4063 */
4064void ice_print_vfs_mdd_events(struct ice_pf *pf)
4065{
4066 struct device *dev = ice_pf_to_dev(pf);
4067 struct ice_hw *hw = &pf->hw;
4068 int i;
4069
4070 /* check that there are pending MDD events to print */
4071 if (!test_and_clear_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state))
4072 return;
4073
4074 	/* VF MDD event logs are rate limited to one-second intervals */
4075 if (time_is_after_jiffies(pf->last_printed_mdd_jiffies + HZ * 1))
4076 return;
4077
4078 pf->last_printed_mdd_jiffies = jiffies;
4079
4080 ice_for_each_vf(pf, i) {
4081 struct ice_vf *vf = &pf->vf[i];
4082
4083 /* only print Rx MDD event message if there are new events */
4084 if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) {
4085 vf->mdd_rx_events.last_printed =
4086 vf->mdd_rx_events.count;
7438a3b0 4087 ice_print_vf_rx_mdd_event(vf);
9d5c5a52
PG
4088 }
4089
4090 /* only print Tx MDD event message if there are new events */
4091 if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) {
4092 vf->mdd_tx_events.last_printed =
4093 vf->mdd_tx_events.count;
4094
4095 dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n",
4096 vf->mdd_tx_events.count, hw->pf_id, i,
4097 vf->dflt_lan_addr.addr);
4098 }
4099 }
4100}
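
/* A minimal sketch of the rate-limit idiom used above, applied to a
 * hypothetical event counter: print at most once per second, and only
 * when the counter has moved since the last print:
 *
 *	if (time_is_after_jiffies(last_printed + HZ))
 *		return;
 *	last_printed = jiffies;
 *	if (count != last_count) {
 *		last_count = count;
 *		dev_info(dev, "%u events\n", count);
 *	}
 *
 * time_is_after_jiffies(a) is true while 'a' is still in the future,
 * i.e. while less than one second has elapsed since the last print.
 */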