ice: Separate VF VSI initialization/creation from reset flow
drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c

// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"

/**
 * ice_validate_vf_id - helper to check if VF ID is valid
 * @pf: pointer to the PF structure
 * @vf_id: the ID of the VF to check
 */
static int ice_validate_vf_id(struct ice_pf *pf, u16 vf_id)
{
	/* vf_id range is only valid for 0-255, and should always be unsigned */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(ice_pf_to_dev(pf), "Invalid VF ID: %u\n", vf_id);
		return -EINVAL;
	}
	return 0;
}

/**
 * ice_check_vf_init - helper to check if VF init complete
 * @pf: pointer to the PF structure
 * @vf: the pointer to the VF to check
 */
static int ice_check_vf_init(struct ice_pf *pf, struct ice_vf *vf)
{
	if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
		dev_err(ice_pf_to_dev(pf), "VF ID: %u in reset. Try again.\n",
			vf->vf_id);
		return -EBUSY;
	}
	return 0;
}

/**
 * ice_err_to_virt_err - translate errors for VF return code
 * @ice_err: error return code
 */
static enum virtchnl_status_code ice_err_to_virt_err(enum ice_status ice_err)
{
	switch (ice_err) {
	case ICE_SUCCESS:
		return VIRTCHNL_STATUS_SUCCESS;
	case ICE_ERR_BAD_PTR:
	case ICE_ERR_INVAL_SIZE:
	case ICE_ERR_DEVICE_NOT_SUPPORTED:
	case ICE_ERR_PARAM:
	case ICE_ERR_CFG:
		return VIRTCHNL_STATUS_ERR_PARAM;
	case ICE_ERR_NO_MEMORY:
		return VIRTCHNL_STATUS_ERR_NO_MEMORY;
	case ICE_ERR_NOT_READY:
	case ICE_ERR_RESET_FAILED:
	case ICE_ERR_FW_API_VER:
	case ICE_ERR_AQ_ERROR:
	case ICE_ERR_AQ_TIMEOUT:
	case ICE_ERR_AQ_FULL:
	case ICE_ERR_AQ_NO_WORK:
	case ICE_ERR_AQ_EMPTY:
		return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
	default:
		return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
	}
}

/**
 * ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
 * @pf: pointer to the PF structure
 * @v_opcode: operation code
 * @v_retval: return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 */
static void
ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
		    enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
{
	struct ice_hw *hw = &pf->hw;
	unsigned int i;

	ice_for_each_vf(pf, i) {
		struct ice_vf *vf = &pf->vf[i];

		/* Not all VFs are enabled so skip the ones that are not */
		if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
		    !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
			continue;

		/* Ignore return value on purpose - a given VF may fail, but
		 * we need to keep going and send to all of them
		 */
		ice_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, msg,
				      msglen, NULL);
	}
}

/**
 * ice_set_pfe_link - Set the link speed/status of the virtchnl_pf_event
 * @vf: pointer to the VF structure
 * @pfe: pointer to the virtchnl_pf_event to set link speed/status for
 * @ice_link_speed: link speed specified by ICE_AQ_LINK_SPEED_*
 * @link_up: whether or not to set the link up/down
 */
static void
ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
		 int ice_link_speed, bool link_up)
{
	if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
		pfe->event_data.link_event_adv.link_status = link_up;
		/* Speed in Mbps */
		pfe->event_data.link_event_adv.link_speed =
			ice_conv_link_speed_to_virtchnl(true, ice_link_speed);
	} else {
		pfe->event_data.link_event.link_status = link_up;
		/* Legacy method for virtchnl link speeds */
		pfe->event_data.link_event.link_speed =
			(enum virtchnl_link_speed)
			ice_conv_link_speed_to_virtchnl(false, ice_link_speed);
	}
}

/**
 * ice_vf_has_no_qs_ena - check if the VF has any Rx or Tx queues enabled
 * @vf: the VF to check
 *
 * Returns true if the VF has no Rx and no Tx queues enabled and returns false
 * otherwise
 */
static bool ice_vf_has_no_qs_ena(struct ice_vf *vf)
{
	return (!bitmap_weight(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF) &&
		!bitmap_weight(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF));
}

/**
 * ice_is_vf_link_up - check if the VF's link is up
 * @vf: VF to check if link is up
 */
static bool ice_is_vf_link_up(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;

	if (ice_check_vf_init(pf, vf))
		return false;

	if (ice_vf_has_no_qs_ena(vf))
		return false;
	else if (vf->link_forced)
		return vf->link_up;
	else
		return pf->hw.port_info->phy.link_info.link_info &
			ICE_AQ_LINK_UP;
}

/**
 * ice_vc_notify_vf_link_state - Inform a VF of link status
 * @vf: pointer to the VF structure
 *
 * send a link status message to a single VF
 */
static void ice_vc_notify_vf_link_state(struct ice_vf *vf)
{
	struct virtchnl_pf_event pfe = { 0 };
	struct ice_hw *hw = &vf->pf->hw;

	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = PF_EVENT_SEVERITY_INFO;

	if (ice_is_vf_link_up(vf))
		ice_set_pfe_link(vf, &pfe,
				 hw->port_info->phy.link_info.link_speed, true);
	else
		ice_set_pfe_link(vf, &pfe, ICE_AQ_LINK_SPEED_UNKNOWN, false);

	ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
			      VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
			      sizeof(pfe), NULL);
}

/**
 * ice_free_vf_res - Free a VF's resources
 * @vf: pointer to the VF info
 */
static void ice_free_vf_res(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	int i, last_vector_idx;

	/* First, disable VF's configuration API to prevent OS from
	 * accessing the VF's VSI after it's freed or invalidated.
	 */
	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);

	/* free VSI and disconnect it from the parent uplink */
	if (vf->lan_vsi_idx) {
		ice_vsi_release(pf->vsi[vf->lan_vsi_idx]);
		vf->lan_vsi_idx = 0;
		vf->lan_vsi_num = 0;
		vf->num_mac = 0;
	}

	last_vector_idx = vf->first_vector_idx + pf->num_msix_per_vf - 1;

	/* clear VF MDD event information */
	memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
	memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));

	/* Disable interrupts so that VF starts in a known state */
	for (i = vf->first_vector_idx; i <= last_vector_idx; i++) {
		wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M);
		ice_flush(&pf->hw);
	}
	/* reset some of the state variables keeping track of the resources */
	clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
	clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
}

/**
 * ice_dis_vf_mappings
 * @vf: pointer to the VF structure
 */
static void ice_dis_vf_mappings(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	struct device *dev;
	int first, last, v;
	struct ice_hw *hw;

	hw = &pf->hw;
	vsi = pf->vsi[vf->lan_vsi_idx];

	dev = ice_pf_to_dev(pf);
	wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
	wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);

	first = vf->first_vector_idx;
	last = first + pf->num_msix_per_vf - 1;
	for (v = first; v <= last; v++) {
		u32 reg;

		reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) &
			GLINT_VECT2FUNC_IS_PF_M) |
		       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
			GLINT_VECT2FUNC_PF_NUM_M));
		wr32(hw, GLINT_VECT2FUNC(v), reg);
	}

	if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG)
		wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0);
	else
		dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");

	if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
	else
		dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
}

/**
 * ice_sriov_free_msix_res - Reset/free any used MSIX resources
 * @pf: pointer to the PF structure
 *
 * Since no MSIX entries are taken from the pf->irq_tracker, just clear
 * the pf->sriov_base_vector.
 *
 * Returns 0 on success, and -EINVAL on error.
 */
static int ice_sriov_free_msix_res(struct ice_pf *pf)
{
	struct ice_res_tracker *res;

	if (!pf)
		return -EINVAL;

	res = pf->irq_tracker;
	if (!res)
		return -EINVAL;

	/* give back irq_tracker resources used */
	WARN_ON(pf->sriov_base_vector < res->num_entries);

	pf->sriov_base_vector = 0;

	return 0;
}

/**
 * ice_set_vf_state_qs_dis - Set VF queues state to disabled
 * @vf: pointer to the VF structure
 */
void ice_set_vf_state_qs_dis(struct ice_vf *vf)
{
	/* Clear Rx/Tx enabled queues flag */
	bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF);
	bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
	clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
}

/**
 * ice_dis_vf_qs - Disable the VF queues
 * @vf: pointer to the VF structure
 */
static void ice_dis_vf_qs(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;

	vsi = pf->vsi[vf->lan_vsi_idx];

	ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
	ice_vsi_stop_all_rx_rings(vsi);
	ice_set_vf_state_qs_dis(vf);
}

/**
 * ice_free_vfs - Free all VFs
 * @pf: pointer to the PF structure
 */
void ice_free_vfs(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	unsigned int tmp, i;

	if (!pf->vf)
		return;

	while (test_and_set_bit(__ICE_VF_DIS, pf->state))
		usleep_range(1000, 2000);

	/* Disable IOV before freeing resources. This lets any VF drivers
	 * running in the host get themselves cleaned up before we yank
	 * the carpet out from underneath their feet.
	 */
	if (!pci_vfs_assigned(pf->pdev))
		pci_disable_sriov(pf->pdev);
	else
		dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");

	/* Avoid wait time by stopping all VFs at the same time */
	ice_for_each_vf(pf, i)
		if (test_bit(ICE_VF_STATE_QS_ENA, pf->vf[i].vf_states))
			ice_dis_vf_qs(&pf->vf[i]);

	tmp = pf->num_alloc_vfs;
	pf->num_qps_per_vf = 0;
	pf->num_alloc_vfs = 0;
	for (i = 0; i < tmp; i++) {
		if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) {
			/* disable VF qp mappings and set VF disable state */
			ice_dis_vf_mappings(&pf->vf[i]);
			set_bit(ICE_VF_STATE_DIS, pf->vf[i].vf_states);
			ice_free_vf_res(&pf->vf[i]);
		}
	}

	if (ice_sriov_free_msix_res(pf))
		dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");

	devm_kfree(dev, pf->vf);
	pf->vf = NULL;

	/* This check is for when the driver is unloaded while VFs are
	 * assigned. Setting the number of VFs to 0 through sysfs is caught
	 * before this function ever gets called.
	 */
	if (!pci_vfs_assigned(pf->pdev)) {
		unsigned int vf_id;

		/* Acknowledge VFLR for all VFs. Without this, VFs will fail to
		 * work correctly when SR-IOV gets re-enabled.
		 */
		for (vf_id = 0; vf_id < tmp; vf_id++) {
			u32 reg_idx, bit_idx;

			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
			wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
		}
	}
	clear_bit(__ICE_VF_DIS, pf->state);
	clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
}

/**
 * ice_trigger_vf_reset - Reset a VF on HW
 * @vf: pointer to the VF structure
 * @is_vflr: true if VFLR was issued, false if not
 * @is_pfr: true if the reset was triggered due to a previous PFR
 *
 * Trigger hardware to start a reset for a particular VF. Expects the caller
 * to wait the proper amount of time to allow hardware to reset the VF before
 * it cleans up and restores VF functionality.
 */
static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
{
	struct ice_pf *pf = vf->pf;
	u32 reg, reg_idx, bit_idx;
	unsigned int vf_abs_id, i;
	struct device *dev;
	struct ice_hw *hw;

	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;

	/* Inform VF that it is no longer active, as a warning */
	clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);

	/* Disable VF's configuration API during reset. The flag is re-enabled
	 * in ice_alloc_vf_res(), when it's safe again to access VF's VSI.
	 * It's normally disabled in ice_free_vf_res(), but it's safer to do
	 * it earlier, to give any VF configuration functions still running
	 * at this point time to finish.
	 */
	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);

	/* VF_MBX_ARQLEN is cleared by PFR, so the driver needs to clear it
	 * in the case of VFR. If this is done for PFR, it can mess up VF
	 * resets because the VF driver may already have started cleanup
	 * by the time we get here.
	 */
	if (!is_pfr)
		wr32(hw, VF_MBX_ARQLEN(vf->vf_id), 0);

	/* In the case of a VFLR, the HW has already reset the VF and we
	 * just need to clean up, so don't hit the VFRTRIG register.
	 */
	if (!is_vflr) {
		/* reset VF using VPGEN_VFRTRIG reg */
		reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
		reg |= VPGEN_VFRTRIG_VFSWR_M;
		wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
	}
	/* clear the VFLR bit in GLGEN_VFLRSTAT */
	reg_idx = (vf_abs_id) / 32;
	bit_idx = (vf_abs_id) % 32;
	wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
	ice_flush(hw);

	wr32(hw, PF_PCI_CIAA,
	     VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S));
	for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) {
		reg = rd32(hw, PF_PCI_CIAD);
		/* no transactions pending so stop polling */
		if ((reg & VF_TRANS_PENDING_M) == 0)
			break;

		dev_err(dev, "VF %u PCI transactions stuck\n", vf->vf_id);
		udelay(ICE_PCI_CIAD_WAIT_DELAY_US);
	}
}

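/* A sketch of the typical caller flow around this helper (see
 * ice_reset_vf() and ice_reset_all_vfs() in this file for the
 * authoritative ordering):
 *
 *	ice_trigger_vf_reset(vf, is_vflr, is_pfr);
 *	...stop VF queues and disable LAN Tx queues...
 *	poll VPGEN_VFRSTAT until VPGEN_VFRSTAT_VFRD_M is set (reset done);
 *	ice_free_vf_res(vf);
 *	ice_cleanup_and_realloc_vf(vf);
 */
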
/**
 * ice_vsi_manage_pvid - Enable or disable port VLAN for VSI
 * @vsi: the VSI to update
 * @pvid_info: VLAN ID and QoS used to set the PVID VSI context field
 * @enable: true for enable PVID false for disable
 */
static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 pvid_info, bool enable)
{
	struct ice_hw *hw = &vsi->back->hw;
	struct ice_aqc_vsi_props *info;
	struct ice_vsi_ctx *ctxt;
	enum ice_status status;
	int ret = 0;

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return -ENOMEM;

	ctxt->info = vsi->info;
	info = &ctxt->info;
	if (enable) {
		info->vlan_flags = ICE_AQ_VSI_VLAN_MODE_UNTAGGED |
			ICE_AQ_VSI_PVLAN_INSERT_PVID |
			ICE_AQ_VSI_VLAN_EMOD_STR;
		info->sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
	} else {
		info->vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING |
			ICE_AQ_VSI_VLAN_MODE_ALL;
		info->sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
	}

	info->pvid = cpu_to_le16(pvid_info);
	info->valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
					   ICE_AQ_VSI_PROP_SW_VALID);

	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
	if (status) {
		dev_info(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %s aq_err %s\n",
			 ice_stat_str(status),
			 ice_aq_str(hw->adminq.sq_last_status));
		ret = -EIO;
		goto out;
	}

	vsi->info.vlan_flags = info->vlan_flags;
	vsi->info.sw_flags2 = info->sw_flags2;
	vsi->info.pvid = info->pvid;
out:
	kfree(ctxt);
	return ret;
}

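/* pvid_info follows the 802.1Q TCI layout; callers build it with
 * something like (qos << VLAN_PRIO_SHIFT) | vlan_id. A worked example,
 * assuming VLAN ID 100 and QoS/priority 5:
 *
 *	pvid_info = (5 << 13) | 100 = 0xA064
 *	bits 15:13 = priority, bit 12 = CFI/DEI, bits 11:0 = VLAN ID
 */
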
/**
 * ice_vf_vsi_setup - Set up a VF VSI
 * @pf: board private structure
 * @pi: pointer to the port_info instance
 * @vf_id: defines VF ID to which this VSI connects.
 *
 * Returns pointer to the successfully allocated VSI struct on success,
 * otherwise returns NULL on failure.
 */
static struct ice_vsi *
ice_vf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, u16 vf_id)
{
	return ice_vsi_setup(pf, pi, ICE_VSI_VF, vf_id);
}

/**
 * ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space
 * @pf: pointer to PF structure
 * @vf: pointer to VF that the first MSIX vector index is being calculated for
 *
 * This returns the first MSIX vector index in PF space that is used by this VF.
 * This index is used when accessing PF relative registers such as
 * GLINT_VECT2FUNC and GLINT_DYN_CTL.
 * This will always be the OICR index in the AVF driver so any functionality
 * using vf->first_vector_idx for queue configuration will have to increment by
 * 1 to avoid meddling with the OICR index.
 */
static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
{
	return pf->sriov_base_vector + vf->vf_id * pf->num_msix_per_vf;
}

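/* Worked example with assumed values: if pf->sriov_base_vector = 200 and
 * pf->num_msix_per_vf = 17, VF 0 owns PF-space vectors 200-216 and VF 2
 * starts at 200 + 2 * 17 = 234. Vector 234 is VF 2's OICR/mailbox vector,
 * so its queue vectors start at 235 (hence the "+ 1" callers apply).
 */
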
/**
 * ice_alloc_vsi_res - Setup VF VSI and its resources
 * @vf: pointer to the VF structure
 *
 * Returns 0 on success, negative value on failure
 */
static int ice_alloc_vsi_res(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	u8 broadcast[ETH_ALEN];
	struct ice_vsi *vsi;
	struct device *dev;
	int status = 0;

	dev = ice_pf_to_dev(pf);
	/* first vector index is the VFs OICR index */
	vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);

	vsi = ice_vf_vsi_setup(pf, pf->hw.port_info, vf->vf_id);
	if (!vsi) {
		dev_err(dev, "Failed to create VF VSI\n");
		return -ENOMEM;
	}

	vf->lan_vsi_idx = vsi->idx;
	vf->lan_vsi_num = vsi->vsi_num;

	/* Check if port VLAN exist before, and restore it accordingly */
	if (vf->port_vlan_info) {
		ice_vsi_manage_pvid(vsi, vf->port_vlan_info, true);
		if (ice_vsi_add_vlan(vsi, vf->port_vlan_info & VLAN_VID_MASK,
				     ICE_FWD_TO_VSI))
			dev_warn(ice_pf_to_dev(pf), "Failed to add Port VLAN %d filter for VF %d\n",
				 vf->port_vlan_info & VLAN_VID_MASK, vf->vf_id);
	} else {
		/* set VLAN 0 filter by default when no port VLAN is
		 * enabled. If a port VLAN is enabled we don't want
		 * untagged broadcast/multicast traffic seen on the VF
		 * interface.
		 */
		if (ice_vsi_add_vlan(vsi, 0, ICE_FWD_TO_VSI))
			dev_warn(ice_pf_to_dev(pf), "Failed to add VLAN 0 filter for VF %d, MDD events will trigger. Reset the VF, disable spoofchk, or enable 8021q module on the guest\n",
				 vf->vf_id);
	}

	if (is_valid_ether_addr(vf->dflt_lan_addr.addr)) {
		status = ice_fltr_add_mac(vsi, vf->dflt_lan_addr.addr,
					  ICE_FWD_TO_VSI);
		if (status)
			goto ice_alloc_vsi_res_exit;
	}

	eth_broadcast_addr(broadcast);
	status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
	if (status)
		dev_err(dev, "could not add mac filters error %d\n",
			status);
	else
		vf->num_mac = 1;

	/* Clear this bit after VF initialization since we shouldn't reclaim
	 * and reassign interrupts for synchronous or asynchronous VFR events.
	 * We don't want to reconfigure interrupts since AVF driver doesn't
	 * expect vector assignment to be changed unless there is a request for
	 * more vectors.
	 */
ice_alloc_vsi_res_exit:
	return status;
}

/**
 * ice_alloc_vf_res - Allocate VF resources
 * @vf: pointer to the VF structure
 */
static int ice_alloc_vf_res(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	int tx_rx_queue_left;
	int status;

	/* Update number of VF queues, in case VF had requested for queue
	 * changes
	 */
	tx_rx_queue_left = min_t(int, ice_get_avail_txq_count(pf),
				 ice_get_avail_rxq_count(pf));
	tx_rx_queue_left += pf->num_qps_per_vf;
	if (vf->num_req_qs && vf->num_req_qs <= tx_rx_queue_left &&
	    vf->num_req_qs != vf->num_vf_qs)
		vf->num_vf_qs = vf->num_req_qs;

	/* setup VF VSI and necessary resources */
	status = ice_alloc_vsi_res(vf);
	if (status)
		goto ice_alloc_vf_res_exit;

	if (vf->trusted)
		set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
	else
		clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);

	/* VF is now completely initialized */
	set_bit(ICE_VF_STATE_INIT, vf->vf_states);

	return status;

ice_alloc_vf_res_exit:
	ice_free_vf_res(vf);
	return status;
}

/**
 * ice_ena_vf_msix_mappings - enable VF MSIX mappings in hardware
 * @vf: VF to enable MSIX mappings for
 *
 * Some of the registers need to be indexed/configured using hardware global
 * device values and other registers need 0-based values, which represent PF
 * based values.
 */
static void ice_ena_vf_msix_mappings(struct ice_vf *vf)
{
	int device_based_first_msix, device_based_last_msix;
	int pf_based_first_msix, pf_based_last_msix, v;
	struct ice_pf *pf = vf->pf;
	int device_based_vf_id;
	struct ice_hw *hw;
	u32 reg;

	hw = &pf->hw;
	pf_based_first_msix = vf->first_vector_idx;
	pf_based_last_msix = (pf_based_first_msix + pf->num_msix_per_vf) - 1;

	device_based_first_msix = pf_based_first_msix +
		pf->hw.func_caps.common_cap.msix_vector_first_id;
	device_based_last_msix =
		(device_based_first_msix + pf->num_msix_per_vf) - 1;
	device_based_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	reg = (((device_based_first_msix << VPINT_ALLOC_FIRST_S) &
		VPINT_ALLOC_FIRST_M) |
	       ((device_based_last_msix << VPINT_ALLOC_LAST_S) &
		VPINT_ALLOC_LAST_M) | VPINT_ALLOC_VALID_M);
	wr32(hw, VPINT_ALLOC(vf->vf_id), reg);

	reg = (((device_based_first_msix << VPINT_ALLOC_PCI_FIRST_S)
		 & VPINT_ALLOC_PCI_FIRST_M) |
	       ((device_based_last_msix << VPINT_ALLOC_PCI_LAST_S) &
		VPINT_ALLOC_PCI_LAST_M) | VPINT_ALLOC_PCI_VALID_M);
	wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);

	/* map the interrupts to its functions */
	for (v = pf_based_first_msix; v <= pf_based_last_msix; v++) {
		reg = (((device_based_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
			GLINT_VECT2FUNC_VF_NUM_M) |
		       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
			GLINT_VECT2FUNC_PF_NUM_M));
		wr32(hw, GLINT_VECT2FUNC(v), reg);
	}

	/* Map mailbox interrupt to VF MSI-X vector 0 */
	wr32(hw, VPINT_MBX_CTL(device_based_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M);
}

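/* Index-space worked example, with assumed capability values: if
 * msix_vector_first_id = 1 and vf_base_id = 16, then for VF 3 with
 * first_vector_idx = 251 and num_msix_per_vf = 17:
 *
 *	PF-based MSIX range:     251..267  (used for GLINT_VECT2FUNC)
 *	device-based MSIX range: 252..268  (used for VPINT_ALLOC*)
 *	device-based VF ID:      19        (used for VPINT_MBX_CTL)
 */
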
/**
 * ice_ena_vf_q_mappings - enable Rx/Tx queue mappings for a VF
 * @vf: VF to enable the mappings for
 * @max_txq: max Tx queues allowed on the VF's VSI
 * @max_rxq: max Rx queues allowed on the VF's VSI
 */
static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq)
{
	struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_hw *hw = &vf->pf->hw;
	u32 reg;

	/* set regardless of mapping mode */
	wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);

	/* VF Tx queues allocation */
	if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) {
		/* set the VF PF Tx queue range
		 * VFNUMQ value should be set to (number of queues - 1). A value
		 * of 0 means 1 queue and a value of 255 means 256 queues
		 */
		reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
			VPLAN_TX_QBASE_VFFIRSTQ_M) |
		       (((max_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
			VPLAN_TX_QBASE_VFNUMQ_M));
		wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
	} else {
		dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
	}

	/* set regardless of mapping mode */
	wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id), VPLAN_RXQ_MAPENA_RX_ENA_M);

	/* VF Rx queues allocation */
	if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) {
		/* set the VF PF Rx queue range
		 * VFNUMQ value should be set to (number of queues - 1). A value
		 * of 0 means 1 queue and a value of 255 means 256 queues
		 */
		reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
			VPLAN_RX_QBASE_VFFIRSTQ_M) |
		       (((max_rxq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
			VPLAN_RX_QBASE_VFNUMQ_M));
		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
	} else {
		dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
	}
}

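/* VFNUMQ encoding example, assuming the VF's VSI owns PF Tx queues 64-67
 * (4 contiguous queues): VFFIRSTQ = 64 and VFNUMQ = 4 - 1 = 3, so the
 * value written to VPLAN_TX_QBASE is
 *
 *	(64 << VPLAN_TX_QBASE_VFFIRSTQ_S) | (3 << VPLAN_TX_QBASE_VFNUMQ_S)
 */
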
/**
 * ice_ena_vf_mappings - enable VF MSIX and queue mapping
 * @vf: pointer to the VF structure
 */
static void ice_ena_vf_mappings(struct ice_vf *vf)
{
	struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];

	ice_ena_vf_msix_mappings(vf);
	ice_ena_vf_q_mappings(vf, vsi->alloc_txq, vsi->alloc_rxq);
}

/**
 * ice_determine_res
 * @pf: pointer to the PF structure
 * @avail_res: available resources in the PF structure
 * @max_res: maximum resources that can be given per VF
 * @min_res: minimum resources that can be given per VF
 *
 * Returns non-zero value if resources (queues/vectors) are available or
 * returns zero if PF cannot accommodate for all num_alloc_vfs.
 */
static int
ice_determine_res(struct ice_pf *pf, u16 avail_res, u16 max_res, u16 min_res)
{
	bool checked_min_res = false;
	int res;

	/* start by checking if PF can assign max number of resources for
	 * all num_alloc_vfs.
	 * if yes, return number per VF
	 * If no, divide by 2 and roundup, check again
	 * repeat the loop till we reach a point where even minimum resources
	 * are not available, in that case return 0
	 */
	res = max_res;
	while ((res >= min_res) && !checked_min_res) {
		int num_all_res;

		num_all_res = pf->num_alloc_vfs * res;
		if (num_all_res <= avail_res)
			return res;

		if (res == min_res)
			checked_min_res = true;

		res = DIV_ROUND_UP(res, 2);
	}
	return 0;
}

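/* Worked example of the halving search, with assumed inputs: 8 VFs,
 * avail_res = 50 queues, max_res = 16, min_res = 1:
 *
 *	res = 16 -> 8 * 16 = 128 > 50, halve to 8
 *	res =  8 -> 8 *  8 =  64 > 50, halve to 4
 *	res =  4 -> 8 *  4 =  32 <= 50, return 4
 *
 * If even min_res cannot be satisfied for all VFs, the loop falls
 * through and returns 0.
 */
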
/**
 * ice_calc_vf_reg_idx - Calculate the VF's register index in the PF space
 * @vf: VF to calculate the register index for
 * @q_vector: a q_vector associated to the VF
 */
int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
{
	struct ice_pf *pf;

	if (!vf || !q_vector)
		return -EINVAL;

	pf = vf->pf;

	/* always add one to account for the OICR being the first MSIX */
	return pf->sriov_base_vector + pf->num_msix_per_vf * vf->vf_id +
		q_vector->v_idx + 1;
}

/**
 * ice_get_max_valid_res_idx - Get the max valid resource index
 * @res: pointer to the resource to find the max valid index for
 *
 * Start from the end of the ice_res_tracker and return right when we find the
 * first res->list entry with the ICE_RES_VALID_BIT set. This function is only
 * valid for SR-IOV because it is the only consumer that manipulates the
 * res->end and this is always called when res->end is set to res->num_entries.
 */
static int ice_get_max_valid_res_idx(struct ice_res_tracker *res)
{
	int i;

	if (!res)
		return -EINVAL;

	for (i = res->num_entries - 1; i >= 0; i--)
		if (res->list[i] & ICE_RES_VALID_BIT)
			return i;

	return 0;
}

/**
 * ice_sriov_set_msix_res - Set any used MSIX resources
 * @pf: pointer to PF structure
 * @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs
 *
 * This function allows SR-IOV resources to be taken from the end of the PF's
 * allowed HW MSIX vectors so that the irq_tracker will not be affected. We
 * just set the pf->sriov_base_vector and return success.
 *
 * If there are not enough resources available, return an error. This should
 * always be caught by ice_set_per_vf_res().
 *
 * Return 0 on success, and -EINVAL when there are not enough MSIX vectors
 * in the PF's space available for SR-IOV.
 */
static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
{
	u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
	int vectors_used = pf->irq_tracker->num_entries;
	int sriov_base_vector;

	sriov_base_vector = total_vectors - num_msix_needed;

	/* make sure we only grab irq_tracker entries from the list end and
	 * that we have enough available MSIX vectors
	 */
	if (sriov_base_vector < vectors_used)
		return -EINVAL;

	pf->sriov_base_vector = sriov_base_vector;

	return 0;
}

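/* Vector budgeting example, with assumed numbers: total_vectors = 768 and
 * the irq_tracker uses the first 96 for the PF. Enabling 8 VFs at 17
 * vectors each needs 136 vectors, so sriov_base_vector = 768 - 136 = 632.
 * Since 632 >= 96, the SR-IOV block fits at the top of the vector space
 * without overlapping PF vectors; asking for 48 such VFs (816 vectors)
 * would fail the check above.
 */
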
/**
 * ice_set_per_vf_res - check if vectors and queues are available
 * @pf: pointer to the PF structure
 *
 * First, determine HW interrupts from common pool. If we allocate fewer VFs, we
 * get more vectors and can enable more queues per VF. Note that this does not
 * grab any vectors from the SW pool already allocated. Also note, that all
 * vector counts include one for each VF's miscellaneous interrupt vector
 * (i.e. OICR).
 *
 * Minimum VFs - 2 vectors, 1 queue pair
 * Small VFs - 5 vectors, 4 queue pairs
 * Medium VFs - 17 vectors, 16 queue pairs
 *
 * Second, determine number of queue pairs per VF by starting with a pre-defined
 * maximum each VF supports. If this is not possible, then we adjust based on
 * queue pairs available on the device.
 *
 * Lastly, set queue and MSI-X VF variables tracked by the PF so it can be used
 * by each VF during VF initialization and reset.
 */
static int ice_set_per_vf_res(struct ice_pf *pf)
{
	int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
	int msix_avail_per_vf, msix_avail_for_sriov;
	struct device *dev = ice_pf_to_dev(pf);
	u16 num_msix_per_vf, num_txq, num_rxq;

	if (!pf->num_alloc_vfs || max_valid_res_idx < 0)
		return -EINVAL;

	/* determine MSI-X resources per VF */
	msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors -
		pf->irq_tracker->num_entries;
	msix_avail_per_vf = msix_avail_for_sriov / pf->num_alloc_vfs;
	if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) {
		num_msix_per_vf = ICE_NUM_VF_MSIX_MED;
	} else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_SMALL) {
		num_msix_per_vf = ICE_NUM_VF_MSIX_SMALL;
	} else if (msix_avail_per_vf >= ICE_MIN_INTR_PER_VF) {
		num_msix_per_vf = ICE_MIN_INTR_PER_VF;
	} else {
		dev_err(dev, "Only %d MSI-X interrupts available for SR-IOV. Not enough to support minimum of %d MSI-X interrupts per VF for %d VFs\n",
			msix_avail_for_sriov, ICE_MIN_INTR_PER_VF,
			pf->num_alloc_vfs);
		return -EIO;
	}

	/* determine queue resources per VF */
	num_txq = ice_determine_res(pf, ice_get_avail_txq_count(pf),
				    min_t(u16,
					  num_msix_per_vf - ICE_NONQ_VECS_VF,
					  ICE_MAX_RSS_QS_PER_VF),
				    ICE_MIN_QS_PER_VF);

	num_rxq = ice_determine_res(pf, ice_get_avail_rxq_count(pf),
				    min_t(u16,
					  num_msix_per_vf - ICE_NONQ_VECS_VF,
					  ICE_MAX_RSS_QS_PER_VF),
				    ICE_MIN_QS_PER_VF);

	if (!num_txq || !num_rxq) {
		dev_err(dev, "Not enough queues to support minimum of %d queue pairs per VF for %d VFs\n",
			ICE_MIN_QS_PER_VF, pf->num_alloc_vfs);
		return -EIO;
	}

	if (ice_sriov_set_msix_res(pf, num_msix_per_vf * pf->num_alloc_vfs)) {
		dev_err(dev, "Unable to set MSI-X resources for %d VFs\n",
			pf->num_alloc_vfs);
		return -EINVAL;
	}

	/* only allow equal Tx/Rx queue count (i.e. queue pairs) */
	pf->num_qps_per_vf = min_t(int, num_txq, num_rxq);
	pf->num_msix_per_vf = num_msix_per_vf;
	dev_info(dev, "Enabling %d VFs with %d vectors and %d queues per VF\n",
		 pf->num_alloc_vfs, pf->num_msix_per_vf, pf->num_qps_per_vf);

	return 0;
}

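/* Tier selection walk-through, assuming msix_avail_for_sriov = 100 and
 * 16 VFs: 100 / 16 = 6 vectors available per VF, which clears the small
 * tier but not the medium one, so each VF gets ICE_NUM_VF_MSIX_SMALL
 * (5 per the comment above) vectors. With one vector reserved for the
 * OICR (ICE_NONQ_VECS_VF), at most 4 queue pairs per VF are then
 * requested from ice_determine_res(). The constant values used in this
 * example are assumptions; see their definitions for the real numbers.
 */
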
/**
 * ice_clear_vf_reset_trigger - enable VF to access hardware
 * @vf: VF to enable hardware access for
 */
static void ice_clear_vf_reset_trigger(struct ice_vf *vf)
{
	struct ice_hw *hw = &vf->pf->hw;
	u32 reg;

	reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
	reg &= ~VPGEN_VFRTRIG_VFSWR_M;
	wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
	ice_flush(hw);
}

/**
 * ice_cleanup_and_realloc_vf - Clean up VF and reallocate resources after reset
 * @vf: pointer to the VF structure
 *
 * Cleanup a VF after the hardware reset is finished. Expects the caller to
 * have verified whether the reset is finished properly, and ensure the
 * minimum amount of wait time has passed. Reallocate VF resources back to make
 * VF state active
 */
static void ice_cleanup_and_realloc_vf(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_hw *hw;

	hw = &pf->hw;

	/* Allow HW to access VF memory after calling
	 * ice_clear_vf_reset_trigger(). If we did it any sooner, HW could
	 * access memory while it was being freed in ice_free_vf_res(), causing
	 * an IOMMU fault.
	 *
	 * On the other hand, this needs to be done ASAP, because the VF driver
	 * is waiting for this to happen and may report a timeout. It's
	 * harmless, but it gets logged into Guest OS kernel log, so best avoid
	 * it.
	 */
	ice_clear_vf_reset_trigger(vf);

	/* reallocate VF resources to finish resetting the VSI state */
	if (!ice_alloc_vf_res(vf)) {
		ice_ena_vf_mappings(vf);
		set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
		clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
	}

	/* Tell the VF driver the reset is done. This needs to be done only
	 * after VF has been fully initialized, because the VF driver may
	 * request resources immediately after setting this flag.
	 */
	wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
}

/**
 * ice_vf_set_vsi_promisc - set given VF VSI to given promiscuous mode(s)
 * @vf: pointer to the VF info
 * @vsi: the VSI being configured
 * @promisc_m: mask of promiscuous config bits
 * @rm_promisc: promisc flag request from the VF to remove or add filter
 *
 * This function configures VF VSI promiscuous mode, based on the VF requests,
 * for Unicast, Multicast and VLAN
 */
static enum ice_status
ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m,
		       bool rm_promisc)
{
	struct ice_pf *pf = vf->pf;
	enum ice_status status = 0;
	struct ice_hw *hw;

	hw = &pf->hw;
	if (vsi->num_vlan) {
		status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
						  rm_promisc);
	} else if (vf->port_vlan_info) {
		if (rm_promisc)
			status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
						       vf->port_vlan_info);
		else
			status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
						     vf->port_vlan_info);
	} else {
		if (rm_promisc)
			status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
						       0);
		else
			status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
						     0);
	}

	return status;
}

/**
 * ice_config_res_vfs - Finalize allocation of VFs resources in one go
 * @pf: pointer to the PF structure
 *
 * This function is called as the last part of resetting all VFs, or when
 * configuring VFs for the first time, where there is no resource to be freed.
 * Returns true if resources were properly allocated for all VFs, and false
 * otherwise.
 */
static bool ice_config_res_vfs(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	int v;

	if (ice_set_per_vf_res(pf)) {
		dev_err(dev, "Cannot allocate VF resources, try with fewer number of VFs\n");
		return false;
	}

	/* rearm global interrupts */
	if (test_and_clear_bit(__ICE_OICR_INTR_DIS, pf->state))
		ice_irq_dynamic_ena(hw, NULL, NULL);

	/* Finish resetting each VF and allocate resources */
	ice_for_each_vf(pf, v) {
		struct ice_vf *vf = &pf->vf[v];

		vf->num_vf_qs = pf->num_qps_per_vf;
		dev_dbg(dev, "VF-id %d has %d queues configured\n", vf->vf_id,
			vf->num_vf_qs);
		ice_cleanup_and_realloc_vf(vf);
	}

	ice_flush(hw);
	clear_bit(__ICE_VF_DIS, pf->state);

	return true;
}

/**
 * ice_reset_all_vfs - reset all allocated VFs in one go
 * @pf: pointer to the PF structure
 * @is_vflr: true if VFLR was issued, false if not
 *
 * First, tell the hardware to reset each VF, then do all the waiting in one
 * chunk, and finally finish restoring each VF after the wait. This is useful
 * during PF routines which need to reset all VFs, as otherwise it must perform
 * these resets in a serialized fashion.
 *
 * Returns true if any VFs were reset, and false otherwise.
 */
bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	int v, i;

	/* If we don't have any VFs, then there is nothing to reset */
	if (!pf->num_alloc_vfs)
		return false;

	/* If VFs have been disabled, there is no need to reset */
	if (test_and_set_bit(__ICE_VF_DIS, pf->state))
		return false;

	/* Begin reset on all VFs at once */
	ice_for_each_vf(pf, v)
		ice_trigger_vf_reset(&pf->vf[v], is_vflr, true);

	ice_for_each_vf(pf, v) {
		struct ice_vsi *vsi;

		vf = &pf->vf[v];
		vsi = pf->vsi[vf->lan_vsi_idx];
		if (test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states))
			ice_dis_vf_qs(vf);
		ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
				NULL, ICE_VF_RESET, vf->vf_id, NULL);
	}

	/* HW requires some time to make sure it can flush the FIFO for a VF
	 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
	 * sequence to make sure that it has completed. We'll keep track of
	 * the VFs using a simple iterator that increments once that VF has
	 * finished resetting.
	 */
	for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
		/* Check each VF in sequence */
		while (v < pf->num_alloc_vfs) {
			u32 reg;

			vf = &pf->vf[v];
			reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
			if (!(reg & VPGEN_VFRSTAT_VFRD_M)) {
				/* only delay if the check failed */
				usleep_range(10, 20);
				break;
			}

			/* If the current VF has finished resetting, move on
			 * to the next VF in sequence.
			 */
			v++;
		}
	}

	/* Display a warning if at least one VF didn't manage to reset in
	 * time, but continue on with the operation.
	 */
	if (v < pf->num_alloc_vfs)
		dev_warn(dev, "VF reset check timeout\n");

	/* free VF resources to begin resetting the VSI state */
	ice_for_each_vf(pf, v) {
		vf = &pf->vf[v];

		ice_free_vf_res(vf);

		/* Free VF queues as well, and reallocate later.
		 * If a given VF has different number of queues
		 * configured, the request for update will come
		 * via mailbox communication.
		 */
		vf->num_vf_qs = 0;
	}

	if (ice_sriov_free_msix_res(pf))
		dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");

	if (!ice_config_res_vfs(pf))
		return false;

	return true;
}

/**
 * ice_is_vf_disabled
 * @vf: pointer to the VF info
 *
 * Returns true if the PF or VF is disabled, false otherwise.
 */
static bool ice_is_vf_disabled(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;

	/* If the PF has been disabled, there is no need resetting VF until
	 * PF is active again. Similarly, if the VF has been disabled, this
	 * means something else is resetting the VF, so we shouldn't continue.
	 * Otherwise, set disable VF state bit for actual reset, and continue.
	 */
	return (test_bit(__ICE_VF_DIS, pf->state) ||
		test_bit(ICE_VF_STATE_DIS, vf->vf_states));
}

/**
 * ice_reset_vf - Reset a particular VF
 * @vf: pointer to the VF structure
 * @is_vflr: true if VFLR was issued, false if not
 *
 * Returns true if the VF is currently in reset, resets successfully, or resets
 * are disabled and false otherwise.
 */
bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	struct device *dev;
	struct ice_hw *hw;
	bool rsd = false;
	u8 promisc_m;
	u32 reg;
	int i;

	dev = ice_pf_to_dev(pf);

	if (test_bit(__ICE_VF_RESETS_DISABLED, pf->state)) {
		dev_dbg(dev, "Trying to reset VF %d, but all VF resets are disabled\n",
			vf->vf_id);
		return true;
	}

	if (ice_is_vf_disabled(vf)) {
		dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",
			vf->vf_id);
		return true;
	}

	/* Set VF disable bit state here, before triggering reset */
	set_bit(ICE_VF_STATE_DIS, vf->vf_states);
	ice_trigger_vf_reset(vf, is_vflr, false);

	vsi = pf->vsi[vf->lan_vsi_idx];

	if (test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states))
		ice_dis_vf_qs(vf);

	/* Call Disable LAN Tx queue AQ whether or not queues are
	 * enabled. This is needed for successful completion of VFR.
	 */
	ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
			NULL, ICE_VF_RESET, vf->vf_id, NULL);

	hw = &pf->hw;
	/* poll VPGEN_VFRSTAT reg to make sure
	 * that reset is complete
	 */
	for (i = 0; i < 10; i++) {
		/* VF reset requires driver to first reset the VF and then
		 * poll the status register to make sure that the reset
		 * completed successfully.
		 */
		reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
		if (reg & VPGEN_VFRSTAT_VFRD_M) {
			rsd = true;
			break;
		}

		/* only sleep if the reset is not done */
		usleep_range(10, 20);
	}

	/* Display a warning if VF didn't manage to reset in time, but need to
	 * continue on with the operation.
	 */
	if (!rsd)
		dev_warn(dev, "VF reset check timeout on VF %d\n", vf->vf_id);

	/* disable promiscuous modes in case they were enabled
	 * ignore any error if disabling process failed
	 */
	if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
	    test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
		if (vf->port_vlan_info || vsi->num_vlan)
			promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
		else
			promisc_m = ICE_UCAST_PROMISC_BITS;

		vsi = pf->vsi[vf->lan_vsi_idx];
		if (ice_vf_set_vsi_promisc(vf, vsi, promisc_m, true))
			dev_err(dev, "disabling promiscuous mode failed\n");
	}

	/* free VF resources to begin resetting the VSI state */
	ice_free_vf_res(vf);

	ice_cleanup_and_realloc_vf(vf);

	ice_flush(hw);

	return true;
}

/**
 * ice_vc_notify_link_state - Inform all VFs on a PF of link status
 * @pf: pointer to the PF structure
 */
void ice_vc_notify_link_state(struct ice_pf *pf)
{
	int i;

	ice_for_each_vf(pf, i)
		ice_vc_notify_vf_link_state(&pf->vf[i]);
}

/**
 * ice_vc_notify_reset - Send pending reset message to all VFs
 * @pf: pointer to the PF structure
 *
 * indicate a pending reset to all VFs on a given PF
 */
void ice_vc_notify_reset(struct ice_pf *pf)
{
	struct virtchnl_pf_event pfe;

	if (!pf->num_alloc_vfs)
		return;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, VIRTCHNL_STATUS_SUCCESS,
			    (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
}

/**
 * ice_vc_notify_vf_reset - Notify VF of a reset event
 * @vf: pointer to the VF structure
 */
static void ice_vc_notify_vf_reset(struct ice_vf *vf)
{
	struct virtchnl_pf_event pfe;
	struct ice_pf *pf;

	if (!vf)
		return;

	pf = vf->pf;
	if (ice_validate_vf_id(pf, vf->vf_id))
		return;

	/* Bail out if VF is in disabled state, neither initialized, nor active
	 * state - otherwise proceed with notifications
	 */
	if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
	     !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) ||
	    test_bit(ICE_VF_STATE_DIS, vf->vf_states))
		return;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT,
			      VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
			      NULL);
}

/**
 * ice_init_vf_vsi_res - initialize/setup VF VSI resources
 * @vf: VF to initialize/setup the VSI for
 *
 * This function creates a VSI for the VF, adds a VLAN 0 filter, and sets up the
 * VF VSI's broadcast filter and is only used during initial VF creation.
 */
static int ice_init_vf_vsi_res(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	u8 broadcast[ETH_ALEN];
	enum ice_status status;
	struct ice_vsi *vsi;
	struct device *dev;
	int err;

	vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);

	dev = ice_pf_to_dev(pf);
	vsi = ice_vf_vsi_setup(pf, pf->hw.port_info, vf->vf_id);
	if (!vsi) {
		dev_err(dev, "Failed to create VF VSI\n");
		return -ENOMEM;
	}

	vf->lan_vsi_idx = vsi->idx;
	vf->lan_vsi_num = vsi->vsi_num;

	err = ice_vsi_add_vlan(vsi, 0, ICE_FWD_TO_VSI);
	if (err) {
		dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n",
			 vf->vf_id);
		goto release_vsi;
	}

	eth_broadcast_addr(broadcast);
	status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
	if (status) {
		dev_err(dev, "Failed to add broadcast MAC filter for VF %d, status %s\n",
			vf->vf_id, ice_stat_str(status));
		err = ice_status_to_errno(status);
		goto release_vsi;
	}

	vf->num_mac = 1;

	return 0;

release_vsi:
	ice_vsi_release(vsi);
	return err;
}

/**
 * ice_start_vfs - start VFs so they are ready to be used by SR-IOV
 * @pf: PF the VFs are associated with
 */
static int ice_start_vfs(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	int retval, i;

	ice_for_each_vf(pf, i) {
		struct ice_vf *vf = &pf->vf[i];

		ice_clear_vf_reset_trigger(vf);

		retval = ice_init_vf_vsi_res(vf);
		if (retval) {
			dev_err(ice_pf_to_dev(pf), "Failed to initialize VSI resources for VF %d, error %d\n",
				vf->vf_id, retval);
			goto teardown;
		}

		set_bit(ICE_VF_STATE_INIT, vf->vf_states);
		ice_ena_vf_mappings(vf);
		wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
	}

	ice_flush(hw);
	return 0;

teardown:
	for (i = i - 1; i >= 0; i--) {
		struct ice_vf *vf = &pf->vf[i];

		ice_dis_vf_mappings(vf);
		ice_vsi_release(pf->vsi[vf->lan_vsi_idx]);
	}

	return retval;
}

/**
 * ice_alloc_vfs - Allocate and set up VFs resources
 * @pf: pointer to the PF structure
 * @num_alloc_vfs: number of VFs to allocate
 */
static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vfs;
	int i, ret;

	/* Disable global interrupt 0 so we don't try to handle the VFLR. */
	wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
	     ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
	set_bit(__ICE_OICR_INTR_DIS, pf->state);
	ice_flush(hw);

	ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
	if (ret) {
		pf->num_alloc_vfs = 0;
		goto err_unroll_intr;
	}
	/* allocate memory */
	vfs = devm_kcalloc(dev, num_alloc_vfs, sizeof(*vfs), GFP_KERNEL);
	if (!vfs) {
		ret = -ENOMEM;
		goto err_pci_disable_sriov;
	}
	pf->vf = vfs;
	pf->num_alloc_vfs = num_alloc_vfs;

	if (ice_set_per_vf_res(pf)) {
		dev_err(dev, "Not enough resources for %d VFs, try with fewer number of VFs\n",
			num_alloc_vfs);
		ret = -ENOSPC;
		goto err_unroll_sriov;
	}

	/* apply default profile */
	ice_for_each_vf(pf, i) {
		vfs[i].pf = pf;
		vfs[i].vf_sw_id = pf->first_sw;
		vfs[i].vf_id = i;

		/* assign default capabilities */
		set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
		vfs[i].spoofchk = true;
		vfs[i].num_vf_qs = pf->num_qps_per_vf;
	}

	if (ice_start_vfs(pf)) {
		dev_err(dev, "Failed to start VF(s)\n");
		ret = -EAGAIN;
		goto err_unroll_sriov;
	}

	clear_bit(__ICE_VF_DIS, pf->state);
	return 0;

err_unroll_sriov:
	pf->vf = NULL;
	devm_kfree(dev, vfs);
	vfs = NULL;
	pf->num_alloc_vfs = 0;
err_pci_disable_sriov:
	pci_disable_sriov(pf->pdev);
err_unroll_intr:
	/* rearm interrupts here */
	ice_irq_dynamic_ena(hw, NULL, NULL);
	clear_bit(__ICE_OICR_INTR_DIS, pf->state);
	return ret;
}

/**
 * ice_pf_state_is_nominal - checks the PF for nominal state
 * @pf: pointer to PF to check
 *
 * Check the PF's state for a collection of bits that would indicate
 * the PF is in a state that would inhibit normal operation for
 * driver functionality.
 *
 * Returns true if PF is in a nominal state.
 * Returns false otherwise
 */
static bool ice_pf_state_is_nominal(struct ice_pf *pf)
{
	DECLARE_BITMAP(check_bits, __ICE_STATE_NBITS) = { 0 };

	if (!pf)
		return false;

	bitmap_set(check_bits, 0, __ICE_STATE_NOMINAL_CHECK_BITS);
	if (bitmap_intersects(pf->state, check_bits, __ICE_STATE_NBITS))
		return false;

	return true;
}

/**
 * ice_pci_sriov_ena - Enable or change number of VFs
 * @pf: pointer to the PF structure
 * @num_vfs: number of VFs to allocate
 *
 * Returns 0 on success and negative on failure
 */
static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
{
	int pre_existing_vfs = pci_num_vf(pf->pdev);
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
		ice_free_vfs(pf);
	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
		return 0;

	if (num_vfs > pf->num_vfs_supported) {
		dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
			num_vfs, pf->num_vfs_supported);
		return -EOPNOTSUPP;
	}

	dev_info(dev, "Allocating %d VFs\n", num_vfs);
	err = ice_alloc_vfs(pf, num_vfs);
	if (err) {
		dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
		return err;
	}

	set_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
	return 0;
}

/**
 * ice_check_sriov_allowed - check if SR-IOV is allowed based on various checks
 * @pf: PF to enable SR-IOV on
 */
static int ice_check_sriov_allowed(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);

	if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
		dev_err(dev, "This device is not capable of SR-IOV\n");
		return -EOPNOTSUPP;
	}

	if (ice_is_safe_mode(pf)) {
		dev_err(dev, "SR-IOV cannot be configured - Device is in Safe Mode\n");
		return -EOPNOTSUPP;
	}

	if (!ice_pf_state_is_nominal(pf)) {
		dev_err(dev, "Cannot enable SR-IOV, device not ready\n");
		return -EBUSY;
	}

	return 0;
}

1631/**
1632 * ice_sriov_configure - Enable or change number of VFs via sysfs
1633 * @pdev: pointer to a pci_dev structure
02337f1f 1634 * @num_vfs: number of VFs to allocate or 0 to free VFs
ddf30f7f 1635 *
02337f1f
BC
1636 * This function is called when the user updates the number of VFs in sysfs. On
1637 * success return whatever num_vfs was set to by the caller. Return negative on
1638 * failure.
ddf30f7f
AV
1639 */
1640int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
1641{
1642 struct ice_pf *pf = pci_get_drvdata(pdev);
4015d11e 1643 struct device *dev = ice_pf_to_dev(pf);
02337f1f 1644 int err;
ddf30f7f 1645
02337f1f
BC
1646 err = ice_check_sriov_allowed(pf);
1647 if (err)
1648 return err;
462acf6a 1649
02337f1f
BC
1650 if (!num_vfs) {
1651 if (!pci_vfs_assigned(pdev)) {
1652 ice_free_vfs(pf);
1653 return 0;
1654 }
ddf30f7f 1655
4015d11e 1656 dev_err(dev, "can't free VFs because some are assigned to VMs.\n");
ddf30f7f
AV
1657 return -EBUSY;
1658 }
1659
02337f1f
BC
1660 err = ice_pci_sriov_ena(pf, num_vfs);
1661 if (err)
1662 return err;
1663
1664 return num_vfs;
ddf30f7f 1665}
007676b4
AV
1666
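/* Usage sketch (the BDF below is illustrative): the PCI core calls this
 * handler when user space writes the standard SR-IOV sysfs attribute:
 *
 *	# echo 4 > /sys/bus/pci/devices/0000:3b:00.0/sriov_numvfs
 *	# echo 0 > /sys/bus/pci/devices/0000:3b:00.0/sriov_numvfs
 *
 * A write of 4 lands in ice_pci_sriov_ena(pf, 4) and this function
 * returns 4 on success; a write of 0 frees the VFs unless one is still
 * assigned to a VM, in which case -EBUSY is returned as above.
 */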
1667/**
1668 * ice_process_vflr_event - Free VF resources via IRQ calls
1669 * @pf: pointer to the PF structure
1670 *
df17b7e0 1671 * called from the VFLR IRQ handler to
007676b4
AV
1672 * free up VF resources and state variables
1673 */
1674void ice_process_vflr_event(struct ice_pf *pf)
1675{
1676 struct ice_hw *hw = &pf->hw;
53bb6698 1677 unsigned int vf_id;
007676b4
AV
1678 u32 reg;
1679
8d7189d2 1680 if (!test_and_clear_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
007676b4
AV
1681 !pf->num_alloc_vfs)
1682 return;
1683
005881bc 1684 ice_for_each_vf(pf, vf_id) {
007676b4
AV
1685 struct ice_vf *vf = &pf->vf[vf_id];
1686 u32 reg_idx, bit_idx;
1687
1688 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
1689 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
1690 /* read GLGEN_VFLRSTAT register to find out the flr VFs */
1691 reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
1692 if (reg & BIT(bit_idx))
1693 /* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */
1694 ice_reset_vf(vf, true);
1695 }
1696}
7c710869
AV
1697
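/* Worked example (numbers are illustrative): with
 * hw->func_caps.vf_base_id = 64 and vf_id = 70, the absolute VF index
 * is 134, so
 *
 *	reg_idx = 134 / 32 = 4;
 *	bit_idx = 134 % 32 = 6;
 *
 * and this VF's VFLR status is rd32(hw, GLGEN_VFLRSTAT(4)) & BIT(6).
 */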
1698/**
ff010eca 1699 * ice_vc_reset_vf - Perform software reset on the VF after informing the AVF
7c710869 1700 * @vf: pointer to the VF info
7c710869 1701 */
ff010eca 1702static void ice_vc_reset_vf(struct ice_vf *vf)
7c710869
AV
1703{
1704 ice_vc_notify_vf_reset(vf);
1705 ice_reset_vf(vf, false);
1706}
1707
2309ae38
BC
1708/**
1709 * ice_get_vf_from_pfq - get the VF who owns the PF space queue passed in
1710 * @pf: PF used to index all VFs
1711 * @pfq: queue index relative to the PF's function space
1712 *
1713 * If no VF is found who owns the pfq then return NULL, otherwise return a
1714 * pointer to the VF who owns the pfq
1715 */
1716static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq)
1717{
53bb6698 1718 unsigned int vf_id;
2309ae38
BC
1719
1720 ice_for_each_vf(pf, vf_id) {
1721 struct ice_vf *vf = &pf->vf[vf_id];
1722 struct ice_vsi *vsi;
1723 u16 rxq_idx;
1724
1725 vsi = pf->vsi[vf->lan_vsi_idx];
1726
1727 ice_for_each_rxq(vsi, rxq_idx)
1728 if (vsi->rxq_map[rxq_idx] == pfq)
1729 return vf;
1730 }
1731
1732 return NULL;
1733}
1734
1735/**
1736 * ice_globalq_to_pfq - convert from global queue index to PF space queue index
1737 * @pf: PF used for conversion
1738 * @globalq: global queue index used to convert to PF space queue index
1739 */
1740static u32 ice_globalq_to_pfq(struct ice_pf *pf, u32 globalq)
1741{
1742 return globalq - pf->hw.func_caps.common_cap.rxq_first_id;
1743}
1744
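/* Worked example (numbers are illustrative): if the device reports
 * rxq_first_id = 64 and a LAN overflow event names global Rx queue 77,
 * ice_globalq_to_pfq() yields PF queue 77 - 64 = 13, which
 * ice_get_vf_from_pfq() then matches against each VF VSI's rxq_map.
 */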
1745/**
1746 * ice_vf_lan_overflow_event - handle LAN overflow event for a VF
1747 * @pf: PF that the LAN overflow event happened on
1748 * @event: structure holding the event information for the LAN overflow event
1749 *
1750 * Determine if the LAN overflow event was caused by a VF queue. If it was not
 1751 * caused by a VF, do nothing. If a VF caused this LAN overflow event, trigger a
1752 * reset on the offending VF.
1753 */
1754void
1755ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
1756{
1757 u32 gldcb_rtctq, queue;
1758 struct ice_vf *vf;
1759
1760 gldcb_rtctq = le32_to_cpu(event->desc.params.lan_overflow.prtdcb_ruptq);
1761 dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq);
1762
1763 /* event returns device global Rx queue number */
1764 queue = (gldcb_rtctq & GLDCB_RTCTQ_RXQNUM_M) >>
1765 GLDCB_RTCTQ_RXQNUM_S;
1766
1767 vf = ice_get_vf_from_pfq(pf, ice_globalq_to_pfq(pf, queue));
1768 if (!vf)
1769 return;
1770
1771 ice_vc_reset_vf(vf);
1772}
1773
1071a835
AV
1774/**
1775 * ice_vc_send_msg_to_vf - Send message to VF
1776 * @vf: pointer to the VF info
1777 * @v_opcode: virtual channel opcode
1778 * @v_retval: virtual channel return value
1779 * @msg: pointer to the msg buffer
1780 * @msglen: msg length
1781 *
1782 * send msg to VF
1783 */
c8b7abdd 1784static int
cf6c6e01
MW
1785ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
1786 enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
1071a835
AV
1787{
1788 enum ice_status aq_ret;
4015d11e 1789 struct device *dev;
1071a835
AV
1790 struct ice_pf *pf;
1791
4c66d227 1792 if (!vf)
1071a835
AV
1793 return -EINVAL;
1794
1795 pf = vf->pf;
4c66d227
JB
1796 if (ice_validate_vf_id(pf, vf->vf_id))
1797 return -EINVAL;
1071a835 1798
4015d11e
BC
1799 dev = ice_pf_to_dev(pf);
1800
1071a835
AV
1801 /* single place to detect unsuccessful return values */
1802 if (v_retval) {
1803 vf->num_inval_msgs++;
4015d11e
BC
1804 dev_info(dev, "VF %d failed opcode %d, retval: %d\n", vf->vf_id,
1805 v_opcode, v_retval);
1071a835 1806 if (vf->num_inval_msgs > ICE_DFLT_NUM_INVAL_MSGS_ALLOWED) {
19cce2c6 1807 dev_err(dev, "Number of invalid messages exceeded for VF %d\n",
1071a835 1808 vf->vf_id);
4015d11e 1809 dev_err(dev, "Use PF Control I/F to enable the VF\n");
1071a835
AV
1810 set_bit(ICE_VF_STATE_DIS, vf->vf_states);
1811 return -EIO;
1812 }
1813 } else {
1814 vf->num_valid_msgs++;
1815 /* reset the invalid counter, if a valid message is received. */
1816 vf->num_inval_msgs = 0;
1817 }
1818
1819 aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
1820 msg, msglen, NULL);
90e47737 1821 if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) {
0fee3577
LY
1822 dev_info(dev, "Unable to send the message to VF %d ret %s aq_err %s\n",
1823 vf->vf_id, ice_stat_str(aq_ret),
1824 ice_aq_str(pf->hw.mailboxq.sq_last_status));
1071a835
AV
1825 return -EIO;
1826 }
1827
1828 return 0;
1829}
1830
1831/**
1832 * ice_vc_get_ver_msg
1833 * @vf: pointer to the VF info
1834 * @msg: pointer to the msg buffer
1835 *
1836 * called from the VF to request the API version used by the PF
1837 */
1838static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
1839{
1840 struct virtchnl_version_info info = {
1841 VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
1842 };
1843
1844 vf->vf_ver = *(struct virtchnl_version_info *)msg;
1845 /* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
1846 if (VF_IS_V10(&vf->vf_ver))
1847 info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
1848
cf6c6e01
MW
1849 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
1850 VIRTCHNL_STATUS_SUCCESS, (u8 *)&info,
1071a835
AV
1851 sizeof(struct virtchnl_version_info));
1852}
1853
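/* Negotiation sketch (illustrative exchange): a VF advertising API 1.0
 * has the minor version clamped so it is never offered capability bits
 * it cannot parse:
 *
 *	VF sends:   { major = 1, minor = 0 }
 *	PF replies: { major = VIRTCHNL_VERSION_MAJOR,
 *		      minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS }
 *
 * A 1.1 VF gets the PF's full advertised version back unchanged.
 */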
1854/**
1855 * ice_vc_get_vf_res_msg
1856 * @vf: pointer to the VF info
1857 * @msg: pointer to the msg buffer
1858 *
1859 * called from the VF to request its resources
1860 */
1861static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
1862{
cf6c6e01 1863 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1071a835 1864 struct virtchnl_vf_resource *vfres = NULL;
1071a835
AV
1865 struct ice_pf *pf = vf->pf;
1866 struct ice_vsi *vsi;
1867 int len = 0;
1868 int ret;
1869
4c66d227 1870 if (ice_check_vf_init(pf, vf)) {
cf6c6e01 1871 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
1872 goto err;
1873 }
1874
1875 len = sizeof(struct virtchnl_vf_resource);
1876
9efe35d0 1877 vfres = kzalloc(len, GFP_KERNEL);
1071a835 1878 if (!vfres) {
cf6c6e01 1879 v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
1071a835
AV
1880 len = 0;
1881 goto err;
1882 }
1883 if (VF_IS_V11(&vf->vf_ver))
1884 vf->driver_caps = *(u32 *)msg;
1885 else
1886 vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
1887 VIRTCHNL_VF_OFFLOAD_RSS_REG |
1888 VIRTCHNL_VF_OFFLOAD_VLAN;
1889
1890 vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
1891 vsi = pf->vsi[vf->lan_vsi_idx];
f1ef73f5 1892 if (!vsi) {
cf6c6e01 1893 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
f1ef73f5
AA
1894 goto err;
1895 }
1896
1071a835
AV
1897 if (!vsi->info.pvid)
1898 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
1899
1900 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
1901 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
1902 } else {
1903 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)
1904 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
1905 else
1906 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
1907 }
1908
1909 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
1910 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
1911
1912 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
1913 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
1914
1915 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)
1916 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
1917
1918 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING)
1919 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
1920
1921 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
1922 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
1923
1924 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
1925 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
1926
1927 if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
1928 vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
1929
1930 vfres->num_vsis = 1;
1931 /* Tx and Rx queue are equal for VF */
1932 vfres->num_queue_pairs = vsi->num_txq;
46c276ce 1933 vfres->max_vectors = pf->num_msix_per_vf;
1071a835
AV
1934 vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE;
1935 vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
1936
1937 vfres->vsi_res[0].vsi_id = vf->lan_vsi_num;
1938 vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
1939 vfres->vsi_res[0].num_queue_pairs = vsi->num_txq;
1940 ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
1941 vf->dflt_lan_addr.addr);
1942
d4bc4e2d
BC
1943 /* match guest capabilities */
1944 vf->driver_caps = vfres->vf_cap_flags;
1945
1071a835
AV
1946 set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
1947
1948err:
1949 /* send the response back to the VF */
cf6c6e01 1950 ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, v_ret,
1071a835
AV
1951 (u8 *)vfres, len);
1952
9efe35d0 1953 kfree(vfres);
1071a835
AV
1954 return ret;
1955}
1956
1957/**
1958 * ice_vc_reset_vf_msg
1959 * @vf: pointer to the VF info
1960 *
 1961 * called from the VF to reset itself;
 1962 * unlike other virtchnl messages, the PF driver
 1963 * doesn't send a response back to the VF
1964 */
1965static void ice_vc_reset_vf_msg(struct ice_vf *vf)
1966{
1967 if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
1968 ice_reset_vf(vf, false);
1969}
1970
1971/**
1972 * ice_find_vsi_from_id
2f2da36e 1973 * @pf: the PF structure to search for the VSI
f9867df6 1974 * @id: ID of the VSI it is searching for
1071a835 1975 *
f9867df6 1976 * searches for the VSI with the given ID
1071a835
AV
1977 */
1978static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id)
1979{
1980 int i;
1981
80ed404a 1982 ice_for_each_vsi(pf, i)
1071a835
AV
1983 if (pf->vsi[i] && pf->vsi[i]->vsi_num == id)
1984 return pf->vsi[i];
1985
1986 return NULL;
1987}
1988
1989/**
1990 * ice_vc_isvalid_vsi_id
1991 * @vf: pointer to the VF info
f9867df6 1992 * @vsi_id: VF relative VSI ID
1071a835 1993 *
f9867df6 1994 * check for the valid VSI ID
1071a835
AV
1995 */
1996static bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
1997{
1998 struct ice_pf *pf = vf->pf;
1999 struct ice_vsi *vsi;
2000
2001 vsi = ice_find_vsi_from_id(pf, vsi_id);
2002
2003 return (vsi && (vsi->vf_id == vf->vf_id));
2004}
2005
2006/**
2007 * ice_vc_isvalid_q_id
2008 * @vf: pointer to the VF info
f9867df6
AV
2009 * @vsi_id: VSI ID
2010 * @qid: VSI relative queue ID
1071a835 2011 *
f9867df6 2012 * check for the valid queue ID
1071a835
AV
2013 */
2014static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
2015{
2016 struct ice_vsi *vsi = ice_find_vsi_from_id(vf->pf, vsi_id);
 2017 /* allocated Tx and Rx queues should always be equal for VF VSI */
2018 return (vsi && (qid < vsi->alloc_txq));
2019}
2020
9c7dd756
MS
2021/**
2022 * ice_vc_isvalid_ring_len
2023 * @ring_len: length of ring
2024 *
 2025 * check for a valid ring count: it must be zero, or a multiple of
77ca27c4 2026 * ICE_REQ_DESC_MULTIPLE within the supported descriptor range
9c7dd756
MS
2027 */
2028static bool ice_vc_isvalid_ring_len(u16 ring_len)
2029{
77ca27c4
PG
2030 return ring_len == 0 ||
2031 (ring_len >= ICE_MIN_NUM_DESC &&
9c7dd756
MS
2032 ring_len <= ICE_MAX_NUM_DESC &&
2033 !(ring_len % ICE_REQ_DESC_MULTIPLE));
2034}
2035
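/* Worked example (limit values assumed from the driver's descriptor
 * constants, e.g. ICE_MIN_NUM_DESC = 64, ICE_MAX_NUM_DESC = 8160,
 * ICE_REQ_DESC_MULTIPLE = 32): ring_len 0 and 512 are accepted, 100
 * fails the multiple-of-32 check, and 16384 fails the upper bound.
 */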
1071a835
AV
2036/**
2037 * ice_vc_config_rss_key
2038 * @vf: pointer to the VF info
2039 * @msg: pointer to the msg buffer
2040 *
2041 * Configure the VF's RSS key
2042 */
2043static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
2044{
cf6c6e01 2045 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1071a835
AV
2046 struct virtchnl_rss_key *vrk =
2047 (struct virtchnl_rss_key *)msg;
f1ef73f5 2048 struct ice_pf *pf = vf->pf;
4c66d227 2049 struct ice_vsi *vsi;
1071a835
AV
2050
2051 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
cf6c6e01 2052 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2053 goto error_param;
2054 }
2055
2056 if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) {
cf6c6e01 2057 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2058 goto error_param;
2059 }
2060
3f416961 2061 if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
cf6c6e01 2062 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2063 goto error_param;
2064 }
2065
3f416961 2066 if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
cf6c6e01 2067 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2068 goto error_param;
2069 }
2070
3f416961
A
2071 vsi = pf->vsi[vf->lan_vsi_idx];
2072 if (!vsi) {
cf6c6e01 2073 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2074 goto error_param;
2075 }
2076
cf6c6e01
MW
2077 if (ice_set_rss(vsi, vrk->key, NULL, 0))
2078 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
1071a835 2079error_param:
cf6c6e01 2080 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, v_ret,
1071a835
AV
2081 NULL, 0);
2082}
2083
2084/**
2085 * ice_vc_config_rss_lut
2086 * @vf: pointer to the VF info
2087 * @msg: pointer to the msg buffer
2088 *
2089 * Configure the VF's RSS LUT
2090 */
2091static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
2092{
2093 struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
cf6c6e01 2094 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
f1ef73f5 2095 struct ice_pf *pf = vf->pf;
4c66d227 2096 struct ice_vsi *vsi;
1071a835
AV
2097
2098 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
cf6c6e01 2099 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2100 goto error_param;
2101 }
2102
2103 if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) {
cf6c6e01 2104 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2105 goto error_param;
2106 }
2107
3f416961 2108 if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) {
cf6c6e01 2109 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2110 goto error_param;
2111 }
2112
3f416961 2113 if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
cf6c6e01 2114 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2115 goto error_param;
2116 }
2117
3f416961
A
2118 vsi = pf->vsi[vf->lan_vsi_idx];
2119 if (!vsi) {
cf6c6e01 2120 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2121 goto error_param;
2122 }
2123
cf6c6e01
MW
2124 if (ice_set_rss(vsi, NULL, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE))
2125 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
1071a835 2126error_param:
cf6c6e01 2127 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret,
1071a835
AV
2128 NULL, 0);
2129}
2130
c54d209c
BC
2131/**
2132 * ice_wait_on_vf_reset - poll to make sure a given VF is ready after reset
 2133 * @vf: The VF being reset
2134 *
 2135 * The max poll time is ~800ms, which is about the maximum time it takes
2136 * for a VF to be reset and/or a VF driver to be removed.
2137 */
2138static void ice_wait_on_vf_reset(struct ice_vf *vf)
2139{
2140 int i;
2141
2142 for (i = 0; i < ICE_MAX_VF_RESET_TRIES; i++) {
2143 if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
2144 break;
2145 msleep(ICE_MAX_VF_RESET_SLEEP_MS);
2146 }
2147}
2148
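/* Timing sketch: the ~800ms bound above is simply the product of the two
 * constants (values assumed), e.g. ICE_MAX_VF_RESET_TRIES = 40 and
 * ICE_MAX_VF_RESET_SLEEP_MS = 20 give 40 * 20 = 800ms of worst-case
 * polling before giving up on the reset.
 */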
2149/**
2150 * ice_check_vf_ready_for_cfg - check if VF is ready to be configured/queried
2151 * @vf: VF to check if it's ready to be configured/queried
2152 *
2153 * The purpose of this function is to make sure the VF is not in reset, not
2154 * disabled, and initialized so it can be configured and/or queried by a host
2155 * administrator.
2156 */
2157static int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
2158{
2159 struct ice_pf *pf;
2160
2161 ice_wait_on_vf_reset(vf);
2162
2163 if (ice_is_vf_disabled(vf))
2164 return -EINVAL;
2165
2166 pf = vf->pf;
2167 if (ice_check_vf_init(pf, vf))
2168 return -EBUSY;
2169
2170 return 0;
2171}
2172
cd6d6b83
BC
2173/**
2174 * ice_set_vf_spoofchk
2175 * @netdev: network interface device structure
2176 * @vf_id: VF identifier
2177 * @ena: flag to enable or disable feature
2178 *
2179 * Enable or disable VF spoof checking
2180 */
2181int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
2182{
2183 struct ice_netdev_priv *np = netdev_priv(netdev);
2184 struct ice_pf *pf = np->vsi->back;
2185 struct ice_vsi_ctx *ctx;
2186 struct ice_vsi *vf_vsi;
2187 enum ice_status status;
2188 struct device *dev;
2189 struct ice_vf *vf;
c54d209c 2190 int ret;
cd6d6b83
BC
2191
2192 dev = ice_pf_to_dev(pf);
2193 if (ice_validate_vf_id(pf, vf_id))
2194 return -EINVAL;
2195
2196 vf = &pf->vf[vf_id];
c54d209c
BC
2197 ret = ice_check_vf_ready_for_cfg(vf);
2198 if (ret)
2199 return ret;
cd6d6b83
BC
2200
2201 vf_vsi = pf->vsi[vf->lan_vsi_idx];
2202 if (!vf_vsi) {
2203 netdev_err(netdev, "VSI %d for VF %d is null\n",
2204 vf->lan_vsi_idx, vf->vf_id);
2205 return -EINVAL;
2206 }
2207
2208 if (vf_vsi->type != ICE_VSI_VF) {
19cce2c6 2209 netdev_err(netdev, "Type %d of VSI %d for VF %d is not ICE_VSI_VF\n",
cd6d6b83
BC
2210 vf_vsi->type, vf_vsi->vsi_num, vf->vf_id);
2211 return -ENODEV;
2212 }
2213
2214 if (ena == vf->spoofchk) {
2215 dev_dbg(dev, "VF spoofchk already %s\n", ena ? "ON" : "OFF");
2216 return 0;
2217 }
2218
2219 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
2220 if (!ctx)
2221 return -ENOMEM;
2222
2223 ctx->info.sec_flags = vf_vsi->info.sec_flags;
2224 ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
2225 if (ena) {
2226 ctx->info.sec_flags |=
2227 ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
2228 (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
2229 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
2230 } else {
2231 ctx->info.sec_flags &=
2232 ~(ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
2233 (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
2234 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S));
2235 }
2236
2237 status = ice_update_vsi(&pf->hw, vf_vsi->idx, ctx, NULL);
2238 if (status) {
0fee3577
LY
 2239 dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d, error %s\n",
2240 ena ? "en" : "dis", vf->vf_id, vf_vsi->vsi_num,
2241 ice_stat_str(status));
cd6d6b83
BC
2242 ret = -EIO;
2243 goto out;
2244 }
2245
2246 /* only update spoofchk state and VSI context on success */
2247 vf_vsi->info.sec_flags = ctx->info.sec_flags;
2248 vf->spoofchk = ena;
2249
2250out:
2251 kfree(ctx);
2252 return ret;
2253}
2254
01b5e89a
BC
2255/**
2256 * ice_is_any_vf_in_promisc - check if any VF(s) are in promiscuous mode
2257 * @pf: PF structure for accessing VF(s)
2258 *
2259 * Return false if no VF(s) are in unicast and/or multicast promiscuous mode,
2260 * else return true
2261 */
2262bool ice_is_any_vf_in_promisc(struct ice_pf *pf)
2263{
2264 int vf_idx;
2265
2266 ice_for_each_vf(pf, vf_idx) {
2267 struct ice_vf *vf = &pf->vf[vf_idx];
2268
2269 /* found a VF that has promiscuous mode configured */
2270 if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
2271 test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
2272 return true;
2273 }
2274
2275 return false;
2276}
2277
2278/**
2279 * ice_vc_cfg_promiscuous_mode_msg
2280 * @vf: pointer to the VF info
2281 * @msg: pointer to the msg buffer
2282 *
2283 * called from the VF to configure VF VSIs promiscuous mode
2284 */
2285static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
2286{
2287 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2288 struct virtchnl_promisc_info *info =
2289 (struct virtchnl_promisc_info *)msg;
2290 struct ice_pf *pf = vf->pf;
2291 struct ice_vsi *vsi;
2292 struct device *dev;
2293 bool rm_promisc;
2294 int ret = 0;
2295
2296 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2297 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2298 goto error_param;
2299 }
2300
2301 if (!ice_vc_isvalid_vsi_id(vf, info->vsi_id)) {
2302 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2303 goto error_param;
2304 }
2305
2306 vsi = pf->vsi[vf->lan_vsi_idx];
2307 if (!vsi) {
2308 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2309 goto error_param;
2310 }
2311
2312 dev = ice_pf_to_dev(pf);
2313 if (!test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
2314 dev_err(dev, "Unprivileged VF %d is attempting to configure promiscuous mode\n",
2315 vf->vf_id);
2316 /* Leave v_ret alone, lie to the VF on purpose. */
2317 goto error_param;
2318 }
2319
2320 rm_promisc = !(info->flags & FLAG_VF_UNICAST_PROMISC) &&
2321 !(info->flags & FLAG_VF_MULTICAST_PROMISC);
2322
2323 if (vsi->num_vlan || vf->port_vlan_info) {
2324 struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);
2325 struct net_device *pf_netdev;
2326
2327 if (!pf_vsi) {
2328 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2329 goto error_param;
2330 }
2331
2332 pf_netdev = pf_vsi->netdev;
2333
2334 ret = ice_set_vf_spoofchk(pf_netdev, vf->vf_id, rm_promisc);
2335 if (ret) {
2336 dev_err(dev, "Failed to update spoofchk to %s for VF %d VSI %d when setting promiscuous mode\n",
2337 rm_promisc ? "ON" : "OFF", vf->vf_id,
2338 vsi->vsi_num);
2339 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2340 }
2341
2342 ret = ice_cfg_vlan_pruning(vsi, true, !rm_promisc);
2343 if (ret) {
2344 dev_err(dev, "Failed to configure VLAN pruning in promiscuous mode\n");
2345 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2346 goto error_param;
2347 }
2348 }
2349
2350 if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
2351 bool set_dflt_vsi = !!(info->flags & FLAG_VF_UNICAST_PROMISC);
2352
2353 if (set_dflt_vsi && !ice_is_dflt_vsi_in_use(pf->first_sw))
2354 /* only attempt to set the default forwarding VSI if
2355 * it's not currently set
2356 */
2357 ret = ice_set_dflt_vsi(pf->first_sw, vsi);
2358 else if (!set_dflt_vsi &&
2359 ice_is_vsi_dflt_vsi(pf->first_sw, vsi))
2360 /* only attempt to free the default forwarding VSI if we
2361 * are the owner
2362 */
2363 ret = ice_clear_dflt_vsi(pf->first_sw);
2364
2365 if (ret) {
2366 dev_err(dev, "%sable VF %d as the default VSI failed, error %d\n",
2367 set_dflt_vsi ? "en" : "dis", vf->vf_id, ret);
2368 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
2369 goto error_param;
2370 }
2371 } else {
2372 enum ice_status status;
2373 u8 promisc_m;
2374
2375 if (info->flags & FLAG_VF_UNICAST_PROMISC) {
2376 if (vf->port_vlan_info || vsi->num_vlan)
2377 promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
2378 else
2379 promisc_m = ICE_UCAST_PROMISC_BITS;
2380 } else if (info->flags & FLAG_VF_MULTICAST_PROMISC) {
2381 if (vf->port_vlan_info || vsi->num_vlan)
2382 promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
2383 else
2384 promisc_m = ICE_MCAST_PROMISC_BITS;
2385 } else {
2386 if (vf->port_vlan_info || vsi->num_vlan)
2387 promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
2388 else
2389 promisc_m = ICE_UCAST_PROMISC_BITS;
2390 }
2391
2392 /* Configure multicast/unicast with or without VLAN promiscuous
2393 * mode
2394 */
2395 status = ice_vf_set_vsi_promisc(vf, vsi, promisc_m, rm_promisc);
2396 if (status) {
0fee3577
LY
2397 dev_err(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d failed, error: %s\n",
2398 rm_promisc ? "dis" : "en", vf->vf_id,
2399 ice_stat_str(status));
01b5e89a
BC
2400 v_ret = ice_err_to_virt_err(status);
2401 goto error_param;
2402 } else {
2403 dev_dbg(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d succeeded\n",
2404 rm_promisc ? "dis" : "en", vf->vf_id);
2405 }
2406 }
2407
2408 if (info->flags & FLAG_VF_MULTICAST_PROMISC)
2409 set_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
2410 else
2411 clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
2412
2413 if (info->flags & FLAG_VF_UNICAST_PROMISC)
2414 set_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
2415 else
2416 clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
2417
2418error_param:
2419 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
2420 v_ret, NULL, 0);
2421}
2422
1071a835
AV
2423/**
2424 * ice_vc_get_stats_msg
2425 * @vf: pointer to the VF info
2426 * @msg: pointer to the msg buffer
2427 *
2428 * called from the VF to get VSI stats
2429 */
2430static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
2431{
cf6c6e01 2432 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1071a835
AV
2433 struct virtchnl_queue_select *vqs =
2434 (struct virtchnl_queue_select *)msg;
949375de 2435 struct ice_eth_stats stats = { 0 };
f1ef73f5 2436 struct ice_pf *pf = vf->pf;
1071a835
AV
2437 struct ice_vsi *vsi;
2438
2439 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
cf6c6e01 2440 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2441 goto error_param;
2442 }
2443
2444 if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
cf6c6e01 2445 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2446 goto error_param;
2447 }
2448
f1ef73f5 2449 vsi = pf->vsi[vf->lan_vsi_idx];
1071a835 2450 if (!vsi) {
cf6c6e01 2451 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2452 goto error_param;
2453 }
2454
1071a835
AV
2455 ice_update_eth_stats(vsi);
2456
2457 stats = vsi->eth_stats;
2458
2459error_param:
2460 /* send the response to the VF */
cf6c6e01 2461 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, v_ret,
1071a835
AV
2462 (u8 *)&stats, sizeof(stats));
2463}
2464
24e2e2a0
BC
2465/**
2466 * ice_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
2467 * @vqs: virtchnl_queue_select structure containing bitmaps to validate
2468 *
2469 * Return true on successful validation, else false
2470 */
2471static bool ice_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
2472{
2473 if ((!vqs->rx_queues && !vqs->tx_queues) ||
0ca469fb
MW
2474 vqs->rx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF) ||
2475 vqs->tx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF))
24e2e2a0
BC
2476 return false;
2477
2478 return true;
2479}
2480
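/* Worked example (assuming ICE_MAX_RSS_QS_PER_VF = 16): a request with
 * rx_queues = 0x3 and tx_queues = 0x3 (queues 0 and 1) passes, both
 * bitmaps zero fails, and rx_queues = BIT(16) fails because it names a
 * queue outside the per-VF range.
 */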
4dc926d3
BC
2481/**
2482 * ice_vf_ena_txq_interrupt - enable Tx queue interrupt via QINT_TQCTL
2483 * @vsi: VSI of the VF to configure
2484 * @q_idx: VF queue index used to determine the queue in the PF's space
2485 */
2486static void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx)
2487{
2488 struct ice_hw *hw = &vsi->back->hw;
2489 u32 pfq = vsi->txq_map[q_idx];
2490 u32 reg;
2491
2492 reg = rd32(hw, QINT_TQCTL(pfq));
2493
2494 /* MSI-X index 0 in the VF's space is always for the OICR, which means
2495 * this is most likely a poll mode VF driver, so don't enable an
2496 * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
2497 */
2498 if (!(reg & QINT_TQCTL_MSIX_INDX_M))
2499 return;
2500
2501 wr32(hw, QINT_TQCTL(pfq), reg | QINT_TQCTL_CAUSE_ENA_M);
2502}
2503
2504/**
 2505 * ice_vf_ena_rxq_interrupt - enable Rx queue interrupt via QINT_RQCTL
2506 * @vsi: VSI of the VF to configure
2507 * @q_idx: VF queue index used to determine the queue in the PF's space
2508 */
2509static void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx)
2510{
2511 struct ice_hw *hw = &vsi->back->hw;
2512 u32 pfq = vsi->rxq_map[q_idx];
2513 u32 reg;
2514
2515 reg = rd32(hw, QINT_RQCTL(pfq));
2516
2517 /* MSI-X index 0 in the VF's space is always for the OICR, which means
2518 * this is most likely a poll mode VF driver, so don't enable an
2519 * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
2520 */
2521 if (!(reg & QINT_RQCTL_MSIX_INDX_M))
2522 return;
2523
2524 wr32(hw, QINT_RQCTL(pfq), reg | QINT_RQCTL_CAUSE_ENA_M);
2525}
2526
1071a835
AV
2527/**
2528 * ice_vc_ena_qs_msg
2529 * @vf: pointer to the VF info
2530 * @msg: pointer to the msg buffer
2531 *
2532 * called from the VF to enable all or specific queue(s)
2533 */
2534static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
2535{
cf6c6e01 2536 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1071a835
AV
2537 struct virtchnl_queue_select *vqs =
2538 (struct virtchnl_queue_select *)msg;
f1ef73f5 2539 struct ice_pf *pf = vf->pf;
1071a835 2540 struct ice_vsi *vsi;
77ca27c4
PG
2541 unsigned long q_map;
2542 u16 vf_q_id;
1071a835
AV
2543
2544 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
cf6c6e01 2545 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2546 goto error_param;
2547 }
2548
2549 if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
cf6c6e01 2550 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2551 goto error_param;
2552 }
2553
24e2e2a0 2554 if (!ice_vc_validate_vqs_bitmaps(vqs)) {
3f416961
A
2555 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2556 goto error_param;
2557 }
2558
f1ef73f5 2559 vsi = pf->vsi[vf->lan_vsi_idx];
1071a835 2560 if (!vsi) {
cf6c6e01 2561 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2562 goto error_param;
2563 }
2564
2565 /* Enable only Rx rings, Tx rings were enabled by the FW when the
2566 * Tx queue group list was configured and the context bits were
2567 * programmed using ice_vsi_cfg_txqs
2568 */
77ca27c4 2569 q_map = vqs->rx_queues;
0ca469fb 2570 for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
77ca27c4
PG
2571 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
2572 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2573 goto error_param;
2574 }
2575
2576 /* Skip queue if enabled */
2577 if (test_bit(vf_q_id, vf->rxq_ena))
2578 continue;
2579
13a6233b 2580 if (ice_vsi_ctrl_one_rx_ring(vsi, true, vf_q_id, true)) {
19cce2c6 2581 dev_err(ice_pf_to_dev(vsi->back), "Failed to enable Rx ring %d on VSI %d\n",
77ca27c4
PG
2582 vf_q_id, vsi->vsi_num);
2583 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2584 goto error_param;
2585 }
2586
4dc926d3 2587 ice_vf_ena_rxq_interrupt(vsi, vf_q_id);
77ca27c4 2588 set_bit(vf_q_id, vf->rxq_ena);
77ca27c4
PG
2589 }
2590
2591 vsi = pf->vsi[vf->lan_vsi_idx];
2592 q_map = vqs->tx_queues;
0ca469fb 2593 for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
77ca27c4
PG
2594 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
2595 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2596 goto error_param;
2597 }
2598
2599 /* Skip queue if enabled */
2600 if (test_bit(vf_q_id, vf->txq_ena))
2601 continue;
2602
4dc926d3 2603 ice_vf_ena_txq_interrupt(vsi, vf_q_id);
77ca27c4 2604 set_bit(vf_q_id, vf->txq_ena);
77ca27c4 2605 }
1071a835
AV
2606
2607 /* Set flag to indicate that queues are enabled */
cf6c6e01 2608 if (v_ret == VIRTCHNL_STATUS_SUCCESS)
77ca27c4 2609 set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
1071a835
AV
2610
2611error_param:
2612 /* send the response to the VF */
cf6c6e01 2613 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret,
1071a835
AV
2614 NULL, 0);
2615}
2616
2617/**
2618 * ice_vc_dis_qs_msg
2619 * @vf: pointer to the VF info
2620 * @msg: pointer to the msg buffer
2621 *
2622 * called from the VF to disable all or specific
2623 * queue(s)
2624 */
2625static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
2626{
cf6c6e01 2627 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1071a835
AV
2628 struct virtchnl_queue_select *vqs =
2629 (struct virtchnl_queue_select *)msg;
f1ef73f5 2630 struct ice_pf *pf = vf->pf;
1071a835 2631 struct ice_vsi *vsi;
77ca27c4
PG
2632 unsigned long q_map;
2633 u16 vf_q_id;
1071a835
AV
2634
2635 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
77ca27c4 2636 !test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) {
cf6c6e01 2637 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2638 goto error_param;
2639 }
2640
2641 if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
cf6c6e01 2642 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2643 goto error_param;
2644 }
2645
24e2e2a0 2646 if (!ice_vc_validate_vqs_bitmaps(vqs)) {
cf6c6e01 2647 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2648 goto error_param;
2649 }
2650
f1ef73f5 2651 vsi = pf->vsi[vf->lan_vsi_idx];
1071a835 2652 if (!vsi) {
cf6c6e01 2653 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2654 goto error_param;
2655 }
2656
77ca27c4
PG
2657 if (vqs->tx_queues) {
2658 q_map = vqs->tx_queues;
2659
0ca469fb 2660 for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
77ca27c4
PG
2661 struct ice_ring *ring = vsi->tx_rings[vf_q_id];
2662 struct ice_txq_meta txq_meta = { 0 };
2663
2664 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
2665 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2666 goto error_param;
2667 }
2668
2669 /* Skip queue if not enabled */
2670 if (!test_bit(vf_q_id, vf->txq_ena))
2671 continue;
2672
2673 ice_fill_txq_meta(vsi, ring, &txq_meta);
2674
2675 if (ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id,
2676 ring, &txq_meta)) {
19cce2c6 2677 dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
77ca27c4
PG
2678 vf_q_id, vsi->vsi_num);
2679 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2680 goto error_param;
2681 }
2682
2683 /* Clear enabled queues flag */
2684 clear_bit(vf_q_id, vf->txq_ena);
77ca27c4 2685 }
1071a835
AV
2686 }
2687
e1fe6926
BC
2688 q_map = vqs->rx_queues;
2689 /* speed up Rx queue disable by batching them if possible */
2690 if (q_map &&
0ca469fb 2691 bitmap_equal(&q_map, vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF)) {
e1fe6926
BC
2692 if (ice_vsi_stop_all_rx_rings(vsi)) {
2693 dev_err(ice_pf_to_dev(vsi->back), "Failed to stop all Rx rings on VSI %d\n",
2694 vsi->vsi_num);
2695 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2696 goto error_param;
2697 }
77ca27c4 2698
0ca469fb 2699 bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
e1fe6926 2700 } else if (q_map) {
0ca469fb 2701 for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
77ca27c4
PG
2702 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
2703 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2704 goto error_param;
2705 }
2706
2707 /* Skip queue if not enabled */
2708 if (!test_bit(vf_q_id, vf->rxq_ena))
2709 continue;
2710
13a6233b
BC
2711 if (ice_vsi_ctrl_one_rx_ring(vsi, false, vf_q_id,
2712 true)) {
19cce2c6 2713 dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Rx ring %d on VSI %d\n",
77ca27c4
PG
2714 vf_q_id, vsi->vsi_num);
2715 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2716 goto error_param;
2717 }
2718
2719 /* Clear enabled queues flag */
2720 clear_bit(vf_q_id, vf->rxq_ena);
77ca27c4 2721 }
1071a835
AV
2722 }
2723
2724 /* Clear enabled queues flag */
e1fe6926 2725 if (v_ret == VIRTCHNL_STATUS_SUCCESS && ice_vf_has_no_qs_ena(vf))
77ca27c4 2726 clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
1071a835
AV
2727
2728error_param:
2729 /* send the response to the VF */
cf6c6e01 2730 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret,
1071a835
AV
2731 NULL, 0);
2732}
2733
0ca469fb
MW
2734/**
2735 * ice_cfg_interrupt
2736 * @vf: pointer to the VF info
2737 * @vsi: the VSI being configured
2738 * @vector_id: vector ID
2739 * @map: vector map for mapping vectors to queues
 2740 * @q_vector: structure for interrupt vector
 *
 2741 * configure the IRQ to queue map
2742 */
2743static int
2744ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
2745 struct virtchnl_vector_map *map,
2746 struct ice_q_vector *q_vector)
2747{
2748 u16 vsi_q_id, vsi_q_id_idx;
2749 unsigned long qmap;
2750
2751 q_vector->num_ring_rx = 0;
2752 q_vector->num_ring_tx = 0;
2753
2754 qmap = map->rxq_map;
2755 for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
2756 vsi_q_id = vsi_q_id_idx;
2757
2758 if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
2759 return VIRTCHNL_STATUS_ERR_PARAM;
2760
2761 q_vector->num_ring_rx++;
2762 q_vector->rx.itr_idx = map->rxitr_idx;
2763 vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
2764 ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id,
2765 q_vector->rx.itr_idx);
2766 }
2767
2768 qmap = map->txq_map;
2769 for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
2770 vsi_q_id = vsi_q_id_idx;
2771
2772 if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
2773 return VIRTCHNL_STATUS_ERR_PARAM;
2774
2775 q_vector->num_ring_tx++;
2776 q_vector->tx.itr_idx = map->txitr_idx;
2777 vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
2778 ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id,
2779 q_vector->tx.itr_idx);
2780 }
2781
2782 return VIRTCHNL_STATUS_SUCCESS;
2783}
2784
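/* Mapping sketch (illustrative): a VF that binds its second MSI-X vector
 * (vector_id 1; vector 0 is reserved for the OICR) to queue pair 0 sends
 *
 *	map->vector_id = 1;
 *	map->rxq_map = BIT(0);
 *	map->txq_map = BIT(0);
 *
 * and the two loops above attach vsi->rx_rings[0] and vsi->tx_rings[0]
 * to that vector's q_vector with the requested ITR indices.
 */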
1071a835
AV
2785/**
2786 * ice_vc_cfg_irq_map_msg
2787 * @vf: pointer to the VF info
2788 * @msg: pointer to the msg buffer
2789 *
2790 * called from the VF to configure the IRQ to queue map
2791 */
2792static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
2793{
cf6c6e01 2794 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
0ca469fb 2795 u16 num_q_vectors_mapped, vsi_id, vector_id;
173e23c0 2796 struct virtchnl_irq_map_info *irqmap_info;
1071a835 2797 struct virtchnl_vector_map *map;
1071a835 2798 struct ice_pf *pf = vf->pf;
173e23c0 2799 struct ice_vsi *vsi;
1071a835
AV
2800 int i;
2801
173e23c0 2802 irqmap_info = (struct virtchnl_irq_map_info *)msg;
047e52c0
AV
2803 num_q_vectors_mapped = irqmap_info->num_vectors;
2804
047e52c0
AV
2805 /* Check to make sure number of VF vectors mapped is not greater than
2806 * number of VF vectors originally allocated, and check that
2807 * there is actually at least a single VF queue vector mapped
2808 */
ba0db585 2809 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
46c276ce 2810 pf->num_msix_per_vf < num_q_vectors_mapped ||
0ca469fb 2811 !num_q_vectors_mapped) {
cf6c6e01 2812 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2813 goto error_param;
2814 }
2815
3f416961
A
2816 vsi = pf->vsi[vf->lan_vsi_idx];
2817 if (!vsi) {
2818 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2819 goto error_param;
2820 }
2821
047e52c0
AV
2822 for (i = 0; i < num_q_vectors_mapped; i++) {
2823 struct ice_q_vector *q_vector;
ba0db585 2824
1071a835
AV
2825 map = &irqmap_info->vecmap[i];
2826
2827 vector_id = map->vector_id;
2828 vsi_id = map->vsi_id;
b791cdd5
BC
2829 /* vector_id is always 0-based for each VF, and can never be
2830 * larger than or equal to the max allowed interrupts per VF
2831 */
46c276ce 2832 if (!(vector_id < pf->num_msix_per_vf) ||
b791cdd5 2833 !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
047e52c0
AV
2834 (!vector_id && (map->rxq_map || map->txq_map))) {
2835 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2836 goto error_param;
2837 }
2838
2839 /* No need to map VF miscellaneous or rogue vector */
2840 if (!vector_id)
2841 continue;
2842
 2843 /* Subtract non-queue vector from vector_id passed by VF
 2844 * to get the actual VSI queue vector array index
2845 */
2846 q_vector = vsi->q_vectors[vector_id - ICE_NONQ_VECS_VF];
2847 if (!q_vector) {
cf6c6e01 2848 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2849 goto error_param;
2850 }
2851
1071a835 2852 /* look out for an invalid queue index */
0ca469fb
MW
2853 v_ret = (enum virtchnl_status_code)
2854 ice_cfg_interrupt(vf, vsi, vector_id, map, q_vector);
2855 if (v_ret)
2856 goto error_param;
1071a835
AV
2857 }
2858
1071a835
AV
2859error_param:
2860 /* send the response to the VF */
cf6c6e01 2861 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret,
1071a835
AV
2862 NULL, 0);
2863}
2864
2865/**
2866 * ice_vc_cfg_qs_msg
2867 * @vf: pointer to the VF info
2868 * @msg: pointer to the msg buffer
2869 *
2870 * called from the VF to configure the Rx/Tx queues
2871 */
2872static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
2873{
cf6c6e01 2874 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1071a835
AV
2875 struct virtchnl_vsi_queue_config_info *qci =
2876 (struct virtchnl_vsi_queue_config_info *)msg;
2877 struct virtchnl_queue_pair_info *qpi;
77ca27c4 2878 u16 num_rxq = 0, num_txq = 0;
5743020d 2879 struct ice_pf *pf = vf->pf;
1071a835
AV
2880 struct ice_vsi *vsi;
2881 int i;
2882
2883 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
cf6c6e01 2884 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2885 goto error_param;
2886 }
2887
2888 if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
cf6c6e01 2889 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2890 goto error_param;
2891 }
2892
9c7dd756
MS
2893 vsi = pf->vsi[vf->lan_vsi_idx];
2894 if (!vsi) {
cf6c6e01 2895 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
5743020d
AA
2896 goto error_param;
2897 }
2898
0ca469fb 2899 if (qci->num_queue_pairs > ICE_MAX_RSS_QS_PER_VF ||
9c7dd756 2900 qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
19cce2c6 2901 dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n",
9c7dd756 2902 vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
3f416961
A
2903 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2904 goto error_param;
2905 }
2906
1071a835
AV
2907 for (i = 0; i < qci->num_queue_pairs; i++) {
2908 qpi = &qci->qpair[i];
2909 if (qpi->txq.vsi_id != qci->vsi_id ||
2910 qpi->rxq.vsi_id != qci->vsi_id ||
2911 qpi->rxq.queue_id != qpi->txq.queue_id ||
f8af5bf5 2912 qpi->txq.headwb_enabled ||
9c7dd756
MS
2913 !ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
2914 !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
1071a835 2915 !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
cf6c6e01 2916 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2917 goto error_param;
2918 }
2919 /* copy Tx queue info from VF into VSI */
77ca27c4
PG
2920 if (qpi->txq.ring_len > 0) {
2921 num_txq++;
2922 vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
2923 vsi->tx_rings[i]->count = qpi->txq.ring_len;
1071a835 2924 }
77ca27c4
PG
2925
2926 /* copy Rx queue info from VF into VSI */
2927 if (qpi->rxq.ring_len > 0) {
2928 num_rxq++;
2929 vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
2930 vsi->rx_rings[i]->count = qpi->rxq.ring_len;
2931
2932 if (qpi->rxq.databuffer_size != 0 &&
2933 (qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
2934 qpi->rxq.databuffer_size < 1024)) {
2935 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2936 goto error_param;
2937 }
2938 vsi->rx_buf_len = qpi->rxq.databuffer_size;
2939 vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len;
2940 if (qpi->rxq.max_pkt_size >= (16 * 1024) ||
2941 qpi->rxq.max_pkt_size < 64) {
2942 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2943 goto error_param;
2944 }
1071a835 2945 }
77ca27c4 2946
1071a835
AV
2947 vsi->max_frame = qpi->rxq.max_pkt_size;
2948 }
2949
 2950 /* VF can request to configure fewer queues than allocated or the
 2951 * default number, so update the VSI with the new number
2952 */
77ca27c4
PG
2953 vsi->num_txq = num_txq;
2954 vsi->num_rxq = num_rxq;
105e5bc2 2955 /* All queues of VF VSI are in TC 0 */
77ca27c4
PG
2956 vsi->tc_cfg.tc_info[0].qcount_tx = num_txq;
2957 vsi->tc_cfg.tc_info[0].qcount_rx = num_rxq;
1071a835 2958
cf6c6e01
MW
2959 if (ice_vsi_cfg_lan_txqs(vsi) || ice_vsi_cfg_rxqs(vsi))
2960 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
1071a835
AV
2961
2962error_param:
2963 /* send the response to the VF */
cf6c6e01 2964 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, v_ret,
1071a835
AV
2965 NULL, 0);
2966}
2967
2968/**
2969 * ice_is_vf_trusted
2970 * @vf: pointer to the VF info
2971 */
2972static bool ice_is_vf_trusted(struct ice_vf *vf)
2973{
2974 return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
2975}
2976
2977/**
2978 * ice_can_vf_change_mac
2979 * @vf: pointer to the VF info
2980 *
2981 * Return true if the VF is allowed to change its MAC filters, false otherwise
2982 */
2983static bool ice_can_vf_change_mac(struct ice_vf *vf)
2984{
2985 /* If the VF MAC address has been set administratively (via the
2986 * ndo_set_vf_mac command), then deny permission to the VF to
2987 * add/delete unicast MAC addresses, unless the VF is trusted
2988 */
2989 if (vf->pf_set_mac && !ice_is_vf_trusted(vf))
2990 return false;
2991
2992 return true;
2993}
2994
ed4c068d
BC
2995/**
2996 * ice_vc_add_mac_addr - attempt to add the MAC address passed in
2997 * @vf: pointer to the VF info
2998 * @vsi: pointer to the VF's VSI
2999 * @mac_addr: MAC address to add
3000 */
3001static int
3002ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
3003{
3004 struct device *dev = ice_pf_to_dev(vf->pf);
3005 enum ice_status status;
3006
3007 /* default unicast MAC already added */
3008 if (ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
3009 return 0;
3010
3011 if (is_unicast_ether_addr(mac_addr) && !ice_can_vf_change_mac(vf)) {
3012 dev_err(dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
3013 return -EPERM;
3014 }
3015
1b8f15b6 3016 status = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
ed4c068d
BC
3017 if (status == ICE_ERR_ALREADY_EXISTS) {
3018 dev_err(dev, "MAC %pM already exists for VF %d\n", mac_addr,
3019 vf->vf_id);
3020 return -EEXIST;
3021 } else if (status) {
0fee3577
LY
 3022 dev_err(dev, "Failed to add MAC %pM for VF %d, error %s\n",
3023 mac_addr, vf->vf_id, ice_stat_str(status));
ed4c068d
BC
3024 return -EIO;
3025 }
3026
bf8987df
PG
3027 /* Set the default LAN address to the latest unicast MAC address added
3028 * by the VF. The default LAN address is reported by the PF via
3029 * ndo_get_vf_config.
3030 */
3031 if (is_unicast_ether_addr(mac_addr))
ed4c068d
BC
3032 ether_addr_copy(vf->dflt_lan_addr.addr, mac_addr);
3033
3034 vf->num_mac++;
3035
3036 return 0;
3037}
3038
3039/**
3040 * ice_vc_del_mac_addr - attempt to delete the MAC address passed in
3041 * @vf: pointer to the VF info
3042 * @vsi: pointer to the VF's VSI
3043 * @mac_addr: MAC address to delete
3044 */
3045static int
3046ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
3047{
3048 struct device *dev = ice_pf_to_dev(vf->pf);
3049 enum ice_status status;
3050
3051 if (!ice_can_vf_change_mac(vf) &&
3052 ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
3053 return 0;
3054
1b8f15b6 3055 status = ice_fltr_remove_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
ed4c068d
BC
3056 if (status == ICE_ERR_DOES_NOT_EXIST) {
3057 dev_err(dev, "MAC %pM does not exist for VF %d\n", mac_addr,
3058 vf->vf_id);
3059 return -ENOENT;
3060 } else if (status) {
0fee3577
LY
3061 dev_err(dev, "Failed to delete MAC %pM for VF %d, error %s\n",
3062 mac_addr, vf->vf_id, ice_stat_str(status));
ed4c068d
BC
3063 return -EIO;
3064 }
3065
3066 if (ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
3067 eth_zero_addr(vf->dflt_lan_addr.addr);
3068
3069 vf->num_mac--;
3070
3071 return 0;
3072}
3073
1071a835
AV
3074/**
3075 * ice_vc_handle_mac_addr_msg
3076 * @vf: pointer to the VF info
3077 * @msg: pointer to the msg buffer
f9867df6 3078 * @set: true if MAC filters are being set, false otherwise
1071a835 3079 *
df17b7e0 3080 * add or remove guest MAC address filters
1071a835
AV
3081 */
3082static int
3083ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
3084{
ed4c068d
BC
3085 int (*ice_vc_cfg_mac)
3086 (struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr);
cf6c6e01 3087 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1071a835
AV
3088 struct virtchnl_ether_addr_list *al =
3089 (struct virtchnl_ether_addr_list *)msg;
3090 struct ice_pf *pf = vf->pf;
3091 enum virtchnl_ops vc_op;
1071a835 3092 struct ice_vsi *vsi;
1071a835
AV
3093 int i;
3094
ed4c068d 3095 if (set) {
1071a835 3096 vc_op = VIRTCHNL_OP_ADD_ETH_ADDR;
ed4c068d
BC
3097 ice_vc_cfg_mac = ice_vc_add_mac_addr;
3098 } else {
1071a835 3099 vc_op = VIRTCHNL_OP_DEL_ETH_ADDR;
ed4c068d
BC
3100 ice_vc_cfg_mac = ice_vc_del_mac_addr;
3101 }
1071a835
AV
3102
3103 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
3104 !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
cf6c6e01 3105 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
3106 goto handle_mac_exit;
3107 }
3108
ed4c068d
BC
3109 /* If this VF is not privileged, then we can't add more than a
3110 * limited number of addresses. Check to make sure that the
3111 * additions do not push us over the limit.
3112 */
1071a835
AV
3113 if (set && !ice_is_vf_trusted(vf) &&
3114 (vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) {
19cce2c6 3115 dev_err(ice_pf_to_dev(pf), "Can't add more MAC addresses, because VF-%d is not trusted, switch the VF to trusted mode in order to add more functionalities\n",
d84b899a 3116 vf->vf_id);
cf6c6e01 3117 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
3118 goto handle_mac_exit;
3119 }
3120
3121 vsi = pf->vsi[vf->lan_vsi_idx];
f1ef73f5 3122 if (!vsi) {
cf6c6e01 3123 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
f1ef73f5
AA
3124 goto handle_mac_exit;
3125 }
1071a835
AV
3126
3127 for (i = 0; i < al->num_elements; i++) {
ed4c068d
BC
3128 u8 *mac_addr = al->list[i].addr;
3129 int result;
1071a835 3130
ed4c068d
BC
3131 if (is_broadcast_ether_addr(mac_addr) ||
3132 is_zero_ether_addr(mac_addr))
3133 continue;
1071a835 3134
ed4c068d
BC
3135 result = ice_vc_cfg_mac(vf, vsi, mac_addr);
3136 if (result == -EEXIST || result == -ENOENT) {
3137 continue;
3138 } else if (result) {
3139 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
1071a835
AV
3140 goto handle_mac_exit;
3141 }
1071a835
AV
3142 }
3143
1071a835 3144handle_mac_exit:
1071a835 3145 /* send the response to the VF */
cf6c6e01 3146 return ice_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0);
1071a835
AV
3147}
3148
3149/**
3150 * ice_vc_add_mac_addr_msg
3151 * @vf: pointer to the VF info
3152 * @msg: pointer to the msg buffer
3153 *
3154 * add guest MAC address filter
3155 */
3156static int ice_vc_add_mac_addr_msg(struct ice_vf *vf, u8 *msg)
3157{
3158 return ice_vc_handle_mac_addr_msg(vf, msg, true);
3159}
3160
3161/**
3162 * ice_vc_del_mac_addr_msg
3163 * @vf: pointer to the VF info
3164 * @msg: pointer to the msg buffer
3165 *
3166 * remove guest MAC address filter
3167 */
3168static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
3169{
3170 return ice_vc_handle_mac_addr_msg(vf, msg, false);
3171}
3172
3173/**
3174 * ice_vc_request_qs_msg
3175 * @vf: pointer to the VF info
3176 * @msg: pointer to the msg buffer
3177 *
3178 * VFs get a default number of queues but can use this message to request a
df17b7e0 3179 * different number. If the request is successful, PF will reset the VF and
1071a835 3180 * return 0. If unsuccessful, PF will send a message informing the VF of the
f9867df6 3181 * number of available queue pairs via a virtchnl message response.
1071a835
AV
3182 */
3183static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
3184{
cf6c6e01 3185 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1071a835
AV
3186 struct virtchnl_vf_res_request *vfres =
3187 (struct virtchnl_vf_res_request *)msg;
cbfe31b5 3188 u16 req_queues = vfres->num_queue_pairs;
1071a835 3189 struct ice_pf *pf = vf->pf;
cbfe31b5
PK
3190 u16 max_allowed_vf_queues;
3191 u16 tx_rx_queue_left;
4015d11e 3192 struct device *dev;
4ee656bb 3193 u16 cur_queues;
1071a835 3194
4015d11e 3195 dev = ice_pf_to_dev(pf);
1071a835 3196 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
cf6c6e01 3197 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
3198 goto error_param;
3199 }
3200
5743020d 3201 cur_queues = vf->num_vf_qs;
8c243700
AV
3202 tx_rx_queue_left = min_t(u16, ice_get_avail_txq_count(pf),
3203 ice_get_avail_rxq_count(pf));
5743020d 3204 max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
cbfe31b5 3205 if (!req_queues) {
4015d11e 3206 dev_err(dev, "VF %d tried to request 0 queues. Ignoring.\n",
cbfe31b5 3207 vf->vf_id);
0ca469fb 3208 } else if (req_queues > ICE_MAX_RSS_QS_PER_VF) {
4015d11e 3209 dev_err(dev, "VF %d tried to request more than %d queues.\n",
0ca469fb
MW
3210 vf->vf_id, ICE_MAX_RSS_QS_PER_VF);
3211 vfres->num_queue_pairs = ICE_MAX_RSS_QS_PER_VF;
cbfe31b5
PK
3212 } else if (req_queues > cur_queues &&
3213 req_queues - cur_queues > tx_rx_queue_left) {
19cce2c6 3214 dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n",
1071a835 3215 vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
cbfe31b5 3216 vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
0ca469fb 3217 ICE_MAX_RSS_QS_PER_VF);
1071a835
AV
3218 } else {
3219 /* request is successful, then reset VF */
3220 vf->num_req_qs = req_queues;
ff010eca 3221 ice_vc_reset_vf(vf);
4015d11e 3222 dev_info(dev, "VF %d granted request of %u queues.\n",
1071a835
AV
3223 vf->vf_id, req_queues);
3224 return 0;
3225 }
3226
3227error_param:
3228 /* send the response to the VF */
3229 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
cf6c6e01 3230 v_ret, (u8 *)vfres, sizeof(*vfres));
1071a835
AV
3231}
3232
7c710869
AV
3233/**
3234 * ice_set_vf_port_vlan
3235 * @netdev: network interface device structure
3236 * @vf_id: VF identifier
f9867df6 3237 * @vlan_id: VLAN ID being set
7c710869
AV
3238 * @qos: priority setting
3239 * @vlan_proto: VLAN protocol
3240 *
f9867df6 3241 * program VF Port VLAN ID and/or QoS
7c710869
AV
3242 */
3243int
3244ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
3245 __be16 vlan_proto)
3246{
4c66d227 3247 struct ice_pf *pf = ice_netdev_to_pf(netdev);
7c710869 3248 struct ice_vsi *vsi;
4015d11e 3249 struct device *dev;
7c710869 3250 struct ice_vf *vf;
61c9ce86 3251 u16 vlanprio;
c54d209c 3252 int ret;
7c710869 3253
4015d11e 3254 dev = ice_pf_to_dev(pf);
4c66d227 3255 if (ice_validate_vf_id(pf, vf_id))
7c710869 3256 return -EINVAL;
7c710869 3257
61c9ce86
BC
3258 if (vlan_id >= VLAN_N_VID || qos > 7) {
3259 dev_err(dev, "Invalid Port VLAN parameters for VF %d, ID %d, QoS %d\n",
3260 vf_id, vlan_id, qos);
7c710869
AV
3261 return -EINVAL;
3262 }
3263
3264 if (vlan_proto != htons(ETH_P_8021Q)) {
4015d11e 3265 dev_err(dev, "VF VLAN protocol is not supported\n");
7c710869
AV
3266 return -EPROTONOSUPPORT;
3267 }
3268
3269 vf = &pf->vf[vf_id];
3270 vsi = pf->vsi[vf->lan_vsi_idx];
c54d209c
BC
3271
3272 ret = ice_check_vf_ready_for_cfg(vf);
3273 if (ret)
3274 return ret;
7c710869 3275
61c9ce86
BC
3276 vlanprio = vlan_id | (qos << VLAN_PRIO_SHIFT);
3277
3278 if (vf->port_vlan_info == vlanprio) {
7c710869 3279 /* duplicate request, so just return success */
4015d11e 3280 dev_dbg(dev, "Duplicate pvid %d request\n", vlanprio);
c54d209c 3281 return 0;
7c710869
AV
3282 }
3283
7c710869 3284 if (vlan_id || qos) {
72634bc2
BC
3285 /* remove VLAN 0 filter set by default when transitioning from
3286 * no port VLAN to a port VLAN. No change to old port VLAN on
3287 * failure.
3288 */
3289 ret = ice_vsi_kill_vlan(vsi, 0);
3290 if (ret)
3291 return ret;
77a7a84d 3292 ret = ice_vsi_manage_pvid(vsi, vlanprio, true);
7c710869 3293 if (ret)
72634bc2 3294 return ret;
7c710869 3295 } else {
72634bc2
BC
3296 /* add VLAN 0 filter back when transitioning from port VLAN to
3297 * no port VLAN. No change to old port VLAN on failure.
3298 */
1b8f15b6 3299 ret = ice_vsi_add_vlan(vsi, 0, ICE_FWD_TO_VSI);
72634bc2
BC
3300 if (ret)
3301 return ret;
b093841f
BC
3302 ret = ice_vsi_manage_pvid(vsi, 0, false);
3303 if (ret)
e65ee2fb 3304 return ret;
7c710869
AV
3305 }
3306
3307 if (vlan_id) {
4015d11e 3308 dev_info(dev, "Setting VLAN %d, QoS 0x%x on VF %d\n",
7c710869
AV
3309 vlan_id, qos, vf_id);
3310
72634bc2 3311 /* add VLAN filter for the port VLAN */
1b8f15b6 3312 ret = ice_vsi_add_vlan(vsi, vlan_id, ICE_FWD_TO_VSI);
7c710869 3313 if (ret)
c54d209c 3314 return ret;
7c710869 3315 }
72634bc2
BC
3316 /* remove old port VLAN filter with valid VLAN ID or QoS fields */
3317 if (vf->port_vlan_info)
3318 ice_vsi_kill_vlan(vsi, vf->port_vlan_info & VLAN_VID_MASK);
7c710869 3319
72634bc2 3320 /* keep port VLAN information persistent on resets */
b093841f 3321 vf->port_vlan_info = le16_to_cpu(vsi->info.pvid);
7c710869 3322
c54d209c 3323 return 0;
7c710869
AV
3324}
3325
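/* Usage sketch (illustrative): this ndo backs the iproute2 command
 *
 *	# ip link set <pf-netdev> vf 0 vlan 100 qos 3
 *
 * which arrives here with vlan_id = 100 and qos = 3, giving
 * vlanprio = 100 | (3 << VLAN_PRIO_SHIFT) = 0x6064 with the standard
 * VLAN_PRIO_SHIFT of 13.
 */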
d4bc4e2d
BC
3326/**
3327 * ice_vf_vlan_offload_ena - determine if capabilities support VLAN offloads
3328 * @caps: VF driver negotiated capabilities
3329 *
3330 * Return true if VIRTCHNL_VF_OFFLOAD_VLAN capability is set, else return false
3331 */
3332static bool ice_vf_vlan_offload_ena(u32 caps)
3333{
3334 return !!(caps & VIRTCHNL_VF_OFFLOAD_VLAN);
3335}
3336
1071a835
AV
3337/**
3338 * ice_vc_process_vlan_msg
3339 * @vf: pointer to the VF info
3340 * @msg: pointer to the msg buffer
3341 * @add_v: Add VLAN if true, otherwise delete VLAN
3342 *
f9867df6 3343 * Process virtchnl op to add or remove programmed guest VLAN ID
1071a835
AV
3344 */
3345static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
3346{
cf6c6e01 3347 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1071a835
AV
3348 struct virtchnl_vlan_filter_list *vfl =
3349 (struct virtchnl_vlan_filter_list *)msg;
1071a835 3350 struct ice_pf *pf = vf->pf;
5eda8afd 3351 bool vlan_promisc = false;
1071a835 3352 struct ice_vsi *vsi;
4015d11e 3353 struct device *dev;
5eda8afd
AA
3354 struct ice_hw *hw;
3355 int status = 0;
3356 u8 promisc_m;
1071a835
AV
3357 int i;
3358
4015d11e 3359 dev = ice_pf_to_dev(pf);
1071a835 3360 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
cf6c6e01 3361 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
3362 goto error_param;
3363 }
3364
d4bc4e2d
BC
3365 if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
3366 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3367 goto error_param;
3368 }
3369
1071a835 3370 if (!ice_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
cf6c6e01 3371 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
3372 goto error_param;
3373 }
3374
1071a835 3375 for (i = 0; i < vfl->num_elements; i++) {
61c9ce86 3376 if (vfl->vlan_id[i] >= VLAN_N_VID) {
cf6c6e01 3377 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
19cce2c6
AV
3378 dev_err(dev, "invalid VF VLAN id %d\n",
3379 vfl->vlan_id[i]);
1071a835
AV
3380 goto error_param;
3381 }
3382 }
3383
	hw = &pf->hw;
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (add_v && !ice_is_vf_trusted(vf) &&
	    vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
		dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode in order to add more VLANs\n",
			 vf->vf_id);
		/* There is no need to let the VF know about being not
		 * trusted, so we can just return a success message here
		 */
		goto error_param;
	}

	if (vsi->info.pvid) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if ((test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
	     test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) &&
	    test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags))
		vlan_promisc = true;

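	/* From here on, vlan_promisc selects between two models: when the PF
	 * allows true promiscuous mode (ICE_FLAG_VF_TRUE_PROMISC_ENA, set via
	 * what is believed to be the vf-true-promisc-support ethtool private
	 * flag) and the VF is already in unicast/multicast promiscuous mode,
	 * each VLAN gets per-VLAN promiscuous rules instead of being subject
	 * to VLAN pruning.
	 */
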
	if (add_v) {
		for (i = 0; i < vfl->num_elements; i++) {
			u16 vid = vfl->vlan_id[i];

			if (!ice_is_vf_trusted(vf) &&
			    vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
				dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode in order to add more VLANs\n",
					 vf->vf_id);
				/* There is no need to let the VF know about
				 * being not trusted, so we can just return a
				 * success message here as well.
				 */
				goto error_param;
			}

			/* we add VLAN 0 by default for each VF so we can enable
			 * Tx VLAN anti-spoof without triggering MDD events so
			 * we don't need to add it again here
			 */
			if (!vid)
				continue;

			status = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI);
			if (status) {
				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
				goto error_param;
			}

			/* Enable VLAN pruning when a non-zero VLAN is added */
			if (!vlan_promisc && vid &&
			    !ice_vsi_is_vlan_pruning_ena(vsi)) {
				status = ice_cfg_vlan_pruning(vsi, true, false);
				if (status) {
					v_ret = VIRTCHNL_STATUS_ERR_PARAM;
					dev_err(dev, "Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
						vid, status);
					goto error_param;
				}
			} else if (vlan_promisc) {
				/* Enable Ucast/Mcast VLAN promiscuous mode */
				promisc_m = ICE_PROMISC_VLAN_TX |
					    ICE_PROMISC_VLAN_RX;

				status = ice_set_vsi_promisc(hw, vsi->idx,
							     promisc_m, vid);
				if (status) {
					v_ret = VIRTCHNL_STATUS_ERR_PARAM;
					dev_err(dev, "Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n",
						vid, status);
				}
			}
		}
	} else {
		/* In case of a non-trusted VF, the number of VLAN elements
		 * passed to the PF for removal might be greater than the
		 * number of VLAN filters programmed for that VF, so use the
		 * actual number of VLANs added earlier with the add VLAN
		 * opcode. This avoids removing a VLAN that doesn't exist,
		 * which would result in sending an erroneous failed message
		 * back to the VF.
		 */
		int num_vf_vlan;

		num_vf_vlan = vsi->num_vlan;
		for (i = 0; i < vfl->num_elements && i < num_vf_vlan; i++) {
			u16 vid = vfl->vlan_id[i];

			/* we add VLAN 0 by default for each VF so we can enable
			 * Tx VLAN anti-spoof without triggering MDD events so
			 * we don't want a VIRTCHNL request to remove it
			 */
			if (!vid)
				continue;

			/* Make sure ice_vsi_kill_vlan is successful before
			 * updating VLAN information
			 */
			status = ice_vsi_kill_vlan(vsi, vid);
			if (status) {
				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
				goto error_param;
			}

			/* Disable VLAN pruning when only VLAN 0 is left */
			if (vsi->num_vlan == 1 &&
			    ice_vsi_is_vlan_pruning_ena(vsi))
				ice_cfg_vlan_pruning(vsi, false, false);

			/* Disable Unicast/Multicast VLAN promiscuous mode */
			if (vlan_promisc) {
				promisc_m = ICE_PROMISC_VLAN_TX |
					    ICE_PROMISC_VLAN_RX;

				ice_clear_vsi_promisc(hw, vsi->idx,
						      promisc_m, vid);
			}
		}
	}

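	/* Net effect of the two branches above: VLAN pruning is switched on
	 * with the first non-zero VLAN and back off once only the default
	 * VLAN 0 remains, while true-promiscuous VFs manage per-VLAN
	 * promiscuous rules instead of pruning.
	 */
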
error_param:
	/* send the response to the VF */
	if (add_v)
		return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, v_ret,
					     NULL, 0);
	else
		return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, v_ret,
					     NULL, 0);
}

/**
 * ice_vc_add_vlan_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Add and program guest VLAN ID
 */
static int ice_vc_add_vlan_msg(struct ice_vf *vf, u8 *msg)
{
	return ice_vc_process_vlan_msg(vf, msg, true);
}

/**
 * ice_vc_remove_vlan_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Remove programmed guest VLAN ID
 */
static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg)
{
	return ice_vc_process_vlan_msg(vf, msg, false);
}

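/* Guest-side trigger (illustrative): creating a VLAN sub-interface inside
 * the VM, e.g.
 *	ip link add link eth0 name eth0.100 type vlan id 100
 * makes the AVF driver send VIRTCHNL_OP_ADD_VLAN with vlan_id[0] = 100, and
 * deleting it sends VIRTCHNL_OP_DEL_VLAN. Interface names are examples only.
 */
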
/**
 * ice_vc_ena_vlan_stripping
 * @vf: pointer to the VF info
 *
 * Enable VLAN header stripping for a given VF
 */
static int ice_vc_ena_vlan_stripping(struct ice_vf *vf)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi) {
		/* guard against a NULL VSI, mirroring the disable path */
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (ice_vsi_manage_vlan_stripping(vsi, true))
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;

error_param:
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
				     v_ret, NULL, 0);
}

/**
 * ice_vc_dis_vlan_stripping
 * @vf: pointer to the VF info
 *
 * Disable VLAN header stripping for a given VF
 */
static int ice_vc_dis_vlan_stripping(struct ice_vf *vf)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (ice_vsi_manage_vlan_stripping(vsi, false))
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;

error_param:
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
				     v_ret, NULL, 0);
}

/**
 * ice_vf_init_vlan_stripping - enable/disable VLAN stripping on initialization
 * @vf: VF to enable/disable VLAN stripping for on initialization
 *
 * If the VIRTCHNL_VF_OFFLOAD_VLAN flag is set, enable VLAN stripping, else
 * disable it. For example, the flag will be cleared when port VLANs are
 * configured by the administrator before passing the VF to the guest or if
 * the AVF driver doesn't support VLAN offloads.
 */
static int ice_vf_init_vlan_stripping(struct ice_vf *vf)
{
	struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];

	if (!vsi)
		return -EINVAL;

	/* don't modify stripping if port VLAN is configured */
	if (vsi->info.pvid)
		return 0;

	if (ice_vf_vlan_offload_ena(vf->driver_caps))
		return ice_vsi_manage_vlan_stripping(vsi, true);
	else
		return ice_vsi_manage_vlan_stripping(vsi, false);
}

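/* Stripping here is understood to mean Rx VLAN tag stripping on the VF's
 * VSI (the tag is removed from the frame and reported via the Rx
 * descriptor); Tx VLAN insertion is configured separately.
 */
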
/**
 * ice_vc_process_vf_msg - Process request from VF
 * @pf: pointer to the PF structure
 * @event: pointer to the AQ event
 *
 * Called from the common asq/arq handler to process requests from the VF
 */
void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
{
	u32 v_opcode = le32_to_cpu(event->desc.cookie_high);
	s16 vf_id = le16_to_cpu(event->desc.retval);
	u16 msglen = event->msg_len;
	u8 *msg = event->msg_buf;
	struct ice_vf *vf = NULL;
	struct device *dev;
	int err = 0;

	dev = ice_pf_to_dev(pf);
	if (ice_validate_vf_id(pf, vf_id)) {
		err = -EINVAL;
		goto error_handler;
	}

	vf = &pf->vf[vf_id];

	/* Check if VF is disabled. */
	if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) {
		err = -EPERM;
		goto error_handler;
	}

	/* Perform basic checks on the msg */
	err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
	if (err) {
		if (err == VIRTCHNL_STATUS_ERR_PARAM)
			err = -EPERM;
		else
			err = -EINVAL;
	}

error_handler:
	if (err) {
		ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM,
				      NULL, 0);
		dev_err(dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
			vf_id, v_opcode, msglen, err);
		return;
	}

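	/* Every opcode handler below sends its own virtchnl response to the
	 * VF; err only records the PF-side result for the log message
	 * further down.
	 */
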
	switch (v_opcode) {
	case VIRTCHNL_OP_VERSION:
		err = ice_vc_get_ver_msg(vf, msg);
		break;
	case VIRTCHNL_OP_GET_VF_RESOURCES:
		err = ice_vc_get_vf_res_msg(vf, msg);
		if (ice_vf_init_vlan_stripping(vf))
			dev_err(dev, "Failed to initialize VLAN stripping for VF %d\n",
				vf->vf_id);
		ice_vc_notify_vf_link_state(vf);
		break;
	case VIRTCHNL_OP_RESET_VF:
		ice_vc_reset_vf_msg(vf);
		break;
	case VIRTCHNL_OP_ADD_ETH_ADDR:
		err = ice_vc_add_mac_addr_msg(vf, msg);
		break;
	case VIRTCHNL_OP_DEL_ETH_ADDR:
		err = ice_vc_del_mac_addr_msg(vf, msg);
		break;
	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		err = ice_vc_cfg_qs_msg(vf, msg);
		break;
	case VIRTCHNL_OP_ENABLE_QUEUES:
		err = ice_vc_ena_qs_msg(vf, msg);
		ice_vc_notify_vf_link_state(vf);
		break;
	case VIRTCHNL_OP_DISABLE_QUEUES:
		err = ice_vc_dis_qs_msg(vf, msg);
		break;
	case VIRTCHNL_OP_REQUEST_QUEUES:
		err = ice_vc_request_qs_msg(vf, msg);
		break;
	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
		err = ice_vc_cfg_irq_map_msg(vf, msg);
		break;
	case VIRTCHNL_OP_CONFIG_RSS_KEY:
		err = ice_vc_config_rss_key(vf, msg);
		break;
	case VIRTCHNL_OP_CONFIG_RSS_LUT:
		err = ice_vc_config_rss_lut(vf, msg);
		break;
	case VIRTCHNL_OP_GET_STATS:
		err = ice_vc_get_stats_msg(vf, msg);
		break;
	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		err = ice_vc_cfg_promiscuous_mode_msg(vf, msg);
		break;
	case VIRTCHNL_OP_ADD_VLAN:
		err = ice_vc_add_vlan_msg(vf, msg);
		break;
	case VIRTCHNL_OP_DEL_VLAN:
		err = ice_vc_remove_vlan_msg(vf, msg);
		break;
	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
		err = ice_vc_ena_vlan_stripping(vf);
		break;
	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
		err = ice_vc_dis_vlan_stripping(vf);
		break;
	case VIRTCHNL_OP_UNKNOWN:
	default:
		dev_err(dev, "Unsupported opcode %d from VF %d\n", v_opcode,
			vf_id);
		err = ice_vc_send_msg_to_vf(vf, v_opcode,
					    VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
					    NULL, 0);
		break;
	}
	if (err) {
		/* Helper function cares less about error return values here
		 * as it is busy with pending work.
		 */
		dev_info(dev, "PF failed to honor VF %d, opcode %d, error %d\n",
			 vf_id, v_opcode, err);
	}
}

/**
 * ice_get_vf_cfg
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @ivi: VF configuration structure
 *
 * return VF configuration
 */
int
ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vf *vf;

	if (ice_validate_vf_id(pf, vf_id))
		return -EINVAL;

	vf = &pf->vf[vf_id];

	if (ice_check_vf_init(pf, vf))
		return -EBUSY;

	ivi->vf = vf_id;
	ether_addr_copy(ivi->mac, vf->dflt_lan_addr.addr);

	/* VF configuration for VLAN and applicable QoS */
	ivi->vlan = vf->port_vlan_info & VLAN_VID_MASK;
	ivi->qos = (vf->port_vlan_info & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;

	ivi->trusted = vf->trusted;
	ivi->spoofchk = vf->spoofchk;
	if (!vf->link_forced)
		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
	else if (vf->link_up)
		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
	else
		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
	ivi->max_tx_rate = vf->tx_rate;
	ivi->min_tx_rate = 0;
	return 0;
}

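/* ice_get_vf_cfg backs .ndo_get_vf_config, so the fields filled in above are
 * what shows up per VF in "ip link show <pf-if>" output (MAC, vlan/qos,
 * spoof checking, link-state, trust); <pf-if> stands for the PF netdev name.
 */
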
/**
 * ice_unicast_mac_exists - check if the unicast MAC exists on the PF's switch
 * @pf: PF used to reference the switch's rules
 * @umac: unicast MAC to compare against existing switch rules
 *
 * Return true on the first/any match, else return false
 */
static bool ice_unicast_mac_exists(struct ice_pf *pf, u8 *umac)
{
	struct ice_sw_recipe *mac_recipe_list =
		&pf->hw.switch_info->recp_list[ICE_SW_LKUP_MAC];
	struct ice_fltr_mgmt_list_entry *list_itr;
	struct list_head *rule_head;
	struct mutex *rule_lock; /* protect MAC filter list access */

	rule_head = &mac_recipe_list->filt_rules;
	rule_lock = &mac_recipe_list->filt_rule_lock;

	mutex_lock(rule_lock);
	list_for_each_entry(list_itr, rule_head, list_entry) {
		u8 *existing_mac = &list_itr->fltr_info.l_data.mac.mac_addr[0];

		if (ether_addr_equal(existing_mac, umac)) {
			mutex_unlock(rule_lock);
			return true;
		}
	}

	mutex_unlock(rule_lock);

	return false;
}

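/* The walk above is a linear scan of the ICE_SW_LKUP_MAC filter rules under
 * filt_rule_lock; it covers every unicast MAC programmed through this PF's
 * switch, not just VF default MACs.
 */
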
/**
 * ice_set_vf_mac
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @mac: MAC address
 *
 * program VF MAC address
 */
int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vf *vf;
	int ret;

	if (ice_validate_vf_id(pf, vf_id))
		return -EINVAL;

	if (is_zero_ether_addr(mac) || is_multicast_ether_addr(mac)) {
		netdev_err(netdev, "%pM not a valid unicast address\n", mac);
		return -EINVAL;
	}

	vf = &pf->vf[vf_id];
	/* nothing left to do, unicast MAC already set */
	if (ether_addr_equal(vf->dflt_lan_addr.addr, mac))
		return 0;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		return ret;

	if (ice_unicast_mac_exists(pf, mac)) {
		netdev_err(netdev, "Unicast MAC %pM already exists on this PF. Preventing setting VF %u unicast MAC address to %pM\n",
			   mac, vf_id, mac);
		return -EINVAL;
	}

	/* copy MAC into dflt_lan_addr and trigger a VF reset. The reset
	 * flow will use the updated dflt_lan_addr and add a MAC filter
	 * using ice_add_mac. Also set pf_set_mac to indicate that the PF has
	 * set the MAC address for this VF.
	 */
	ether_addr_copy(vf->dflt_lan_addr.addr, mac);
	vf->pf_set_mac = true;
	netdev_info(netdev, "MAC on VF %d set to %pM. VF driver will be reinitialized\n",
		    vf_id, mac);

	ice_vc_reset_vf(vf);
	return 0;
}

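/* Host usage (illustrative): "ip link set ens785f0 vf 0 mac 02:aa:bb:cc:dd:ee".
 * The VF reset forces the guest AVF driver to re-negotiate and pick up the
 * new address, and pf_set_mac lets later virtchnl MAC requests from an
 * untrusted VF be policed against the administratively set address.
 */
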
/**
 * ice_set_vf_trust
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @trusted: Boolean value to enable/disable trusted VF
 *
 * Enable or disable a given VF as trusted
 */
int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vf *vf;
	int ret;

	if (ice_validate_vf_id(pf, vf_id))
		return -EINVAL;

	vf = &pf->vf[vf_id];
	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		return ret;

	/* Check if already trusted */
	if (trusted == vf->trusted)
		return 0;

	vf->trusted = trusted;
	ice_vc_reset_vf(vf);
	dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n",
		 vf_id, trusted ? "" : "un");

	return 0;
}

/**
 * ice_set_vf_link_state
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @link_state: required link state
 *
 * Set VF's link state, irrespective of physical link state status
 */
int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vf *vf;
	int ret;

	if (ice_validate_vf_id(pf, vf_id))
		return -EINVAL;

	vf = &pf->vf[vf_id];
	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		return ret;

	switch (link_state) {
	case IFLA_VF_LINK_STATE_AUTO:
		vf->link_forced = false;
		break;
	case IFLA_VF_LINK_STATE_ENABLE:
		vf->link_forced = true;
		vf->link_up = true;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		vf->link_forced = true;
		vf->link_up = false;
		break;
	default:
		return -EINVAL;
	}

	ice_vc_notify_vf_link_state(vf);

	return 0;
}

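/* Host usage (illustrative): "ip link set ens785f0 vf 0 state disable" maps
 * to IFLA_VF_LINK_STATE_DISABLE above, while "state auto" re-ties the VF's
 * reported link to the physical port state; either way the new state is
 * pushed to the guest through ice_vc_notify_vf_link_state.
 */
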
/**
 * ice_get_vf_stats - populate some stats for the VF
 * @netdev: the netdev of the PF
 * @vf_id: the host OS identifier (0-255)
 * @vf_stats: pointer to the OS memory to be initialized
 */
int ice_get_vf_stats(struct net_device *netdev, int vf_id,
		     struct ifla_vf_stats *vf_stats)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_eth_stats *stats;
	struct ice_vsi *vsi;
	struct ice_vf *vf;
	int ret;

	if (ice_validate_vf_id(pf, vf_id))
		return -EINVAL;

	vf = &pf->vf[vf_id];
	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		return ret;

	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi)
		return -EINVAL;

	ice_update_eth_stats(vsi);
	stats = &vsi->eth_stats;

	memset(vf_stats, 0, sizeof(*vf_stats));

	vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
			       stats->rx_multicast;
	vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
			       stats->tx_multicast;
	vf_stats->rx_bytes = stats->rx_bytes;
	vf_stats->tx_bytes = stats->tx_bytes;
	vf_stats->broadcast = stats->rx_broadcast;
	vf_stats->multicast = stats->rx_multicast;
	vf_stats->rx_dropped = stats->rx_discards;
	vf_stats->tx_dropped = stats->tx_discards;

	return 0;
}

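/* The counters above come from the VF VSI's hardware ethernet stats,
 * refreshed by ice_update_eth_stats() just before the copy: the rx/tx packet
 * totals sum the unicast, broadcast and multicast counters, and the dropped
 * fields map to the VSI discard counters. Userspace reads them via the
 * IFLA_VF_STATS netlink attribute.
 */
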
/**
 * ice_print_vf_rx_mdd_event - print VF Rx malicious driver detect event
 * @vf: pointer to the VF structure
 */
void ice_print_vf_rx_mdd_event(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct device *dev;

	dev = ice_pf_to_dev(pf);

	dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
		 vf->mdd_rx_events.count, pf->hw.pf_id, vf->vf_id,
		 vf->dflt_lan_addr.addr,
		 test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
		 ? "on" : "off");
}

/**
 * ice_print_vfs_mdd_events - print VFs malicious driver detect events
 * @pf: pointer to the PF structure
 *
 * Called from ice_handle_mdd_event to rate limit and print VFs MDD events.
 */
void ice_print_vfs_mdd_events(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	int i;

	/* check that there are pending MDD events to print */
	if (!test_and_clear_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state))
		return;

	/* VF MDD event logs are rate limited to one second intervals */
	if (time_is_after_jiffies(pf->last_printed_mdd_jiffies + HZ * 1))
		return;

	pf->last_printed_mdd_jiffies = jiffies;

	ice_for_each_vf(pf, i) {
		struct ice_vf *vf = &pf->vf[i];

		/* only print Rx MDD event message if there are new events */
		if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) {
			vf->mdd_rx_events.last_printed =
							vf->mdd_rx_events.count;
			ice_print_vf_rx_mdd_event(vf);
		}

		/* only print Tx MDD event message if there are new events */
		if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) {
			vf->mdd_tx_events.last_printed =
							vf->mdd_tx_events.count;

			dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n",
				 vf->mdd_tx_events.count, hw->pf_id, i,
				 vf->dflt_lan_addr.addr);
		}
	}
}