ice: fix link event handling timing
drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"

/**
 * ice_validate_vf_id - helper to check if VF ID is valid
 * @pf: pointer to the PF structure
 * @vf_id: the ID of the VF to check
 */
static int ice_validate_vf_id(struct ice_pf *pf, u16 vf_id)
{
	/* vf_id range is only valid for 0-255, and should always be unsigned */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(ice_pf_to_dev(pf), "Invalid VF ID: %u\n", vf_id);
		return -EINVAL;
	}
	return 0;
}

/**
 * ice_check_vf_init - helper to check if VF init complete
 * @pf: pointer to the PF structure
 * @vf: the pointer to the VF to check
 */
static int ice_check_vf_init(struct ice_pf *pf, struct ice_vf *vf)
{
	if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
		dev_err(ice_pf_to_dev(pf), "VF ID: %u in reset. Try again.\n",
			vf->vf_id);
		return -EBUSY;
	}
	return 0;
}

/**
 * ice_err_to_virt_err - translate errors for VF return code
 * @ice_err: error return code
 */
static enum virtchnl_status_code ice_err_to_virt_err(enum ice_status ice_err)
{
	switch (ice_err) {
	case ICE_SUCCESS:
		return VIRTCHNL_STATUS_SUCCESS;
	case ICE_ERR_BAD_PTR:
	case ICE_ERR_INVAL_SIZE:
	case ICE_ERR_DEVICE_NOT_SUPPORTED:
	case ICE_ERR_PARAM:
	case ICE_ERR_CFG:
		return VIRTCHNL_STATUS_ERR_PARAM;
	case ICE_ERR_NO_MEMORY:
		return VIRTCHNL_STATUS_ERR_NO_MEMORY;
	case ICE_ERR_NOT_READY:
	case ICE_ERR_RESET_FAILED:
	case ICE_ERR_FW_API_VER:
	case ICE_ERR_AQ_ERROR:
	case ICE_ERR_AQ_TIMEOUT:
	case ICE_ERR_AQ_FULL:
	case ICE_ERR_AQ_NO_WORK:
	case ICE_ERR_AQ_EMPTY:
		return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
	default:
		return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
	}
}

/**
 * ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
 * @pf: pointer to the PF structure
 * @v_opcode: operation code
 * @v_retval: return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 */
static void
ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
		    enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
{
	struct ice_hw *hw = &pf->hw;
	unsigned int i;

	ice_for_each_vf(pf, i) {
		struct ice_vf *vf = &pf->vf[i];

		/* Not all VFs are enabled so skip the ones that are not */
		if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
		    !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
			continue;

		/* Ignore return value on purpose - a given VF may fail, but
		 * we need to keep going and send to all of them
		 */
		ice_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, msg,
				      msglen, NULL);
	}
}

/**
 * ice_set_pfe_link - Set the link speed/status of the virtchnl_pf_event
 * @vf: pointer to the VF structure
 * @pfe: pointer to the virtchnl_pf_event to set link speed/status for
 * @ice_link_speed: link speed specified by ICE_AQ_LINK_SPEED_*
 * @link_up: whether or not to set the link up/down
 */
static void
ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
		 int ice_link_speed, bool link_up)
{
	if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
		pfe->event_data.link_event_adv.link_status = link_up;
		/* Speed in Mbps */
		pfe->event_data.link_event_adv.link_speed =
			ice_conv_link_speed_to_virtchnl(true, ice_link_speed);
	} else {
		pfe->event_data.link_event.link_status = link_up;
		/* Legacy method for virtchnl link speeds */
		pfe->event_data.link_event.link_speed =
			(enum virtchnl_link_speed)
			ice_conv_link_speed_to_virtchnl(false, ice_link_speed);
	}
}
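
/* Illustrative note (values assumed, not taken from this driver): a VF that
 * advertised VIRTCHNL_VF_CAP_ADV_LINK_SPEED gets the raw rate in Mbps, so a
 * 25G link would be reported as 25000, while a legacy VF gets the closest
 * enum virtchnl_link_speed value instead.
 */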

/**
 * ice_vf_has_no_qs_ena - check if the VF has any Rx or Tx queues enabled
 * @vf: the VF to check
 *
 * Returns true if the VF has no Rx and no Tx queues enabled and returns false
 * otherwise
 */
static bool ice_vf_has_no_qs_ena(struct ice_vf *vf)
{
	return (!bitmap_weight(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF) &&
		!bitmap_weight(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF));
}

/**
 * ice_is_vf_link_up - check if the VF's link is up
 * @vf: VF to check if link is up
 */
static bool ice_is_vf_link_up(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;

	if (ice_check_vf_init(pf, vf))
		return false;

	if (ice_vf_has_no_qs_ena(vf))
		return false;
	else if (vf->link_forced)
		return vf->link_up;
	else
		return pf->hw.port_info->phy.link_info.link_info &
			ICE_AQ_LINK_UP;
}

/**
 * ice_vc_notify_vf_link_state - Inform a VF of link status
 * @vf: pointer to the VF structure
 *
 * send a link status message to a single VF
 */
static void ice_vc_notify_vf_link_state(struct ice_vf *vf)
{
	struct virtchnl_pf_event pfe = { 0 };
	struct ice_hw *hw = &vf->pf->hw;

	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = PF_EVENT_SEVERITY_INFO;

	if (ice_is_vf_link_up(vf))
		ice_set_pfe_link(vf, &pfe,
				 hw->port_info->phy.link_info.link_speed, true);
	else
		ice_set_pfe_link(vf, &pfe, ICE_AQ_LINK_SPEED_UNKNOWN, false);

	ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
			      VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
			      sizeof(pfe), NULL);
}

/**
 * ice_vf_invalidate_vsi - invalidate vsi_idx/vsi_num to remove VSI access
 * @vf: VF to remove access to VSI for
 */
static void ice_vf_invalidate_vsi(struct ice_vf *vf)
{
	vf->lan_vsi_idx = ICE_NO_VSI;
	vf->lan_vsi_num = ICE_NO_VSI;
}

/**
 * ice_vf_vsi_release - invalidate the VF's VSI after freeing it
 * @vf: invalidate this VF's VSI after freeing it
 */
static void ice_vf_vsi_release(struct ice_vf *vf)
{
	ice_vsi_release(vf->pf->vsi[vf->lan_vsi_idx]);
	ice_vf_invalidate_vsi(vf);
}

/**
 * ice_free_vf_res - Free a VF's resources
 * @vf: pointer to the VF info
 */
static void ice_free_vf_res(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	int i, last_vector_idx;

	/* First, disable VF's configuration API to prevent OS from
	 * accessing the VF's VSI after it's freed or invalidated.
	 */
	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);

	/* free VSI and disconnect it from the parent uplink */
	if (vf->lan_vsi_idx != ICE_NO_VSI) {
		ice_vf_vsi_release(vf);
		vf->num_mac = 0;
	}

	last_vector_idx = vf->first_vector_idx + pf->num_msix_per_vf - 1;

	/* clear VF MDD event information */
	memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
	memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));

	/* Disable interrupts so that VF starts in a known state */
	for (i = vf->first_vector_idx; i <= last_vector_idx; i++) {
		wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M);
		ice_flush(&pf->hw);
	}
	/* reset some of the state variables keeping track of the resources */
	clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
	clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
}

/**
 * ice_dis_vf_mappings
 * @vf: pointer to the VF structure
 */
static void ice_dis_vf_mappings(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	struct device *dev;
	int first, last, v;
	struct ice_hw *hw;

	hw = &pf->hw;
	vsi = pf->vsi[vf->lan_vsi_idx];

	dev = ice_pf_to_dev(pf);
	wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
	wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);

	first = vf->first_vector_idx;
	last = first + pf->num_msix_per_vf - 1;
	for (v = first; v <= last; v++) {
		u32 reg;

		reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) &
			GLINT_VECT2FUNC_IS_PF_M) |
		       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
			GLINT_VECT2FUNC_PF_NUM_M));
		wr32(hw, GLINT_VECT2FUNC(v), reg);
	}

	if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG)
		wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0);
	else
		dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");

	if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
	else
		dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
}

/**
 * ice_sriov_free_msix_res - Reset/free any used MSIX resources
 * @pf: pointer to the PF structure
 *
 * Since no MSIX entries are taken from pf->irq_tracker, just clear
 * pf->sriov_base_vector.
 *
 * Returns 0 on success, and -EINVAL on error.
 */
static int ice_sriov_free_msix_res(struct ice_pf *pf)
{
	struct ice_res_tracker *res;

	if (!pf)
		return -EINVAL;

	res = pf->irq_tracker;
	if (!res)
		return -EINVAL;

	/* give back irq_tracker resources used */
	WARN_ON(pf->sriov_base_vector < res->num_entries);

	pf->sriov_base_vector = 0;

	return 0;
}

/**
 * ice_set_vf_state_qs_dis - Set VF queues state to disabled
 * @vf: pointer to the VF structure
 */
void ice_set_vf_state_qs_dis(struct ice_vf *vf)
{
	/* Clear Rx/Tx enabled queues flag */
	bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF);
	bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
	clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
}

/**
 * ice_dis_vf_qs - Disable the VF queues
 * @vf: pointer to the VF structure
 */
static void ice_dis_vf_qs(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;

	vsi = pf->vsi[vf->lan_vsi_idx];

	ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
	ice_vsi_stop_all_rx_rings(vsi);
	ice_set_vf_state_qs_dis(vf);
}

/**
 * ice_free_vfs - Free all VFs
 * @pf: pointer to the PF structure
 */
void ice_free_vfs(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	unsigned int tmp, i;

	if (!pf->vf)
		return;

	while (test_and_set_bit(__ICE_VF_DIS, pf->state))
		usleep_range(1000, 2000);

	/* Disable IOV before freeing resources. This lets any VF drivers
	 * running in the host get themselves cleaned up before we yank
	 * the carpet out from underneath their feet.
	 */
	if (!pci_vfs_assigned(pf->pdev))
		pci_disable_sriov(pf->pdev);
	else
		dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");

	/* Avoid wait time by stopping all VFs at the same time */
	ice_for_each_vf(pf, i)
		if (test_bit(ICE_VF_STATE_QS_ENA, pf->vf[i].vf_states))
			ice_dis_vf_qs(&pf->vf[i]);

	tmp = pf->num_alloc_vfs;
	pf->num_qps_per_vf = 0;
	pf->num_alloc_vfs = 0;
	for (i = 0; i < tmp; i++) {
		if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) {
			/* disable VF qp mappings and set VF disable state */
			ice_dis_vf_mappings(&pf->vf[i]);
			set_bit(ICE_VF_STATE_DIS, pf->vf[i].vf_states);
			ice_free_vf_res(&pf->vf[i]);
		}
	}

	if (ice_sriov_free_msix_res(pf))
		dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");

	devm_kfree(dev, pf->vf);
	pf->vf = NULL;

	/* This check is for when the driver is unloaded while VFs are
	 * assigned. Setting the number of VFs to 0 through sysfs is caught
	 * before this function ever gets called.
	 */
	if (!pci_vfs_assigned(pf->pdev)) {
		unsigned int vf_id;

		/* Acknowledge VFLR for all VFs. Without this, VFs will fail to
		 * work correctly when SR-IOV gets re-enabled.
		 */
		for (vf_id = 0; vf_id < tmp; vf_id++) {
			u32 reg_idx, bit_idx;

			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
			wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
		}
	}
	clear_bit(__ICE_VF_DIS, pf->state);
	clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
}

/**
 * ice_trigger_vf_reset - Reset a VF on HW
 * @vf: pointer to the VF structure
 * @is_vflr: true if VFLR was issued, false if not
 * @is_pfr: true if the reset was triggered due to a previous PFR
 *
 * Trigger hardware to start a reset for a particular VF. Expects the caller
 * to wait the proper amount of time to allow hardware to reset the VF before
 * it cleans up and restores VF functionality.
 */
static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
{
	struct ice_pf *pf = vf->pf;
	u32 reg, reg_idx, bit_idx;
	unsigned int vf_abs_id, i;
	struct device *dev;
	struct ice_hw *hw;

	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;

	/* Inform VF that it is no longer active, as a warning */
	clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);

	/* Disable VF's configuration API during reset. The flag is re-enabled
	 * when it's safe again to access VF's VSI.
	 */
	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);

	/* VF_MBX_ARQLEN is cleared by PFR, so the driver needs to clear it
	 * in the case of VFR. If this is done for PFR, it can mess up VF
	 * resets because the VF driver may already have started cleanup
	 * by the time we get here.
	 */
	if (!is_pfr)
		wr32(hw, VF_MBX_ARQLEN(vf->vf_id), 0);

	/* In the case of a VFLR, the HW has already reset the VF and we
	 * just need to clean up, so don't hit the VFRTRIG register.
	 */
	if (!is_vflr) {
		/* reset VF using VPGEN_VFRTRIG reg */
		reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
		reg |= VPGEN_VFRTRIG_VFSWR_M;
		wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
	}
	/* clear the VFLR bit in GLGEN_VFLRSTAT */
	reg_idx = (vf_abs_id) / 32;
	bit_idx = (vf_abs_id) % 32;
	wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
	ice_flush(hw);

	wr32(hw, PF_PCI_CIAA,
	     VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S));
	for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) {
		reg = rd32(hw, PF_PCI_CIAD);
		/* no transactions pending so stop polling */
		if ((reg & VF_TRANS_PENDING_M) == 0)
			break;

		dev_err(dev, "VF %u PCI transactions stuck\n", vf->vf_id);
		udelay(ICE_PCI_CIAD_WAIT_DELAY_US);
	}
}
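
/* Illustrative example (assumed values): with hw->func_caps.vf_base_id = 32
 * and vf->vf_id = 8, vf_abs_id is 40, so its VFLR bit is bit 40 % 32 = 8 of
 * GLGEN_VFLRSTAT register index 40 / 32 = 1.
 */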

/**
 * ice_vsi_manage_pvid - Enable or disable port VLAN for VSI
 * @vsi: the VSI to update
 * @pvid_info: VLAN ID and QoS used to set the PVID VSI context field
 * @enable: true to enable the PVID, false to disable it
 */
static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 pvid_info, bool enable)
{
	struct ice_hw *hw = &vsi->back->hw;
	struct ice_aqc_vsi_props *info;
	struct ice_vsi_ctx *ctxt;
	enum ice_status status;
	int ret = 0;

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return -ENOMEM;

	ctxt->info = vsi->info;
	info = &ctxt->info;
	if (enable) {
		info->vlan_flags = ICE_AQ_VSI_VLAN_MODE_UNTAGGED |
			ICE_AQ_VSI_PVLAN_INSERT_PVID |
			ICE_AQ_VSI_VLAN_EMOD_STR;
		info->sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
	} else {
		info->vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING |
			ICE_AQ_VSI_VLAN_MODE_ALL;
		info->sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
	}

	info->pvid = cpu_to_le16(pvid_info);
	info->valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
					   ICE_AQ_VSI_PROP_SW_VALID);

	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
	if (status) {
		dev_info(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %s aq_err %s\n",
			 ice_stat_str(status),
			 ice_aq_str(hw->adminq.sq_last_status));
		ret = -EIO;
		goto out;
	}

	vsi->info.vlan_flags = info->vlan_flags;
	vsi->info.sw_flags2 = info->sw_flags2;
	vsi->info.pvid = info->pvid;
out:
	kfree(ctxt);
	return ret;
}
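
/* Illustrative note (packing assumed to follow the standard 802.1Q TCI
 * layout used elsewhere in the driver): pvid_info carries the QoS/priority
 * bits above the 12-bit VLAN ID, e.g. VLAN 100 at priority 3 would be
 * (3 << VLAN_PRIO_SHIFT) | 100.
 */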

/**
 * ice_vf_get_port_info - Get the VF's port info structure
 * @vf: VF used to get the port info structure for
 */
static struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf)
{
	return vf->pf->hw.port_info;
}

/**
 * ice_vf_vsi_setup - Set up a VF VSI
 * @vf: VF to setup VSI for
 *
 * Returns pointer to the successfully allocated VSI struct on success,
 * otherwise returns NULL on failure.
 */
static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
{
	struct ice_port_info *pi = ice_vf_get_port_info(vf);
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;

	vsi = ice_vsi_setup(pf, pi, ICE_VSI_VF, vf->vf_id);

	if (!vsi) {
		dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n");
		ice_vf_invalidate_vsi(vf);
		return NULL;
	}

	vf->lan_vsi_idx = vsi->idx;
	vf->lan_vsi_num = vsi->vsi_num;

	return vsi;
}

/**
 * ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space
 * @pf: pointer to PF structure
 * @vf: pointer to VF that the first MSIX vector index is being calculated for
 *
 * This returns the first MSIX vector index in PF space that is used by this
 * VF. This index is used when accessing PF relative registers such as
 * GLINT_VECT2FUNC and GLINT_DYN_CTL.
 * This will always be the OICR index in the AVF driver so any functionality
 * using vf->first_vector_idx for queue configuration will have to increment by
 * 1 to avoid meddling with the OICR index.
 */
static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
{
	return pf->sriov_base_vector + vf->vf_id * pf->num_msix_per_vf;
}
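
/* Illustrative example (assumed values): with pf->sriov_base_vector = 96 and
 * pf->num_msix_per_vf = 17, VF 2's first PF-space vector index is
 * 96 + 2 * 17 = 130; vector 130 is its OICR, so its queue vectors begin at
 * 131.
 */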

/**
 * ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN
 * @vf: VF to add MAC filters for
 *
 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
 * always re-adds either a VLAN 0 or port VLAN based filter after reset.
 */
static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf)
{
	struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
	struct device *dev = ice_pf_to_dev(vf->pf);
	u16 vlan_id = 0;
	int err;

	if (vf->port_vlan_info) {
		err = ice_vsi_manage_pvid(vsi, vf->port_vlan_info, true);
		if (err) {
			dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n",
				vf->vf_id, err);
			return err;
		}

		vlan_id = vf->port_vlan_info & VLAN_VID_MASK;
	}

	/* vlan_id will either be 0 or the port VLAN number */
	err = ice_vsi_add_vlan(vsi, vlan_id, ICE_FWD_TO_VSI);
	if (err) {
		dev_err(dev, "failed to add %s VLAN %u filter for VF %u, error %d\n",
			vf->port_vlan_info ? "port" : "", vlan_id, vf->vf_id,
			err);
		return err;
	}

	return 0;
}

/**
 * ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's perm_addr/LAA
 * @vf: VF to add MAC filters for
 *
 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
 * always re-adds a broadcast filter and the VF's perm_addr/LAA after reset.
 */
static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
{
	struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
	struct device *dev = ice_pf_to_dev(vf->pf);
	enum ice_status status;
	u8 broadcast[ETH_ALEN];

	eth_broadcast_addr(broadcast);
	status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
	if (status) {
		dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %s\n",
			vf->vf_id, ice_stat_str(status));
		return ice_status_to_errno(status);
	}

	vf->num_mac++;

	if (is_valid_ether_addr(vf->dflt_lan_addr.addr)) {
		status = ice_fltr_add_mac(vsi, vf->dflt_lan_addr.addr,
					  ICE_FWD_TO_VSI);
		if (status) {
			dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %s\n",
				&vf->dflt_lan_addr.addr[0], vf->vf_id,
				ice_stat_str(status));
			return ice_status_to_errno(status);
		}
		vf->num_mac++;
	}

	return 0;
}

/**
 * ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value
 * @vf: VF to configure trust setting for
 */
static void ice_vf_set_host_trust_cfg(struct ice_vf *vf)
{
	if (vf->trusted)
		set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
	else
		clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
}

/**
 * ice_ena_vf_msix_mappings - enable VF MSIX mappings in hardware
 * @vf: VF to enable MSIX mappings for
 *
 * Some of the registers need to be indexed/configured using hardware global
 * device values and other registers need 0-based values, which represent PF
 * based values.
 */
static void ice_ena_vf_msix_mappings(struct ice_vf *vf)
{
	int device_based_first_msix, device_based_last_msix;
	int pf_based_first_msix, pf_based_last_msix, v;
	struct ice_pf *pf = vf->pf;
	int device_based_vf_id;
	struct ice_hw *hw;
	u32 reg;

	hw = &pf->hw;
	pf_based_first_msix = vf->first_vector_idx;
	pf_based_last_msix = (pf_based_first_msix + pf->num_msix_per_vf) - 1;

	device_based_first_msix = pf_based_first_msix +
		pf->hw.func_caps.common_cap.msix_vector_first_id;
	device_based_last_msix =
		(device_based_first_msix + pf->num_msix_per_vf) - 1;
	device_based_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	reg = (((device_based_first_msix << VPINT_ALLOC_FIRST_S) &
		VPINT_ALLOC_FIRST_M) |
	       ((device_based_last_msix << VPINT_ALLOC_LAST_S) &
		VPINT_ALLOC_LAST_M) | VPINT_ALLOC_VALID_M);
	wr32(hw, VPINT_ALLOC(vf->vf_id), reg);

	reg = (((device_based_first_msix << VPINT_ALLOC_PCI_FIRST_S)
		& VPINT_ALLOC_PCI_FIRST_M) |
	       ((device_based_last_msix << VPINT_ALLOC_PCI_LAST_S) &
		VPINT_ALLOC_PCI_LAST_M) | VPINT_ALLOC_PCI_VALID_M);
	wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);

	/* map the interrupts to its functions */
	for (v = pf_based_first_msix; v <= pf_based_last_msix; v++) {
		reg = (((device_based_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
			GLINT_VECT2FUNC_VF_NUM_M) |
		       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
			GLINT_VECT2FUNC_PF_NUM_M));
		wr32(hw, GLINT_VECT2FUNC(v), reg);
	}

	/* Map mailbox interrupt to VF MSI-X vector 0 */
	wr32(hw, VPINT_MBX_CTL(device_based_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M);
}
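
/* Illustrative example (assumed values): if msix_vector_first_id were 2048
 * for this device and vf->first_vector_idx = 130, the device-based range
 * written to VPINT_ALLOC for a 17-vector VF would be 2178..2194, while
 * GLINT_VECT2FUNC is still programmed with the PF-based indexes 130..146.
 */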

/**
 * ice_ena_vf_q_mappings - enable Rx/Tx queue mappings for a VF
 * @vf: VF to enable the mappings for
 * @max_txq: max Tx queues allowed on the VF's VSI
 * @max_rxq: max Rx queues allowed on the VF's VSI
 */
static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq)
{
	struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_hw *hw = &vf->pf->hw;
	u32 reg;

	/* set regardless of mapping mode */
	wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);

	/* VF Tx queues allocation */
	if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) {
		/* set the VF PF Tx queue range
		 * VFNUMQ value should be set to (number of queues - 1). A value
		 * of 0 means 1 queue and a value of 255 means 256 queues
		 */
		reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
			VPLAN_TX_QBASE_VFFIRSTQ_M) |
		       (((max_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
			VPLAN_TX_QBASE_VFNUMQ_M));
		wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
	} else {
		dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
	}

	/* set regardless of mapping mode */
	wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id), VPLAN_RXQ_MAPENA_RX_ENA_M);

	/* VF Rx queues allocation */
	if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) {
		/* set the VF PF Rx queue range
		 * VFNUMQ value should be set to (number of queues - 1). A value
		 * of 0 means 1 queue and a value of 255 means 256 queues
		 */
		reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
			VPLAN_RX_QBASE_VFFIRSTQ_M) |
		       (((max_rxq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
			VPLAN_RX_QBASE_VFNUMQ_M));
		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
	} else {
		dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
	}
}
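
/* Illustrative example (assumed values): a VF VSI whose first Tx queue maps
 * to PF queue 64 with max_txq = 4 programs VFFIRSTQ = 64 and VFNUMQ = 3,
 * since the field holds the queue count minus one per the comments above.
 */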

/**
 * ice_ena_vf_mappings - enable VF MSIX and queue mapping
 * @vf: pointer to the VF structure
 */
static void ice_ena_vf_mappings(struct ice_vf *vf)
{
	struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];

	ice_ena_vf_msix_mappings(vf);
	ice_ena_vf_q_mappings(vf, vsi->alloc_txq, vsi->alloc_rxq);
}

/**
 * ice_determine_res
 * @pf: pointer to the PF structure
 * @avail_res: available resources in the PF structure
 * @max_res: maximum resources that can be given per VF
 * @min_res: minimum resources that can be given per VF
 *
 * Returns non-zero value if resources (queues/vectors) are available or
 * returns zero if PF cannot accommodate all num_alloc_vfs.
 */
static int
ice_determine_res(struct ice_pf *pf, u16 avail_res, u16 max_res, u16 min_res)
{
	bool checked_min_res = false;
	int res;

	/* start by checking if PF can assign max number of resources for
	 * all num_alloc_vfs.
	 * if yes, return number per VF
	 * If no, divide by 2 and roundup, check again
	 * repeat the loop till we reach a point where even minimum resources
	 * are not available, in that case return 0
	 */
	res = max_res;
	while ((res >= min_res) && !checked_min_res) {
		int num_all_res;

		num_all_res = pf->num_alloc_vfs * res;
		if (num_all_res <= avail_res)
			return res;

		if (res == min_res)
			checked_min_res = true;

		res = DIV_ROUND_UP(res, 2);
	}
	return 0;
}
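
/* Illustrative walk-through (assumed values): with 16 VFs, avail_res = 100,
 * max_res = 16 and min_res = 1, the loop tests 16 * 16 = 256 (too many),
 * then 8 * 16 = 128 (still too many), then 4 * 16 = 64, which fits, so 4
 * resources per VF are granted.
 */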

/**
 * ice_calc_vf_reg_idx - Calculate the VF's register index in the PF space
 * @vf: VF to calculate the register index for
 * @q_vector: a q_vector associated to the VF
 */
int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
{
	struct ice_pf *pf;

	if (!vf || !q_vector)
		return -EINVAL;

	pf = vf->pf;

	/* always add one to account for the OICR being the first MSIX */
	return pf->sriov_base_vector + pf->num_msix_per_vf * vf->vf_id +
		q_vector->v_idx + 1;
}
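
/* Illustrative example (assumed values): with pf->sriov_base_vector = 96,
 * pf->num_msix_per_vf = 17 and vf->vf_id = 2, the q_vector with v_idx = 0
 * lands on register index 96 + 34 + 0 + 1 = 131, one past the VF's OICR
 * vector at 130.
 */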

/**
 * ice_get_max_valid_res_idx - Get the max valid resource index
 * @res: pointer to the resource to find the max valid index for
 *
 * Start from the end of the ice_res_tracker and return right when we find the
 * first res->list entry with the ICE_RES_VALID_BIT set. This function is only
 * valid for SR-IOV because it is the only consumer that manipulates the
 * res->end and this is always called when res->end is set to res->num_entries.
 */
static int ice_get_max_valid_res_idx(struct ice_res_tracker *res)
{
	int i;

	if (!res)
		return -EINVAL;

	for (i = res->num_entries - 1; i >= 0; i--)
		if (res->list[i] & ICE_RES_VALID_BIT)
			return i;

	return 0;
}

/**
 * ice_sriov_set_msix_res - Set any used MSIX resources
 * @pf: pointer to PF structure
 * @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs
 *
 * This function allows SR-IOV resources to be taken from the end of the PF's
 * allowed HW MSIX vectors so that the irq_tracker will not be affected. We
 * just set the pf->sriov_base_vector and return success.
 *
 * If there are not enough resources available, return an error. This should
 * always be caught by ice_set_per_vf_res().
 *
 * Return 0 on success, and -EINVAL when there are not enough MSIX vectors
 * in the PF's space available for SR-IOV.
 */
static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
{
	u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
	int vectors_used = pf->irq_tracker->num_entries;
	int sriov_base_vector;

	sriov_base_vector = total_vectors - num_msix_needed;

	/* make sure we only grab irq_tracker entries from the list end and
	 * that we have enough available MSIX vectors
	 */
	if (sriov_base_vector < vectors_used)
		return -EINVAL;

	pf->sriov_base_vector = sriov_base_vector;

	return 0;
}
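
/* Illustrative example (assumed values): with 1024 total MSI-X vectors,
 * 200 entries in the irq_tracker and 340 vectors needed for SR-IOV, the
 * base vector becomes 1024 - 340 = 684, which clears the 200-entry check.
 */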

/**
 * ice_set_per_vf_res - check if vectors and queues are available
 * @pf: pointer to the PF structure
 *
 * First, determine HW interrupts from common pool. If we allocate fewer VFs, we
 * get more vectors and can enable more queues per VF. Note that this does not
 * grab any vectors from the SW pool already allocated. Also note, that all
 * vector counts include one for each VF's miscellaneous interrupt vector
 * (i.e. OICR).
 *
 * Minimum VFs - 2 vectors, 1 queue pair
 * Small VFs - 5 vectors, 4 queue pairs
 * Medium VFs - 17 vectors, 16 queue pairs
 *
 * Second, determine number of queue pairs per VF by starting with a pre-defined
 * maximum each VF supports. If this is not possible, then we adjust based on
 * queue pairs available on the device.
 *
 * Lastly, set queue and MSI-X VF variables tracked by the PF so it can be used
 * by each VF during VF initialization and reset.
 */
static int ice_set_per_vf_res(struct ice_pf *pf)
{
	int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
	int msix_avail_per_vf, msix_avail_for_sriov;
	struct device *dev = ice_pf_to_dev(pf);
	u16 num_msix_per_vf, num_txq, num_rxq;

	if (!pf->num_alloc_vfs || max_valid_res_idx < 0)
		return -EINVAL;

	/* determine MSI-X resources per VF */
	msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors -
		pf->irq_tracker->num_entries;
	msix_avail_per_vf = msix_avail_for_sriov / pf->num_alloc_vfs;
	if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) {
		num_msix_per_vf = ICE_NUM_VF_MSIX_MED;
	} else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_SMALL) {
		num_msix_per_vf = ICE_NUM_VF_MSIX_SMALL;
	} else if (msix_avail_per_vf >= ICE_MIN_INTR_PER_VF) {
		num_msix_per_vf = ICE_MIN_INTR_PER_VF;
	} else {
		dev_err(dev, "Only %d MSI-X interrupts available for SR-IOV. Not enough to support minimum of %d MSI-X interrupts per VF for %d VFs\n",
			msix_avail_for_sriov, ICE_MIN_INTR_PER_VF,
			pf->num_alloc_vfs);
		return -EIO;
	}

	/* determine queue resources per VF */
	num_txq = ice_determine_res(pf, ice_get_avail_txq_count(pf),
				    min_t(u16,
					  num_msix_per_vf - ICE_NONQ_VECS_VF,
					  ICE_MAX_RSS_QS_PER_VF),
				    ICE_MIN_QS_PER_VF);

	num_rxq = ice_determine_res(pf, ice_get_avail_rxq_count(pf),
				    min_t(u16,
					  num_msix_per_vf - ICE_NONQ_VECS_VF,
					  ICE_MAX_RSS_QS_PER_VF),
				    ICE_MIN_QS_PER_VF);

	if (!num_txq || !num_rxq) {
		dev_err(dev, "Not enough queues to support minimum of %d queue pairs per VF for %d VFs\n",
			ICE_MIN_QS_PER_VF, pf->num_alloc_vfs);
		return -EIO;
	}

	if (ice_sriov_set_msix_res(pf, num_msix_per_vf * pf->num_alloc_vfs)) {
		dev_err(dev, "Unable to set MSI-X resources for %d VFs\n",
			pf->num_alloc_vfs);
		return -EINVAL;
	}

	/* only allow equal Tx/Rx queue count (i.e. queue pairs) */
	pf->num_qps_per_vf = min_t(int, num_txq, num_rxq);
	pf->num_msix_per_vf = num_msix_per_vf;
	dev_info(dev, "Enabling %d VFs with %d vectors and %d queues per VF\n",
		 pf->num_alloc_vfs, pf->num_msix_per_vf, pf->num_qps_per_vf);

	return 0;
}
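
/* Illustrative example (assumed values): with 260 MSI-X vectors left over
 * for SR-IOV and 20 VFs, msix_avail_per_vf is 13, which selects the small
 * tier of 5 vectors (and therefore 4 queue pairs) per VF from the table in
 * the kernel-doc above.
 */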

/**
 * ice_clear_vf_reset_trigger - enable VF to access hardware
 * @vf: VF to enable hardware access for
 */
static void ice_clear_vf_reset_trigger(struct ice_vf *vf)
{
	struct ice_hw *hw = &vf->pf->hw;
	u32 reg;

	reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
	reg &= ~VPGEN_VFRTRIG_VFSWR_M;
	wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
	ice_flush(hw);
}

/**
 * ice_vf_set_vsi_promisc - set given VF VSI to given promiscuous mode(s)
 * @vf: pointer to the VF info
 * @vsi: the VSI being configured
 * @promisc_m: mask of promiscuous config bits
 * @rm_promisc: promisc flag request from the VF to remove or add filter
 *
 * This function configures VF VSI promiscuous mode, based on the VF requests,
 * for Unicast, Multicast and VLAN
 */
static enum ice_status
ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m,
		       bool rm_promisc)
{
	struct ice_pf *pf = vf->pf;
	enum ice_status status = 0;
	struct ice_hw *hw;

	hw = &pf->hw;
	if (vsi->num_vlan) {
		status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
						  rm_promisc);
	} else if (vf->port_vlan_info) {
		if (rm_promisc)
			status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
						       vf->port_vlan_info);
		else
			status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
						     vf->port_vlan_info);
	} else {
		if (rm_promisc)
			status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
						       0);
		else
			status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
						     0);
	}

	return status;
}

static void ice_vf_clear_counters(struct ice_vf *vf)
{
	struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];

	vf->num_mac = 0;
	vsi->num_vlan = 0;
	memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
	memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
}

/**
 * ice_vf_pre_vsi_rebuild - tasks to be done prior to VSI rebuild
 * @vf: VF to perform pre VSI rebuild tasks
 *
 * These tasks are items that don't need to be amortized since they are most
 * likely called in a for loop with all VF(s) in the reset_all_vfs() case.
 */
static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
{
	ice_vf_clear_counters(vf);
	ice_clear_vf_reset_trigger(vf);
}

/**
 * ice_vf_rebuild_host_cfg - host admin configuration is persistent across reset
 * @vf: VF to rebuild host configuration on
 */
static void ice_vf_rebuild_host_cfg(struct ice_vf *vf)
{
	struct device *dev = ice_pf_to_dev(vf->pf);

	ice_vf_set_host_trust_cfg(vf);

	if (ice_vf_rebuild_host_mac_cfg(vf))
		dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n",
			vf->vf_id);

	if (ice_vf_rebuild_host_vlan_cfg(vf))
		dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n",
			vf->vf_id);
}

/**
 * ice_vf_rebuild_vsi_with_release - release and setup the VF's VSI
 * @vf: VF to release and setup the VSI for
 *
 * This is only called when a single VF is being reset (i.e. VFR, VFLR, host VF
 * configuration change, etc.).
 */
static int ice_vf_rebuild_vsi_with_release(struct ice_vf *vf)
{
	ice_vf_vsi_release(vf);
	if (!ice_vf_vsi_setup(vf))
		return -ENOMEM;

	return 0;
}

/**
 * ice_vf_rebuild_vsi - rebuild the VF's VSI
 * @vf: VF to rebuild the VSI for
 *
 * This is only called when all VF(s) are being reset (i.e. PCIe Reset on the
 * host, PFR, CORER, etc.).
 */
static int ice_vf_rebuild_vsi(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;

	vsi = pf->vsi[vf->lan_vsi_idx];

	if (ice_vsi_rebuild(vsi, true)) {
		dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n",
			vf->vf_id);
		return -EIO;
	}
	/* vsi->idx will remain the same in this case so don't update
	 * vf->lan_vsi_idx
	 */
	vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
	vf->lan_vsi_num = vsi->vsi_num;

	return 0;
}

/**
 * ice_vf_set_initialized - VF is ready for VIRTCHNL communication
 * @vf: VF to set in initialized state
 *
 * After this function the VF will be ready to receive/handle the
 * VIRTCHNL_OP_GET_VF_RESOURCES message
 */
static void ice_vf_set_initialized(struct ice_vf *vf)
{
	ice_set_vf_state_qs_dis(vf);
	clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
	clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
	clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
	set_bit(ICE_VF_STATE_INIT, vf->vf_states);
}

/**
 * ice_vf_post_vsi_rebuild - tasks to do after the VF's VSI has been rebuilt
 * @vf: VF to perform tasks on
 */
static void ice_vf_post_vsi_rebuild(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_hw *hw;

	hw = &pf->hw;

	ice_vf_rebuild_host_cfg(vf);

	ice_vf_set_initialized(vf);
	ice_ena_vf_mappings(vf);
	wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
}

/**
 * ice_reset_all_vfs - reset all allocated VFs in one go
 * @pf: pointer to the PF structure
 * @is_vflr: true if VFLR was issued, false if not
 *
 * First, tell the hardware to reset each VF, then do all the waiting in one
 * chunk, and finally finish restoring each VF after the wait. This is useful
 * during PF routines which need to reset all VFs, as otherwise it must perform
 * these resets in a serialized fashion.
 *
 * Returns true if any VFs were reset, and false otherwise.
 */
bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	int v, i;

	/* If we don't have any VFs, then there is nothing to reset */
	if (!pf->num_alloc_vfs)
		return false;

	/* If VFs have been disabled, there is no need to reset */
	if (test_and_set_bit(__ICE_VF_DIS, pf->state))
		return false;

	/* Begin reset on all VFs at once */
	ice_for_each_vf(pf, v)
		ice_trigger_vf_reset(&pf->vf[v], is_vflr, true);

	/* HW requires some time to make sure it can flush the FIFO for a VF
	 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
	 * sequence to make sure that it has completed. We'll keep track of
	 * the VFs using a simple iterator that increments once that VF has
	 * finished resetting.
	 */
	for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
		/* Check each VF in sequence */
		while (v < pf->num_alloc_vfs) {
			u32 reg;

			vf = &pf->vf[v];
			reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
			if (!(reg & VPGEN_VFRSTAT_VFRD_M)) {
				/* only delay if the check failed */
				usleep_range(10, 20);
				break;
			}

			/* If the current VF has finished resetting, move on
			 * to the next VF in sequence.
			 */
			v++;
		}
	}

	/* Display a warning if at least one VF didn't manage to reset in
	 * time, but continue on with the operation.
	 */
	if (v < pf->num_alloc_vfs)
		dev_warn(dev, "VF reset check timeout\n");

	/* free VF resources to begin resetting the VSI state */
	ice_for_each_vf(pf, v) {
		vf = &pf->vf[v];

		ice_vf_pre_vsi_rebuild(vf);
		ice_vf_rebuild_vsi(vf);
		ice_vf_post_vsi_rebuild(vf);
	}

	ice_flush(hw);
	clear_bit(__ICE_VF_DIS, pf->state);

	return true;
}

/**
 * ice_is_vf_disabled
 * @vf: pointer to the VF info
 *
 * Returns true if the PF or VF is disabled, false otherwise.
 */
static bool ice_is_vf_disabled(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;

	/* If the PF has been disabled, there is no need to reset the VF until
	 * the PF is active again. Similarly, if the VF has been disabled, this
	 * means something else is resetting the VF, so we shouldn't continue.
	 * Otherwise, set disable VF state bit for actual reset, and continue.
	 */
	return (test_bit(__ICE_VF_DIS, pf->state) ||
		test_bit(ICE_VF_STATE_DIS, vf->vf_states));
}

/**
 * ice_reset_vf - Reset a particular VF
 * @vf: pointer to the VF structure
 * @is_vflr: true if VFLR was issued, false if not
 *
 * Returns true if the VF is currently in reset, resets successfully, or resets
 * are disabled and false otherwise.
 */
bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	struct device *dev;
	struct ice_hw *hw;
	bool rsd = false;
	u8 promisc_m;
	u32 reg;
	int i;

	dev = ice_pf_to_dev(pf);

	if (test_bit(__ICE_VF_RESETS_DISABLED, pf->state)) {
		dev_dbg(dev, "Trying to reset VF %d, but all VF resets are disabled\n",
			vf->vf_id);
		return true;
	}

	if (ice_is_vf_disabled(vf)) {
		dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",
			vf->vf_id);
		return true;
	}

	/* Set VF disable bit state here, before triggering reset */
	set_bit(ICE_VF_STATE_DIS, vf->vf_states);
	ice_trigger_vf_reset(vf, is_vflr, false);

	vsi = pf->vsi[vf->lan_vsi_idx];

	if (test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states))
		ice_dis_vf_qs(vf);

	/* Call Disable LAN Tx queue AQ whether or not queues are
	 * enabled. This is needed for successful completion of VFR.
	 */
	ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
			NULL, ICE_VF_RESET, vf->vf_id, NULL);

	hw = &pf->hw;
	/* poll VPGEN_VFRSTAT reg to make sure
	 * that reset is complete
	 */
	for (i = 0; i < 10; i++) {
		/* VF reset requires driver to first reset the VF and then
		 * poll the status register to make sure that the reset
		 * completed successfully.
		 */
		reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
		if (reg & VPGEN_VFRSTAT_VFRD_M) {
			rsd = true;
			break;
		}

		/* only sleep if the reset is not done */
		usleep_range(10, 20);
	}

	/* Display a warning if the VF didn't manage to reset in time, but we
	 * need to continue on with the operation.
	 */
	if (!rsd)
		dev_warn(dev, "VF reset check timeout on VF %d\n", vf->vf_id);

	/* disable promiscuous modes in case they were enabled;
	 * ignore any error if the disabling process failed
	 */
	if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
	    test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
		if (vf->port_vlan_info || vsi->num_vlan)
			promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
		else
			promisc_m = ICE_UCAST_PROMISC_BITS;

		vsi = pf->vsi[vf->lan_vsi_idx];
		if (ice_vf_set_vsi_promisc(vf, vsi, promisc_m, true))
			dev_err(dev, "disabling promiscuous mode failed\n");
	}

	ice_vf_pre_vsi_rebuild(vf);
	ice_vf_rebuild_vsi_with_release(vf);
	ice_vf_post_vsi_rebuild(vf);

	return true;
}

/**
 * ice_vc_notify_link_state - Inform all VFs on a PF of link status
 * @pf: pointer to the PF structure
 */
void ice_vc_notify_link_state(struct ice_pf *pf)
{
	int i;

	ice_for_each_vf(pf, i)
		ice_vc_notify_vf_link_state(&pf->vf[i]);
}

/**
 * ice_vc_notify_reset - Send pending reset message to all VFs
 * @pf: pointer to the PF structure
 *
 * indicate a pending reset to all VFs on a given PF
 */
void ice_vc_notify_reset(struct ice_pf *pf)
{
	struct virtchnl_pf_event pfe;

	if (!pf->num_alloc_vfs)
		return;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, VIRTCHNL_STATUS_SUCCESS,
			    (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
}

/**
 * ice_vc_notify_vf_reset - Notify VF of a reset event
 * @vf: pointer to the VF structure
 */
static void ice_vc_notify_vf_reset(struct ice_vf *vf)
{
	struct virtchnl_pf_event pfe;
	struct ice_pf *pf;

	if (!vf)
		return;

	pf = vf->pf;
	if (ice_validate_vf_id(pf, vf->vf_id))
		return;

	/* Bail out if VF is in disabled state, neither initialized, nor active
	 * state - otherwise proceed with notifications
	 */
	if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
	     !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) ||
	    test_bit(ICE_VF_STATE_DIS, vf->vf_states))
		return;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT,
			      VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
			      NULL);
}

/**
 * ice_init_vf_vsi_res - initialize/setup VF VSI resources
 * @vf: VF to initialize/setup the VSI for
 *
 * This function creates a VSI for the VF, adds a VLAN 0 filter, and sets up the
 * VF VSI's broadcast filter and is only used during initial VF creation.
 */
static int ice_init_vf_vsi_res(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	u8 broadcast[ETH_ALEN];
	enum ice_status status;
	struct ice_vsi *vsi;
	struct device *dev;
	int err;

	vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);

	dev = ice_pf_to_dev(pf);
	vsi = ice_vf_vsi_setup(vf);
	if (!vsi)
		return -ENOMEM;

	err = ice_vsi_add_vlan(vsi, 0, ICE_FWD_TO_VSI);
	if (err) {
		dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n",
			 vf->vf_id);
		goto release_vsi;
	}

	eth_broadcast_addr(broadcast);
	status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
	if (status) {
		dev_err(dev, "Failed to add broadcast MAC filter for VF %d, status %s\n",
			vf->vf_id, ice_stat_str(status));
		err = ice_status_to_errno(status);
		goto release_vsi;
	}

	vf->num_mac = 1;

	return 0;

release_vsi:
	ice_vf_vsi_release(vf);
	return err;
}

/**
 * ice_start_vfs - start VFs so they are ready to be used by SR-IOV
 * @pf: PF the VFs are associated with
 */
static int ice_start_vfs(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	int retval, i;

	ice_for_each_vf(pf, i) {
		struct ice_vf *vf = &pf->vf[i];

		ice_clear_vf_reset_trigger(vf);

		retval = ice_init_vf_vsi_res(vf);
		if (retval) {
			dev_err(ice_pf_to_dev(pf), "Failed to initialize VSI resources for VF %d, error %d\n",
				vf->vf_id, retval);
			goto teardown;
		}

		set_bit(ICE_VF_STATE_INIT, vf->vf_states);
		ice_ena_vf_mappings(vf);
		wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
	}

	ice_flush(hw);
	return 0;

teardown:
	for (i = i - 1; i >= 0; i--) {
		struct ice_vf *vf = &pf->vf[i];

		ice_dis_vf_mappings(vf);
		ice_vf_vsi_release(vf);
	}

	return retval;
}

/**
 * ice_set_dflt_settings_vfs - set VF defaults during initialization/creation
 * @pf: PF holding reference to all VFs for default configuration
 */
static void ice_set_dflt_settings_vfs(struct ice_pf *pf)
{
	int i;

	ice_for_each_vf(pf, i) {
		struct ice_vf *vf = &pf->vf[i];

		vf->pf = pf;
		vf->vf_id = i;
		vf->vf_sw_id = pf->first_sw;
		/* assign default capabilities */
		set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vf->vf_caps);
		vf->spoofchk = true;
		vf->num_vf_qs = pf->num_qps_per_vf;
	}
}

/**
 * ice_alloc_vfs - allocate num_vfs in the PF structure
 * @pf: PF to store the allocated VFs in
 * @num_vfs: number of VFs to allocate
 */
static int ice_alloc_vfs(struct ice_pf *pf, int num_vfs)
{
	struct ice_vf *vfs;

	vfs = devm_kcalloc(ice_pf_to_dev(pf), num_vfs, sizeof(*vfs),
			   GFP_KERNEL);
	if (!vfs)
		return -ENOMEM;

	pf->vf = vfs;
	pf->num_alloc_vfs = num_vfs;

	return 0;
}

/**
 * ice_ena_vfs - enable VFs so they are ready to be used
 * @pf: pointer to the PF structure
 * @num_vfs: number of VFs to enable
 */
static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	int ret;

	/* Disable global interrupt 0 so we don't try to handle the VFLR. */
	wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
	     ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
	set_bit(__ICE_OICR_INTR_DIS, pf->state);
	ice_flush(hw);

	ret = pci_enable_sriov(pf->pdev, num_vfs);
	if (ret) {
		pf->num_alloc_vfs = 0;
		goto err_unroll_intr;
	}

	ret = ice_alloc_vfs(pf, num_vfs);
	if (ret)
		goto err_pci_disable_sriov;

	if (ice_set_per_vf_res(pf)) {
		dev_err(dev, "Not enough resources for %d VFs, try with fewer number of VFs\n",
			num_vfs);
		ret = -ENOSPC;
		goto err_unroll_sriov;
	}

	ice_set_dflt_settings_vfs(pf);

	if (ice_start_vfs(pf)) {
		dev_err(dev, "Failed to start VF(s)\n");
		ret = -EAGAIN;
		goto err_unroll_sriov;
	}

	clear_bit(__ICE_VF_DIS, pf->state);
	return 0;

err_unroll_sriov:
	devm_kfree(dev, pf->vf);
	pf->vf = NULL;
	pf->num_alloc_vfs = 0;
err_pci_disable_sriov:
	pci_disable_sriov(pf->pdev);
err_unroll_intr:
	/* rearm interrupts here */
	ice_irq_dynamic_ena(hw, NULL, NULL);
	clear_bit(__ICE_OICR_INTR_DIS, pf->state);
	return ret;
}

/**
 * ice_pci_sriov_ena - Enable or change number of VFs
 * @pf: pointer to the PF structure
 * @num_vfs: number of VFs to allocate
 *
 * Returns 0 on success and negative on failure
 */
static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
{
	int pre_existing_vfs = pci_num_vf(pf->pdev);
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
		ice_free_vfs(pf);
	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
		return 0;

	if (num_vfs > pf->num_vfs_supported) {
		dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
			num_vfs, pf->num_vfs_supported);
		return -EOPNOTSUPP;
	}

	dev_info(dev, "Enabling %d VFs\n", num_vfs);
	err = ice_ena_vfs(pf, num_vfs);
	if (err) {
		dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
		return err;
	}

	set_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
	return 0;
}

/**
 * ice_check_sriov_allowed - check if SR-IOV is allowed based on various checks
 * @pf: PF to enable SR-IOV on
 */
static int ice_check_sriov_allowed(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);

	if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
		dev_err(dev, "This device is not capable of SR-IOV\n");
		return -EOPNOTSUPP;
	}

	if (ice_is_safe_mode(pf)) {
		dev_err(dev, "SR-IOV cannot be configured - Device is in Safe Mode\n");
		return -EOPNOTSUPP;
	}

	if (!ice_pf_state_is_nominal(pf)) {
		dev_err(dev, "Cannot enable SR-IOV, device not ready\n");
		return -EBUSY;
	}

	return 0;
}

/**
 * ice_sriov_configure - Enable or change number of VFs via sysfs
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate or 0 to free VFs
 *
 * This function is called when the user updates the number of VFs in sysfs. On
 * success return whatever num_vfs was set to by the caller. Return negative on
 * failure.
 */
int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct ice_pf *pf = pci_get_drvdata(pdev);
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	err = ice_check_sriov_allowed(pf);
	if (err)
		return err;

	if (!num_vfs) {
		if (!pci_vfs_assigned(pdev)) {
			ice_free_vfs(pf);
			return 0;
		}

		dev_err(dev, "can't free VFs because some are assigned to VMs.\n");
		return -EBUSY;
	}

	err = ice_pci_sriov_ena(pf, num_vfs);
	if (err)
		return err;

	return num_vfs;
}

/**
 * ice_process_vflr_event - Free VF resources via IRQ calls
 * @pf: pointer to the PF structure
 *
 * Called from the VFLR IRQ handler to free up VF resources and state variables
 */
void ice_process_vflr_event(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	unsigned int vf_id;
	u32 reg;

	if (!test_and_clear_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
	    !pf->num_alloc_vfs)
		return;

	ice_for_each_vf(pf, vf_id) {
		struct ice_vf *vf = &pf->vf[vf_id];
		u32 reg_idx, bit_idx;

		reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
		bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
		/* read GLGEN_VFLRSTAT register to find out the flr VFs */
		reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
		if (reg & BIT(bit_idx))
			/* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */
			ice_reset_vf(vf, true);
	}
}
7c710869
AV
1722
1723/**
ff010eca 1724 * ice_vc_reset_vf - Perform software reset on the VF after informing the AVF
7c710869 1725 * @vf: pointer to the VF info
7c710869 1726 */
ff010eca 1727static void ice_vc_reset_vf(struct ice_vf *vf)
7c710869
AV
1728{
1729 ice_vc_notify_vf_reset(vf);
1730 ice_reset_vf(vf, false);
1731}
1732
2309ae38
BC
1733/**
1734 * ice_get_vf_from_pfq - get the VF who owns the PF space queue passed in
1735 * @pf: PF used to index all VFs
1736 * @pfq: queue index relative to the PF's function space
1737 *
1738 * If no VF is found that owns the pfq then return NULL, otherwise return a
1739 * pointer to the VF that owns the pfq
1740 */
1741static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq)
1742{
53bb6698 1743 unsigned int vf_id;
2309ae38
BC
1744
1745 ice_for_each_vf(pf, vf_id) {
1746 struct ice_vf *vf = &pf->vf[vf_id];
1747 struct ice_vsi *vsi;
1748 u16 rxq_idx;
1749
1750 vsi = pf->vsi[vf->lan_vsi_idx];
1751
1752 ice_for_each_rxq(vsi, rxq_idx)
1753 if (vsi->rxq_map[rxq_idx] == pfq)
1754 return vf;
1755 }
1756
1757 return NULL;
1758}
1759
1760/**
1761 * ice_globalq_to_pfq - convert from global queue index to PF space queue index
1762 * @pf: PF used for conversion
1763 * @globalq: global queue index used to convert to PF space queue index
1764 */
1765static u32 ice_globalq_to_pfq(struct ice_pf *pf, u32 globalq)
1766{
1767 return globalq - pf->hw.func_caps.common_cap.rxq_first_id;
1768}
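/* Worked example of the two helpers above, with assumed values: if the
 * device's first Rx queue ID (rxq_first_id) is 128 and a LAN overflow
 * event names global queue 145, the PF-space queue is 145 - 128 = 17;
 * ice_get_vf_from_pfq() then walks each VF VSI's rxq_map looking for 17
 * and returns the owning VF, or NULL if no VF owns that queue.
 */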
1769
1770/**
1771 * ice_vf_lan_overflow_event - handle LAN overflow event for a VF
1772 * @pf: PF that the LAN overflow event happened on
1773 * @event: structure holding the event information for the LAN overflow event
1774 *
1775 * Determine if the LAN overflow event was caused by a VF queue. If it was not
1776 * caused by a VF, do nothing. If a VF caused this LAN overflow event trigger a
1777 * reset on the offending VF.
1778 */
1779void
1780ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
1781{
1782 u32 gldcb_rtctq, queue;
1783 struct ice_vf *vf;
1784
1785 gldcb_rtctq = le32_to_cpu(event->desc.params.lan_overflow.prtdcb_ruptq);
1786 dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq);
1787
1788 /* event returns device global Rx queue number */
1789 queue = (gldcb_rtctq & GLDCB_RTCTQ_RXQNUM_M) >>
1790 GLDCB_RTCTQ_RXQNUM_S;
1791
1792 vf = ice_get_vf_from_pfq(pf, ice_globalq_to_pfq(pf, queue));
1793 if (!vf)
1794 return;
1795
1796 ice_vc_reset_vf(vf);
1797}
1798
1071a835
AV
1799/**
1800 * ice_vc_send_msg_to_vf - Send message to VF
1801 * @vf: pointer to the VF info
1802 * @v_opcode: virtual channel opcode
1803 * @v_retval: virtual channel return value
1804 * @msg: pointer to the msg buffer
1805 * @msglen: msg length
1806 *
1807 * send msg to VF
1808 */
c8b7abdd 1809static int
cf6c6e01
MW
1810ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
1811 enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
1071a835
AV
1812{
1813 enum ice_status aq_ret;
4015d11e 1814 struct device *dev;
1071a835
AV
1815 struct ice_pf *pf;
1816
4c66d227 1817 if (!vf)
1071a835
AV
1818 return -EINVAL;
1819
1820 pf = vf->pf;
4c66d227
JB
1821 if (ice_validate_vf_id(pf, vf->vf_id))
1822 return -EINVAL;
1071a835 1823
4015d11e
BC
1824 dev = ice_pf_to_dev(pf);
1825
1071a835
AV
1826 /* single place to detect unsuccessful return values */
1827 if (v_retval) {
1828 vf->num_inval_msgs++;
4015d11e
BC
1829 dev_info(dev, "VF %d failed opcode %d, retval: %d\n", vf->vf_id,
1830 v_opcode, v_retval);
1071a835 1831 if (vf->num_inval_msgs > ICE_DFLT_NUM_INVAL_MSGS_ALLOWED) {
19cce2c6 1832 dev_err(dev, "Number of invalid messages exceeded for VF %d\n",
1071a835 1833 vf->vf_id);
4015d11e 1834 dev_err(dev, "Use PF Control I/F to enable the VF\n");
1071a835
AV
1835 set_bit(ICE_VF_STATE_DIS, vf->vf_states);
1836 return -EIO;
1837 }
1838 } else {
1839 vf->num_valid_msgs++;
1841 /* reset the invalid counter if a valid message is received */
1841 vf->num_inval_msgs = 0;
1842 }
1843
1844 aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
1845 msg, msglen, NULL);
90e47737 1846 if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) {
0fee3577
LY
1847 dev_info(dev, "Unable to send the message to VF %d ret %s aq_err %s\n",
1848 vf->vf_id, ice_stat_str(aq_ret),
1849 ice_aq_str(pf->hw.mailboxq.sq_last_status));
1071a835
AV
1850 return -EIO;
1851 }
1852
1853 return 0;
1854}
1855
1856/**
1857 * ice_vc_get_ver_msg
1858 * @vf: pointer to the VF info
1859 * @msg: pointer to the msg buffer
1860 *
1861 * called from the VF to request the API version used by the PF
1862 */
1863static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
1864{
1865 struct virtchnl_version_info info = {
1866 VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
1867 };
1868
1869 vf->vf_ver = *(struct virtchnl_version_info *)msg;
1870 /* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
1871 if (VF_IS_V10(&vf->vf_ver))
1872 info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
1873
cf6c6e01
MW
1874 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
1875 VIRTCHNL_STATUS_SUCCESS, (u8 *)&info,
1071a835
AV
1876 sizeof(struct virtchnl_version_info));
1877}
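/* Worked example of the negotiation above: a VF advertising 1.1 gets 1.1
 * echoed back and is expected to send its capability word with
 * VIRTCHNL_OP_GET_VF_RESOURCES; a VF advertising 1.0 gets minor
 * VIRTCHNL_VERSION_MINOR_NO_VF_CAPS back and is later assumed to support
 * only the legacy L2/RSS_REG/VLAN set (see ice_vc_get_vf_res_msg() below).
 */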
1878
1879/**
1880 * ice_vc_get_vf_res_msg
1881 * @vf: pointer to the VF info
1882 * @msg: pointer to the msg buffer
1883 *
1884 * called from the VF to request its resources
1885 */
1886static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
1887{
cf6c6e01 1888 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1071a835 1889 struct virtchnl_vf_resource *vfres = NULL;
1071a835
AV
1890 struct ice_pf *pf = vf->pf;
1891 struct ice_vsi *vsi;
1892 int len = 0;
1893 int ret;
1894
4c66d227 1895 if (ice_check_vf_init(pf, vf)) {
cf6c6e01 1896 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
1897 goto err;
1898 }
1899
1900 len = sizeof(struct virtchnl_vf_resource);
1901
9efe35d0 1902 vfres = kzalloc(len, GFP_KERNEL);
1071a835 1903 if (!vfres) {
cf6c6e01 1904 v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
1071a835
AV
1905 len = 0;
1906 goto err;
1907 }
1908 if (VF_IS_V11(&vf->vf_ver))
1909 vf->driver_caps = *(u32 *)msg;
1910 else
1911 vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
1912 VIRTCHNL_VF_OFFLOAD_RSS_REG |
1913 VIRTCHNL_VF_OFFLOAD_VLAN;
1914
1915 vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
1916 vsi = pf->vsi[vf->lan_vsi_idx];
f1ef73f5 1917 if (!vsi) {
cf6c6e01 1918 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
f1ef73f5
AA
1919 goto err;
1920 }
1921
1071a835
AV
1922 if (!vsi->info.pvid)
1923 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
1924
1925 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
1926 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
1927 } else {
1928 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)
1929 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
1930 else
1931 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
1932 }
1933
1934 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
1935 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
1936
1937 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
1938 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
1939
1940 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)
1941 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
1942
1943 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING)
1944 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
1945
1946 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
1947 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
1948
1949 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
1950 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
1951
1952 if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
1953 vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
1954
1955 vfres->num_vsis = 1;
1956 /* Tx and Rx queue counts are equal for the VF */
1957 vfres->num_queue_pairs = vsi->num_txq;
46c276ce 1958 vfres->max_vectors = pf->num_msix_per_vf;
1071a835
AV
1959 vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE;
1960 vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
1961
1962 vfres->vsi_res[0].vsi_id = vf->lan_vsi_num;
1963 vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
1964 vfres->vsi_res[0].num_queue_pairs = vsi->num_txq;
1965 ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
1966 vf->dflt_lan_addr.addr);
1967
d4bc4e2d
BC
1968 /* match guest capabilities */
1969 vf->driver_caps = vfres->vf_cap_flags;
1970
1071a835
AV
1971 set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
1972
1973err:
1974 /* send the response back to the VF */
cf6c6e01 1975 ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, v_ret,
1071a835
AV
1976 (u8 *)vfres, len);
1977
9efe35d0 1978 kfree(vfres);
1071a835
AV
1979 return ret;
1980}
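/* Example of the capability intersection above: a VF offering
 * VIRTCHNL_VF_OFFLOAD_RSS_PF | VIRTCHNL_VF_OFFLOAD_REQ_QUEUES has both
 * bits granted in vf_cap_flags (plus the always-set
 * VIRTCHNL_VF_OFFLOAD_L2, and VIRTCHNL_VF_OFFLOAD_VLAN unless a port
 * VLAN/pvid is programmed), and vf->driver_caps is then narrowed to
 * exactly the granted set.
 */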
1981
1982/**
1983 * ice_vc_reset_vf_msg
1984 * @vf: pointer to the VF info
1985 *
1986 * called from the VF to reset itself; unlike other
1987 * virtchnl messages, the PF driver doesn't send
1988 * a response back to the VF
1989 */
1990static void ice_vc_reset_vf_msg(struct ice_vf *vf)
1991{
7dcc0fb8 1992 if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
1071a835
AV
1993 ice_reset_vf(vf, false);
1994}
1995
1996/**
1997 * ice_find_vsi_from_id
2f2da36e 1998 * @pf: the PF structure to search for the VSI
f9867df6 1999 * @id: ID of the VSI it is searching for
1071a835 2000 *
f9867df6 2001 * searches for the VSI with the given ID
1071a835
AV
2002 */
2003static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id)
2004{
2005 int i;
2006
80ed404a 2007 ice_for_each_vsi(pf, i)
1071a835
AV
2008 if (pf->vsi[i] && pf->vsi[i]->vsi_num == id)
2009 return pf->vsi[i];
2010
2011 return NULL;
2012}
2013
2014/**
2015 * ice_vc_isvalid_vsi_id
2016 * @vf: pointer to the VF info
f9867df6 2017 * @vsi_id: VF relative VSI ID
1071a835 2018 *
f9867df6 2019 * check for the valid VSI ID
1071a835
AV
2020 */
2021static bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
2022{
2023 struct ice_pf *pf = vf->pf;
2024 struct ice_vsi *vsi;
2025
2026 vsi = ice_find_vsi_from_id(pf, vsi_id);
2027
2028 return (vsi && (vsi->vf_id == vf->vf_id));
2029}
2030
2031/**
2032 * ice_vc_isvalid_q_id
2033 * @vf: pointer to the VF info
f9867df6
AV
2034 * @vsi_id: VSI ID
2035 * @qid: VSI relative queue ID
1071a835 2036 *
f9867df6 2037 * check for the valid queue ID
1071a835
AV
2038 */
2039static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
2040{
2041 struct ice_vsi *vsi = ice_find_vsi_from_id(vf->pf, vsi_id);
2042 /* allocated Tx and Rx queues should always be equal for a VF VSI */
2043 return (vsi && (qid < vsi->alloc_txq));
2044}
2045
9c7dd756
MS
2046/**
2047 * ice_vc_isvalid_ring_len
2048 * @ring_len: length of ring
2049 *
2050 * check for a valid ring count; it should be a multiple of ICE_REQ_DESC_MULTIPLE
77ca27c4 2051 * or zero
9c7dd756
MS
2052 */
2053static bool ice_vc_isvalid_ring_len(u16 ring_len)
2054{
77ca27c4
PG
2055 return ring_len == 0 ||
2056 (ring_len >= ICE_MIN_NUM_DESC &&
9c7dd756
MS
2057 ring_len <= ICE_MAX_NUM_DESC &&
2058 !(ring_len % ICE_REQ_DESC_MULTIPLE));
2059}
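/* A minimal userspace sketch of the ring-length rule above. The constants
 * mirror the ICE_* values of this era of the driver (64/8160/32); they are
 * assumptions here, so check your tree before relying on them.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define MIN_NUM_DESC      64   /* assumed ICE_MIN_NUM_DESC */
#define MAX_NUM_DESC      8160 /* assumed ICE_MAX_NUM_DESC */
#define REQ_DESC_MULTIPLE 32   /* assumed ICE_REQ_DESC_MULTIPLE */

static bool valid_ring_len(uint16_t len)
{
	/* zero means "leave this ring unconfigured" */
	return len == 0 ||
	       (len >= MIN_NUM_DESC && len <= MAX_NUM_DESC &&
		!(len % REQ_DESC_MULTIPLE));
}

int main(void)
{
	assert(valid_ring_len(0));      /* unconfigured ring is fine */
	assert(valid_ring_len(512));    /* in range, multiple of 32 */
	assert(!valid_ring_len(100));   /* not a multiple of 32 */
	assert(!valid_ring_len(16384)); /* above the maximum */
	return 0;
}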
2060
1071a835
AV
2061/**
2062 * ice_vc_config_rss_key
2063 * @vf: pointer to the VF info
2064 * @msg: pointer to the msg buffer
2065 *
2066 * Configure the VF's RSS key
2067 */
2068static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
2069{
cf6c6e01 2070 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1071a835
AV
2071 struct virtchnl_rss_key *vrk =
2072 (struct virtchnl_rss_key *)msg;
f1ef73f5 2073 struct ice_pf *pf = vf->pf;
4c66d227 2074 struct ice_vsi *vsi;
1071a835
AV
2075
2076 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
cf6c6e01 2077 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2078 goto error_param;
2079 }
2080
2081 if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) {
cf6c6e01 2082 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2083 goto error_param;
2084 }
2085
3f416961 2086 if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
cf6c6e01 2087 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2088 goto error_param;
2089 }
2090
3f416961 2091 if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
cf6c6e01 2092 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2093 goto error_param;
2094 }
2095
3f416961
A
2096 vsi = pf->vsi[vf->lan_vsi_idx];
2097 if (!vsi) {
cf6c6e01 2098 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2099 goto error_param;
2100 }
2101
cf6c6e01
MW
2102 if (ice_set_rss(vsi, vrk->key, NULL, 0))
2103 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
1071a835 2104error_param:
cf6c6e01 2105 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, v_ret,
1071a835
AV
2106 NULL, 0);
2107}
2108
2109/**
2110 * ice_vc_config_rss_lut
2111 * @vf: pointer to the VF info
2112 * @msg: pointer to the msg buffer
2113 *
2114 * Configure the VF's RSS LUT
2115 */
2116static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
2117{
2118 struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
cf6c6e01 2119 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
f1ef73f5 2120 struct ice_pf *pf = vf->pf;
4c66d227 2121 struct ice_vsi *vsi;
1071a835
AV
2122
2123 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
cf6c6e01 2124 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2125 goto error_param;
2126 }
2127
2128 if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) {
cf6c6e01 2129 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2130 goto error_param;
2131 }
2132
3f416961 2133 if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) {
cf6c6e01 2134 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2135 goto error_param;
2136 }
2137
3f416961 2138 if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
cf6c6e01 2139 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2140 goto error_param;
2141 }
2142
3f416961
A
2143 vsi = pf->vsi[vf->lan_vsi_idx];
2144 if (!vsi) {
cf6c6e01 2145 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2146 goto error_param;
2147 }
2148
cf6c6e01
MW
2149 if (ice_set_rss(vsi, NULL, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE))
2150 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
1071a835 2151error_param:
cf6c6e01 2152 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret,
1071a835
AV
2153 NULL, 0);
2154}
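/* Sizing note for the two RSS handlers above: the key must be exactly
 * ICE_VSIQF_HKEY_ARRAY_SIZE bytes and the LUT exactly
 * ICE_VSIQF_HLUT_ARRAY_SIZE bytes (52 and 64 in this era of the driver;
 * illustrative values). Anything else is rejected with
 * VIRTCHNL_STATUS_ERR_PARAM rather than truncated or padded.
 */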
2155
c54d209c
BC
2156/**
2157 * ice_wait_on_vf_reset - poll to make sure a given VF is ready after reset
2158 * @vf: The VF being reset
2159 *
2160 * The max poll time is about ~800ms, which is about the maximum time it takes
2161 * for a VF to be reset and/or a VF driver to be removed.
2162 */
2163static void ice_wait_on_vf_reset(struct ice_vf *vf)
2164{
2165 int i;
2166
2167 for (i = 0; i < ICE_MAX_VF_RESET_TRIES; i++) {
2168 if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
2169 break;
2170 msleep(ICE_MAX_VF_RESET_SLEEP_MS);
2171 }
2172}
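/* The "~800ms" above, made concrete with the current constants (assumed
 * values, check the tree): ICE_MAX_VF_RESET_TRIES (40) polls with an
 * ICE_MAX_VF_RESET_SLEEP_MS (20 ms) sleep each give 40 * 20 ms = 800 ms
 * worst case before the wait gives up.
 */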
2173
2174/**
2175 * ice_check_vf_ready_for_cfg - check if VF is ready to be configured/queried
2176 * @vf: VF to check if it's ready to be configured/queried
2177 *
2178 * The purpose of this function is to make sure the VF is not in reset, not
2179 * disabled, and initialized so it can be configured and/or queried by a host
2180 * administrator.
2181 */
2182static int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
2183{
2184 struct ice_pf *pf;
2185
2186 ice_wait_on_vf_reset(vf);
2187
2188 if (ice_is_vf_disabled(vf))
2189 return -EINVAL;
2190
2191 pf = vf->pf;
2192 if (ice_check_vf_init(pf, vf))
2193 return -EBUSY;
2194
2195 return 0;
2196}
2197
cd6d6b83
BC
2198/**
2199 * ice_set_vf_spoofchk
2200 * @netdev: network interface device structure
2201 * @vf_id: VF identifier
2202 * @ena: flag to enable or disable feature
2203 *
2204 * Enable or disable VF spoof checking
2205 */
2206int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
2207{
2208 struct ice_netdev_priv *np = netdev_priv(netdev);
2209 struct ice_pf *pf = np->vsi->back;
2210 struct ice_vsi_ctx *ctx;
2211 struct ice_vsi *vf_vsi;
2212 enum ice_status status;
2213 struct device *dev;
2214 struct ice_vf *vf;
c54d209c 2215 int ret;
cd6d6b83
BC
2216
2217 dev = ice_pf_to_dev(pf);
2218 if (ice_validate_vf_id(pf, vf_id))
2219 return -EINVAL;
2220
2221 vf = &pf->vf[vf_id];
c54d209c
BC
2222 ret = ice_check_vf_ready_for_cfg(vf);
2223 if (ret)
2224 return ret;
cd6d6b83
BC
2225
2226 vf_vsi = pf->vsi[vf->lan_vsi_idx];
2227 if (!vf_vsi) {
2228 netdev_err(netdev, "VSI %d for VF %d is null\n",
2229 vf->lan_vsi_idx, vf->vf_id);
2230 return -EINVAL;
2231 }
2232
2233 if (vf_vsi->type != ICE_VSI_VF) {
19cce2c6 2234 netdev_err(netdev, "Type %d of VSI %d for VF %d is not ICE_VSI_VF\n",
cd6d6b83
BC
2235 vf_vsi->type, vf_vsi->vsi_num, vf->vf_id);
2236 return -ENODEV;
2237 }
2238
2239 if (ena == vf->spoofchk) {
2240 dev_dbg(dev, "VF spoofchk already %s\n", ena ? "ON" : "OFF");
2241 return 0;
2242 }
2243
2244 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
2245 if (!ctx)
2246 return -ENOMEM;
2247
2248 ctx->info.sec_flags = vf_vsi->info.sec_flags;
2249 ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
2250 if (ena) {
2251 ctx->info.sec_flags |=
2252 ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
2253 (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
2254 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
2255 } else {
2256 ctx->info.sec_flags &=
2257 ~(ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
2258 (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
2259 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S));
2260 }
2261
2262 status = ice_update_vsi(&pf->hw, vf_vsi->idx, ctx, NULL);
2263 if (status) {
0fee3577
LY
2264 dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d\n error %s\n",
2265 ena ? "en" : "dis", vf->vf_id, vf_vsi->vsi_num,
2266 ice_stat_str(status));
cd6d6b83
BC
2267 ret = -EIO;
2268 goto out;
2269 }
2270
2271 /* only update spoofchk state and VSI context on success */
2272 vf_vsi->info.sec_flags = ctx->info.sec_flags;
2273 vf->spoofchk = ena;
2274
2275out:
2276 kfree(ctx);
2277 return ret;
2278}
2279
01b5e89a
BC
2280/**
2281 * ice_is_any_vf_in_promisc - check if any VF(s) are in promiscuous mode
2282 * @pf: PF structure for accessing VF(s)
2283 *
2284 * Return false if no VF(s) are in unicast and/or multicast promiscuous mode,
2285 * else return true
2286 */
2287bool ice_is_any_vf_in_promisc(struct ice_pf *pf)
2288{
2289 int vf_idx;
2290
2291 ice_for_each_vf(pf, vf_idx) {
2292 struct ice_vf *vf = &pf->vf[vf_idx];
2293
2294 /* found a VF that has promiscuous mode configured */
2295 if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
2296 test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
2297 return true;
2298 }
2299
2300 return false;
2301}
2302
2303/**
2304 * ice_vc_cfg_promiscuous_mode_msg
2305 * @vf: pointer to the VF info
2306 * @msg: pointer to the msg buffer
2307 *
2308 * called from the VF to configure VF VSIs promiscuous mode
2309 */
2310static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
2311{
2312 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2313 struct virtchnl_promisc_info *info =
2314 (struct virtchnl_promisc_info *)msg;
2315 struct ice_pf *pf = vf->pf;
2316 struct ice_vsi *vsi;
2317 struct device *dev;
2318 bool rm_promisc;
2319 int ret = 0;
2320
2321 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2322 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2323 goto error_param;
2324 }
2325
2326 if (!ice_vc_isvalid_vsi_id(vf, info->vsi_id)) {
2327 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2328 goto error_param;
2329 }
2330
2331 vsi = pf->vsi[vf->lan_vsi_idx];
2332 if (!vsi) {
2333 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2334 goto error_param;
2335 }
2336
2337 dev = ice_pf_to_dev(pf);
2338 if (!test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
2339 dev_err(dev, "Unprivileged VF %d is attempting to configure promiscuous mode\n",
2340 vf->vf_id);
2341 /* Leave v_ret alone, lie to the VF on purpose. */
2342 goto error_param;
2343 }
2344
2345 rm_promisc = !(info->flags & FLAG_VF_UNICAST_PROMISC) &&
2346 !(info->flags & FLAG_VF_MULTICAST_PROMISC);
2347
2348 if (vsi->num_vlan || vf->port_vlan_info) {
2349 struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);
2350 struct net_device *pf_netdev;
2351
2352 if (!pf_vsi) {
2353 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2354 goto error_param;
2355 }
2356
2357 pf_netdev = pf_vsi->netdev;
2358
2359 ret = ice_set_vf_spoofchk(pf_netdev, vf->vf_id, rm_promisc);
2360 if (ret) {
2361 dev_err(dev, "Failed to update spoofchk to %s for VF %d VSI %d when setting promiscuous mode\n",
2362 rm_promisc ? "ON" : "OFF", vf->vf_id,
2363 vsi->vsi_num);
2364 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2365 }
2366
2367 ret = ice_cfg_vlan_pruning(vsi, true, !rm_promisc);
2368 if (ret) {
2369 dev_err(dev, "Failed to configure VLAN pruning in promiscuous mode\n");
2370 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2371 goto error_param;
2372 }
2373 }
2374
2375 if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
2376 bool set_dflt_vsi = !!(info->flags & FLAG_VF_UNICAST_PROMISC);
2377
2378 if (set_dflt_vsi && !ice_is_dflt_vsi_in_use(pf->first_sw))
2379 /* only attempt to set the default forwarding VSI if
2380 * it's not currently set
2381 */
2382 ret = ice_set_dflt_vsi(pf->first_sw, vsi);
2383 else if (!set_dflt_vsi &&
2384 ice_is_vsi_dflt_vsi(pf->first_sw, vsi))
2385 /* only attempt to free the default forwarding VSI if we
2386 * are the owner
2387 */
2388 ret = ice_clear_dflt_vsi(pf->first_sw);
2389
2390 if (ret) {
2391 dev_err(dev, "%sable VF %d as the default VSI failed, error %d\n",
2392 set_dflt_vsi ? "en" : "dis", vf->vf_id, ret);
2393 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
2394 goto error_param;
2395 }
2396 } else {
2397 enum ice_status status;
2398 u8 promisc_m;
2399
2400 if (info->flags & FLAG_VF_UNICAST_PROMISC) {
2401 if (vf->port_vlan_info || vsi->num_vlan)
2402 promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
2403 else
2404 promisc_m = ICE_UCAST_PROMISC_BITS;
2405 } else if (info->flags & FLAG_VF_MULTICAST_PROMISC) {
2406 if (vf->port_vlan_info || vsi->num_vlan)
2407 promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
2408 else
2409 promisc_m = ICE_MCAST_PROMISC_BITS;
2410 } else {
2411 if (vf->port_vlan_info || vsi->num_vlan)
2412 promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
2413 else
2414 promisc_m = ICE_UCAST_PROMISC_BITS;
2415 }
2416
2417 /* Configure multicast/unicast with or without VLAN promiscuous
2418 * mode
2419 */
2420 status = ice_vf_set_vsi_promisc(vf, vsi, promisc_m, rm_promisc);
2421 if (status) {
0fee3577
LY
2422 dev_err(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d failed, error: %s\n",
2423 rm_promisc ? "dis" : "en", vf->vf_id,
2424 ice_stat_str(status));
01b5e89a
BC
2425 v_ret = ice_err_to_virt_err(status);
2426 goto error_param;
2427 } else {
2428 dev_dbg(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d succeeded\n",
2429 rm_promisc ? "dis" : "en", vf->vf_id);
2430 }
2431 }
2432
2433 if (info->flags & FLAG_VF_MULTICAST_PROMISC)
2434 set_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
2435 else
2436 clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
2437
2438 if (info->flags & FLAG_VF_UNICAST_PROMISC)
2439 set_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
2440 else
2441 clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
2442
2443error_param:
2444 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
2445 v_ret, NULL, 0);
2446}
2447
1071a835
AV
2448/**
2449 * ice_vc_get_stats_msg
2450 * @vf: pointer to the VF info
2451 * @msg: pointer to the msg buffer
2452 *
2453 * called from the VF to get VSI stats
2454 */
2455static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
2456{
cf6c6e01 2457 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1071a835
AV
2458 struct virtchnl_queue_select *vqs =
2459 (struct virtchnl_queue_select *)msg;
949375de 2460 struct ice_eth_stats stats = { 0 };
f1ef73f5 2461 struct ice_pf *pf = vf->pf;
1071a835
AV
2462 struct ice_vsi *vsi;
2463
2464 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
cf6c6e01 2465 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2466 goto error_param;
2467 }
2468
2469 if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
cf6c6e01 2470 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2471 goto error_param;
2472 }
2473
f1ef73f5 2474 vsi = pf->vsi[vf->lan_vsi_idx];
1071a835 2475 if (!vsi) {
cf6c6e01 2476 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2477 goto error_param;
2478 }
2479
1071a835
AV
2480 ice_update_eth_stats(vsi);
2481
2482 stats = vsi->eth_stats;
2483
2484error_param:
2485 /* send the response to the VF */
cf6c6e01 2486 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, v_ret,
1071a835
AV
2487 (u8 *)&stats, sizeof(stats));
2488}
2489
24e2e2a0
BC
2490/**
2491 * ice_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
2492 * @vqs: virtchnl_queue_select structure containing bitmaps to validate
2493 *
2494 * Return true on successful validation, else false
2495 */
2496static bool ice_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
2497{
2498 if ((!vqs->rx_queues && !vqs->tx_queues) ||
0ca469fb
MW
2499 vqs->rx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF) ||
2500 vqs->tx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF))
24e2e2a0
BC
2501 return false;
2502
2503 return true;
2504}
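/* A minimal userspace sketch of the bitmap rule above, assuming
 * ICE_MAX_RSS_QS_PER_VF = 16 (the value in this era of the driver; an
 * assumption here). Each bit selects one queue, so a valid nonempty
 * selection must stay below bit 16.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define MAX_RSS_QS_PER_VF 16 /* assumed ICE_MAX_RSS_QS_PER_VF */
#define BIT64(n) (1ULL << (n))

static bool valid_vqs(uint32_t rxq, uint32_t txq)
{
	/* at least one queue selected, no bit at or above index 16 */
	return (rxq || txq) &&
	       rxq < BIT64(MAX_RSS_QS_PER_VF) &&
	       txq < BIT64(MAX_RSS_QS_PER_VF);
}

int main(void)
{
	assert(valid_vqs(0x000F, 0x000F)); /* queues 0-3 on both sides */
	assert(!valid_vqs(0, 0));          /* nothing selected */
	assert(!valid_vqs(BIT64(16), 0));  /* queue 16 is out of range */
	return 0;
}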
2505
4dc926d3
BC
2506/**
2507 * ice_vf_ena_txq_interrupt - enable Tx queue interrupt via QINT_TQCTL
2508 * @vsi: VSI of the VF to configure
2509 * @q_idx: VF queue index used to determine the queue in the PF's space
2510 */
2511static void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx)
2512{
2513 struct ice_hw *hw = &vsi->back->hw;
2514 u32 pfq = vsi->txq_map[q_idx];
2515 u32 reg;
2516
2517 reg = rd32(hw, QINT_TQCTL(pfq));
2518
2519 /* MSI-X index 0 in the VF's space is always for the OICR, which means
2520 * this is most likely a poll mode VF driver, so don't enable an
2521 * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
2522 */
2523 if (!(reg & QINT_TQCTL_MSIX_INDX_M))
2524 return;
2525
2526 wr32(hw, QINT_TQCTL(pfq), reg | QINT_TQCTL_CAUSE_ENA_M);
2527}
2528
2529/**
2530 * ice_vf_ena_rxq_interrupt - enable Rx queue interrupt via QINT_RQCTL
2531 * @vsi: VSI of the VF to configure
2532 * @q_idx: VF queue index used to determine the queue in the PF's space
2533 */
2534static void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx)
2535{
2536 struct ice_hw *hw = &vsi->back->hw;
2537 u32 pfq = vsi->rxq_map[q_idx];
2538 u32 reg;
2539
2540 reg = rd32(hw, QINT_RQCTL(pfq));
2541
2542 /* MSI-X index 0 in the VF's space is always for the OICR, which means
2543 * this is most likely a poll mode VF driver, so don't enable an
2544 * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
2545 */
2546 if (!(reg & QINT_RQCTL_MSIX_INDX_M))
2547 return;
2548
2549 wr32(hw, QINT_RQCTL(pfq), reg | QINT_RQCTL_CAUSE_ENA_M);
2550}
2551
1071a835
AV
2552/**
2553 * ice_vc_ena_qs_msg
2554 * @vf: pointer to the VF info
2555 * @msg: pointer to the msg buffer
2556 *
2557 * called from the VF to enable all or specific queue(s)
2558 */
2559static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
2560{
cf6c6e01 2561 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1071a835
AV
2562 struct virtchnl_queue_select *vqs =
2563 (struct virtchnl_queue_select *)msg;
f1ef73f5 2564 struct ice_pf *pf = vf->pf;
1071a835 2565 struct ice_vsi *vsi;
77ca27c4
PG
2566 unsigned long q_map;
2567 u16 vf_q_id;
1071a835
AV
2568
2569 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
cf6c6e01 2570 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2571 goto error_param;
2572 }
2573
2574 if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
cf6c6e01 2575 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2576 goto error_param;
2577 }
2578
24e2e2a0 2579 if (!ice_vc_validate_vqs_bitmaps(vqs)) {
3f416961
A
2580 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2581 goto error_param;
2582 }
2583
f1ef73f5 2584 vsi = pf->vsi[vf->lan_vsi_idx];
1071a835 2585 if (!vsi) {
cf6c6e01 2586 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2587 goto error_param;
2588 }
2589
2590 /* Enable only Rx rings, Tx rings were enabled by the FW when the
2591 * Tx queue group list was configured and the context bits were
2592 * programmed using ice_vsi_cfg_txqs
2593 */
77ca27c4 2594 q_map = vqs->rx_queues;
0ca469fb 2595 for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
77ca27c4
PG
2596 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
2597 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2598 goto error_param;
2599 }
2600
2601 /* Skip queue if enabled */
2602 if (test_bit(vf_q_id, vf->rxq_ena))
2603 continue;
2604
13a6233b 2605 if (ice_vsi_ctrl_one_rx_ring(vsi, true, vf_q_id, true)) {
19cce2c6 2606 dev_err(ice_pf_to_dev(vsi->back), "Failed to enable Rx ring %d on VSI %d\n",
77ca27c4
PG
2607 vf_q_id, vsi->vsi_num);
2608 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2609 goto error_param;
2610 }
2611
4dc926d3 2612 ice_vf_ena_rxq_interrupt(vsi, vf_q_id);
77ca27c4 2613 set_bit(vf_q_id, vf->rxq_ena);
77ca27c4
PG
2614 }
2615
2616 vsi = pf->vsi[vf->lan_vsi_idx];
2617 q_map = vqs->tx_queues;
0ca469fb 2618 for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
77ca27c4
PG
2619 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
2620 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2621 goto error_param;
2622 }
2623
2624 /* Skip queue if enabled */
2625 if (test_bit(vf_q_id, vf->txq_ena))
2626 continue;
2627
4dc926d3 2628 ice_vf_ena_txq_interrupt(vsi, vf_q_id);
77ca27c4 2629 set_bit(vf_q_id, vf->txq_ena);
77ca27c4 2630 }
1071a835
AV
2631
2632 /* Set flag to indicate that queues are enabled */
cf6c6e01 2633 if (v_ret == VIRTCHNL_STATUS_SUCCESS)
77ca27c4 2634 set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
1071a835
AV
2635
2636error_param:
2637 /* send the response to the VF */
cf6c6e01 2638 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret,
1071a835
AV
2639 NULL, 0);
2640}
2641
2642/**
2643 * ice_vc_dis_qs_msg
2644 * @vf: pointer to the VF info
2645 * @msg: pointer to the msg buffer
2646 *
2647 * called from the VF to disable all or specific
2648 * queue(s)
2649 */
2650static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
2651{
cf6c6e01 2652 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1071a835
AV
2653 struct virtchnl_queue_select *vqs =
2654 (struct virtchnl_queue_select *)msg;
f1ef73f5 2655 struct ice_pf *pf = vf->pf;
1071a835 2656 struct ice_vsi *vsi;
77ca27c4
PG
2657 unsigned long q_map;
2658 u16 vf_q_id;
1071a835
AV
2659
2660 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
77ca27c4 2661 !test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) {
cf6c6e01 2662 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2663 goto error_param;
2664 }
2665
2666 if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
cf6c6e01 2667 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2668 goto error_param;
2669 }
2670
24e2e2a0 2671 if (!ice_vc_validate_vqs_bitmaps(vqs)) {
cf6c6e01 2672 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2673 goto error_param;
2674 }
2675
f1ef73f5 2676 vsi = pf->vsi[vf->lan_vsi_idx];
1071a835 2677 if (!vsi) {
cf6c6e01 2678 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2679 goto error_param;
2680 }
2681
77ca27c4
PG
2682 if (vqs->tx_queues) {
2683 q_map = vqs->tx_queues;
2684
0ca469fb 2685 for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
77ca27c4
PG
2686 struct ice_ring *ring = vsi->tx_rings[vf_q_id];
2687 struct ice_txq_meta txq_meta = { 0 };
2688
2689 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
2690 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2691 goto error_param;
2692 }
2693
2694 /* Skip queue if not enabled */
2695 if (!test_bit(vf_q_id, vf->txq_ena))
2696 continue;
2697
2698 ice_fill_txq_meta(vsi, ring, &txq_meta);
2699
2700 if (ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id,
2701 ring, &txq_meta)) {
19cce2c6 2702 dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
77ca27c4
PG
2703 vf_q_id, vsi->vsi_num);
2704 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2705 goto error_param;
2706 }
2707
2708 /* Clear enabled queues flag */
2709 clear_bit(vf_q_id, vf->txq_ena);
77ca27c4 2710 }
1071a835
AV
2711 }
2712
e1fe6926
BC
2713 q_map = vqs->rx_queues;
2714 /* speed up Rx queue disable by batching them if possible */
2715 if (q_map &&
0ca469fb 2716 bitmap_equal(&q_map, vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF)) {
e1fe6926
BC
2717 if (ice_vsi_stop_all_rx_rings(vsi)) {
2718 dev_err(ice_pf_to_dev(vsi->back), "Failed to stop all Rx rings on VSI %d\n",
2719 vsi->vsi_num);
2720 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2721 goto error_param;
2722 }
77ca27c4 2723
0ca469fb 2724 bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
e1fe6926 2725 } else if (q_map) {
0ca469fb 2726 for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
77ca27c4
PG
2727 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
2728 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2729 goto error_param;
2730 }
2731
2732 /* Skip queue if not enabled */
2733 if (!test_bit(vf_q_id, vf->rxq_ena))
2734 continue;
2735
13a6233b
BC
2736 if (ice_vsi_ctrl_one_rx_ring(vsi, false, vf_q_id,
2737 true)) {
19cce2c6 2738 dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Rx ring %d on VSI %d\n",
77ca27c4
PG
2739 vf_q_id, vsi->vsi_num);
2740 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2741 goto error_param;
2742 }
2743
2744 /* Clear enabled queues flag */
2745 clear_bit(vf_q_id, vf->rxq_ena);
77ca27c4 2746 }
1071a835
AV
2747 }
2748
2749 /* Clear the queues-enabled VF state flag when no queues remain enabled */
e1fe6926 2750 if (v_ret == VIRTCHNL_STATUS_SUCCESS && ice_vf_has_no_qs_ena(vf))
77ca27c4 2751 clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
1071a835
AV
2752
2753error_param:
2754 /* send the response to the VF */
cf6c6e01 2755 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret,
1071a835
AV
2756 NULL, 0);
2757}
2758
0ca469fb
MW
2759/**
2760 * ice_cfg_interrupt
2761 * @vf: pointer to the VF info
2762 * @vsi: the VSI being configured
2763 * @vector_id: vector ID
2764 * @map: vector map for mapping vectors to queues
2765 * @q_vector: structure for interrupt vector
2766 * configure the IRQ to queue map
2767 */
2768static int
2769ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
2770 struct virtchnl_vector_map *map,
2771 struct ice_q_vector *q_vector)
2772{
2773 u16 vsi_q_id, vsi_q_id_idx;
2774 unsigned long qmap;
2775
2776 q_vector->num_ring_rx = 0;
2777 q_vector->num_ring_tx = 0;
2778
2779 qmap = map->rxq_map;
2780 for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
2781 vsi_q_id = vsi_q_id_idx;
2782
2783 if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
2784 return VIRTCHNL_STATUS_ERR_PARAM;
2785
2786 q_vector->num_ring_rx++;
2787 q_vector->rx.itr_idx = map->rxitr_idx;
2788 vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
2789 ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id,
2790 q_vector->rx.itr_idx);
2791 }
2792
2793 qmap = map->txq_map;
2794 for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
2795 vsi_q_id = vsi_q_id_idx;
2796
2797 if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
2798 return VIRTCHNL_STATUS_ERR_PARAM;
2799
2800 q_vector->num_ring_tx++;
2801 q_vector->tx.itr_idx = map->txitr_idx;
2802 vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
2803 ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id,
2804 q_vector->tx.itr_idx);
2805 }
2806
2807 return VIRTCHNL_STATUS_SUCCESS;
2808}
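/* Example of the mapping above: a VF sending vector_id = 1 with
 * rxq_map = 0x3 and txq_map = 0x1 attaches Rx rings 0 and 1 and Tx ring 0
 * to its second MSI-X vector; vector 0 is never mapped to queues since it
 * is reserved for the OICR/mailbox (see ice_vc_cfg_irq_map_msg() below).
 */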
2809
1071a835
AV
2810/**
2811 * ice_vc_cfg_irq_map_msg
2812 * @vf: pointer to the VF info
2813 * @msg: pointer to the msg buffer
2814 *
2815 * called from the VF to configure the IRQ to queue map
2816 */
2817static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
2818{
cf6c6e01 2819 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
0ca469fb 2820 u16 num_q_vectors_mapped, vsi_id, vector_id;
173e23c0 2821 struct virtchnl_irq_map_info *irqmap_info;
1071a835 2822 struct virtchnl_vector_map *map;
1071a835 2823 struct ice_pf *pf = vf->pf;
173e23c0 2824 struct ice_vsi *vsi;
1071a835
AV
2825 int i;
2826
173e23c0 2827 irqmap_info = (struct virtchnl_irq_map_info *)msg;
047e52c0
AV
2828 num_q_vectors_mapped = irqmap_info->num_vectors;
2829
047e52c0
AV
2830 /* Check to make sure number of VF vectors mapped is not greater than
2831 * number of VF vectors originally allocated, and check that
2832 * there is actually at least a single VF queue vector mapped
2833 */
ba0db585 2834 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
46c276ce 2835 pf->num_msix_per_vf < num_q_vectors_mapped ||
0ca469fb 2836 !num_q_vectors_mapped) {
cf6c6e01 2837 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2838 goto error_param;
2839 }
2840
3f416961
A
2841 vsi = pf->vsi[vf->lan_vsi_idx];
2842 if (!vsi) {
2843 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2844 goto error_param;
2845 }
2846
047e52c0
AV
2847 for (i = 0; i < num_q_vectors_mapped; i++) {
2848 struct ice_q_vector *q_vector;
ba0db585 2849
1071a835
AV
2850 map = &irqmap_info->vecmap[i];
2851
2852 vector_id = map->vector_id;
2853 vsi_id = map->vsi_id;
b791cdd5
BC
2854 /* vector_id is always 0-based for each VF, and can never be
2855 * larger than or equal to the max allowed interrupts per VF
2856 */
46c276ce 2857 if (!(vector_id < pf->num_msix_per_vf) ||
b791cdd5 2858 !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
047e52c0
AV
2859 (!vector_id && (map->rxq_map || map->txq_map))) {
2860 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2861 goto error_param;
2862 }
2863
2864 /* No need to map VF miscellaneous or rogue vector */
2865 if (!vector_id)
2866 continue;
2867
2868 /* Subtract the non-queue vector from the vector_id passed by the VF
2869 * to get the VSI queue vector array index
2870 */
2871 q_vector = vsi->q_vectors[vector_id - ICE_NONQ_VECS_VF];
2872 if (!q_vector) {
cf6c6e01 2873 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2874 goto error_param;
2875 }
2876
1071a835 2877 /* look out for an invalid queue index */
0ca469fb
MW
2878 v_ret = (enum virtchnl_status_code)
2879 ice_cfg_interrupt(vf, vsi, vector_id, map, q_vector);
2880 if (v_ret)
2881 goto error_param;
1071a835
AV
2882 }
2883
1071a835
AV
2884error_param:
2885 /* send the response to the VF */
cf6c6e01 2886 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret,
1071a835
AV
2887 NULL, 0);
2888}
2889
2890/**
2891 * ice_vc_cfg_qs_msg
2892 * @vf: pointer to the VF info
2893 * @msg: pointer to the msg buffer
2894 *
2895 * called from the VF to configure the Rx/Tx queues
2896 */
2897static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
2898{
cf6c6e01 2899 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1071a835
AV
2900 struct virtchnl_vsi_queue_config_info *qci =
2901 (struct virtchnl_vsi_queue_config_info *)msg;
2902 struct virtchnl_queue_pair_info *qpi;
77ca27c4 2903 u16 num_rxq = 0, num_txq = 0;
5743020d 2904 struct ice_pf *pf = vf->pf;
1071a835
AV
2905 struct ice_vsi *vsi;
2906 int i;
2907
2908 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
cf6c6e01 2909 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2910 goto error_param;
2911 }
2912
2913 if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
cf6c6e01 2914 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2915 goto error_param;
2916 }
2917
9c7dd756
MS
2918 vsi = pf->vsi[vf->lan_vsi_idx];
2919 if (!vsi) {
cf6c6e01 2920 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
5743020d
AA
2921 goto error_param;
2922 }
2923
0ca469fb 2924 if (qci->num_queue_pairs > ICE_MAX_RSS_QS_PER_VF ||
9c7dd756 2925 qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
19cce2c6 2926 dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n",
9c7dd756 2927 vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
3f416961
A
2928 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2929 goto error_param;
2930 }
2931
1071a835
AV
2932 for (i = 0; i < qci->num_queue_pairs; i++) {
2933 qpi = &qci->qpair[i];
2934 if (qpi->txq.vsi_id != qci->vsi_id ||
2935 qpi->rxq.vsi_id != qci->vsi_id ||
2936 qpi->rxq.queue_id != qpi->txq.queue_id ||
f8af5bf5 2937 qpi->txq.headwb_enabled ||
9c7dd756
MS
2938 !ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
2939 !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
1071a835 2940 !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
cf6c6e01 2941 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2942 goto error_param;
2943 }
2944 /* copy Tx queue info from VF into VSI */
77ca27c4
PG
2945 if (qpi->txq.ring_len > 0) {
2946 num_txq++;
2947 vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
2948 vsi->tx_rings[i]->count = qpi->txq.ring_len;
1071a835 2949 }
77ca27c4
PG
2950
2951 /* copy Rx queue info from VF into VSI */
2952 if (qpi->rxq.ring_len > 0) {
2953 num_rxq++;
2954 vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
2955 vsi->rx_rings[i]->count = qpi->rxq.ring_len;
2956
2957 if (qpi->rxq.databuffer_size != 0 &&
2958 (qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
2959 qpi->rxq.databuffer_size < 1024)) {
2960 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2961 goto error_param;
2962 }
2963 vsi->rx_buf_len = qpi->rxq.databuffer_size;
2964 vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len;
2965 if (qpi->rxq.max_pkt_size >= (16 * 1024) ||
2966 qpi->rxq.max_pkt_size < 64) {
2967 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2968 goto error_param;
2969 }
1071a835 2970 }
77ca27c4 2971
1071a835
AV
2972 vsi->max_frame = qpi->rxq.max_pkt_size;
2973 }
2974
2975 /* The VF may configure fewer queues than it was allocated, so update
2976 * the VSI with the number actually configured
2977 */
77ca27c4
PG
2978 vsi->num_txq = num_txq;
2979 vsi->num_rxq = num_rxq;
105e5bc2 2980 /* All queues of VF VSI are in TC 0 */
77ca27c4
PG
2981 vsi->tc_cfg.tc_info[0].qcount_tx = num_txq;
2982 vsi->tc_cfg.tc_info[0].qcount_rx = num_rxq;
1071a835 2983
cf6c6e01
MW
2984 if (ice_vsi_cfg_lan_txqs(vsi) || ice_vsi_cfg_rxqs(vsi))
2985 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
1071a835
AV
2986
2987error_param:
2988 /* send the response to the VF */
cf6c6e01 2989 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, v_ret,
1071a835
AV
2990 NULL, 0);
2991}
2992
2993/**
2994 * ice_is_vf_trusted
2995 * @vf: pointer to the VF info
2996 */
2997static bool ice_is_vf_trusted(struct ice_vf *vf)
2998{
2999 return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
3000}
3001
3002/**
3003 * ice_can_vf_change_mac
3004 * @vf: pointer to the VF info
3005 *
3006 * Return true if the VF is allowed to change its MAC filters, false otherwise
3007 */
3008static bool ice_can_vf_change_mac(struct ice_vf *vf)
3009{
3010 /* If the VF MAC address has been set administratively (via the
3011 * ndo_set_vf_mac command), then deny permission to the VF to
3012 * add/delete unicast MAC addresses, unless the VF is trusted
3013 */
3014 if (vf->pf_set_mac && !ice_is_vf_trusted(vf))
3015 return false;
3016
3017 return true;
3018}
3019
ed4c068d
BC
3020/**
3021 * ice_vc_add_mac_addr - attempt to add the MAC address passed in
3022 * @vf: pointer to the VF info
3023 * @vsi: pointer to the VF's VSI
3024 * @mac_addr: MAC address to add
3025 */
3026static int
3027ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
3028{
3029 struct device *dev = ice_pf_to_dev(vf->pf);
3030 enum ice_status status;
3031
3032 /* default unicast MAC already added */
3033 if (ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
3034 return 0;
3035
3036 if (is_unicast_ether_addr(mac_addr) && !ice_can_vf_change_mac(vf)) {
3037 dev_err(dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
3038 return -EPERM;
3039 }
3040
1b8f15b6 3041 status = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
ed4c068d
BC
3042 if (status == ICE_ERR_ALREADY_EXISTS) {
3043 dev_err(dev, "MAC %pM already exists for VF %d\n", mac_addr,
3044 vf->vf_id);
3045 return -EEXIST;
3046 } else if (status) {
0fee3577
LY
3047 dev_err(dev, "Failed to add MAC %pM for VF %d\n, error %s\n",
3048 mac_addr, vf->vf_id, ice_stat_str(status));
ed4c068d
BC
3049 return -EIO;
3050 }
3051
bf8987df
PG
3052 /* Set the default LAN address to the latest unicast MAC address added
3053 * by the VF. The default LAN address is reported by the PF via
3054 * ndo_get_vf_config.
3055 */
3056 if (is_unicast_ether_addr(mac_addr))
ed4c068d
BC
3057 ether_addr_copy(vf->dflt_lan_addr.addr, mac_addr);
3058
3059 vf->num_mac++;
3060
3061 return 0;
3062}
3063
3064/**
3065 * ice_vc_del_mac_addr - attempt to delete the MAC address passed in
3066 * @vf: pointer to the VF info
3067 * @vsi: pointer to the VF's VSI
3068 * @mac_addr: MAC address to delete
3069 */
3070static int
3071ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
3072{
3073 struct device *dev = ice_pf_to_dev(vf->pf);
3074 enum ice_status status;
3075
3076 if (!ice_can_vf_change_mac(vf) &&
3077 ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
3078 return 0;
3079
1b8f15b6 3080 status = ice_fltr_remove_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
ed4c068d
BC
3081 if (status == ICE_ERR_DOES_NOT_EXIST) {
3082 dev_err(dev, "MAC %pM does not exist for VF %d\n", mac_addr,
3083 vf->vf_id);
3084 return -ENOENT;
3085 } else if (status) {
0fee3577
LY
3086 dev_err(dev, "Failed to delete MAC %pM for VF %d, error %s\n",
3087 mac_addr, vf->vf_id, ice_stat_str(status));
ed4c068d
BC
3088 return -EIO;
3089 }
3090
3091 if (ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
3092 eth_zero_addr(vf->dflt_lan_addr.addr);
3093
3094 vf->num_mac--;
3095
3096 return 0;
3097}
3098
1071a835
AV
3099/**
3100 * ice_vc_handle_mac_addr_msg
3101 * @vf: pointer to the VF info
3102 * @msg: pointer to the msg buffer
f9867df6 3103 * @set: true if MAC filters are being set, false otherwise
1071a835 3104 *
df17b7e0 3105 * add guest MAC address filter
1071a835
AV
3106 */
3107static int
3108ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
3109{
ed4c068d
BC
3110 int (*ice_vc_cfg_mac)
3111 (struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr);
cf6c6e01 3112 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1071a835
AV
3113 struct virtchnl_ether_addr_list *al =
3114 (struct virtchnl_ether_addr_list *)msg;
3115 struct ice_pf *pf = vf->pf;
3116 enum virtchnl_ops vc_op;
1071a835 3117 struct ice_vsi *vsi;
1071a835
AV
3118 int i;
3119
ed4c068d 3120 if (set) {
1071a835 3121 vc_op = VIRTCHNL_OP_ADD_ETH_ADDR;
ed4c068d
BC
3122 ice_vc_cfg_mac = ice_vc_add_mac_addr;
3123 } else {
1071a835 3124 vc_op = VIRTCHNL_OP_DEL_ETH_ADDR;
ed4c068d
BC
3125 ice_vc_cfg_mac = ice_vc_del_mac_addr;
3126 }
1071a835
AV
3127
3128 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
3129 !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
cf6c6e01 3130 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
3131 goto handle_mac_exit;
3132 }
3133
ed4c068d
BC
3134 /* If this VF is not privileged, then we can't add more than a
3135 * limited number of addresses. Check to make sure that the
3136 * additions do not push us over the limit.
3137 */
1071a835
AV
3138 if (set && !ice_is_vf_trusted(vf) &&
3139 (vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) {
19cce2c6 3140 dev_err(ice_pf_to_dev(pf), "Can't add more MAC addresses: VF-%d is not trusted, switch the VF to trusted mode in order to add more MAC addresses\n",
d84b899a 3141 vf->vf_id);
cf6c6e01 3142 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
3143 goto handle_mac_exit;
3144 }
3145
3146 vsi = pf->vsi[vf->lan_vsi_idx];
f1ef73f5 3147 if (!vsi) {
cf6c6e01 3148 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
f1ef73f5
AA
3149 goto handle_mac_exit;
3150 }
1071a835
AV
3151
3152 for (i = 0; i < al->num_elements; i++) {
ed4c068d
BC
3153 u8 *mac_addr = al->list[i].addr;
3154 int result;
1071a835 3155
ed4c068d
BC
3156 if (is_broadcast_ether_addr(mac_addr) ||
3157 is_zero_ether_addr(mac_addr))
3158 continue;
1071a835 3159
ed4c068d
BC
3160 result = ice_vc_cfg_mac(vf, vsi, mac_addr);
3161 if (result == -EEXIST || result == -ENOENT) {
3162 continue;
3163 } else if (result) {
3164 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
1071a835
AV
3165 goto handle_mac_exit;
3166 }
1071a835
AV
3167 }
3168
1071a835 3169handle_mac_exit:
1071a835 3170 /* send the response to the VF */
cf6c6e01 3171 return ice_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0);
1071a835
AV
3172}
3173
3174/**
3175 * ice_vc_add_mac_addr_msg
3176 * @vf: pointer to the VF info
3177 * @msg: pointer to the msg buffer
3178 *
3179 * add guest MAC address filter
3180 */
3181static int ice_vc_add_mac_addr_msg(struct ice_vf *vf, u8 *msg)
3182{
3183 return ice_vc_handle_mac_addr_msg(vf, msg, true);
3184}
3185
3186/**
3187 * ice_vc_del_mac_addr_msg
3188 * @vf: pointer to the VF info
3189 * @msg: pointer to the msg buffer
3190 *
3191 * remove guest MAC address filter
3192 */
3193static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
3194{
3195 return ice_vc_handle_mac_addr_msg(vf, msg, false);
3196}
3197
3198/**
3199 * ice_vc_request_qs_msg
3200 * @vf: pointer to the VF info
3201 * @msg: pointer to the msg buffer
3202 *
3203 * VFs get a default number of queues but can use this message to request a
df17b7e0 3204 * different number. If the request is successful, PF will reset the VF and
1071a835 3205 * return 0. If unsuccessful, PF will send a virtchnl response informing the
f9867df6 3206 * VF of the number of available queue pairs.
1071a835
AV
3207 */
3208static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
3209{
cf6c6e01 3210 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1071a835
AV
3211 struct virtchnl_vf_res_request *vfres =
3212 (struct virtchnl_vf_res_request *)msg;
cbfe31b5 3213 u16 req_queues = vfres->num_queue_pairs;
1071a835 3214 struct ice_pf *pf = vf->pf;
cbfe31b5
PK
3215 u16 max_allowed_vf_queues;
3216 u16 tx_rx_queue_left;
4015d11e 3217 struct device *dev;
4ee656bb 3218 u16 cur_queues;
1071a835 3219
4015d11e 3220 dev = ice_pf_to_dev(pf);
1071a835 3221 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
cf6c6e01 3222 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
3223 goto error_param;
3224 }
3225
5743020d 3226 cur_queues = vf->num_vf_qs;
8c243700
AV
3227 tx_rx_queue_left = min_t(u16, ice_get_avail_txq_count(pf),
3228 ice_get_avail_rxq_count(pf));
5743020d 3229 max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
cbfe31b5 3230 if (!req_queues) {
4015d11e 3231 dev_err(dev, "VF %d tried to request 0 queues. Ignoring.\n",
cbfe31b5 3232 vf->vf_id);
0ca469fb 3233 } else if (req_queues > ICE_MAX_RSS_QS_PER_VF) {
4015d11e 3234 dev_err(dev, "VF %d tried to request more than %d queues.\n",
0ca469fb
MW
3235 vf->vf_id, ICE_MAX_RSS_QS_PER_VF);
3236 vfres->num_queue_pairs = ICE_MAX_RSS_QS_PER_VF;
cbfe31b5
PK
3237 } else if (req_queues > cur_queues &&
3238 req_queues - cur_queues > tx_rx_queue_left) {
19cce2c6 3239 dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n",
1071a835 3240 vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
cbfe31b5 3241 vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
0ca469fb 3242 ICE_MAX_RSS_QS_PER_VF);
1071a835
AV
3243 } else {
3244 /* request is successful, then reset VF */
3245 vf->num_req_qs = req_queues;
ff010eca 3246 ice_vc_reset_vf(vf);
4015d11e 3247 dev_info(dev, "VF %d granted request of %u queues.\n",
1071a835
AV
3248 vf->vf_id, req_queues);
3249 return 0;
3250 }
3251
3252error_param:
3253 /* send the response to the VF */
3254 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
cf6c6e01 3255 v_ret, (u8 *)vfres, sizeof(*vfres));
1071a835
AV
3256}
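/* Worked example of the clamping above (ICE_MAX_RSS_QS_PER_VF assumed to
 * be 16): a VF with 4 current queues requesting 16 while only 8 Tx/Rx
 * pairs remain free is answered with num_queue_pairs =
 * min(4 + 8, 16) = 12 and no reset; a satisfiable request instead
 * triggers ice_vc_reset_vf() and returns 0.
 */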
3257
7c710869
AV
3258/**
3259 * ice_set_vf_port_vlan
3260 * @netdev: network interface device structure
3261 * @vf_id: VF identifier
f9867df6 3262 * @vlan_id: VLAN ID being set
7c710869
AV
3263 * @qos: priority setting
3264 * @vlan_proto: VLAN protocol
3265 *
f9867df6 3266 * program VF Port VLAN ID and/or QoS
7c710869
AV
3267 */
3268int
3269ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
3270 __be16 vlan_proto)
3271{
4c66d227 3272 struct ice_pf *pf = ice_netdev_to_pf(netdev);
4015d11e 3273 struct device *dev;
7c710869 3274 struct ice_vf *vf;
61c9ce86 3275 u16 vlanprio;
c54d209c 3276 int ret;
7c710869 3277
4015d11e 3278 dev = ice_pf_to_dev(pf);
4c66d227 3279 if (ice_validate_vf_id(pf, vf_id))
7c710869 3280 return -EINVAL;
7c710869 3281
61c9ce86
BC
3282 if (vlan_id >= VLAN_N_VID || qos > 7) {
3283 dev_err(dev, "Invalid Port VLAN parameters for VF %d, ID %d, QoS %d\n",
3284 vf_id, vlan_id, qos);
7c710869
AV
3285 return -EINVAL;
3286 }
3287
3288 if (vlan_proto != htons(ETH_P_8021Q)) {
4015d11e 3289 dev_err(dev, "VF VLAN protocol is not supported\n");
7c710869
AV
3290 return -EPROTONOSUPPORT;
3291 }
3292
3293 vf = &pf->vf[vf_id];
c54d209c
BC
3294 ret = ice_check_vf_ready_for_cfg(vf);
3295 if (ret)
3296 return ret;
7c710869 3297
61c9ce86
BC
3298 vlanprio = vlan_id | (qos << VLAN_PRIO_SHIFT);
3299
3300 if (vf->port_vlan_info == vlanprio) {
7c710869 3301 /* duplicate request, so just return success */
4015d11e 3302 dev_dbg(dev, "Duplicate pvid %d request\n", vlanprio);
c54d209c 3303 return 0;
7c710869
AV
3304 }
3305
cf0bf41d 3306 vf->port_vlan_info = vlanprio;
7c710869 3307
cf0bf41d 3308 if (vf->port_vlan_info)
4015d11e 3309 dev_info(dev, "Setting VLAN %d, QoS 0x%x on VF %d\n",
7c710869 3310 vlan_id, qos, vf_id);
cf0bf41d
BC
3311 else
3312 dev_info(dev, "Clearing port VLAN on VF %d\n", vf_id);
7c710869 3313
cf0bf41d 3314 ice_vc_reset_vf(vf);
7c710869 3315
c54d209c 3316 return 0;
7c710869
AV
3317}
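/* Worked example of the vlanprio encoding above: VLAN_PRIO_SHIFT is 13
 * (<linux/if_vlan.h>), so vlan_id = 100, qos = 5 packs to
 * 100 | (5 << 13) = 0xa064 - the 12-bit ID in bits 0-11 and the 3-bit
 * priority in bits 13-15, matching the 802.1Q TCI layout.
 */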
3318
d4bc4e2d
BC
3319/**
3320 * ice_vf_vlan_offload_ena - determine if capabilities support VLAN offloads
3321 * @caps: VF driver negotiated capabilities
3322 *
3323 * Return true if VIRTCHNL_VF_OFFLOAD_VLAN capability is set, else return false
3324 */
3325static bool ice_vf_vlan_offload_ena(u32 caps)
3326{
3327 return !!(caps & VIRTCHNL_VF_OFFLOAD_VLAN);
3328}
3329
1071a835
AV
3330/**
3331 * ice_vc_process_vlan_msg
3332 * @vf: pointer to the VF info
3333 * @msg: pointer to the msg buffer
3334 * @add_v: Add VLAN if true, otherwise delete VLAN
3335 *
f9867df6 3336 * Process virtchnl op to add or remove programmed guest VLAN ID
1071a835
AV
3337 */
3338static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
3339{
cf6c6e01 3340 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1071a835
AV
3341 struct virtchnl_vlan_filter_list *vfl =
3342 (struct virtchnl_vlan_filter_list *)msg;
1071a835 3343 struct ice_pf *pf = vf->pf;
5eda8afd 3344 bool vlan_promisc = false;
1071a835 3345 struct ice_vsi *vsi;
4015d11e 3346 struct device *dev;
5eda8afd
AA
3347 struct ice_hw *hw;
3348 int status = 0;
3349 u8 promisc_m;
1071a835
AV
3350 int i;
3351
4015d11e 3352 dev = ice_pf_to_dev(pf);
1071a835 3353 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
cf6c6e01 3354 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
3355 goto error_param;
3356 }
3357
d4bc4e2d
BC
3358 if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
3359 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3360 goto error_param;
3361 }
3362
1071a835 3363 if (!ice_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
cf6c6e01 3364 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
3365 goto error_param;
3366 }
3367
1071a835 3368 for (i = 0; i < vfl->num_elements; i++) {
61c9ce86 3369 if (vfl->vlan_id[i] >= VLAN_N_VID) {
cf6c6e01 3370 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
19cce2c6
AV
3371 dev_err(dev, "invalid VF VLAN id %d\n",
3372 vfl->vlan_id[i]);
1071a835
AV
3373 goto error_param;
3374 }
3375 }
3376
5eda8afd 3377 hw = &pf->hw;
f1ef73f5 3378 vsi = pf->vsi[vf->lan_vsi_idx];
1071a835 3379 if (!vsi) {
cf6c6e01 3380 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
3381 goto error_param;
3382 }
3383
cd6d6b83
BC
3384 if (add_v && !ice_is_vf_trusted(vf) &&
3385 vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
19cce2c6 3386 dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode in order to add more VLAN filters\n",
cd6d6b83
BC
3387 vf->vf_id);
3388 /* There is no need to let VF know about being not trusted,
3389 * so we can just return success message here
3390 */
3391 goto error_param;
3392 }
3393
1071a835 3394 if (vsi->info.pvid) {
cf6c6e01 3395 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
3396 goto error_param;
3397 }
3398
01b5e89a
BC
3399 if ((test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
3400 test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) &&
3401 test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags))
5eda8afd
AA
3402 vlan_promisc = true;
3403
1071a835
AV
3404 if (add_v) {
3405 for (i = 0; i < vfl->num_elements; i++) {
3406 u16 vid = vfl->vlan_id[i];
3407
5079b853 3408 if (!ice_is_vf_trusted(vf) &&
cd6d6b83 3409 vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
19cce2c6 3410 dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode in order to add more VLAN filters\n",
5079b853
AA
3411 vf->vf_id);
3412 /* There is no need to let VF know about being
3413 * not trusted, so we can just return success
3414 * message here as well.
3415 */
3416 goto error_param;
3417 }
3418
cd6d6b83
BC
3419 /* we add VLAN 0 by default for each VF so we can enable
3420 * Tx VLAN anti-spoof without triggering MDD events so
3421 * we don't need to add it again here
3422 */
3423 if (!vid)
3424 continue;
3425
1b8f15b6 3426 status = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI);
cd6d6b83 3427 if (status) {
cf6c6e01 3428 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
5eda8afd
AA
3429 goto error_param;
3430 }
1071a835 3431
42f3efef
BC
3432 /* Enable VLAN pruning when non-zero VLAN is added */
3433 if (!vlan_promisc && vid &&
3434 !ice_vsi_is_vlan_pruning_ena(vsi)) {
5eda8afd
AA
3435 status = ice_cfg_vlan_pruning(vsi, true, false);
3436 if (status) {
cf6c6e01 3437 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
19cce2c6 3438 dev_err(dev, "Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
5eda8afd
AA
3439 vid, status);
3440 goto error_param;
3441 }
42f3efef 3442 } else if (vlan_promisc) {
5eda8afd
AA
3443 /* Enable Ucast/Mcast VLAN promiscuous mode */
3444 promisc_m = ICE_PROMISC_VLAN_TX |
3445 ICE_PROMISC_VLAN_RX;
3446
3447 status = ice_set_vsi_promisc(hw, vsi->idx,
3448 promisc_m, vid);
cf6c6e01
MW
3449 if (status) {
3450 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
19cce2c6 3451 dev_err(dev, "Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n",
5eda8afd 3452 vid, status);
cf6c6e01 3453 }
1071a835
AV
3454 }
3455 }
3456 } else {
		/* For an untrusted VF, the number of VLAN elements passed to
		 * the PF for removal might be greater than the number of VLAN
		 * filters programmed for that VF, so use the actual number of
		 * VLANs added earlier with the add VLAN opcode. This avoids
		 * removing a VLAN that doesn't exist, which would result in
		 * sending an erroneous failed message back to the VF.
		 */
		int num_vf_vlan;

		num_vf_vlan = vsi->num_vlan;
		for (i = 0; i < vfl->num_elements && i < num_vf_vlan; i++) {
			u16 vid = vfl->vlan_id[i];

			/* we add VLAN 0 by default for each VF so we can enable
			 * Tx VLAN anti-spoof without triggering MDD events so
			 * we don't want a VIRTCHNL request to remove it
			 */
			if (!vid)
				continue;

			/* Make sure ice_vsi_kill_vlan is successful before
			 * updating VLAN information
			 */
			status = ice_vsi_kill_vlan(vsi, vid);
			if (status) {
				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
				goto error_param;
			}

			/* Disable VLAN pruning when only VLAN 0 is left */
			if (vsi->num_vlan == 1 &&
			    ice_vsi_is_vlan_pruning_ena(vsi))
				ice_cfg_vlan_pruning(vsi, false, false);

			/* Disable Unicast/Multicast VLAN promiscuous mode */
			if (vlan_promisc) {
				promisc_m = ICE_PROMISC_VLAN_TX |
					    ICE_PROMISC_VLAN_RX;

				ice_clear_vsi_promisc(hw, vsi->idx,
						      promisc_m, vid);
			}
		}
	}

error_param:
	/* send the response to the VF */
	if (add_v)
		return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, v_ret,
					     NULL, 0);
	else
		return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, v_ret,
					     NULL, 0);
}
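
/* A sketch of the VF-side view of the exchange above, assuming a single VLAN
 * element (all names below are illustrative, not part of this file):
 *
 *	u8 buf[sizeof(struct virtchnl_vlan_filter_list)];
 *	struct virtchnl_vlan_filter_list *vfl = (void *)buf;
 *
 *	vfl->vsi_id = vsi_id;	// VSI ID from VIRTCHNL_OP_GET_VF_RESOURCES
 *	vfl->num_elements = 1;
 *	vfl->vlan_id[0] = 100;	// request a filter for VLAN 100
 *
 * The buffer is sent as VIRTCHNL_OP_ADD_VLAN (or VIRTCHNL_OP_DEL_VLAN) and
 * the PF replies with v_ret as built above.
 */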

/**
 * ice_vc_add_vlan_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Add and program guest VLAN ID
 */
static int ice_vc_add_vlan_msg(struct ice_vf *vf, u8 *msg)
{
	return ice_vc_process_vlan_msg(vf, msg, true);
}

/**
 * ice_vc_remove_vlan_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Remove programmed guest VLAN ID
 */
static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg)
{
	return ice_vc_process_vlan_msg(vf, msg, false);
}

/**
 * ice_vc_ena_vlan_stripping
 * @vf: pointer to the VF info
 *
 * Enable VLAN header stripping for a given VF
 */
static int ice_vc_ena_vlan_stripping(struct ice_vf *vf)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi) {
		/* guard against a missing VSI, mirroring the check in
		 * ice_vc_dis_vlan_stripping()
		 */
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (ice_vsi_manage_vlan_stripping(vsi, true))
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;

error_param:
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
				     v_ret, NULL, 0);
}

/**
 * ice_vc_dis_vlan_stripping
 * @vf: pointer to the VF info
 *
 * Disable VLAN header stripping for a given VF
 */
static int ice_vc_dis_vlan_stripping(struct ice_vf *vf)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (ice_vsi_manage_vlan_stripping(vsi, false))
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;

error_param:
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
				     v_ret, NULL, 0);
}

/**
 * ice_vf_init_vlan_stripping - enable/disable VLAN stripping on initialization
 * @vf: VF to enable/disable VLAN stripping for on initialization
 *
 * If the VIRTCHNL_VF_OFFLOAD_VLAN flag is set, enable VLAN stripping;
 * otherwise disable it. For example, the flag will be cleared when port VLANs
 * are configured by the administrator before passing the VF to the guest, or
 * when the AVF driver doesn't support VLAN offloads.
 */
static int ice_vf_init_vlan_stripping(struct ice_vf *vf)
{
	struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];

	if (!vsi)
		return -EINVAL;

	/* don't modify stripping if a port VLAN is configured */
	if (vsi->info.pvid)
		return 0;

	if (ice_vf_vlan_offload_ena(vf->driver_caps))
		return ice_vsi_manage_vlan_stripping(vsi, true);
	else
		return ice_vsi_manage_vlan_stripping(vsi, false);
}
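
/* Net effect of the helper above on a fresh VF, summarized as a sketch:
 *
 *	VIRTCHNL_VF_OFFLOAD_VLAN set	-> stripping enabled
 *	VIRTCHNL_VF_OFFLOAD_VLAN clear	-> stripping disabled
 *	port VLAN (pvid) configured	-> stripping left untouched
 */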

/**
 * ice_vc_process_vf_msg - Process request from VF
 * @pf: pointer to the PF structure
 * @event: pointer to the AQ event
 *
 * Called from the common asq/arq handler to process a request from a VF
 */
void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
{
	u32 v_opcode = le32_to_cpu(event->desc.cookie_high);
	s16 vf_id = le16_to_cpu(event->desc.retval);
	u16 msglen = event->msg_len;
	u8 *msg = event->msg_buf;
	struct ice_vf *vf = NULL;
	struct device *dev;
	int err = 0;

	dev = ice_pf_to_dev(pf);
	if (ice_validate_vf_id(pf, vf_id)) {
		err = -EINVAL;
		goto error_handler;
	}

	vf = &pf->vf[vf_id];

	/* Check if VF is disabled. */
	if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) {
		err = -EPERM;
		goto error_handler;
	}

	/* Perform basic checks on the msg */
	err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
	if (err) {
		if (err == VIRTCHNL_STATUS_ERR_PARAM)
			err = -EPERM;
		else
			err = -EINVAL;
	}

error_handler:
	if (err) {
		ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM,
				      NULL, 0);
		dev_err(dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
			vf_id, v_opcode, msglen, err);
		return;
	}

	switch (v_opcode) {
	case VIRTCHNL_OP_VERSION:
		err = ice_vc_get_ver_msg(vf, msg);
		break;
	case VIRTCHNL_OP_GET_VF_RESOURCES:
		err = ice_vc_get_vf_res_msg(vf, msg);
		if (ice_vf_init_vlan_stripping(vf))
			dev_err(dev, "Failed to initialize VLAN stripping for VF %d\n",
				vf->vf_id);
		ice_vc_notify_vf_link_state(vf);
		break;
	case VIRTCHNL_OP_RESET_VF:
		ice_vc_reset_vf_msg(vf);
		break;
	case VIRTCHNL_OP_ADD_ETH_ADDR:
		err = ice_vc_add_mac_addr_msg(vf, msg);
		break;
	case VIRTCHNL_OP_DEL_ETH_ADDR:
		err = ice_vc_del_mac_addr_msg(vf, msg);
		break;
	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		err = ice_vc_cfg_qs_msg(vf, msg);
		break;
	case VIRTCHNL_OP_ENABLE_QUEUES:
		err = ice_vc_ena_qs_msg(vf, msg);
		ice_vc_notify_vf_link_state(vf);
		break;
	case VIRTCHNL_OP_DISABLE_QUEUES:
		err = ice_vc_dis_qs_msg(vf, msg);
		break;
	case VIRTCHNL_OP_REQUEST_QUEUES:
		err = ice_vc_request_qs_msg(vf, msg);
		break;
	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
		err = ice_vc_cfg_irq_map_msg(vf, msg);
		break;
	case VIRTCHNL_OP_CONFIG_RSS_KEY:
		err = ice_vc_config_rss_key(vf, msg);
		break;
	case VIRTCHNL_OP_CONFIG_RSS_LUT:
		err = ice_vc_config_rss_lut(vf, msg);
		break;
	case VIRTCHNL_OP_GET_STATS:
		err = ice_vc_get_stats_msg(vf, msg);
		break;
	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		err = ice_vc_cfg_promiscuous_mode_msg(vf, msg);
		break;
	case VIRTCHNL_OP_ADD_VLAN:
		err = ice_vc_add_vlan_msg(vf, msg);
		break;
	case VIRTCHNL_OP_DEL_VLAN:
		err = ice_vc_remove_vlan_msg(vf, msg);
		break;
	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
		err = ice_vc_ena_vlan_stripping(vf);
		break;
	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
		err = ice_vc_dis_vlan_stripping(vf);
		break;
	case VIRTCHNL_OP_UNKNOWN:
	default:
		dev_err(dev, "Unsupported opcode %d from VF %d\n", v_opcode,
			vf_id);
		err = ice_vc_send_msg_to_vf(vf, v_opcode,
					    VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
					    NULL, 0);
		break;
	}
	if (err) {
		/* Handler errors are only logged here; the AQ handler is
		 * busy with pending work and does not act on them.
		 */
		dev_info(dev, "PF failed to honor VF %d, opcode %d, error %d\n",
			 vf_id, v_opcode, err);
	}
}
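
/* A minimal sketch of how an additional opcode would be wired into the
 * dispatcher above (the opcode and handler names here are hypothetical):
 *
 *	case VIRTCHNL_OP_FOO:
 *		err = ice_vc_foo_msg(vf, msg);
 *		break;
 *
 * Each handler is expected to send its own reply to the VF via
 * ice_vc_send_msg_to_vf(); the err value returned here is only logged.
 */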

/**
 * ice_get_vf_cfg
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @ivi: VF configuration structure
 *
 * Return VF configuration
 */
int
ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vf *vf;

	if (ice_validate_vf_id(pf, vf_id))
		return -EINVAL;

	vf = &pf->vf[vf_id];

	if (ice_check_vf_init(pf, vf))
		return -EBUSY;

	ivi->vf = vf_id;
	ether_addr_copy(ivi->mac, vf->dflt_lan_addr.addr);

	/* VF configuration for VLAN and applicable QoS */
	ivi->vlan = vf->port_vlan_info & VLAN_VID_MASK;
	ivi->qos = (vf->port_vlan_info & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;

	ivi->trusted = vf->trusted;
	ivi->spoofchk = vf->spoofchk;
	if (!vf->link_forced)
		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
	else if (vf->link_up)
		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
	else
		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
	ivi->max_tx_rate = vf->tx_rate;
	ivi->min_tx_rate = 0;
	return 0;
}

/**
 * ice_unicast_mac_exists - check if the unicast MAC exists on the PF's switch
 * @pf: PF used to reference the switch's rules
 * @umac: unicast MAC to compare against existing switch rules
 *
 * Return true on the first/any match, else return false
 */
static bool ice_unicast_mac_exists(struct ice_pf *pf, u8 *umac)
{
	struct ice_sw_recipe *mac_recipe_list =
		&pf->hw.switch_info->recp_list[ICE_SW_LKUP_MAC];
	struct ice_fltr_mgmt_list_entry *list_itr;
	struct list_head *rule_head;
	struct mutex *rule_lock; /* protect MAC filter list access */

	rule_head = &mac_recipe_list->filt_rules;
	rule_lock = &mac_recipe_list->filt_rule_lock;

	mutex_lock(rule_lock);
	list_for_each_entry(list_itr, rule_head, list_entry) {
		u8 *existing_mac = &list_itr->fltr_info.l_data.mac.mac_addr[0];

		if (ether_addr_equal(existing_mac, umac)) {
			mutex_unlock(rule_lock);
			return true;
		}
	}

	mutex_unlock(rule_lock);

	return false;
}

/**
 * ice_set_vf_mac
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @mac: MAC address
 *
 * Program VF MAC address
 */
int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vf *vf;
	int ret;

	if (ice_validate_vf_id(pf, vf_id))
		return -EINVAL;

	if (is_multicast_ether_addr(mac)) {
		netdev_err(netdev, "%pM not a valid unicast address\n", mac);
		return -EINVAL;
	}

	vf = &pf->vf[vf_id];
	/* nothing left to do, unicast MAC already set */
	if (ether_addr_equal(vf->dflt_lan_addr.addr, mac))
		return 0;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		return ret;

	if (ice_unicast_mac_exists(pf, mac)) {
		netdev_err(netdev, "Unicast MAC %pM already exists on this PF. Preventing setting VF %u unicast MAC address to %pM\n",
			   mac, vf_id, mac);
		return -EINVAL;
	}

	/* VF is notified of its new MAC via the PF's response to the
	 * VIRTCHNL_OP_GET_VF_RESOURCES message after the VF has been reset
	 */
	ether_addr_copy(vf->dflt_lan_addr.addr, mac);
	if (is_zero_ether_addr(mac)) {
		/* VF will send VIRTCHNL_OP_ADD_ETH_ADDR message with its MAC */
		vf->pf_set_mac = false;
		netdev_info(netdev, "Removing MAC on VF %d. VF driver will be reinitialized\n",
			    vf->vf_id);
	} else {
		/* PF will add MAC rule for the VF */
		vf->pf_set_mac = true;
		netdev_info(netdev, "Setting MAC %pM on VF %d. VF driver will be reinitialized\n",
			    mac, vf_id);
	}

	ice_vc_reset_vf(vf);
	return 0;
}
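
/* Typical administrator usage from the PF side via iproute2 (a hedged
 * example; the device name and address are placeholders):
 *
 *	ip link set eth0 vf 3 mac 00:11:22:33:44:55	(set and pin a MAC)
 *	ip link set eth0 vf 3 mac 00:00:00:00:00:00	(clear it again)
 *
 * Either form ends in the VF reset issued at the end of ice_set_vf_mac().
 */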

/**
 * ice_set_vf_trust
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @trusted: Boolean value to enable/disable trusted VF
 *
 * Enable or disable a given VF as trusted
 */
int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vf *vf;
	int ret;

	if (ice_validate_vf_id(pf, vf_id))
		return -EINVAL;

	vf = &pf->vf[vf_id];
	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		return ret;

	/* Check if already trusted */
	if (trusted == vf->trusted)
		return 0;

	vf->trusted = trusted;
	ice_vc_reset_vf(vf);
	dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n",
		 vf_id, trusted ? "" : "un");

	return 0;
}

/**
 * ice_set_vf_link_state
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @link_state: required link state
 *
 * Set VF's link state, irrespective of physical link state status
 */
int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vf *vf;
	int ret;

	if (ice_validate_vf_id(pf, vf_id))
		return -EINVAL;

	vf = &pf->vf[vf_id];
	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		return ret;

	switch (link_state) {
	case IFLA_VF_LINK_STATE_AUTO:
		vf->link_forced = false;
		break;
	case IFLA_VF_LINK_STATE_ENABLE:
		vf->link_forced = true;
		vf->link_up = true;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		vf->link_forced = true;
		vf->link_up = false;
		break;
	default:
		return -EINVAL;
	}

	ice_vc_notify_vf_link_state(vf);

	return 0;
}
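
/* The matching iproute2 control, assuming the same placeholder device:
 *
 *	ip link set eth0 vf 3 state auto	(track physical link)
 *	ip link set eth0 vf 3 state enable	(force link up)
 *	ip link set eth0 vf 3 state disable	(force link down)
 *
 * Each maps to one of the IFLA_VF_LINK_STATE_* cases handled above.
 */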

/**
 * ice_get_vf_stats - populate some stats for the VF
 * @netdev: the netdev of the PF
 * @vf_id: the host OS identifier (0-255)
 * @vf_stats: pointer to the OS memory to be initialized
 */
int ice_get_vf_stats(struct net_device *netdev, int vf_id,
		     struct ifla_vf_stats *vf_stats)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_eth_stats *stats;
	struct ice_vsi *vsi;
	struct ice_vf *vf;
	int ret;

	if (ice_validate_vf_id(pf, vf_id))
		return -EINVAL;

	vf = &pf->vf[vf_id];
	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		return ret;

	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi)
		return -EINVAL;

	ice_update_eth_stats(vsi);
	stats = &vsi->eth_stats;

	memset(vf_stats, 0, sizeof(*vf_stats));

	vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
			       stats->rx_multicast;
	vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
			       stats->tx_multicast;
	vf_stats->rx_bytes = stats->rx_bytes;
	vf_stats->tx_bytes = stats->tx_bytes;
	vf_stats->broadcast = stats->rx_broadcast;
	vf_stats->multicast = stats->rx_multicast;
	vf_stats->rx_dropped = stats->rx_discards;
	vf_stats->tx_dropped = stats->tx_discards;

	return 0;
}

/**
 * ice_print_vf_rx_mdd_event - print VF Rx malicious driver detect event
 * @vf: pointer to the VF structure
 */
void ice_print_vf_rx_mdd_event(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct device *dev;

	dev = ice_pf_to_dev(pf);

	dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
		 vf->mdd_rx_events.count, pf->hw.pf_id, vf->vf_id,
		 vf->dflt_lan_addr.addr,
		 test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
		 ? "on" : "off");
}

/**
 * ice_print_vfs_mdd_events - print VFs malicious driver detect events
 * @pf: pointer to the PF structure
 *
 * Called from ice_handle_mdd_event to rate limit and print VFs MDD events.
 */
void ice_print_vfs_mdd_events(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	int i;

	/* check that there are pending MDD events to print */
	if (!test_and_clear_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state))
		return;

	/* VF MDD event logs are rate limited to one second intervals */
	if (time_is_after_jiffies(pf->last_printed_mdd_jiffies + HZ * 1))
		return;
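
	/* Note on the check above: time_is_after_jiffies(t) stays true while
	 * t is still in the future, so this function bails out until at least
	 * HZ ticks (one second) have passed since the last print. An
	 * equivalent open-coded form would be:
	 *
	 *	if (time_before(jiffies, pf->last_printed_mdd_jiffies + HZ))
	 *		return;
	 */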

	pf->last_printed_mdd_jiffies = jiffies;

	ice_for_each_vf(pf, i) {
		struct ice_vf *vf = &pf->vf[i];

		/* only print Rx MDD event message if there are new events */
		if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) {
			vf->mdd_rx_events.last_printed =
							vf->mdd_rx_events.count;
			ice_print_vf_rx_mdd_event(vf);
		}

		/* only print Tx MDD event message if there are new events */
		if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) {
			vf->mdd_tx_events.last_printed =
							vf->mdd_tx_events.count;

			dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n",
				 vf->mdd_tx_events.count, hw->pf_id, i,
				 vf->dflt_lan_addr.addr);
		}
	}
}