// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"

/**
 * ice_validate_vf_id - helper to check if VF ID is valid
 * @pf: pointer to the PF structure
 * @vf_id: the ID of the VF to check
 */
static int ice_validate_vf_id(struct ice_pf *pf, int vf_id)
{
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(ice_pf_to_dev(pf), "Invalid VF ID: %d\n", vf_id);
		return -EINVAL;
	}
	return 0;
}

/**
 * ice_check_vf_init - helper to check if VF init complete
 * @pf: pointer to the PF structure
 * @vf: the pointer to the VF to check
 */
static int ice_check_vf_init(struct ice_pf *pf, struct ice_vf *vf)
{
	if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
		dev_err(ice_pf_to_dev(pf), "VF ID: %d in reset. Try again.\n",
			vf->vf_id);
		return -EBUSY;
	}
	return 0;
}

/**
 * ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
 * @pf: pointer to the PF structure
 * @v_opcode: operation code
 * @v_retval: return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 */
static void
ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
		    enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
{
	struct ice_hw *hw = &pf->hw;
	int i;

	ice_for_each_vf(pf, i) {
		struct ice_vf *vf = &pf->vf[i];

		/* Not all VFs are enabled so skip the ones that are not */
		if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
		    !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
			continue;

		/* Ignore return value on purpose - a given VF may fail, but
		 * we need to keep going and send to all of them
		 */
		ice_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, msg,
				      msglen, NULL);
	}
}

/**
 * ice_set_pfe_link - Set the link speed/status of the virtchnl_pf_event
 * @vf: pointer to the VF structure
 * @pfe: pointer to the virtchnl_pf_event to set link speed/status for
 * @ice_link_speed: link speed specified by ICE_AQ_LINK_SPEED_*
 * @link_up: whether or not to set the link up/down
 */
static void
ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
		 int ice_link_speed, bool link_up)
{
	if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
		pfe->event_data.link_event_adv.link_status = link_up;
		/* Speed in Mbps */
		pfe->event_data.link_event_adv.link_speed =
			ice_conv_link_speed_to_virtchnl(true, ice_link_speed);
	} else {
		pfe->event_data.link_event.link_status = link_up;
		/* Legacy method for virtchnl link speeds */
		pfe->event_data.link_event.link_speed =
			(enum virtchnl_link_speed)
			ice_conv_link_speed_to_virtchnl(false, ice_link_speed);
	}
}

/**
 * ice_is_vf_link_up - check if the VF's link is up
 * @vf: VF to check if link is up
 */
static bool ice_is_vf_link_up(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;

	if (ice_check_vf_init(pf, vf))
		return false;

	if (!vf->num_qs_ena)
		return false;
	else if (vf->link_forced)
		return vf->link_up;
	else
		return pf->hw.port_info->phy.link_info.link_info &
			ICE_AQ_LINK_UP;
}

/**
 * ice_vc_notify_vf_link_state - Inform a VF of link status
 * @vf: pointer to the VF structure
 *
 * send a link status message to a single VF
 */
static void ice_vc_notify_vf_link_state(struct ice_vf *vf)
{
	struct virtchnl_pf_event pfe = { 0 };
	struct ice_hw *hw = &vf->pf->hw;

	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = PF_EVENT_SEVERITY_INFO;

	if (ice_is_vf_link_up(vf))
		ice_set_pfe_link(vf, &pfe,
				 hw->port_info->phy.link_info.link_speed, true);
	else
		ice_set_pfe_link(vf, &pfe, ICE_AQ_LINK_SPEED_UNKNOWN, false);

	ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
			      VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
			      sizeof(pfe), NULL);
}

/**
 * ice_free_vf_res - Free a VF's resources
 * @vf: pointer to the VF info
 */
static void ice_free_vf_res(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	int i, last_vector_idx;

	/* First, disable VF's configuration API to prevent OS from
	 * accessing the VF's VSI after it's freed or invalidated.
	 */
	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);

	/* free VSI and disconnect it from the parent uplink */
	if (vf->lan_vsi_idx) {
		ice_vsi_release(pf->vsi[vf->lan_vsi_idx]);
		vf->lan_vsi_idx = 0;
		vf->lan_vsi_num = 0;
		vf->num_mac = 0;
	}

	last_vector_idx = vf->first_vector_idx + pf->num_vf_msix - 1;
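	/* e.g., with first_vector_idx 215 and num_vf_msix 5 (illustrative
	 * values), PF-space vectors 215-219 are cleared below
	 */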
	/* Disable interrupts so that VF starts in a known state */
	for (i = vf->first_vector_idx; i <= last_vector_idx; i++) {
		wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M);
		ice_flush(&pf->hw);
	}
	/* reset some of the state variables keeping track of the resources */
	clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
	clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
}

/**
 * ice_dis_vf_mappings
 * @vf: pointer to the VF structure
 */
static void ice_dis_vf_mappings(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	struct device *dev;
	int first, last, v;
	struct ice_hw *hw;

	hw = &pf->hw;
	vsi = pf->vsi[vf->lan_vsi_idx];

	dev = ice_pf_to_dev(pf);
	wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
	wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);

	first = vf->first_vector_idx;
	last = first + pf->num_vf_msix - 1;
	for (v = first; v <= last; v++) {
		u32 reg;

		reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) &
			GLINT_VECT2FUNC_IS_PF_M) |
		       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
			GLINT_VECT2FUNC_PF_NUM_M));
		wr32(hw, GLINT_VECT2FUNC(v), reg);
	}

	if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG)
		wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0);
	else
		dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");

	if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
	else
		dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
}

/**
 * ice_sriov_free_msix_res - Reset/free any used MSIX resources
 * @pf: pointer to the PF structure
 *
 * If MSIX entries from the pf->irq_tracker were needed then we need to
 * reset the irq_tracker->end and give back the entries we needed to
 * num_avail_sw_msix.
 *
 * If no MSIX entries were taken from the pf->irq_tracker then just clear
 * the pf->sriov_base_vector.
 *
 * Returns 0 on success, and -EINVAL on error.
 */
static int ice_sriov_free_msix_res(struct ice_pf *pf)
{
	struct ice_res_tracker *res;

	if (!pf)
		return -EINVAL;

	res = pf->irq_tracker;
	if (!res)
		return -EINVAL;

	/* give back irq_tracker resources used */
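	/* e.g., with 960 tracker entries and sriov_base_vector 924
	 * (illustrative values), entries 924-959 are returned here
	 */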
	if (pf->sriov_base_vector < res->num_entries) {
		res->end = res->num_entries;
		pf->num_avail_sw_msix +=
			res->num_entries - pf->sriov_base_vector;
	}

	pf->sriov_base_vector = 0;

	return 0;
}

/**
 * ice_set_vf_state_qs_dis - Set VF queues state to disabled
 * @vf: pointer to the VF structure
 */
void ice_set_vf_state_qs_dis(struct ice_vf *vf)
{
	/* Clear Rx/Tx enabled queues flag */
	bitmap_zero(vf->txq_ena, ICE_MAX_BASE_QS_PER_VF);
	bitmap_zero(vf->rxq_ena, ICE_MAX_BASE_QS_PER_VF);
	vf->num_qs_ena = 0;
	clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
}

/**
 * ice_dis_vf_qs - Disable the VF queues
 * @vf: pointer to the VF structure
 */
static void ice_dis_vf_qs(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;

	vsi = pf->vsi[vf->lan_vsi_idx];

	ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
	ice_vsi_stop_all_rx_rings(vsi);
	ice_set_vf_state_qs_dis(vf);
}

/**
 * ice_free_vfs - Free all VFs
 * @pf: pointer to the PF structure
 */
void ice_free_vfs(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	int tmp, i;

	if (!pf->vf)
		return;

	while (test_and_set_bit(__ICE_VF_DIS, pf->state))
		usleep_range(1000, 2000);

	/* Avoid wait time by stopping all VFs at the same time */
	ice_for_each_vf(pf, i)
		if (test_bit(ICE_VF_STATE_QS_ENA, pf->vf[i].vf_states))
			ice_dis_vf_qs(&pf->vf[i]);

	/* Disable IOV before freeing resources. This lets any VF drivers
	 * running in the host get themselves cleaned up before we yank
	 * the carpet out from underneath their feet.
	 */
	if (!pci_vfs_assigned(pf->pdev))
		pci_disable_sriov(pf->pdev);
	else
		dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");

	tmp = pf->num_alloc_vfs;
	pf->num_vf_qps = 0;
	pf->num_alloc_vfs = 0;
	for (i = 0; i < tmp; i++) {
		if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) {
			/* disable VF qp mappings and set VF disable state */
			ice_dis_vf_mappings(&pf->vf[i]);
			set_bit(ICE_VF_STATE_DIS, pf->vf[i].vf_states);
			ice_free_vf_res(&pf->vf[i]);
		}
	}

	if (ice_sriov_free_msix_res(pf))
		dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");

	devm_kfree(dev, pf->vf);
	pf->vf = NULL;

	/* This check is for when the driver is unloaded while VFs are
	 * assigned. Setting the number of VFs to 0 through sysfs is caught
	 * before this function ever gets called.
	 */
	if (!pci_vfs_assigned(pf->pdev)) {
		int vf_id;

		/* Acknowledge VFLR for all VFs. Without this, VFs will fail to
		 * work correctly when SR-IOV gets re-enabled.
		 */
		for (vf_id = 0; vf_id < tmp; vf_id++) {
			u32 reg_idx, bit_idx;

			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
			wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
		}
	}
	clear_bit(__ICE_VF_DIS, pf->state);
	clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
}

/**
 * ice_trigger_vf_reset - Reset a VF on HW
 * @vf: pointer to the VF structure
 * @is_vflr: true if VFLR was issued, false if not
 * @is_pfr: true if the reset was triggered due to a previous PFR
 *
 * Trigger hardware to start a reset for a particular VF. Expects the caller
 * to wait the proper amount of time to allow hardware to reset the VF before
 * it cleans up and restores VF functionality.
 */
static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
{
	struct ice_pf *pf = vf->pf;
	u32 reg, reg_idx, bit_idx;
	struct device *dev;
	struct ice_hw *hw;
	int vf_abs_id, i;

	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;

	/* Inform VF that it is no longer active, as a warning */
	clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);

	/* Disable VF's configuration API during reset. The flag is re-enabled
	 * in ice_alloc_vf_res(), when it's safe again to access VF's VSI.
	 * It's normally disabled in ice_free_vf_res(), but it's safer to do
	 * it earlier to give any VF config functions that may still be
	 * running some time to finish.
	 */
	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);

	/* VF_MBX_ARQLEN is cleared by PFR, so the driver needs to clear it
	 * in the case of VFR. If this is done for PFR, it can mess up VF
	 * resets because the VF driver may already have started cleanup
	 * by the time we get here.
	 */
	if (!is_pfr)
		wr32(hw, VF_MBX_ARQLEN(vf->vf_id), 0);

	/* In the case of a VFLR, the HW has already reset the VF and we
	 * just need to clean up, so don't hit the VFRTRIG register.
	 */
	if (!is_vflr) {
		/* reset VF using VPGEN_VFRTRIG reg */
		reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
		reg |= VPGEN_VFRTRIG_VFSWR_M;
		wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
	}
	/* clear the VFLR bit in GLGEN_VFLRSTAT */
	reg_idx = (vf_abs_id) / 32;
	bit_idx = (vf_abs_id) % 32;
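	/* each 32-bit GLGEN_VFLRSTAT register tracks 32 VFs; e.g., an
	 * absolute VF ID of 69 lands in GLGEN_VFLRSTAT(2), bit 5
	 */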
	wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
	ice_flush(hw);

	wr32(hw, PF_PCI_CIAA,
	     VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S));
	for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) {
		reg = rd32(hw, PF_PCI_CIAD);
		/* no transactions pending so stop polling */
		if ((reg & VF_TRANS_PENDING_M) == 0)
			break;

		dev_err(dev, "VF %d PCI transactions stuck\n", vf->vf_id);
		udelay(ICE_PCI_CIAD_WAIT_DELAY_US);
	}
}

/**
 * ice_vsi_manage_pvid - Enable or disable port VLAN for VSI
 * @vsi: the VSI to update
 * @pvid_info: VLAN ID and QoS used to set the PVID VSI context field
 * @enable: true to enable the PVID, false to disable it
 */
static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 pvid_info, bool enable)
{
	struct ice_hw *hw = &vsi->back->hw;
	struct ice_aqc_vsi_props *info;
	struct ice_vsi_ctx *ctxt;
	enum ice_status status;
	int ret = 0;

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return -ENOMEM;

	ctxt->info = vsi->info;
	info = &ctxt->info;
	if (enable) {
		info->vlan_flags = ICE_AQ_VSI_VLAN_MODE_UNTAGGED |
			ICE_AQ_VSI_PVLAN_INSERT_PVID |
			ICE_AQ_VSI_VLAN_EMOD_STR;
		info->sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
	} else {
		info->vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING |
			ICE_AQ_VSI_VLAN_MODE_ALL;
		info->sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
	}

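	/* pvid_info carries the whole VLAN tag in 802.1Q TCI layout:
	 * VLAN ID in the low 12 bits and the QoS/priority bits above them
	 */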
	info->pvid = cpu_to_le16(pvid_info);
	info->valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
					   ICE_AQ_VSI_PROP_SW_VALID);

	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
	if (status) {
		dev_info(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %d aq_err %d\n",
			 status, hw->adminq.sq_last_status);
		ret = -EIO;
		goto out;
	}

	vsi->info.vlan_flags = info->vlan_flags;
	vsi->info.sw_flags2 = info->sw_flags2;
	vsi->info.pvid = info->pvid;
out:
	kfree(ctxt);
	return ret;
}

/**
 * ice_vf_vsi_setup - Set up a VF VSI
 * @pf: board private structure
 * @pi: pointer to the port_info instance
 * @vf_id: defines VF ID to which this VSI connects.
 *
 * Returns pointer to the successfully allocated VSI struct on success,
 * otherwise returns NULL on failure.
 */
static struct ice_vsi *
ice_vf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, u16 vf_id)
{
	return ice_vsi_setup(pf, pi, ICE_VSI_VF, vf_id);
}

/**
 * ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space
 * @pf: pointer to PF structure
 * @vf: pointer to VF that the first MSIX vector index is being calculated for
 *
 * This returns the first MSIX vector index in PF space that is used by this
 * VF. This index is used when accessing PF relative registers such as
 * GLINT_VECT2FUNC and GLINT_DYN_CTL.
 * This will always be the OICR index in the AVF driver so any functionality
 * using vf->first_vector_idx for queue configuration will have to increment by
 * 1 to avoid meddling with the OICR index.
 */
static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
{
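	/* e.g., with sriov_base_vector 200 and num_vf_msix 5 (illustrative
	 * values), VF 3 owns PF-space vectors 215-219
	 */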
	return pf->sriov_base_vector + vf->vf_id * pf->num_vf_msix;
}

/**
 * ice_alloc_vsi_res - Setup VF VSI and its resources
 * @vf: pointer to the VF structure
 *
 * Returns 0 on success, negative value on failure
 */
static int ice_alloc_vsi_res(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	LIST_HEAD(tmp_add_list);
	u8 broadcast[ETH_ALEN];
	struct ice_vsi *vsi;
	struct device *dev;
	int status = 0;

	dev = ice_pf_to_dev(pf);
	/* first vector index is the VFs OICR index */
	vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);

	vsi = ice_vf_vsi_setup(pf, pf->hw.port_info, vf->vf_id);
	if (!vsi) {
		dev_err(dev, "Failed to create VF VSI\n");
		return -ENOMEM;
	}

	vf->lan_vsi_idx = vsi->idx;
	vf->lan_vsi_num = vsi->vsi_num;

	/* Check if a port VLAN existed before, and restore it accordingly */
	if (vf->port_vlan_info) {
		ice_vsi_manage_pvid(vsi, vf->port_vlan_info, true);
		if (ice_vsi_add_vlan(vsi, vf->port_vlan_info & VLAN_VID_MASK))
			dev_warn(ice_pf_to_dev(pf), "Failed to add Port VLAN %d filter for VF %d\n",
				 vf->port_vlan_info & VLAN_VID_MASK, vf->vf_id);
	} else {
		/* set VLAN 0 filter by default when no port VLAN is
		 * enabled. If a port VLAN is enabled we don't want
		 * untagged broadcast/multicast traffic seen on the VF
		 * interface.
		 */
		if (ice_vsi_add_vlan(vsi, 0))
			dev_warn(ice_pf_to_dev(pf), "Failed to add VLAN 0 filter for VF %d, MDD events will trigger. Reset the VF, disable spoofchk, or enable 8021q module on the guest\n",
				 vf->vf_id);
	}

	eth_broadcast_addr(broadcast);

	status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast);
	if (status)
		goto ice_alloc_vsi_res_exit;

	if (is_valid_ether_addr(vf->dflt_lan_addr.addr)) {
		status = ice_add_mac_to_list(vsi, &tmp_add_list,
					     vf->dflt_lan_addr.addr);
		if (status)
			goto ice_alloc_vsi_res_exit;
	}

	status = ice_add_mac(&pf->hw, &tmp_add_list);
	if (status)
		dev_err(dev, "could not add mac filters error %d\n", status);
	else
		vf->num_mac = 1;

	/* Clear this bit after VF initialization since we shouldn't reclaim
	 * and reassign interrupts for synchronous or asynchronous VFR events.
	 * We don't want to reconfigure interrupts since AVF driver doesn't
	 * expect vector assignment to be changed unless there is a request for
	 * more vectors.
	 */
ice_alloc_vsi_res_exit:
	ice_free_fltr_list(dev, &tmp_add_list);
	return status;
}

/**
 * ice_alloc_vf_res - Allocate VF resources
 * @vf: pointer to the VF structure
 */
static int ice_alloc_vf_res(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	int tx_rx_queue_left;
	int status;

	/* Update number of VF queues, in case the VF requested queue
	 * changes
	 */
	tx_rx_queue_left = min_t(int, ice_get_avail_txq_count(pf),
				 ice_get_avail_rxq_count(pf));
	tx_rx_queue_left += ICE_DFLT_QS_PER_VF;
	if (vf->num_req_qs && vf->num_req_qs <= tx_rx_queue_left &&
	    vf->num_req_qs != vf->num_vf_qs)
		vf->num_vf_qs = vf->num_req_qs;

	/* setup VF VSI and necessary resources */
	status = ice_alloc_vsi_res(vf);
	if (status)
		goto ice_alloc_vf_res_exit;

	if (vf->trusted)
		set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
	else
		clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);

	/* VF is now completely initialized */
	set_bit(ICE_VF_STATE_INIT, vf->vf_states);

	return status;

ice_alloc_vf_res_exit:
	ice_free_vf_res(vf);
	return status;
}

/**
 * ice_ena_vf_mappings
 * @vf: pointer to the VF structure
 *
 * Enable VF vectors and queues allocation by writing the details into
 * respective registers.
 */
static void ice_ena_vf_mappings(struct ice_vf *vf)
{
	int abs_vf_id, abs_first, abs_last;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	struct device *dev;
	int first, last, v;
	struct ice_hw *hw;
	u32 reg;

	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	vsi = pf->vsi[vf->lan_vsi_idx];
	first = vf->first_vector_idx;
	last = (first + pf->num_vf_msix) - 1;
	abs_first = first + pf->hw.func_caps.common_cap.msix_vector_first_id;
	abs_last = (abs_first + pf->num_vf_msix) - 1;
	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	/* VF Vector allocation */
	reg = (((abs_first << VPINT_ALLOC_FIRST_S) & VPINT_ALLOC_FIRST_M) |
	       ((abs_last << VPINT_ALLOC_LAST_S) & VPINT_ALLOC_LAST_M) |
	       VPINT_ALLOC_VALID_M);
	wr32(hw, VPINT_ALLOC(vf->vf_id), reg);

	reg = (((abs_first << VPINT_ALLOC_PCI_FIRST_S)
		& VPINT_ALLOC_PCI_FIRST_M) |
	       ((abs_last << VPINT_ALLOC_PCI_LAST_S) & VPINT_ALLOC_PCI_LAST_M) |
	       VPINT_ALLOC_PCI_VALID_M);
	wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);
	/* map the interrupts to its functions */
	for (v = first; v <= last; v++) {
		reg = (((abs_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
			GLINT_VECT2FUNC_VF_NUM_M) |
		       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
			GLINT_VECT2FUNC_PF_NUM_M));
		wr32(hw, GLINT_VECT2FUNC(v), reg);
	}

	/* Map mailbox interrupt. We put an explicit 0 here to remind us that
	 * VF admin queue interrupts will go to VF MSI-X vector 0.
	 */
	wr32(hw, VPINT_MBX_CTL(abs_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M | 0);
	/* set regardless of mapping mode */
	wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);

	/* VF Tx queues allocation */
	if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) {
		/* set the VF PF Tx queue range
		 * VFNUMQ value should be set to (number of queues - 1). A value
		 * of 0 means 1 queue and a value of 255 means 256 queues
		 */
		reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
			VPLAN_TX_QBASE_VFFIRSTQ_M) |
		       (((vsi->alloc_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
			VPLAN_TX_QBASE_VFNUMQ_M));
		wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
	} else {
		dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
	}

	/* set regardless of mapping mode */
	wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id), VPLAN_RXQ_MAPENA_RX_ENA_M);

	/* VF Rx queues allocation */
	if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) {
		/* set the VF PF Rx queue range
		 * VFNUMQ value should be set to (number of queues - 1). A value
		 * of 0 means 1 queue and a value of 255 means 256 queues
		 */
		reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
			VPLAN_RX_QBASE_VFFIRSTQ_M) |
		       (((vsi->alloc_rxq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
			VPLAN_RX_QBASE_VFNUMQ_M));
		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
	} else {
		dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
	}
}

/**
 * ice_determine_res
 * @pf: pointer to the PF structure
 * @avail_res: available resources in the PF structure
 * @max_res: maximum resources that can be given per VF
 * @min_res: minimum resources that can be given per VF
 *
 * Returns a non-zero value if resources (queues/vectors) are available or
 * returns zero if the PF cannot accommodate all num_alloc_vfs.
 */
static int
ice_determine_res(struct ice_pf *pf, u16 avail_res, u16 max_res, u16 min_res)
{
	bool checked_min_res = false;
	int res;

	/* start by checking if PF can assign max number of resources for
	 * all num_alloc_vfs.
	 * if yes, return number per VF
	 * If no, divide by 2 and roundup, check again
	 * repeat the loop till we reach a point where even minimum resources
	 * are not available, in that case return 0
	 */
	res = max_res;
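	/* e.g. (illustrative numbers): 20 VFs, avail_res = 400, max_res = 65,
	 * min_res = 5: try 65 (1300 > 400), then 33 (660 > 400), then 17
	 * (340 <= 400) and return 17 per VF
	 */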
	while ((res >= min_res) && !checked_min_res) {
		int num_all_res;

		num_all_res = pf->num_alloc_vfs * res;
		if (num_all_res <= avail_res)
			return res;

		if (res == min_res)
			checked_min_res = true;

		res = DIV_ROUND_UP(res, 2);
	}
	return 0;
}

/**
 * ice_calc_vf_reg_idx - Calculate the VF's register index in the PF space
 * @vf: VF to calculate the register index for
 * @q_vector: a q_vector associated to the VF
 */
int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
{
	struct ice_pf *pf;

	if (!vf || !q_vector)
		return -EINVAL;

	pf = vf->pf;

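	/* e.g., with sriov_base_vector 200 and num_vf_msix 5 (illustrative
	 * values), VF 3's OICR is vector 215, so its q_vector 0 maps to 216
	 */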
	/* always add one to account for the OICR being the first MSIX */
	return pf->sriov_base_vector + pf->num_vf_msix * vf->vf_id +
	       q_vector->v_idx + 1;
}

/**
 * ice_get_max_valid_res_idx - Get the max valid resource index
 * @res: pointer to the resource to find the max valid index for
 *
 * Start from the end of the ice_res_tracker and return right when we find the
 * first res->list entry with the ICE_RES_VALID_BIT set. This function is only
 * valid for SR-IOV because it is the only consumer that manipulates the
 * res->end and this is always called when res->end is set to res->num_entries.
 */
static int ice_get_max_valid_res_idx(struct ice_res_tracker *res)
{
	int i;

	if (!res)
		return -EINVAL;

	for (i = res->num_entries - 1; i >= 0; i--)
		if (res->list[i] & ICE_RES_VALID_BIT)
			return i;

	return 0;
}

/**
 * ice_sriov_set_msix_res - Set any used MSIX resources
 * @pf: pointer to PF structure
 * @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs
 *
 * This function allows SR-IOV resources to be taken from the end of the PF's
 * allowed HW MSIX vectors so in many cases the irq_tracker will not
 * be needed. In these cases we just set the pf->sriov_base_vector and return
 * success.
 *
 * If SR-IOV needs to use any pf->irq_tracker entries it updates the
 * irq_tracker->end based on the first entry needed for SR-IOV. This makes it
 * so any calls to ice_get_res() using the irq_tracker will not try to use
 * resources at or beyond the newly set value.
 *
 * Return 0 on success, and -EINVAL when there are not enough MSIX vectors
 * in the PF's space available for SR-IOV.
 */
static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
{
	int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
	u16 pf_total_msix_vectors =
		pf->hw.func_caps.common_cap.num_msix_vectors;
	struct ice_res_tracker *res = pf->irq_tracker;
	int sriov_base_vector;

	if (max_valid_res_idx < 0)
		return max_valid_res_idx;

	sriov_base_vector = pf_total_msix_vectors - num_msix_needed;
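	/* e.g. (illustrative numbers): 1024 total vectors and 100 needed
	 * puts sriov_base_vector at 924; with a 960-entry irq_tracker the
	 * branch below then trims res->end to 924 and gives up 36 entries
	 */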

	/* make sure we only grab irq_tracker entries from the list end and
	 * that we have enough available MSIX vectors
	 */
	if (sriov_base_vector <= max_valid_res_idx)
		return -EINVAL;

	pf->sriov_base_vector = sriov_base_vector;

	/* dip into irq_tracker entries and update used resources */
	if (num_msix_needed > (pf_total_msix_vectors - res->num_entries)) {
		pf->num_avail_sw_msix -=
			res->num_entries - pf->sriov_base_vector;
		res->end = pf->sriov_base_vector;
	}

	return 0;
}

/**
 * ice_check_avail_res - check if vectors and queues are available
 * @pf: pointer to the PF structure
 *
 * This function is where we calculate the actual number of resources for VF
 * VSIs; we don't reserve ahead of time during probe. Returns success if
 * vector and queue resources are available, otherwise returns an error code.
 */
static int ice_check_avail_res(struct ice_pf *pf)
{
	int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
	u16 num_msix, num_txq, num_rxq, num_avail_msix;
	struct device *dev = ice_pf_to_dev(pf);

	if (!pf->num_alloc_vfs || max_valid_res_idx < 0)
		return -EINVAL;

	/* add 1 to max_valid_res_idx to account for it being 0-based */
	num_avail_msix = pf->hw.func_caps.common_cap.num_msix_vectors -
		(max_valid_res_idx + 1);

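	/* e.g. (illustrative numbers): 1024 total vectors with tracker
	 * entries 0-199 in use leaves 824 vectors for the VFs
	 */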
	/* Grab from HW interrupts common pool
	 * Note: By the time the user decides it needs more vectors in a VF
	 * it's already too late since one must decide this prior to creating
	 * the VF interface. So the best we can do is take a guess as to what
	 * the user might want.
	 *
	 * We have two policies for vector allocation:
	 * 1. if num_alloc_vfs is from 1 to 16, then we consider this as small
	 * number of NFV VFs used for NFV appliances, since this is a special
	 * case, we try to assign maximum vectors per VF (65) as much as
	 * possible, based on determine_resources algorithm.
	 * 2. if num_alloc_vfs is from 17 to 256, then it's a large number of
	 * regular VFs which are not used for any special purpose. Hence try to
	 * grab default interrupt vectors (5 as supported by AVF driver).
	 */
	if (pf->num_alloc_vfs <= 16) {
		num_msix = ice_determine_res(pf, num_avail_msix,
					     ICE_MAX_INTR_PER_VF,
					     ICE_MIN_INTR_PER_VF);
	} else if (pf->num_alloc_vfs <= ICE_MAX_VF_COUNT) {
		num_msix = ice_determine_res(pf, num_avail_msix,
					     ICE_DFLT_INTR_PER_VF,
					     ICE_MIN_INTR_PER_VF);
	} else {
		dev_err(dev, "Number of VFs %d exceeds max VF count %d\n",
			pf->num_alloc_vfs, ICE_MAX_VF_COUNT);
		return -EIO;
	}

	if (!num_msix)
		return -EIO;

	/* Grab from the common pool
	 * start by requesting Default queues (4 as supported by AVF driver),
	 * Note that, the main difference between queues and vectors is, latter
	 * can only be reserved at init time but queues can be requested by VF
	 * at runtime through Virtchnl, that is the reason we start by reserving
	 * few queues.
	 */
	num_txq = ice_determine_res(pf, ice_get_avail_txq_count(pf),
				    ICE_DFLT_QS_PER_VF, ICE_MIN_QS_PER_VF);

	num_rxq = ice_determine_res(pf, ice_get_avail_rxq_count(pf),
				    ICE_DFLT_QS_PER_VF, ICE_MIN_QS_PER_VF);

	if (!num_txq || !num_rxq)
		return -EIO;

	if (ice_sriov_set_msix_res(pf, num_msix * pf->num_alloc_vfs))
		return -EINVAL;

	/* since AVF driver works with only queue pairs which means, it expects
	 * to have equal number of Rx and Tx queues, so take the minimum of
	 * available Tx or Rx queues
	 */
	pf->num_vf_qps = min_t(int, num_txq, num_rxq);
	pf->num_vf_msix = num_msix;

	return 0;
}

/**
 * ice_cleanup_and_realloc_vf - Clean up VF and reallocate resources after reset
 * @vf: pointer to the VF structure
 *
 * Cleanup a VF after the hardware reset is finished. Expects the caller to
 * have verified whether the reset is finished properly, and ensure the
 * minimum amount of wait time has passed. Reallocate VF resources back to make
 * VF state active
 */
static void ice_cleanup_and_realloc_vf(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_hw *hw;
	u32 reg;

	hw = &pf->hw;

	/* PF software completes the flow by notifying VF that reset flow is
	 * completed. This is done by enabling hardware by clearing the reset
	 * bit in the VPGEN_VFRTRIG reg and setting VFR_STATE in the VFGEN_RSTAT
	 * register to VFR completed (done at the end of this function)
	 * By doing this we allow HW to access VF memory at any point. If we
	 * did it any sooner, HW could access memory while it was being freed
	 * in ice_free_vf_res(), causing an IOMMU fault.
	 *
	 * On the other hand, this needs to be done ASAP, because the VF driver
	 * is waiting for this to happen and may report a timeout. It's
	 * harmless, but it gets logged into Guest OS kernel log, so best avoid
	 * it.
	 */
	reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
	reg &= ~VPGEN_VFRTRIG_VFSWR_M;
	wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);

	/* reallocate VF resources to finish resetting the VSI state */
	if (!ice_alloc_vf_res(vf)) {
		ice_ena_vf_mappings(vf);
		set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
		clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
	}

	/* Tell the VF driver the reset is done. This needs to be done only
	 * after VF has been fully initialized, because the VF driver may
	 * request resources immediately after setting this flag.
	 */
	wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
}

/**
 * ice_vf_set_vsi_promisc - set given VF VSI to given promiscuous mode(s)
 * @vf: pointer to the VF info
 * @vsi: the VSI being configured
 * @promisc_m: mask of promiscuous config bits
 * @rm_promisc: promisc flag request from the VF to remove or add filter
 *
 * This function configures VF VSI promiscuous mode, based on the VF requests,
 * for Unicast, Multicast and VLAN
 */
static enum ice_status
ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m,
		       bool rm_promisc)
{
	struct ice_pf *pf = vf->pf;
	enum ice_status status = 0;
	struct ice_hw *hw;

	hw = &pf->hw;
	if (vsi->num_vlan) {
		status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
						  rm_promisc);
	} else if (vf->port_vlan_info) {
		if (rm_promisc)
			status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
						       vf->port_vlan_info);
		else
			status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
						     vf->port_vlan_info);
	} else {
		if (rm_promisc)
			status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
						       0);
		else
			status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
						     0);
	}

	return status;
}

/**
 * ice_config_res_vfs - Finalize allocation of VFs resources in one go
 * @pf: pointer to the PF structure
 *
 * This function is called as the last part of resetting all VFs, or when
 * configuring VFs for the first time, where there is no resource to be freed.
 * Returns true if resources were properly allocated for all VFs, and false
 * otherwise.
 */
static bool ice_config_res_vfs(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	int v;

	if (ice_check_avail_res(pf)) {
		dev_err(dev, "Cannot allocate VF resources, try with fewer number of VFs\n");
		return false;
	}

	/* rearm global interrupts */
	if (test_and_clear_bit(__ICE_OICR_INTR_DIS, pf->state))
		ice_irq_dynamic_ena(hw, NULL, NULL);

	/* Finish resetting each VF and allocate resources */
	ice_for_each_vf(pf, v) {
		struct ice_vf *vf = &pf->vf[v];

		vf->num_vf_qs = pf->num_vf_qps;
		dev_dbg(dev, "VF-id %d has %d queues configured\n", vf->vf_id,
			vf->num_vf_qs);
		ice_cleanup_and_realloc_vf(vf);
	}

	ice_flush(hw);
	clear_bit(__ICE_VF_DIS, pf->state);

	return true;
}

/**
 * ice_reset_all_vfs - reset all allocated VFs in one go
 * @pf: pointer to the PF structure
 * @is_vflr: true if VFLR was issued, false if not
 *
 * First, tell the hardware to reset each VF, then do all the waiting in one
 * chunk, and finally finish restoring each VF after the wait. This is useful
 * during PF routines which need to reset all VFs, as otherwise it must perform
 * these resets in a serialized fashion.
 *
 * Returns true if any VFs were reset, and false otherwise.
 */
bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	int v, i;

	/* If we don't have any VFs, then there is nothing to reset */
	if (!pf->num_alloc_vfs)
		return false;

	/* If VFs have been disabled, there is no need to reset */
	if (test_and_set_bit(__ICE_VF_DIS, pf->state))
		return false;

	/* Begin reset on all VFs at once */
	ice_for_each_vf(pf, v)
		ice_trigger_vf_reset(&pf->vf[v], is_vflr, true);

	ice_for_each_vf(pf, v) {
		struct ice_vsi *vsi;

		vf = &pf->vf[v];
		vsi = pf->vsi[vf->lan_vsi_idx];
		if (test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states))
			ice_dis_vf_qs(vf);
		ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
				NULL, ICE_VF_RESET, vf->vf_id, NULL);
	}

	/* HW requires some time to make sure it can flush the FIFO for a VF
	 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
	 * sequence to make sure that it has completed. We'll keep track of
	 * the VFs using a simple iterator that increments once that VF has
	 * finished resetting.
	 */
	for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
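		/* (at most 10 passes with a 10-20 us sleep per failed check,
		 * so roughly 200 us of waiting worst case)
		 */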
		/* Check each VF in sequence */
		while (v < pf->num_alloc_vfs) {
			u32 reg;

			vf = &pf->vf[v];
			reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
			if (!(reg & VPGEN_VFRSTAT_VFRD_M)) {
				/* only delay if the check failed */
				usleep_range(10, 20);
				break;
			}

			/* If the current VF has finished resetting, move on
			 * to the next VF in sequence.
			 */
			v++;
		}
	}

	/* Display a warning if at least one VF didn't manage to reset in
	 * time, but continue on with the operation.
	 */
	if (v < pf->num_alloc_vfs)
		dev_warn(dev, "VF reset check timeout\n");

	/* free VF resources to begin resetting the VSI state */
	ice_for_each_vf(pf, v) {
		vf = &pf->vf[v];

		ice_free_vf_res(vf);

		/* Free VF queues as well, and reallocate later.
		 * If a given VF has different number of queues
		 * configured, the request for update will come
		 * via mailbox communication.
		 */
		vf->num_vf_qs = 0;
	}

	if (ice_sriov_free_msix_res(pf))
		dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");

	if (!ice_config_res_vfs(pf))
		return false;

	return true;
}

/**
 * ice_is_vf_disabled
 * @vf: pointer to the VF info
 *
 * Returns true if the PF or VF is disabled, false otherwise.
 */
static bool ice_is_vf_disabled(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;

	/* If the PF has been disabled, there is no need to reset the VF until
	 * the PF is active again. Similarly, if the VF has been disabled, this
	 * means something else is resetting the VF, so we shouldn't continue.
	 * Otherwise, set disable VF state bit for actual reset, and continue.
	 */
	return (test_bit(__ICE_VF_DIS, pf->state) ||
		test_bit(ICE_VF_STATE_DIS, vf->vf_states));
}

/**
 * ice_reset_vf - Reset a particular VF
 * @vf: pointer to the VF structure
 * @is_vflr: true if VFLR was issued, false if not
 *
 * Returns true if the VF is reset, false otherwise.
 */
static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	struct device *dev;
	struct ice_hw *hw;
	bool rsd = false;
	u8 promisc_m;
	u32 reg;
	int i;

	dev = ice_pf_to_dev(pf);

	if (ice_is_vf_disabled(vf)) {
		dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",
			vf->vf_id);
		return true;
	}

	/* Set VF disable bit state here, before triggering reset */
	set_bit(ICE_VF_STATE_DIS, vf->vf_states);
	ice_trigger_vf_reset(vf, is_vflr, false);

	vsi = pf->vsi[vf->lan_vsi_idx];

	if (test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states))
		ice_dis_vf_qs(vf);

	/* Call Disable LAN Tx queue AQ whether or not queues are
	 * enabled. This is needed for successful completion of VFR.
	 */
	ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
			NULL, ICE_VF_RESET, vf->vf_id, NULL);

	hw = &pf->hw;
	/* poll VPGEN_VFRSTAT reg to make sure
	 * that reset is complete
	 */
	for (i = 0; i < 10; i++) {
		/* VF reset requires driver to first reset the VF and then
		 * poll the status register to make sure that the reset
		 * completed successfully.
		 */
		reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
		if (reg & VPGEN_VFRSTAT_VFRD_M) {
			rsd = true;
			break;
		}

		/* only sleep if the reset is not done */
		usleep_range(10, 20);
	}

	/* Display a warning if the VF didn't manage to reset in time, but
	 * continue on with the operation.
	 */
	if (!rsd)
		dev_warn(dev, "VF reset check timeout on VF %d\n", vf->vf_id);

	/* disable promiscuous modes in case they were enabled
	 * ignore any error if disabling process failed
	 */
	if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
	    test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
		if (vf->port_vlan_info || vsi->num_vlan)
			promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
		else
			promisc_m = ICE_UCAST_PROMISC_BITS;

		vsi = pf->vsi[vf->lan_vsi_idx];
		if (ice_vf_set_vsi_promisc(vf, vsi, promisc_m, true))
			dev_err(dev, "disabling promiscuous mode failed\n");
	}

	/* free VF resources to begin resetting the VSI state */
	ice_free_vf_res(vf);

	ice_cleanup_and_realloc_vf(vf);

	ice_flush(hw);

	return true;
}

/**
 * ice_vc_notify_link_state - Inform all VFs on a PF of link status
 * @pf: pointer to the PF structure
 */
void ice_vc_notify_link_state(struct ice_pf *pf)
{
	int i;

	ice_for_each_vf(pf, i)
		ice_vc_notify_vf_link_state(&pf->vf[i]);
}

/**
 * ice_vc_notify_reset - Send pending reset message to all VFs
 * @pf: pointer to the PF structure
 *
 * indicate a pending reset to all VFs on a given PF
 */
void ice_vc_notify_reset(struct ice_pf *pf)
{
	struct virtchnl_pf_event pfe;

	if (!pf->num_alloc_vfs)
		return;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, VIRTCHNL_STATUS_SUCCESS,
			    (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
}

/**
 * ice_vc_notify_vf_reset - Notify VF of a reset event
 * @vf: pointer to the VF structure
 */
static void ice_vc_notify_vf_reset(struct ice_vf *vf)
{
	struct virtchnl_pf_event pfe;
	struct ice_pf *pf;

	if (!vf)
		return;

	pf = vf->pf;
	if (ice_validate_vf_id(pf, vf->vf_id))
		return;

	/* Bail out if the VF is in disabled state or is neither initialized
	 * nor active - otherwise proceed with notifications
	 */
	if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
	     !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) ||
	    test_bit(ICE_VF_STATE_DIS, vf->vf_states))
		return;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT,
			      VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
			      NULL);
}

/**
 * ice_alloc_vfs - Allocate and set up VFs resources
 * @pf: pointer to the PF structure
 * @num_alloc_vfs: number of VFs to allocate
 */
static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vfs;
	int i, ret;

	/* Disable global interrupt 0 so we don't try to handle the VFLR. */
	wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
	     ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
	set_bit(__ICE_OICR_INTR_DIS, pf->state);
	ice_flush(hw);

	ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
	if (ret) {
		pf->num_alloc_vfs = 0;
		goto err_unroll_intr;
	}
	/* allocate memory */
	vfs = devm_kcalloc(dev, num_alloc_vfs, sizeof(*vfs), GFP_KERNEL);
	if (!vfs) {
		ret = -ENOMEM;
		goto err_pci_disable_sriov;
	}
	pf->vf = vfs;
	pf->num_alloc_vfs = num_alloc_vfs;

	/* apply default profile */
	ice_for_each_vf(pf, i) {
		vfs[i].pf = pf;
		vfs[i].vf_sw_id = pf->first_sw;
		vfs[i].vf_id = i;

		/* assign default capabilities */
		set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
		vfs[i].spoofchk = true;
	}

	/* VF resources get allocated with initialization */
	if (!ice_config_res_vfs(pf)) {
		ret = -EIO;
		goto err_unroll_sriov;
	}

	return ret;

err_unroll_sriov:
	pf->vf = NULL;
	devm_kfree(dev, vfs);
	vfs = NULL;
	pf->num_alloc_vfs = 0;
err_pci_disable_sriov:
	pci_disable_sriov(pf->pdev);
err_unroll_intr:
	/* rearm interrupts here */
	ice_irq_dynamic_ena(hw, NULL, NULL);
	clear_bit(__ICE_OICR_INTR_DIS, pf->state);
	return ret;
}

/**
 * ice_pf_state_is_nominal - checks the PF for nominal state
 * @pf: pointer to PF to check
 *
 * Check the PF's state for a collection of bits that would indicate
 * the PF is in a state that would inhibit normal operation for
 * driver functionality.
 *
 * Returns true if PF is in a nominal state.
 * Returns false otherwise
 */
static bool ice_pf_state_is_nominal(struct ice_pf *pf)
{
	DECLARE_BITMAP(check_bits, __ICE_STATE_NBITS) = { 0 };

	if (!pf)
		return false;

	bitmap_set(check_bits, 0, __ICE_STATE_NOMINAL_CHECK_BITS);
	if (bitmap_intersects(pf->state, check_bits, __ICE_STATE_NBITS))
		return false;

	return true;
}

/**
 * ice_pci_sriov_ena - Enable or change number of VFs
 * @pf: pointer to the PF structure
 * @num_vfs: number of VFs to allocate
 */
static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
{
	int pre_existing_vfs = pci_num_vf(pf->pdev);
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	if (!ice_pf_state_is_nominal(pf)) {
		dev_err(dev, "Cannot enable SR-IOV, device not ready\n");
		return -EBUSY;
	}

	if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
		dev_err(dev, "This device is not capable of SR-IOV\n");
		return -EOPNOTSUPP;
	}

	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
		ice_free_vfs(pf);
	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
		return num_vfs;

	if (num_vfs > pf->num_vfs_supported) {
		dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
			num_vfs, pf->num_vfs_supported);
		return -ENOTSUPP;
	}

	dev_info(dev, "Allocating %d VFs\n", num_vfs);
	err = ice_alloc_vfs(pf, num_vfs);
	if (err) {
		dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
		return err;
	}

	set_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
	return num_vfs;
}

/**
 * ice_sriov_configure - Enable or change number of VFs via sysfs
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * This function is called when the user updates the number of VFs in sysfs.
 */
int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct ice_pf *pf = pci_get_drvdata(pdev);
	struct device *dev = ice_pf_to_dev(pf);

	if (ice_is_safe_mode(pf)) {
		dev_err(dev, "SR-IOV cannot be configured - Device is in Safe Mode\n");
		return -EOPNOTSUPP;
	}

	if (num_vfs)
		return ice_pci_sriov_ena(pf, num_vfs);

	if (!pci_vfs_assigned(pdev)) {
		ice_free_vfs(pf);
	} else {
		dev_err(dev, "can't free VFs because some are assigned to VMs.\n");
		return -EBUSY;
	}

	return 0;
}

/**
 * ice_process_vflr_event - Free VF resources via IRQ calls
 * @pf: pointer to the PF structure
 *
 * called from the VFLR IRQ handler to
 * free up VF resources and state variables
 */
void ice_process_vflr_event(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	int vf_id;
	u32 reg;

	if (!test_and_clear_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
	    !pf->num_alloc_vfs)
		return;

	ice_for_each_vf(pf, vf_id) {
		struct ice_vf *vf = &pf->vf[vf_id];
		u32 reg_idx, bit_idx;

		reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
		bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
		/* read GLGEN_VFLRSTAT register to find out the flr VFs */
		reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
		if (reg & BIT(bit_idx))
			/* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */
			ice_reset_vf(vf, true);
	}
}

/**
 * ice_vc_reset_vf - Perform software reset on the VF after informing the AVF
 * @vf: pointer to the VF info
 */
static void ice_vc_reset_vf(struct ice_vf *vf)
{
	ice_vc_notify_vf_reset(vf);
	ice_reset_vf(vf, false);
}

1071a835
AV
1516/**
1517 * ice_vc_send_msg_to_vf - Send message to VF
1518 * @vf: pointer to the VF info
1519 * @v_opcode: virtual channel opcode
1520 * @v_retval: virtual channel return value
1521 * @msg: pointer to the msg buffer
1522 * @msglen: msg length
1523 *
1524 * send msg to VF
1525 */
c8b7abdd 1526static int
cf6c6e01
MW
1527ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
1528 enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
1071a835
AV
1529{
1530 enum ice_status aq_ret;
4015d11e 1531 struct device *dev;
1071a835
AV
1532 struct ice_pf *pf;
1533
4c66d227 1534 if (!vf)
1071a835
AV
1535 return -EINVAL;
1536
1537 pf = vf->pf;
4c66d227
JB
1538 if (ice_validate_vf_id(pf, vf->vf_id))
1539 return -EINVAL;
1071a835 1540
4015d11e
BC
1541 dev = ice_pf_to_dev(pf);
1542
1071a835
AV
1543 /* single place to detect unsuccessful return values */
1544 if (v_retval) {
1545 vf->num_inval_msgs++;
4015d11e
BC
1546 dev_info(dev, "VF %d failed opcode %d, retval: %d\n", vf->vf_id,
1547 v_opcode, v_retval);
1071a835 1548 if (vf->num_inval_msgs > ICE_DFLT_NUM_INVAL_MSGS_ALLOWED) {
19cce2c6 1549 dev_err(dev, "Number of invalid messages exceeded for VF %d\n",
1071a835 1550 vf->vf_id);
4015d11e 1551 dev_err(dev, "Use PF Control I/F to enable the VF\n");
1071a835
AV
1552 set_bit(ICE_VF_STATE_DIS, vf->vf_states);
1553 return -EIO;
1554 }
1555 } else {
1556 vf->num_valid_msgs++;
1557 /* reset the invalid counter, if a valid message is received. */
1558 vf->num_inval_msgs = 0;
1559 }
1560
1561 aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
1562 msg, msglen, NULL);
90e47737 1563 if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) {
19cce2c6 1564 dev_info(dev, "Unable to send the message to VF %d ret %d aq_err %d\n",
90e47737 1565 vf->vf_id, aq_ret, pf->hw.mailboxq.sq_last_status);
1071a835
AV
1566 return -EIO;
1567 }
1568
1569 return 0;
1570}
1571
/**
 * ice_vc_get_ver_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request the API version used by the PF
 */
static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
{
	struct virtchnl_version_info info = {
		VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
	};

	vf->vf_ver = *(struct virtchnl_version_info *)msg;
	/* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
	if (VF_IS_V10(&vf->vf_ver))
		info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;

	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
				     VIRTCHNL_STATUS_SUCCESS, (u8 *)&info,
				     sizeof(struct virtchnl_version_info));
}

1594
1595/**
1596 * ice_vc_get_vf_res_msg
1597 * @vf: pointer to the VF info
1598 * @msg: pointer to the msg buffer
1599 *
1600 * called from the VF to request its resources
1601 */
1602static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
1603{
cf6c6e01 1604 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1071a835 1605 struct virtchnl_vf_resource *vfres = NULL;
1071a835
AV
1606 struct ice_pf *pf = vf->pf;
1607 struct ice_vsi *vsi;
1608 int len = 0;
1609 int ret;
1610
4c66d227 1611 if (ice_check_vf_init(pf, vf)) {
cf6c6e01 1612 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
1613 goto err;
1614 }
1615
1616 len = sizeof(struct virtchnl_vf_resource);
1617
9efe35d0 1618 vfres = kzalloc(len, GFP_KERNEL);
1071a835 1619 if (!vfres) {
cf6c6e01 1620 v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
1071a835
AV
1621 len = 0;
1622 goto err;
1623 }
1624 if (VF_IS_V11(&vf->vf_ver))
1625 vf->driver_caps = *(u32 *)msg;
1626 else
1627 vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
1628 VIRTCHNL_VF_OFFLOAD_RSS_REG |
1629 VIRTCHNL_VF_OFFLOAD_VLAN;
1630
1631 vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
1632 vsi = pf->vsi[vf->lan_vsi_idx];
f1ef73f5 1633 if (!vsi) {
cf6c6e01 1634 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
f1ef73f5
AA
1635 goto err;
1636 }
1637
1071a835
AV
1638 if (!vsi->info.pvid)
1639 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
1640
1641 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
1642 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
1643 } else {
1644 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)
1645 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
1646 else
1647 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
1648 }
1649
1650 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
1651 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
1652
1653 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
1654 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
1655
1656 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)
1657 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
1658
1659 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING)
1660 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
1661
1662 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
1663 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
1664
1665 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
1666 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
1667
1668 if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
1669 vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
1670
1671 vfres->num_vsis = 1;
1672 /* Tx and Rx queue counts are equal for the VF */
1673 vfres->num_queue_pairs = vsi->num_txq;
1674 vfres->max_vectors = pf->num_vf_msix;
1675 vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE;
1676 vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
1677
1678 vfres->vsi_res[0].vsi_id = vf->lan_vsi_num;
1679 vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
1680 vfres->vsi_res[0].num_queue_pairs = vsi->num_txq;
1681 ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
1682 vf->dflt_lan_addr.addr);
1683
1684 /* match guest capabilities */
1685 vf->driver_caps = vfres->vf_cap_flags;
1686
1687 set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
1688
1689err:
1690 /* send the response back to the VF */
cf6c6e01 1691 ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, v_ret,
1692 (u8 *)vfres, len);
1693
9efe35d0 1694 kfree(vfres);
1695 return ret;
1696}
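
/* Illustrative sketch (not part of the driver): the RSS capability
 * negotiation above grants exactly one RSS programming method, with
 * PF-managed RSS preferred over AQ-based RSS, which in turn is
 * preferred over direct register access.
 */
static u32 ice_example_pick_rss_cap(u32 driver_caps)
{
	if (driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF)
		return VIRTCHNL_VF_OFFLOAD_RSS_PF;
	if (driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)
		return VIRTCHNL_VF_OFFLOAD_RSS_AQ;
	return VIRTCHNL_VF_OFFLOAD_RSS_REG; /* lowest common denominator */
}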
1697
1698/**
1699 * ice_vc_reset_vf_msg
1700 * @vf: pointer to the VF info
1701 *
1702 * called from the VF to reset itself;
1703 * unlike other virtchnl messages, the PF driver
1704 * doesn't send a response back to the VF
1705 */
1706static void ice_vc_reset_vf_msg(struct ice_vf *vf)
1707{
1708 if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
1709 ice_reset_vf(vf, false);
1710}
1711
1712/**
1713 * ice_find_vsi_from_id
2f2da36e 1714 * @pf: the PF structure to search for the VSI
f9867df6 1715 * @id: ID of the VSI it is searching for
1071a835 1716 *
f9867df6 1717 * searches for the VSI with the given ID
1718 */
1719static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id)
1720{
1721 int i;
1722
80ed404a 1723 ice_for_each_vsi(pf, i)
1724 if (pf->vsi[i] && pf->vsi[i]->vsi_num == id)
1725 return pf->vsi[i];
1726
1727 return NULL;
1728}
1729
1730/**
1731 * ice_vc_isvalid_vsi_id
1732 * @vf: pointer to the VF info
f9867df6 1733 * @vsi_id: VF relative VSI ID
1071a835 1734 *
f9867df6 1735 * check for the valid VSI ID
1736 */
1737static bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
1738{
1739 struct ice_pf *pf = vf->pf;
1740 struct ice_vsi *vsi;
1741
1742 vsi = ice_find_vsi_from_id(pf, vsi_id);
1743
1744 return (vsi && (vsi->vf_id == vf->vf_id));
1745}
1746
1747/**
1748 * ice_vc_isvalid_q_id
1749 * @vf: pointer to the VF info
1750 * @vsi_id: VSI ID
1751 * @qid: VSI relative queue ID
1071a835 1752 *
f9867df6 1753 * check for the valid queue ID
1754 */
1755static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
1756{
1757 struct ice_vsi *vsi = ice_find_vsi_from_id(vf->pf, vsi_id);
1758 /* allocated Tx and Rx queues should always be equal for a VF VSI */
1759 return (vsi && (qid < vsi->alloc_txq));
1760}
1761
1762/**
1763 * ice_vc_isvalid_ring_len
1764 * @ring_len: length of ring
1765 *
1766 * check for a valid ring count: it must be zero, or a multiple of
77ca27c4 1767 * ICE_REQ_DESC_MULTIPLE within [ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC]
1768 */
1769static bool ice_vc_isvalid_ring_len(u16 ring_len)
1770{
1771 return ring_len == 0 ||
1772 (ring_len >= ICE_MIN_NUM_DESC &&
1773 ring_len <= ICE_MAX_NUM_DESC &&
1774 !(ring_len % ICE_REQ_DESC_MULTIPLE));
1775}
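
/* Usage examples for the check above (the numeric limits are assumptions
 * for illustration; the authoritative values are the ICE_MIN_NUM_DESC,
 * ICE_MAX_NUM_DESC and ICE_REQ_DESC_MULTIPLE definitions in the ice
 * headers):
 *
 *   ice_vc_isvalid_ring_len(0)   -> true  (zero means "not configured")
 *   ice_vc_isvalid_ring_len(512) -> true  (in range, descriptor multiple)
 *   ice_vc_isvalid_ring_len(500) -> false (not a descriptor multiple)
 */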
1776
1777/**
1778 * ice_vc_config_rss_key
1779 * @vf: pointer to the VF info
1780 * @msg: pointer to the msg buffer
1781 *
1782 * Configure the VF's RSS key
1783 */
1784static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
1785{
cf6c6e01 1786 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1787 struct virtchnl_rss_key *vrk =
1788 (struct virtchnl_rss_key *)msg;
f1ef73f5 1789 struct ice_pf *pf = vf->pf;
4c66d227 1790 struct ice_vsi *vsi;
1791
1792 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
cf6c6e01 1793 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1794 goto error_param;
1795 }
1796
1797 if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) {
cf6c6e01 1798 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1799 goto error_param;
1800 }
1801
3f416961 1802 if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
cf6c6e01 1803 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1804 goto error_param;
1805 }
1806
3f416961 1807 if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
cf6c6e01 1808 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1809 goto error_param;
1810 }
1811
1812 vsi = pf->vsi[vf->lan_vsi_idx];
1813 if (!vsi) {
cf6c6e01 1814 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1815 goto error_param;
1816 }
1817
1818 if (ice_set_rss(vsi, vrk->key, NULL, 0))
1819 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
1071a835 1820error_param:
cf6c6e01 1821 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, v_ret,
1822 NULL, 0);
1823}
1824
1825/**
1826 * ice_vc_config_rss_lut
1827 * @vf: pointer to the VF info
1828 * @msg: pointer to the msg buffer
1829 *
1830 * Configure the VF's RSS LUT
1831 */
1832static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
1833{
1834 struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
cf6c6e01 1835 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
f1ef73f5 1836 struct ice_pf *pf = vf->pf;
4c66d227 1837 struct ice_vsi *vsi;
1838
1839 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
cf6c6e01 1840 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1841 goto error_param;
1842 }
1843
1844 if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) {
cf6c6e01 1845 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1846 goto error_param;
1847 }
1848
3f416961 1849 if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) {
cf6c6e01 1850 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1851 goto error_param;
1852 }
1853
3f416961 1854 if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
cf6c6e01 1855 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1856 goto error_param;
1857 }
1858
1859 vsi = pf->vsi[vf->lan_vsi_idx];
1860 if (!vsi) {
cf6c6e01 1861 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1862 goto error_param;
1863 }
1864
1865 if (ice_set_rss(vsi, NULL, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE))
1866 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
1071a835 1867error_param:
cf6c6e01 1868 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret,
1869 NULL, 0);
1870}
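
/* Illustrative sketch (not part of the driver): the LUT sent in
 * virtchnl_rss_lut is simply an array of queue indices, one per hash
 * bucket. A VF driver typically fills it by spreading buckets evenly
 * over its enabled queues; names here are hypothetical.
 */
static void ice_example_fill_rss_lut(u8 *lut, u16 lut_size, u16 num_qs)
{
	u16 i;

	for (i = 0; i < lut_size; i++)
		lut[i] = i % num_qs; /* bucket i steers to queue i % num_qs */
}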
1871
1872/**
1873 * ice_set_vf_spoofchk
1874 * @netdev: network interface device structure
1875 * @vf_id: VF identifier
1876 * @ena: flag to enable or disable feature
1877 *
1878 * Enable or disable VF spoof checking
1879 */
1880int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
1881{
1882 struct ice_netdev_priv *np = netdev_priv(netdev);
1883 struct ice_pf *pf = np->vsi->back;
1884 struct ice_vsi_ctx *ctx;
1885 struct ice_vsi *vf_vsi;
1886 enum ice_status status;
1887 struct device *dev;
1888 struct ice_vf *vf;
1889 int ret = 0;
1890
1891 dev = ice_pf_to_dev(pf);
1892 if (ice_validate_vf_id(pf, vf_id))
1893 return -EINVAL;
1894
1895 vf = &pf->vf[vf_id];
1896
1897 if (ice_check_vf_init(pf, vf))
1898 return -EBUSY;
1899
1900 vf_vsi = pf->vsi[vf->lan_vsi_idx];
1901 if (!vf_vsi) {
1902 netdev_err(netdev, "VSI %d for VF %d is null\n",
1903 vf->lan_vsi_idx, vf->vf_id);
1904 return -EINVAL;
1905 }
1906
1907 if (vf_vsi->type != ICE_VSI_VF) {
19cce2c6 1908 netdev_err(netdev, "Type %d of VSI %d for VF %d is not ICE_VSI_VF\n",
1909 vf_vsi->type, vf_vsi->vsi_num, vf->vf_id);
1910 return -ENODEV;
1911 }
1912
1913 if (ena == vf->spoofchk) {
1914 dev_dbg(dev, "VF spoofchk already %s\n", ena ? "ON" : "OFF");
1915 return 0;
1916 }
1917
1918 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1919 if (!ctx)
1920 return -ENOMEM;
1921
1922 ctx->info.sec_flags = vf_vsi->info.sec_flags;
1923 ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
1924 if (ena) {
1925 ctx->info.sec_flags |=
1926 ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
1927 (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
1928 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
1929 } else {
1930 ctx->info.sec_flags &=
1931 ~(ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
1932 (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
1933 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S));
1934 }
1935
1936 status = ice_update_vsi(&pf->hw, vf_vsi->idx, ctx, NULL);
1937 if (status) {
19cce2c6 1938 dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d, error %d\n",
1939 ena ? "en" : "dis", vf->vf_id, vf_vsi->vsi_num, status);
1940 ret = -EIO;
1941 goto out;
1942 }
1943
1944 /* only update spoofchk state and VSI context on success */
1945 vf_vsi->info.sec_flags = ctx->info.sec_flags;
1946 vf->spoofchk = ena;
1947
1948out:
1949 kfree(ctx);
1950 return ret;
1951}
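
/* Usage example (host side): this is reached through ndo_set_vf_spoofchk,
 * e.g. via iproute2 on the PF netdev:
 *
 *   ip link set <pf-netdev> vf 0 spoofchk on
 *   ip link set <pf-netdev> vf 0 spoofchk off
 */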
1952
1953/**
1954 * ice_vc_get_stats_msg
1955 * @vf: pointer to the VF info
1956 * @msg: pointer to the msg buffer
1957 *
1958 * called from the VF to get VSI stats
1959 */
1960static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
1961{
cf6c6e01 1962 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1963 struct virtchnl_queue_select *vqs =
1964 (struct virtchnl_queue_select *)msg;
949375de 1965 struct ice_eth_stats stats = { 0 };
f1ef73f5 1966 struct ice_pf *pf = vf->pf;
1967 struct ice_vsi *vsi;
1968
1969 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
cf6c6e01 1970 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1971 goto error_param;
1972 }
1973
1974 if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
cf6c6e01 1975 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1976 goto error_param;
1977 }
1978
f1ef73f5 1979 vsi = pf->vsi[vf->lan_vsi_idx];
1071a835 1980 if (!vsi) {
cf6c6e01 1981 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1982 goto error_param;
1983 }
1984
1985 ice_update_eth_stats(vsi);
1986
1987 stats = vsi->eth_stats;
1988
1989error_param:
1990 /* send the response to the VF */
cf6c6e01 1991 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, v_ret,
1992 (u8 *)&stats, sizeof(stats));
1993}
1994
1995/**
1996 * ice_vc_ena_qs_msg
1997 * @vf: pointer to the VF info
1998 * @msg: pointer to the msg buffer
1999 *
2000 * called from the VF to enable all or specific queue(s)
2001 */
2002static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
2003{
cf6c6e01 2004 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2005 struct virtchnl_queue_select *vqs =
2006 (struct virtchnl_queue_select *)msg;
f1ef73f5 2007 struct ice_pf *pf = vf->pf;
1071a835 2008 struct ice_vsi *vsi;
2009 unsigned long q_map;
2010 u16 vf_q_id;
2011
2012 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
cf6c6e01 2013 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2014 goto error_param;
2015 }
2016
2017 if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
cf6c6e01 2018 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2019 goto error_param;
2020 }
2021
2022 if (!vqs->rx_queues && !vqs->tx_queues) {
cf6c6e01 2023 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2024 goto error_param;
2025 }
2026
2027 if (vqs->rx_queues > ICE_MAX_BASE_QS_PER_VF ||
2028 vqs->tx_queues > ICE_MAX_BASE_QS_PER_VF) {
2029 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2030 goto error_param;
2031 }
2032
f1ef73f5 2033 vsi = pf->vsi[vf->lan_vsi_idx];
1071a835 2034 if (!vsi) {
cf6c6e01 2035 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2036 goto error_param;
2037 }
2038
2039 /* Enable only Rx rings; Tx rings were enabled by the FW when the
2040 * Tx queue group list was configured and the context bits were
2041 * programmed using ice_vsi_cfg_txqs
2042 */
2043 q_map = vqs->rx_queues;
2044 for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) {
2045 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
2046 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2047 goto error_param;
2048 }
2049
2050 /* Skip queue if enabled */
2051 if (test_bit(vf_q_id, vf->rxq_ena))
2052 continue;
2053
13a6233b 2054 if (ice_vsi_ctrl_one_rx_ring(vsi, true, vf_q_id, true)) {
19cce2c6 2055 dev_err(ice_pf_to_dev(vsi->back), "Failed to enable Rx ring %d on VSI %d\n",
2056 vf_q_id, vsi->vsi_num);
2057 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2058 goto error_param;
2059 }
2060
2061 set_bit(vf_q_id, vf->rxq_ena);
2062 vf->num_qs_ena++;
2063 }
2064
2065 vsi = pf->vsi[vf->lan_vsi_idx];
2066 q_map = vqs->tx_queues;
2067 for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) {
2068 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
2069 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2070 goto error_param;
2071 }
2072
2073 /* Skip queue if enabled */
2074 if (test_bit(vf_q_id, vf->txq_ena))
2075 continue;
2076
2077 set_bit(vf_q_id, vf->txq_ena);
2078 vf->num_qs_ena++;
2079 }
2080
2081 /* Set flag to indicate that queues are enabled */
cf6c6e01 2082 if (v_ret == VIRTCHNL_STATUS_SUCCESS)
77ca27c4 2083 set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
2084
2085error_param:
2086 /* send the response to the VF */
cf6c6e01 2087 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret,
2088 NULL, 0);
2089}
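
/* Illustrative sketch (not part of the driver): rx_queues/tx_queues in
 * virtchnl_queue_select are bitmaps, so the loops above visit only the
 * requested queue IDs. For example, a VF enabling queues 0 and 2 sends
 * a queue map of 0x5:
 */
static u16 ice_example_count_selected_queues(void)
{
	unsigned long q_map = 0x5; /* bits 0 and 2 set */
	u16 q_id, count = 0;

	for_each_set_bit(q_id, &q_map, ICE_MAX_BASE_QS_PER_VF)
		count++; /* visits q_id == 0, then q_id == 2 */

	return count; /* 2 */
}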
2090
2091/**
2092 * ice_vc_dis_qs_msg
2093 * @vf: pointer to the VF info
2094 * @msg: pointer to the msg buffer
2095 *
2096 * called from the VF to disable all or specific
2097 * queue(s)
2098 */
2099static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
2100{
cf6c6e01 2101 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2102 struct virtchnl_queue_select *vqs =
2103 (struct virtchnl_queue_select *)msg;
f1ef73f5 2104 struct ice_pf *pf = vf->pf;
1071a835 2105 struct ice_vsi *vsi;
2106 unsigned long q_map;
2107 u16 vf_q_id;
2108
2109 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
77ca27c4 2110 !test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) {
cf6c6e01 2111 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2112 goto error_param;
2113 }
2114
2115 if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
cf6c6e01 2116 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2117 goto error_param;
2118 }
2119
2120 if (!vqs->rx_queues && !vqs->tx_queues) {
cf6c6e01 2121 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2122 goto error_param;
2123 }
2124
2125 if (vqs->rx_queues > ICE_MAX_BASE_QS_PER_VF ||
2126 vqs->tx_queues > ICE_MAX_BASE_QS_PER_VF) {
2127 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2128 goto error_param;
2129 }
2130
f1ef73f5 2131 vsi = pf->vsi[vf->lan_vsi_idx];
1071a835 2132 if (!vsi) {
cf6c6e01 2133 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2134 goto error_param;
2135 }
2136
2137 if (vqs->tx_queues) {
2138 q_map = vqs->tx_queues;
2139
2140 for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) {
2141 struct ice_ring *ring = vsi->tx_rings[vf_q_id];
2142 struct ice_txq_meta txq_meta = { 0 };
2143
2144 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
2145 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2146 goto error_param;
2147 }
2148
2149 /* Skip queue if not enabled */
2150 if (!test_bit(vf_q_id, vf->txq_ena))
2151 continue;
2152
2153 ice_fill_txq_meta(vsi, ring, &txq_meta);
2154
2155 if (ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id,
2156 ring, &txq_meta)) {
19cce2c6 2157 dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
2158 vf_q_id, vsi->vsi_num);
2159 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2160 goto error_param;
2161 }
2162
2163 /* Clear enabled queues flag */
2164 clear_bit(vf_q_id, vf->txq_ena);
2165 vf->num_qs_ena--;
2166 }
2167 }
2168
2169 if (vqs->rx_queues) {
2170 q_map = vqs->rx_queues;
2171
2172 for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) {
2173 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
2174 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2175 goto error_param;
2176 }
2177
2178 /* Skip queue if not enabled */
2179 if (!test_bit(vf_q_id, vf->rxq_ena))
2180 continue;
2181
2182 if (ice_vsi_ctrl_one_rx_ring(vsi, false, vf_q_id,
2183 true)) {
19cce2c6 2184 dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Rx ring %d on VSI %d\n",
2185 vf_q_id, vsi->vsi_num);
2186 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2187 goto error_param;
2188 }
2189
2190 /* Clear enabled queues flag */
2191 clear_bit(vf_q_id, vf->rxq_ena);
2192 vf->num_qs_ena--;
2193 }
2194 }
2195
2196 /* Clear enabled queues flag */
2197 if (v_ret == VIRTCHNL_STATUS_SUCCESS && !vf->num_qs_ena)
2198 clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
2199
2200error_param:
2201 /* send the response to the VF */
cf6c6e01 2202 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret,
2203 NULL, 0);
2204}
2205
2206/**
2207 * ice_vc_cfg_irq_map_msg
2208 * @vf: pointer to the VF info
2209 * @msg: pointer to the msg buffer
2210 *
2211 * called from the VF to configure the IRQ to queue map
2212 */
2213static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
2214{
cf6c6e01 2215 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
173e23c0 2216 struct virtchnl_irq_map_info *irqmap_info;
2217 u16 vsi_id, vsi_q_id, vector_id;
2218 struct virtchnl_vector_map *map;
1071a835 2219 struct ice_pf *pf = vf->pf;
047e52c0 2220 u16 num_q_vectors_mapped;
173e23c0 2221 struct ice_vsi *vsi;
2222 unsigned long qmap;
2223 int i;
2224
173e23c0 2225 irqmap_info = (struct virtchnl_irq_map_info *)msg;
2226 num_q_vectors_mapped = irqmap_info->num_vectors;
2227
2228 /* Check to make sure number of VF vectors mapped is not greater than
2229 * number of VF vectors originally allocated, and check that
2230 * there is actually at least a single VF queue vector mapped
2231 */
ba0db585 2232 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
2233 pf->num_vf_msix < num_q_vectors_mapped ||
2234 !irqmap_info->num_vectors) {
cf6c6e01 2235 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2236 goto error_param;
2237 }
2238
2239 vsi = pf->vsi[vf->lan_vsi_idx];
2240 if (!vsi) {
2241 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2242 goto error_param;
2243 }
2244
2245 for (i = 0; i < num_q_vectors_mapped; i++) {
2246 struct ice_q_vector *q_vector;
ba0db585 2247
2248 map = &irqmap_info->vecmap[i];
2249
2250 vector_id = map->vector_id;
2251 vsi_id = map->vsi_id;
2252 /* vector_id is always 0-based for each VF, and can never be
2253 * larger than or equal to the max allowed interrupts per VF
2254 */
2255 if (!(vector_id < ICE_MAX_INTR_PER_VF) ||
2256 !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
2257 (!vector_id && (map->rxq_map || map->txq_map))) {
2258 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2259 goto error_param;
2260 }
2261
2262 /* No need to map VF miscellaneous or rogue vector */
2263 if (!vector_id)
2264 continue;
2265
2266 /* Subtract the non-queue vector from the vector_id passed by the VF
2267 * to get the actual VSI queue vector array index
2268 */
2269 q_vector = vsi->q_vectors[vector_id - ICE_NONQ_VECS_VF];
2270 if (!q_vector) {
cf6c6e01 2271 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2272 goto error_param;
2273 }
2274
2275 /* look out for an invalid queue index */
2276 qmap = map->rxq_map;
ba0db585 2277 q_vector->num_ring_rx = 0;
2278 for_each_set_bit(vsi_q_id, &qmap, ICE_MAX_BASE_QS_PER_VF) {
2279 if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) {
cf6c6e01 2280 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2281 goto error_param;
2282 }
2283 q_vector->num_ring_rx++;
2284 q_vector->rx.itr_idx = map->rxitr_idx;
2285 vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
2286 ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id,
2287 q_vector->rx.itr_idx);
2288 }
2289
2290 qmap = map->txq_map;
ba0db585 2291 q_vector->num_ring_tx = 0;
2292 for_each_set_bit(vsi_q_id, &qmap, ICE_MAX_BASE_QS_PER_VF) {
2293 if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) {
cf6c6e01 2294 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2295 goto error_param;
2296 }
2297 q_vector->num_ring_tx++;
2298 q_vector->tx.itr_idx = map->txitr_idx;
2299 vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
2300 ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id,
2301 q_vector->tx.itr_idx);
2302 }
2303 }
2304
2305error_param:
2306 /* send the response to the VF */
cf6c6e01 2307 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret,
2308 NULL, 0);
2309}
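
/* Illustrative sketch (not part of the driver): how a VF-relative
 * vector_id maps onto the VSI's q_vectors[] in the loop above. Vector 0
 * is the VF's miscellaneous/mailbox interrupt and never carries queues,
 * so queue vectors start at ICE_NONQ_VECS_VF.
 */
static int ice_example_q_vector_idx(u16 vector_id)
{
	if (!vector_id)
		return -1; /* vector 0: mailbox/misc, no queue mapping */
	return vector_id - ICE_NONQ_VECS_VF; /* index into vsi->q_vectors[] */
}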
2310
2311/**
2312 * ice_vc_cfg_qs_msg
2313 * @vf: pointer to the VF info
2314 * @msg: pointer to the msg buffer
2315 *
2316 * called from the VF to configure the Rx/Tx queues
2317 */
2318static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
2319{
cf6c6e01 2320 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2321 struct virtchnl_vsi_queue_config_info *qci =
2322 (struct virtchnl_vsi_queue_config_info *)msg;
2323 struct virtchnl_queue_pair_info *qpi;
77ca27c4 2324 u16 num_rxq = 0, num_txq = 0;
5743020d 2325 struct ice_pf *pf = vf->pf;
2326 struct ice_vsi *vsi;
2327 int i;
2328
2329 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
cf6c6e01 2330 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2331 goto error_param;
2332 }
2333
2334 if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
cf6c6e01 2335 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2336 goto error_param;
2337 }
2338
2339 vsi = pf->vsi[vf->lan_vsi_idx];
2340 if (!vsi) {
cf6c6e01 2341 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2342 goto error_param;
2343 }
2344
2345 if (qci->num_queue_pairs > ICE_MAX_BASE_QS_PER_VF ||
2346 qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
19cce2c6 2347 dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n",
9c7dd756 2348 vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
2349 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2350 goto error_param;
2351 }
2352
2353 for (i = 0; i < qci->num_queue_pairs; i++) {
2354 qpi = &qci->qpair[i];
2355 if (qpi->txq.vsi_id != qci->vsi_id ||
2356 qpi->rxq.vsi_id != qci->vsi_id ||
2357 qpi->rxq.queue_id != qpi->txq.queue_id ||
f8af5bf5 2358 qpi->txq.headwb_enabled ||
2359 !ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
2360 !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
1071a835 2361 !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
cf6c6e01 2362 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2363 goto error_param;
2364 }
2365 /* copy Tx queue info from VF into VSI */
2366 if (qpi->txq.ring_len > 0) {
2367 num_txq++;
2368 vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
2369 vsi->tx_rings[i]->count = qpi->txq.ring_len;
1071a835 2370 }
2371
2372 /* copy Rx queue info from VF into VSI */
2373 if (qpi->rxq.ring_len > 0) {
2374 num_rxq++;
2375 vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
2376 vsi->rx_rings[i]->count = qpi->rxq.ring_len;
2377
2378 if (qpi->rxq.databuffer_size != 0 &&
2379 (qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
2380 qpi->rxq.databuffer_size < 1024)) {
2381 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2382 goto error_param;
2383 }
2384 vsi->rx_buf_len = qpi->rxq.databuffer_size;
2385 vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len;
2386 if (qpi->rxq.max_pkt_size >= (16 * 1024) ||
2387 qpi->rxq.max_pkt_size < 64) {
2388 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2389 goto error_param;
2390 }
1071a835 2391 }
77ca27c4 2392
2393 vsi->max_frame = qpi->rxq.max_pkt_size;
2394 }
2395
2396 /* The VF can request fewer queues than currently allocated or than the
2397 * default allocation, so update the VSI with the new number
2398 */
2399 vsi->num_txq = num_txq;
2400 vsi->num_rxq = num_rxq;
105e5bc2 2401 /* All queues of VF VSI are in TC 0 */
2402 vsi->tc_cfg.tc_info[0].qcount_tx = num_txq;
2403 vsi->tc_cfg.tc_info[0].qcount_rx = num_rxq;
1071a835 2404
2405 if (ice_vsi_cfg_lan_txqs(vsi) || ice_vsi_cfg_rxqs(vsi))
2406 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
2407
2408error_param:
2409 /* send the response to the VF */
cf6c6e01 2410 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, v_ret,
2411 NULL, 0);
2412}
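
/* Illustrative sketch (not part of the driver): the Rx buffer size rule
 * enforced above, as a standalone predicate. Zero means "leave the
 * current buffer length alone"; otherwise the size must fall within
 * [1024, 16K - 128] bytes.
 */
static bool ice_example_rx_buf_size_ok(u32 databuffer_size)
{
	if (!databuffer_size)
		return true;
	return databuffer_size >= 1024 &&
	       databuffer_size <= (16 * 1024) - 128;
}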
2413
2414/**
2415 * ice_is_vf_trusted
2416 * @vf: pointer to the VF info
2417 */
2418static bool ice_is_vf_trusted(struct ice_vf *vf)
2419{
2420 return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
2421}
2422
2423/**
2424 * ice_can_vf_change_mac
2425 * @vf: pointer to the VF info
2426 *
2427 * Return true if the VF is allowed to change its MAC filters, false otherwise
2428 */
2429static bool ice_can_vf_change_mac(struct ice_vf *vf)
2430{
2431 /* If the VF MAC address has been set administratively (via the
2432 * ndo_set_vf_mac command), then deny permission to the VF to
2433 * add/delete unicast MAC addresses, unless the VF is trusted
2434 */
2435 if (vf->pf_set_mac && !ice_is_vf_trusted(vf))
2436 return false;
2437
2438 return true;
2439}
2440
2441/**
2442 * ice_vc_add_mac_addr - attempt to add the MAC address passed in
2443 * @vf: pointer to the VF info
2444 * @vsi: pointer to the VF's VSI
2445 * @mac_addr: MAC address to add
2446 */
2447static int
2448ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
2449{
2450 struct device *dev = ice_pf_to_dev(vf->pf);
2451 enum ice_status status;
2452
2453 /* default unicast MAC already added */
2454 if (ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
2455 return 0;
2456
2457 if (is_unicast_ether_addr(mac_addr) && !ice_can_vf_change_mac(vf)) {
2458 dev_err(dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
2459 return -EPERM;
2460 }
2461
2462 status = ice_vsi_cfg_mac_fltr(vsi, mac_addr, true);
2463 if (status == ICE_ERR_ALREADY_EXISTS) {
2464 dev_err(dev, "MAC %pM already exists for VF %d\n", mac_addr,
2465 vf->vf_id);
2466 return -EEXIST;
2467 } else if (status) {
2468 dev_err(dev, "Failed to add MAC %pM for VF %d, error %d\n",
2469 mac_addr, vf->vf_id, status);
2470 return -EIO;
2471 }
2472
2473 /* only set dflt_lan_addr once */
2474 if (is_zero_ether_addr(vf->dflt_lan_addr.addr) &&
2475 is_unicast_ether_addr(mac_addr))
2476 ether_addr_copy(vf->dflt_lan_addr.addr, mac_addr);
2477
2478 vf->num_mac++;
2479
2480 return 0;
2481}
2482
2483/**
2484 * ice_vc_del_mac_addr - attempt to delete the MAC address passed in
2485 * @vf: pointer to the VF info
2486 * @vsi: pointer to the VF's VSI
2487 * @mac_addr: MAC address to delete
2488 */
2489static int
2490ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
2491{
2492 struct device *dev = ice_pf_to_dev(vf->pf);
2493 enum ice_status status;
2494
2495 if (!ice_can_vf_change_mac(vf) &&
2496 ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
2497 return 0;
2498
2499 status = ice_vsi_cfg_mac_fltr(vsi, mac_addr, false);
2500 if (status == ICE_ERR_DOES_NOT_EXIST) {
2501 dev_err(dev, "MAC %pM does not exist for VF %d\n", mac_addr,
2502 vf->vf_id);
2503 return -ENOENT;
2504 } else if (status) {
2505 dev_err(dev, "Failed to delete MAC %pM for VF %d, error %d\n",
2506 mac_addr, vf->vf_id, status);
2507 return -EIO;
2508 }
2509
2510 if (ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
2511 eth_zero_addr(vf->dflt_lan_addr.addr);
2512
2513 vf->num_mac--;
2514
2515 return 0;
2516}
2517
2518/**
2519 * ice_vc_handle_mac_addr_msg
2520 * @vf: pointer to the VF info
2521 * @msg: pointer to the msg buffer
f9867df6 2522 * @set: true if MAC filters are being set, false otherwise
1071a835 2523 *
df17b7e0 2524 * add or remove guest MAC address filters
2525 */
2526static int
2527ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
2528{
2529 int (*ice_vc_cfg_mac)
2530 (struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr);
cf6c6e01 2531 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2532 struct virtchnl_ether_addr_list *al =
2533 (struct virtchnl_ether_addr_list *)msg;
2534 struct ice_pf *pf = vf->pf;
2535 enum virtchnl_ops vc_op;
1071a835 2536 struct ice_vsi *vsi;
2537 int i;
2538
ed4c068d 2539 if (set) {
1071a835 2540 vc_op = VIRTCHNL_OP_ADD_ETH_ADDR;
2541 ice_vc_cfg_mac = ice_vc_add_mac_addr;
2542 } else {
1071a835 2543 vc_op = VIRTCHNL_OP_DEL_ETH_ADDR;
2544 ice_vc_cfg_mac = ice_vc_del_mac_addr;
2545 }
2546
2547 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
2548 !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
cf6c6e01 2549 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2550 goto handle_mac_exit;
2551 }
2552
2553 /* If this VF is not privileged, then we can't add more than a
2554 * limited number of addresses. Check to make sure that the
2555 * additions do not push us over the limit.
2556 */
2557 if (set && !ice_is_vf_trusted(vf) &&
2558 (vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) {
19cce2c6 2559 dev_err(ice_pf_to_dev(pf), "Can't add more MAC addresses: VF-%d is not trusted, switch the VF to trusted mode in order to add more\n",
d84b899a 2560 vf->vf_id);
cf6c6e01 2561 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2562 goto handle_mac_exit;
2563 }
2564
2565 vsi = pf->vsi[vf->lan_vsi_idx];
f1ef73f5 2566 if (!vsi) {
cf6c6e01 2567 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2568 goto handle_mac_exit;
2569 }
2570
2571 for (i = 0; i < al->num_elements; i++) {
2572 u8 *mac_addr = al->list[i].addr;
2573 int result;
1071a835 2574
2575 if (is_broadcast_ether_addr(mac_addr) ||
2576 is_zero_ether_addr(mac_addr))
2577 continue;
1071a835 2578
2579 result = ice_vc_cfg_mac(vf, vsi, mac_addr);
2580 if (result == -EEXIST || result == -ENOENT) {
2581 continue;
2582 } else if (result) {
2583 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
2584 goto handle_mac_exit;
2585 }
2586 }
2587
1071a835 2588handle_mac_exit:
1071a835 2589 /* send the response to the VF */
cf6c6e01 2590 return ice_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0);
2591}
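
/* Illustrative sketch (not part of the driver): add and delete share the
 * walker above by dispatching through a function pointer, e.g.
 *
 *   int (*cfg)(struct ice_vf *, struct ice_vsi *, u8 *) =
 *           set ? ice_vc_add_mac_addr : ice_vc_del_mac_addr;
 *
 * which keeps the per-address error policy (-EEXIST and -ENOENT are
 * tolerated, anything else aborts) in one place.
 */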
2592
2593/**
2594 * ice_vc_add_mac_addr_msg
2595 * @vf: pointer to the VF info
2596 * @msg: pointer to the msg buffer
2597 *
2598 * add guest MAC address filter
2599 */
2600static int ice_vc_add_mac_addr_msg(struct ice_vf *vf, u8 *msg)
2601{
2602 return ice_vc_handle_mac_addr_msg(vf, msg, true);
2603}
2604
2605/**
2606 * ice_vc_del_mac_addr_msg
2607 * @vf: pointer to the VF info
2608 * @msg: pointer to the msg buffer
2609 *
2610 * remove guest MAC address filter
2611 */
2612static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
2613{
2614 return ice_vc_handle_mac_addr_msg(vf, msg, false);
2615}
2616
2617/**
2618 * ice_vc_request_qs_msg
2619 * @vf: pointer to the VF info
2620 * @msg: pointer to the msg buffer
2621 *
2622 * VFs get a default number of queues but can use this message to request a
df17b7e0 2623 * different number. If the request is successful, the PF will reset the VF and
1071a835 2624 * return 0. If unsuccessful, the PF will inform the VF of the number of
f9867df6 2625 * available queue pairs via a virtchnl message response.
2626 */
2627static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
2628{
cf6c6e01 2629 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2630 struct virtchnl_vf_res_request *vfres =
2631 (struct virtchnl_vf_res_request *)msg;
cbfe31b5 2632 u16 req_queues = vfres->num_queue_pairs;
1071a835 2633 struct ice_pf *pf = vf->pf;
2634 u16 max_allowed_vf_queues;
2635 u16 tx_rx_queue_left;
4015d11e 2636 struct device *dev;
4ee656bb 2637 u16 cur_queues;
1071a835 2638
4015d11e 2639 dev = ice_pf_to_dev(pf);
1071a835 2640 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
cf6c6e01 2641 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2642 goto error_param;
2643 }
2644
5743020d 2645 cur_queues = vf->num_vf_qs;
2646 tx_rx_queue_left = min_t(u16, ice_get_avail_txq_count(pf),
2647 ice_get_avail_rxq_count(pf));
5743020d 2648 max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
cbfe31b5 2649 if (!req_queues) {
4015d11e 2650 dev_err(dev, "VF %d tried to request 0 queues. Ignoring.\n",
cbfe31b5 2651 vf->vf_id);
5743020d 2652 } else if (req_queues > ICE_MAX_BASE_QS_PER_VF) {
4015d11e 2653 dev_err(dev, "VF %d tried to request more than %d queues.\n",
2654 vf->vf_id, ICE_MAX_BASE_QS_PER_VF);
2655 vfres->num_queue_pairs = ICE_MAX_BASE_QS_PER_VF;
2656 } else if (req_queues > cur_queues &&
2657 req_queues - cur_queues > tx_rx_queue_left) {
19cce2c6 2658 dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n",
1071a835 2659 vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
cbfe31b5 2660 vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
5743020d 2661 ICE_MAX_BASE_QS_PER_VF);
2662 } else {
2663 /* request is successful, then reset VF */
2664 vf->num_req_qs = req_queues;
ff010eca 2665 ice_vc_reset_vf(vf);
4015d11e 2666 dev_info(dev, "VF %d granted request of %u queues.\n",
2667 vf->vf_id, req_queues);
2668 return 0;
2669 }
2670
2671error_param:
2672 /* send the response to the VF */
2673 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
cf6c6e01 2674 v_ret, (u8 *)vfres, sizeof(*vfres));
2675}
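
/* Illustrative sketch (not part of the driver): the counter-offer
 * written back on the rejection paths above. A grantable request never
 * reaches this point, since it resets the VF instead.
 */
static u16 ice_example_qs_counter_offer(u16 cur_queues, u16 req_queues,
					u16 tx_rx_queue_left)
{
	u16 max_allowed = tx_rx_queue_left + cur_queues;

	if (req_queues > ICE_MAX_BASE_QS_PER_VF)
		return ICE_MAX_BASE_QS_PER_VF; /* capped at the per-VF max */
	/* asked for more than the PF has spare: offer the best total */
	return min_t(u16, max_allowed, ICE_MAX_BASE_QS_PER_VF);
}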
2676
2677/**
2678 * ice_set_vf_port_vlan
2679 * @netdev: network interface device structure
2680 * @vf_id: VF identifier
f9867df6 2681 * @vlan_id: VLAN ID being set
2682 * @qos: priority setting
2683 * @vlan_proto: VLAN protocol
2684 *
f9867df6 2685 * program VF Port VLAN ID and/or QoS
2686 */
2687int
2688ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
2689 __be16 vlan_proto)
2690{
4c66d227 2691 struct ice_pf *pf = ice_netdev_to_pf(netdev);
7c710869 2692 struct ice_vsi *vsi;
4015d11e 2693 struct device *dev;
7c710869 2694 struct ice_vf *vf;
61c9ce86 2695 u16 vlanprio;
2696 int ret = 0;
2697
4015d11e 2698 dev = ice_pf_to_dev(pf);
4c66d227 2699 if (ice_validate_vf_id(pf, vf_id))
7c710869 2700 return -EINVAL;
7c710869 2701
2702 if (vlan_id >= VLAN_N_VID || qos > 7) {
2703 dev_err(dev, "Invalid Port VLAN parameters for VF %d, ID %d, QoS %d\n",
2704 vf_id, vlan_id, qos);
2705 return -EINVAL;
2706 }
2707
2708 if (vlan_proto != htons(ETH_P_8021Q)) {
4015d11e 2709 dev_err(dev, "VF VLAN protocol is not supported\n");
2710 return -EPROTONOSUPPORT;
2711 }
2712
2713 vf = &pf->vf[vf_id];
2714 vsi = pf->vsi[vf->lan_vsi_idx];
4c66d227 2715 if (ice_check_vf_init(pf, vf))
7c710869 2716 return -EBUSY;
7c710869 2717
2718 vlanprio = vlan_id | (qos << VLAN_PRIO_SHIFT);
2719
2720 if (vf->port_vlan_info == vlanprio) {
7c710869 2721 /* duplicate request, so just return success */
4015d11e 2722 dev_dbg(dev, "Duplicate pvid %d request\n", vlanprio);
2723 return ret;
2724 }
2725
7c710869 2726 if (vlan_id || qos) {
2727 /* remove VLAN 0 filter set by default when transitioning from
2728 * no port VLAN to a port VLAN. No change to old port VLAN on
2729 * failure.
2730 */
2731 ret = ice_vsi_kill_vlan(vsi, 0);
2732 if (ret)
2733 return ret;
77a7a84d 2734 ret = ice_vsi_manage_pvid(vsi, vlanprio, true);
7c710869 2735 if (ret)
72634bc2 2736 return ret;
7c710869 2737 } else {
2738 /* add VLAN 0 filter back when transitioning from port VLAN to
2739 * no port VLAN. No change to old port VLAN on failure.
2740 */
2741 ret = ice_vsi_add_vlan(vsi, 0);
2742 if (ret)
2743 return ret;
2744 ret = ice_vsi_manage_pvid(vsi, 0, false);
2745 if (ret)
2746 goto error_manage_pvid;
7c710869
AV
2747 }
2748
2749 if (vlan_id) {
4015d11e 2750 dev_info(dev, "Setting VLAN %d, QoS 0x%x on VF %d\n",
2751 vlan_id, qos, vf_id);
2752
72634bc2 2753 /* add VLAN filter for the port VLAN */
7c710869
AV
2754 ret = ice_vsi_add_vlan(vsi, vlan_id);
2755 if (ret)
b093841f 2756 goto error_manage_pvid;
7c710869 2757 }
2758 /* remove old port VLAN filter with valid VLAN ID or QoS fields */
2759 if (vf->port_vlan_info)
2760 ice_vsi_kill_vlan(vsi, vf->port_vlan_info & VLAN_VID_MASK);
7c710869 2761
72634bc2 2762 /* keep port VLAN information persistent on resets */
b093841f 2763 vf->port_vlan_info = le16_to_cpu(vsi->info.pvid);
7c710869 2764
b093841f 2765error_manage_pvid:
2766 return ret;
2767}
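
/* Illustrative sketch (not part of the driver): the port VLAN field
 * packs the 12-bit VLAN ID and 3-bit QoS priority into one u16 using the
 * standard 802.1Q TCI layout from <linux/if_vlan.h>.
 */
static u16 ice_example_encode_pvid(u16 vlan_id, u8 qos)
{
	return (vlan_id & VLAN_VID_MASK) | (qos << VLAN_PRIO_SHIFT);
}

/* Usage (host side): ip link set <pf-netdev> vf 0 vlan 100 qos 3 */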
2768
2769/**
2770 * ice_vf_vlan_offload_ena - determine if capabilities support VLAN offloads
2771 * @caps: VF driver negotiated capabilities
2772 *
2773 * Return true if VIRTCHNL_VF_OFFLOAD_VLAN capability is set, else return false
2774 */
2775static bool ice_vf_vlan_offload_ena(u32 caps)
2776{
2777 return !!(caps & VIRTCHNL_VF_OFFLOAD_VLAN);
2778}
2779
2780/**
2781 * ice_vc_process_vlan_msg
2782 * @vf: pointer to the VF info
2783 * @msg: pointer to the msg buffer
2784 * @add_v: Add VLAN if true, otherwise delete VLAN
2785 *
f9867df6 2786 * Process virtchnl op to add or remove programmed guest VLAN ID
1071a835
AV
2787 */
2788static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
2789{
cf6c6e01 2790 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1071a835
AV
2791 struct virtchnl_vlan_filter_list *vfl =
2792 (struct virtchnl_vlan_filter_list *)msg;
1071a835 2793 struct ice_pf *pf = vf->pf;
5eda8afd 2794 bool vlan_promisc = false;
1071a835 2795 struct ice_vsi *vsi;
4015d11e 2796 struct device *dev;
2797 struct ice_hw *hw;
2798 int status = 0;
2799 u8 promisc_m;
1071a835
AV
2800 int i;
2801
4015d11e 2802 dev = ice_pf_to_dev(pf);
1071a835 2803 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
cf6c6e01 2804 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2805 goto error_param;
2806 }
2807
2808 if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
2809 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2810 goto error_param;
2811 }
2812
1071a835 2813 if (!ice_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
cf6c6e01 2814 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2815 goto error_param;
2816 }
2817
1071a835 2818 for (i = 0; i < vfl->num_elements; i++) {
61c9ce86 2819 if (vfl->vlan_id[i] >= VLAN_N_VID) {
cf6c6e01 2820 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2821 dev_err(dev, "invalid VF VLAN id %d\n",
2822 vfl->vlan_id[i]);
2823 goto error_param;
2824 }
2825 }
2826
5eda8afd 2827 hw = &pf->hw;
f1ef73f5 2828 vsi = pf->vsi[vf->lan_vsi_idx];
1071a835 2829 if (!vsi) {
cf6c6e01 2830 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2831 goto error_param;
2832 }
2833
2834 if (add_v && !ice_is_vf_trusted(vf) &&
2835 vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
19cce2c6 2836 dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode in order to add more VLANs\n",
2837 vf->vf_id);
2838 /* There is no need to let the VF know about not being trusted,
2839 * so we can just return a success message here
2840 */
2841 goto error_param;
2842 }
2843
1071a835 2844 if (vsi->info.pvid) {
cf6c6e01 2845 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2846 goto error_param;
2847 }
2848
5eda8afd
AA
2849 if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
2850 test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
2851 vlan_promisc = true;
2852
2853 if (add_v) {
2854 for (i = 0; i < vfl->num_elements; i++) {
2855 u16 vid = vfl->vlan_id[i];
2856
5079b853 2857 if (!ice_is_vf_trusted(vf) &&
cd6d6b83 2858 vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
19cce2c6 2859 dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode in order to add more VLANs\n",
2860 vf->vf_id);
2861 /* There is no need to let the VF know about not
2862 * being trusted, so we can just return a success
2863 * message here as well.
2864 */
2865 goto error_param;
2866 }
2867
2868 /* we add VLAN 0 by default for each VF so we can enable
2869 * Tx VLAN anti-spoof without triggering MDD events so
2870 * we don't need to add it again here
2871 */
2872 if (!vid)
2873 continue;
2874
2875 status = ice_vsi_add_vlan(vsi, vid);
2876 if (status) {
cf6c6e01 2877 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2878 goto error_param;
2879 }
1071a835 2880
2881 /* Enable VLAN pruning when non-zero VLAN is added */
2882 if (!vlan_promisc && vid &&
2883 !ice_vsi_is_vlan_pruning_ena(vsi)) {
2884 status = ice_cfg_vlan_pruning(vsi, true, false);
2885 if (status) {
cf6c6e01 2886 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
19cce2c6 2887 dev_err(dev, "Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
2888 vid, status);
2889 goto error_param;
2890 }
42f3efef 2891 } else if (vlan_promisc) {
2892 /* Enable Ucast/Mcast VLAN promiscuous mode */
2893 promisc_m = ICE_PROMISC_VLAN_TX |
2894 ICE_PROMISC_VLAN_RX;
2895
2896 status = ice_set_vsi_promisc(hw, vsi->idx,
2897 promisc_m, vid);
2898 if (status) {
2899 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
19cce2c6 2900 dev_err(dev, "Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n",
5eda8afd 2901 vid, status);
cf6c6e01 2902 }
2903 }
2904 }
2905 } else {
2906 /* For an untrusted VF, the number of VLAN elements passed to
2907 * the PF for removal might be greater than the number of VLAN
2908 * filters programmed for that VF, so use the actual number of
2909 * VLANs added earlier with the add VLAN opcode. This avoids
2910 * removing a VLAN that doesn't exist, which would result in
2911 * sending an erroneous failure message back to the VF
2912 */
2913 int num_vf_vlan;
2914
cd6d6b83 2915 num_vf_vlan = vsi->num_vlan;
bb877b22 2916 for (i = 0; i < vfl->num_elements && i < num_vf_vlan; i++) {
2917 u16 vid = vfl->vlan_id[i];
2918
2919 /* we add VLAN 0 by default for each VF so we can enable
2920 * Tx VLAN anti-spoof without triggering MDD events so
2921 * we don't want a VIRTCHNL request to remove it
2922 */
2923 if (!vid)
2924 continue;
2925
2926 /* Make sure ice_vsi_kill_vlan is successful before
2927 * updating VLAN information
2928 */
2929 status = ice_vsi_kill_vlan(vsi, vid);
2930 if (status) {
cf6c6e01 2931 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2932 goto error_param;
2933 }
2934
2935 /* Disable VLAN pruning when only VLAN 0 is left */
2936 if (vsi->num_vlan == 1 &&
2937 ice_vsi_is_vlan_pruning_ena(vsi))
cd186e51 2938 ice_cfg_vlan_pruning(vsi, false, false);
2939
2940 /* Disable Unicast/Multicast VLAN promiscuous mode */
2941 if (vlan_promisc) {
2942 promisc_m = ICE_PROMISC_VLAN_TX |
2943 ICE_PROMISC_VLAN_RX;
1071a835 2944
2945 ice_clear_vsi_promisc(hw, vsi->idx,
2946 promisc_m, vid);
2947 }
2948 }
2949 }
2950
2951error_param:
2952 /* send the response to the VF */
2953 if (add_v)
cf6c6e01 2954 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, v_ret,
2955 NULL, 0);
2956 else
cf6c6e01 2957 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, v_ret,
2958 NULL, 0);
2959}
2960
2961/**
2962 * ice_vc_add_vlan_msg
2963 * @vf: pointer to the VF info
2964 * @msg: pointer to the msg buffer
2965 *
f9867df6 2966 * Add and program guest VLAN ID
2967 */
2968static int ice_vc_add_vlan_msg(struct ice_vf *vf, u8 *msg)
2969{
2970 return ice_vc_process_vlan_msg(vf, msg, true);
2971}
2972
2973/**
2974 * ice_vc_remove_vlan_msg
2975 * @vf: pointer to the VF info
2976 * @msg: pointer to the msg buffer
2977 *
f9867df6 2978 * remove programmed guest VLAN ID
2979 */
2980static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg)
2981{
2982 return ice_vc_process_vlan_msg(vf, msg, false);
2983}
2984
2985/**
2986 * ice_vc_ena_vlan_stripping
2987 * @vf: pointer to the VF info
2988 *
2989 * Enable VLAN header stripping for a given VF
2990 */
2991static int ice_vc_ena_vlan_stripping(struct ice_vf *vf)
2992{
cf6c6e01 2993 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2994 struct ice_pf *pf = vf->pf;
2995 struct ice_vsi *vsi;
2996
2997 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
cf6c6e01 2998 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2999 goto error_param;
3000 }
3001
3002 if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
3003 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3004 goto error_param;
3005 }
3006
3007 vsi = pf->vsi[vf->lan_vsi_idx];
3008 if (ice_vsi_manage_vlan_stripping(vsi, true))
cf6c6e01 3009 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3010
3011error_param:
3012 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
cf6c6e01 3013 v_ret, NULL, 0);
3014}
3015
3016/**
3017 * ice_vc_dis_vlan_stripping
3018 * @vf: pointer to the VF info
3019 *
3020 * Disable VLAN header stripping for a given VF
3021 */
3022static int ice_vc_dis_vlan_stripping(struct ice_vf *vf)
3023{
cf6c6e01 3024 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3025 struct ice_pf *pf = vf->pf;
3026 struct ice_vsi *vsi;
3027
3028 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
cf6c6e01 3029 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3030 goto error_param;
3031 }
3032
3033 if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
3034 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3035 goto error_param;
3036 }
3037
1071a835 3038 vsi = pf->vsi[vf->lan_vsi_idx];
f1ef73f5 3039 if (!vsi) {
cf6c6e01 3040 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3041 goto error_param;
3042 }
3043
1071a835 3044 if (ice_vsi_manage_vlan_stripping(vsi, false))
cf6c6e01 3045 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3046
3047error_param:
3048 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
cf6c6e01 3049 v_ret, NULL, 0);
3050}
3051
3052/**
3053 * ice_vf_init_vlan_stripping - enable/disable VLAN stripping on initialization
3054 * @vf: VF to enable/disable VLAN stripping for on initialization
3055 *
3056 * If the VIRTCHNL_VF_OFFLOAD_VLAN flag is set enable VLAN stripping, else if
3057 * the flag is cleared then we want to disable stripping. For example, the flag
3058 * will be cleared when port VLANs are configured by the administrator before
3059 * passing the VF to the guest or if the AVF driver doesn't support VLAN
3060 * offloads.
3061 */
3062static int ice_vf_init_vlan_stripping(struct ice_vf *vf)
3063{
3064 struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
3065
3066 if (!vsi)
3067 return -EINVAL;
3068
3069 /* don't modify stripping if port VLAN is configured */
3070 if (vsi->info.pvid)
3071 return 0;
3072
3073 if (ice_vf_vlan_offload_ena(vf->driver_caps))
3074 return ice_vsi_manage_vlan_stripping(vsi, true);
3075 else
3076 return ice_vsi_manage_vlan_stripping(vsi, false);
3077}
3078
3079/**
3080 * ice_vc_process_vf_msg - Process request from VF
3081 * @pf: pointer to the PF structure
3082 * @event: pointer to the AQ event
3083 *
3084 * called from the common asq/arq handler to
3085 * process request from VF
3086 */
3087void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
3088{
3089 u32 v_opcode = le32_to_cpu(event->desc.cookie_high);
3090 s16 vf_id = le16_to_cpu(event->desc.retval);
3091 u16 msglen = event->msg_len;
3092 u8 *msg = event->msg_buf;
3093 struct ice_vf *vf = NULL;
4015d11e 3094 struct device *dev;
3095 int err = 0;
3096
4015d11e 3097 dev = ice_pf_to_dev(pf);
4c66d227 3098 if (ice_validate_vf_id(pf, vf_id)) {
3099 err = -EINVAL;
3100 goto error_handler;
3101 }
3102
3103 vf = &pf->vf[vf_id];
3104
3105 /* Check if VF is disabled. */
3106 if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) {
3107 err = -EPERM;
3108 goto error_handler;
3109 }
3110
3111 /* Perform basic checks on the msg */
3112 err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
3113 if (err) {
cf6c6e01 3114 if (err == VIRTCHNL_STATUS_ERR_PARAM)
3115 err = -EPERM;
3116 else
3117 err = -EINVAL;
3118 }
3119
3120error_handler:
3121 if (err) {
3122 ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM,
3123 NULL, 0);
4015d11e 3124 dev_err(dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
3125 vf_id, v_opcode, msglen, err);
3126 return;
3127 }
3128
3129 switch (v_opcode) {
3130 case VIRTCHNL_OP_VERSION:
3131 err = ice_vc_get_ver_msg(vf, msg);
3132 break;
3133 case VIRTCHNL_OP_GET_VF_RESOURCES:
3134 err = ice_vc_get_vf_res_msg(vf, msg);
2f9ec241 3135 if (ice_vf_init_vlan_stripping(vf))
19cce2c6 3136 dev_err(dev, "Failed to initialize VLAN stripping for VF %d\n",
2f9ec241 3137 vf->vf_id);
dfc62400 3138 ice_vc_notify_vf_link_state(vf);
3139 break;
3140 case VIRTCHNL_OP_RESET_VF:
3141 ice_vc_reset_vf_msg(vf);
3142 break;
3143 case VIRTCHNL_OP_ADD_ETH_ADDR:
3144 err = ice_vc_add_mac_addr_msg(vf, msg);
3145 break;
3146 case VIRTCHNL_OP_DEL_ETH_ADDR:
3147 err = ice_vc_del_mac_addr_msg(vf, msg);
3148 break;
3149 case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
3150 err = ice_vc_cfg_qs_msg(vf, msg);
3151 break;
3152 case VIRTCHNL_OP_ENABLE_QUEUES:
3153 err = ice_vc_ena_qs_msg(vf, msg);
3154 ice_vc_notify_vf_link_state(vf);
3155 break;
3156 case VIRTCHNL_OP_DISABLE_QUEUES:
3157 err = ice_vc_dis_qs_msg(vf, msg);
3158 break;
3159 case VIRTCHNL_OP_REQUEST_QUEUES:
3160 err = ice_vc_request_qs_msg(vf, msg);
3161 break;
3162 case VIRTCHNL_OP_CONFIG_IRQ_MAP:
3163 err = ice_vc_cfg_irq_map_msg(vf, msg);
3164 break;
3165 case VIRTCHNL_OP_CONFIG_RSS_KEY:
3166 err = ice_vc_config_rss_key(vf, msg);
3167 break;
3168 case VIRTCHNL_OP_CONFIG_RSS_LUT:
3169 err = ice_vc_config_rss_lut(vf, msg);
3170 break;
3171 case VIRTCHNL_OP_GET_STATS:
3172 err = ice_vc_get_stats_msg(vf, msg);
3173 break;
3174 case VIRTCHNL_OP_ADD_VLAN:
3175 err = ice_vc_add_vlan_msg(vf, msg);
3176 break;
3177 case VIRTCHNL_OP_DEL_VLAN:
3178 err = ice_vc_remove_vlan_msg(vf, msg);
3179 break;
3180 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
3181 err = ice_vc_ena_vlan_stripping(vf);
3182 break;
3183 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
3184 err = ice_vc_dis_vlan_stripping(vf);
3185 break;
3186 case VIRTCHNL_OP_UNKNOWN:
3187 default:
3188 dev_err(dev, "Unsupported opcode %d from VF %d\n", v_opcode,
3189 vf_id);
3190 err = ice_vc_send_msg_to_vf(vf, v_opcode,
3191 VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
3192 NULL, 0);
3193 break;
3194 }
3195 if (err) {
3196 /* Helper function cares less about error return values here
3197 * as it is busy with pending work.
3198 */
4015d11e 3199 dev_info(dev, "PF failed to honor VF %d, opcode %d, error %d\n",
3200 vf_id, v_opcode, err);
3201 }
3202}
3203
3204/**
3205 * ice_get_vf_cfg
3206 * @netdev: network interface device structure
3207 * @vf_id: VF identifier
3208 * @ivi: VF configuration structure
3209 *
3210 * return VF configuration
3211 */
3212int
3213ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
7c710869 3214{
4c66d227 3215 struct ice_pf *pf = ice_netdev_to_pf(netdev);
3216 struct ice_vf *vf;
3217
4c66d227 3218 if (ice_validate_vf_id(pf, vf_id))
7c710869 3219 return -EINVAL;
3220
3221 vf = &pf->vf[vf_id];
7c710869 3222
4c66d227 3223 if (ice_check_vf_init(pf, vf))
7c710869 3224 return -EBUSY;
3225
3226 ivi->vf = vf_id;
3227 ether_addr_copy(ivi->mac, vf->dflt_lan_addr.addr);
3228
3229 /* VF configuration for VLAN and applicable QoS */
3230 ivi->vlan = vf->port_vlan_info & VLAN_VID_MASK;
3231 ivi->qos = (vf->port_vlan_info & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
3232
3233 ivi->trusted = vf->trusted;
3234 ivi->spoofchk = vf->spoofchk;
3235 if (!vf->link_forced)
3236 ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
3237 else if (vf->link_up)
3238 ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
3239 else
3240 ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
3241 ivi->max_tx_rate = vf->tx_rate;
3242 ivi->min_tx_rate = 0;
3243 return 0;
3244}
3245
3246/**
3247 * ice_wait_on_vf_reset
3248 * @vf: the VF being reset
3249 *
3250 * Poll to make sure a given VF is ready after reset
3251 */
3252static void ice_wait_on_vf_reset(struct ice_vf *vf)
3253{
3254 int i;
3255
3256 for (i = 0; i < ICE_MAX_VF_RESET_WAIT; i++) {
3257 if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
3258 break;
3259 msleep(20);
3260 }
3261}
3262
3263/**
3264 * ice_set_vf_mac
3265 * @netdev: network interface device structure
3266 * @vf_id: VF identifier
f9867df6 3267 * @mac: MAC address
7c710869 3268 *
f9867df6 3269 * program VF MAC address
3270 */
3271int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
3272{
4c66d227 3273 struct ice_pf *pf = ice_netdev_to_pf(netdev);
3274 struct ice_vf *vf;
3275 int ret = 0;
3276
4c66d227 3277 if (ice_validate_vf_id(pf, vf_id))
7c710869 3278 return -EINVAL;
3279
3280 vf = &pf->vf[vf_id];
3281 /* Don't set MAC on disabled VF */
3282 if (ice_is_vf_disabled(vf))
3283 return -EINVAL;
3284
3285 /* In case VF is in reset mode, wait until it is completed. Depending
3286 * on factors like queue disabling routine, this could take ~250ms
3287 */
3288 ice_wait_on_vf_reset(vf);
3289
4c66d227 3290 if (ice_check_vf_init(pf, vf))
AV
3292
3293 if (is_zero_ether_addr(mac) || is_multicast_ether_addr(mac)) {
3294 netdev_err(netdev, "%pM not a valid unicast address\n", mac);
3295 return -EINVAL;
3296 }
3297
f9867df6 3298 /* copy MAC into dflt_lan_addr and trigger a VF reset. The reset
7c710869
AV
3299 * flow will use the updated dflt_lan_addr and add a MAC filter
3300 * using ice_add_mac. Also set pf_set_mac to indicate that the PF has
3301 * set the MAC address for this VF.
3302 */
3303 ether_addr_copy(vf->dflt_lan_addr.addr, mac);
3304 vf->pf_set_mac = true;
19cce2c6 3305 netdev_info(netdev, "MAC on VF %d set to %pM. VF driver will be reinitialized\n",
7c710869
AV
3306 vf_id, mac);
3307
ff010eca 3308 ice_vc_reset_vf(vf);
7c710869
AV
3309 return ret;
3310}
3311
3312/**
3313 * ice_set_vf_trust
3314 * @netdev: network interface device structure
3315 * @vf_id: VF identifier
3316 * @trusted: Boolean value to enable/disable trusted VF
3317 *
3318 * Enable or disable a given VF as trusted
3319 */
3320int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
3321{
4c66d227 3322 struct ice_pf *pf = ice_netdev_to_pf(netdev);
3323 struct ice_vf *vf;
3324
4c66d227 3325 if (ice_validate_vf_id(pf, vf_id))
7c710869 3326 return -EINVAL;
3327
3328 vf = &pf->vf[vf_id];
3329 /* Don't set Trusted Mode on disabled VF */
3330 if (ice_is_vf_disabled(vf))
3331 return -EINVAL;
3332
3333 /* In case VF is in reset mode, wait until it is completed. Depending
3334 * on factors like queue disabling routine, this could take ~250ms
3335 */
3336 ice_wait_on_vf_reset(vf);
3337
4c66d227 3338 if (ice_check_vf_init(pf, vf))
AV
3340
3341 /* Check if already trusted */
3342 if (trusted == vf->trusted)
3343 return 0;
3344
3345 vf->trusted = trusted;
ff010eca 3346 ice_vc_reset_vf(vf);
19cce2c6 3347 dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n",
7c710869
AV
3348 vf_id, trusted ? "" : "un");
3349
3350 return 0;
3351}
3352
3353/**
3354 * ice_set_vf_link_state
3355 * @netdev: network interface device structure
3356 * @vf_id: VF identifier
3357 * @link_state: required link state
3358 *
3359 * Set VF's link state, irrespective of physical link state status
3360 */
3361int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
3362{
4c66d227 3363 struct ice_pf *pf = ice_netdev_to_pf(netdev);
7c710869 3364 struct ice_vf *vf;
7c710869 3365
4c66d227 3366 if (ice_validate_vf_id(pf, vf_id))
7c710869 3367 return -EINVAL;
3368
3369 vf = &pf->vf[vf_id];
4c66d227 3370 if (ice_check_vf_init(pf, vf))
7c710869 3371 return -EBUSY;
7c710869 3372
3373 switch (link_state) {
3374 case IFLA_VF_LINK_STATE_AUTO:
3375 vf->link_forced = false;
3376 break;
3377 case IFLA_VF_LINK_STATE_ENABLE:
3378 vf->link_forced = true;
3379 vf->link_up = true;
3380 break;
3381 case IFLA_VF_LINK_STATE_DISABLE:
3382 vf->link_forced = true;
3383 vf->link_up = false;
3384 break;
3385 default:
3386 return -EINVAL;
3387 }
3388
26a91525 3389 ice_vc_notify_vf_link_state(vf);
3390
3391 return 0;
3392}
3393
3394/**
3395 * ice_get_vf_stats - populate some stats for the VF
3396 * @netdev: the netdev of the PF
3397 * @vf_id: the host OS identifier (0-255)
3398 * @vf_stats: pointer to the OS memory to be initialized
3399 */
3400int ice_get_vf_stats(struct net_device *netdev, int vf_id,
3401 struct ifla_vf_stats *vf_stats)
3402{
3403 struct ice_pf *pf = ice_netdev_to_pf(netdev);
3404 struct ice_eth_stats *stats;
3405 struct ice_vsi *vsi;
3406 struct ice_vf *vf;
3407
3408 if (ice_validate_vf_id(pf, vf_id))
3409 return -EINVAL;
3410
3411 vf = &pf->vf[vf_id];
3412
3413 if (ice_check_vf_init(pf, vf))
3414 return -EBUSY;
3415
3416 vsi = pf->vsi[vf->lan_vsi_idx];
3417 if (!vsi)
3418 return -EINVAL;
3419
3420 ice_update_eth_stats(vsi);
3421 stats = &vsi->eth_stats;
3422
3423 memset(vf_stats, 0, sizeof(*vf_stats));
3424
3425 vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
3426 stats->rx_multicast;
3427 vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
3428 stats->tx_multicast;
3429 vf_stats->rx_bytes = stats->rx_bytes;
3430 vf_stats->tx_bytes = stats->tx_bytes;
3431 vf_stats->broadcast = stats->rx_broadcast;
3432 vf_stats->multicast = stats->rx_multicast;
3433 vf_stats->rx_dropped = stats->rx_discards;
3434 vf_stats->tx_dropped = stats->tx_discards;
3435
3436 return 0;
3437}
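
/* Note on the rollup above: rx_packets/tx_packets sum the hardware's
 * unicast, broadcast and multicast counters, while the ifla_vf_stats
 * broadcast/multicast fields report the Rx direction only.
 */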