ice: move clear_malvf call in ice_free_vfs
drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
#include "ice_flow.h"
#include "ice_eswitch.h"
#include "ice_virtchnl_allowlist.h"
#include "ice_flex_pipe.h"
#include "ice_vf_vsi_vlan_ops.h"
#include "ice_vlan.h"

#define FIELD_SELECTOR(proto_hdr_field) \
	BIT((proto_hdr_field) & PROTO_HDR_FIELD_MASK)

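/* A worked example of FIELD_SELECTOR (a sketch; the exact field encoding
 * lives in virtchnl.h): each VIRTCHNL_PROTO_HDR_XXX_YYY field enum packs the
 * per-header field index in its low bits, so masking with
 * PROTO_HDR_FIELD_MASK and shifting BIT() by the result yields a per-header
 * bit selector. For instance, if VIRTCHNL_PROTO_HDR_IPV4_SRC decodes to
 * field index 0, then FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) == BIT(0).
 */
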
struct ice_vc_hdr_match_type {
	u32 vc_hdr;	/* virtchnl headers (VIRTCHNL_PROTO_HDR_XXX) */
	u32 ice_hdr;	/* ice headers (ICE_FLOW_SEG_HDR_XXX) */
};

static const struct ice_vc_hdr_match_type ice_vc_hdr_list[] = {
	{VIRTCHNL_PROTO_HDR_NONE,	ICE_FLOW_SEG_HDR_NONE},
	{VIRTCHNL_PROTO_HDR_ETH,	ICE_FLOW_SEG_HDR_ETH},
	{VIRTCHNL_PROTO_HDR_S_VLAN,	ICE_FLOW_SEG_HDR_VLAN},
	{VIRTCHNL_PROTO_HDR_C_VLAN,	ICE_FLOW_SEG_HDR_VLAN},
	{VIRTCHNL_PROTO_HDR_IPV4,	ICE_FLOW_SEG_HDR_IPV4 |
					ICE_FLOW_SEG_HDR_IPV_OTHER},
	{VIRTCHNL_PROTO_HDR_IPV6,	ICE_FLOW_SEG_HDR_IPV6 |
					ICE_FLOW_SEG_HDR_IPV_OTHER},
	{VIRTCHNL_PROTO_HDR_TCP,	ICE_FLOW_SEG_HDR_TCP},
	{VIRTCHNL_PROTO_HDR_UDP,	ICE_FLOW_SEG_HDR_UDP},
	{VIRTCHNL_PROTO_HDR_SCTP,	ICE_FLOW_SEG_HDR_SCTP},
	{VIRTCHNL_PROTO_HDR_PPPOE,	ICE_FLOW_SEG_HDR_PPPOE},
	{VIRTCHNL_PROTO_HDR_GTPU_IP,	ICE_FLOW_SEG_HDR_GTPU_IP},
	{VIRTCHNL_PROTO_HDR_GTPU_EH,	ICE_FLOW_SEG_HDR_GTPU_EH},
	{VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN,
					ICE_FLOW_SEG_HDR_GTPU_DWN},
	{VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP,
					ICE_FLOW_SEG_HDR_GTPU_UP},
	{VIRTCHNL_PROTO_HDR_L2TPV3,	ICE_FLOW_SEG_HDR_L2TPV3},
	{VIRTCHNL_PROTO_HDR_ESP,	ICE_FLOW_SEG_HDR_ESP},
	{VIRTCHNL_PROTO_HDR_AH,		ICE_FLOW_SEG_HDR_AH},
	{VIRTCHNL_PROTO_HDR_PFCP,	ICE_FLOW_SEG_HDR_PFCP_SESSION},
};

struct ice_vc_hash_field_match_type {
	u32 vc_hdr;		/* virtchnl headers
				 * (VIRTCHNL_PROTO_HDR_XXX)
				 */
	u32 vc_hash_field;	/* virtchnl hash fields selector
				 * FIELD_SELECTOR((VIRTCHNL_PROTO_HDR_ETH_XXX))
				 */
	u64 ice_hash_field;	/* ice hash fields
				 * (BIT_ULL(ICE_FLOW_FIELD_IDX_XXX))
				 */
};

static const struct
ice_vc_hash_field_match_type ice_vc_hash_field_list[] = {
	{VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC),
		BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA)},
	{VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
		BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_DA)},
	{VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
		ICE_FLOW_HASH_ETH},
	{VIRTCHNL_PROTO_HDR_ETH,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE),
		BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_TYPE)},
	{VIRTCHNL_PROTO_HDR_S_VLAN,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_S_VLAN_ID),
		BIT_ULL(ICE_FLOW_FIELD_IDX_S_VLAN)},
	{VIRTCHNL_PROTO_HDR_C_VLAN,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_C_VLAN_ID),
		BIT_ULL(ICE_FLOW_FIELD_IDX_C_VLAN)},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
		ICE_FLOW_HASH_IPV4},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
		ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
		ICE_FLOW_HASH_IPV6},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) |
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA) |
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
		ICE_FLOW_HASH_IPV6 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
	{VIRTCHNL_PROTO_HDR_TCP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)},
	{VIRTCHNL_PROTO_HDR_TCP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)},
	{VIRTCHNL_PROTO_HDR_TCP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
		ICE_FLOW_HASH_TCP_PORT},
	{VIRTCHNL_PROTO_HDR_UDP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)},
	{VIRTCHNL_PROTO_HDR_UDP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)},
	{VIRTCHNL_PROTO_HDR_UDP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
		ICE_FLOW_HASH_UDP_PORT},
	{VIRTCHNL_PROTO_HDR_SCTP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)},
	{VIRTCHNL_PROTO_HDR_SCTP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)},
	{VIRTCHNL_PROTO_HDR_SCTP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
		ICE_FLOW_HASH_SCTP_PORT},
	{VIRTCHNL_PROTO_HDR_PPPOE,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID),
		BIT_ULL(ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID)},
	{VIRTCHNL_PROTO_HDR_GTPU_IP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_GTPU_IP_TEID),
		BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_IP_TEID)},
	{VIRTCHNL_PROTO_HDR_L2TPV3,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID),
		BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID)},
	{VIRTCHNL_PROTO_HDR_ESP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ESP_SPI),
		BIT_ULL(ICE_FLOW_FIELD_IDX_ESP_SPI)},
	{VIRTCHNL_PROTO_HDR_AH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_AH_SPI),
		BIT_ULL(ICE_FLOW_FIELD_IDX_AH_SPI)},
	{VIRTCHNL_PROTO_HDR_PFCP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PFCP_SEID),
		BIT_ULL(ICE_FLOW_FIELD_IDX_PFCP_SEID)},
};

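/* A sketch of the intended use, inferred from the table layout: the virtchnl
 * RSS handlers walk these two arrays linearly, translating each
 * VIRTCHNL_PROTO_HDR_XXX header and FIELD_SELECTOR() bit a VF requests into
 * the matching ICE_FLOW_SEG_HDR_XXX / ICE_FLOW_FIELD_IDX_XXX values that the
 * ice flow engine understands.
 */
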
/**
 * ice_get_vf_vsi - get VF's VSI based on the stored index
 * @vf: VF used to get VSI
 */
struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf)
{
	return vf->pf->vsi[vf->lan_vsi_idx];
}

/**
 * ice_validate_vf_id - helper to check if VF ID is valid
 * @pf: pointer to the PF structure
 * @vf_id: the ID of the VF to check
 */
static int ice_validate_vf_id(struct ice_pf *pf, u16 vf_id)
{
	/* vf_id range is only valid for 0-255, and should always be unsigned */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(ice_pf_to_dev(pf), "Invalid VF ID: %u\n", vf_id);
		return -EINVAL;
	}
	return 0;
}

/**
 * ice_check_vf_init - helper to check if VF init complete
 * @pf: pointer to the PF structure
 * @vf: the pointer to the VF to check
 */
static int ice_check_vf_init(struct ice_pf *pf, struct ice_vf *vf)
{
	if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
		dev_err(ice_pf_to_dev(pf), "VF ID: %u in reset. Try again.\n",
			vf->vf_id);
		return -EBUSY;
	}
	return 0;
}

/**
 * ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
 * @pf: pointer to the PF structure
 * @v_opcode: operation code
 * @v_retval: return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 */
static void
ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
		    enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
{
	struct ice_hw *hw = &pf->hw;
	unsigned int i;

	ice_for_each_vf(pf, i) {
		struct ice_vf *vf = &pf->vf[i];

		/* Not all vfs are enabled so skip the ones that are not */
		if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
		    !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
			continue;

		/* Ignore return value on purpose - a given VF may fail, but
		 * we need to keep going and send to all of them
		 */
		ice_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, msg,
				      msglen, NULL);
	}
}

/**
 * ice_set_pfe_link - Set the link speed/status of the virtchnl_pf_event
 * @vf: pointer to the VF structure
 * @pfe: pointer to the virtchnl_pf_event to set link speed/status for
 * @ice_link_speed: link speed specified by ICE_AQ_LINK_SPEED_*
 * @link_up: whether or not to set the link up/down
 */
static void
ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
		 int ice_link_speed, bool link_up)
{
	if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
		pfe->event_data.link_event_adv.link_status = link_up;
		/* Speed in Mbps */
		pfe->event_data.link_event_adv.link_speed =
			ice_conv_link_speed_to_virtchnl(true, ice_link_speed);
	} else {
		pfe->event_data.link_event.link_status = link_up;
		/* Legacy method for virtchnl link speeds */
		pfe->event_data.link_event.link_speed =
			(enum virtchnl_link_speed)
			ice_conv_link_speed_to_virtchnl(false, ice_link_speed);
	}
}

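/* For example (illustrative values): a VF that negotiated
 * VIRTCHNL_VF_CAP_ADV_LINK_SPEED would see link_event_adv.link_speed carry a
 * plain Mbps number such as 25000 for a 25G link, while a legacy VF receives
 * the closest virtchnl_link_speed enum value instead.
 */
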
/**
 * ice_vf_has_no_qs_ena - check if the VF has any Rx or Tx queues enabled
 * @vf: the VF to check
 *
 * Returns true if the VF has no Rx and no Tx queues enabled and returns false
 * otherwise
 */
static bool ice_vf_has_no_qs_ena(struct ice_vf *vf)
{
	return (!bitmap_weight(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF) &&
		!bitmap_weight(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF));
}

/**
 * ice_is_vf_link_up - check if the VF's link is up
 * @vf: VF to check if link is up
 */
static bool ice_is_vf_link_up(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;

	if (ice_check_vf_init(pf, vf))
		return false;

	if (ice_vf_has_no_qs_ena(vf))
		return false;
	else if (vf->link_forced)
		return vf->link_up;
	else
		return pf->hw.port_info->phy.link_info.link_info &
			ICE_AQ_LINK_UP;
}

/**
 * ice_vc_notify_vf_link_state - Inform a VF of link status
 * @vf: pointer to the VF structure
 *
 * send a link status message to a single VF
 */
void ice_vc_notify_vf_link_state(struct ice_vf *vf)
{
	struct virtchnl_pf_event pfe = { 0 };
	struct ice_hw *hw = &vf->pf->hw;

	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = PF_EVENT_SEVERITY_INFO;

	if (ice_is_vf_link_up(vf))
		ice_set_pfe_link(vf, &pfe,
				 hw->port_info->phy.link_info.link_speed, true);
	else
		ice_set_pfe_link(vf, &pfe, ICE_AQ_LINK_SPEED_UNKNOWN, false);

	ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
			      VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
			      sizeof(pfe), NULL);
}

/**
 * ice_vf_invalidate_vsi - invalidate vsi_idx/vsi_num to remove VSI access
 * @vf: VF to remove access to VSI for
 */
static void ice_vf_invalidate_vsi(struct ice_vf *vf)
{
	vf->lan_vsi_idx = ICE_NO_VSI;
	vf->lan_vsi_num = ICE_NO_VSI;
}

/**
 * ice_vf_vsi_release - invalidate the VF's VSI after freeing it
 * @vf: invalidate this VF's VSI after freeing it
 */
static void ice_vf_vsi_release(struct ice_vf *vf)
{
	ice_vsi_release(ice_get_vf_vsi(vf));
	ice_vf_invalidate_vsi(vf);
}

/**
 * ice_vf_ctrl_invalidate_vsi - invalidate ctrl_vsi_idx to remove VSI access
 * @vf: VF that control VSI is being invalidated on
 */
static void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf)
{
	vf->ctrl_vsi_idx = ICE_NO_VSI;
}

/**
 * ice_vf_ctrl_vsi_release - invalidate the VF's control VSI after freeing it
 * @vf: VF that control VSI is being released on
 */
static void ice_vf_ctrl_vsi_release(struct ice_vf *vf)
{
	ice_vsi_release(vf->pf->vsi[vf->ctrl_vsi_idx]);
	ice_vf_ctrl_invalidate_vsi(vf);
}

/**
 * ice_free_vf_res - Free a VF's resources
 * @vf: pointer to the VF info
 */
static void ice_free_vf_res(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	int i, last_vector_idx;

	/* First, disable VF's configuration API to prevent OS from
	 * accessing the VF's VSI after it's freed or invalidated.
	 */
	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
	ice_vf_fdir_exit(vf);
	/* free VF control VSI */
	if (vf->ctrl_vsi_idx != ICE_NO_VSI)
		ice_vf_ctrl_vsi_release(vf);

	/* free VSI and disconnect it from the parent uplink */
	if (vf->lan_vsi_idx != ICE_NO_VSI) {
		ice_vf_vsi_release(vf);
		vf->num_mac = 0;
	}

	last_vector_idx = vf->first_vector_idx + pf->num_msix_per_vf - 1;

	/* clear VF MDD event information */
	memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
	memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));

	/* Disable interrupts so that VF starts in a known state */
	for (i = vf->first_vector_idx; i <= last_vector_idx; i++) {
		wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M);
		ice_flush(&pf->hw);
	}
	/* reset some of the state variables keeping track of the resources */
	clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
	clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
}

/**
 * ice_dis_vf_mappings - disable the VF's MSIX and queue mappings
 * @vf: pointer to the VF structure
 */
static void ice_dis_vf_mappings(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	struct device *dev;
	int first, last, v;
	struct ice_hw *hw;

	hw = &pf->hw;
	vsi = ice_get_vf_vsi(vf);

	dev = ice_pf_to_dev(pf);
	wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
	wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);

	first = vf->first_vector_idx;
	last = first + pf->num_msix_per_vf - 1;
	for (v = first; v <= last; v++) {
		u32 reg;

		reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) &
			GLINT_VECT2FUNC_IS_PF_M) |
		       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
			GLINT_VECT2FUNC_PF_NUM_M));
		wr32(hw, GLINT_VECT2FUNC(v), reg);
	}

	if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG)
		wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0);
	else
		dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");

	if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
	else
		dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
}

/**
 * ice_sriov_free_msix_res - Reset/free any used MSIX resources
 * @pf: pointer to the PF structure
 *
 * Since no MSIX entries are taken from the pf->irq_tracker, just clear
 * the pf->sriov_base_vector.
 *
 * Returns 0 on success, and -EINVAL on error.
 */
static int ice_sriov_free_msix_res(struct ice_pf *pf)
{
	struct ice_res_tracker *res;

	if (!pf)
		return -EINVAL;

	res = pf->irq_tracker;
	if (!res)
		return -EINVAL;

	/* give back irq_tracker resources used */
	WARN_ON(pf->sriov_base_vector < res->num_entries);

	pf->sriov_base_vector = 0;

	return 0;
}

/**
 * ice_set_vf_state_qs_dis - Set VF queues state to disabled
 * @vf: pointer to the VF structure
 */
void ice_set_vf_state_qs_dis(struct ice_vf *vf)
{
	/* Clear Rx/Tx enabled queues flag */
	bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF);
	bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
	clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
}

/**
 * ice_dis_vf_qs - Disable the VF queues
 * @vf: pointer to the VF structure
 */
static void ice_dis_vf_qs(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
	ice_vsi_stop_all_rx_rings(vsi);
	ice_set_vf_state_qs_dis(vf);
}

/**
 * ice_free_vfs - Free all VFs
 * @pf: pointer to the PF structure
 */
void ice_free_vfs(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	unsigned int tmp, i;

	if (!pf->vf)
		return;

	ice_eswitch_release(pf);

	while (test_and_set_bit(ICE_VF_DIS, pf->state))
		usleep_range(1000, 2000);

	/* Disable IOV before freeing resources. This lets any VF drivers
	 * running in the host get themselves cleaned up before we yank
	 * the carpet out from underneath their feet.
	 */
	if (!pci_vfs_assigned(pf->pdev))
		pci_disable_sriov(pf->pdev);
	else
		dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");

	tmp = pf->num_alloc_vfs;
	pf->num_qps_per_vf = 0;
	pf->num_alloc_vfs = 0;
	for (i = 0; i < tmp; i++) {
		struct ice_vf *vf = &pf->vf[i];

		mutex_lock(&vf->cfg_lock);

		ice_dis_vf_qs(vf);

		if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
			/* disable VF qp mappings and set VF disable state */
			ice_dis_vf_mappings(vf);
			set_bit(ICE_VF_STATE_DIS, vf->vf_states);
			ice_free_vf_res(vf);
		}

		/* clear malicious info since the VF is getting released */
		if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->malvfs,
					ICE_MAX_VF_COUNT, vf->vf_id))
			dev_dbg(dev, "failed to clear malicious VF state for VF %u\n",
				vf->vf_id);

		mutex_unlock(&vf->cfg_lock);

		mutex_destroy(&vf->cfg_lock);
	}

	if (ice_sriov_free_msix_res(pf))
		dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");

	devm_kfree(dev, pf->vf);
	pf->vf = NULL;

	/* This check is for when the driver is unloaded while VFs are
	 * assigned. Setting the number of VFs to 0 through sysfs is caught
	 * before this function ever gets called.
	 */
	if (!pci_vfs_assigned(pf->pdev)) {
		unsigned int vf_id;

		/* Acknowledge VFLR for all VFs. Without this, VFs will fail to
		 * work correctly when SR-IOV gets re-enabled.
		 */
		for (vf_id = 0; vf_id < tmp; vf_id++) {
			u32 reg_idx, bit_idx;

			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
			wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
		}
	}

	clear_bit(ICE_VF_DIS, pf->state);
	clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
}

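/* A note on the VFLR acknowledgment above (layout inferred from the index
 * math): GLGEN_VFLRSTAT appears to hold one status bit per absolute VF ID,
 * packed 32 VFs per 32-bit register, so VF (vf_base_id + vf_id) maps to
 * register (abs_id / 32), bit (abs_id % 32), and writing that bit clears the
 * latched VFLR indication.
 */
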
/**
 * ice_trigger_vf_reset - Reset a VF on HW
 * @vf: pointer to the VF structure
 * @is_vflr: true if VFLR was issued, false if not
 * @is_pfr: true if the reset was triggered due to a previous PFR
 *
 * Trigger hardware to start a reset for a particular VF. Expects the caller
 * to wait the proper amount of time to allow hardware to reset the VF before
 * it cleans up and restores VF functionality.
 */
static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
{
	struct ice_pf *pf = vf->pf;
	u32 reg, reg_idx, bit_idx;
	unsigned int vf_abs_id, i;
	struct device *dev;
	struct ice_hw *hw;

	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;

	/* Inform VF that it is no longer active, as a warning */
	clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);

	/* Disable VF's configuration API during reset. The flag is re-enabled
	 * when it's safe again to access VF's VSI.
	 */
	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);

	/* VF_MBX_ARQLEN and VF_MBX_ATQLEN are cleared by PFR, so the driver
	 * needs to clear them in the case of VFR/VFLR. If this is done for
	 * PFR, it can mess up VF resets because the VF driver may already
	 * have started cleanup by the time we get here.
	 */
	if (!is_pfr) {
		wr32(hw, VF_MBX_ARQLEN(vf->vf_id), 0);
		wr32(hw, VF_MBX_ATQLEN(vf->vf_id), 0);
	}

	/* In the case of a VFLR, the HW has already reset the VF and we
	 * just need to clean up, so don't hit the VFRTRIG register.
	 */
	if (!is_vflr) {
		/* reset VF using VPGEN_VFRTRIG reg */
		reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
		reg |= VPGEN_VFRTRIG_VFSWR_M;
		wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
	}
	/* clear the VFLR bit in GLGEN_VFLRSTAT */
	reg_idx = (vf_abs_id) / 32;
	bit_idx = (vf_abs_id) % 32;
	wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
	ice_flush(hw);

	wr32(hw, PF_PCI_CIAA,
	     VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S));
	for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) {
		reg = rd32(hw, PF_PCI_CIAD);
		/* no transactions pending so stop polling */
		if ((reg & VF_TRANS_PENDING_M) == 0)
			break;

		dev_err(dev, "VF %u PCI transactions stuck\n", vf->vf_id);
		udelay(ICE_PCI_CIAD_WAIT_DELAY_US);
	}
}

/**
 * ice_vf_get_port_info - Get the VF's port info structure
 * @vf: VF used to get the port info structure for
 */
static struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf)
{
	return vf->pf->hw.port_info;
}

/**
 * ice_vf_vsi_setup - Set up a VF VSI
 * @vf: VF to setup VSI for
 *
 * Returns pointer to the successfully allocated VSI struct on success,
 * otherwise returns NULL on failure.
 */
static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
{
	struct ice_port_info *pi = ice_vf_get_port_info(vf);
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;

	vsi = ice_vsi_setup(pf, pi, ICE_VSI_VF, vf, NULL);

	if (!vsi) {
		dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n");
		ice_vf_invalidate_vsi(vf);
		return NULL;
	}

	vf->lan_vsi_idx = vsi->idx;
	vf->lan_vsi_num = vsi->vsi_num;

	return vsi;
}

/**
 * ice_vf_ctrl_vsi_setup - Set up a VF control VSI
 * @vf: VF to setup control VSI for
 *
 * Returns pointer to the successfully allocated VSI struct on success,
 * otherwise returns NULL on failure.
 */
struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf)
{
	struct ice_port_info *pi = ice_vf_get_port_info(vf);
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;

	vsi = ice_vsi_setup(pf, pi, ICE_VSI_CTRL, vf, NULL);
	if (!vsi) {
		dev_err(ice_pf_to_dev(pf), "Failed to create VF control VSI\n");
		ice_vf_ctrl_invalidate_vsi(vf);
	}

	return vsi;
}

/**
 * ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space
 * @pf: pointer to PF structure
 * @vf: pointer to VF that the first MSIX vector index is being calculated for
 *
 * This returns the first MSIX vector index in PF space that is used by this
 * VF. This index is used when accessing PF relative registers such as
 * GLINT_VECT2FUNC and GLINT_DYN_CTL.
 * This will always be the OICR index in the AVF driver so any functionality
 * using vf->first_vector_idx for queue configuration will have to increment by
 * 1 to avoid meddling with the OICR index.
 */
static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
{
	return pf->sriov_base_vector + vf->vf_id * pf->num_msix_per_vf;
}

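/* Worked example (illustrative numbers only): with sriov_base_vector = 200
 * and num_msix_per_vf = 17, VF 0 starts at vector 200 and VF 2 starts at
 * 200 + 2 * 17 = 234; per the note above, vector 234 is VF 2's OICR vector
 * and its queue vectors begin at 235.
 */
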
/**
 * ice_vf_rebuild_host_tx_rate_cfg - re-apply the Tx rate limiting configuration
 * @vf: VF to re-apply the configuration for
 *
 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
 * needs to re-apply the host configured Tx rate limiting configuration.
 */
static int ice_vf_rebuild_host_tx_rate_cfg(struct ice_vf *vf)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	int err;

	if (vf->min_tx_rate) {
		err = ice_set_min_bw_limit(vsi, (u64)vf->min_tx_rate * 1000);
		if (err) {
			dev_err(dev, "failed to set min Tx rate to %d Mbps for VF %u, error %d\n",
				vf->min_tx_rate, vf->vf_id, err);
			return err;
		}
	}

	if (vf->max_tx_rate) {
		err = ice_set_max_bw_limit(vsi, (u64)vf->max_tx_rate * 1000);
		if (err) {
			dev_err(dev, "failed to set max Tx rate to %d Mbps for VF %u, error %d\n",
				vf->max_tx_rate, vf->vf_id, err);
			return err;
		}
	}

	return 0;
}

/* Small accessors for the port VLAN configuration cached on the VF; a port
 * VLAN is considered enabled when either a nonzero VID or priority is set.
 */
static u16 ice_vf_get_port_vlan_id(struct ice_vf *vf)
{
	return vf->port_vlan_info.vid;
}

static u8 ice_vf_get_port_vlan_prio(struct ice_vf *vf)
{
	return vf->port_vlan_info.prio;
}

bool ice_vf_is_port_vlan_ena(struct ice_vf *vf)
{
	return (ice_vf_get_port_vlan_id(vf) || ice_vf_get_port_vlan_prio(vf));
}

static u16 ice_vf_get_port_vlan_tpid(struct ice_vf *vf)
{
	return vf->port_vlan_info.tpid;
}

/**
 * ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN
 * @vf: VF to rebuild the VLAN configuration for
 * @vsi: Pointer to VSI
 *
 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
 * always re-adds either a VLAN 0 or port VLAN based filter after reset.
 */
static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf, struct ice_vsi *vsi)
{
	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
	struct device *dev = ice_pf_to_dev(vf->pf);
	int err;

	if (ice_vf_is_port_vlan_ena(vf)) {
		err = vlan_ops->set_port_vlan(vsi, &vf->port_vlan_info);
		if (err) {
			dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n",
				vf->vf_id, err);
			return err;
		}

		err = vlan_ops->add_vlan(vsi, &vf->port_vlan_info);
	} else {
		err = ice_vsi_add_vlan_zero(vsi);
	}

	if (err) {
		dev_err(dev, "failed to add VLAN %u filter for VF %u during VF rebuild, error %d\n",
			ice_vf_is_port_vlan_ena(vf) ?
			ice_vf_get_port_vlan_id(vf) : 0, vf->vf_id, err);
		return err;
	}

	err = vlan_ops->ena_rx_filtering(vsi);
	if (err)
		dev_warn(dev, "failed to enable Rx VLAN filtering for VF %d VSI %d during VF rebuild, error %d\n",
			 vf->vf_id, vsi->idx, err);

	return 0;
}

/**
 * ice_cfg_mac_antispoof - enable or disable MAC anti-spoof checking for a VSI
 * @vsi: VSI to configure
 * @enable: true to enable MAC anti-spoof checking, false to disable it
 */
static int ice_cfg_mac_antispoof(struct ice_vsi *vsi, bool enable)
{
	struct ice_vsi_ctx *ctx;
	int err;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->info.sec_flags = vsi->info.sec_flags;
	ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);

	if (enable)
		ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
	else
		ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;

	err = ice_update_vsi(&vsi->back->hw, vsi->idx, ctx, NULL);
	if (err)
		dev_err(ice_pf_to_dev(vsi->back), "Failed to configure Tx MAC anti-spoof %s for VSI %d, error %d\n",
			enable ? "ON" : "OFF", vsi->vsi_num, err);
	else
		vsi->info.sec_flags = ctx->info.sec_flags;

	kfree(ctx);

	return err;
}

/**
 * ice_vsi_ena_spoofchk - enable Tx spoof checking for this VSI
 * @vsi: VSI to enable Tx spoof checking for
 */
static int ice_vsi_ena_spoofchk(struct ice_vsi *vsi)
{
	struct ice_vsi_vlan_ops *vlan_ops;
	int err;

	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);

	err = vlan_ops->ena_tx_filtering(vsi);
	if (err)
		return err;

	return ice_cfg_mac_antispoof(vsi, true);
}

/**
 * ice_vsi_dis_spoofchk - disable Tx spoof checking for this VSI
 * @vsi: VSI to disable Tx spoof checking for
 */
static int ice_vsi_dis_spoofchk(struct ice_vsi *vsi)
{
	struct ice_vsi_vlan_ops *vlan_ops;
	int err;

	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);

	err = vlan_ops->dis_tx_filtering(vsi);
	if (err)
		return err;

	return ice_cfg_mac_antispoof(vsi, false);
}

/**
 * ice_vf_set_spoofchk_cfg - apply Tx spoof checking setting
 * @vf: VF to set spoofchk for
 * @vsi: VSI associated to the VF
 */
static int
ice_vf_set_spoofchk_cfg(struct ice_vf *vf, struct ice_vsi *vsi)
{
	int err;

	if (vf->spoofchk)
		err = ice_vsi_ena_spoofchk(vsi);
	else
		err = ice_vsi_dis_spoofchk(vsi);

	return err;
}

/**
 * ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's perm_addr/LAA
 * @vf: VF to add MAC filters for
 *
 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
 * always re-adds a broadcast filter and the VF's perm_addr/LAA after reset.
 */
static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	u8 broadcast[ETH_ALEN];
	int status;

	if (ice_is_eswitch_mode_switchdev(vf->pf))
		return 0;

	eth_broadcast_addr(broadcast);
	status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
	if (status) {
		dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %d\n",
			vf->vf_id, status);
		return status;
	}

	vf->num_mac++;

	if (is_valid_ether_addr(vf->hw_lan_addr.addr)) {
		status = ice_fltr_add_mac(vsi, vf->hw_lan_addr.addr,
					  ICE_FWD_TO_VSI);
		if (status) {
			dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %d\n",
				&vf->hw_lan_addr.addr[0], vf->vf_id,
				status);
			return status;
		}
		vf->num_mac++;

		ether_addr_copy(vf->dev_lan_addr.addr, vf->hw_lan_addr.addr);
	}

	return 0;
}

/**
 * ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value
 * @vf: VF to configure trust setting for
 */
static void ice_vf_set_host_trust_cfg(struct ice_vf *vf)
{
	if (vf->trusted)
		set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
	else
		clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
}

/**
 * ice_ena_vf_msix_mappings - enable VF MSIX mappings in hardware
 * @vf: VF to enable MSIX mappings for
 *
 * Some of the registers need to be indexed/configured using hardware global
 * device values and other registers need 0-based values, which represent PF
 * based values.
 */
static void ice_ena_vf_msix_mappings(struct ice_vf *vf)
{
	int device_based_first_msix, device_based_last_msix;
	int pf_based_first_msix, pf_based_last_msix, v;
	struct ice_pf *pf = vf->pf;
	int device_based_vf_id;
	struct ice_hw *hw;
	u32 reg;

	hw = &pf->hw;
	pf_based_first_msix = vf->first_vector_idx;
	pf_based_last_msix = (pf_based_first_msix + pf->num_msix_per_vf) - 1;

	device_based_first_msix = pf_based_first_msix +
		pf->hw.func_caps.common_cap.msix_vector_first_id;
	device_based_last_msix =
		(device_based_first_msix + pf->num_msix_per_vf) - 1;
	device_based_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	reg = (((device_based_first_msix << VPINT_ALLOC_FIRST_S) &
		VPINT_ALLOC_FIRST_M) |
	       ((device_based_last_msix << VPINT_ALLOC_LAST_S) &
		VPINT_ALLOC_LAST_M) | VPINT_ALLOC_VALID_M);
	wr32(hw, VPINT_ALLOC(vf->vf_id), reg);

	reg = (((device_based_first_msix << VPINT_ALLOC_PCI_FIRST_S)
		 & VPINT_ALLOC_PCI_FIRST_M) |
	       ((device_based_last_msix << VPINT_ALLOC_PCI_LAST_S) &
		VPINT_ALLOC_PCI_LAST_M) | VPINT_ALLOC_PCI_VALID_M);
	wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);

	/* map the interrupts to its functions */
	for (v = pf_based_first_msix; v <= pf_based_last_msix; v++) {
		reg = (((device_based_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
			GLINT_VECT2FUNC_VF_NUM_M) |
		       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
			GLINT_VECT2FUNC_PF_NUM_M));
		wr32(hw, GLINT_VECT2FUNC(v), reg);
	}

	/* Map mailbox interrupt to VF MSI-X vector 0 */
	wr32(hw, VPINT_MBX_CTL(device_based_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M);
}

/**
 * ice_ena_vf_q_mappings - enable Rx/Tx queue mappings for a VF
 * @vf: VF to enable the mappings for
 * @max_txq: max Tx queues allowed on the VF's VSI
 * @max_rxq: max Rx queues allowed on the VF's VSI
 */
static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	struct ice_hw *hw = &vf->pf->hw;
	u32 reg;

	/* set regardless of mapping mode */
	wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);

	/* VF Tx queues allocation */
	if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) {
		/* set the VF PF Tx queue range
		 * VFNUMQ value should be set to (number of queues - 1). A value
		 * of 0 means 1 queue and a value of 255 means 256 queues
		 */
		reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
			VPLAN_TX_QBASE_VFFIRSTQ_M) |
		       (((max_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
			VPLAN_TX_QBASE_VFNUMQ_M));
		wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
	} else {
		dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
	}

	/* set regardless of mapping mode */
	wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id), VPLAN_RXQ_MAPENA_RX_ENA_M);

	/* VF Rx queues allocation */
	if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) {
		/* set the VF PF Rx queue range
		 * VFNUMQ value should be set to (number of queues - 1). A value
		 * of 0 means 1 queue and a value of 255 means 256 queues
		 */
		reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
			VPLAN_RX_QBASE_VFFIRSTQ_M) |
		       (((max_rxq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
			VPLAN_RX_QBASE_VFNUMQ_M));
		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
	} else {
		dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
	}
}

/**
 * ice_ena_vf_mappings - enable VF MSIX and queue mapping
 * @vf: pointer to the VF structure
 */
static void ice_ena_vf_mappings(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	ice_ena_vf_msix_mappings(vf);
	ice_ena_vf_q_mappings(vf, vsi->alloc_txq, vsi->alloc_rxq);
}

/**
 * ice_calc_vf_reg_idx - Calculate the VF's register index in the PF space
 * @vf: VF to calculate the register index for
 * @q_vector: a q_vector associated to the VF
 */
int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
{
	struct ice_pf *pf;

	if (!vf || !q_vector)
		return -EINVAL;

	pf = vf->pf;

	/* always add one to account for the OICR being the first MSIX */
	return pf->sriov_base_vector + pf->num_msix_per_vf * vf->vf_id +
	       q_vector->v_idx + 1;
}

/**
 * ice_get_max_valid_res_idx - Get the max valid resource index
 * @res: pointer to the resource to find the max valid index for
 *
 * Start from the end of the ice_res_tracker and return right when we find the
 * first res->list entry with the ICE_RES_VALID_BIT set. This function is only
 * valid for SR-IOV because it is the only consumer that manipulates the
 * res->end and this is always called when res->end is set to res->num_entries.
 */
static int ice_get_max_valid_res_idx(struct ice_res_tracker *res)
{
	int i;

	if (!res)
		return -EINVAL;

	for (i = res->num_entries - 1; i >= 0; i--)
		if (res->list[i] & ICE_RES_VALID_BIT)
			return i;

	return 0;
}

/**
 * ice_sriov_set_msix_res - Set any used MSIX resources
 * @pf: pointer to PF structure
 * @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs
 *
 * This function allows SR-IOV resources to be taken from the end of the PF's
 * allowed HW MSIX vectors so that the irq_tracker will not be affected. We
 * just set the pf->sriov_base_vector and return success.
 *
 * If there are not enough resources available, return an error. This should
 * always be caught by ice_set_per_vf_res().
 *
 * Return 0 on success, and -EINVAL when there are not enough MSIX vectors
 * in the PF's space available for SR-IOV.
 */
static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
{
	u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
	int vectors_used = pf->irq_tracker->num_entries;
	int sriov_base_vector;

	sriov_base_vector = total_vectors - num_msix_needed;

	/* make sure we only grab irq_tracker entries from the list end and
	 * that we have enough available MSIX vectors
	 */
	if (sriov_base_vector < vectors_used)
		return -EINVAL;

	pf->sriov_base_vector = sriov_base_vector;

	return 0;
}

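/* Worked example (illustrative numbers only): with 1024 total MSIX vectors,
 * 260 already used by the irq_tracker, and 8 VFs needing 17 vectors each
 * (num_msix_needed = 136), sriov_base_vector becomes 1024 - 136 = 888, which
 * is >= 260, so the SR-IOV block fits at the end of the PF's vector space.
 */
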
/**
 * ice_set_per_vf_res - check if vectors and queues are available
 * @pf: pointer to the PF structure
 * @num_vfs: the number of SR-IOV VFs being configured
 *
 * First, determine HW interrupts from common pool. If we allocate fewer VFs, we
 * get more vectors and can enable more queues per VF. Note that this does not
 * grab any vectors from the SW pool already allocated. Also note, that all
 * vector counts include one for each VF's miscellaneous interrupt vector
 * (i.e. OICR).
 *
 * Minimum VFs - 2 vectors, 1 queue pair
 * Small VFs - 5 vectors, 4 queue pairs
 * Medium VFs - 17 vectors, 16 queue pairs
 *
 * Second, determine number of queue pairs per VF by starting with a pre-defined
 * maximum each VF supports. If this is not possible, then we adjust based on
 * queue pairs available on the device.
 *
 * Lastly, set queue and MSI-X VF variables tracked by the PF so it can be used
 * by each VF during VF initialization and reset.
 */
static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs)
{
	int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
	u16 num_msix_per_vf, num_txq, num_rxq, avail_qs;
	int msix_avail_per_vf, msix_avail_for_sriov;
	struct device *dev = ice_pf_to_dev(pf);

	if (!num_vfs || max_valid_res_idx < 0)
		return -EINVAL;

	/* determine MSI-X resources per VF */
	msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors -
		pf->irq_tracker->num_entries;
	msix_avail_per_vf = msix_avail_for_sriov / num_vfs;
	if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) {
		num_msix_per_vf = ICE_NUM_VF_MSIX_MED;
	} else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_SMALL) {
		num_msix_per_vf = ICE_NUM_VF_MSIX_SMALL;
	} else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MULTIQ_MIN) {
		num_msix_per_vf = ICE_NUM_VF_MSIX_MULTIQ_MIN;
	} else if (msix_avail_per_vf >= ICE_MIN_INTR_PER_VF) {
		num_msix_per_vf = ICE_MIN_INTR_PER_VF;
	} else {
		dev_err(dev, "Only %d MSI-X interrupts available for SR-IOV. Not enough to support minimum of %d MSI-X interrupts per VF for %d VFs\n",
			msix_avail_for_sriov, ICE_MIN_INTR_PER_VF,
			num_vfs);
		return -EIO;
	}

	num_txq = min_t(u16, num_msix_per_vf - ICE_NONQ_VECS_VF,
			ICE_MAX_RSS_QS_PER_VF);
	avail_qs = ice_get_avail_txq_count(pf) / num_vfs;
	if (!avail_qs)
		num_txq = 0;
	else if (num_txq > avail_qs)
		num_txq = rounddown_pow_of_two(avail_qs);

	num_rxq = min_t(u16, num_msix_per_vf - ICE_NONQ_VECS_VF,
			ICE_MAX_RSS_QS_PER_VF);
	avail_qs = ice_get_avail_rxq_count(pf) / num_vfs;
	if (!avail_qs)
		num_rxq = 0;
	else if (num_rxq > avail_qs)
		num_rxq = rounddown_pow_of_two(avail_qs);

	if (num_txq < ICE_MIN_QS_PER_VF || num_rxq < ICE_MIN_QS_PER_VF) {
		dev_err(dev, "Not enough queues to support minimum of %d queue pairs per VF for %d VFs\n",
			ICE_MIN_QS_PER_VF, num_vfs);
		return -EIO;
	}

	if (ice_sriov_set_msix_res(pf, num_msix_per_vf * num_vfs)) {
		dev_err(dev, "Unable to set MSI-X resources for %d VFs\n",
			num_vfs);
		return -EINVAL;
	}

	/* only allow equal Tx/Rx queue count (i.e. queue pairs) */
	pf->num_qps_per_vf = min_t(int, num_txq, num_rxq);
	pf->num_msix_per_vf = num_msix_per_vf;
	dev_info(dev, "Enabling %d VFs with %d vectors and %d queues per VF\n",
		 num_vfs, pf->num_msix_per_vf, pf->num_qps_per_vf);

	return 0;
}

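/* Worked example (illustrative numbers only): with 256 MSI-X vectors free for
 * SR-IOV and 16 VFs requested, msix_avail_per_vf = 16, which misses the
 * medium tier (17) and lands in the small tier (5 vectors, 4 queue pairs per
 * the kernel-doc above). Those data vectors bound the queue count, and if only
 * 3 Tx queues per VF remain available, rounddown_pow_of_two(3) leaves each VF
 * with 2 queue pairs.
 */
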
/**
 * ice_clear_vf_reset_trigger - enable VF to access hardware
 * @vf: VF to enable hardware access for
 */
static void ice_clear_vf_reset_trigger(struct ice_vf *vf)
{
	struct ice_hw *hw = &vf->pf->hw;
	u32 reg;

	reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
	reg &= ~VPGEN_VFRTRIG_VFSWR_M;
	wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
	ice_flush(hw);
}

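/* The VPGEN_VFRTRIG handshake, as used in this file: ice_trigger_vf_reset()
 * sets the VFSWR bit to hold the VF in reset, and ice_clear_vf_reset_trigger()
 * clears it once the rebuild is done so the VF can access hardware again.
 * Keeping the bit asserted for the whole rebuild window is what keeps a VF
 * driver from touching its registers mid-reset.
 */
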
/* Set (or clear, below) promiscuous mode on the VF's VSI, scoping the
 * filter to the port VLAN or the VSI's VLANs when either is present.
 */
static int
ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
{
	struct ice_hw *hw = &vsi->back->hw;
	int status;

	if (ice_vf_is_port_vlan_ena(vf))
		status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m,
						  ice_vf_get_port_vlan_id(vf));
	else if (ice_vsi_has_non_zero_vlans(vsi))
		status = ice_fltr_set_vlan_vsi_promisc(hw, vsi, promisc_m);
	else
		status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m, 0);

	if (status && status != -EEXIST) {
		dev_err(ice_pf_to_dev(vsi->back), "enable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n",
			vf->vf_id, status);
		return status;
	}

	return 0;
}

static int
ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
{
	struct ice_hw *hw = &vsi->back->hw;
	int status;

	if (ice_vf_is_port_vlan_ena(vf))
		status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m,
						    ice_vf_get_port_vlan_id(vf));
	else if (ice_vsi_has_non_zero_vlans(vsi))
		status = ice_fltr_clear_vlan_vsi_promisc(hw, vsi, promisc_m);
	else
		status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m, 0);

	if (status && status != -ENOENT) {
		dev_err(ice_pf_to_dev(vsi->back), "disable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n",
			vf->vf_id, status);
		return status;
	}

	return 0;
}

static void ice_vf_clear_counters(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	vf->num_mac = 0;
	vsi->num_vlan = 0;
	memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
	memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
}

/**
 * ice_vf_pre_vsi_rebuild - tasks to be done prior to VSI rebuild
 * @vf: VF to perform pre VSI rebuild tasks
 *
 * These tasks are items that don't need to be amortized since they are most
 * likely called in a for loop with all VF(s) in the reset_all_vfs() case.
 */
static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
{
	ice_vf_clear_counters(vf);
	ice_clear_vf_reset_trigger(vf);
}

/**
 * ice_vf_rebuild_aggregator_node_cfg - rebuild aggregator node config
 * @vsi: Pointer to VSI
 *
 * This function moves VSI into corresponding scheduler aggregator node
 * based on cached value of "aggregator node info" per VSI
 */
static void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	int status;

	if (!vsi->agg_node)
		return;

	dev = ice_pf_to_dev(pf);
	if (vsi->agg_node->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) {
		dev_dbg(dev,
			"agg_id %u already has reached max_num_vsis %u\n",
			vsi->agg_node->agg_id, vsi->agg_node->num_vsis);
		return;
	}

	status = ice_move_vsi_to_agg(pf->hw.port_info, vsi->agg_node->agg_id,
				     vsi->idx, vsi->tc_cfg.ena_tc);
	if (status)
		dev_dbg(dev, "unable to move VSI idx %u into aggregator %u node",
			vsi->idx, vsi->agg_node->agg_id);
	else
		vsi->agg_node->num_vsis++;
}

/**
 * ice_vf_rebuild_host_cfg - host admin configuration is persistent across reset
 * @vf: VF to rebuild host configuration on
 */
static void ice_vf_rebuild_host_cfg(struct ice_vf *vf)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	ice_vf_set_host_trust_cfg(vf);

	if (ice_vf_rebuild_host_mac_cfg(vf))
		dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n",
			vf->vf_id);

	if (ice_vf_rebuild_host_vlan_cfg(vf, vsi))
		dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n",
			vf->vf_id);

	if (ice_vf_rebuild_host_tx_rate_cfg(vf))
		dev_err(dev, "failed to rebuild Tx rate limiting configuration for VF %u\n",
			vf->vf_id);

	if (ice_vf_set_spoofchk_cfg(vf, vsi))
		dev_err(dev, "failed to rebuild spoofchk configuration for VF %d\n",
			vf->vf_id);

	/* rebuild aggregator node config for main VF VSI */
	ice_vf_rebuild_aggregator_node_cfg(vsi);
}

/**
 * ice_vf_rebuild_vsi_with_release - release and setup the VF's VSI
 * @vf: VF to release and setup the VSI for
 *
 * This is only called when a single VF is being reset (i.e. VFR, VFLR, host VF
 * configuration change, etc.).
 */
static int ice_vf_rebuild_vsi_with_release(struct ice_vf *vf)
{
	ice_vf_vsi_release(vf);
	if (!ice_vf_vsi_setup(vf))
		return -ENOMEM;

	return 0;
}

/**
 * ice_vf_rebuild_vsi - rebuild the VF's VSI
 * @vf: VF to rebuild the VSI for
 *
 * This is only called when all VF(s) are being reset (i.e. PCIe Reset on the
 * host, PFR, CORER, etc.).
 */
static int ice_vf_rebuild_vsi(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	struct ice_pf *pf = vf->pf;

	if (ice_vsi_rebuild(vsi, true)) {
		dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n",
			vf->vf_id);
		return -EIO;
	}
	/* vsi->idx will remain the same in this case so don't update
	 * vf->lan_vsi_idx
	 */
	vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
	vf->lan_vsi_num = vsi->vsi_num;

	return 0;
}

/**
 * ice_vf_set_initialized - VF is ready for VIRTCHNL communication
 * @vf: VF to set in initialized state
 *
 * After this function the VF will be ready to receive/handle the
 * VIRTCHNL_OP_GET_VF_RESOURCES message
 */
static void ice_vf_set_initialized(struct ice_vf *vf)
{
	ice_set_vf_state_qs_dis(vf);
	clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
	clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
	clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
	set_bit(ICE_VF_STATE_INIT, vf->vf_states);
	memset(&vf->vlan_v2_caps, 0, sizeof(vf->vlan_v2_caps));
}

/**
 * ice_vf_post_vsi_rebuild - tasks to do after the VF's VSI has been rebuilt
 * @vf: VF to perform tasks on
 */
static void ice_vf_post_vsi_rebuild(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_hw *hw;

	hw = &pf->hw;

	ice_vf_rebuild_host_cfg(vf);

	ice_vf_set_initialized(vf);
	ice_ena_vf_mappings(vf);
	wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
}

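/* Reset flow summary (as implemented in this file): both the single-VF path
 * (ice_reset_vf()) and the all-VF path (ice_reset_all_vfs()) run
 * ice_vf_pre_vsi_rebuild() -> a VSI rebuild -> ice_vf_post_vsi_rebuild().
 * They differ in the middle step: a single-VF reset releases and recreates
 * the VSI (ice_vf_rebuild_vsi_with_release()), while the all-VF path rebuilds
 * it in place (ice_vf_rebuild_vsi()) so vsi->idx is preserved.
 */
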
/**
 * ice_reset_all_vfs - reset all allocated VFs in one go
 * @pf: pointer to the PF structure
 * @is_vflr: true if VFLR was issued, false if not
 *
 * First, tell the hardware to reset each VF, then do all the waiting in one
 * chunk, and finally finish restoring each VF after the wait. This is useful
 * during PF routines which need to reset all VFs, as otherwise it must perform
 * these resets in a serialized fashion.
 *
 * Returns true if any VFs were reset, and false otherwise.
 */
bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	int v, i;

	/* If we don't have any VFs, then there is nothing to reset */
	if (!pf->num_alloc_vfs)
		return false;

	/* clear all malicious info if the VFs are getting reset */
	ice_for_each_vf(pf, i)
		if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->malvfs, ICE_MAX_VF_COUNT, i))
			dev_dbg(dev, "failed to clear malicious VF state for VF %u\n", i);

	/* If VFs have been disabled, there is no need to reset */
	if (test_and_set_bit(ICE_VF_DIS, pf->state))
		return false;

	/* Begin reset on all VFs at once */
	ice_for_each_vf(pf, v)
		ice_trigger_vf_reset(&pf->vf[v], is_vflr, true);

	/* HW requires some time to make sure it can flush the FIFO for a VF
	 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
	 * sequence to make sure that it has completed. We'll keep track of
	 * the VFs using a simple iterator that increments once that VF has
	 * finished resetting.
	 */
	for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
		/* Check each VF in sequence */
		while (v < pf->num_alloc_vfs) {
			u32 reg;

			vf = &pf->vf[v];
			reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
			if (!(reg & VPGEN_VFRSTAT_VFRD_M)) {
				/* only delay if the check failed */
				usleep_range(10, 20);
				break;
			}

			/* If the current VF has finished resetting, move on
			 * to the next VF in sequence.
			 */
			v++;
		}
	}

	/* Display a warning if at least one VF didn't manage to reset in
	 * time, but continue on with the operation.
	 */
	if (v < pf->num_alloc_vfs)
		dev_warn(dev, "VF reset check timeout\n");

	/* free VF resources to begin resetting the VSI state */
	ice_for_each_vf(pf, v) {
		vf = &pf->vf[v];

		mutex_lock(&vf->cfg_lock);

		vf->driver_caps = 0;
		ice_vc_set_default_allowlist(vf);

		ice_vf_fdir_exit(vf);
		ice_vf_fdir_init(vf);
		/* clean VF control VSI when resetting VFs since it should be
		 * setup only when VF creates its first FDIR rule.
		 */
		if (vf->ctrl_vsi_idx != ICE_NO_VSI)
			ice_vf_ctrl_invalidate_vsi(vf);

		ice_vf_pre_vsi_rebuild(vf);
		ice_vf_rebuild_vsi(vf);
		ice_vf_post_vsi_rebuild(vf);

		mutex_unlock(&vf->cfg_lock);
	}

	if (ice_is_eswitch_mode_switchdev(pf))
		if (ice_eswitch_rebuild(pf))
			dev_warn(dev, "eswitch rebuild failed\n");

	ice_flush(hw);
	clear_bit(ICE_VF_DIS, pf->state);

	return true;
}

/**
 * ice_is_vf_disabled
 * @vf: pointer to the VF info
 *
 * Returns true if the PF or VF is disabled, false otherwise.
 */
bool ice_is_vf_disabled(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;

	/* If the PF has been disabled, there is no need to reset the VF until
	 * the PF is active again. Similarly, if the VF has been disabled, this
	 * means something else is resetting the VF, so we shouldn't continue.
	 * Otherwise, set disable VF state bit for actual reset, and continue.
	 */
	return (test_bit(ICE_VF_DIS, pf->state) ||
		test_bit(ICE_VF_STATE_DIS, vf->vf_states));
}

007676b4
AV
1581/**
1582 * ice_reset_vf - Reset a particular VF
1583 * @vf: pointer to the VF structure
1584 * @is_vflr: true if VFLR was issued, false if not
1585 *
f844d521
BC
1586 * Returns true if the VF is currently in reset, resets successfully, or resets
1587 * are disabled and false otherwise.
007676b4 1588 */
9d5c5a52 1589bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
007676b4
AV
1590{
1591 struct ice_pf *pf = vf->pf;
03f7a986 1592 struct ice_vsi *vsi;
4015d11e 1593 struct device *dev;
5eda8afd 1594 struct ice_hw *hw;
007676b4 1595 bool rsd = false;
5eda8afd 1596 u8 promisc_m;
007676b4
AV
1597 u32 reg;
1598 int i;
1599
fadead80
JK
1600 lockdep_assert_held(&vf->cfg_lock);
1601
4015d11e
BC
1602 dev = ice_pf_to_dev(pf);
1603
7e408e07 1604 if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
f844d521
BC
1605 dev_dbg(dev, "Trying to reset VF %d, but all VF resets are disabled\n",
1606 vf->vf_id);
1607 return true;
1608 }
1609
ec4f5a43 1610 if (ice_is_vf_disabled(vf)) {
4015d11e
BC
1611 dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",
1612 vf->vf_id);
ec4f5a43
AA
1613 return true;
1614 }
cb6a8dc0 1615
ec4f5a43
AA
1616 /* Set VF disable bit state here, before triggering reset */
1617 set_bit(ICE_VF_STATE_DIS, vf->vf_states);
29d42f1f 1618 ice_trigger_vf_reset(vf, is_vflr, false);
007676b4 1619
c5afbe99 1620 vsi = ice_get_vf_vsi(vf);
03f7a986 1621
b385cca4 1622 ice_dis_vf_qs(vf);
06914ac2
MW
1623
1624 /* Call Disable LAN Tx queue AQ whether or not queues are
1625 * enabled. This is needed for successful completion of VFR.
1626 */
1627 ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
1628 NULL, ICE_VF_RESET, vf->vf_id, NULL);
007676b4 1629
5eda8afd 1630 hw = &pf->hw;
007676b4
AV
1631 /* poll VPGEN_VFRSTAT reg to make sure
1632 * that reset is complete
1633 */
1634 for (i = 0; i < 10; i++) {
1635 /* VF reset requires driver to first reset the VF and then
1636 * poll the status register to make sure that the reset
1637 * completed successfully.
1638 */
007676b4
AV
1639 reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
1640 if (reg & VPGEN_VFRSTAT_VFRD_M) {
1641 rsd = true;
1642 break;
1643 }
60d628ea
BC
1644
1645 /* only sleep if the reset is not done */
1646 usleep_range(10, 20);
007676b4
AV
1647 }
1648
c0dcaa55
MS
1649 vf->driver_caps = 0;
1650 ice_vc_set_default_allowlist(vf);
1651
007676b4
AV
1652 /* Display a warning if the VF didn't manage to reset in time, but
1653 * continue on with the operation anyway.
1654 */
1655 if (!rsd)
4015d11e 1656 dev_warn(dev, "VF reset check timeout on VF %d\n", vf->vf_id);
007676b4 1657
5eda8afd
AA
1658 /* disable promiscuous modes in case they were enabled;
1659 * ignore any error if the disabling process fails
1660 */
1661 if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
1662 test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
a19d7f7f 1663 if (ice_vf_is_port_vlan_ena(vf) || vsi->num_vlan)
5eda8afd
AA
1664 promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
1665 else
1666 promisc_m = ICE_UCAST_PROMISC_BITS;
1667
fabf480b 1668 if (ice_vf_clear_vsi_promisc(vf, vsi, promisc_m))
4015d11e 1669 dev_err(dev, "disabling promiscuous mode failed\n");
5eda8afd
AA
1670 }
1671
c1e5da5d
WD
1672 ice_eswitch_del_vf_mac_rule(vf);
1673
1f7ea1cd 1674 ice_vf_fdir_exit(vf);
f23ab04d 1675 ice_vf_fdir_init(vf);
da62c5ff
QZ
1676 /* clean VF control VSI when resetting the VF since it should be set up
1677 * only when the VF creates its first FDIR rule.
1678 */
1679 if (vf->ctrl_vsi_idx != ICE_NO_VSI)
1680 ice_vf_ctrl_vsi_release(vf);
1681
12bb018c 1682 ice_vf_pre_vsi_rebuild(vf);
c7ee6ce1
HW
1683
1684 if (ice_vf_rebuild_vsi_with_release(vf)) {
1685 dev_err(dev, "Failed to release and setup the VF%u's VSI\n", vf->vf_id);
1686 return false;
1687 }
1688
12bb018c 1689 ice_vf_post_vsi_rebuild(vf);
1c54c839
GN
1690 vsi = ice_get_vf_vsi(vf);
1691 ice_eswitch_update_repr(vsi);
c1e5da5d 1692 ice_eswitch_replay_vf_mac_rule(vf);
007676b4 1693
0891c896
VS
1694 /* if the VF has been reset allow it to come up again */
1695 if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->malvfs, ICE_MAX_VF_COUNT, vf->vf_id))
1696 dev_dbg(dev, "failed to clear malicious VF state for VF %u\n", i);
1697
007676b4
AV
1698 return true;
1699}
1700
53b8decb
AV
1701/**
1702 * ice_vc_notify_link_state - Inform all VFs on a PF of link status
1703 * @pf: pointer to the PF structure
1704 */
1705void ice_vc_notify_link_state(struct ice_pf *pf)
1706{
1707 int i;
1708
005881bc 1709 ice_for_each_vf(pf, i)
53b8decb
AV
1710 ice_vc_notify_vf_link_state(&pf->vf[i]);
1711}
1712
007676b4
AV
1713/**
1714 * ice_vc_notify_reset - Send pending reset message to all VFs
1715 * @pf: pointer to the PF structure
1716 *
1717 * indicate a pending reset to all VFs on a given PF
1718 */
1719void ice_vc_notify_reset(struct ice_pf *pf)
1720{
1721 struct virtchnl_pf_event pfe;
1722
1723 if (!pf->num_alloc_vfs)
1724 return;
1725
1726 pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
1727 pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
cf6c6e01 1728 ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, VIRTCHNL_STATUS_SUCCESS,
007676b4
AV
1729 (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
1730}
1731
7c710869
AV
1732/**
1733 * ice_vc_notify_vf_reset - Notify VF of a reset event
1734 * @vf: pointer to the VF structure
1735 */
1736static void ice_vc_notify_vf_reset(struct ice_vf *vf)
1737{
1738 struct virtchnl_pf_event pfe;
4c66d227 1739 struct ice_pf *pf;
7c710869 1740
4c66d227
JB
1741 if (!vf)
1742 return;
1743
1744 pf = vf->pf;
1745 if (ice_validate_vf_id(pf, vf->vf_id))
7c710869
AV
1746 return;
1747
1f9639d2
AA
1748 /* Bail out if the VF is in a disabled state or is neither initialized
1749 * nor active - otherwise proceed with notifications
1750 */
1751 if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
1752 !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) ||
1753 test_bit(ICE_VF_STATE_DIS, vf->vf_states))
7c710869
AV
1754 return;
1755
1756 pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
1757 pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
4c66d227 1758 ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT,
cf6c6e01
MW
1759 VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
1760 NULL);
7c710869
AV
1761}
1762
916c7fdf
BC
1763/**
1764 * ice_init_vf_vsi_res - initialize/setup VF VSI resources
1765 * @vf: VF to initialize/setup the VSI for
1766 *
1767 * This function creates a VSI for the VF, adds a VLAN 0 filter, and sets up
1768 * the VF VSI's broadcast filter. It is only used during initial VF creation.
1769 */
1770static int ice_init_vf_vsi_res(struct ice_vf *vf)
1771{
f1da5a08 1772 struct ice_vsi_vlan_ops *vlan_ops;
916c7fdf
BC
1773 struct ice_pf *pf = vf->pf;
1774 u8 broadcast[ETH_ALEN];
916c7fdf
BC
1775 struct ice_vsi *vsi;
1776 struct device *dev;
1777 int err;
1778
1779 vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);
1780
1781 dev = ice_pf_to_dev(pf);
3726cce2
BC
1782 vsi = ice_vf_vsi_setup(vf);
1783 if (!vsi)
916c7fdf 1784 return -ENOMEM;
916c7fdf 1785
3e0b5971 1786 err = ice_vsi_add_vlan_zero(vsi);
916c7fdf
BC
1787 if (err) {
1788 dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n",
1789 vf->vf_id);
1790 goto release_vsi;
1791 }
1792
f1da5a08
BC
1793 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
1794 err = vlan_ops->ena_rx_filtering(vsi);
1795 if (err) {
1796 dev_warn(dev, "Failed to enable Rx VLAN filtering for VF %d\n",
1797 vf->vf_id);
1798 goto release_vsi;
1799 }
1800
916c7fdf 1801 eth_broadcast_addr(broadcast);
2ccc1c1c
TN
1802 err = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
1803 if (err) {
5f87ec48 1804 dev_err(dev, "Failed to add broadcast MAC filter for VF %d, error %d\n",
2ccc1c1c 1805 vf->vf_id, err);
916c7fdf
BC
1806 goto release_vsi;
1807 }
1808
daf4dd16
BC
1809 err = ice_vf_set_spoofchk_cfg(vf, vsi);
1810 if (err) {
1811 dev_warn(dev, "Failed to initialize spoofchk setting for VF %d\n",
1812 vf->vf_id);
1813 goto release_vsi;
1814 }
1815
916c7fdf
BC
1816 vf->num_mac = 1;
1817
1818 return 0;
1819
1820release_vsi:
3726cce2 1821 ice_vf_vsi_release(vf);
916c7fdf
BC
1822 return err;
1823}
1824
1825/**
1826 * ice_start_vfs - start VFs so they are ready to be used by SR-IOV
1827 * @pf: PF the VFs are associated with
1828 */
1829static int ice_start_vfs(struct ice_pf *pf)
1830{
1831 struct ice_hw *hw = &pf->hw;
1832 int retval, i;
1833
1834 ice_for_each_vf(pf, i) {
1835 struct ice_vf *vf = &pf->vf[i];
1836
1837 ice_clear_vf_reset_trigger(vf);
1838
1839 retval = ice_init_vf_vsi_res(vf);
1840 if (retval) {
1841 dev_err(ice_pf_to_dev(pf), "Failed to initialize VSI resources for VF %d, error %d\n",
1842 vf->vf_id, retval);
1843 goto teardown;
1844 }
1845
1846 set_bit(ICE_VF_STATE_INIT, vf->vf_states);
1847 ice_ena_vf_mappings(vf);
1848 wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
1849 }
1850
1851 ice_flush(hw);
1852 return 0;
1853
1854teardown:
1855 for (i = i - 1; i >= 0; i--) {
1856 struct ice_vf *vf = &pf->vf[i];
1857
1858 ice_dis_vf_mappings(vf);
3726cce2 1859 ice_vf_vsi_release(vf);
916c7fdf
BC
1860 }
1861
1862 return retval;
1863}
1864
ddf30f7f 1865/**
ef860480 1866 * ice_set_dflt_settings_vfs - set VF defaults during initialization/creation
a06325a0
BC
1867 * @pf: PF holding reference to all VFs for default configuration
1868 */
1869static void ice_set_dflt_settings_vfs(struct ice_pf *pf)
1870{
1871 int i;
1872
1873 ice_for_each_vf(pf, i) {
1874 struct ice_vf *vf = &pf->vf[i];
1875
1876 vf->pf = pf;
1877 vf->vf_id = i;
1878 vf->vf_sw_id = pf->first_sw;
1879 /* assign default capabilities */
1880 set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vf->vf_caps);
1881 vf->spoofchk = true;
1882 vf->num_vf_qs = pf->num_qps_per_vf;
c0dcaa55 1883 ice_vc_set_default_allowlist(vf);
da62c5ff
QZ
1884
1885 /* ctrl_vsi_idx will be set to a valid value only when the VF
1886 * creates its first FDIR rule.
1887 */
1888 ice_vf_ctrl_invalidate_vsi(vf);
1f7ea1cd 1889 ice_vf_fdir_init(vf);
ac19e03e
MS
1890
1891 ice_vc_set_dflt_vf_ops(&vf->vc_ops);
e6ba5273
BC
1892
1893 mutex_init(&vf->cfg_lock);
a06325a0
BC
1894 }
1895}
1896
1897/**
1898 * ice_alloc_vfs - allocate num_vfs in the PF structure
1899 * @pf: PF to store the allocated VFs in
1900 * @num_vfs: number of VFs to allocate
1901 */
1902static int ice_alloc_vfs(struct ice_pf *pf, int num_vfs)
1903{
1904 struct ice_vf *vfs;
1905
1906 vfs = devm_kcalloc(ice_pf_to_dev(pf), num_vfs, sizeof(*vfs),
1907 GFP_KERNEL);
1908 if (!vfs)
1909 return -ENOMEM;
1910
1911 pf->vf = vfs;
1912 pf->num_alloc_vfs = num_vfs;
1913
1914 return 0;
1915}
1916
1917/**
1918 * ice_ena_vfs - enable VFs so they are ready to be used
ddf30f7f 1919 * @pf: pointer to the PF structure
a06325a0 1920 * @num_vfs: number of VFs to enable
ddf30f7f 1921 */
a06325a0 1922static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
ddf30f7f 1923{
4015d11e 1924 struct device *dev = ice_pf_to_dev(pf);
ddf30f7f 1925 struct ice_hw *hw = &pf->hw;
a06325a0 1926 int ret;
ddf30f7f
AV
1927
1928 /* Disable global interrupt 0 so we don't try to handle the VFLR. */
cbe66bfe 1929 wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
ddf30f7f 1930 ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
7e408e07 1931 set_bit(ICE_OICR_INTR_DIS, pf->state);
ddf30f7f
AV
1932 ice_flush(hw);
1933
a06325a0 1934 ret = pci_enable_sriov(pf->pdev, num_vfs);
ddf30f7f
AV
1935 if (ret) {
1936 pf->num_alloc_vfs = 0;
1937 goto err_unroll_intr;
1938 }
a06325a0
BC
1939
1940 ret = ice_alloc_vfs(pf, num_vfs);
1941 if (ret)
72f9c203 1942 goto err_pci_disable_sriov;
ddf30f7f 1943
cd0f4f3b 1944 if (ice_set_per_vf_res(pf, num_vfs)) {
916c7fdf 1945 dev_err(dev, "Not enough resources for %d VFs, try with fewer number of VFs\n",
a06325a0 1946 num_vfs);
916c7fdf
BC
1947 ret = -ENOSPC;
1948 goto err_unroll_sriov;
1949 }
1950
a06325a0 1951 ice_set_dflt_settings_vfs(pf);
ddf30f7f 1952
916c7fdf
BC
1953 if (ice_start_vfs(pf)) {
1954 dev_err(dev, "Failed to start VF(s)\n");
1955 ret = -EAGAIN;
ddf30f7f 1956 goto err_unroll_sriov;
72f9c203 1957 }
ddf30f7f 1958
7e408e07 1959 clear_bit(ICE_VF_DIS, pf->state);
1c54c839 1960
8702ed0b
DC
1961 ret = ice_eswitch_configure(pf);
1962 if (ret)
1c54c839
GN
1963 goto err_unroll_sriov;
1964
2657e16d
PG
1965 /* rearm global interrupts */
1966 if (test_and_clear_bit(ICE_OICR_INTR_DIS, pf->state))
1967 ice_irq_dynamic_ena(hw, NULL, NULL);
1968
916c7fdf 1969 return 0;
ddf30f7f
AV
1970
1971err_unroll_sriov:
a06325a0 1972 devm_kfree(dev, pf->vf);
72f9c203 1973 pf->vf = NULL;
72f9c203
BC
1974 pf->num_alloc_vfs = 0;
1975err_pci_disable_sriov:
ddf30f7f
AV
1976 pci_disable_sriov(pf->pdev);
1977err_unroll_intr:
1978 /* rearm interrupts here */
1979 ice_irq_dynamic_ena(hw, NULL, NULL);
7e408e07 1980 clear_bit(ICE_OICR_INTR_DIS, pf->state);
ddf30f7f
AV
1981 return ret;
1982}
1983
ddf30f7f
AV
1984/**
1985 * ice_pci_sriov_ena - Enable or change number of VFs
1986 * @pf: pointer to the PF structure
1987 * @num_vfs: number of VFs to allocate
02337f1f
BC
1988 *
1989 * Returns 0 on success and negative on failure
ddf30f7f
AV
1990 */
1991static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
1992{
1993 int pre_existing_vfs = pci_num_vf(pf->pdev);
4015d11e 1994 struct device *dev = ice_pf_to_dev(pf);
ddf30f7f
AV
1995 int err;
1996
ddf30f7f
AV
1997 if (pre_existing_vfs && pre_existing_vfs != num_vfs)
1998 ice_free_vfs(pf);
1999 else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
02337f1f 2000 return 0;
ddf30f7f
AV
2001
2002 if (num_vfs > pf->num_vfs_supported) {
2003 dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
2004 num_vfs, pf->num_vfs_supported);
dced8ad3 2005 return -EOPNOTSUPP;
ddf30f7f
AV
2006 }
2007
a06325a0
BC
2008 dev_info(dev, "Enabling %d VFs\n", num_vfs);
2009 err = ice_ena_vfs(pf, num_vfs);
ddf30f7f
AV
2010 if (err) {
2011 dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
2012 return err;
2013 }
2014
2015 set_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
02337f1f
BC
2016 return 0;
2017}
2018
2019/**
2020 * ice_check_sriov_allowed - check if SR-IOV is allowed based on various checks
2021 * @pf: PF to enable SR-IOV on
2022 */
2023static int ice_check_sriov_allowed(struct ice_pf *pf)
2024{
2025 struct device *dev = ice_pf_to_dev(pf);
2026
2027 if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
2028 dev_err(dev, "This device is not capable of SR-IOV\n");
2029 return -EOPNOTSUPP;
2030 }
2031
2032 if (ice_is_safe_mode(pf)) {
2033 dev_err(dev, "SR-IOV cannot be configured - Device is in Safe Mode\n");
2034 return -EOPNOTSUPP;
2035 }
2036
2037 if (!ice_pf_state_is_nominal(pf)) {
2038 dev_err(dev, "Cannot enable SR-IOV, device not ready\n");
2039 return -EBUSY;
2040 }
2041
2042 return 0;
ddf30f7f
AV
2043}
2044
2045/**
2046 * ice_sriov_configure - Enable or change number of VFs via sysfs
2047 * @pdev: pointer to a pci_dev structure
02337f1f 2048 * @num_vfs: number of VFs to allocate or 0 to free VFs
ddf30f7f 2049 *
02337f1f
BC
2050 * This function is called when the user updates the number of VFs in sysfs. On
2051 * success return whatever num_vfs was set to by the caller. Return negative on
2052 * failure.
ddf30f7f
AV
2053 */
2054int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
2055{
2056 struct ice_pf *pf = pci_get_drvdata(pdev);
4015d11e 2057 struct device *dev = ice_pf_to_dev(pf);
02337f1f 2058 int err;
ddf30f7f 2059
02337f1f
BC
2060 err = ice_check_sriov_allowed(pf);
2061 if (err)
2062 return err;
462acf6a 2063
02337f1f
BC
2064 if (!num_vfs) {
2065 if (!pci_vfs_assigned(pdev)) {
0891c896 2066 ice_mbx_deinit_snapshot(&pf->hw);
02337f1f 2067 ice_free_vfs(pf);
df006dd4
DE
2068 if (pf->lag)
2069 ice_enable_lag(pf->lag);
02337f1f
BC
2070 return 0;
2071 }
ddf30f7f 2072
4015d11e 2073 dev_err(dev, "can't free VFs because some are assigned to VMs.\n");
ddf30f7f
AV
2074 return -EBUSY;
2075 }
2076
2ccc1c1c
TN
2077 err = ice_mbx_init_snapshot(&pf->hw, num_vfs);
2078 if (err)
2079 return err;
0891c896 2080
02337f1f 2081 err = ice_pci_sriov_ena(pf, num_vfs);
0891c896
VS
2082 if (err) {
2083 ice_mbx_deinit_snapshot(&pf->hw);
02337f1f 2084 return err;
0891c896 2085 }
02337f1f 2086
df006dd4
DE
2087 if (pf->lag)
2088 ice_disable_lag(pf->lag);
02337f1f 2089 return num_vfs;
ddf30f7f 2090}
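/* For illustration, the usual trigger for this callback is the PCI sysfs
 * interface, e.g. "echo 4 > /sys/bus/pci/devices/<BDF>/sriov_numvfs" to
 * create four VFs, or writing 0 to free them (which only succeeds while
 * no VF is assigned to a VM).
 */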
007676b4
AV
2091
2092/**
2093 * ice_process_vflr_event - Free VF resources via IRQ calls
2094 * @pf: pointer to the PF structure
2095 *
df17b7e0 2096 * called from the VFLR IRQ handler to
007676b4
AV
2097 * free up VF resources and state variables
2098 */
2099void ice_process_vflr_event(struct ice_pf *pf)
2100{
2101 struct ice_hw *hw = &pf->hw;
53bb6698 2102 unsigned int vf_id;
007676b4
AV
2103 u32 reg;
2104
7e408e07 2105 if (!test_and_clear_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
007676b4
AV
2106 !pf->num_alloc_vfs)
2107 return;
2108
005881bc 2109 ice_for_each_vf(pf, vf_id) {
007676b4
AV
2110 struct ice_vf *vf = &pf->vf[vf_id];
2111 u32 reg_idx, bit_idx;
2112
2113 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
2114 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
2115 /* read GLGEN_VFLRSTAT register to find out the flr VFs */
2116 reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
fadead80 2117 if (reg & BIT(bit_idx)) {
007676b4 2118 /* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */
fadead80 2119 mutex_lock(&vf->cfg_lock);
007676b4 2120 ice_reset_vf(vf, true);
fadead80
JK
2121 mutex_unlock(&vf->cfg_lock);
2122 }
007676b4
AV
2123 }
2124}
7c710869
AV
2125
2126/**
ff010eca 2127 * ice_vc_reset_vf - Perform software reset on the VF after informing the AVF
7c710869 2128 * @vf: pointer to the VF info
7c710869 2129 */
ff010eca 2130static void ice_vc_reset_vf(struct ice_vf *vf)
7c710869
AV
2131{
2132 ice_vc_notify_vf_reset(vf);
2133 ice_reset_vf(vf, false);
2134}
2135
2309ae38
BC
2136/**
2137 * ice_get_vf_from_pfq - get the VF who owns the PF space queue passed in
2138 * @pf: PF used to index all VFs
2139 * @pfq: queue index relative to the PF's function space
2140 *
2141 * If no VF is found who owns the pfq then return NULL, otherwise return a
2142 * pointer to the VF who owns the pfq
2143 */
2144static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq)
2145{
53bb6698 2146 unsigned int vf_id;
2309ae38
BC
2147
2148 ice_for_each_vf(pf, vf_id) {
2149 struct ice_vf *vf = &pf->vf[vf_id];
2150 struct ice_vsi *vsi;
2151 u16 rxq_idx;
2152
c5afbe99 2153 vsi = ice_get_vf_vsi(vf);
2309ae38
BC
2154
2155 ice_for_each_rxq(vsi, rxq_idx)
2156 if (vsi->rxq_map[rxq_idx] == pfq)
2157 return vf;
2158 }
2159
2160 return NULL;
2161}
2162
2163/**
2164 * ice_globalq_to_pfq - convert from global queue index to PF space queue index
2165 * @pf: PF used for conversion
2166 * @globalq: global queue index used to convert to PF space queue index
2167 */
2168static u32 ice_globalq_to_pfq(struct ice_pf *pf, u32 globalq)
2169{
2170 return globalq - pf->hw.func_caps.common_cap.rxq_first_id;
2171}
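/* A worked example, assuming a hypothetical rxq_first_id of 2048: a LAN
 * overflow event naming global Rx queue 2053 converts to PF-space queue
 * 2053 - 2048 = 5, which ice_get_vf_from_pfq() then matches against each
 * VF VSI's rxq_map.
 */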
2172
2173/**
2174 * ice_vf_lan_overflow_event - handle LAN overflow event for a VF
2175 * @pf: PF that the LAN overflow event happened on
2176 * @event: structure holding the event information for the LAN overflow event
2177 *
2178 * Determine if the LAN overflow event was caused by a VF queue. If it was not
2179 * caused by a VF, do nothing. If a VF caused this LAN overflow event trigger a
2180 * reset on the offending VF.
2181 */
2182void
2183ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
2184{
2185 u32 gldcb_rtctq, queue;
2186 struct ice_vf *vf;
2187
2188 gldcb_rtctq = le32_to_cpu(event->desc.params.lan_overflow.prtdcb_ruptq);
2189 dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq);
2190
2191 /* event returns device global Rx queue number */
2192 queue = (gldcb_rtctq & GLDCB_RTCTQ_RXQNUM_M) >>
2193 GLDCB_RTCTQ_RXQNUM_S;
2194
2195 vf = ice_get_vf_from_pfq(pf, ice_globalq_to_pfq(pf, queue));
2196 if (!vf)
2197 return;
2198
fadead80 2199 mutex_lock(&vf->cfg_lock);
2309ae38 2200 ice_vc_reset_vf(vf);
fadead80 2201 mutex_unlock(&vf->cfg_lock);
2309ae38
BC
2202}
2203
1071a835
AV
2204/**
2205 * ice_vc_send_msg_to_vf - Send message to VF
2206 * @vf: pointer to the VF info
2207 * @v_opcode: virtual channel opcode
2208 * @v_retval: virtual channel return value
2209 * @msg: pointer to the msg buffer
2210 * @msglen: msg length
2211 *
2212 * send msg to VF
2213 */
1f7ea1cd 2214int
cf6c6e01
MW
2215ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
2216 enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
1071a835 2217{
4015d11e 2218 struct device *dev;
1071a835 2219 struct ice_pf *pf;
5518ac2a 2220 int aq_ret;
1071a835 2221
4c66d227 2222 if (!vf)
1071a835
AV
2223 return -EINVAL;
2224
2225 pf = vf->pf;
4c66d227
JB
2226 if (ice_validate_vf_id(pf, vf->vf_id))
2227 return -EINVAL;
1071a835 2228
4015d11e
BC
2229 dev = ice_pf_to_dev(pf);
2230
1071a835
AV
2231 /* single place to detect unsuccessful return values */
2232 if (v_retval) {
2233 vf->num_inval_msgs++;
4015d11e
BC
2234 dev_info(dev, "VF %d failed opcode %d, retval: %d\n", vf->vf_id,
2235 v_opcode, v_retval);
1071a835 2236 if (vf->num_inval_msgs > ICE_DFLT_NUM_INVAL_MSGS_ALLOWED) {
19cce2c6 2237 dev_err(dev, "Number of invalid messages exceeded for VF %d\n",
1071a835 2238 vf->vf_id);
4015d11e 2239 dev_err(dev, "Use PF Control I/F to enable the VF\n");
1071a835
AV
2240 set_bit(ICE_VF_STATE_DIS, vf->vf_states);
2241 return -EIO;
2242 }
2243 } else {
2244 vf->num_valid_msgs++;
2245 /* reset the invalid counter, if a valid message is received. */
2246 vf->num_inval_msgs = 0;
2247 }
2248
2249 aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
2250 msg, msglen, NULL);
90e47737 2251 if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) {
5f87ec48
TN
2252 dev_info(dev, "Unable to send the message to VF %d ret %d aq_err %s\n",
2253 vf->vf_id, aq_ret,
0fee3577 2254 ice_aq_str(pf->hw.mailboxq.sq_last_status));
1071a835
AV
2255 return -EIO;
2256 }
2257
2258 return 0;
2259}
2260
2261/**
2262 * ice_vc_get_ver_msg
2263 * @vf: pointer to the VF info
2264 * @msg: pointer to the msg buffer
2265 *
2266 * called from the VF to request the API version used by the PF
2267 */
2268static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
2269{
2270 struct virtchnl_version_info info = {
2271 VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
2272 };
2273
2274 vf->vf_ver = *(struct virtchnl_version_info *)msg;
2275 /* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
2276 if (VF_IS_V10(&vf->vf_ver))
2277 info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
2278
cf6c6e01
MW
2279 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
2280 VIRTCHNL_STATUS_SUCCESS, (u8 *)&info,
1071a835
AV
2281 sizeof(struct virtchnl_version_info));
2282}
2283
a6aa7c8f
BC
2284/**
2285 * ice_vc_get_max_frame_size - get max frame size allowed for VF
2286 * @vf: VF used to determine max frame size
2287 *
2288 * Max frame size is determined based on the current port's max frame size and
2289 * whether a port VLAN is configured on this VF. The VF is not aware whether
2290 * it's in a port VLAN, so the PF needs to account for this both in max frame
2291 * size checks and when reporting the max frame size to the VF.
2292 */
2293static u16 ice_vc_get_max_frame_size(struct ice_vf *vf)
2294{
c5afbe99 2295 struct ice_port_info *pi = ice_vf_get_port_info(vf);
a6aa7c8f
BC
2296 u16 max_frame_size;
2297
2298 max_frame_size = pi->phy.link_info.max_frame_size;
2299
a19d7f7f 2300 if (ice_vf_is_port_vlan_ena(vf))
a6aa7c8f
BC
2301 max_frame_size -= VLAN_HLEN;
2302
2303 return max_frame_size;
2304}
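/* For example, with a hypothetical link max_frame_size of 9728 bytes and a
 * port VLAN on the VF, the VF is told 9728 - VLAN_HLEN = 9724 bytes so that
 * its frames still fit once the port VLAN tag is inserted.
 */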
2305
1071a835
AV
2306/**
2307 * ice_vc_get_vf_res_msg
2308 * @vf: pointer to the VF info
2309 * @msg: pointer to the msg buffer
2310 *
2311 * called from the VF to request its resources
2312 */
2313static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
2314{
cf6c6e01 2315 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1071a835 2316 struct virtchnl_vf_resource *vfres = NULL;
1071a835
AV
2317 struct ice_pf *pf = vf->pf;
2318 struct ice_vsi *vsi;
2319 int len = 0;
2320 int ret;
2321
4c66d227 2322 if (ice_check_vf_init(pf, vf)) {
cf6c6e01 2323 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2324 goto err;
2325 }
2326
2327 len = sizeof(struct virtchnl_vf_resource);
2328
9efe35d0 2329 vfres = kzalloc(len, GFP_KERNEL);
1071a835 2330 if (!vfres) {
cf6c6e01 2331 v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
1071a835
AV
2332 len = 0;
2333 goto err;
2334 }
2335 if (VF_IS_V11(&vf->vf_ver))
2336 vf->driver_caps = *(u32 *)msg;
2337 else
2338 vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
2339 VIRTCHNL_VF_OFFLOAD_RSS_REG |
2340 VIRTCHNL_VF_OFFLOAD_VLAN;
2341
2342 vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
c5afbe99 2343 vsi = ice_get_vf_vsi(vf);
f1ef73f5 2344 if (!vsi) {
cf6c6e01 2345 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
f1ef73f5
AA
2346 goto err;
2347 }
2348
cc71de8f
BC
2349 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
2350 /* VLAN offloads based on current device configuration */
2351 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN_V2;
2352 } else if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_VLAN) {
2353 /* allow VF to negotiate VIRTCHNL_VF_OFFLOAD_VLAN explicitly for
2354 * these two conditions, which amounts to guest VLAN filtering
2355 * and offloads being based on the inner VLAN or the
2356 * inner/single VLAN respectively, and don't allow the VF to
2357 * negotiate VIRTCHNL_VF_OFFLOAD_VLAN in any other case
2358 */
2359 if (ice_is_dvm_ena(&pf->hw) && ice_vf_is_port_vlan_ena(vf)) {
2360 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
2361 } else if (!ice_is_dvm_ena(&pf->hw) &&
2362 !ice_vf_is_port_vlan_ena(vf)) {
2363 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
2364 /* configure backward compatible support for VFs that
2365 * only support VIRTCHNL_VF_OFFLOAD_VLAN, the PF is
2366 * configured in SVM, and no port VLAN is configured
2367 */
2368 ice_vf_vsi_cfg_svm_legacy_vlan_mode(vsi);
2369 } else if (ice_is_dvm_ena(&pf->hw)) {
2370 /* configure software offloaded VLAN support when DVM
2371 * is enabled, but no port VLAN is enabled
2372 */
2373 ice_vf_vsi_cfg_dvm_legacy_vlan_mode(vsi);
2374 }
2375 }
1071a835
AV
2376
2377 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
2378 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
2379 } else {
2380 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)
2381 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
2382 else
2383 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
2384 }
2385
1f7ea1cd
QZ
2386 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
2387 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_FDIR_PF;
2388
1071a835
AV
2389 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
2390 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
2391
2392 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
2393 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
2394
2395 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)
2396 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
2397
2398 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING)
2399 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
2400
2401 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
2402 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
2403
2404 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
2405 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
2406
2407 if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
2408 vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
2409
222a8ab0
QZ
2410 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF)
2411 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF;
2412
142da08c
BC
2413 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_USO)
2414 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_USO;
2415
1071a835
AV
2416 vfres->num_vsis = 1;
2417 /* Tx and Rx queue are equal for VF */
2418 vfres->num_queue_pairs = vsi->num_txq;
46c276ce 2419 vfres->max_vectors = pf->num_msix_per_vf;
1071a835
AV
2420 vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE;
2421 vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
a6aa7c8f 2422 vfres->max_mtu = ice_vc_get_max_frame_size(vf);
1071a835
AV
2423
2424 vfres->vsi_res[0].vsi_id = vf->lan_vsi_num;
2425 vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
2426 vfres->vsi_res[0].num_queue_pairs = vsi->num_txq;
2427 ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
51efbbdf 2428 vf->hw_lan_addr.addr);
1071a835 2429
d4bc4e2d
BC
2430 /* match guest capabilities */
2431 vf->driver_caps = vfres->vf_cap_flags;
2432
c0dcaa55
MS
2433 ice_vc_set_caps_allowlist(vf);
2434 ice_vc_set_working_allowlist(vf);
2435
1071a835
AV
2436 set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
2437
2438err:
2439 /* send the response back to the VF */
cf6c6e01 2440 ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, v_ret,
1071a835
AV
2441 (u8 *)vfres, len);
2442
9efe35d0 2443 kfree(vfres);
1071a835
AV
2444 return ret;
2445}
2446
2447/**
2448 * ice_vc_reset_vf_msg
2449 * @vf: pointer to the VF info
2450 *
2451 * called from the VF to reset itself; unlike
2452 * other virtchnl messages, the PF driver
2453 * doesn't send a response back to the VF
2454 */
2455static void ice_vc_reset_vf_msg(struct ice_vf *vf)
2456{
7dcc0fb8 2457 if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
1071a835
AV
2458 ice_reset_vf(vf, false);
2459}
2460
2461/**
2462 * ice_find_vsi_from_id
2f2da36e 2463 * @pf: the PF structure to search for the VSI
f9867df6 2464 * @id: ID of the VSI it is searching for
1071a835 2465 *
f9867df6 2466 * searches for the VSI with the given ID
1071a835
AV
2467 */
2468static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id)
2469{
2470 int i;
2471
80ed404a 2472 ice_for_each_vsi(pf, i)
1071a835
AV
2473 if (pf->vsi[i] && pf->vsi[i]->vsi_num == id)
2474 return pf->vsi[i];
2475
2476 return NULL;
2477}
2478
2479/**
2480 * ice_vc_isvalid_vsi_id
2481 * @vf: pointer to the VF info
f9867df6 2482 * @vsi_id: VF relative VSI ID
1071a835 2483 *
f9867df6 2484 * check for the valid VSI ID
1071a835 2485 */
1f7ea1cd 2486bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
1071a835
AV
2487{
2488 struct ice_pf *pf = vf->pf;
2489 struct ice_vsi *vsi;
2490
2491 vsi = ice_find_vsi_from_id(pf, vsi_id);
2492
b03d519d 2493 return (vsi && (vsi->vf == vf));
1071a835
AV
2494}
2495
2496/**
2497 * ice_vc_isvalid_q_id
2498 * @vf: pointer to the VF info
f9867df6
AV
2499 * @vsi_id: VSI ID
2500 * @qid: VSI relative queue ID
1071a835 2501 *
f9867df6 2502 * check for the valid queue ID
1071a835
AV
2503 */
2504static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
2505{
2506 struct ice_vsi *vsi = ice_find_vsi_from_id(vf->pf, vsi_id);
2507 /* allocated Tx and Rx queues should be always equal for VF VSI */
2508 return (vsi && (qid < vsi->alloc_txq));
2509}
2510
9c7dd756
MS
2511/**
2512 * ice_vc_isvalid_ring_len
2513 * @ring_len: length of ring
2514 *
2515 * check for a valid ring count, which should be a multiple of ICE_REQ_DESC_MULTIPLE
77ca27c4 2516 * or zero
9c7dd756
MS
2517 */
2518static bool ice_vc_isvalid_ring_len(u16 ring_len)
2519{
77ca27c4
PG
2520 return ring_len == 0 ||
2521 (ring_len >= ICE_MIN_NUM_DESC &&
9c7dd756
MS
2522 ring_len <= ICE_MAX_NUM_DESC &&
2523 !(ring_len % ICE_REQ_DESC_MULTIPLE));
2524}
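/* For example, assuming ICE_MIN_NUM_DESC is 64, ICE_MAX_NUM_DESC is 8160 and
 * ICE_REQ_DESC_MULTIPLE is 32: ring_len values of 0, 64 and 512 pass, while
 * 48 (below the minimum) and 100 (not a multiple of 32) are rejected.
 */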
2525
60f44fe4
JG
2526/**
2527 * ice_vc_validate_pattern
2528 * @vf: pointer to the VF info
2529 * @proto: virtchnl protocol headers
2530 *
2531 * validate the pattern is supported or not.
2532 *
2533 * Return: true on success, false on error.
2534 */
2535bool
2536ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto)
2537{
2538 bool is_ipv4 = false;
2539 bool is_ipv6 = false;
2540 bool is_udp = false;
2541 u16 ptype = -1;
2542 int i = 0;
2543
2544 while (i < proto->count &&
2545 proto->proto_hdr[i].type != VIRTCHNL_PROTO_HDR_NONE) {
2546 switch (proto->proto_hdr[i].type) {
2547 case VIRTCHNL_PROTO_HDR_ETH:
2548 ptype = ICE_PTYPE_MAC_PAY;
2549 break;
2550 case VIRTCHNL_PROTO_HDR_IPV4:
2551 ptype = ICE_PTYPE_IPV4_PAY;
2552 is_ipv4 = true;
2553 break;
2554 case VIRTCHNL_PROTO_HDR_IPV6:
2555 ptype = ICE_PTYPE_IPV6_PAY;
2556 is_ipv6 = true;
2557 break;
2558 case VIRTCHNL_PROTO_HDR_UDP:
2559 if (is_ipv4)
2560 ptype = ICE_PTYPE_IPV4_UDP_PAY;
2561 else if (is_ipv6)
2562 ptype = ICE_PTYPE_IPV6_UDP_PAY;
2563 is_udp = true;
2564 break;
2565 case VIRTCHNL_PROTO_HDR_TCP:
2566 if (is_ipv4)
2567 ptype = ICE_PTYPE_IPV4_TCP_PAY;
2568 else if (is_ipv6)
2569 ptype = ICE_PTYPE_IPV6_TCP_PAY;
2570 break;
2571 case VIRTCHNL_PROTO_HDR_SCTP:
2572 if (is_ipv4)
2573 ptype = ICE_PTYPE_IPV4_SCTP_PAY;
2574 else if (is_ipv6)
2575 ptype = ICE_PTYPE_IPV6_SCTP_PAY;
2576 break;
2577 case VIRTCHNL_PROTO_HDR_GTPU_IP:
2578 case VIRTCHNL_PROTO_HDR_GTPU_EH:
2579 if (is_ipv4)
2580 ptype = ICE_MAC_IPV4_GTPU;
2581 else if (is_ipv6)
2582 ptype = ICE_MAC_IPV6_GTPU;
2583 goto out;
2584 case VIRTCHNL_PROTO_HDR_L2TPV3:
2585 if (is_ipv4)
2586 ptype = ICE_MAC_IPV4_L2TPV3;
2587 else if (is_ipv6)
2588 ptype = ICE_MAC_IPV6_L2TPV3;
2589 goto out;
2590 case VIRTCHNL_PROTO_HDR_ESP:
2591 if (is_ipv4)
2592 ptype = is_udp ? ICE_MAC_IPV4_NAT_T_ESP :
2593 ICE_MAC_IPV4_ESP;
2594 else if (is_ipv6)
2595 ptype = is_udp ? ICE_MAC_IPV6_NAT_T_ESP :
2596 ICE_MAC_IPV6_ESP;
2597 goto out;
2598 case VIRTCHNL_PROTO_HDR_AH:
2599 if (is_ipv4)
2600 ptype = ICE_MAC_IPV4_AH;
2601 else if (is_ipv6)
2602 ptype = ICE_MAC_IPV6_AH;
2603 goto out;
2604 case VIRTCHNL_PROTO_HDR_PFCP:
2605 if (is_ipv4)
2606 ptype = ICE_MAC_IPV4_PFCP_SESSION;
2607 else if (is_ipv6)
2608 ptype = ICE_MAC_IPV6_PFCP_SESSION;
2609 goto out;
2610 default:
2611 break;
2612 }
2613 i++;
2614 }
2615
2616out:
2617 return ice_hw_ptype_ena(&vf->pf->hw, ptype);
2618}
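/* For example, a proto_hdr chain of ETH -> IPV4 -> UDP walks the switch above
 * to ptype = ICE_PTYPE_IPV4_UDP_PAY; the pattern is then supported only if
 * ice_hw_ptype_ena() reports that ptype as enabled.
 */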
2619
222a8ab0
QZ
2620/**
2621 * ice_vc_parse_rss_cfg - parses hash fields and headers from
2622 * a specific virtchnl RSS cfg
2623 * @hw: pointer to the hardware
2624 * @rss_cfg: pointer to the virtchnl RSS cfg
2625 * @addl_hdrs: pointer to the protocol header fields (ICE_FLOW_SEG_HDR_*)
2626 * to configure
2627 * @hash_flds: pointer to the hash bit fields (ICE_FLOW_HASH_*) to configure
2628 *
2629 * Return true if every protocol header in the RSS cfg could be matched, else
2630 * return false; unmatched hash-field selectors are simply skipped
2631 *
2632 * This function parses the virtchnl RSS cfg to be the intended
2633 * hash fields and the intended header for RSS configuration
2634 */
2635static bool
2636ice_vc_parse_rss_cfg(struct ice_hw *hw, struct virtchnl_rss_cfg *rss_cfg,
2637 u32 *addl_hdrs, u64 *hash_flds)
2638{
2639 const struct ice_vc_hash_field_match_type *hf_list;
2640 const struct ice_vc_hdr_match_type *hdr_list;
2641 int i, hf_list_len, hdr_list_len;
2642
60f44fe4
JG
2643 hf_list = ice_vc_hash_field_list;
2644 hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list);
2645 hdr_list = ice_vc_hdr_list;
2646 hdr_list_len = ARRAY_SIZE(ice_vc_hdr_list);
222a8ab0
QZ
2647
2648 for (i = 0; i < rss_cfg->proto_hdrs.count; i++) {
2649 struct virtchnl_proto_hdr *proto_hdr =
2650 &rss_cfg->proto_hdrs.proto_hdr[i];
2651 bool hdr_found = false;
2652 int j;
2653
2654 /* Find matched ice headers according to virtchnl headers. */
2655 for (j = 0; j < hdr_list_len; j++) {
2656 struct ice_vc_hdr_match_type hdr_map = hdr_list[j];
2657
2658 if (proto_hdr->type == hdr_map.vc_hdr) {
2659 *addl_hdrs |= hdr_map.ice_hdr;
2660 hdr_found = true;
2661 }
2662 }
2663
2664 if (!hdr_found)
2665 return false;
2666
2667 /* Find matched ice hash fields according to
2668 * virtchnl hash fields.
2669 */
2670 for (j = 0; j < hf_list_len; j++) {
2671 struct ice_vc_hash_field_match_type hf_map = hf_list[j];
2672
2673 if (proto_hdr->type == hf_map.vc_hdr &&
2674 proto_hdr->field_selector == hf_map.vc_hash_field) {
2675 *hash_flds |= hf_map.ice_hash_field;
2676 break;
2677 }
2678 }
2679 }
2680
2681 return true;
2682}
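/* Note on the aggregation above: the matched ICE_FLOW_SEG_HDR_* bits of every
 * header are ORed into *addl_hdrs, while a hash-field match requires the exact
 * (type, field_selector) pair; selectors with no match are silently skipped
 * rather than failing the parse.
 */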
2683
2684/**
2685 * ice_vf_adv_rss_offload_ena - determine if capabilities support advanced
2686 * RSS offloads
2687 * @caps: VF driver negotiated capabilities
2688 *
2689 * Return true if VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF capability is set,
2690 * else return false
2691 */
2692static bool ice_vf_adv_rss_offload_ena(u32 caps)
2693{
2694 return !!(caps & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF);
2695}
2696
2697/**
2698 * ice_vc_handle_rss_cfg
2699 * @vf: pointer to the VF info
2700 * @msg: pointer to the message buffer
2701 * @add: add a RSS config if true, otherwise delete a RSS config
2702 *
2703 * This function adds/deletes a RSS config
2704 */
2705static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
2706{
2707 u32 v_opcode = add ? VIRTCHNL_OP_ADD_RSS_CFG : VIRTCHNL_OP_DEL_RSS_CFG;
2708 struct virtchnl_rss_cfg *rss_cfg = (struct virtchnl_rss_cfg *)msg;
2709 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2710 struct device *dev = ice_pf_to_dev(vf->pf);
2711 struct ice_hw *hw = &vf->pf->hw;
2712 struct ice_vsi *vsi;
2713
2714 if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
2715 dev_dbg(dev, "VF %d attempting to configure RSS, but RSS is not supported by the PF\n",
2716 vf->vf_id);
2717 v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
2718 goto error_param;
2719 }
2720
2721 if (!ice_vf_adv_rss_offload_ena(vf->driver_caps)) {
2722 dev_dbg(dev, "VF %d attempting to configure RSS, but Advanced RSS offload is not supported\n",
2723 vf->vf_id);
2724 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2725 goto error_param;
2726 }
2727
2728 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2729 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2730 goto error_param;
2731 }
2732
2733 if (rss_cfg->proto_hdrs.count > VIRTCHNL_MAX_NUM_PROTO_HDRS ||
2734 rss_cfg->rss_algorithm < VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC ||
2735 rss_cfg->rss_algorithm > VIRTCHNL_RSS_ALG_XOR_SYMMETRIC) {
2736 dev_dbg(dev, "VF %d attempting to configure RSS, but RSS configuration is not valid\n",
2737 vf->vf_id);
2738 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2739 goto error_param;
2740 }
2741
2742 vsi = ice_get_vf_vsi(vf);
2743 if (!vsi) {
2744 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2745 goto error_param;
2746 }
2747
60f44fe4
JG
2748 if (!ice_vc_validate_pattern(vf, &rss_cfg->proto_hdrs)) {
2749 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2750 goto error_param;
2751 }
2752
222a8ab0
QZ
2753 if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_R_ASYMMETRIC) {
2754 struct ice_vsi_ctx *ctx;
222a8ab0 2755 u8 lut_type, hash_type;
5518ac2a 2756 int status;
222a8ab0
QZ
2757
2758 lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
2759 hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_XOR :
2760 ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
2761
2762 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
2763 if (!ctx) {
2764 v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
2765 goto error_param;
2766 }
2767
2768 ctx->info.q_opt_rss = ((lut_type <<
2769 ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
2770 ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
2771 (hash_type &
2772 ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
2773
2774 /* Preserve existing queueing option setting */
2775 ctx->info.q_opt_rss |= (vsi->info.q_opt_rss &
2776 ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_M);
2777 ctx->info.q_opt_tc = vsi->info.q_opt_tc;
2778 ctx->info.q_opt_flags = vsi->info.q_opt_rss;
2779
2780 ctx->info.valid_sections =
2781 cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
2782
2783 status = ice_update_vsi(hw, vsi->idx, ctx, NULL);
2784 if (status) {
5f87ec48 2785 dev_err(dev, "update VSI for RSS failed, err %d aq_err %s\n",
5518ac2a 2786 status, ice_aq_str(hw->adminq.sq_last_status));
222a8ab0
QZ
2787 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2788 } else {
2789 vsi->info.q_opt_rss = ctx->info.q_opt_rss;
2790 }
2791
2792 kfree(ctx);
2793 } else {
2794 u32 addl_hdrs = ICE_FLOW_SEG_HDR_NONE;
2795 u64 hash_flds = ICE_HASH_INVALID;
2796
2797 if (!ice_vc_parse_rss_cfg(hw, rss_cfg, &addl_hdrs,
2798 &hash_flds)) {
2799 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2800 goto error_param;
2801 }
2802
2803 if (add) {
2804 if (ice_add_rss_cfg(hw, vsi->idx, hash_flds,
2805 addl_hdrs)) {
2806 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2807 dev_err(dev, "ice_add_rss_cfg failed for vsi = %d, v_ret = %d\n",
2808 vsi->vsi_num, v_ret);
2809 }
2810 } else {
5e24d598 2811 int status;
ddd1f3cf
QZ
2812
2813 status = ice_rem_rss_cfg(hw, vsi->idx, hash_flds,
2814 addl_hdrs);
5518ac2a
TN
2815 /* We just ignore -ENOENT, because if two configurations
2816 * share the same profile remove one of them actually
2817 * removes both, since the profile is deleted.
ddd1f3cf 2818 */
d54699e2 2819 if (status && status != -ENOENT) {
ddd1f3cf 2820 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
5f87ec48
TN
2821 dev_err(dev, "ice_rem_rss_cfg failed for VF ID:%d, error:%d\n",
2822 vf->vf_id, status);
ddd1f3cf 2823 }
222a8ab0
QZ
2824 }
2825 }
2826
2827error_param:
2828 return ice_vc_send_msg_to_vf(vf, v_opcode, v_ret, NULL, 0);
2829}
2830
1071a835
AV
2831/**
2832 * ice_vc_config_rss_key
2833 * @vf: pointer to the VF info
2834 * @msg: pointer to the msg buffer
2835 *
2836 * Configure the VF's RSS key
2837 */
2838static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
2839{
cf6c6e01 2840 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1071a835
AV
2841 struct virtchnl_rss_key *vrk =
2842 (struct virtchnl_rss_key *)msg;
4c66d227 2843 struct ice_vsi *vsi;
1071a835
AV
2844
2845 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
cf6c6e01 2846 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2847 goto error_param;
2848 }
2849
2850 if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) {
cf6c6e01 2851 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2852 goto error_param;
2853 }
2854
3f416961 2855 if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
cf6c6e01 2856 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2857 goto error_param;
2858 }
2859
3f416961 2860 if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
cf6c6e01 2861 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2862 goto error_param;
2863 }
2864
c5afbe99 2865 vsi = ice_get_vf_vsi(vf);
3f416961 2866 if (!vsi) {
cf6c6e01 2867 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2868 goto error_param;
2869 }
2870
b66a972a 2871 if (ice_set_rss_key(vsi, vrk->key))
cf6c6e01 2872 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
1071a835 2873error_param:
cf6c6e01 2874 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, v_ret,
1071a835
AV
2875 NULL, 0);
2876}
2877
2878/**
2879 * ice_vc_config_rss_lut
2880 * @vf: pointer to the VF info
2881 * @msg: pointer to the msg buffer
2882 *
2883 * Configure the VF's RSS LUT
2884 */
2885static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
2886{
2887 struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
cf6c6e01 2888 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
4c66d227 2889 struct ice_vsi *vsi;
1071a835
AV
2890
2891 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
cf6c6e01 2892 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2893 goto error_param;
2894 }
2895
2896 if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) {
cf6c6e01 2897 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2898 goto error_param;
2899 }
2900
3f416961 2901 if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) {
cf6c6e01 2902 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2903 goto error_param;
2904 }
2905
3f416961 2906 if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
cf6c6e01 2907 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2908 goto error_param;
2909 }
2910
c5afbe99 2911 vsi = ice_get_vf_vsi(vf);
3f416961 2912 if (!vsi) {
cf6c6e01 2913 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2914 goto error_param;
2915 }
2916
b66a972a 2917 if (ice_set_rss_lut(vsi, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE))
cf6c6e01 2918 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
1071a835 2919error_param:
cf6c6e01 2920 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret,
1071a835
AV
2921 NULL, 0);
2922}
2923
c54d209c
BC
2924/**
2925 * ice_wait_on_vf_reset - poll to make sure a given VF is ready after reset
2926 * @vf: The VF being reset
2927 *
2928 * The max poll time is roughly 800ms, which is about the maximum time it takes
2929 * for a VF to be reset and/or a VF driver to be removed.
2930 */
2931static void ice_wait_on_vf_reset(struct ice_vf *vf)
2932{
2933 int i;
2934
2935 for (i = 0; i < ICE_MAX_VF_RESET_TRIES; i++) {
2936 if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
2937 break;
2938 msleep(ICE_MAX_VF_RESET_SLEEP_MS);
2939 }
2940}
2941
2942/**
2943 * ice_check_vf_ready_for_cfg - check if VF is ready to be configured/queried
2944 * @vf: VF to check if it's ready to be configured/queried
2945 *
2946 * The purpose of this function is to make sure the VF is not in reset, not
2947 * disabled, and initialized so it can be configured and/or queried by a host
2948 * administrator.
2949 */
7aae80ce 2950int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
c54d209c
BC
2951{
2952 struct ice_pf *pf;
2953
2954 ice_wait_on_vf_reset(vf);
2955
2956 if (ice_is_vf_disabled(vf))
2957 return -EINVAL;
2958
2959 pf = vf->pf;
2960 if (ice_check_vf_init(pf, vf))
2961 return -EBUSY;
2962
2963 return 0;
2964}
2965
cd6d6b83
BC
2966/**
2967 * ice_set_vf_spoofchk
2968 * @netdev: network interface device structure
2969 * @vf_id: VF identifier
2970 * @ena: flag to enable or disable feature
2971 *
2972 * Enable or disable VF spoof checking
2973 */
2974int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
2975{
2976 struct ice_netdev_priv *np = netdev_priv(netdev);
2977 struct ice_pf *pf = np->vsi->back;
cd6d6b83 2978 struct ice_vsi *vf_vsi;
cd6d6b83
BC
2979 struct device *dev;
2980 struct ice_vf *vf;
c54d209c 2981 int ret;
cd6d6b83
BC
2982
2983 dev = ice_pf_to_dev(pf);
2984 if (ice_validate_vf_id(pf, vf_id))
2985 return -EINVAL;
2986
2987 vf = &pf->vf[vf_id];
c54d209c
BC
2988 ret = ice_check_vf_ready_for_cfg(vf);
2989 if (ret)
2990 return ret;
cd6d6b83 2991
c5afbe99 2992 vf_vsi = ice_get_vf_vsi(vf);
cd6d6b83
BC
2993 if (!vf_vsi) {
2994 netdev_err(netdev, "VSI %d for VF %d is null\n",
2995 vf->lan_vsi_idx, vf->vf_id);
2996 return -EINVAL;
2997 }
2998
2999 if (vf_vsi->type != ICE_VSI_VF) {
19cce2c6 3000 netdev_err(netdev, "Type %d of VSI %d for VF %d is no ICE_VSI_VF\n",
cd6d6b83
BC
3001 vf_vsi->type, vf_vsi->vsi_num, vf->vf_id);
3002 return -ENODEV;
3003 }
3004
3005 if (ena == vf->spoofchk) {
3006 dev_dbg(dev, "VF spoofchk already %s\n", ena ? "ON" : "OFF");
3007 return 0;
3008 }
3009
daf4dd16
BC
3010 if (ena)
3011 ret = ice_vsi_ena_spoofchk(vf_vsi);
3012 else
3013 ret = ice_vsi_dis_spoofchk(vf_vsi);
3014 if (ret)
3015 dev_err(dev, "Failed to set spoofchk %s for VF %d VSI %d\n error %d\n",
3016 ena ? "ON" : "OFF", vf->vf_id, vf_vsi->vsi_num, ret);
3017 else
3018 vf->spoofchk = ena;
cd6d6b83 3019
cd6d6b83
BC
3020 return ret;
3021}
3022
01b5e89a
BC
3023/**
3024 * ice_is_any_vf_in_promisc - check if any VF(s) are in promiscuous mode
3025 * @pf: PF structure for accessing VF(s)
3026 *
3027 * Return false if no VF(s) are in unicast and/or multicast promiscuous mode,
3028 * else return true
3029 */
3030bool ice_is_any_vf_in_promisc(struct ice_pf *pf)
3031{
3032 int vf_idx;
3033
3034 ice_for_each_vf(pf, vf_idx) {
3035 struct ice_vf *vf = &pf->vf[vf_idx];
3036
3037 /* found a VF that has promiscuous mode configured */
3038 if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
3039 test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
3040 return true;
3041 }
3042
3043 return false;
3044}
3045
3046/**
3047 * ice_vc_cfg_promiscuous_mode_msg
3048 * @vf: pointer to the VF info
3049 * @msg: pointer to the msg buffer
3050 *
3051 * called from the VF to configure VF VSIs promiscuous mode
3052 */
3053static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
3054{
3055 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
382e0a68 3056 bool rm_promisc, alluni = false, allmulti = false;
01b5e89a
BC
3057 struct virtchnl_promisc_info *info =
3058 (struct virtchnl_promisc_info *)msg;
c31af68a 3059 struct ice_vsi_vlan_ops *vlan_ops;
fabf480b 3060 int mcast_err = 0, ucast_err = 0;
01b5e89a
BC
3061 struct ice_pf *pf = vf->pf;
3062 struct ice_vsi *vsi;
3063 struct device *dev;
01b5e89a
BC
3064 int ret = 0;
3065
3066 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3067 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3068 goto error_param;
3069 }
3070
3071 if (!ice_vc_isvalid_vsi_id(vf, info->vsi_id)) {
3072 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3073 goto error_param;
3074 }
3075
c5afbe99 3076 vsi = ice_get_vf_vsi(vf);
01b5e89a
BC
3077 if (!vsi) {
3078 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3079 goto error_param;
3080 }
3081
3082 dev = ice_pf_to_dev(pf);
3083 if (!test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
3084 dev_err(dev, "Unprivileged VF %d is attempting to configure promiscuous mode\n",
3085 vf->vf_id);
3086 /* Leave v_ret alone, lie to the VF on purpose. */
3087 goto error_param;
3088 }
3089
382e0a68
BC
3090 if (info->flags & FLAG_VF_UNICAST_PROMISC)
3091 alluni = true;
3092
3093 if (info->flags & FLAG_VF_MULTICAST_PROMISC)
3094 allmulti = true;
3095
3096 rm_promisc = !allmulti && !alluni;
01b5e89a 3097
c31af68a
BC
3098 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
3099 if (rm_promisc)
3100 ret = vlan_ops->ena_rx_filtering(vsi);
3101 else
3102 ret = vlan_ops->dis_rx_filtering(vsi);
3103 if (ret) {
3104 dev_err(dev, "Failed to configure VLAN pruning in promiscuous mode\n");
3105 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3106 goto error_param;
01b5e89a
BC
3107 }
3108
3109 if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
37b52be2 3110 bool set_dflt_vsi = alluni || allmulti;
01b5e89a
BC
3111
3112 if (set_dflt_vsi && !ice_is_dflt_vsi_in_use(pf->first_sw))
3113 /* only attempt to set the default forwarding VSI if
3114 * it's not currently set
3115 */
3116 ret = ice_set_dflt_vsi(pf->first_sw, vsi);
3117 else if (!set_dflt_vsi &&
3118 ice_is_vsi_dflt_vsi(pf->first_sw, vsi))
3119 /* only attempt to free the default forwarding VSI if we
3120 * are the owner
3121 */
3122 ret = ice_clear_dflt_vsi(pf->first_sw);
3123
3124 if (ret) {
3125 dev_err(dev, "%sable VF %d as the default VSI failed, error %d\n",
3126 set_dflt_vsi ? "en" : "dis", vf->vf_id, ret);
3127 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
3128 goto error_param;
3129 }
3130 } else {
1a8c7778
BC
3131 u8 mcast_m, ucast_m;
3132
c31af68a
BC
3133 if (ice_vf_is_port_vlan_ena(vf) ||
3134 ice_vsi_has_non_zero_vlans(vsi)) {
1a8c7778
BC
3135 mcast_m = ICE_MCAST_VLAN_PROMISC_BITS;
3136 ucast_m = ICE_UCAST_VLAN_PROMISC_BITS;
01b5e89a 3137 } else {
1a8c7778
BC
3138 mcast_m = ICE_MCAST_PROMISC_BITS;
3139 ucast_m = ICE_UCAST_PROMISC_BITS;
01b5e89a
BC
3140 }
3141
fabf480b
BC
3142 if (alluni)
3143 ucast_err = ice_vf_set_vsi_promisc(vf, vsi, ucast_m);
3144 else
3145 ucast_err = ice_vf_clear_vsi_promisc(vf, vsi, ucast_m);
1a8c7778 3146
fabf480b
BC
3147 if (allmulti)
3148 mcast_err = ice_vf_set_vsi_promisc(vf, vsi, mcast_m);
3149 else
3150 mcast_err = ice_vf_clear_vsi_promisc(vf, vsi, mcast_m);
3151
3152 if (ucast_err || mcast_err)
3153 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
01b5e89a
BC
3154 }
3155
fabf480b 3156 if (!mcast_err) {
1a8c7778
BC
3157 if (allmulti &&
3158 !test_and_set_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
3159 dev_info(dev, "VF %u successfully set multicast promiscuous mode\n",
3160 vf->vf_id);
3161 else if (!allmulti && test_and_clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
3162 dev_info(dev, "VF %u successfully unset multicast promiscuous mode\n",
3163 vf->vf_id);
3164 }
01b5e89a 3165
fabf480b 3166 if (!ucast_err) {
1a8c7778
BC
3167 if (alluni && !test_and_set_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states))
3168 dev_info(dev, "VF %u successfully set unicast promiscuous mode\n",
3169 vf->vf_id);
3170 else if (!alluni && test_and_clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states))
3171 dev_info(dev, "VF %u successfully unset unicast promiscuous mode\n",
3172 vf->vf_id);
3173 }
01b5e89a
BC
3174
3175error_param:
3176 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
3177 v_ret, NULL, 0);
3178}
3179
1071a835
AV
3180/**
3181 * ice_vc_get_stats_msg
3182 * @vf: pointer to the VF info
3183 * @msg: pointer to the msg buffer
3184 *
3185 * called from the VF to get VSI stats
3186 */
3187static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
3188{
cf6c6e01 3189 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1071a835
AV
3190 struct virtchnl_queue_select *vqs =
3191 (struct virtchnl_queue_select *)msg;
949375de 3192 struct ice_eth_stats stats = { 0 };
1071a835
AV
3193 struct ice_vsi *vsi;
3194
3195 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
cf6c6e01 3196 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
3197 goto error_param;
3198 }
3199
3200 if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
cf6c6e01 3201 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
3202 goto error_param;
3203 }
3204
c5afbe99 3205 vsi = ice_get_vf_vsi(vf);
1071a835 3206 if (!vsi) {
cf6c6e01 3207 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
3208 goto error_param;
3209 }
3210
1071a835
AV
3211 ice_update_eth_stats(vsi);
3212
3213 stats = vsi->eth_stats;
3214
3215error_param:
3216 /* send the response to the VF */
cf6c6e01 3217 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, v_ret,
1071a835
AV
3218 (u8 *)&stats, sizeof(stats));
3219}
3220
24e2e2a0
BC
3221/**
3222 * ice_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
3223 * @vqs: virtchnl_queue_select structure containing bitmaps to validate
3224 *
3225 * Return true on successful validation, else false
3226 */
3227static bool ice_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
3228{
3229 if ((!vqs->rx_queues && !vqs->tx_queues) ||
0ca469fb
MW
3230 vqs->rx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF) ||
3231 vqs->tx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF))
24e2e2a0
BC
3232 return false;
3233
3234 return true;
3235}
3236
4dc926d3
BC
3237/**
3238 * ice_vf_ena_txq_interrupt - enable Tx queue interrupt via QINT_TQCTL
3239 * @vsi: VSI of the VF to configure
3240 * @q_idx: VF queue index used to determine the queue in the PF's space
3241 */
3242static void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx)
3243{
3244 struct ice_hw *hw = &vsi->back->hw;
3245 u32 pfq = vsi->txq_map[q_idx];
3246 u32 reg;
3247
3248 reg = rd32(hw, QINT_TQCTL(pfq));
3249
3250 /* MSI-X index 0 in the VF's space is always for the OICR, which means
3251 * this is most likely a poll mode VF driver, so don't enable an
3252 * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
3253 */
3254 if (!(reg & QINT_TQCTL_MSIX_INDX_M))
3255 return;
3256
3257 wr32(hw, QINT_TQCTL(pfq), reg | QINT_TQCTL_CAUSE_ENA_M);
3258}
3259
3260/**
3261 * ice_vf_ena_rxq_interrupt - enable Rx queue interrupt via QINT_RQCTL
3262 * @vsi: VSI of the VF to configure
3263 * @q_idx: VF queue index used to determine the queue in the PF's space
3264 */
3265static void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx)
3266{
3267 struct ice_hw *hw = &vsi->back->hw;
3268 u32 pfq = vsi->rxq_map[q_idx];
3269 u32 reg;
3270
3271 reg = rd32(hw, QINT_RQCTL(pfq));
3272
3273 /* MSI-X index 0 in the VF's space is always for the OICR, which means
3274 * this is most likely a poll mode VF driver, so don't enable an
3275 * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
3276 */
3277 if (!(reg & QINT_RQCTL_MSIX_INDX_M))
3278 return;
3279
3280 wr32(hw, QINT_RQCTL(pfq), reg | QINT_RQCTL_CAUSE_ENA_M);
3281}
3282
1071a835
AV
3283/**
3284 * ice_vc_ena_qs_msg
3285 * @vf: pointer to the VF info
3286 * @msg: pointer to the msg buffer
3287 *
3288 * called from the VF to enable all or specific queue(s)
3289 */
3290static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
3291{
cf6c6e01 3292 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1071a835
AV
3293 struct virtchnl_queue_select *vqs =
3294 (struct virtchnl_queue_select *)msg;
1071a835 3295 struct ice_vsi *vsi;
77ca27c4
PG
3296 unsigned long q_map;
3297 u16 vf_q_id;
1071a835
AV
3298
3299 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
cf6c6e01 3300 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
3301 goto error_param;
3302 }
3303
3304 if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
cf6c6e01 3305 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
3306 goto error_param;
3307 }
3308
24e2e2a0 3309 if (!ice_vc_validate_vqs_bitmaps(vqs)) {
3f416961
A
3310 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3311 goto error_param;
3312 }
3313
c5afbe99 3314 vsi = ice_get_vf_vsi(vf);
1071a835 3315 if (!vsi) {
cf6c6e01 3316 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
3317 goto error_param;
3318 }
3319
3320 /* Enable only Rx rings; Tx rings were enabled by the FW when the
3321 * Tx queue group list was configured and the context bits were
3322 * programmed using ice_vsi_cfg_txqs
3323 */
77ca27c4 3324 q_map = vqs->rx_queues;
0ca469fb 3325 for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
77ca27c4
PG
3326 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
3327 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3328 goto error_param;
3329 }
3330
3331 /* Skip queue if enabled */
3332 if (test_bit(vf_q_id, vf->rxq_ena))
3333 continue;
3334
13a6233b 3335 if (ice_vsi_ctrl_one_rx_ring(vsi, true, vf_q_id, true)) {
19cce2c6 3336 dev_err(ice_pf_to_dev(vsi->back), "Failed to enable Rx ring %d on VSI %d\n",
77ca27c4
PG
3337 vf_q_id, vsi->vsi_num);
3338 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3339 goto error_param;
3340 }
3341
4dc926d3 3342 ice_vf_ena_rxq_interrupt(vsi, vf_q_id);
77ca27c4 3343 set_bit(vf_q_id, vf->rxq_ena);
77ca27c4
PG
3344 }
3345
77ca27c4 3346 q_map = vqs->tx_queues;
0ca469fb 3347 for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
77ca27c4
PG
3348 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
3349 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3350 goto error_param;
3351 }
3352
3353 /* Skip queue if enabled */
3354 if (test_bit(vf_q_id, vf->txq_ena))
3355 continue;
3356
4dc926d3 3357 ice_vf_ena_txq_interrupt(vsi, vf_q_id);
77ca27c4 3358 set_bit(vf_q_id, vf->txq_ena);
77ca27c4 3359 }
1071a835
AV
3360
3361 /* Set flag to indicate that queues are enabled */
cf6c6e01 3362 if (v_ret == VIRTCHNL_STATUS_SUCCESS)
77ca27c4 3363 set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
1071a835
AV
3364
3365error_param:
3366 /* send the response to the VF */
cf6c6e01 3367 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret,
1071a835
AV
3368 NULL, 0);
3369}
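/* Illustrative reading of the virtchnl_queue_select bitmaps handled above
 * (values are hypothetical): rx_queues = 0x5 requests Rx queues 0 and 2
 * only, while tx_queues = 0x0 leaves Tx untouched. Each set bit is a
 * VF-relative queue id that is validated with ice_vc_isvalid_q_id() before
 * the ring or its interrupt is touched.
 */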
3370
3371/**
3372 * ice_vc_dis_qs_msg
3373 * @vf: pointer to the VF info
3374 * @msg: pointer to the msg buffer
3375 *
3376 * called from the VF to disable all or specific
3377 * queue(s)
3378 */
3379static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
3380{
cf6c6e01 3381 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1071a835
AV
3382 struct virtchnl_queue_select *vqs =
3383 (struct virtchnl_queue_select *)msg;
1071a835 3384 struct ice_vsi *vsi;
77ca27c4
PG
3385 unsigned long q_map;
3386 u16 vf_q_id;
1071a835
AV
3387
3388 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
77ca27c4 3389 !test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) {
cf6c6e01 3390 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
3391 goto error_param;
3392 }
3393
3394 if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
cf6c6e01 3395 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
3396 goto error_param;
3397 }
3398
24e2e2a0 3399 if (!ice_vc_validate_vqs_bitmaps(vqs)) {
cf6c6e01 3400 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
3401 goto error_param;
3402 }
3403
c5afbe99 3404 vsi = ice_get_vf_vsi(vf);
1071a835 3405 if (!vsi) {
cf6c6e01 3406 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
3407 goto error_param;
3408 }
3409
77ca27c4
PG
3410 if (vqs->tx_queues) {
3411 q_map = vqs->tx_queues;
3412
0ca469fb 3413 for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
e72bba21 3414 struct ice_tx_ring *ring = vsi->tx_rings[vf_q_id];
77ca27c4
PG
3415 struct ice_txq_meta txq_meta = { 0 };
3416
3417 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
3418 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3419 goto error_param;
3420 }
3421
3422 /* Skip queue if not enabled */
3423 if (!test_bit(vf_q_id, vf->txq_ena))
3424 continue;
3425
3426 ice_fill_txq_meta(vsi, ring, &txq_meta);
3427
3428 if (ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id,
3429 ring, &txq_meta)) {
19cce2c6 3430 dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
77ca27c4
PG
3431 vf_q_id, vsi->vsi_num);
3432 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3433 goto error_param;
3434 }
3435
3436 /* Clear enabled queues flag */
3437 clear_bit(vf_q_id, vf->txq_ena);
77ca27c4 3438 }
1071a835
AV
3439 }
3440
e1fe6926
BC
3441 q_map = vqs->rx_queues;
3442 /* speed up Rx queue disable by batching them if possible */
3443 if (q_map &&
0ca469fb 3444 bitmap_equal(&q_map, vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF)) {
e1fe6926
BC
3445 if (ice_vsi_stop_all_rx_rings(vsi)) {
3446 dev_err(ice_pf_to_dev(vsi->back), "Failed to stop all Rx rings on VSI %d\n",
3447 vsi->vsi_num);
3448 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3449 goto error_param;
3450 }
77ca27c4 3451
0ca469fb 3452 bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
e1fe6926 3453 } else if (q_map) {
0ca469fb 3454 for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
77ca27c4
PG
3455 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
3456 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3457 goto error_param;
3458 }
3459
3460 /* Skip queue if not enabled */
3461 if (!test_bit(vf_q_id, vf->rxq_ena))
3462 continue;
3463
13a6233b
BC
3464 if (ice_vsi_ctrl_one_rx_ring(vsi, false, vf_q_id,
3465 true)) {
19cce2c6 3466 dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Rx ring %d on VSI %d\n",
77ca27c4
PG
3467 vf_q_id, vsi->vsi_num);
3468 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3469 goto error_param;
3470 }
3471
3472 /* Clear enabled queues flag */
3473 clear_bit(vf_q_id, vf->rxq_ena);
77ca27c4 3474 }
1071a835
AV
3475 }
3476
3477 /* Clear enabled queues flag */
e1fe6926 3478 if (v_ret == VIRTCHNL_STATUS_SUCCESS && ice_vf_has_no_qs_ena(vf))
77ca27c4 3479 clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
1071a835
AV
3480
3481error_param:
3482 /* send the response to the VF */
cf6c6e01 3483 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret,
1071a835
AV
3484 NULL, 0);
3485}
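/* Example of the batched Rx fast path above (hypothetical values): with
 * rxq_ena = 0xF and rx_queues = 0xF the request covers exactly the enabled
 * set, so one ice_vsi_stop_all_rx_rings() call replaces four per-ring stops
 * and the bitmap is cleared in a single bitmap_zero(); any partial request
 * falls back to the per-queue loop.
 */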
3486
0ca469fb
MW
3487/**
3488 * ice_cfg_interrupt
3489 * @vf: pointer to the VF info
3490 * @vsi: the VSI being configured
3491 * @vector_id: vector ID
3492 * @map: vector map for mapping vectors to queues
3493 * @q_vector: structure for interrupt vector
3494 * configure the IRQ to queue map
3495 */
3496static int
3497ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
3498 struct virtchnl_vector_map *map,
3499 struct ice_q_vector *q_vector)
3500{
3501 u16 vsi_q_id, vsi_q_id_idx;
3502 unsigned long qmap;
3503
3504 q_vector->num_ring_rx = 0;
3505 q_vector->num_ring_tx = 0;
3506
3507 qmap = map->rxq_map;
3508 for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
3509 vsi_q_id = vsi_q_id_idx;
3510
3511 if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
3512 return VIRTCHNL_STATUS_ERR_PARAM;
3513
3514 q_vector->num_ring_rx++;
3515 q_vector->rx.itr_idx = map->rxitr_idx;
3516 vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
3517 ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id,
3518 q_vector->rx.itr_idx);
3519 }
3520
3521 qmap = map->txq_map;
3522 for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
3523 vsi_q_id = vsi_q_id_idx;
3524
3525 if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
3526 return VIRTCHNL_STATUS_ERR_PARAM;
3527
3528 q_vector->num_ring_tx++;
3529 q_vector->tx.itr_idx = map->txitr_idx;
3530 vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
3531 ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id,
3532 q_vector->tx.itr_idx);
3533 }
3534
3535 return VIRTCHNL_STATUS_SUCCESS;
3536}
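/* Minimal sketch of the bitmap walk used above, assuming the queue map fits
 * in one unsigned long as it does here (ICE_MAX_RSS_QS_PER_VF bounds the
 * scan):
 *
 *	unsigned long qmap = map->rxq_map;
 *	u16 q;
 *
 *	for_each_set_bit(q, &qmap, ICE_MAX_RSS_QS_PER_VF)
 *		pr_info("queue %u maps to this vector\n", q);
 *
 * for_each_set_bit() only visits set positions, so unmapped queues cost
 * nothing.
 */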
3537
1071a835
AV
3538/**
3539 * ice_vc_cfg_irq_map_msg
3540 * @vf: pointer to the VF info
3541 * @msg: pointer to the msg buffer
3542 *
3543 * called from the VF to configure the IRQ to queue map
3544 */
3545static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
3546{
cf6c6e01 3547 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
0ca469fb 3548 u16 num_q_vectors_mapped, vsi_id, vector_id;
173e23c0 3549 struct virtchnl_irq_map_info *irqmap_info;
1071a835 3550 struct virtchnl_vector_map *map;
1071a835 3551 struct ice_pf *pf = vf->pf;
173e23c0 3552 struct ice_vsi *vsi;
1071a835
AV
3553 int i;
3554
173e23c0 3555 irqmap_info = (struct virtchnl_irq_map_info *)msg;
047e52c0
AV
3556 num_q_vectors_mapped = irqmap_info->num_vectors;
3557
047e52c0
AV
3558 /* Check to make sure number of VF vectors mapped is not greater than
3559 * number of VF vectors originally allocated, and check that
3560 * there is actually at least a single VF queue vector mapped
3561 */
ba0db585 3562 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
46c276ce 3563 pf->num_msix_per_vf < num_q_vectors_mapped ||
0ca469fb 3564 !num_q_vectors_mapped) {
cf6c6e01 3565 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
3566 goto error_param;
3567 }
3568
c5afbe99 3569 vsi = ice_get_vf_vsi(vf);
3f416961
A
3570 if (!vsi) {
3571 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3572 goto error_param;
3573 }
3574
047e52c0
AV
3575 for (i = 0; i < num_q_vectors_mapped; i++) {
3576 struct ice_q_vector *q_vector;
ba0db585 3577
1071a835
AV
3578 map = &irqmap_info->vecmap[i];
3579
3580 vector_id = map->vector_id;
3581 vsi_id = map->vsi_id;
b791cdd5
BC
3582 /* vector_id is always 0-based for each VF, and can never be
3583 * larger than or equal to the max allowed interrupts per VF
3584 */
46c276ce 3585 if (!(vector_id < pf->num_msix_per_vf) ||
b791cdd5 3586 !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
047e52c0
AV
3587 (!vector_id && (map->rxq_map || map->txq_map))) {
3588 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3589 goto error_param;
3590 }
3591
3592 /* No need to map VF miscellaneous or rogue vector */
3593 if (!vector_id)
3594 continue;
3595
3596 /* Subtract the non-queue vector from the vector_id passed by the VF
3597 * to get the actual VSI queue vector array index
3598 */
3599 q_vector = vsi->q_vectors[vector_id - ICE_NONQ_VECS_VF];
3600 if (!q_vector) {
cf6c6e01 3601 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
3602 goto error_param;
3603 }
3604
1071a835 3605 /* look out for an invalid queue index */
0ca469fb
MW
3606 v_ret = (enum virtchnl_status_code)
3607 ice_cfg_interrupt(vf, vsi, vector_id, map, q_vector);
3608 if (v_ret)
3609 goto error_param;
1071a835
AV
3610 }
3611
1071a835
AV
3612error_param:
3613 /* send the response to the VF */
cf6c6e01 3614 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret,
1071a835
AV
3615 NULL, 0);
3616}
3617
3618/**
3619 * ice_vc_cfg_qs_msg
3620 * @vf: pointer to the VF info
3621 * @msg: pointer to the msg buffer
3622 *
3623 * called from the VF to configure the Rx/Tx queues
3624 */
3625static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
3626{
cf6c6e01 3627 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1071a835
AV
3628 struct virtchnl_vsi_queue_config_info *qci =
3629 (struct virtchnl_vsi_queue_config_info *)msg;
3630 struct virtchnl_queue_pair_info *qpi;
5743020d 3631 struct ice_pf *pf = vf->pf;
1071a835 3632 struct ice_vsi *vsi;
7ad15440 3633 int i, q_idx;
1071a835
AV
3634
3635 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
cf6c6e01 3636 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
3637 goto error_param;
3638 }
3639
3640 if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
cf6c6e01 3641 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
3642 goto error_param;
3643 }
3644
c5afbe99 3645 vsi = ice_get_vf_vsi(vf);
9c7dd756 3646 if (!vsi) {
cf6c6e01 3647 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
5743020d
AA
3648 goto error_param;
3649 }
3650
0ca469fb 3651 if (qci->num_queue_pairs > ICE_MAX_RSS_QS_PER_VF ||
9c7dd756 3652 qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
19cce2c6 3653 dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n",
9c7dd756 3654 vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
3f416961
A
3655 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3656 goto error_param;
3657 }
3658
1071a835
AV
3659 for (i = 0; i < qci->num_queue_pairs; i++) {
3660 qpi = &qci->qpair[i];
3661 if (qpi->txq.vsi_id != qci->vsi_id ||
3662 qpi->rxq.vsi_id != qci->vsi_id ||
3663 qpi->rxq.queue_id != qpi->txq.queue_id ||
f8af5bf5 3664 qpi->txq.headwb_enabled ||
9c7dd756
MS
3665 !ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
3666 !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
1071a835 3667 !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
cf6c6e01 3668 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
3669 goto error_param;
3670 }
7ad15440
BC
3671
3672 q_idx = qpi->rxq.queue_id;
3673
3674 /* make sure selected "q_idx" is in valid range of queues
3675 * for selected "vsi"
3676 */
3677 if (q_idx >= vsi->alloc_txq || q_idx >= vsi->alloc_rxq) {
3678 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3679 goto error_param;
3680 }
3681
1071a835 3682 /* copy Tx queue info from VF into VSI */
77ca27c4 3683 if (qpi->txq.ring_len > 0) {
77ca27c4
PG
3684 vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
3685 vsi->tx_rings[i]->count = qpi->txq.ring_len;
7ad15440
BC
3686 if (ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx)) {
3687 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3688 goto error_param;
3689 }
1071a835 3690 }
77ca27c4
PG
3691
3692 /* copy Rx queue info from VF into VSI */
3693 if (qpi->rxq.ring_len > 0) {
a6aa7c8f
BC
3694 u16 max_frame_size = ice_vc_get_max_frame_size(vf);
3695
77ca27c4
PG
3696 vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
3697 vsi->rx_rings[i]->count = qpi->rxq.ring_len;
3698
3699 if (qpi->rxq.databuffer_size != 0 &&
3700 (qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
3701 qpi->rxq.databuffer_size < 1024)) {
3702 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3703 goto error_param;
3704 }
3705 vsi->rx_buf_len = qpi->rxq.databuffer_size;
3706 vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len;
a6aa7c8f 3707 if (qpi->rxq.max_pkt_size > max_frame_size ||
77ca27c4
PG
3708 qpi->rxq.max_pkt_size < 64) {
3709 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3710 goto error_param;
3711 }
77ca27c4 3712
7ad15440
BC
3713 vsi->max_frame = qpi->rxq.max_pkt_size;
3714 /* add space for the port VLAN since the VF driver is not
3715 * expected to account for it in the MTU calculation
3716 */
a19d7f7f 3717 if (ice_vf_is_port_vlan_ena(vf))
7ad15440 3718 vsi->max_frame += VLAN_HLEN;
1071a835 3719
7ad15440
BC
3720 if (ice_vsi_cfg_single_rxq(vsi, q_idx)) {
3721 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3722 goto error_param;
3723 }
3724 }
3725 }
1071a835
AV
3726
3727error_param:
3728 /* send the response to the VF */
cf6c6e01 3729 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, v_ret,
1071a835
AV
3730 NULL, 0);
3731}
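/* Worked example of the Rx limits enforced above (numbers taken from the
 * checks themselves, not from additional hardware documentation): a VF
 * requesting databuffer_size = 2048 and max_pkt_size = 1522 passes, since
 * 1024 <= 2048 <= 16 * 1024 - 128 = 16256 and 64 <= 1522 <= the value from
 * ice_vc_get_max_frame_size(); databuffer_size = 512 is rejected with
 * VIRTCHNL_STATUS_ERR_PARAM before any ring context is written.
 */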
3732
3733/**
3734 * ice_is_vf_trusted
3735 * @vf: pointer to the VF info
3736 */
3737static bool ice_is_vf_trusted(struct ice_vf *vf)
3738{
3739 return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
3740}
3741
3742/**
3743 * ice_can_vf_change_mac
3744 * @vf: pointer to the VF info
3745 *
3746 * Return true if the VF is allowed to change its MAC filters, false otherwise
3747 */
3748static bool ice_can_vf_change_mac(struct ice_vf *vf)
3749{
3750 /* If the VF MAC address has been set administratively (via the
3751 * ndo_set_vf_mac command), then deny permission to the VF to
3752 * add/delete unicast MAC addresses, unless the VF is trusted
3753 */
3754 if (vf->pf_set_mac && !ice_is_vf_trusted(vf))
3755 return false;
3756
3757 return true;
3758}
3759
51efbbdf
BC
3760/**
3761 * ice_vc_ether_addr_type - get type of virtchnl_ether_addr
3762 * @vc_ether_addr: used to extract the type
3763 */
3764static u8
3765ice_vc_ether_addr_type(struct virtchnl_ether_addr *vc_ether_addr)
3766{
3767 return (vc_ether_addr->type & VIRTCHNL_ETHER_ADDR_TYPE_MASK);
3768}
3769
3770/**
3771 * ice_is_vc_addr_legacy - check if the MAC address is from an older VF
3772 * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
3773 */
3774static bool
3775ice_is_vc_addr_legacy(struct virtchnl_ether_addr *vc_ether_addr)
3776{
3777 u8 type = ice_vc_ether_addr_type(vc_ether_addr);
3778
3779 return (type == VIRTCHNL_ETHER_ADDR_LEGACY);
3780}
3781
3782/**
3783 * ice_is_vc_addr_primary - check if the MAC address is the VF's primary MAC
3784 * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
3785 *
3786 * This function should only be called when the MAC address in
3787 * virtchnl_ether_addr is a valid unicast MAC
3788 */
3789static bool
3790ice_is_vc_addr_primary(struct virtchnl_ether_addr __maybe_unused *vc_ether_addr)
3791{
3792 u8 type = ice_vc_ether_addr_type(vc_ether_addr);
3793
3794 return (type == VIRTCHNL_ETHER_ADDR_PRIMARY);
3795}
3796
3797/**
3798 * ice_vfhw_mac_add - update the VF's cached hardware MAC if allowed
3799 * @vf: VF to update
3800 * @vc_ether_addr: structure from VIRTCHNL with MAC to add
3801 */
3802static void
3803ice_vfhw_mac_add(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr)
3804{
3805 u8 *mac_addr = vc_ether_addr->addr;
3806
3807 if (!is_valid_ether_addr(mac_addr))
3808 return;
3809
f28cd5ce
BC
3810 /* only allow legacy VF drivers to set the device and hardware MAC if it
3811 * is zero and allow new VF drivers to set the hardware MAC if the type
3812 * was correctly specified over VIRTCHNL
51efbbdf
BC
3813 */
3814 if ((ice_is_vc_addr_legacy(vc_ether_addr) &&
3815 is_zero_ether_addr(vf->hw_lan_addr.addr)) ||
f28cd5ce
BC
3816 ice_is_vc_addr_primary(vc_ether_addr)) {
3817 ether_addr_copy(vf->dev_lan_addr.addr, mac_addr);
51efbbdf 3818 ether_addr_copy(vf->hw_lan_addr.addr, mac_addr);
f28cd5ce 3819 }
51efbbdf 3820
f28cd5ce
BC
3821 /* hardware and device MACs are already set, but it's possible that the
3822 * VF driver sent the VIRTCHNL_OP_ADD_ETH_ADDR message before the
51efbbdf
BC
3823 * VIRTCHNL_OP_DEL_ETH_ADDR when trying to update its MAC, so save it
3824 * away for the legacy VF driver case as it will be updated in the
3825 * delete flow for this case
3826 */
3827 if (ice_is_vc_addr_legacy(vc_ether_addr)) {
3828 ether_addr_copy(vf->legacy_last_added_umac.addr,
3829 mac_addr);
3830 vf->legacy_last_added_umac.time_modified = jiffies;
3831 }
3832}
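/* Informal summary of the caching rules above: a legacy VF driver (untyped
 * addresses) only populates dev/hw_lan_addr while both are still zero, and
 * every legacy add is remembered in legacy_last_added_umac with a jiffies
 * timestamp; a VF that negotiated typed addresses updates both caches
 * whenever it sends a VIRTCHNL_ETHER_ADDR_PRIMARY entry.
 */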
3833
ed4c068d
BC
3834/**
3835 * ice_vc_add_mac_addr - attempt to add the MAC address passed in
3836 * @vf: pointer to the VF info
3837 * @vsi: pointer to the VF's VSI
51efbbdf 3838 * @vc_ether_addr: VIRTCHNL MAC address structure used to add MAC
ed4c068d
BC
3839 */
3840static int
51efbbdf
BC
3841ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
3842 struct virtchnl_ether_addr *vc_ether_addr)
ed4c068d
BC
3843{
3844 struct device *dev = ice_pf_to_dev(vf->pf);
51efbbdf 3845 u8 *mac_addr = vc_ether_addr->addr;
2ccc1c1c 3846 int ret;
ed4c068d 3847
f28cd5ce
BC
3848 /* device MAC already added */
3849 if (ether_addr_equal(mac_addr, vf->dev_lan_addr.addr))
ed4c068d
BC
3850 return 0;
3851
3852 if (is_unicast_ether_addr(mac_addr) && !ice_can_vf_change_mac(vf)) {
3853 dev_err(dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
3854 return -EPERM;
3855 }
3856
2ccc1c1c
TN
3857 ret = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
3858 if (ret == -EEXIST) {
ce572a5b 3859 dev_dbg(dev, "MAC %pM already exists for VF %d\n", mac_addr,
ed4c068d 3860 vf->vf_id);
ce572a5b
SD
3861 /* don't return since we might need to update
3862 * the primary MAC in ice_vfhw_mac_add() below
3863 */
2ccc1c1c 3864 } else if (ret) {
5f87ec48 3865 dev_err(dev, "Failed to add MAC %pM for VF %d, error %d\n",
2ccc1c1c 3866 mac_addr, vf->vf_id, ret);
c1484691 3867 return ret;
ce572a5b
SD
3868 } else {
3869 vf->num_mac++;
ed4c068d
BC
3870 }
3871
51efbbdf 3872 ice_vfhw_mac_add(vf, vc_ether_addr);
ed4c068d 3873
ce572a5b 3874 return ret;
ed4c068d
BC
3875}
3876
51efbbdf
BC
3877/**
3878 * ice_is_legacy_umac_expired - check if last added legacy unicast MAC expired
3879 * @last_added_umac: structure used to check expiration
3880 */
3881static bool ice_is_legacy_umac_expired(struct ice_time_mac *last_added_umac)
3882{
3883#define ICE_LEGACY_VF_MAC_CHANGE_EXPIRE_TIME msecs_to_jiffies(3000)
3884 return time_is_before_jiffies(last_added_umac->time_modified +
3885 ICE_LEGACY_VF_MAC_CHANGE_EXPIRE_TIME);
3886}
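/* Worked example of the expiry window: with
 * ICE_LEGACY_VF_MAC_CHANGE_EXPIRE_TIME = msecs_to_jiffies(3000), an address
 * recorded at time_modified = j is considered expired once
 * time_is_before_jiffies(j + msecs_to_jiffies(3000)) turns true, i.e. about
 * three seconds after the legacy VF driver last added it; after that
 * ice_update_legacy_cached_mac() no longer replays it on delete.
 */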
3887
ac19e03e
MS
3888/**
3889 * ice_update_legacy_cached_mac - update cached hardware MAC for legacy VF
3890 * @vf: VF to update
3891 * @vc_ether_addr: structure from VIRTCHNL with MAC to check
3892 *
3893 * only update cached hardware MAC for legacy VF drivers on delete
3894 * because we cannot guarantee order/type of MAC from the VF driver
3895 */
3896static void
3897ice_update_legacy_cached_mac(struct ice_vf *vf,
3898 struct virtchnl_ether_addr *vc_ether_addr)
3899{
3900 if (!ice_is_vc_addr_legacy(vc_ether_addr) ||
3901 ice_is_legacy_umac_expired(&vf->legacy_last_added_umac))
3902 return;
3903
3904 ether_addr_copy(vf->dev_lan_addr.addr, vf->legacy_last_added_umac.addr);
3905 ether_addr_copy(vf->hw_lan_addr.addr, vf->legacy_last_added_umac.addr);
3906}
3907
51efbbdf
BC
3908/**
3909 * ice_vfhw_mac_del - update the VF's cached hardware MAC if allowed
3910 * @vf: VF to update
3911 * @vc_ether_addr: structure from VIRTCHNL with MAC to delete
3912 */
3913static void
3914ice_vfhw_mac_del(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr)
3915{
3916 u8 *mac_addr = vc_ether_addr->addr;
3917
3918 if (!is_valid_ether_addr(mac_addr) ||
f28cd5ce 3919 !ether_addr_equal(vf->dev_lan_addr.addr, mac_addr))
51efbbdf
BC
3920 return;
3921
f28cd5ce
BC
3922 /* allow the device MAC to be repopulated in the add flow and don't
3923 * clear the hardware MAC (i.e. hw_lan_addr.addr) here as that is meant
3924 * to be persistent on VM reboot and across driver unload/load, which
3925 * won't work if we clear the hardware MAC here
3926 */
3927 eth_zero_addr(vf->dev_lan_addr.addr);
51efbbdf 3928
ac19e03e 3929 ice_update_legacy_cached_mac(vf, vc_ether_addr);
51efbbdf
BC
3930}
3931
ed4c068d
BC
3932/**
3933 * ice_vc_del_mac_addr - attempt to delete the MAC address passed in
3934 * @vf: pointer to the VF info
3935 * @vsi: pointer to the VF's VSI
51efbbdf 3936 * @vc_ether_addr: VIRTCHNL MAC address structure used to delete MAC
ed4c068d
BC
3937 */
3938static int
51efbbdf
BC
3939ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
3940 struct virtchnl_ether_addr *vc_ether_addr)
ed4c068d
BC
3941{
3942 struct device *dev = ice_pf_to_dev(vf->pf);
51efbbdf 3943 u8 *mac_addr = vc_ether_addr->addr;
5e24d598 3944 int status;
ed4c068d
BC
3945
3946 if (!ice_can_vf_change_mac(vf) &&
f28cd5ce 3947 ether_addr_equal(vf->dev_lan_addr.addr, mac_addr))
ed4c068d
BC
3948 return 0;
3949
1b8f15b6 3950 status = ice_fltr_remove_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
d54699e2 3951 if (status == -ENOENT) {
ed4c068d
BC
3952 dev_err(dev, "MAC %pM does not exist for VF %d\n", mac_addr,
3953 vf->vf_id);
3954 return -ENOENT;
3955 } else if (status) {
5f87ec48
TN
3956 dev_err(dev, "Failed to delete MAC %pM for VF %d, error %d\n",
3957 mac_addr, vf->vf_id, status);
ed4c068d
BC
3958 return -EIO;
3959 }
3960
51efbbdf 3961 ice_vfhw_mac_del(vf, vc_ether_addr);
ed4c068d
BC
3962
3963 vf->num_mac--;
3964
3965 return 0;
3966}
3967
1071a835
AV
3968/**
3969 * ice_vc_handle_mac_addr_msg
3970 * @vf: pointer to the VF info
3971 * @msg: pointer to the msg buffer
f9867df6 3972 * @set: true if MAC filters are being set, false otherwise
1071a835 3973 *
df17b7e0 3974 * add guest MAC address filter
1071a835
AV
3975 */
3976static int
3977ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
3978{
ed4c068d 3979 int (*ice_vc_cfg_mac)
51efbbdf
BC
3980 (struct ice_vf *vf, struct ice_vsi *vsi,
3981 struct virtchnl_ether_addr *virtchnl_ether_addr);
cf6c6e01 3982 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1071a835
AV
3983 struct virtchnl_ether_addr_list *al =
3984 (struct virtchnl_ether_addr_list *)msg;
3985 struct ice_pf *pf = vf->pf;
3986 enum virtchnl_ops vc_op;
1071a835 3987 struct ice_vsi *vsi;
1071a835
AV
3988 int i;
3989
ed4c068d 3990 if (set) {
1071a835 3991 vc_op = VIRTCHNL_OP_ADD_ETH_ADDR;
ed4c068d
BC
3992 ice_vc_cfg_mac = ice_vc_add_mac_addr;
3993 } else {
1071a835 3994 vc_op = VIRTCHNL_OP_DEL_ETH_ADDR;
ed4c068d
BC
3995 ice_vc_cfg_mac = ice_vc_del_mac_addr;
3996 }
1071a835
AV
3997
3998 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
3999 !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
cf6c6e01 4000 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
4001 goto handle_mac_exit;
4002 }
4003
ed4c068d
BC
4004 /* If this VF is not privileged, then we can't add more than a
4005 * limited number of addresses. Check to make sure that the
4006 * additions do not push us over the limit.
4007 */
1071a835
AV
4008 if (set && !ice_is_vf_trusted(vf) &&
4009 (vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) {
19cce2c6 4010 dev_err(ice_pf_to_dev(pf), "Can't add more MAC addresses, because VF-%d is not trusted, switch the VF to trusted mode in order to raise the limit\n",
d84b899a 4011 vf->vf_id);
cf6c6e01 4012 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
4013 goto handle_mac_exit;
4014 }
4015
c5afbe99 4016 vsi = ice_get_vf_vsi(vf);
f1ef73f5 4017 if (!vsi) {
cf6c6e01 4018 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
f1ef73f5
AA
4019 goto handle_mac_exit;
4020 }
1071a835
AV
4021
4022 for (i = 0; i < al->num_elements; i++) {
ed4c068d
BC
4023 u8 *mac_addr = al->list[i].addr;
4024 int result;
1071a835 4025
ed4c068d
BC
4026 if (is_broadcast_ether_addr(mac_addr) ||
4027 is_zero_ether_addr(mac_addr))
4028 continue;
1071a835 4029
51efbbdf 4030 result = ice_vc_cfg_mac(vf, vsi, &al->list[i]);
ed4c068d
BC
4031 if (result == -EEXIST || result == -ENOENT) {
4032 continue;
4033 } else if (result) {
4034 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
1071a835
AV
4035 goto handle_mac_exit;
4036 }
1071a835
AV
4037 }
4038
1071a835 4039handle_mac_exit:
1071a835 4040 /* send the response to the VF */
cf6c6e01 4041 return ice_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0);
1071a835
AV
4042}
4043
4044/**
4045 * ice_vc_add_mac_addr_msg
4046 * @vf: pointer to the VF info
4047 * @msg: pointer to the msg buffer
4048 *
4049 * add guest MAC address filter
4050 */
4051static int ice_vc_add_mac_addr_msg(struct ice_vf *vf, u8 *msg)
4052{
4053 return ice_vc_handle_mac_addr_msg(vf, msg, true);
4054}
4055
4056/**
4057 * ice_vc_del_mac_addr_msg
4058 * @vf: pointer to the VF info
4059 * @msg: pointer to the msg buffer
4060 *
4061 * remove guest MAC address filter
4062 */
4063static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
4064{
4065 return ice_vc_handle_mac_addr_msg(vf, msg, false);
4066}
4067
4068/**
4069 * ice_vc_request_qs_msg
4070 * @vf: pointer to the VF info
4071 * @msg: pointer to the msg buffer
4072 *
4073 * VFs get a default number of queues but can use this message to request a
df17b7e0 4074 * different number. If the request is successful, PF will reset the VF and
1071a835 4075 * return 0. If unsuccessful, PF will send message informing VF of number of
f9867df6 4076 * available queue pairs via virtchnl message response to VF.
1071a835
AV
4077 */
4078static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
4079{
cf6c6e01 4080 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1071a835
AV
4081 struct virtchnl_vf_res_request *vfres =
4082 (struct virtchnl_vf_res_request *)msg;
cbfe31b5 4083 u16 req_queues = vfres->num_queue_pairs;
1071a835 4084 struct ice_pf *pf = vf->pf;
cbfe31b5
PK
4085 u16 max_allowed_vf_queues;
4086 u16 tx_rx_queue_left;
4015d11e 4087 struct device *dev;
4ee656bb 4088 u16 cur_queues;
1071a835 4089
4015d11e 4090 dev = ice_pf_to_dev(pf);
1071a835 4091 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
cf6c6e01 4092 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
4093 goto error_param;
4094 }
4095
5743020d 4096 cur_queues = vf->num_vf_qs;
8c243700
AV
4097 tx_rx_queue_left = min_t(u16, ice_get_avail_txq_count(pf),
4098 ice_get_avail_rxq_count(pf));
5743020d 4099 max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
cbfe31b5 4100 if (!req_queues) {
4015d11e 4101 dev_err(dev, "VF %d tried to request 0 queues. Ignoring.\n",
cbfe31b5 4102 vf->vf_id);
0ca469fb 4103 } else if (req_queues > ICE_MAX_RSS_QS_PER_VF) {
4015d11e 4104 dev_err(dev, "VF %d tried to request more than %d queues.\n",
0ca469fb
MW
4105 vf->vf_id, ICE_MAX_RSS_QS_PER_VF);
4106 vfres->num_queue_pairs = ICE_MAX_RSS_QS_PER_VF;
cbfe31b5
PK
4107 } else if (req_queues > cur_queues &&
4108 req_queues - cur_queues > tx_rx_queue_left) {
19cce2c6 4109 dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n",
1071a835 4110 vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
cbfe31b5 4111 vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
0ca469fb 4112 ICE_MAX_RSS_QS_PER_VF);
1071a835
AV
4113 } else {
4114 /* request is successful, then reset VF */
4115 vf->num_req_qs = req_queues;
ff010eca 4116 ice_vc_reset_vf(vf);
4015d11e 4117 dev_info(dev, "VF %d granted request of %u queues.\n",
1071a835
AV
4118 vf->vf_id, req_queues);
4119 return 0;
4120 }
4121
4122error_param:
4123 /* send the response to the VF */
4124 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
cf6c6e01 4125 v_ret, (u8 *)vfres, sizeof(*vfres));
1071a835
AV
4126}
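/* Worked example of the request arithmetic above (hypothetical counts):
 * with cur_queues = 4 and tx_rx_queue_left = 12, max_allowed_vf_queues is
 * 16, so a request for 16 queues needs 16 - 4 = 12 more, which fits, and
 * the VF is reset with the new count; a request above
 * ICE_MAX_RSS_QS_PER_VF is not granted and the supported maximum is
 * reported back in vfres->num_queue_pairs instead.
 */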
4127
cbc8b564
BC
4128/**
4129 * ice_is_supported_port_vlan_proto - make sure the vlan_proto is supported
4130 * @hw: hardware structure used to check the VLAN mode
4131 * @vlan_proto: VLAN TPID being checked
4132 *
4133 * If the device is configured in Double VLAN Mode (DVM), then both ETH_P_8021Q
4134 * and ETH_P_8021AD are supported. If the device is configured in Single VLAN
4135 * Mode (SVM), then only ETH_P_8021Q is supported.
4136 */
4137static bool
4138ice_is_supported_port_vlan_proto(struct ice_hw *hw, u16 vlan_proto)
4139{
4140 bool is_supported = false;
4141
4142 switch (vlan_proto) {
4143 case ETH_P_8021Q:
4144 is_supported = true;
4145 break;
4146 case ETH_P_8021AD:
4147 if (ice_is_dvm_ena(hw))
4148 is_supported = true;
4149 break;
4150 }
4151
4152 return is_supported;
4153}
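/* Usage sketch mirroring the caller below (ice_set_vf_port_vlan()):
 *
 *	u16 tpid = ntohs(vlan_proto);
 *
 *	if (!ice_is_supported_port_vlan_proto(&pf->hw, tpid))
 *		return -EPROTONOSUPPORT;
 *
 * In SVM only ETH_P_8021Q (0x8100) passes; in DVM ETH_P_8021AD (0x88A8) is
 * accepted as well.
 */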
4154
7c710869
AV
4155/**
4156 * ice_set_vf_port_vlan
4157 * @netdev: network interface device structure
4158 * @vf_id: VF identifier
f9867df6 4159 * @vlan_id: VLAN ID being set
7c710869
AV
4160 * @qos: priority setting
4161 * @vlan_proto: VLAN protocol
4162 *
f9867df6 4163 * program VF Port VLAN ID and/or QoS
7c710869
AV
4164 */
4165int
4166ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
4167 __be16 vlan_proto)
4168{
4c66d227 4169 struct ice_pf *pf = ice_netdev_to_pf(netdev);
cbc8b564 4170 u16 local_vlan_proto = ntohs(vlan_proto);
4015d11e 4171 struct device *dev;
7c710869 4172 struct ice_vf *vf;
c54d209c 4173 int ret;
7c710869 4174
4015d11e 4175 dev = ice_pf_to_dev(pf);
4c66d227 4176 if (ice_validate_vf_id(pf, vf_id))
7c710869 4177 return -EINVAL;
7c710869 4178
61c9ce86
BC
4179 if (vlan_id >= VLAN_N_VID || qos > 7) {
4180 dev_err(dev, "Invalid Port VLAN parameters for VF %d, ID %d, QoS %d\n",
4181 vf_id, vlan_id, qos);
7c710869
AV
4182 return -EINVAL;
4183 }
4184
cbc8b564
BC
4185 if (!ice_is_supported_port_vlan_proto(&pf->hw, local_vlan_proto)) {
4186 dev_err(dev, "VF VLAN protocol 0x%04x is not supported\n",
4187 local_vlan_proto);
7c710869
AV
4188 return -EPROTONOSUPPORT;
4189 }
4190
4191 vf = &pf->vf[vf_id];
c54d209c
BC
4192 ret = ice_check_vf_ready_for_cfg(vf);
4193 if (ret)
4194 return ret;
7c710869 4195
a19d7f7f 4196 if (ice_vf_get_port_vlan_prio(vf) == qos &&
cbc8b564 4197 ice_vf_get_port_vlan_tpid(vf) == local_vlan_proto &&
a19d7f7f 4198 ice_vf_get_port_vlan_id(vf) == vlan_id) {
7c710869 4199 /* duplicate request, so just return success */
cbc8b564
BC
4200 dev_dbg(dev, "Duplicate port VLAN %u, QoS %u, TPID 0x%04x request\n",
4201 vlan_id, qos, local_vlan_proto);
c54d209c 4202 return 0;
7c710869
AV
4203 }
4204
e6ba5273
BC
4205 mutex_lock(&vf->cfg_lock);
4206
cbc8b564 4207 vf->port_vlan_info = ICE_VLAN(local_vlan_proto, vlan_id, qos);
a19d7f7f 4208 if (ice_vf_is_port_vlan_ena(vf))
cbc8b564
BC
4209 dev_info(dev, "Setting VLAN %u, QoS %u, TPID 0x%04x on VF %d\n",
4210 vlan_id, qos, local_vlan_proto, vf_id);
cf0bf41d
BC
4211 else
4212 dev_info(dev, "Clearing port VLAN on VF %d\n", vf_id);
7c710869 4213
cf0bf41d 4214 ice_vc_reset_vf(vf);
e6ba5273 4215 mutex_unlock(&vf->cfg_lock);
7c710869 4216
c54d209c 4217 return 0;
7c710869
AV
4218}
4219
d4bc4e2d
BC
4220/**
4221 * ice_vf_vlan_offload_ena - determine if capabilities support VLAN offloads
4222 * @caps: VF driver negotiated capabilities
4223 *
4224 * Return true if VIRTCHNL_VF_OFFLOAD_VLAN capability is set, else return false
4225 */
4226static bool ice_vf_vlan_offload_ena(u32 caps)
4227{
4228 return !!(caps & VIRTCHNL_VF_OFFLOAD_VLAN);
4229}
4230
cc71de8f
BC
4231/**
4232 * ice_is_vlan_promisc_allowed - check if VLAN promiscuous config is allowed
4233 * @vf: VF used to determine if VLAN promiscuous config is allowed
4234 */
4235static bool ice_is_vlan_promisc_allowed(struct ice_vf *vf)
4236{
4237 if ((test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
4238 test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) &&
4239 test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, vf->pf->flags))
4240 return true;
4241
4242 return false;
4243}
4244
4245/**
4246 * ice_vf_ena_vlan_promisc - Enable Tx/Rx VLAN promiscuous for the VLAN
4247 * @vsi: VF's VSI used to enable VLAN promiscuous mode
4248 * @vlan: VLAN used to enable VLAN promiscuous
4249 *
4250 * This function should only be called if VLAN promiscuous mode is allowed,
4251 * which can be determined via ice_is_vlan_promisc_allowed().
4252 */
4253static int ice_vf_ena_vlan_promisc(struct ice_vsi *vsi, struct ice_vlan *vlan)
4254{
4255 u8 promisc_m = ICE_PROMISC_VLAN_TX | ICE_PROMISC_VLAN_RX;
4256 int status;
4257
4258 status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m,
4259 vlan->vid);
4260 if (status && status != -EEXIST)
4261 return status;
4262
4263 return 0;
4264}
4265
4266/**
4267 * ice_vf_dis_vlan_promisc - Disable Tx/Rx VLAN promiscuous for the VLAN
4268 * @vsi: VF's VSI used to disable VLAN promiscuous mode for
4269 * @vlan: VLAN used to disable VLAN promiscuous
4270 *
4271 * This function should only be called if VLAN promiscuous mode is allowed,
4272 * which can be determined via ice_is_vlan_promisc_allowed().
4273 */
4274static int ice_vf_dis_vlan_promisc(struct ice_vsi *vsi, struct ice_vlan *vlan)
4275{
4276 u8 promisc_m = ICE_PROMISC_VLAN_TX | ICE_PROMISC_VLAN_RX;
4277 int status;
4278
4279 status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m,
4280 vlan->vid);
4281 if (status && status != -ENOENT)
4282 return status;
4283
4284 return 0;
4285}
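/* Note on the error filtering in the two helpers above: -EEXIST on enable
 * and -ENOENT on disable mean "already in the requested state" and are
 * deliberately swallowed, so a repeated VIRTCHNL request is idempotent
 * rather than a failure reported back to the VF.
 */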
4286
c31af68a
BC
4287/**
4288 * ice_vf_has_max_vlans - check if VF already has the max allowed VLAN filters
4289 * @vf: VF to check against
4290 * @vsi: VF's VSI
4291 *
4292 * If the VF is trusted then the VF is allowed to add as many VLANs as it
4293 * wants to, so return false.
4294 *
4295 * When the VF is untrusted, compare the number of non-zero VLANs + 1 to the max
4296 * allowed VLANs for an untrusted VF. Return the result of this comparison.
4297 */
4298static bool ice_vf_has_max_vlans(struct ice_vf *vf, struct ice_vsi *vsi)
4299{
4300 if (ice_is_vf_trusted(vf))
4301 return false;
4302
4303#define ICE_VF_ADDED_VLAN_ZERO_FLTRS 1
4304 return ((ice_vsi_num_non_zero_vlans(vsi) +
4305 ICE_VF_ADDED_VLAN_ZERO_FLTRS) >= ICE_MAX_VLAN_PER_VF);
4306}
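/* Worked example, assuming ICE_MAX_VLAN_PER_VF is 8 as in this driver's
 * headers: an untrusted VF with 7 non-zero VLAN filters counts 7 + 1 (the
 * implicit VLAN 0 filter) >= 8, so ice_vf_has_max_vlans() returns true and
 * further add requests are not programmed; a trusted VF always gets false.
 */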
4307
1071a835
AV
4308/**
4309 * ice_vc_process_vlan_msg
4310 * @vf: pointer to the VF info
4311 * @msg: pointer to the msg buffer
4312 * @add_v: Add VLAN if true, otherwise delete VLAN
4313 *
f9867df6 4314 * Process virtchnl op to add or remove programmed guest VLAN ID
1071a835
AV
4315 */
4316static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
4317{
cf6c6e01 4318 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1071a835
AV
4319 struct virtchnl_vlan_filter_list *vfl =
4320 (struct virtchnl_vlan_filter_list *)msg;
1071a835 4321 struct ice_pf *pf = vf->pf;
5eda8afd 4322 bool vlan_promisc = false;
1071a835 4323 struct ice_vsi *vsi;
4015d11e 4324 struct device *dev;
5eda8afd 4325 int status = 0;
1071a835
AV
4326 int i;
4327
4015d11e 4328 dev = ice_pf_to_dev(pf);
1071a835 4329 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
cf6c6e01 4330 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
4331 goto error_param;
4332 }
4333
d4bc4e2d
BC
4334 if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
4335 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4336 goto error_param;
4337 }
4338
1071a835 4339 if (!ice_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
cf6c6e01 4340 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
4341 goto error_param;
4342 }
4343
1071a835 4344 for (i = 0; i < vfl->num_elements; i++) {
61c9ce86 4345 if (vfl->vlan_id[i] >= VLAN_N_VID) {
cf6c6e01 4346 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
19cce2c6
AV
4347 dev_err(dev, "invalid VF VLAN id %d\n",
4348 vfl->vlan_id[i]);
1071a835
AV
4349 goto error_param;
4350 }
4351 }
4352
c5afbe99 4353 vsi = ice_get_vf_vsi(vf);
1071a835 4354 if (!vsi) {
cf6c6e01 4355 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
4356 goto error_param;
4357 }
4358
c31af68a 4359 if (add_v && ice_vf_has_max_vlans(vf, vsi)) {
19cce2c6 4360 dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode in order to add more VLAN filters\n",
cd6d6b83
BC
4361 vf->vf_id);
4362 /* There is no need to let VF know about being not trusted,
4363 * so we can just return success message here
4364 */
4365 goto error_param;
4366 }
4367
cc71de8f
BC
4368 /* in DVM a VF can add/delete inner VLAN filters when
4369 * VIRTCHNL_VF_OFFLOAD_VLAN is negotiated, so only reject in SVM
4370 */
4371 if (ice_vf_is_port_vlan_ena(vf) && !ice_is_dvm_ena(&pf->hw)) {
cf6c6e01 4372 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
4373 goto error_param;
4374 }
4375
cc71de8f
BC
4376 /* in DVM VLAN promiscuous is based on the outer VLAN, which would be
4377 * the port VLAN if VIRTCHNL_VF_OFFLOAD_VLAN was negotiated, so only
4378 * allow vlan_promisc = true in SVM and if no port VLAN is configured
4379 */
4380 vlan_promisc = ice_is_vlan_promisc_allowed(vf) &&
4381 !ice_is_dvm_ena(&pf->hw) &&
4382 !ice_vf_is_port_vlan_ena(vf);
5eda8afd 4383
1071a835
AV
4384 if (add_v) {
4385 for (i = 0; i < vfl->num_elements; i++) {
4386 u16 vid = vfl->vlan_id[i];
fb05ba12 4387 struct ice_vlan vlan;
1071a835 4388
c31af68a 4389 if (ice_vf_has_max_vlans(vf, vsi)) {
19cce2c6 4390 dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode in order to add more VLAN filters\n",
5079b853
AA
4391 vf->vf_id);
4392 /* There is no need to let VF know about being
4393 * not trusted, so we can just return success
4394 * message here as well.
4395 */
4396 goto error_param;
4397 }
4398
cd6d6b83
BC
4399 /* we add VLAN 0 by default for each VF so we can enable
4400 * Tx VLAN anti-spoof without triggering MDD events so
4401 * we don't need to add it again here
4402 */
4403 if (!vid)
4404 continue;
4405
2bfefa2d 4406 vlan = ICE_VLAN(ETH_P_8021Q, vid, 0);
c31af68a 4407 status = vsi->inner_vlan_ops.add_vlan(vsi, &vlan);
cd6d6b83 4408 if (status) {
cf6c6e01 4409 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
5eda8afd
AA
4410 goto error_param;
4411 }
1071a835 4412
cc71de8f
BC
4413 /* Enable VLAN filtering on first non-zero VLAN */
4414 if (!vlan_promisc && vid && !ice_is_dvm_ena(&pf->hw)) {
4415 if (vsi->inner_vlan_ops.ena_rx_filtering(vsi)) {
cf6c6e01 4416 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
19cce2c6 4417 dev_err(dev, "Enable VLAN pruning on VLAN ID: %d failed\n",
5eda8afd
AA
4418 vid);
4419 goto error_param;
4420 }
42f3efef 4421 } else if (vlan_promisc) {
cc71de8f 4422 status = ice_vf_ena_vlan_promisc(vsi, &vlan);
cf6c6e01
MW
4423 if (status) {
4424 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
19cce2c6 4425 dev_err(dev, "Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n",
5eda8afd 4426 vid, status);
cf6c6e01 4427 }
1071a835
AV
4428 }
4429 }
4430 } else {
bb877b22
AA
4431 /* In the case of a non-trusted VF, the number of VLAN elements
4432 * passed to the PF for removal might be greater than the number of
4433 * VLAN filters programmed for that VF, so use the actual number of
4434 * VLANs added earlier with the add VLAN opcode. This avoids trying
4435 * to remove a VLAN that doesn't exist, which would send an
4436 * erroneous failure message back to the VF
4437 */
4438 int num_vf_vlan;
4439
cd6d6b83 4440 num_vf_vlan = vsi->num_vlan;
bb877b22 4441 for (i = 0; i < vfl->num_elements && i < num_vf_vlan; i++) {
1071a835 4442 u16 vid = vfl->vlan_id[i];
fb05ba12 4443 struct ice_vlan vlan;
1071a835 4444
cd6d6b83
BC
4445 /* we add VLAN 0 by default for each VF so we can enable
4446 * Tx VLAN anti-spoof without triggering MDD events so
4447 * we don't want a VIRTCHNL request to remove it
4448 */
4449 if (!vid)
4450 continue;
4451
2bfefa2d 4452 vlan = ICE_VLAN(ETH_P_8021Q, vid, 0);
c31af68a 4453 status = vsi->inner_vlan_ops.del_vlan(vsi, &vlan);
cd6d6b83 4454 if (status) {
cf6c6e01 4455 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
5eda8afd
AA
4456 goto error_param;
4457 }
4458
cc71de8f
BC
4459 /* Disable VLAN filtering when only VLAN 0 is left */
4460 if (!ice_vsi_has_non_zero_vlans(vsi))
4461 vsi->inner_vlan_ops.dis_rx_filtering(vsi);
1071a835 4462
cc71de8f
BC
4463 if (vlan_promisc)
4464 ice_vf_dis_vlan_promisc(vsi, &vlan);
1071a835
AV
4465 }
4466 }
4467
4468error_param:
4469 /* send the response to the VF */
4470 if (add_v)
cf6c6e01 4471 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, v_ret,
1071a835
AV
4472 NULL, 0);
4473 else
cf6c6e01 4474 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, v_ret,
1071a835
AV
4475 NULL, 0);
4476}
4477
4478/**
4479 * ice_vc_add_vlan_msg
4480 * @vf: pointer to the VF info
4481 * @msg: pointer to the msg buffer
4482 *
f9867df6 4483 * Add and program guest VLAN ID
1071a835
AV
4484 */
4485static int ice_vc_add_vlan_msg(struct ice_vf *vf, u8 *msg)
4486{
4487 return ice_vc_process_vlan_msg(vf, msg, true);
4488}
4489
4490/**
4491 * ice_vc_remove_vlan_msg
4492 * @vf: pointer to the VF info
4493 * @msg: pointer to the msg buffer
4494 *
f9867df6 4495 * remove programmed guest VLAN ID
1071a835
AV
4496 */
4497static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg)
4498{
4499 return ice_vc_process_vlan_msg(vf, msg, false);
4500}
4501
4502/**
4503 * ice_vc_ena_vlan_stripping
4504 * @vf: pointer to the VF info
4505 *
4506 * Enable VLAN header stripping for a given VF
4507 */
4508static int ice_vc_ena_vlan_stripping(struct ice_vf *vf)
4509{
cf6c6e01 4510 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1071a835
AV
4511 struct ice_vsi *vsi;
4512
4513 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
cf6c6e01 4514 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
4515 goto error_param;
4516 }
4517
d4bc4e2d
BC
4518 if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
4519 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4520 goto error_param;
4521 }
4522
c5afbe99 4523 vsi = ice_get_vf_vsi(vf);
c31af68a 4524 if (vsi->inner_vlan_ops.ena_stripping(vsi, ETH_P_8021Q))
cf6c6e01 4525 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
4526
4527error_param:
4528 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
cf6c6e01 4529 v_ret, NULL, 0);
1071a835
AV
4530}
4531
4532/**
4533 * ice_vc_dis_vlan_stripping
4534 * @vf: pointer to the VF info
4535 *
4536 * Disable VLAN header stripping for a given VF
4537 */
4538static int ice_vc_dis_vlan_stripping(struct ice_vf *vf)
4539{
cf6c6e01 4540 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1071a835
AV
4541 struct ice_vsi *vsi;
4542
4543 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
cf6c6e01 4544 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
4545 goto error_param;
4546 }
4547
d4bc4e2d
BC
4548 if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
4549 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4550 goto error_param;
4551 }
4552
c5afbe99 4553 vsi = ice_get_vf_vsi(vf);
f1ef73f5 4554 if (!vsi) {
cf6c6e01 4555 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
f1ef73f5
AA
4556 goto error_param;
4557 }
4558
c31af68a 4559 if (vsi->inner_vlan_ops.dis_stripping(vsi))
cf6c6e01 4560 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
4561
4562error_param:
4563 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
cf6c6e01 4564 v_ret, NULL, 0);
1071a835
AV
4565}
4566
2f9ec241
BC
4567/**
4568 * ice_vf_init_vlan_stripping - enable/disable VLAN stripping on initialization
4569 * @vf: VF to enable/disable VLAN stripping for on initialization
4570 *
cc71de8f
BC
4571 * Set the default for VLAN stripping based on whether a port VLAN is configured
4572 * and the current VLAN mode of the device.
2f9ec241
BC
4573 */
4574static int ice_vf_init_vlan_stripping(struct ice_vf *vf)
4575{
c5afbe99 4576 struct ice_vsi *vsi = ice_get_vf_vsi(vf);
2f9ec241
BC
4577
4578 if (!vsi)
4579 return -EINVAL;
4580
cc71de8f
BC
4581 /* don't modify stripping if port VLAN is configured in SVM since the
4582 * port VLAN is based on the inner/single VLAN in SVM
4583 */
4584 if (ice_vf_is_port_vlan_ena(vf) && !ice_is_dvm_ena(&vsi->back->hw))
2f9ec241
BC
4585 return 0;
4586
4587 if (ice_vf_vlan_offload_ena(vf->driver_caps))
c31af68a 4588 return vsi->inner_vlan_ops.ena_stripping(vsi, ETH_P_8021Q);
2f9ec241 4589 else
c31af68a 4590 return vsi->inner_vlan_ops.dis_stripping(vsi);
2f9ec241
BC
4591}
4592
cc71de8f
BC
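/**
 * ice_vc_get_max_vlan_fltrs - get the maximum VLAN filters the VF may use
 * @vf: VF used to determine the filter limit
 *
 * A trusted VF may use the full VLAN ID space, while an untrusted VF is
 * limited to ICE_MAX_VLAN_PER_VF filters.
 */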
4593static u16 ice_vc_get_max_vlan_fltrs(struct ice_vf *vf)
4594{
4595 if (vf->trusted)
4596 return VLAN_N_VID;
4597 else
4598 return ICE_MAX_VLAN_PER_VF;
4599}
4600
4601/**
4602 * ice_vf_outer_vlan_not_allowed - check if outer VLAN can be used when the device is in DVM
4603 * @vf: VF being checked
4604 */
4605static bool ice_vf_outer_vlan_not_allowed(struct ice_vf *vf)
4606{
4607 if (ice_vf_is_port_vlan_ena(vf))
4608 return true;
4609
4610 return false;
4611}
4612
4613/**
4614 * ice_vc_set_dvm_caps - set VLAN capabilities when the device is in DVM
4615 * @vf: VF that capabilities are being set for
4616 * @caps: VLAN capabilities to populate
4617 *
4618 * Determine VLAN capabilities support based on whether a port VLAN is
4619 * configured. If a port VLAN is configured then the VF should use the inner
4620 * filtering/offload capabilities since the port VLAN is using the outer VLAN
4621 * capabilities.
4622 */
4623static void
4624ice_vc_set_dvm_caps(struct ice_vf *vf, struct virtchnl_vlan_caps *caps)
4625{
4626 struct virtchnl_vlan_supported_caps *supported_caps;
4627
4628 if (ice_vf_outer_vlan_not_allowed(vf)) {
4629 /* until support for inner VLAN filtering with a port VLAN
4630 * is added, only support software offloaded inner VLANs
4631 * when a port VLAN is configured in DVM
4632 */
4633 supported_caps = &caps->filtering.filtering_support;
4634 supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
4635
4636 supported_caps = &caps->offloads.stripping_support;
4637 supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
4638 VIRTCHNL_VLAN_TOGGLE |
4639 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
4640 supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
4641
4642 supported_caps = &caps->offloads.insertion_support;
4643 supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
4644 VIRTCHNL_VLAN_TOGGLE |
4645 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
4646 supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
4647
4648 caps->offloads.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;
4649 caps->offloads.ethertype_match =
4650 VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
4651 } else {
4652 supported_caps = &caps->filtering.filtering_support;
4653 supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
4654 supported_caps->outer = VIRTCHNL_VLAN_ETHERTYPE_8100 |
4655 VIRTCHNL_VLAN_ETHERTYPE_88A8 |
4656 VIRTCHNL_VLAN_ETHERTYPE_9100 |
4657 VIRTCHNL_VLAN_ETHERTYPE_AND;
4658 caps->filtering.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100 |
4659 VIRTCHNL_VLAN_ETHERTYPE_88A8 |
4660 VIRTCHNL_VLAN_ETHERTYPE_9100;
4661
4662 supported_caps = &caps->offloads.stripping_support;
4663 supported_caps->inner = VIRTCHNL_VLAN_TOGGLE |
4664 VIRTCHNL_VLAN_ETHERTYPE_8100 |
4665 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
4666 supported_caps->outer = VIRTCHNL_VLAN_TOGGLE |
4667 VIRTCHNL_VLAN_ETHERTYPE_8100 |
4668 VIRTCHNL_VLAN_ETHERTYPE_88A8 |
4669 VIRTCHNL_VLAN_ETHERTYPE_9100 |
4670 VIRTCHNL_VLAN_ETHERTYPE_XOR |
4671 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2;
4672
4673 supported_caps = &caps->offloads.insertion_support;
4674 supported_caps->inner = VIRTCHNL_VLAN_TOGGLE |
4675 VIRTCHNL_VLAN_ETHERTYPE_8100 |
4676 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
4677 supported_caps->outer = VIRTCHNL_VLAN_TOGGLE |
4678 VIRTCHNL_VLAN_ETHERTYPE_8100 |
4679 VIRTCHNL_VLAN_ETHERTYPE_88A8 |
4680 VIRTCHNL_VLAN_ETHERTYPE_9100 |
4681 VIRTCHNL_VLAN_ETHERTYPE_XOR |
4682 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2;
4683
4684 caps->offloads.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;
4685
4686 caps->offloads.ethertype_match =
4687 VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
4688 }
4689
4690 caps->filtering.max_filters = ice_vc_get_max_vlan_fltrs(vf);
4691}
4692
4693/**
4694 * ice_vc_set_svm_caps - set VLAN capabilities when the device is in SVM
4695 * @vf: VF that capabilities are being set for
4696 * @caps: VLAN capabilities to populate
4697 *
4698 * Determine VLAN capabilities support based on whether a port VLAN is
4699 * configured. If a port VLAN is configured then the VF does not have any VLAN
4700 * filtering or offload capabilities since the port VLAN is using the inner VLAN
4701 * capabilities in single VLAN mode (SVM). Otherwise allow the VF to use inner
4702 * VLAN filtering and offload capabilities.
4703 */
4704static void
4705ice_vc_set_svm_caps(struct ice_vf *vf, struct virtchnl_vlan_caps *caps)
4706{
4707 struct virtchnl_vlan_supported_caps *supported_caps;
4708
4709 if (ice_vf_is_port_vlan_ena(vf)) {
4710 supported_caps = &caps->filtering.filtering_support;
4711 supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
4712 supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
4713
4714 supported_caps = &caps->offloads.stripping_support;
4715 supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
4716 supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
4717
4718 supported_caps = &caps->offloads.insertion_support;
4719 supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
4720 supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
4721
4722 caps->offloads.ethertype_init = VIRTCHNL_VLAN_UNSUPPORTED;
4723 caps->offloads.ethertype_match = VIRTCHNL_VLAN_UNSUPPORTED;
4724 caps->filtering.max_filters = 0;
4725 } else {
4726 supported_caps = &caps->filtering.filtering_support;
4727 supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100;
4728 supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
4729 caps->filtering.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;
4730
4731 supported_caps = &caps->offloads.stripping_support;
4732 supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
4733 VIRTCHNL_VLAN_TOGGLE |
4734 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
4735 supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
4736
4737 supported_caps = &caps->offloads.insertion_support;
4738 supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
4739 VIRTCHNL_VLAN_TOGGLE |
4740 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
4741 supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
4742
4743 caps->offloads.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;
4744 caps->offloads.ethertype_match =
4745 VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
4746 caps->filtering.max_filters = ice_vc_get_max_vlan_fltrs(vf);
4747 }
4748}
4749
4750/**
4751 * ice_vc_get_offload_vlan_v2_caps - determine VF's VLAN capabilities
4752 * @vf: VF to determine VLAN capabilities for
4753 *
4754 * This will only be called if the VF and PF successfully negotiated
4755 * VIRTCHNL_VF_OFFLOAD_VLAN_V2.
4756 *
4757 * Set VLAN capabilities based on the current VLAN mode and whether a port VLAN
4758 * is configured or not.
4759 */
4760static int ice_vc_get_offload_vlan_v2_caps(struct ice_vf *vf)
4761{
4762 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
4763 struct virtchnl_vlan_caps *caps = NULL;
4764 int err, len = 0;
4765
4766 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
4767 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4768 goto out;
4769 }
4770
4771 caps = kzalloc(sizeof(*caps), GFP_KERNEL);
4772 if (!caps) {
4773 v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
4774 goto out;
4775 }
4776 len = sizeof(*caps);
4777
4778 if (ice_is_dvm_ena(&vf->pf->hw))
4779 ice_vc_set_dvm_caps(vf, caps);
4780 else
4781 ice_vc_set_svm_caps(vf, caps);
4782
4783 /* store negotiated caps to prevent invalid VF messages */
4784 memcpy(&vf->vlan_v2_caps, caps, sizeof(*caps));
4785
4786out:
4787 err = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS,
4788 v_ret, (u8 *)caps, len);
4789 kfree(caps);
4790 return err;
4791}
4792
4793/**
4794 * ice_vc_validate_vlan_tpid - validate VLAN TPID
4795 * @filtering_caps: negotiated/supported VLAN filtering capabilities
4796 * @tpid: VLAN TPID used for validation
4797 *
4798 * Convert the VLAN TPID to a VIRTCHNL_VLAN_ETHERTYPE_* and then compare against
4799 * the negotiated/supported filtering caps to see if the VLAN TPID is valid.
4800 */
4801static bool ice_vc_validate_vlan_tpid(u16 filtering_caps, u16 tpid)
4802{
4803 enum virtchnl_vlan_support vlan_ethertype = VIRTCHNL_VLAN_UNSUPPORTED;
4804
4805 switch (tpid) {
4806 case ETH_P_8021Q:
4807 vlan_ethertype = VIRTCHNL_VLAN_ETHERTYPE_8100;
4808 break;
4809 case ETH_P_8021AD:
4810 vlan_ethertype = VIRTCHNL_VLAN_ETHERTYPE_88A8;
4811 break;
4812 case ETH_P_QINQ1:
4813 vlan_ethertype = VIRTCHNL_VLAN_ETHERTYPE_9100;
4814 break;
4815 }
4816
4817 if (!(filtering_caps & vlan_ethertype))
4818 return false;
4819
4820 return true;
4821}
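/* Example of the mapping above (hypothetical negotiated caps): with
 * filtering_caps = VIRTCHNL_VLAN_ETHERTYPE_8100 |
 * VIRTCHNL_VLAN_ETHERTYPE_88A8, tpid = ETH_P_8021Q or ETH_P_8021AD
 * validates, while ETH_P_QINQ1 maps to VIRTCHNL_VLAN_ETHERTYPE_9100, is
 * missing from the caps, and is rejected.
 */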
4822
4823/**
4824 * ice_vc_is_valid_vlan - validate the virtchnl_vlan
4825 * @vc_vlan: virtchnl_vlan to validate
4826 *
4827 * If the VLAN TCI and VLAN TPID are 0, then this filter is invalid, so return
4828 * false. Otherwise return true.
4829 */
4830static bool ice_vc_is_valid_vlan(struct virtchnl_vlan *vc_vlan)
4831{
4832 if (!vc_vlan->tci || !vc_vlan->tpid)
4833 return false;
4834
4835 return true;
4836}
4837
4838/**
4839 * ice_vc_validate_vlan_filter_list - validate the filter list from the VF
4840 * @vfc: negotiated/supported VLAN filtering capabilities
4841 * @vfl: VLAN filter list from VF to validate
4842 *
4843 * Validate all of the filters in the VLAN filter list from the VF. If any of
4844 * the checks fail then return false. Otherwise return true.
4845 */
4846static bool
4847ice_vc_validate_vlan_filter_list(struct virtchnl_vlan_filtering_caps *vfc,
4848 struct virtchnl_vlan_filter_list_v2 *vfl)
4849{
4850 u16 i;
4851
4852 if (!vfl->num_elements)
4853 return false;
4854
4855 for (i = 0; i < vfl->num_elements; i++) {
4856 struct virtchnl_vlan_supported_caps *filtering_support =
4857 &vfc->filtering_support;
4858 struct virtchnl_vlan_filter *vlan_fltr = &vfl->filters[i];
4859 struct virtchnl_vlan *outer = &vlan_fltr->outer;
4860 struct virtchnl_vlan *inner = &vlan_fltr->inner;
4861
4862 if ((ice_vc_is_valid_vlan(outer) &&
4863 filtering_support->outer == VIRTCHNL_VLAN_UNSUPPORTED) ||
4864 (ice_vc_is_valid_vlan(inner) &&
4865 filtering_support->inner == VIRTCHNL_VLAN_UNSUPPORTED))
4866 return false;
4867
4868 if ((outer->tci_mask &&
4869 !(filtering_support->outer & VIRTCHNL_VLAN_FILTER_MASK)) ||
4870 (inner->tci_mask &&
4871 !(filtering_support->inner & VIRTCHNL_VLAN_FILTER_MASK)))
4872 return false;
4873
4874 if (((outer->tci & VLAN_PRIO_MASK) &&
4875 !(filtering_support->outer & VIRTCHNL_VLAN_PRIO)) ||
4876 ((inner->tci & VLAN_PRIO_MASK) &&
4877 !(filtering_support->inner & VIRTCHNL_VLAN_PRIO)))
4878 return false;
4879
4880 if ((ice_vc_is_valid_vlan(outer) &&
4881 !ice_vc_validate_vlan_tpid(filtering_support->outer, outer->tpid)) ||
4882 (ice_vc_is_valid_vlan(inner) &&
4883 !ice_vc_validate_vlan_tpid(filtering_support->inner, inner->tpid)))
4884 return false;
4885 }
4886
4887 return true;
4888}
4889
4890/**
4891 * ice_vc_to_vlan - transform from struct virtchnl_vlan to struct ice_vlan
4892 * @vc_vlan: struct virtchnl_vlan to transform
4893 */
4894static struct ice_vlan ice_vc_to_vlan(struct virtchnl_vlan *vc_vlan)
4895{
4896 struct ice_vlan vlan = { 0 };
4897
4898 vlan.prio = (vc_vlan->tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
4899 vlan.vid = vc_vlan->tci & VLAN_VID_MASK;
4900 vlan.tpid = vc_vlan->tpid;
4901
4902 return vlan;
4903}
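/* Worked example of the TCI unpacking above: tci = 0xA005 with
 * VLAN_PRIO_MASK = 0xE000, VLAN_PRIO_SHIFT = 13 and VLAN_VID_MASK = 0x0FFF
 * gives prio = (0xA005 & 0xE000) >> 13 = 5 and vid = 0x005; tpid is carried
 * over unchanged (e.g. ETH_P_8021Q).
 */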
4904
4905/**
4906 * ice_vc_vlan_action - action to perform on the virtchnl_vlan
4907 * @vsi: VF's VSI used to perform the action
4908 * @vlan_action: function to perform the action with (i.e. add/del)
4909 * @vlan: VLAN filter to perform the action with
4910 */
4911static int
4912ice_vc_vlan_action(struct ice_vsi *vsi,
4913 int (*vlan_action)(struct ice_vsi *, struct ice_vlan *),
4914 struct ice_vlan *vlan)
4915{
4916 int err;
4917
4918 err = vlan_action(vsi, vlan);
4919 if (err)
4920 return err;
4921
4922 return 0;
4923}
4924
4925/**
4926 * ice_vc_del_vlans - delete VLAN(s) from the virtchnl filter list
4927 * @vf: VF used to delete the VLAN(s)
4928 * @vsi: VF's VSI used to delete the VLAN(s)
4929 * @vfl: virtchnl filter list used to delete the filters
4930 */
4931static int
4932ice_vc_del_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
4933 struct virtchnl_vlan_filter_list_v2 *vfl)
4934{
4935 bool vlan_promisc = ice_is_vlan_promisc_allowed(vf);
4936 int err;
4937 u16 i;
4938
4939 for (i = 0; i < vfl->num_elements; i++) {
4940 struct virtchnl_vlan_filter *vlan_fltr = &vfl->filters[i];
4941 struct virtchnl_vlan *vc_vlan;
4942
4943 vc_vlan = &vlan_fltr->outer;
4944 if (ice_vc_is_valid_vlan(vc_vlan)) {
4945 struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);
4946
4947 err = ice_vc_vlan_action(vsi,
4948 vsi->outer_vlan_ops.del_vlan,
4949 &vlan);
4950 if (err)
4951 return err;
4952
4953 if (vlan_promisc)
4954 ice_vf_dis_vlan_promisc(vsi, &vlan);
4955 }
4956
4957 vc_vlan = &vlan_fltr->inner;
4958 if (ice_vc_is_valid_vlan(vc_vlan)) {
4959 struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);
4960
4961 err = ice_vc_vlan_action(vsi,
4962 vsi->inner_vlan_ops.del_vlan,
4963 &vlan);
4964 if (err)
4965 return err;
4966
4967 /* no support for VLAN promiscuous on inner VLAN unless
4968 * we are in Single VLAN Mode (SVM)
4969 */
4970 if (!ice_is_dvm_ena(&vsi->back->hw) && vlan_promisc)
4971 ice_vf_dis_vlan_promisc(vsi, &vlan);
4972 }
4973 }
4974
4975 return 0;
4976}
4977
4978/**
4979 * ice_vc_remove_vlan_v2_msg - virtchnl handler for VIRTCHNL_OP_DEL_VLAN_V2
4980 * @vf: VF the message was received from
4981 * @msg: message received from the VF
4982 */
4983static int ice_vc_remove_vlan_v2_msg(struct ice_vf *vf, u8 *msg)
4984{
4985 struct virtchnl_vlan_filter_list_v2 *vfl =
4986 (struct virtchnl_vlan_filter_list_v2 *)msg;
4987 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
4988 struct ice_vsi *vsi;
4989
4990 if (!ice_vc_validate_vlan_filter_list(&vf->vlan_v2_caps.filtering,
4991 vfl)) {
4992 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4993 goto out;
4994 }
4995
4996 if (!ice_vc_isvalid_vsi_id(vf, vfl->vport_id)) {
4997 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4998 goto out;
4999 }
5000
5001 vsi = ice_get_vf_vsi(vf);
5002 if (!vsi) {
5003 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
5004 goto out;
5005 }
5006
5007 if (ice_vc_del_vlans(vf, vsi, vfl))
5008 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
5009
5010out:
5011 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN_V2, v_ret, NULL,
5012 0);
5013}
5014
5015/**
5016 * ice_vc_add_vlans - add VLAN(s) from the virtchnl filter list
5017 * @vf: VF used to add the VLAN(s)
5018 * @vsi: VF's VSI used to add the VLAN(s)
5019 * @vfl: virtchnl filter list used to add the filters
5020 */
5021static int
5022ice_vc_add_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
5023 struct virtchnl_vlan_filter_list_v2 *vfl)
5024{
5025 bool vlan_promisc = ice_is_vlan_promisc_allowed(vf);
5026 int err;
5027 u16 i;
5028
5029 for (i = 0; i < vfl->num_elements; i++) {
5030 struct virtchnl_vlan_filter *vlan_fltr = &vfl->filters[i];
5031 struct virtchnl_vlan *vc_vlan;
5032
5033 vc_vlan = &vlan_fltr->outer;
5034 if (ice_vc_is_valid_vlan(vc_vlan)) {
5035 struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);
5036
5037 err = ice_vc_vlan_action(vsi,
5038 vsi->outer_vlan_ops.add_vlan,
5039 &vlan);
5040 if (err)
5041 return err;
5042
5043 if (vlan_promisc) {
5044 err = ice_vf_ena_vlan_promisc(vsi, &vlan);
5045 if (err)
5046 return err;
5047 }
5048 }
5049
5050 vc_vlan = &vlan_fltr->inner;
5051 if (ice_vc_is_valid_vlan(vc_vlan)) {
5052 struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);
5053
5054 err = ice_vc_vlan_action(vsi,
5055 vsi->inner_vlan_ops.add_vlan,
5056 &vlan);
5057 if (err)
5058 return err;
5059
5060 /* no support for VLAN promiscuous on inner VLAN unless
5061 * we are in Single VLAN Mode (SVM)
5062 */
5063 if (!ice_is_dvm_ena(&vsi->back->hw) && vlan_promisc) {
5064 err = ice_vf_ena_vlan_promisc(vsi, &vlan);
5065 if (err)
5066 return err;
5067 }
5068 }
5069 }
5070
5071 return 0;
5072}
5073
5074/**
5075 * ice_vc_validate_add_vlan_filter_list - validate add filter list from the VF
5076 * @vsi: VF VSI used to get number of existing VLAN filters
5077 * @vfc: negotiated/supported VLAN filtering capabilities
5078 * @vfl: VLAN filter list from VF to validate
5079 *
5080 * Validate all of the filters in the VLAN filter list from the VF during the
5081 * VIRTCHNL_OP_ADD_VLAN_V2 opcode. If any of the checks fail then return false.
5082 * Otherwise return true.
5083 */
5084static bool
5085ice_vc_validate_add_vlan_filter_list(struct ice_vsi *vsi,
5086 struct virtchnl_vlan_filtering_caps *vfc,
5087 struct virtchnl_vlan_filter_list_v2 *vfl)
5088{
5089 u16 num_requested_filters = vsi->num_vlan + vfl->num_elements;
5090
5091 if (num_requested_filters > vfc->max_filters)
5092 return false;
5093
5094 return ice_vc_validate_vlan_filter_list(vfc, vfl);
5095}
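
/* For illustration (hypothetical values): with vfc->max_filters = 16 and
 * vsi->num_vlan = 14, a request to add 3 more filters is rejected up front,
 * before any filter is programmed.
 */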
5096
5097/**
5098 * ice_vc_add_vlan_v2_msg - virtchnl handler for VIRTCHNL_OP_ADD_VLAN_V2
5099 * @vf: VF the message was received from
5100 * @msg: message received from the VF
5101 */
5102static int ice_vc_add_vlan_v2_msg(struct ice_vf *vf, u8 *msg)
5103{
5104 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
5105 struct virtchnl_vlan_filter_list_v2 *vfl =
5106 (struct virtchnl_vlan_filter_list_v2 *)msg;
5107 struct ice_vsi *vsi;
5108
5109 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
5110 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
5111 goto out;
5112 }
5113
5114 if (!ice_vc_isvalid_vsi_id(vf, vfl->vport_id)) {
5115 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
5116 goto out;
5117 }
5118
5119 vsi = ice_get_vf_vsi(vf);
5120 if (!vsi) {
5121 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
5122 goto out;
5123 }
5124
5125 if (!ice_vc_validate_add_vlan_filter_list(vsi,
5126 &vf->vlan_v2_caps.filtering,
5127 vfl)) {
5128 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
5129 goto out;
5130 }
5131
5132 if (ice_vc_add_vlans(vf, vsi, vfl))
5133 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
5134
5135out:
5136 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN_V2, v_ret, NULL,
5137 0);
5138}
5139
5140/**
5141 * ice_vc_valid_vlan_setting - validate VLAN setting
5142 * @negotiated_settings: negotiated VLAN settings during VF init
5143 * @ethertype_setting: ethertype(s) requested for the VLAN setting
5144 */
5145static bool
5146ice_vc_valid_vlan_setting(u32 negotiated_settings, u32 ethertype_setting)
5147{
5148 if (ethertype_setting && !(negotiated_settings & ethertype_setting))
5149 return false;
5150
5151 /* only allow a single VIRTCHNL_VLAN_ETHERTYPE if
5152 * VIRTCHNL_VLAN_ETHERTYPE_AND is not negotiated/supported
5153 */
5154 if (!(negotiated_settings & VIRTCHNL_VLAN_ETHERTYPE_AND) &&
5155 hweight32(ethertype_setting) > 1)
5156 return false;
5157
5158 /* ability to modify the VLAN setting was not negotiated */
5159 if (!(negotiated_settings & VIRTCHNL_VLAN_TOGGLE))
5160 return false;
5161
5162 return true;
5163}
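
/* For illustration (hypothetical negotiation): if only VIRTCHNL_VLAN_TOGGLE |
 * VIRTCHNL_VLAN_ETHERTYPE_8100 was negotiated, requesting ETHERTYPE_8100 is
 * valid, while requesting ETHERTYPE_8100 | ETHERTYPE_88A8 fails the
 * hweight32() check because VIRTCHNL_VLAN_ETHERTYPE_AND was not negotiated.
 */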
5164
5165/**
5166 * ice_vc_valid_vlan_setting_msg - validate the VLAN setting message
5167 * @caps: negotiated VLAN settings during VF init
5168 * @msg: message to validate
5169 *
5170 * Used to validate any VLAN virtchnl message sent as a
5171 * virtchnl_vlan_setting structure. Validates the message against the
5172 * negotiated/supported caps during VF driver init.
5173 */
5174static bool
5175ice_vc_valid_vlan_setting_msg(struct virtchnl_vlan_supported_caps *caps,
5176 struct virtchnl_vlan_setting *msg)
5177{
5178 if ((!msg->outer_ethertype_setting &&
5179 !msg->inner_ethertype_setting) ||
5180 (!caps->outer && !caps->inner))
5181 return false;
5182
5183 if (msg->outer_ethertype_setting &&
5184 !ice_vc_valid_vlan_setting(caps->outer,
5185 msg->outer_ethertype_setting))
5186 return false;
5187
5188 if (msg->inner_ethertype_setting &&
5189 !ice_vc_valid_vlan_setting(caps->inner,
5190 msg->inner_ethertype_setting))
5191 return false;
5192
5193 return true;
5194}
5195
5196/**
5197 * ice_vc_get_tpid - transform from VIRTCHNL_VLAN_ETHERTYPE_* to VLAN TPID
5198 * @ethertype_setting: VIRTCHNL_VLAN_ETHERTYPE_* used to get VLAN TPID
5199 * @tpid: VLAN TPID to populate
5200 */
5201static int ice_vc_get_tpid(u32 ethertype_setting, u16 *tpid)
5202{
5203 switch (ethertype_setting) {
5204 case VIRTCHNL_VLAN_ETHERTYPE_8100:
5205 *tpid = ETH_P_8021Q;
5206 break;
5207 case VIRTCHNL_VLAN_ETHERTYPE_88A8:
5208 *tpid = ETH_P_8021AD;
5209 break;
5210 case VIRTCHNL_VLAN_ETHERTYPE_9100:
5211 *tpid = ETH_P_QINQ1;
5212 break;
5213 default:
5214 *tpid = 0;
5215 return -EINVAL;
5216 }
5217
5218 return 0;
5219}
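
/* For illustration: VIRTCHNL_VLAN_ETHERTYPE_8100 maps to 0x8100
 * (ETH_P_8021Q); any unknown or multi-bit ethertype_setting zeroes *tpid
 * and returns -EINVAL.
 */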
5220
5221/**
5222 * ice_vc_ena_vlan_offload - enable VLAN offload based on the ethertype_setting
5223 * @vsi: VF's VSI used to enable the VLAN offload
5224 * @ena_offload: function used to enable the VLAN offload
5225 * @ethertype_setting: VIRTCHNL_VLAN_ETHERTYPE_* to enable offloads for
5226 */
5227static int
5228ice_vc_ena_vlan_offload(struct ice_vsi *vsi,
5229 int (*ena_offload)(struct ice_vsi *vsi, u16 tpid),
5230 u32 ethertype_setting)
5231{
5232 u16 tpid;
5233 int err;
5234
5235 err = ice_vc_get_tpid(ethertype_setting, &tpid);
5236 if (err)
5237 return err;
5238
5239 err = ena_offload(vsi, tpid);
5240 if (err)
5241 return err;
5242
5243 return 0;
5244}
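
/* For illustration: called with ena_offload = vsi->outer_vlan_ops.ena_stripping
 * and VIRTCHNL_VLAN_ETHERTYPE_88A8, this resolves the TPID to ETH_P_8021AD
 * (0x88a8) and enables outer stripping for that TPID.
 */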
5245
5246#define ICE_L2TSEL_QRX_CONTEXT_REG_IDX 3
5247#define ICE_L2TSEL_BIT_OFFSET 23
5248enum ice_l2tsel {
5249 ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND,
5250 ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG1,
5251};
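
/* As used by ice_vsi_update_l2tsel() below: clearing bit 23 in the fourth
 * Rx queue context dword (QRX_CONTEXT(3, queue)) extracts the first VLAN tag
 * into L2TAG2_2ND, while setting it extracts the first tag into L2TAG1.
 */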
5252
5253/**
5254 * ice_vsi_update_l2tsel - update l2tsel field for all Rx rings on this VSI
5255 * @vsi: VSI used to update l2tsel on
5256 * @l2tsel: l2tsel setting requested
5257 *
5258 * Use the l2tsel setting to update all of the Rx queue context bits for l2tsel.
5259 * This will modify which descriptor field the first offloaded VLAN will be
5260 * stripped into.
5261 */
5262static void ice_vsi_update_l2tsel(struct ice_vsi *vsi, enum ice_l2tsel l2tsel)
5263{
5264 struct ice_hw *hw = &vsi->back->hw;
5265 u32 l2tsel_bit;
5266 int i;
5267
5268 if (l2tsel == ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND)
5269 l2tsel_bit = 0;
5270 else
5271 l2tsel_bit = BIT(ICE_L2TSEL_BIT_OFFSET);
5272
5273 for (i = 0; i < vsi->alloc_rxq; i++) {
5274 u16 pfq = vsi->rxq_map[i];
5275 u32 qrx_context_offset;
5276 u32 regval;
5277
5278 qrx_context_offset =
5279 QRX_CONTEXT(ICE_L2TSEL_QRX_CONTEXT_REG_IDX, pfq);
5280
5281 regval = rd32(hw, qrx_context_offset);
5282 regval &= ~BIT(ICE_L2TSEL_BIT_OFFSET);
5283 regval |= l2tsel_bit;
5284 wr32(hw, qrx_context_offset, regval);
5285 }
5286}
5287
5288/**
5289 * ice_vc_ena_vlan_stripping_v2_msg
5290 * @vf: VF the message was received from
5291 * @msg: message received from the VF
5292 *
5293 * virtchnl handler for VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2
5294 */
5295static int ice_vc_ena_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg)
5296{
5297 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
5298 struct virtchnl_vlan_supported_caps *stripping_support;
5299 struct virtchnl_vlan_setting *strip_msg =
5300 (struct virtchnl_vlan_setting *)msg;
5301 u32 ethertype_setting;
5302 struct ice_vsi *vsi;
5303
5304 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
5305 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
5306 goto out;
5307 }
5308
5309 if (!ice_vc_isvalid_vsi_id(vf, strip_msg->vport_id)) {
5310 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
5311 goto out;
5312 }
5313
5314 vsi = ice_get_vf_vsi(vf);
5315 if (!vsi) {
5316 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
5317 goto out;
5318 }
5319
5320 stripping_support = &vf->vlan_v2_caps.offloads.stripping_support;
5321 if (!ice_vc_valid_vlan_setting_msg(stripping_support, strip_msg)) {
5322 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
5323 goto out;
5324 }
5325
5326 ethertype_setting = strip_msg->outer_ethertype_setting;
5327 if (ethertype_setting) {
5328 if (ice_vc_ena_vlan_offload(vsi,
5329 vsi->outer_vlan_ops.ena_stripping,
5330 ethertype_setting)) {
5331 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
5332 goto out;
5333 } else {
5334 enum ice_l2tsel l2tsel =
5335 ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND;
5336
5337 /* PF tells the VF that the outer VLAN tag is always
5338 * extracted to VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 and
5339 * inner is always extracted to
5340 * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1. This is needed to
5341 * support outer stripping so the first tag always ends
5342 * up in L2TAG2_2ND and the second/inner tag, if
5343 * enabled, is extracted in L2TAG1.
5344 */
5345 ice_vsi_update_l2tsel(vsi, l2tsel);
5346 }
5347 }
5348
5349 ethertype_setting = strip_msg->inner_ethertype_setting;
5350 if (ethertype_setting &&
5351 ice_vc_ena_vlan_offload(vsi, vsi->inner_vlan_ops.ena_stripping,
5352 ethertype_setting)) {
5353 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
5354 goto out;
5355 }
5356
5357out:
5358 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2, v_ret, NULL, 0);
5359}
5360
5361/**
5362 * ice_vc_dis_vlan_stripping_v2_msg
5363 * @vf: VF the message was received from
5364 * @msg: message received from the VF
5365 *
5366 * virtchnl handler for VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2
5367 */
5368static int ice_vc_dis_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg)
5369{
5370 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
5371 struct virtchnl_vlan_supported_caps *stripping_support;
5372 struct virtchnl_vlan_setting *strip_msg =
5373 (struct virtchnl_vlan_setting *)msg;
5374 u32 ethertype_setting;
5375 struct ice_vsi *vsi;
5376
5377 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
5378 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
5379 goto out;
5380 }
5381
5382 if (!ice_vc_isvalid_vsi_id(vf, strip_msg->vport_id)) {
5383 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
5384 goto out;
5385 }
5386
5387 vsi = ice_get_vf_vsi(vf);
5388 if (!vsi) {
5389 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
5390 goto out;
5391 }
5392
5393 stripping_support = &vf->vlan_v2_caps.offloads.stripping_support;
5394 if (!ice_vc_valid_vlan_setting_msg(stripping_support, strip_msg)) {
5395 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
5396 goto out;
5397 }
5398
5399 ethertype_setting = strip_msg->outer_ethertype_setting;
5400 if (ethertype_setting) {
5401 if (vsi->outer_vlan_ops.dis_stripping(vsi)) {
5402 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
5403 goto out;
5404 } else {
5405 enum ice_l2tsel l2tsel =
5406 ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG1;
5407
5408 /* PF tells the VF that the outer VLAN tag is always
5409 * extracted to VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 and
5410 * inner is always extracted to
5411 * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1. This is needed to
5412 * support inner stripping while outer stripping is
5413 * disabled so that the first and only tag is extracted
5414 * in L2TAG1.
5415 */
5416 ice_vsi_update_l2tsel(vsi, l2tsel);
5417 }
5418 }
5419
5420 ethertype_setting = strip_msg->inner_ethertype_setting;
5421 if (ethertype_setting && vsi->inner_vlan_ops.dis_stripping(vsi)) {
5422 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
5423 goto out;
5424 }
5425
5426out:
5427 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2, v_ret, NULL, 0);
5428}
5429
5430/**
5431 * ice_vc_ena_vlan_insertion_v2_msg
5432 * @vf: VF the message was received from
5433 * @msg: message received from the VF
5434 *
5435 * virtchnl handler for VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2
5436 */
5437static int ice_vc_ena_vlan_insertion_v2_msg(struct ice_vf *vf, u8 *msg)
5438{
5439 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
5440 struct virtchnl_vlan_supported_caps *insertion_support;
5441 struct virtchnl_vlan_setting *insertion_msg =
5442 (struct virtchnl_vlan_setting *)msg;
5443 u32 ethertype_setting;
5444 struct ice_vsi *vsi;
5445
5446 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
5447 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
5448 goto out;
5449 }
5450
5451 if (!ice_vc_isvalid_vsi_id(vf, insertion_msg->vport_id)) {
5452 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
5453 goto out;
5454 }
5455
5456 vsi = ice_get_vf_vsi(vf);
5457 if (!vsi) {
5458 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
5459 goto out;
5460 }
5461
5462 insertion_support = &vf->vlan_v2_caps.offloads.insertion_support;
5463 if (!ice_vc_valid_vlan_setting_msg(insertion_support, insertion_msg)) {
5464 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
5465 goto out;
5466 }
5467
5468 ethertype_setting = insertion_msg->outer_ethertype_setting;
5469 if (ethertype_setting &&
5470 ice_vc_ena_vlan_offload(vsi, vsi->outer_vlan_ops.ena_insertion,
5471 ethertype_setting)) {
5472 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
5473 goto out;
5474 }
5475
5476 ethertype_setting = insertion_msg->inner_ethertype_setting;
5477 if (ethertype_setting &&
5478 ice_vc_ena_vlan_offload(vsi, vsi->inner_vlan_ops.ena_insertion,
5479 ethertype_setting)) {
5480 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
5481 goto out;
5482 }
5483
5484out:
5485 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2, v_ret, NULL, 0);
5486}
5487
5488/**
5489 * ice_vc_dis_vlan_insertion_v2_msg
5490 * @vf: VF the message was received from
5491 * @msg: message received from the VF
5492 *
5493 * virtchnl handler for VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2
5494 */
5495static int ice_vc_dis_vlan_insertion_v2_msg(struct ice_vf *vf, u8 *msg)
5496{
5497 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
5498 struct virtchnl_vlan_supported_caps *insertion_support;
5499 struct virtchnl_vlan_setting *insertion_msg =
5500 (struct virtchnl_vlan_setting *)msg;
5501 u32 ethertype_setting;
5502 struct ice_vsi *vsi;
5503
5504 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
5505 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
5506 goto out;
5507 }
5508
5509 if (!ice_vc_isvalid_vsi_id(vf, insertion_msg->vport_id)) {
5510 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
5511 goto out;
5512 }
5513
5514 vsi = ice_get_vf_vsi(vf);
5515 if (!vsi) {
5516 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
5517 goto out;
5518 }
5519
5520 insertion_support = &vf->vlan_v2_caps.offloads.insertion_support;
5521 if (!ice_vc_valid_vlan_setting_msg(insertion_support, insertion_msg)) {
5522 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
5523 goto out;
5524 }
5525
5526 ethertype_setting = insertion_msg->outer_ethertype_setting;
5527 if (ethertype_setting && vsi->outer_vlan_ops.dis_insertion(vsi)) {
5528 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
5529 goto out;
5530 }
5531
5532 ethertype_setting = insertion_msg->inner_ethertype_setting;
5533 if (ethertype_setting && vsi->inner_vlan_ops.dis_insertion(vsi)) {
5534 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
5535 goto out;
5536 }
5537
5538out:
5539 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2, v_ret, NULL, 0);
5540}
5541
5542static struct ice_vc_vf_ops ice_vc_vf_dflt_ops = {
5543 .get_ver_msg = ice_vc_get_ver_msg,
5544 .get_vf_res_msg = ice_vc_get_vf_res_msg,
5545 .reset_vf = ice_vc_reset_vf_msg,
5546 .add_mac_addr_msg = ice_vc_add_mac_addr_msg,
5547 .del_mac_addr_msg = ice_vc_del_mac_addr_msg,
5548 .cfg_qs_msg = ice_vc_cfg_qs_msg,
5549 .ena_qs_msg = ice_vc_ena_qs_msg,
5550 .dis_qs_msg = ice_vc_dis_qs_msg,
5551 .request_qs_msg = ice_vc_request_qs_msg,
5552 .cfg_irq_map_msg = ice_vc_cfg_irq_map_msg,
5553 .config_rss_key = ice_vc_config_rss_key,
5554 .config_rss_lut = ice_vc_config_rss_lut,
5555 .get_stats_msg = ice_vc_get_stats_msg,
5556 .cfg_promiscuous_mode_msg = ice_vc_cfg_promiscuous_mode_msg,
5557 .add_vlan_msg = ice_vc_add_vlan_msg,
5558 .remove_vlan_msg = ice_vc_remove_vlan_msg,
5559 .ena_vlan_stripping = ice_vc_ena_vlan_stripping,
5560 .dis_vlan_stripping = ice_vc_dis_vlan_stripping,
5561 .handle_rss_cfg_msg = ice_vc_handle_rss_cfg,
5562 .add_fdir_fltr_msg = ice_vc_add_fdir_fltr,
5563 .del_fdir_fltr_msg = ice_vc_del_fdir_fltr,
5564 .get_offload_vlan_v2_caps = ice_vc_get_offload_vlan_v2_caps,
5565 .add_vlan_v2_msg = ice_vc_add_vlan_v2_msg,
5566 .remove_vlan_v2_msg = ice_vc_remove_vlan_v2_msg,
5567 .ena_vlan_stripping_v2_msg = ice_vc_ena_vlan_stripping_v2_msg,
5568 .dis_vlan_stripping_v2_msg = ice_vc_dis_vlan_stripping_v2_msg,
5569 .ena_vlan_insertion_v2_msg = ice_vc_ena_vlan_insertion_v2_msg,
5570 .dis_vlan_insertion_v2_msg = ice_vc_dis_vlan_insertion_v2_msg,
5571};
5572
5573void ice_vc_set_dflt_vf_ops(struct ice_vc_vf_ops *ops)
5574{
5575 *ops = ice_vc_vf_dflt_ops;
5576}
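
/* Usage sketch: callers install these defaults into vf->vc_ops, which
 * ice_vc_process_vf_msg() below dispatches through; in switchdev mode
 * individual handlers are then overridden via ice_vc_change_ops_to_repr().
 */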
5577
5578/**
5579 * ice_vc_repr_add_mac
5580 * @vf: pointer to VF
5581 * @msg: virtchannel message
5582 *
5583 * When port representors are created, we do not add a MAC rule
5584 * to firmware; instead, we store it so that the PF can report
5585 * the same MAC as the VF.
5586 */
5587static int ice_vc_repr_add_mac(struct ice_vf *vf, u8 *msg)
5588{
5589 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
5590 struct virtchnl_ether_addr_list *al =
5591 (struct virtchnl_ether_addr_list *)msg;
5592 struct ice_vsi *vsi;
5593 struct ice_pf *pf;
5594 int i;
5595
5596 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
5597 !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
5598 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
5599 goto handle_mac_exit;
5600 }
5601
5602 pf = vf->pf;
5603
5604 vsi = ice_get_vf_vsi(vf);
5605 if (!vsi) {
5606 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
5607 goto handle_mac_exit;
5608 }
5609
5610 for (i = 0; i < al->num_elements; i++) {
5611 u8 *mac_addr = al->list[i].addr;
c1e5da5d 5612 int result;
5613
5614 if (!is_unicast_ether_addr(mac_addr) ||
5615 ether_addr_equal(mac_addr, vf->hw_lan_addr.addr))
5616 continue;
5617
5618 if (vf->pf_set_mac) {
5619 dev_err(ice_pf_to_dev(pf), "VF attempting to override administratively set MAC address\n");
5620 v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
5621 goto handle_mac_exit;
5622 }
5623
5624 result = ice_eswitch_add_vf_mac_rule(pf, vf, mac_addr);
5625 if (result) {
5626 			dev_err(ice_pf_to_dev(pf), "Failed to add MAC %pM for VF %d, error %d\n",
5627 mac_addr, vf->vf_id, result);
5628 goto handle_mac_exit;
5629 }
5630
5631 ice_vfhw_mac_add(vf, &al->list[i]);
5632 vf->num_mac++;
5633 break;
5634 }
5635
5636handle_mac_exit:
5637 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
5638 v_ret, NULL, 0);
5639}
5640
5641/**
5642 * ice_vc_repr_del_mac - respond with success for deleting a MAC
5643 * @vf: pointer to VF
5644 * @msg: virtchannel message
5645 *
5646 * Respond with success so that the normal VF flow is not broken.
5647 * For legacy VF drivers, try to update the cached MAC address.
5648 */
5649static int
5650ice_vc_repr_del_mac(struct ice_vf *vf, u8 *msg)
5651{
5652 struct virtchnl_ether_addr_list *al =
5653 (struct virtchnl_ether_addr_list *)msg;
5654
5655 ice_update_legacy_cached_mac(vf, &al->list[0]);
5656
5657 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR,
5658 VIRTCHNL_STATUS_SUCCESS, NULL, 0);
5659}
5660
e492c2e1 5661static int ice_vc_repr_add_vlan(struct ice_vf *vf, u8 __always_unused *msg)
ac19e03e 5662{
5663 dev_dbg(ice_pf_to_dev(vf->pf),
5664 "Can't add VLAN in switchdev mode for VF %d\n", vf->vf_id);
5665 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN,
5666 VIRTCHNL_STATUS_SUCCESS, NULL, 0);
5667}
5668
5669static int ice_vc_repr_del_vlan(struct ice_vf *vf, u8 __always_unused *msg)
5670{
5671 dev_dbg(ice_pf_to_dev(vf->pf),
5672 "Can't delete VLAN in switchdev mode for VF %d\n", vf->vf_id);
5673 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN,
5674 VIRTCHNL_STATUS_SUCCESS, NULL, 0);
5675}
5676
5677static int ice_vc_repr_ena_vlan_stripping(struct ice_vf *vf)
5678{
5679 dev_dbg(ice_pf_to_dev(vf->pf),
5680 "Can't enable VLAN stripping in switchdev mode for VF %d\n",
5681 vf->vf_id);
5682 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
5683 VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
5684 NULL, 0);
5685}
5686
5687static int ice_vc_repr_dis_vlan_stripping(struct ice_vf *vf)
5688{
5689 dev_dbg(ice_pf_to_dev(vf->pf),
5690 "Can't disable VLAN stripping in switchdev mode for VF %d\n",
5691 vf->vf_id);
5692 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
5693 VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
5694 NULL, 0);
5695}
5696
5697static int
5698ice_vc_repr_cfg_promiscuous_mode(struct ice_vf *vf, u8 __always_unused *msg)
5699{
5700 dev_dbg(ice_pf_to_dev(vf->pf),
5701 "Can't config promiscuous mode in switchdev mode for VF %d\n",
5702 vf->vf_id);
5703 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
5704 VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
5705 NULL, 0);
5706}
5707
5708void ice_vc_change_ops_to_repr(struct ice_vc_vf_ops *ops)
5709{
5710 ops->add_mac_addr_msg = ice_vc_repr_add_mac;
5711 ops->del_mac_addr_msg = ice_vc_repr_del_mac;
5712 ops->add_vlan_msg = ice_vc_repr_add_vlan;
5713 ops->remove_vlan_msg = ice_vc_repr_del_vlan;
5714 ops->ena_vlan_stripping = ice_vc_repr_ena_vlan_stripping;
5715 ops->dis_vlan_stripping = ice_vc_repr_dis_vlan_stripping;
5716 ops->cfg_promiscuous_mode_msg = ice_vc_repr_cfg_promiscuous_mode;
5717}
5718
5719/**
5720 * ice_vc_process_vf_msg - Process request from VF
5721 * @pf: pointer to the PF structure
5722 * @event: pointer to the AQ event
5723 *
5724 * Called from the common asq/arq handler to
5725 * process a request from a VF
5726 */
5727void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
5728{
5729 u32 v_opcode = le32_to_cpu(event->desc.cookie_high);
5730 s16 vf_id = le16_to_cpu(event->desc.retval);
5731 u16 msglen = event->msg_len;
ac19e03e 5732 struct ice_vc_vf_ops *ops;
5733 u8 *msg = event->msg_buf;
5734 struct ice_vf *vf = NULL;
4015d11e 5735 struct device *dev;
5736 int err = 0;
5737
4015d11e 5738 dev = ice_pf_to_dev(pf);
4c66d227 5739 if (ice_validate_vf_id(pf, vf_id)) {
5740 err = -EINVAL;
5741 goto error_handler;
5742 }
5743
5744 vf = &pf->vf[vf_id];
5745
5746 /* Check if VF is disabled. */
5747 if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) {
5748 err = -EPERM;
5749 goto error_handler;
5750 }
5751
5752 ops = &vf->vc_ops;
5753
5754 /* Perform basic checks on the msg */
5755 err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
5756 if (err) {
cf6c6e01 5757 if (err == VIRTCHNL_STATUS_ERR_PARAM)
5758 err = -EPERM;
5759 else
5760 err = -EINVAL;
5761 }
5762
5763 if (!ice_vc_is_opcode_allowed(vf, v_opcode)) {
5764 ice_vc_send_msg_to_vf(vf, v_opcode,
5765 VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, NULL,
5766 0);
5767 return;
5768 }
5769
5770error_handler:
5771 if (err) {
5772 ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM,
5773 NULL, 0);
4015d11e 5774 dev_err(dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
5775 vf_id, v_opcode, msglen, err);
5776 return;
5777 }
5778
5779 /* VF is being configured in another context that triggers a VFR, so no
5780 * need to process this message
5781 */
5782 if (!mutex_trylock(&vf->cfg_lock)) {
5783 dev_info(dev, "VF %u is being configured in another context that will trigger a VFR, so there is no need to handle this message\n",
5784 vf->vf_id);
5785 return;
5786 }
5787
5788 switch (v_opcode) {
5789 case VIRTCHNL_OP_VERSION:
ac19e03e 5790 err = ops->get_ver_msg(vf, msg);
5791 break;
5792 case VIRTCHNL_OP_GET_VF_RESOURCES:
ac19e03e 5793 err = ops->get_vf_res_msg(vf, msg);
2f9ec241 5794 if (ice_vf_init_vlan_stripping(vf))
cc71de8f 5795 dev_dbg(dev, "Failed to initialize VLAN stripping for VF %d\n",
2f9ec241 5796 vf->vf_id);
dfc62400 5797 ice_vc_notify_vf_link_state(vf);
5798 break;
5799 case VIRTCHNL_OP_RESET_VF:
ac19e03e 5800 ops->reset_vf(vf);
5801 break;
5802 case VIRTCHNL_OP_ADD_ETH_ADDR:
ac19e03e 5803 err = ops->add_mac_addr_msg(vf, msg);
5804 break;
5805 case VIRTCHNL_OP_DEL_ETH_ADDR:
ac19e03e 5806 err = ops->del_mac_addr_msg(vf, msg);
5807 break;
5808 case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
ac19e03e 5809 err = ops->cfg_qs_msg(vf, msg);
5810 break;
5811 case VIRTCHNL_OP_ENABLE_QUEUES:
ac19e03e 5812 err = ops->ena_qs_msg(vf, msg);
5813 ice_vc_notify_vf_link_state(vf);
5814 break;
5815 case VIRTCHNL_OP_DISABLE_QUEUES:
ac19e03e 5816 err = ops->dis_qs_msg(vf, msg);
5817 break;
5818 case VIRTCHNL_OP_REQUEST_QUEUES:
ac19e03e 5819 err = ops->request_qs_msg(vf, msg);
5820 break;
5821 case VIRTCHNL_OP_CONFIG_IRQ_MAP:
ac19e03e 5822 err = ops->cfg_irq_map_msg(vf, msg);
5823 break;
5824 case VIRTCHNL_OP_CONFIG_RSS_KEY:
ac19e03e 5825 err = ops->config_rss_key(vf, msg);
5826 break;
5827 case VIRTCHNL_OP_CONFIG_RSS_LUT:
ac19e03e 5828 err = ops->config_rss_lut(vf, msg);
5829 break;
5830 case VIRTCHNL_OP_GET_STATS:
ac19e03e 5831 err = ops->get_stats_msg(vf, msg);
1071a835 5832 break;
01b5e89a 5833 case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
ac19e03e 5834 err = ops->cfg_promiscuous_mode_msg(vf, msg);
01b5e89a 5835 break;
1071a835 5836 case VIRTCHNL_OP_ADD_VLAN:
ac19e03e 5837 err = ops->add_vlan_msg(vf, msg);
5838 break;
5839 case VIRTCHNL_OP_DEL_VLAN:
ac19e03e 5840 err = ops->remove_vlan_msg(vf, msg);
5841 break;
5842 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
ac19e03e 5843 err = ops->ena_vlan_stripping(vf);
5844 break;
5845 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
ac19e03e 5846 err = ops->dis_vlan_stripping(vf);
1071a835 5847 break;
1f7ea1cd 5848 case VIRTCHNL_OP_ADD_FDIR_FILTER:
ac19e03e 5849 err = ops->add_fdir_fltr_msg(vf, msg);
5850 break;
5851 case VIRTCHNL_OP_DEL_FDIR_FILTER:
ac19e03e 5852 err = ops->del_fdir_fltr_msg(vf, msg);
1f7ea1cd 5853 break;
222a8ab0 5854 case VIRTCHNL_OP_ADD_RSS_CFG:
ac19e03e 5855 err = ops->handle_rss_cfg_msg(vf, msg, true);
5856 break;
5857 case VIRTCHNL_OP_DEL_RSS_CFG:
ac19e03e 5858 err = ops->handle_rss_cfg_msg(vf, msg, false);
222a8ab0 5859 break;
5860 case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS:
5861 err = ops->get_offload_vlan_v2_caps(vf);
5862 break;
5863 case VIRTCHNL_OP_ADD_VLAN_V2:
5864 err = ops->add_vlan_v2_msg(vf, msg);
5865 break;
5866 case VIRTCHNL_OP_DEL_VLAN_V2:
5867 err = ops->remove_vlan_v2_msg(vf, msg);
5868 break;
5869 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
5870 err = ops->ena_vlan_stripping_v2_msg(vf, msg);
5871 break;
5872 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
5873 err = ops->dis_vlan_stripping_v2_msg(vf, msg);
5874 break;
5875 case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
5876 err = ops->ena_vlan_insertion_v2_msg(vf, msg);
5877 break;
5878 case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
5879 err = ops->dis_vlan_insertion_v2_msg(vf, msg);
5880 break;
5881 case VIRTCHNL_OP_UNKNOWN:
5882 default:
5883 dev_err(dev, "Unsupported opcode %d from VF %d\n", v_opcode,
5884 vf_id);
5885 err = ice_vc_send_msg_to_vf(vf, v_opcode,
5886 VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
5887 NULL, 0);
5888 break;
5889 }
5890 if (err) {
5891 /* Helper function cares less about error return values here
5892 * as it is busy with pending work.
5893 */
4015d11e 5894 dev_info(dev, "PF failed to honor VF %d, opcode %d, error %d\n",
5895 vf_id, v_opcode, err);
5896 }
5897
5898 mutex_unlock(&vf->cfg_lock);
5899}
5900
5901/**
5902 * ice_get_vf_cfg
5903 * @netdev: network interface device structure
5904 * @vf_id: VF identifier
5905 * @ivi: VF configuration structure
5906 *
5907 * return VF configuration
5908 */
5909int
5910ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
7c710869 5911{
4c66d227 5912 struct ice_pf *pf = ice_netdev_to_pf(netdev);
5913 struct ice_vf *vf;
5914
4c66d227 5915 if (ice_validate_vf_id(pf, vf_id))
7c710869 5916 return -EINVAL;
5917
5918 vf = &pf->vf[vf_id];
7c710869 5919
4c66d227 5920 if (ice_check_vf_init(pf, vf))
7c710869 5921 return -EBUSY;
5922
5923 ivi->vf = vf_id;
51efbbdf 5924 ether_addr_copy(ivi->mac, vf->hw_lan_addr.addr);
5925
5926 /* VF configuration for VLAN and applicable QoS */
5927 ivi->vlan = ice_vf_get_port_vlan_id(vf);
5928 ivi->qos = ice_vf_get_port_vlan_prio(vf);
5929 if (ice_vf_is_port_vlan_ena(vf))
5930 ivi->vlan_proto = cpu_to_be16(ice_vf_get_port_vlan_tpid(vf));
5931
5932 ivi->trusted = vf->trusted;
5933 ivi->spoofchk = vf->spoofchk;
5934 if (!vf->link_forced)
5935 ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
5936 else if (vf->link_up)
5937 ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
5938 else
5939 ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
5940 ivi->max_tx_rate = vf->max_tx_rate;
5941 ivi->min_tx_rate = vf->min_tx_rate;
5942 return 0;
5943}
5944
5945/**
5946 * ice_unicast_mac_exists - check if the unicast MAC exists on the PF's switch
5947 * @pf: PF used to reference the switch's rules
5948 * @umac: unicast MAC to compare against existing switch rules
5949 *
5950 * Return true if any match is found, else return false
5951 */
5952static bool ice_unicast_mac_exists(struct ice_pf *pf, u8 *umac)
5953{
5954 struct ice_sw_recipe *mac_recipe_list =
5955 &pf->hw.switch_info->recp_list[ICE_SW_LKUP_MAC];
5956 struct ice_fltr_mgmt_list_entry *list_itr;
5957 struct list_head *rule_head;
5958 struct mutex *rule_lock; /* protect MAC filter list access */
5959
5960 rule_head = &mac_recipe_list->filt_rules;
5961 rule_lock = &mac_recipe_list->filt_rule_lock;
5962
5963 mutex_lock(rule_lock);
5964 list_for_each_entry(list_itr, rule_head, list_entry) {
5965 u8 *existing_mac = &list_itr->fltr_info.l_data.mac.mac_addr[0];
5966
5967 if (ether_addr_equal(existing_mac, umac)) {
5968 mutex_unlock(rule_lock);
5969 return true;
5970 }
5971 }
5972
5973 mutex_unlock(rule_lock);
5974
5975 return false;
5976}
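
/* For illustration: ice_set_vf_mac() below relies on this check to refuse
 * assigning a VF a unicast MAC that an existing switch rule already uses.
 */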
5977
5978/**
5979 * ice_set_vf_mac
5980 * @netdev: network interface device structure
5981 * @vf_id: VF identifier
f9867df6 5982 * @mac: MAC address
7c710869 5983 *
f9867df6 5984 * program VF MAC address
5985 */
5986int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
5987{
4c66d227 5988 struct ice_pf *pf = ice_netdev_to_pf(netdev);
7c710869 5989 struct ice_vf *vf;
c54d209c 5990 int ret;
7c710869 5991
4c66d227 5992 if (ice_validate_vf_id(pf, vf_id))
7c710869 5993 return -EINVAL;
7c710869 5994
f109603a 5995 if (is_multicast_ether_addr(mac)) {
5996 netdev_err(netdev, "%pM not a valid unicast address\n", mac);
5997 return -EINVAL;
5998 }
5999
c54d209c 6000 vf = &pf->vf[vf_id];
47ebc7b0 6001 /* nothing left to do, unicast MAC already set */
6002 if (ether_addr_equal(vf->dev_lan_addr.addr, mac) &&
6003 ether_addr_equal(vf->hw_lan_addr.addr, mac))
6004 return 0;
6005
6006 ret = ice_check_vf_ready_for_cfg(vf);
6007 if (ret)
6008 return ret;
6009
6010 if (ice_unicast_mac_exists(pf, mac)) {
6011 netdev_err(netdev, "Unicast MAC %pM already exists on this PF. Preventing setting VF %u unicast MAC address to %pM\n",
6012 mac, vf_id, mac);
6013 return -EINVAL;
6014 }
6015
6016 mutex_lock(&vf->cfg_lock);
6017
6018 /* VF is notified of its new MAC via the PF's response to the
6019 * VIRTCHNL_OP_GET_VF_RESOURCES message after the VF has been reset
7c710869 6020 */
f28cd5ce 6021 ether_addr_copy(vf->dev_lan_addr.addr, mac);
51efbbdf 6022 ether_addr_copy(vf->hw_lan_addr.addr, mac);
6023 if (is_zero_ether_addr(mac)) {
6024 /* VF will send VIRTCHNL_OP_ADD_ETH_ADDR message with its MAC */
6025 vf->pf_set_mac = false;
6026 netdev_info(netdev, "Removing MAC on VF %d. VF driver will be reinitialized\n",
6027 vf->vf_id);
6028 } else {
6029 /* PF will add MAC rule for the VF */
6030 vf->pf_set_mac = true;
6031 netdev_info(netdev, "Setting MAC %pM on VF %d. VF driver will be reinitialized\n",
6032 mac, vf_id);
6033 }
7c710869 6034
ff010eca 6035 ice_vc_reset_vf(vf);
e6ba5273 6036 mutex_unlock(&vf->cfg_lock);
c54d209c 6037 return 0;
6038}
6039
6040/**
6041 * ice_set_vf_trust
6042 * @netdev: network interface device structure
6043 * @vf_id: VF identifier
6044 * @trusted: Boolean value to enable/disable trusted VF
6045 *
6046 * Enable or disable a given VF as trusted
6047 */
6048int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
6049{
4c66d227 6050 struct ice_pf *pf = ice_netdev_to_pf(netdev);
7c710869 6051 struct ice_vf *vf;
c54d209c 6052 int ret;
7c710869 6053
6054 if (ice_is_eswitch_mode_switchdev(pf)) {
6055 dev_info(ice_pf_to_dev(pf), "Trusted VF is forbidden in switchdev mode\n");
6056 return -EOPNOTSUPP;
6057 }
6058
4c66d227 6059 if (ice_validate_vf_id(pf, vf_id))
7c710869 6060 return -EINVAL;
6061
6062 vf = &pf->vf[vf_id];
6063 ret = ice_check_vf_ready_for_cfg(vf);
6064 if (ret)
6065 return ret;
6066
6067 /* Check if already trusted */
6068 if (trusted == vf->trusted)
6069 return 0;
6070
6071 mutex_lock(&vf->cfg_lock);
6072
7c710869 6073 vf->trusted = trusted;
ff010eca 6074 ice_vc_reset_vf(vf);
19cce2c6 6075 dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n",
6076 vf_id, trusted ? "" : "un");
6077
6078 mutex_unlock(&vf->cfg_lock);
6079
6080 return 0;
6081}
6082
6083/**
6084 * ice_set_vf_link_state
6085 * @netdev: network interface device structure
6086 * @vf_id: VF identifier
6087 * @link_state: required link state
6088 *
6089 * Set VF's link state, irrespective of physical link state status
6090 */
6091int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
6092{
4c66d227 6093 struct ice_pf *pf = ice_netdev_to_pf(netdev);
7c710869 6094 struct ice_vf *vf;
c54d209c 6095 int ret;
7c710869 6096
4c66d227 6097 if (ice_validate_vf_id(pf, vf_id))
7c710869 6098 return -EINVAL;
6099
6100 vf = &pf->vf[vf_id];
6101 ret = ice_check_vf_ready_for_cfg(vf);
6102 if (ret)
6103 return ret;
7c710869 6104
6105 switch (link_state) {
6106 case IFLA_VF_LINK_STATE_AUTO:
6107 vf->link_forced = false;
6108 break;
6109 case IFLA_VF_LINK_STATE_ENABLE:
6110 vf->link_forced = true;
6111 vf->link_up = true;
6112 break;
6113 case IFLA_VF_LINK_STATE_DISABLE:
6114 vf->link_forced = true;
6115 vf->link_up = false;
6116 break;
6117 default:
6118 return -EINVAL;
6119 }
6120
26a91525 6121 ice_vc_notify_vf_link_state(vf);
6122
6123 return 0;
6124}
730fdea4 6125
6126/**
6127 * ice_calc_all_vfs_min_tx_rate - calculate cumulative min Tx rate on all VFs
6128 * @pf: PF associated with VFs
6129 */
6130static int ice_calc_all_vfs_min_tx_rate(struct ice_pf *pf)
6131{
6132 int rate = 0, i;
6133
6134 ice_for_each_vf(pf, i)
6135 rate += pf->vf[i].min_tx_rate;
6136
6137 return rate;
6138}
6139
6140/**
6141 * ice_min_tx_rate_oversubscribed - check if min Tx rate causes oversubscription
6142 * @vf: VF trying to configure min_tx_rate
6143 * @min_tx_rate: min Tx rate in Mbps
6144 *
6145 * Check if the min_tx_rate being passed in will cause oversubscription of total
6146 * min_tx_rate, based on the current link speed and all other VFs' configured
6147 * min_tx_rate.
6148 *
6149 * Return true if the passed min_tx_rate would cause oversubscription, else
6150 * return false
6151 */
6152static bool
6153ice_min_tx_rate_oversubscribed(struct ice_vf *vf, int min_tx_rate)
6154{
6155 int link_speed_mbps = ice_get_link_speed_mbps(ice_get_vf_vsi(vf));
6156 int all_vfs_min_tx_rate = ice_calc_all_vfs_min_tx_rate(vf->pf);
6157
6158 /* this VF's previous rate is being overwritten */
6159 all_vfs_min_tx_rate -= vf->min_tx_rate;
6160
6161 if (all_vfs_min_tx_rate + min_tx_rate > link_speed_mbps) {
6162 dev_err(ice_pf_to_dev(vf->pf), "min_tx_rate of %d Mbps on VF %u would cause oversubscription of %d Mbps based on the current link speed %d Mbps\n",
6163 min_tx_rate, vf->vf_id,
6164 all_vfs_min_tx_rate + min_tx_rate - link_speed_mbps,
6165 link_speed_mbps);
6166 return true;
6167 }
6168
6169 return false;
6170}
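
/* Worked example (hypothetical values): on a 10000 Mbps link with
 * all_vfs_min_tx_rate = 7000 Mbps, of which this VF contributes 2000,
 * a request for min_tx_rate = 6000 yields 7000 - 2000 + 6000 = 11000 Mbps,
 * oversubscribing the link by 1000 Mbps, so the request is rejected.
 */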
6171
6172/**
6173 * ice_set_vf_bw - set min/max VF bandwidth
6174 * @netdev: network interface device structure
6175 * @vf_id: VF identifier
6176 * @min_tx_rate: Minimum Tx rate in Mbps
6177 * @max_tx_rate: Maximum Tx rate in Mbps
6178 */
6179int
6180ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
6181 int max_tx_rate)
6182{
6183 struct ice_pf *pf = ice_netdev_to_pf(netdev);
6184 struct ice_vsi *vsi;
6185 struct device *dev;
6186 struct ice_vf *vf;
6187 int ret;
6188
6189 dev = ice_pf_to_dev(pf);
6190 if (ice_validate_vf_id(pf, vf_id))
6191 return -EINVAL;
6192
6193 vf = &pf->vf[vf_id];
6194 ret = ice_check_vf_ready_for_cfg(vf);
6195 if (ret)
6196 return ret;
6197
6198 vsi = ice_get_vf_vsi(vf);
6199
6200 /* when max_tx_rate is zero that means no max Tx rate limiting, so only
6201 * check if max_tx_rate is non-zero
6202 */
6203 if (max_tx_rate && min_tx_rate > max_tx_rate) {
6204 dev_err(dev, "Cannot set min Tx rate %d Mbps greater than max Tx rate %d Mbps\n",
6205 min_tx_rate, max_tx_rate);
6206 return -EINVAL;
6207 }
6208
6209 if (min_tx_rate && ice_is_dcb_active(pf)) {
6210 dev_err(dev, "DCB on PF is currently enabled. VF min Tx rate limiting not allowed on this PF.\n");
6211 return -EOPNOTSUPP;
6212 }
6213
6214 if (ice_min_tx_rate_oversubscribed(vf, min_tx_rate))
6215 return -EINVAL;
6216
6217 if (vf->min_tx_rate != (unsigned int)min_tx_rate) {
6218 ret = ice_set_min_bw_limit(vsi, (u64)min_tx_rate * 1000);
6219 if (ret) {
6220 dev_err(dev, "Unable to set min-tx-rate for VF %d\n",
6221 vf->vf_id);
6222 return ret;
6223 }
6224
6225 vf->min_tx_rate = min_tx_rate;
6226 }
6227
6228 if (vf->max_tx_rate != (unsigned int)max_tx_rate) {
6229 ret = ice_set_max_bw_limit(vsi, (u64)max_tx_rate * 1000);
6230 if (ret) {
6231 dev_err(dev, "Unable to set max-tx-rate for VF %d\n",
6232 vf->vf_id);
6233 return ret;
6234 }
6235
6236 vf->max_tx_rate = max_tx_rate;
6237 }
6238
6239 return 0;
6240}
6241
6242/**
6243 * ice_get_vf_stats - populate some stats for the VF
6244 * @netdev: the netdev of the PF
6245 * @vf_id: the host OS identifier (0-255)
6246 * @vf_stats: pointer to the OS memory to be initialized
6247 */
6248int ice_get_vf_stats(struct net_device *netdev, int vf_id,
6249 struct ifla_vf_stats *vf_stats)
6250{
6251 struct ice_pf *pf = ice_netdev_to_pf(netdev);
6252 struct ice_eth_stats *stats;
6253 struct ice_vsi *vsi;
6254 struct ice_vf *vf;
c54d209c 6255 int ret;
6256
6257 if (ice_validate_vf_id(pf, vf_id))
6258 return -EINVAL;
6259
6260 vf = &pf->vf[vf_id];
6261 ret = ice_check_vf_ready_for_cfg(vf);
6262 if (ret)
6263 return ret;
730fdea4 6264
c5afbe99 6265 vsi = ice_get_vf_vsi(vf);
6266 if (!vsi)
6267 return -EINVAL;
6268
6269 ice_update_eth_stats(vsi);
6270 stats = &vsi->eth_stats;
6271
6272 memset(vf_stats, 0, sizeof(*vf_stats));
6273
6274 vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
6275 stats->rx_multicast;
6276 vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
6277 stats->tx_multicast;
6278 vf_stats->rx_bytes = stats->rx_bytes;
6279 vf_stats->tx_bytes = stats->tx_bytes;
6280 vf_stats->broadcast = stats->rx_broadcast;
6281 vf_stats->multicast = stats->rx_multicast;
6282 vf_stats->rx_dropped = stats->rx_discards;
6283 vf_stats->tx_dropped = stats->tx_discards;
6284
6285 return 0;
6286}
9d5c5a52 6287
6288/**
6289 * ice_print_vf_rx_mdd_event - print VF Rx malicious driver detect event
6290 * @vf: pointer to the VF structure
6291 */
6292void ice_print_vf_rx_mdd_event(struct ice_vf *vf)
6293{
6294 struct ice_pf *pf = vf->pf;
6295 struct device *dev;
6296
6297 dev = ice_pf_to_dev(pf);
6298
6299 dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
6300 vf->mdd_rx_events.count, pf->hw.pf_id, vf->vf_id,
f28cd5ce 6301 vf->dev_lan_addr.addr,
6302 test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
6303 ? "on" : "off");
6304}
6305
9d5c5a52 6306/**
ef860480 6307 * ice_print_vfs_mdd_events - print VFs malicious driver detect event
6308 * @pf: pointer to the PF structure
6309 *
6310 * Called from ice_handle_mdd_event to rate limit and print VF MDD events.
6311 */
6312void ice_print_vfs_mdd_events(struct ice_pf *pf)
6313{
6314 struct device *dev = ice_pf_to_dev(pf);
6315 struct ice_hw *hw = &pf->hw;
6316 int i;
6317
6318 /* check that there are pending MDD events to print */
7e408e07 6319 if (!test_and_clear_bit(ICE_MDD_VF_PRINT_PENDING, pf->state))
6320 return;
6321
6322 /* VF MDD event logs are rate limited to one second intervals */
6323 if (time_is_after_jiffies(pf->last_printed_mdd_jiffies + HZ * 1))
6324 return;
6325
6326 pf->last_printed_mdd_jiffies = jiffies;
6327
6328 ice_for_each_vf(pf, i) {
6329 struct ice_vf *vf = &pf->vf[i];
6330
6331 /* only print Rx MDD event message if there are new events */
6332 if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) {
6333 vf->mdd_rx_events.last_printed =
6334 vf->mdd_rx_events.count;
7438a3b0 6335 ice_print_vf_rx_mdd_event(vf);
6336 }
6337
6338 /* only print Tx MDD event message if there are new events */
6339 if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) {
6340 vf->mdd_tx_events.last_printed =
6341 vf->mdd_tx_events.count;
6342
6343 dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n",
6344 vf->mdd_tx_events.count, hw->pf_id, i,
f28cd5ce 6345 vf->dev_lan_addr.addr);
6346 }
6347 }
6348}
6349
6350/**
6351 * ice_restore_all_vfs_msi_state - restore VF MSI state after PF FLR
6352 * @pdev: pointer to a pci_dev structure
6353 *
6354 * Called when recovering from a PF FLR to restore interrupt capability to
6355 * the VFs.
6356 */
6357void ice_restore_all_vfs_msi_state(struct pci_dev *pdev)
6358{
6359 u16 vf_id;
6360 int pos;
6361
6362 if (!pci_num_vf(pdev))
6363 return;
6364
6365 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
6366 if (pos) {
6367 struct pci_dev *vfdev;
6368
6369 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID,
6370 &vf_id);
6371 vfdev = pci_get_device(pdev->vendor, vf_id, NULL);
6372 while (vfdev) {
6373 if (vfdev->is_virtfn && vfdev->physfn == pdev)
6374 pci_restore_msi_state(vfdev);
6375 vfdev = pci_get_device(pdev->vendor, vf_id,
6376 vfdev);
6377 }
6378 }
6379}
6380
6381/**
6382 * ice_is_malicious_vf - helper function to detect a malicious VF
6383 * @pf: ptr to struct ice_pf
6384 * @event: pointer to the AQ event
6385 * @num_msg_proc: the number of messages processed so far
6386 * @num_msg_pending: the number of messages pending in the admin queue
6387 */
6388bool
6389ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event,
6390 u16 num_msg_proc, u16 num_msg_pending)
6391{
6392 s16 vf_id = le16_to_cpu(event->desc.retval);
6393 struct device *dev = ice_pf_to_dev(pf);
6394 struct ice_mbx_data mbxdata;
6395 bool malvf = false;
6396 struct ice_vf *vf;
5518ac2a 6397 int status;
6398
6399 if (ice_validate_vf_id(pf, vf_id))
6400 return false;
6401
6402 vf = &pf->vf[vf_id];
6403 /* Check if VF is disabled. */
6404 if (test_bit(ICE_VF_STATE_DIS, vf->vf_states))
6405 return false;
6406
6407 mbxdata.num_msg_proc = num_msg_proc;
6408 mbxdata.num_pending_arq = num_msg_pending;
6409 mbxdata.max_num_msgs_mbx = pf->hw.mailboxq.num_rq_entries;
6410#define ICE_MBX_OVERFLOW_WATERMARK 64
6411 mbxdata.async_watermark_val = ICE_MBX_OVERFLOW_WATERMARK;
6412
6413 /* check to see if we have a malicious VF */
6414 status = ice_mbx_vf_state_handler(&pf->hw, &mbxdata, vf_id, &malvf);
6415 if (status)
6416 return false;
6417
6418 if (malvf) {
6419 bool report_vf = false;
6420
6421 /* if the VF is malicious and we haven't let the user
6422 * know about it, then let them know now
6423 */
6424 status = ice_mbx_report_malvf(&pf->hw, pf->malvfs,
6425 ICE_MAX_VF_COUNT, vf_id,
6426 &report_vf);
6427 if (status)
6428 dev_dbg(dev, "Error reporting malicious VF\n");
6429
6430 if (report_vf) {
6431 struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);
6432
6433 if (pf_vsi)
6434 dev_warn(dev, "VF MAC %pM on PF MAC %pM is generating asynchronous messages and may be overflowing the PF message queue. Please see the Adapter User Guide for more information\n",
f28cd5ce 6435 &vf->dev_lan_addr.addr[0],
6436 pf_vsi->netdev->dev_addr);
6437 }
6438
6439 return true;
6440 }
6441
6442 /* if there was an error in detection or the VF is not malicious then
6443 * return false
6444 */
6445 return false;
6446}