drivers/net/ethernet/intel/ice/ice_eswitch.c

// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2019-2021, Intel Corporation. */

#include "ice.h"
#include "ice_lib.h"
#include "ice_eswitch.h"
#include "ice_fltr.h"
#include "ice_repr.h"
#include "ice_devlink.h"
#include "ice_tc_lib.h"

/**
 * ice_eswitch_add_vf_sp_rule - add adv rule with VF's VSI index
 * @pf: pointer to PF struct
 * @vf: pointer to VF struct
 *
 * This function adds an advanced rule that forwards packets with the
 * VF's VSI index to the corresponding switchdev ctrl VSI queue.
 */
static int
ice_eswitch_add_vf_sp_rule(struct ice_pf *pf, struct ice_vf *vf)
{
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
	struct ice_adv_rule_info rule_info = { 0 };
	struct ice_adv_lkup_elem *list;
	struct ice_hw *hw = &pf->hw;
	const u16 lkups_cnt = 1;
	int err;

	list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
	if (!list)
		return -ENOMEM;

	ice_rule_add_src_vsi_metadata(list);

	rule_info.sw_act.flag = ICE_FLTR_TX;
	rule_info.sw_act.vsi_handle = ctrl_vsi->idx;
	rule_info.sw_act.fltr_act = ICE_FWD_TO_Q;
	rule_info.sw_act.fwd_id.q_id = hw->func_caps.common_cap.rxq_first_id +
				       ctrl_vsi->rxq_map[vf->vf_id];
	rule_info.flags_info.act |= ICE_SINGLE_ACT_LB_ENABLE;
	rule_info.flags_info.act_valid = true;
	rule_info.tun_type = ICE_SW_TUN_AND_NON_TUN;
	rule_info.src_vsi = vf->lan_vsi_idx;

	err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info,
			       &vf->repr->sp_rule);
	if (err)
		dev_err(ice_pf_to_dev(pf), "Unable to add VF slow-path rule in switchdev mode for VF %d",
			vf->vf_id);

	kfree(list);
	return err;
}

/**
 * ice_eswitch_del_vf_sp_rule - delete adv rule with VF's VSI index
 * @vf: pointer to the VF struct
 *
 * Delete the advanced rule that was used to forward packets with the VF's VSI
 * index to the corresponding switchdev ctrl VSI queue.
 */
static void ice_eswitch_del_vf_sp_rule(struct ice_vf *vf)
{
	if (!vf->repr)
		return;

	ice_rem_adv_rule_by_id(&vf->pf->hw, &vf->repr->sp_rule);
}

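/* Together, ice_eswitch_add_vf_sp_rule() and ice_eswitch_del_vf_sp_rule()
 * manage the per-VF slow-path rule: it matches on the VF's source VSI
 * metadata and forwards matching frames, with loopback enabled, to the
 * control VSI Rx queue mapped to vf->vf_id, so that VF-originated traffic
 * can be delivered to the corresponding port representor.
 */
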
/**
 * ice_eswitch_setup_env - configure switchdev HW filters
 * @pf: pointer to PF struct
 *
 * This function adds the HW filter configuration specific to switchdev
 * mode.
 */
static int ice_eswitch_setup_env(struct ice_pf *pf)
{
	struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi;
	struct net_device *uplink_netdev = uplink_vsi->netdev;
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
	struct ice_vsi_vlan_ops *vlan_ops;
	bool rule_added = false;

	vlan_ops = ice_get_compat_vsi_vlan_ops(ctrl_vsi);
	if (vlan_ops->dis_stripping(ctrl_vsi))
		return -ENODEV;

	ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx);

	netif_addr_lock_bh(uplink_netdev);
	__dev_uc_unsync(uplink_netdev, NULL);
	__dev_mc_unsync(uplink_netdev, NULL);
	netif_addr_unlock_bh(uplink_netdev);

	if (ice_vsi_add_vlan_zero(uplink_vsi))
		goto err_def_rx;

	if (!ice_is_dflt_vsi_in_use(uplink_vsi->port_info)) {
		if (ice_set_dflt_vsi(uplink_vsi))
			goto err_def_rx;
		rule_added = true;
	}

	if (ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_set_allow_override))
		goto err_override_uplink;

	if (ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_set_allow_override))
		goto err_override_control;

	return 0;

err_override_control:
	ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
err_override_uplink:
	if (rule_added)
		ice_clear_dflt_vsi(uplink_vsi);
err_def_rx:
	ice_fltr_add_mac_and_broadcast(uplink_vsi,
				       uplink_vsi->port_info->mac.perm_addr,
				       ICE_FWD_TO_VSI);
	return -ENODEV;
}

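/* Note on the unwind path above: by the time any step fails, the uplink
 * VSI's regular MAC filters have already been flushed, so every error label
 * falls through to err_def_rx, which re-adds the uplink's permanent MAC and
 * broadcast filters so that default (legacy) Rx keeps working.
 */
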
/**
 * ice_eswitch_remap_rings_to_vectors - reconfigure rings of switchdev ctrl VSI
 * @pf: pointer to PF struct
 *
 * In switchdev mode the numbers of allocated Tx and Rx rings are equal.
 *
 * This function fills the q_vector structures associated with each
 * representor and moves each ring pair to the port representor netdev. Each
 * port representor gets one dedicated Tx/Rx ring pair, so the number of ring
 * pairs equals the number of VFs.
 */
static void ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf)
{
	struct ice_vsi *vsi = pf->switchdev.control_vsi;
	int q_id;

	ice_for_each_txq(vsi, q_id) {
		struct ice_q_vector *q_vector;
		struct ice_tx_ring *tx_ring;
		struct ice_rx_ring *rx_ring;
		struct ice_repr *repr;
		struct ice_vf *vf;

		vf = ice_get_vf_by_id(pf, q_id);
		if (WARN_ON(!vf))
			continue;

		repr = vf->repr;
		q_vector = repr->q_vector;
		tx_ring = vsi->tx_rings[q_id];
		rx_ring = vsi->rx_rings[q_id];

		q_vector->vsi = vsi;
		q_vector->reg_idx = vsi->q_vectors[0]->reg_idx;

		q_vector->num_ring_tx = 1;
		q_vector->tx.tx_ring = tx_ring;
		tx_ring->q_vector = q_vector;
		tx_ring->next = NULL;
		tx_ring->netdev = repr->netdev;
		/* In switchdev mode, from OS stack perspective, there is only
		 * one queue for given netdev, so it needs to be indexed as 0.
		 */
		tx_ring->q_index = 0;

		q_vector->num_ring_rx = 1;
		q_vector->rx.rx_ring = rx_ring;
		rx_ring->q_vector = q_vector;
		rx_ring->next = NULL;
		rx_ring->netdev = repr->netdev;

		ice_put_vf(vf);
	}
}

/**
 * ice_eswitch_release_reprs - clear PR VSIs configuration
 * @pf: pointer to PF struct
 * @ctrl_vsi: pointer to switchdev control VSI
 */
static void
ice_eswitch_release_reprs(struct ice_pf *pf, struct ice_vsi *ctrl_vsi)
{
	struct ice_vf *vf;
	unsigned int bkt;

	lockdep_assert_held(&pf->vfs.table_lock);

	ice_for_each_vf(pf, bkt, vf) {
		struct ice_vsi *vsi = vf->repr->src_vsi;

		/* Skip VFs that aren't configured */
		if (!vf->repr->dst)
			continue;

		ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
		metadata_dst_free(vf->repr->dst);
		vf->repr->dst = NULL;
		ice_eswitch_del_vf_sp_rule(vf);
		ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr,
					       ICE_FWD_TO_VSI);

		netif_napi_del(&vf->repr->q_vector->napi);
	}
}

/**
 * ice_eswitch_setup_reprs - configure port reprs to run in switchdev mode
 * @pf: pointer to PF struct
 */
static int ice_eswitch_setup_reprs(struct ice_pf *pf)
{
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
	int max_vsi_num = 0;
	struct ice_vf *vf;
	unsigned int bkt;

	lockdep_assert_held(&pf->vfs.table_lock);

	ice_for_each_vf(pf, bkt, vf) {
		struct ice_vsi *vsi = vf->repr->src_vsi;

		ice_remove_vsi_fltr(&pf->hw, vsi->idx);
		vf->repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
						   GFP_KERNEL);
		if (!vf->repr->dst) {
			ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr,
						       ICE_FWD_TO_VSI);
			goto err;
		}

		if (ice_eswitch_add_vf_sp_rule(pf, vf)) {
			ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr,
						       ICE_FWD_TO_VSI);
			goto err;
		}

		if (ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof)) {
			ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr,
						       ICE_FWD_TO_VSI);
			ice_eswitch_del_vf_sp_rule(vf);
			metadata_dst_free(vf->repr->dst);
			vf->repr->dst = NULL;
			goto err;
		}

		if (ice_vsi_add_vlan_zero(vsi)) {
			ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr,
						       ICE_FWD_TO_VSI);
			ice_eswitch_del_vf_sp_rule(vf);
			metadata_dst_free(vf->repr->dst);
			vf->repr->dst = NULL;
			ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
			goto err;
		}

		if (max_vsi_num < vsi->vsi_num)
			max_vsi_num = vsi->vsi_num;

		netif_napi_add(vf->repr->netdev, &vf->repr->q_vector->napi,
			       ice_napi_poll);

		netif_keep_dst(vf->repr->netdev);
	}

	ice_for_each_vf(pf, bkt, vf) {
		struct ice_repr *repr = vf->repr;
		struct ice_vsi *vsi = repr->src_vsi;
		struct metadata_dst *dst;

		dst = repr->dst;
		dst->u.port_info.port_id = vsi->vsi_num;
		dst->u.port_info.lower_dev = repr->netdev;
		ice_repr_set_traffic_vsi(repr, ctrl_vsi);
	}

	return 0;

err:
	ice_eswitch_release_reprs(pf, ctrl_vsi);

	return -ENODEV;
}

/**
 * ice_eswitch_update_repr - reconfigure VF port representor
 * @vsi: VF VSI for which port representor is configured
 */
void ice_eswitch_update_repr(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_repr *repr;
	struct ice_vf *vf;
	int ret;

	if (!ice_is_switchdev_running(pf))
		return;

	vf = vsi->vf;
	repr = vf->repr;
	repr->src_vsi = vsi;
	repr->dst->u.port_info.port_id = vsi->vsi_num;

	ret = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof);
	if (ret) {
		ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr, ICE_FWD_TO_VSI);
		dev_err(ice_pf_to_dev(pf), "Failed to update VF %d port representor",
			vsi->vf->vf_id);
	}
}

/**
 * ice_eswitch_port_start_xmit - callback for packet transmit
 * @skb: send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 */
netdev_tx_t
ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ice_netdev_priv *np;
	struct ice_repr *repr;
	struct ice_vsi *vsi;

	np = netdev_priv(netdev);
	vsi = np->vsi;

	if (ice_is_reset_in_progress(vsi->back->state) ||
	    test_bit(ICE_VF_DIS, vsi->back->state))
		return NETDEV_TX_BUSY;

	repr = ice_netdev_to_repr(netdev);
	skb_dst_drop(skb);
	dst_hold((struct dst_entry *)repr->dst);
	skb_dst_set(skb, (struct dst_entry *)repr->dst);
	skb->queue_mapping = repr->vf->vf_id;

	return ice_start_xmit(skb, netdev);
}

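/* The representor xmit path above does not build descriptors itself: it
 * attaches the representor's metadata_dst to the skb, records the VF id in
 * skb->queue_mapping and hands the frame to ice_start_xmit(). The
 * metadata_dst is later consumed by ice_eswitch_set_target_vsi() below to
 * steer the frame to the VF's VSI.
 */
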
/**
 * ice_eswitch_set_target_vsi - set switchdev context in Tx context descriptor
 * @skb: pointer to send buffer
 * @off: pointer to offload struct
 */
void
ice_eswitch_set_target_vsi(struct sk_buff *skb,
			   struct ice_tx_offload_params *off)
{
	struct metadata_dst *dst = skb_metadata_dst(skb);
	u64 cd_cmd, dst_vsi;

	if (!dst) {
		cd_cmd = ICE_TX_CTX_DESC_SWTCH_UPLINK << ICE_TXD_CTX_QW1_CMD_S;
		off->cd_qw1 |= (cd_cmd | ICE_TX_DESC_DTYPE_CTX);
	} else {
		cd_cmd = ICE_TX_CTX_DESC_SWTCH_VSI << ICE_TXD_CTX_QW1_CMD_S;
		dst_vsi = ((u64)dst->u.port_info.port_id <<
			   ICE_TXD_CTX_QW1_VSI_S) & ICE_TXD_CTX_QW1_VSI_M;
		off->cd_qw1 = cd_cmd | dst_vsi | ICE_TX_DESC_DTYPE_CTX;
	}
}

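/* In the helper above, a frame without a metadata_dst gets the
 * ICE_TX_CTX_DESC_SWTCH_UPLINK command, i.e. it is sent toward the uplink;
 * otherwise the VSI number stored in dst->u.port_info.port_id is written
 * into the QW1 VSI field so the HW switch forwards the frame to that VSI.
 */
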
/**
 * ice_eswitch_release_env - clear switchdev HW filters
 * @pf: pointer to PF struct
 *
 * This function removes the HW filter configuration specific to switchdev
 * mode and restores the default legacy mode settings.
 */
static void ice_eswitch_release_env(struct ice_pf *pf)
{
	struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi;
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;

	ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
	ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
	ice_clear_dflt_vsi(uplink_vsi);
	ice_fltr_add_mac_and_broadcast(uplink_vsi,
				       uplink_vsi->port_info->mac.perm_addr,
				       ICE_FWD_TO_VSI);
}

/**
 * ice_eswitch_vsi_setup - configure switchdev control VSI
 * @pf: pointer to PF structure
 * @pi: pointer to port_info structure
 */
static struct ice_vsi *
ice_eswitch_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
	struct ice_vsi_cfg_params params = {};

	params.type = ICE_VSI_SWITCHDEV_CTRL;
	params.pi = pi;
	params.flags = ICE_VSI_FLAG_INIT;

	return ice_vsi_setup(pf, &params);
}

/**
 * ice_eswitch_napi_del - remove NAPI handle for all port representors
 * @pf: pointer to PF structure
 */
static void ice_eswitch_napi_del(struct ice_pf *pf)
{
	struct ice_vf *vf;
	unsigned int bkt;

	lockdep_assert_held(&pf->vfs.table_lock);

	ice_for_each_vf(pf, bkt, vf)
		netif_napi_del(&vf->repr->q_vector->napi);
}

/**
 * ice_eswitch_napi_enable - enable NAPI for all port representors
 * @pf: pointer to PF structure
 */
static void ice_eswitch_napi_enable(struct ice_pf *pf)
{
	struct ice_vf *vf;
	unsigned int bkt;

	lockdep_assert_held(&pf->vfs.table_lock);

	ice_for_each_vf(pf, bkt, vf)
		napi_enable(&vf->repr->q_vector->napi);
}

/**
 * ice_eswitch_napi_disable - disable NAPI for all port representors
 * @pf: pointer to PF structure
 */
static void ice_eswitch_napi_disable(struct ice_pf *pf)
{
	struct ice_vf *vf;
	unsigned int bkt;

	lockdep_assert_held(&pf->vfs.table_lock);

	ice_for_each_vf(pf, bkt, vf)
		napi_disable(&vf->repr->q_vector->napi);
}

/**
 * ice_eswitch_enable_switchdev - configure eswitch in switchdev mode
 * @pf: pointer to PF structure
 */
static int ice_eswitch_enable_switchdev(struct ice_pf *pf)
{
	struct ice_vsi *ctrl_vsi;

	pf->switchdev.control_vsi = ice_eswitch_vsi_setup(pf, pf->hw.port_info);
	if (!pf->switchdev.control_vsi)
		return -ENODEV;

	ctrl_vsi = pf->switchdev.control_vsi;
	pf->switchdev.uplink_vsi = ice_get_main_vsi(pf);
	if (!pf->switchdev.uplink_vsi)
		goto err_vsi;

	if (ice_eswitch_setup_env(pf))
		goto err_vsi;

	if (ice_repr_add_for_all_vfs(pf))
		goto err_repr_add;

	if (ice_eswitch_setup_reprs(pf))
		goto err_setup_reprs;

	ice_eswitch_remap_rings_to_vectors(pf);

	if (ice_vsi_open(ctrl_vsi))
		goto err_setup_reprs;

	ice_eswitch_napi_enable(pf);

	return 0;

err_setup_reprs:
	ice_repr_rem_from_all_vfs(pf);
err_repr_add:
	ice_eswitch_release_env(pf);
err_vsi:
	ice_vsi_release(ctrl_vsi);
	return -ENODEV;
}

/**
 * ice_eswitch_disable_switchdev - disable switchdev resources
 * @pf: pointer to PF structure
 */
static void ice_eswitch_disable_switchdev(struct ice_pf *pf)
{
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;

	ice_eswitch_napi_disable(pf);
	ice_eswitch_release_env(pf);
	ice_rem_adv_rule_for_vsi(&pf->hw, ctrl_vsi->idx);
	ice_eswitch_release_reprs(pf, ctrl_vsi);
	ice_vsi_release(ctrl_vsi);
	ice_repr_rem_from_all_vfs(pf);
}

/**
 * ice_eswitch_mode_set - set new eswitch mode
 * @devlink: pointer to devlink structure
 * @mode: eswitch mode to switch to
 * @extack: pointer to extack structure
 */
int
ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
		     struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = devlink_priv(devlink);

	if (pf->eswitch_mode == mode)
		return 0;

	if (ice_has_vfs(pf)) {
		dev_info(ice_pf_to_dev(pf), "Changing eswitch mode is allowed only if there is no VFs created");
		NL_SET_ERR_MSG_MOD(extack, "Changing eswitch mode is allowed only if there is no VFs created");
		return -EOPNOTSUPP;
	}

	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
		dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to legacy",
			 pf->hw.pf_id);
		NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to legacy");
		break;
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
	{
		if (ice_is_adq_active(pf)) {
			dev_err(ice_pf_to_dev(pf), "Couldn't change eswitch mode to switchdev - ADQ is active. Delete ADQ configs and try again, e.g. tc qdisc del dev $PF root");
			NL_SET_ERR_MSG_MOD(extack, "Couldn't change eswitch mode to switchdev - ADQ is active. Delete ADQ configs and try again, e.g. tc qdisc del dev $PF root");
			return -EOPNOTSUPP;
		}

		dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to switchdev",
			 pf->hw.pf_id);
		NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to switchdev");
		break;
	}
	default:
		NL_SET_ERR_MSG_MOD(extack, "Unknown eswitch mode");
		return -EINVAL;
	}

	pf->eswitch_mode = mode;
	return 0;
}

/**
 * ice_eswitch_mode_get - get current eswitch mode
 * @devlink: pointer to devlink structure
 * @mode: output parameter for current eswitch mode
 */
int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct ice_pf *pf = devlink_priv(devlink);

	*mode = pf->eswitch_mode;
	return 0;
}

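/* Usage sketch (not part of the driver): the two devlink callbacks above are
 * what userspace reaches when changing or querying the eswitch mode, e.g.:
 *
 *   devlink dev eswitch set pci/0000:18:00.0 mode switchdev
 *   devlink dev eswitch show pci/0000:18:00.0
 *
 * The PCI address is illustrative; VFs must be removed before switching
 * modes, as enforced by the ice_has_vfs() check in ice_eswitch_mode_set().
 */
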
/**
 * ice_is_eswitch_mode_switchdev - check if eswitch mode is set to switchdev
 * @pf: pointer to PF structure
 *
 * Returns true if eswitch mode is set to DEVLINK_ESWITCH_MODE_SWITCHDEV,
 * false otherwise.
 */
bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf)
{
	return pf->eswitch_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV;
}

/**
 * ice_eswitch_release - cleanup eswitch
 * @pf: pointer to PF structure
 */
void ice_eswitch_release(struct ice_pf *pf)
{
	if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY)
		return;

	ice_eswitch_disable_switchdev(pf);
	pf->switchdev.is_running = false;
}

/**
 * ice_eswitch_configure - configure eswitch
 * @pf: pointer to PF structure
 */
int ice_eswitch_configure(struct ice_pf *pf)
{
	int status;

	if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY || pf->switchdev.is_running)
		return 0;

	status = ice_eswitch_enable_switchdev(pf);
	if (status)
		return status;

	pf->switchdev.is_running = true;
	return 0;
}

/**
 * ice_eswitch_start_all_tx_queues - start Tx queues of all port representors
 * @pf: pointer to PF structure
 */
static void ice_eswitch_start_all_tx_queues(struct ice_pf *pf)
{
	struct ice_vf *vf;
	unsigned int bkt;

	lockdep_assert_held(&pf->vfs.table_lock);

	if (test_bit(ICE_DOWN, pf->state))
		return;

	ice_for_each_vf(pf, bkt, vf) {
		if (vf->repr)
			ice_repr_start_tx_queues(vf->repr);
	}
}

/**
 * ice_eswitch_stop_all_tx_queues - stop Tx queues of all port representors
 * @pf: pointer to PF structure
 */
void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf)
{
	struct ice_vf *vf;
	unsigned int bkt;

	lockdep_assert_held(&pf->vfs.table_lock);

	if (test_bit(ICE_DOWN, pf->state))
		return;

	ice_for_each_vf(pf, bkt, vf) {
		if (vf->repr)
			ice_repr_stop_tx_queues(vf->repr);
	}
}

/**
 * ice_eswitch_rebuild - rebuild eswitch
 * @pf: pointer to PF structure
 */
int ice_eswitch_rebuild(struct ice_pf *pf)
{
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
	int status;

	ice_eswitch_napi_disable(pf);
	ice_eswitch_napi_del(pf);

	status = ice_eswitch_setup_env(pf);
	if (status)
		return status;

	status = ice_eswitch_setup_reprs(pf);
	if (status)
		return status;

	ice_eswitch_remap_rings_to_vectors(pf);

	ice_replay_tc_fltrs(pf);

	status = ice_vsi_open(ctrl_vsi);
	if (status)
		return status;

	ice_eswitch_napi_enable(pf);
	ice_eswitch_start_all_tx_queues(pf);

	return 0;
}