/* bnx2x_sriov.c: Broadcom Everest network driver.
 *
 * Copyright 2009-2013 Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a
 * license other than the GPL, without Broadcom's express prior written
 * consent.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Shmulik Ravid <shmulikr@broadcom.com>
 *	       Ariel Elior <ariele@broadcom.com>
 */
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_cmn.h"
#include "bnx2x_sp.h"
#include <linux/crc32.h>
#include <linux/if_vlan.h>
/* General service functions */
static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
				  u8 pfid)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pfid);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pfid);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pfid);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pfid);
}

static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
				 u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}
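
/* Map an absolute VF id to this PF's VF array index. When no VF matches,
 * the loop falls through and the returned index equals BNX2X_NR_VIRTFN(bp),
 * i.e. out of range - callers such as bnx2x_vf_by_abs_fid() must range-check.
 */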
int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
	int idx;

	for_each_vf(bp, idx)
		if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid)
			break;
	return idx;
}

struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
	u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid);
	return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL;
}
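
/* The PF cannot reach a VF's IGU through its own BAR, so the status-block
 * ack is issued indirectly: the producer-update command and a control word
 * carrying the VF's function id are written via the GRC address space.
 */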
static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf,
				u8 igu_sb_id, u8 segment, u16 index, u8 op,
				u8 update)
{
	/* acking a VF sb through the PF - use the GRC */
	u32 ctl;
	u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
	u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
	u32 func_encode = vf->abs_vfid;
	u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + igu_sb_id;
	struct igu_regular cmd_data = {0};

	cmd_data.sb_id_and_flags =
			((index << IGU_REGULAR_SB_INDEX_SHIFT) |
			 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
			 (update << IGU_REGULAR_BUPDATE_SHIFT) |
			 (op << IGU_REGULAR_ENABLE_INT_SHIFT));

	ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
	      func_encode << IGU_CTRL_REG_FID_SHIFT |
	      IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;

	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   cmd_data.sb_id_and_flags, igu_addr_data);
	REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags);
	mmiowb();
	barrier();

	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   ctl, igu_addr_ctl);
	REG_WR(bp, igu_addr_ctl, ctl);
	mmiowb();
	barrier();
}

static bool bnx2x_validate_vf_sp_objs(struct bnx2x *bp,
				      struct bnx2x_virtf *vf,
				      bool print_err)
{
	if (!bnx2x_leading_vfq(vf, sp_initialized)) {
		if (print_err)
			BNX2X_ERR("Slowpath objects not yet initialized!\n");
		else
			DP(BNX2X_MSG_IOV, "Slowpath objects not yet initialized!\n");
		return false;
	}
	return true;
}
/* VFOP - VF slow-path operation support */

#define BNX2X_VFOP_FILTER_ADD_CNT_MAX		0x10000
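
/* A VFOP is a small per-VF state machine. Each handler below runs the
 * current state, typically posts an asynchronous ramrod, and is re-invoked
 * on its completion until it reaches op_done/op_err; bnx2x_vfop_finalize()
 * (see bnx2x_sriov.h) decides between continuing to the next state,
 * returning with the op pending, or jumping to the error path.
 */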
/* VFOP operations states */
enum bnx2x_vfop_qctor_state {
	BNX2X_VFOP_QCTOR_INIT,
	BNX2X_VFOP_QCTOR_SETUP,
	BNX2X_VFOP_QCTOR_INT_EN
};

enum bnx2x_vfop_qdtor_state {
	BNX2X_VFOP_QDTOR_HALT,
	BNX2X_VFOP_QDTOR_TERMINATE,
	BNX2X_VFOP_QDTOR_CFCDEL,
	BNX2X_VFOP_QDTOR_DONE
};

enum bnx2x_vfop_vlan_mac_state {
	BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
	BNX2X_VFOP_VLAN_MAC_CLEAR,
	BNX2X_VFOP_VLAN_MAC_CHK_DONE,
	BNX2X_VFOP_MAC_CONFIG_LIST,
	BNX2X_VFOP_VLAN_CONFIG_LIST,
	BNX2X_VFOP_VLAN_CONFIG_LIST_0
};

enum bnx2x_vfop_qsetup_state {
	BNX2X_VFOP_QSETUP_CTOR,
	BNX2X_VFOP_QSETUP_VLAN0,
	BNX2X_VFOP_QSETUP_DONE
};

enum bnx2x_vfop_mcast_state {
	BNX2X_VFOP_MCAST_DEL,
	BNX2X_VFOP_MCAST_ADD,
	BNX2X_VFOP_MCAST_CHK_DONE
};

enum bnx2x_vfop_qflr_state {
	BNX2X_VFOP_QFLR_CLR_VLAN,
	BNX2X_VFOP_QFLR_CLR_MAC,
	BNX2X_VFOP_QFLR_TERMINATE,
	BNX2X_VFOP_QFLR_DONE
};

enum bnx2x_vfop_flr_state {
	BNX2X_VFOP_FLR_QUEUES,
	BNX2X_VFOP_FLR_HW
};

enum bnx2x_vfop_close_state {
	BNX2X_VFOP_CLOSE_QUEUES,
	BNX2X_VFOP_CLOSE_HW
};

enum bnx2x_vfop_rxmode_state {
	BNX2X_VFOP_RXMODE_CONFIG,
	BNX2X_VFOP_RXMODE_DONE
};

enum bnx2x_vfop_qteardown_state {
	BNX2X_VFOP_QTEARDOWN_RXMODE,
	BNX2X_VFOP_QTEARDOWN_CLR_VLAN,
	BNX2X_VFOP_QTEARDOWN_CLR_MAC,
	BNX2X_VFOP_QTEARDOWN_CLR_MCAST,
	BNX2X_VFOP_QTEARDOWN_QDTOR,
	BNX2X_VFOP_QTEARDOWN_DONE
};

enum bnx2x_vfop_rss_state {
	BNX2X_VFOP_RSS_CONFIG,
	BNX2X_VFOP_RSS_DONE
};

enum bnx2x_vfop_tpa_state {
	BNX2X_VFOP_TPA_CONFIG,
	BNX2X_VFOP_TPA_DONE
};

#define bnx2x_vfop_reset_wq(vf)	atomic_set(&vf->op_in_progress, 0)
void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      struct bnx2x_queue_init_params *init_params,
			      struct bnx2x_queue_setup_params *setup_params,
			      u16 q_idx, u16 sb_idx)
{
	DP(BNX2X_MSG_IOV,
	   "VF[%d] Q_SETUP: txq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, flags=0x%lx, traffic-type=%d",
	   vf->abs_vfid,
	   q_idx,
	   sb_idx,
	   init_params->tx.sb_cq_index,
	   init_params->tx.hc_rate,
	   setup_params->flags,
	   setup_params->txq_params.traffic_type);
}

void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      struct bnx2x_queue_init_params *init_params,
			      struct bnx2x_queue_setup_params *setup_params,
			      u16 q_idx, u16 sb_idx)
{
	struct bnx2x_rxq_setup_params *rxq_params = &setup_params->rxq_params;

	DP(BNX2X_MSG_IOV, "VF[%d] Q_SETUP: rxq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, mtu=%d, buf-size=%d\n"
	   "sge-size=%d, max_sge_pkt=%d, tpa-agg-size=%d, flags=0x%lx, drop-flags=0x%x, cache-log=%d\n",
	   vf->abs_vfid,
	   q_idx,
	   sb_idx,
	   init_params->rx.sb_cq_index,
	   init_params->rx.hc_rate,
	   setup_params->gen_params.mtu,
	   rxq_params->buf_sz,
	   rxq_params->sge_buf_sz,
	   rxq_params->max_sges_pkt,
	   rxq_params->tpa_agg_sz,
	   setup_params->flags,
	   rxq_params->drop_flags,
	   rxq_params->cache_line_log);
}

void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
			   struct bnx2x_virtf *vf,
			   struct bnx2x_vf_queue *q,
			   struct bnx2x_vfop_qctor_params *p,
			   unsigned long q_type)
{
	struct bnx2x_queue_init_params *init_p = &p->qstate.params.init;
	struct bnx2x_queue_setup_params *setup_p = &p->prep_qsetup;

	/* INIT */

	/* Enable host coalescing in the transition to INIT state */
	if (test_bit(BNX2X_Q_FLG_HC, &init_p->rx.flags))
		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->rx.flags);

	if (test_bit(BNX2X_Q_FLG_HC, &init_p->tx.flags))
		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->tx.flags);

	/* FW SB ID */
	init_p->rx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
	init_p->tx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);

	/* context */
	init_p->cxts[0] = q->cxt;

	/* SETUP */

	/* Setup-op general parameters */
	setup_p->gen_params.spcl_id = vf->sp_cl_id;
	setup_p->gen_params.stat_id = vfq_stat_id(vf, q);

	/* Setup-op pause params:
	 * Nothing to do, the pause thresholds are set by default to 0 which
	 * effectively turns off the feature for this queue. We don't want
	 * one queue (VF) interfering with another queue (another VF)
	 */
	if (vf->cfg_flags & VF_CFG_FW_FC)
		BNX2X_ERR("No support for pause to VFs (abs_vfid: %d)\n",
			  vf->abs_vfid);
	/* Setup-op flags:
	 * collect statistics, zero statistics, local-switching, security,
	 * OV for Flex10, RSS and MCAST for leading
	 */
	if (test_bit(BNX2X_Q_FLG_STATS, &setup_p->flags))
		__set_bit(BNX2X_Q_FLG_ZERO_STATS, &setup_p->flags);

	/* for VFs, enable tx switching, bd coherency, and mac address
	 * anti-spoofing
	 */
	__set_bit(BNX2X_Q_FLG_TX_SWITCH, &setup_p->flags);
	__set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags);
	__set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);

	/* Setup-op rx parameters */
	if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) {
		struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params;

		rxq_p->cl_qzone_id = vfq_qzone_id(vf, q);
		rxq_p->fw_sb_id = vf_igu_sb(vf, q->sb_idx);
		rxq_p->rss_engine_id = FW_VF_HANDLE(vf->abs_vfid);

		if (test_bit(BNX2X_Q_FLG_TPA, &setup_p->flags))
			rxq_p->max_tpa_queues = BNX2X_VF_MAX_TPA_AGG_QUEUES;
	}

	/* Setup-op tx parameters */
	if (test_bit(BNX2X_Q_TYPE_HAS_TX, &q_type)) {
		setup_p->txq_params.tss_leading_cl_id = vf->leading_rss;
		setup_p->txq_params.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
	}
}
/* VFOP queue construction */
static void bnx2x_vfop_qctor(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vfop_args_qctor *args = &vfop->args.qctor;
	struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
	enum bnx2x_vfop_qctor_state state = vfop->state;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_QCTOR_INIT:

		/* has this queue already been opened? */
		if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
		    BNX2X_Q_LOGICAL_STATE_ACTIVE) {
			DP(BNX2X_MSG_IOV,
			   "Entered qctor but queue was already up. Aborting gracefully\n");
			goto op_done;
		}

		/* next state */
		vfop->state = BNX2X_VFOP_QCTOR_SETUP;

		q_params->cmd = BNX2X_Q_CMD_INIT;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QCTOR_SETUP:
		/* next state */
		vfop->state = BNX2X_VFOP_QCTOR_INT_EN;

		/* copy pre-prepared setup params to the queue-state params */
		vfop->op_p->qctor.qstate.params.setup =
			vfop->op_p->qctor.prep_qsetup;

		q_params->cmd = BNX2X_Q_CMD_SETUP;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QCTOR_INT_EN:

		/* enable interrupts */
		bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, args->sb_idx),
				    USTORM_ID, 0, IGU_INT_ENABLE, 0);
		goto op_done;
	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("QCTOR[%d:%d] error: cmd %d, rc %d\n",
		  vf->abs_vfid, args->qid, q_params->cmd, vfop->rc);
op_done:
	bnx2x_vfop_end(bp, vf, vfop);
op_pending:
	return;
}

static int bnx2x_vfop_qctor_cmd(struct bnx2x *bp,
				struct bnx2x_virtf *vf,
				struct bnx2x_vfop_cmd *cmd,
				int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		vf->op_params.qctor.qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);

		vfop->args.qctor.qid = qid;
		vfop->args.qctor.sb_idx = bnx2x_vfq(vf, qid, sb_idx);

		bnx2x_vfop_opset(BNX2X_VFOP_QCTOR_INIT,
				 bnx2x_vfop_qctor, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qctor,
					     cmd->block);
	}
	return -ENOMEM;
}
/* VFOP queue destruction */
static void bnx2x_vfop_qdtor(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vfop_args_qdtor *qdtor = &vfop->args.qdtor;
	struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
	enum bnx2x_vfop_qdtor_state state = vfop->state;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_QDTOR_HALT:

		/* has this queue already been stopped? */
		if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
		    BNX2X_Q_LOGICAL_STATE_STOPPED) {
			DP(BNX2X_MSG_IOV,
			   "Entered qdtor but queue was already stopped. Aborting gracefully\n");

			/* next state */
			vfop->state = BNX2X_VFOP_QDTOR_DONE;

			bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
		}

		/* next state */
		vfop->state = BNX2X_VFOP_QDTOR_TERMINATE;

		q_params->cmd = BNX2X_Q_CMD_HALT;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QDTOR_TERMINATE:
		/* next state */
		vfop->state = BNX2X_VFOP_QDTOR_CFCDEL;

		q_params->cmd = BNX2X_Q_CMD_TERMINATE;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QDTOR_CFCDEL:
		/* next state */
		vfop->state = BNX2X_VFOP_QDTOR_DONE;

		q_params->cmd = BNX2X_Q_CMD_CFC_DEL;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
op_err:
	BNX2X_ERR("QDTOR[%d:%d] error: cmd %d, rc %d\n",
		  vf->abs_vfid, qdtor->qid, q_params->cmd, vfop->rc);
op_done:
	case BNX2X_VFOP_QDTOR_DONE:
		/* invalidate the context */
		if (qdtor->cxt) {
			qdtor->cxt->ustorm_ag_context.cdu_usage = 0;
			qdtor->cxt->xstorm_ag_context.cdu_reserved = 0;
		}
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
op_pending:
	return;
}

static int bnx2x_vfop_qdtor_cmd(struct bnx2x *bp,
				struct bnx2x_virtf *vf,
				struct bnx2x_vfop_cmd *cmd,
				int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		struct bnx2x_queue_state_params *qstate =
			&vf->op_params.qctor.qstate;

		memset(qstate, 0, sizeof(*qstate));
		qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj);

		vfop->args.qdtor.qid = qid;
		vfop->args.qdtor.cxt = bnx2x_vfq(vf, qid, cxt);

		bnx2x_vfop_opset(BNX2X_VFOP_QDTOR_HALT,
				 bnx2x_vfop_qdtor, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor,
					     cmd->block);
	}
	BNX2X_ERR("VF[%d] failed to add a vfop\n", vf->abs_vfid);
	return -ENOMEM;
}
static void
bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
{
	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

	if (vf) {
		/* the first igu entry belonging to VFs of this PF */
		if (!BP_VFDB(bp)->first_vf_igu_entry)
			BP_VFDB(bp)->first_vf_igu_entry = igu_sb_id;

		/* the first igu entry belonging to this VF */
		if (!vf_sb_count(vf))
			vf->igu_base_id = igu_sb_id;

		++vf_sb_count(vf);
	}
	BP_VFDB(bp)->vf_sbs_pool++;
}
/* VFOP MAC/VLAN helpers */
static inline void bnx2x_vfop_credit(struct bnx2x *bp,
				     struct bnx2x_vfop *vfop,
				     struct bnx2x_vlan_mac_obj *obj)
{
	struct bnx2x_vfop_args_filters *args = &vfop->args.filters;

	/* update credit only if there is no error
	 * and a valid credit counter
	 */
	if (!vfop->rc && args->credit) {
		struct list_head *pos;
		int read_lock;
		int cnt = 0;

		read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj);
		if (read_lock)
			DP(BNX2X_MSG_SP, "Failed to take vlan mac read head; continuing anyway\n");

		list_for_each(pos, &obj->head)
			cnt++;

		if (!read_lock)
			bnx2x_vlan_mac_h_read_unlock(bp, obj);

		atomic_set(args->credit, cnt);
	}
}

static int bnx2x_vfop_set_user_req(struct bnx2x *bp,
				   struct bnx2x_vfop_filter *pos,
				   struct bnx2x_vlan_mac_data *user_req)
{
	user_req->cmd = pos->add ? BNX2X_VLAN_MAC_ADD :
		BNX2X_VLAN_MAC_DEL;

	switch (pos->type) {
	case BNX2X_VFOP_FILTER_MAC:
		memcpy(user_req->u.mac.mac, pos->mac, ETH_ALEN);
		break;
	case BNX2X_VFOP_FILTER_VLAN:
		user_req->u.vlan.vlan = pos->vid;
		break;
	default:
		BNX2X_ERR("Invalid filter type, skipping\n");
		return 1;
	}
	return 0;
}
static int bnx2x_vfop_config_list(struct bnx2x *bp,
				  struct bnx2x_vfop_filters *filters,
				  struct bnx2x_vlan_mac_ramrod_params *vlan_mac)
{
	struct bnx2x_vfop_filter *pos, *tmp;
	struct list_head rollback_list, *filters_list = &filters->head;
	struct bnx2x_vlan_mac_data *user_req = &vlan_mac->user_req;
	int rc = 0, cnt = 0;

	INIT_LIST_HEAD(&rollback_list);

	list_for_each_entry_safe(pos, tmp, filters_list, link) {
		if (bnx2x_vfop_set_user_req(bp, pos, user_req))
			continue;

		rc = bnx2x_config_vlan_mac(bp, vlan_mac);
		if (rc >= 0) {
			cnt += pos->add ? 1 : -1;
			list_move(&pos->link, &rollback_list);
			rc = 0;
		} else if (rc == -EEXIST) {
			rc = 0;
		} else {
			BNX2X_ERR("Failed to add a new vlan_mac command\n");
			break;
		}
	}

	/* rollback if error or too many rules added */
	if (rc || cnt > filters->add_cnt) {
		BNX2X_ERR("error or too many rules added. Performing rollback\n");
		list_for_each_entry_safe(pos, tmp, &rollback_list, link) {
			pos->add = !pos->add;	/* reverse op */
			bnx2x_vfop_set_user_req(bp, pos, user_req);
			bnx2x_config_vlan_mac(bp, vlan_mac);
			list_del(&pos->link);
		}
		cnt = 0;
		if (!rc)
			rc = -EINVAL;
	}
	filters->add_cnt = cnt;
	return rc;
}
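
/* Note the contract of bnx2x_vfop_config_list(): on success filters->add_cnt
 * holds the net number of rules actually added; on error (or if more rules
 * than allowed were added) every executed command is reversed and the list
 * is emptied, so callers never observe a half-applied list.
 */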
/* VFOP set VLAN/MAC */
static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vlan_mac_ramrod_params *vlan_mac = &vfop->op_p->vlan_mac;
	struct bnx2x_vlan_mac_obj *obj = vlan_mac->vlan_mac_obj;
	struct bnx2x_vfop_filters *filters = vfop->args.filters.multi_filter;

	enum bnx2x_vfop_vlan_mac_state state = vfop->state;

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	bnx2x_vfop_reset_wq(vf);

	switch (state) {
	case BNX2X_VFOP_VLAN_MAC_CLEAR:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

		/* do delete */
		vfop->rc = obj->delete_all(bp, obj,
					   &vlan_mac->user_req.vlan_mac_flags,
					   &vlan_mac->ramrod_flags);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

		/* do config */
		vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
		if (vfop->rc == -EEXIST)
			vfop->rc = 0;

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_VLAN_MAC_CHK_DONE:
		vfop->rc = !!obj->raw.check_pending(&obj->raw);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_MAC_CONFIG_LIST:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

		/* do list config */
		vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
		if (vfop->rc)
			goto op_err;

		set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
		vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_VLAN_CONFIG_LIST:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

		/* do list config */
		vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
		if (!vfop->rc) {
			set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
			vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
		}
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("VLAN-MAC error: rc %d\n", vfop->rc);
op_done:
	kfree(filters);
	bnx2x_vfop_credit(bp, vfop, obj);
	bnx2x_vfop_end(bp, vf, vfop);
op_pending:
	return;
}
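
/* Translation of the caller's intent into ramrod/classification flags:
 * drv_only maps to RAMROD_DRV_CLR_ONLY (update driver state without
 * touching HW), single_cmd to RAMROD_EXEC, dont_consume to
 * BNX2X_DONT_CONSUME_CAM_CREDIT, and add selects ADD vs. DEL.
 */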
struct bnx2x_vfop_vlan_mac_flags {
	bool drv_only;
	bool dont_consume;
	bool single_cmd;
	bool add;
};

static void
bnx2x_vfop_vlan_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
				struct bnx2x_vfop_vlan_mac_flags *flags)
{
	struct bnx2x_vlan_mac_data *ureq = &ramrod->user_req;

	memset(ramrod, 0, sizeof(*ramrod));

	/* ramrod flags */
	if (flags->drv_only)
		set_bit(RAMROD_DRV_CLR_ONLY, &ramrod->ramrod_flags);
	if (flags->single_cmd)
		set_bit(RAMROD_EXEC, &ramrod->ramrod_flags);

	/* mac_vlan flags */
	if (flags->dont_consume)
		set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, &ureq->vlan_mac_flags);

	/* cmd */
	ureq->cmd = flags->add ? BNX2X_VLAN_MAC_ADD : BNX2X_VLAN_MAC_DEL;
}

static void
bnx2x_vfop_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
			   struct bnx2x_vfop_vlan_mac_flags *flags)
{
	bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, flags);
	set_bit(BNX2X_ETH_MAC, &ramrod->user_req.vlan_mac_flags);
}
static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp,
				     struct bnx2x_virtf *vf,
				     struct bnx2x_vfop_cmd *cmd,
				     int qid, bool drv_only)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = NULL,	/* single */
			.credit = NULL,		/* consume credit */
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = drv_only,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = true,
			.add = false /* don't care */,
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);

		/* set object */
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);

		/* set extra args */
		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}

int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
			    struct bnx2x_virtf *vf,
			    struct bnx2x_vfop_cmd *cmd,
			    struct bnx2x_vfop_filters *macs,
			    int qid, bool drv_only)
{
	struct bnx2x_vfop *vfop;

	if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
		return -EINVAL;

	vfop = bnx2x_vfop_add(bp, vf);
	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = macs,
			.credit = NULL,		/* consume credit */
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = drv_only,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = false,
			.add = false, /* don't care since only the items in the
				       * filters list affect the sp operation,
				       * not the list itself
				       */
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);

		/* set object */
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);

		/* set extra args */
		filters.multi_filter->add_cnt = BNX2X_VFOP_FILTER_ADD_CNT_MAX;
		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_MAC_CONFIG_LIST,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}
static int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
				   struct bnx2x_virtf *vf,
				   struct bnx2x_vfop_cmd *cmd,
				   int qid, u16 vid, bool add)
{
	struct bnx2x_vfop *vfop;

	if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
		return -EINVAL;

	vfop = bnx2x_vfop_add(bp, vf);
	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = NULL,	/* single command */
			.credit = &bnx2x_vfq(vf, qid, vlan_count),
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = false,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = true,
			.add = add,
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
		ramrod->user_req.u.vlan.vlan = vid;

		/* set object */
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);

		/* set extra args */
		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}

static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp,
				      struct bnx2x_virtf *vf,
				      struct bnx2x_vfop_cmd *cmd,
				      int qid, bool drv_only)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = NULL,	/* single command */
			.credit = &bnx2x_vfq(vf, qid, vlan_count),
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = drv_only,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = true,
			.add = false, /* don't care */
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);

		/* set object */
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);

		/* set extra args */
		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}

int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
			     struct bnx2x_virtf *vf,
			     struct bnx2x_vfop_cmd *cmd,
			     struct bnx2x_vfop_filters *vlans,
			     int qid, bool drv_only)
{
	struct bnx2x_vfop *vfop;

	if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
		return -EINVAL;

	vfop = bnx2x_vfop_add(bp, vf);
	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = vlans,
			.credit = &bnx2x_vfq(vf, qid, vlan_count),
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = drv_only,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = false,
			.add = false, /* don't care */
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);

		/* set object */
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);

		/* set extra args */
		filters.multi_filter->add_cnt = vf_vlan_rules_cnt(vf) -
			atomic_read(filters.credit);

		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_CONFIG_LIST,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}
/* VFOP queue setup (queue constructor + set vlan 0) */
static void bnx2x_vfop_qsetup(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	int qid = vfop->args.qctor.qid;
	enum bnx2x_vfop_qsetup_state state = vfop->state;
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vfop_qsetup,
		.block = false,
	};

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_QSETUP_CTOR:
		/* init the queue ctor command */
		vfop->state = BNX2X_VFOP_QSETUP_VLAN0;
		vfop->rc = bnx2x_vfop_qctor_cmd(bp, vf, &cmd, qid);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QSETUP_VLAN0:
		/* skip if non-leading or FPGA/EMU */
		if (qid)
			goto op_done;

		/* init the queue set-vlan command (for vlan 0) */
		vfop->state = BNX2X_VFOP_QSETUP_DONE;
		vfop->rc = bnx2x_vfop_vlan_set_cmd(bp, vf, &cmd, qid, 0, true);
		if (vfop->rc)
			goto op_err;
		return;
op_err:
	BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, vfop->rc);
op_done:
	case BNX2X_VFOP_QSETUP_DONE:
		vf->cfg_flags |= VF_CFG_VLAN;
		smp_mb__before_clear_bit();
		set_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
			&bp->sp_rtnl_state);
		smp_mb__after_clear_bit();
		schedule_delayed_work(&bp->sp_rtnl_task, 0);
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
}

int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp,
			  struct bnx2x_virtf *vf,
			  struct bnx2x_vfop_cmd *cmd,
			  int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		vfop->args.qctor.qid = qid;

		bnx2x_vfop_opset(BNX2X_VFOP_QSETUP_CTOR,
				 bnx2x_vfop_qsetup, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qsetup,
					     cmd->block);
	}
	return -ENOMEM;
}
/* VFOP queue FLR handling (clear vlans, clear macs, queue destructor) */
static void bnx2x_vfop_qflr(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	int qid = vfop->args.qx.qid;
	enum bnx2x_vfop_qflr_state state = vfop->state;
	struct bnx2x_queue_state_params *qstate;
	struct bnx2x_vfop_cmd cmd;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "VF[%d] STATE: %d\n", vf->abs_vfid, state);

	cmd.done = bnx2x_vfop_qflr;
	cmd.block = false;

	switch (state) {
	case BNX2X_VFOP_QFLR_CLR_VLAN:
		/* vlan-clear-all: driver-only, don't consume credit */
		vfop->state = BNX2X_VFOP_QFLR_CLR_MAC;

		/* the vlan_mac vfop will re-schedule us */
		vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, true);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QFLR_CLR_MAC:
		/* mac-clear-all: driver only, consume credit */
		vfop->state = BNX2X_VFOP_QFLR_TERMINATE;
		/* the vlan_mac vfop will re-schedule us */
		vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, true);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QFLR_TERMINATE:
		qstate = &vfop->op_p->qctor.qstate;
		memset(qstate, 0, sizeof(*qstate));
		qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
		vfop->state = BNX2X_VFOP_QFLR_DONE;

		DP(BNX2X_MSG_IOV, "VF[%d] qstate during flr was %d\n",
		   vf->abs_vfid, qstate->q_obj->state);

		if (qstate->q_obj->state != BNX2X_Q_STATE_RESET) {
			qstate->q_obj->state = BNX2X_Q_STATE_STOPPED;
			qstate->cmd = BNX2X_Q_CMD_TERMINATE;
			vfop->rc = bnx2x_queue_state_change(bp, qstate);
			bnx2x_vfop_finalize(vf, vfop->rc, VFOP_VERIFY_PEND);
		} else {
			goto op_done;
		}

op_err:
	BNX2X_ERR("QFLR[%d:%d] error: rc %d\n",
		  vf->abs_vfid, qid, vfop->rc);
op_done:
	case BNX2X_VFOP_QFLR_DONE:
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
op_pending:
	return;
}

static int bnx2x_vfop_qflr_cmd(struct bnx2x *bp,
			       struct bnx2x_virtf *vf,
			       struct bnx2x_vfop_cmd *cmd,
			       int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		vfop->args.qx.qid = qid;
		if ((qid == LEADING_IDX) &&
		    bnx2x_validate_vf_sp_objs(bp, vf, false))
			bnx2x_vfop_opset(BNX2X_VFOP_QFLR_CLR_VLAN,
					 bnx2x_vfop_qflr, cmd->done);
		else
			bnx2x_vfop_opset(BNX2X_VFOP_QFLR_TERMINATE,
					 bnx2x_vfop_qflr, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qflr,
					     cmd->block);
	}
	return -ENOMEM;
}
/* VFOP multi-casts */
static void bnx2x_vfop_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_mcast_ramrod_params *mcast = &vfop->op_p->mcast;
	struct bnx2x_raw_obj *raw = &mcast->mcast_obj->raw;
	struct bnx2x_vfop_args_mcast *args = &vfop->args.mc_list;
	enum bnx2x_vfop_mcast_state state = vfop->state;
	int i;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_MCAST_DEL:
		/* clear existing mcasts */
		vfop->state = (args->mc_num) ? BNX2X_VFOP_MCAST_ADD
					     : BNX2X_VFOP_MCAST_CHK_DONE;
		mcast->mcast_list_len = vf->mcast_list_len;
		vf->mcast_list_len = args->mc_num;
		vfop->rc = bnx2x_config_mcast(bp, mcast, BNX2X_MCAST_CMD_DEL);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_MCAST_ADD:
		if (raw->check_pending(raw))
			goto op_pending;

		/* update mcast list on the ramrod params */
		INIT_LIST_HEAD(&mcast->mcast_list);
		for (i = 0; i < args->mc_num; i++)
			list_add_tail(&(args->mc[i].link),
				      &mcast->mcast_list);
		mcast->mcast_list_len = args->mc_num;

		/* add new mcasts */
		vfop->state = BNX2X_VFOP_MCAST_CHK_DONE;
		vfop->rc = bnx2x_config_mcast(bp, mcast,
					      BNX2X_MCAST_CMD_ADD);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_MCAST_CHK_DONE:
		vfop->rc = raw->check_pending(raw) ? 1 : 0;
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("MCAST CONFIG error: rc %d\n", vfop->rc);
op_done:
	kfree(args->mc);
	bnx2x_vfop_end(bp, vf, vfop);
op_pending:
	return;
}

int bnx2x_vfop_mcast_cmd(struct bnx2x *bp,
			 struct bnx2x_virtf *vf,
			 struct bnx2x_vfop_cmd *cmd,
			 bnx2x_mac_addr_t *mcasts,
			 int mcast_num, bool drv_only)
{
	struct bnx2x_vfop *vfop = NULL;
	size_t mc_sz = mcast_num * sizeof(struct bnx2x_mcast_list_elem);
	struct bnx2x_mcast_list_elem *mc = mc_sz ? kzalloc(mc_sz, GFP_KERNEL) :
					   NULL;

	if (!mc_sz || mc) {
		vfop = bnx2x_vfop_add(bp, vf);
		if (vfop) {
			int i;
			struct bnx2x_mcast_ramrod_params *ramrod =
				&vf->op_params.mcast;

			/* set ramrod params */
			memset(ramrod, 0, sizeof(*ramrod));
			ramrod->mcast_obj = &vf->mcast_obj;
			if (drv_only)
				set_bit(RAMROD_DRV_CLR_ONLY,
					&ramrod->ramrod_flags);

			/* copy mcasts pointers */
			vfop->args.mc_list.mc_num = mcast_num;
			vfop->args.mc_list.mc = mc;
			for (i = 0; i < mcast_num; i++)
				mc[i].mac = mcasts[i];

			bnx2x_vfop_opset(BNX2X_VFOP_MCAST_DEL,
					 bnx2x_vfop_mcast, cmd->done);
			return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mcast,
						     cmd->block);
		} else {
			kfree(mc);
		}
	}
	return -ENOMEM;
}
static void bnx2x_vfop_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_rx_mode_ramrod_params *ramrod = &vfop->op_p->rx_mode;
	enum bnx2x_vfop_rxmode_state state = vfop->state;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_RXMODE_CONFIG:
		/* next state */
		vfop->state = BNX2X_VFOP_RXMODE_DONE;

		/* record the accept flags in vfdb so hypervisor can modify them
		 * if needed
		 */
		bnx2x_vfq(vf, ramrod->cl_id - vf->igu_base_id, accept_flags) =
			ramrod->rx_accept_flags;
		vfop->rc = bnx2x_config_rx_mode(bp, ramrod);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
op_err:
		BNX2X_ERR("RXMODE error: rc %d\n", vfop->rc);
op_done:
	case BNX2X_VFOP_RXMODE_DONE:
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
op_pending:
	return;
}
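
/* Build the rx-mode ramrod for a single VF queue: the same accept flags are
 * applied to both the RX and TX sides (RAMROD_RX | RAMROD_TX), and
 * completion is tracked through the BNX2X_FILTER_RX_MODE_PENDING bit in the
 * VF's filter_state.
 */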
static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid,
				  struct bnx2x_rx_mode_ramrod_params *ramrod,
				  struct bnx2x_virtf *vf,
				  unsigned long accept_flags)
{
	struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);

	memset(ramrod, 0, sizeof(*ramrod));
	ramrod->cid = vfq->cid;
	ramrod->cl_id = vfq_cl_id(vf, vfq);
	ramrod->rx_mode_obj = &bp->rx_mode_obj;
	ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
	ramrod->rx_accept_flags = accept_flags;
	ramrod->tx_accept_flags = accept_flags;
	ramrod->pstate = &vf->filter_state;
	ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;

	set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
	set_bit(RAMROD_RX, &ramrod->ramrod_flags);
	set_bit(RAMROD_TX, &ramrod->ramrod_flags);

	ramrod->rdata = bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
	ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
}

int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp,
			  struct bnx2x_virtf *vf,
			  struct bnx2x_vfop_cmd *cmd,
			  int qid, unsigned long accept_flags)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		struct bnx2x_rx_mode_ramrod_params *ramrod =
			&vf->op_params.rx_mode;

		bnx2x_vf_prep_rx_mode(bp, qid, ramrod, vf, accept_flags);

		bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG,
				 bnx2x_vfop_rxmode, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rxmode,
					     cmd->block);
	}
	return -ENOMEM;
}
/* VFOP queue tear-down ('drop all' rx-mode, clear vlans, clear macs,
 * queue destructor)
 */
static void bnx2x_vfop_qdown(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	int qid = vfop->args.qx.qid;
	enum bnx2x_vfop_qteardown_state state = vfop->state;
	struct bnx2x_vfop_cmd cmd;

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	cmd.done = bnx2x_vfop_qdown;
	cmd.block = false;

	switch (state) {
	case BNX2X_VFOP_QTEARDOWN_RXMODE:
		/* Drop all */
		if (bnx2x_validate_vf_sp_objs(bp, vf, true))
			vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_VLAN;
		else
			vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR;
		vfop->rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, qid, 0);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QTEARDOWN_CLR_VLAN:
		/* vlan-clear-all: don't consume credit */
		vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_MAC;
		vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, false);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QTEARDOWN_CLR_MAC:
		/* mac-clear-all: consume credit */
		vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_MCAST;
		vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, false);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QTEARDOWN_CLR_MCAST:
		vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR;
		vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL, 0, false);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QTEARDOWN_QDTOR:
		/* run the queue destruction flow */
		DP(BNX2X_MSG_IOV, "case: BNX2X_VFOP_QTEARDOWN_QDTOR\n");
		vfop->state = BNX2X_VFOP_QTEARDOWN_DONE;
		DP(BNX2X_MSG_IOV, "new state: BNX2X_VFOP_QTEARDOWN_DONE\n");
		vfop->rc = bnx2x_vfop_qdtor_cmd(bp, vf, &cmd, qid);
		DP(BNX2X_MSG_IOV, "returned from cmd\n");
		if (vfop->rc)
			goto op_err;
		return;
op_err:
	BNX2X_ERR("QTEARDOWN[%d:%d] error: rc %d\n",
		  vf->abs_vfid, qid, vfop->rc);

	case BNX2X_VFOP_QTEARDOWN_DONE:
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
}

int bnx2x_vfop_qdown_cmd(struct bnx2x *bp,
			 struct bnx2x_virtf *vf,
			 struct bnx2x_vfop_cmd *cmd,
			 int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	/* for non-leading queues skip directly to the qdtor state */
	if (vfop) {
		vfop->args.qx.qid = qid;
		bnx2x_vfop_opset(qid == LEADING_IDX ?
				 BNX2X_VFOP_QTEARDOWN_RXMODE :
				 BNX2X_VFOP_QTEARDOWN_QDTOR, bnx2x_vfop_qdown,
				 cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdown,
					     cmd->block);
	}
	return -ENOMEM;
}
/* VF enable primitives
 * when pretend is required the caller is responsible
 * for calling pretend prior to calling these routines
 */

/* internal vf enable - until vf is enabled internally all transactions
 * are blocked. This routine should always be called last with pretend.
 */
static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable)
{
	REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0);
}
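
/* Minimal usage sketch - the same sequence appears in
 * bnx2x_vf_enable_access() below:
 *
 *	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
 *	bnx2x_vf_enable_internal(bp, true);
 *	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
 */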
1404 /* clears vf error in all semi blocks */
1405 static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid)
1407 REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid);
1408 REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid);
1409 REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid);
1410 REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid);
1413 static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid)
1415 u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5;
1416 u32 was_err_reg = 0;
1418 switch (was_err_group) {
1420 was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR;
1423 was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR;
1426 was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR;
1429 was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR;
1432 REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f));
static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i;
	u32 val;

	/* Set VF masks and configuration - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));

	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
	REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
	REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
	REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
	REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);

	val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
	val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN);
	if (vf->cfg_flags & VF_CFG_INT_SIMD)
		val |= IGU_VF_CONF_SINGLE_ISR_EN;
	val &= ~IGU_VF_CONF_PARENT_MASK;
	val |= (BP_ABS_FUNC(bp) >> 1) << IGU_VF_CONF_PARENT_SHIFT;
	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);

	DP(BNX2X_MSG_IOV,
	   "value in IGU_REG_VF_CONFIGURATION of vf %d after write is 0x%08x\n",
	   vf->abs_vfid, val);

	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	/* iterate over all queues, clear sb consumer */
	for (i = 0; i < vf_sb_count(vf); i++) {
		u8 igu_sb_id = vf_igu_sb(vf, i);

		/* zero prod memory */
		REG_WR(bp, IGU_REG_PROD_CONS_MEMORY + igu_sb_id * 4, 0);

		/* clear sb state machine */
		bnx2x_igu_clear_sb_gen(bp, vf->abs_vfid, igu_sb_id,
				       false /* VF */);

		/* disable + update */
		bnx2x_vf_igu_ack_sb(bp, vf, igu_sb_id, USTORM_ID, 0,
				    IGU_INT_DISABLE, 1);
	}
}

void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid)
{
	/* set the VF-PF association in the FW */
	storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp));
	storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1);

	/* clear vf errors */
	bnx2x_vf_semi_clear_err(bp, abs_vfid);
	bnx2x_vf_pglue_clear_err(bp, abs_vfid);

	/* internal vf-enable - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
	DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid);
	bnx2x_vf_enable_internal(bp, true);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}

static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	/* Reset the VF in the IGU while interrupts are still disabled */
	bnx2x_vf_igu_reset(bp, vf);

	/* pretend to enable the vf with the PBF */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	REG_WR(bp, PBF_REG_DISABLE_VF, 0);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}

static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
{
	struct pci_dev *dev;
	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

	if (!vf)
		return false;

	dev = pci_get_bus_and_slot(vf->bus, vf->devfn);
	if (dev)
		return bnx2x_is_pcie_pending(dev);
	return false;
}

int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
{
	/* Verify no pending pci transactions */
	if (bnx2x_vf_is_pcie_pending(bp, abs_vfid))
		BNX2X_ERR("PCIE Transactions still pending\n");

	return 0;
}
/* must be called after the number of PF queues and the number of VFs are
 * both known
 */
static void
bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct vf_pf_resc_request *resc = &vf->alloc_resc;
	u16 vlan_count = 0;

	/* will be set only during VF-ACQUIRE */
	resc->num_rxqs = 0;
	resc->num_txqs = 0;

	/* no credit calculations for macs (just yet) */
	resc->num_mac_filters = 1;

	/* divvy up vlan rules */
	vlan_count = bp->vlans_pool.check(&bp->vlans_pool);
	vlan_count = 1 << ilog2(vlan_count);
	resc->num_vlan_filters = vlan_count / BNX2X_NR_VIRTFN(bp);

	/* no real limitation */
	resc->num_mc_filters = 0;

	/* num_sbs already set */
	resc->num_sbs = vf->sb_count;
}
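
/* Worked example (illustrative numbers, not chip constants): with a
 * 256-entry vlan credit pool and 64 VFs, the ilog2 rounding keeps 256
 * entries and each VF is granted 256 / 64 = 4 vlan filter rules.
 */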
static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	/* reset the state variables */
	bnx2x_iov_static_resc(bp, vf);
	vf->state = VF_FREE;
}

static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);

	/* DQ usage counter */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	bnx2x_flr_clnup_poll_hw_counter(bp, DORQ_REG_VF_USAGE_CNT,
					"DQ VF usage counter timed out",
					poll_cnt);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	/* FW cleanup command - poll for the results */
	if (bnx2x_send_final_clnup(bp, (u8)FW_VF_HANDLE(vf->abs_vfid),
				   poll_cnt))
		BNX2X_ERR("VF[%d] Final cleanup timed-out\n", vf->abs_vfid);

	/* verify TX hw is flushed */
	bnx2x_tx_hw_flushed(bp, poll_cnt);
}
static void bnx2x_vfop_flr(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vfop_args_qx *qx = &vfop->args.qx;
	enum bnx2x_vfop_flr_state state = vfop->state;
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vfop_flr,
		.block = false,
	};

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_FLR_QUEUES:
		/* the cleanup operations are valid if and only if the VF
		 * was first acquired.
		 */
		if (++(qx->qid) < vf_rxq_count(vf)) {
			vfop->rc = bnx2x_vfop_qflr_cmd(bp, vf, &cmd,
						       qx->qid);
			if (vfop->rc)
				goto op_err;
			return;
		}
		/* remove multicasts */
		vfop->state = BNX2X_VFOP_FLR_HW;
		vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL,
						0, true);
		if (vfop->rc)
			goto op_err;
		return;
	case BNX2X_VFOP_FLR_HW:

		/* dispatch final cleanup and wait for HW queues to flush */
		bnx2x_vf_flr_clnup_hw(bp, vf);

		/* release VF resources */
		bnx2x_vf_free_resc(bp, vf);

		/* re-open the mailbox */
		bnx2x_vf_enable_mbx(bp, vf->abs_vfid);

		goto op_done;
	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("VF[%d] FLR error: rc %d\n", vf->abs_vfid, vfop->rc);
op_done:
	vf->flr_clnup_stage = VF_FLR_ACK;
	bnx2x_vfop_end(bp, vf, vfop);
	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
}

static int bnx2x_vfop_flr_cmd(struct bnx2x *bp,
			      struct bnx2x_virtf *vf,
			      vfop_handler_t done)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
	if (vfop) {
		vfop->args.qx.qid = -1; /* loop */
		bnx2x_vfop_opset(BNX2X_VFOP_FLR_QUEUES,
				 bnx2x_vfop_flr, done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_flr, false);
	}
	return -ENOMEM;
}
static void bnx2x_vf_flr_clnup(struct bnx2x *bp, struct bnx2x_virtf *prev_vf)
{
	int i = prev_vf ? prev_vf->index + 1 : 0;
	struct bnx2x_virtf *vf;

	/* find next VF to cleanup */
next_vf_to_clean:
	for (;
	     i < BNX2X_NR_VIRTFN(bp) &&
	     (bnx2x_vf(bp, i, state) != VF_RESET ||
	      bnx2x_vf(bp, i, flr_clnup_stage) != VF_FLR_CLN);
	     i++)
		;

	DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n", i,
	   BNX2X_NR_VIRTFN(bp));

	if (i < BNX2X_NR_VIRTFN(bp)) {
		vf = BP_VF(bp, i);

		/* lock the vf pf channel */
		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);

		/* invoke the VF FLR SM */
		if (bnx2x_vfop_flr_cmd(bp, vf, bnx2x_vf_flr_clnup)) {
			BNX2X_ERR("VF[%d]: FLR cleanup failed -ENOMEM\n",
				  vf->abs_vfid);

			/* mark the VF to be ACKED and continue */
			vf->flr_clnup_stage = VF_FLR_ACK;
			goto next_vf_to_clean;
		}
		return;
	}

	/* we are done, update vf records */
	for_each_vf(bp, i) {
		vf = BP_VF(bp, i);

		if (vf->flr_clnup_stage != VF_FLR_ACK)
			continue;

		vf->flr_clnup_stage = VF_FLR_EPILOG;
	}

	/* Acknowledge the handled VFs.
	 * We acknowledge all the VFs for which an FLR was requested, even
	 * those we never opened, since otherwise the MCP will interrupt us
	 * immediately again if we only ack some of the bits, resulting in an
	 * endless loop. This can happen for example in KVM where an
	 * 'all ones' FLR request is sometimes given by the hypervisor.
	 */
	DP(BNX2X_MSG_MCP, "DRV_STATUS_VF_DISABLED ACK for vfs 0x%x 0x%x\n",
	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i],
			  bp->vfdb->flrd_vfs[i]);

	bnx2x_fw_command(bp, DRV_MSG_CODE_VF_DISABLED_DONE, 0);

	/* clear the acked bits - better yet if the MCP implemented
	 * write to clear semantics
	 */
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], 0);
}
void bnx2x_vf_handle_flr_event(struct bnx2x *bp)
{
	int i;

	/* Read FLR'd VFs */
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		bp->vfdb->flrd_vfs[i] = SHMEM2_RD(bp, mcp_vf_disabled[i]);

	DP(BNX2X_MSG_MCP,
	   "DRV_STATUS_VF_DISABLED received for vfs 0x%x 0x%x\n",
	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);

	for_each_vf(bp, i) {
		struct bnx2x_virtf *vf = BP_VF(bp, i);
		u32 reset = 0;

		if (vf->abs_vfid < 32)
			reset = bp->vfdb->flrd_vfs[0] & (1 << vf->abs_vfid);
		else
			reset = bp->vfdb->flrd_vfs[1] &
				(1 << (vf->abs_vfid - 32));

		if (reset) {
			/* set as reset and ready for cleanup */
			vf->state = VF_RESET;
			vf->flr_clnup_stage = VF_FLR_CLN;

			DP(BNX2X_MSG_IOV,
			   "Initiating Final cleanup for VF %d\n",
			   vf->abs_vfid);
		}
	}

	/* do the FLR cleanup for all marked VFs */
	bnx2x_vf_flr_clnup(bp, NULL);
}
/* IOV global initialization routines */
void bnx2x_iov_init_dq(struct bnx2x *bp)
{
	if (!IS_SRIOV(bp))
		return;

	/* Set the DQ such that the CID reflect the abs_vfid */
	REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0);
	REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS));

	/* Set VFs starting CID. If it is > 0, the preceding CIDs belong to
	 * the PF L2 queues
	 */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID);

	/* The VF window size is the log2 of the max number of CIDs per VF */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND);

	/* The VF doorbell size  0 - *B, 4 - 128B. We set it here to match
	 * the PF doorbell size although the two are independent.
	 */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST, 3);

	/* No security checks for now -
	 * configure single rule (out of 16) mask = 0x1, value = 0x0,
	 * CID range 0 - 0x1ffff
	 */
	REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1);
	REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0);
	REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
	REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);

	/* set the VF doorbell threshold */
	REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 4);
}
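
/* A sketch of the resulting decode (an assumption for illustration, not a
 * register-level specification): a VF doorbell is mapped to roughly
 * cid = DORQ_REG_VF_NORM_CID_BASE + (abs_vfid << BNX2X_VF_CID_WND) + qid,
 * i.e. the window-size bits select the queue within the VF.
 */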
void bnx2x_iov_init_dmae(struct bnx2x *bp)
{
	if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV))
		REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
}

static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
{
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	return dev->bus->number + ((dev->devfn + iov->offset +
				    iov->stride * vfid) >> 8);
}

static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid)
{
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff;
}

static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i, n;
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) {
		u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i);
		u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i);

		size /= iov->total;
		vf->bars[n].bar = start + size * vf->abs_vfid;
		vf->bars[n].size = size;
	}
}
static int bnx2x_ari_enabled(struct pci_dev *dev)
{
	return dev->bus->self && dev->bus->self->ari_enabled;
}
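
/* In the IGU CAM a PF's own entry precedes the entries of its VFs, so the
 * scan below remembers the most recently seen PF fid (current_pf) and
 * attributes subsequent VF entries to that PF.
 */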
static void
bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
{
	int sb_id;
	u32 val;
	u8 fid, current_pf = 0;

	/* IGU in normal mode - read CAM */
	for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
		val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4);
		if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
			continue;
		fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
		if (fid & IGU_FID_ENCODE_IS_PF)
			current_pf = fid & IGU_FID_PF_NUM_MASK;
		else if (current_pf == BP_FUNC(bp))
			bnx2x_vf_set_igu_info(bp, sb_id,
					      (fid & IGU_FID_VF_NUM_MASK));
		DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
		   ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
		   ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) :
		    (fid & IGU_FID_VF_NUM_MASK)), sb_id,
		   GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
	}
	DP(BNX2X_MSG_IOV, "vf_sbs_pool is %d\n", BP_VFDB(bp)->vf_sbs_pool);
}

static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
{
	if (bp->vfdb) {
		kfree(bp->vfdb->vfqs);
		kfree(bp->vfdb->vfs);
		kfree(bp->vfdb);
	}
	bp->vfdb = NULL;
}

static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
	int pos;
	struct pci_dev *dev = bp->pdev;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		BNX2X_ERR("failed to find SRIOV capability in device\n");
		return -ENODEV;
	}

	iov->pos = pos;
	DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos);
	pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
	pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total);
	pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
	pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
	pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
	pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

	return 0;
}

static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
	u32 val;

	/* read the SRIOV capability structure
	 * The fields can be read via configuration read or
	 * directly from the device (starting at offset PCICFG_OFFSET)
	 */
	if (bnx2x_sriov_pci_cfg_info(bp, iov))
		return -ENODEV;

	/* get the number of SRIOV bars */
	iov->nres = 0;

	/* read the first_vfid */
	val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF);
	iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK)
			       * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp));

	DP(BNX2X_MSG_IOV,
	   "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
	   BP_FUNC(bp),
	   iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total,
	   iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);

	return 0;
}
/* must be called after PF bars are mapped */
int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
		       int num_vfs_param)
{
	int err, i;
	struct bnx2x_sriov *iov;
	struct pci_dev *dev = bp->pdev;

	bp->vfdb = NULL;

	/* verify is pf */
	if (IS_VF(bp))
		return 0;

	/* verify sriov capability is present in configuration space */
	if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV))
		return 0;

	/* verify chip revision */
	if (CHIP_IS_E1x(bp))
		return 0;

	/* check if SRIOV support is turned off */
	if (!num_vfs_param)
		return 0;

	/* SRIOV assumes that num of PF CIDs < BNX2X_FIRST_VF_CID */
	if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) {
		BNX2X_ERR("PF cids %d are overspilling into vf space (starts at %d). Abort SRIOV\n",
			  BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID);
		return 0;
	}

	/* SRIOV can be enabled only with MSIX */
	if (int_mode_param == BNX2X_INT_MODE_MSI ||
	    int_mode_param == BNX2X_INT_MODE_INTX) {
		BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n");
		return 0;
	}

	/* verify ari is enabled */
	if (!bnx2x_ari_enabled(bp->pdev)) {
		BNX2X_ERR("ARI not supported (check pci bridge ARI forwarding), SRIOV can not be enabled\n");
		return 0;
	}

	/* verify igu is in normal mode */
	if (CHIP_INT_MODE_IS_BC(bp)) {
		BNX2X_ERR("IGU not normal mode, SRIOV can not be enabled\n");
		return 0;
	}

	/* allocate the vfs database */
	bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL);
	if (!bp->vfdb) {
		BNX2X_ERR("failed to allocate vf database\n");
		err = -ENOMEM;
		goto failed;
	}

	/* get the sriov info - Linux already collected all the pertinent
	 * information, however the sriov structure is for the private use
	 * of the pci module. Also we want this information regardless
	 * of the hyper-visor.
	 */
	iov = &(bp->vfdb->sriov);
	err = bnx2x_sriov_info(bp, iov);
	if (err)
		goto failed;

	/* SR-IOV capability was enabled but there are no VFs */
	if (iov->total == 0)
		goto failed;

	iov->nr_virtfn = min_t(u16, iov->total, num_vfs_param);

	DP(BNX2X_MSG_IOV, "num_vfs_param was %d, nr_virtfn was %d\n",
	   num_vfs_param, iov->nr_virtfn);

	/* allocate the vf array */
	bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) *
				BNX2X_NR_VIRTFN(bp), GFP_KERNEL);
	if (!bp->vfdb->vfs) {
		BNX2X_ERR("failed to allocate vf array\n");
		err = -ENOMEM;
		goto failed;
	}

	/* Initial VF init - index and abs_vfid - nr_virtfn must be set */
	for_each_vf(bp, i) {
		bnx2x_vf(bp, i, index) = i;
		bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i;
		bnx2x_vf(bp, i, state) = VF_FREE;
		INIT_LIST_HEAD(&bnx2x_vf(bp, i, op_list_head));
		mutex_init(&bnx2x_vf(bp, i, op_mutex));
		bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE;
	}

	/* re-read the IGU CAM for VFs - index and abs_vfid must be set */
	bnx2x_get_vf_igu_cam_info(bp);

	/* allocate the queue arrays for all VFs */
	bp->vfdb->vfqs = kzalloc(
		BNX2X_MAX_NUM_VF_QUEUES * sizeof(struct bnx2x_vf_queue),
		GFP_KERNEL);

	DP(BNX2X_MSG_IOV, "bp->vfdb->vfqs was %p\n", bp->vfdb->vfqs);

	if (!bp->vfdb->vfqs) {
		BNX2X_ERR("failed to allocate vf queue array\n");
		err = -ENOMEM;
		goto failed;
	}

	return 0;
failed:
	DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
	__bnx2x_iov_free_vfdb(bp);
	return err;
}
void bnx2x_iov_remove_one(struct bnx2x *bp)
{
	int vf_idx;

	/* if SRIOV is not enabled there's nothing to do */
	if (!IS_SRIOV(bp))
		return;

	DP(BNX2X_MSG_IOV, "about to call disable sriov\n");
	pci_disable_sriov(bp->pdev);
	DP(BNX2X_MSG_IOV, "sriov disabled\n");

	/* disable access to all VFs */
	for (vf_idx = 0; vf_idx < bp->vfdb->sriov.total; vf_idx++) {
		bnx2x_pretend_func(bp,
				   HW_VF_HANDLE(bp,
						bp->vfdb->sriov.first_vf_in_pf +
						vf_idx));
		DP(BNX2X_MSG_IOV, "disabling internal access for vf %d\n",
		   bp->vfdb->sriov.first_vf_in_pf + vf_idx);
		bnx2x_vf_enable_internal(bp, 0);
		bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
	}

	/* free vf database */
	__bnx2x_iov_free_vfdb(bp);
}

void bnx2x_iov_free_mem(struct bnx2x *bp)
{
	int i;

	if (!IS_SRIOV(bp))
		return;

	/* free vfs hw contexts */
	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *cxt = &bp->vfdb->context[i];
		BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size);
	}

	BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr,
		       BP_VFDB(bp)->sp_dma.mapping,
		       BP_VFDB(bp)->sp_dma.size);

	BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr,
		       BP_VF_MBX_DMA(bp)->mapping,
		       BP_VF_MBX_DMA(bp)->size);

	BNX2X_PCI_FREE(BP_VF_BULLETIN_DMA(bp)->addr,
		       BP_VF_BULLETIN_DMA(bp)->mapping,
		       BP_VF_BULLETIN_DMA(bp)->size);
}
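
/* bnx2x_iov_alloc_mem() relies on the BNX2X_PCI_ALLOC() macro (bnx2x.h),
 * which jumps to the local alloc_mem_err label on allocation failure;
 * that is why no explicit error check follows each allocation below.
 */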
int bnx2x_iov_alloc_mem(struct bnx2x *bp)
{
	int tot_size, i;

	if (!IS_SRIOV(bp))
		return 0;

	/* allocate vfs hw contexts */
	tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) *
		   BNX2X_CIDS_PER_VF * sizeof(union cdu_context);

	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i);
		cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ);

		if (cxt->size) {
			BNX2X_PCI_ALLOC(cxt->addr, &cxt->mapping, cxt->size);
		} else {
			cxt->addr = NULL;
			cxt->mapping = 0;
		}
		tot_size -= cxt->size;
	}

	/* allocate vfs ramrods dma memory - client_init and set_mac */
	tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp);
	BNX2X_PCI_ALLOC(BP_VFDB(bp)->sp_dma.addr, &BP_VFDB(bp)->sp_dma.mapping,
			tot_size);
	BP_VFDB(bp)->sp_dma.size = tot_size;

	/* allocate mailboxes */
	tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE;
	BNX2X_PCI_ALLOC(BP_VF_MBX_DMA(bp)->addr, &BP_VF_MBX_DMA(bp)->mapping,
			tot_size);
	BP_VF_MBX_DMA(bp)->size = tot_size;

	/* allocate local bulletin boards */
	tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE;
	BNX2X_PCI_ALLOC(BP_VF_BULLETIN_DMA(bp)->addr,
			&BP_VF_BULLETIN_DMA(bp)->mapping, tot_size);
	BP_VF_BULLETIN_DMA(bp)->size = tot_size;

	return 0;

alloc_mem_err:
	return -ENOMEM;
}
2158 static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
2159 struct bnx2x_vf_queue *q)
2161 u8 cl_id = vfq_cl_id(vf, q);
2162 u8 func_id = FW_VF_HANDLE(vf->abs_vfid);
2163 unsigned long q_type = 0;
2165 set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
2166 set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
2168 /* Queue State object */
2169 bnx2x_init_queue_obj(bp, &q->sp_obj,
2170 cl_id, &q->cid, 1, func_id,
2171 bnx2x_vf_sp(bp, vf, q_data),
2172 bnx2x_vf_sp_map(bp, vf, q_data),
2175 /* sp indication is set only when vlan/mac/etc. are initialized */
2176 q->sp_initialized = false;
2179 "initialized vf %d's queue object. func id set to %d. cid set to 0x%x\n",
2180 vf->abs_vfid, q->sp_obj.func_id, q->cid);
2183 /* called by bnx2x_nic_load */
2184 int bnx2x_iov_nic_init(struct bnx2x *bp)
2188 if (!IS_SRIOV(bp)) {
2189 DP(BNX2X_MSG_IOV, "vfdb was not allocated\n");
2193 DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn);
2195 /* let FLR complete ... */
2198 /* initialize vf database */
2199 for_each_vf(bp, vfid) {
2200 struct bnx2x_virtf *vf = BP_VF(bp, vfid);
2202 int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) *
2205 union cdu_context *base_cxt = (union cdu_context *)
2206 BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
2207 (base_vf_cid & (ILT_PAGE_CIDS-1));
2210 "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n",
2211 vf->abs_vfid, vf_sb_count(vf), base_vf_cid,
2212 BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt);
2214 /* init statically provisioned resources */
2215 bnx2x_iov_static_resc(bp, vf);
2217 /* queues are initialized during VF-ACQUIRE */
2219 /* reserve the vf vlan credit */
2220 bp->vlans_pool.get(&bp->vlans_pool, vf_vlan_rules_cnt(vf));
2222 vf->filter_state = 0;
2223 vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);
2225 /* init mcast object - This object will be re-initialized
2226 * during VF-ACQUIRE with the proper cl_id and cid.
2227 * It needs to be initialized here so that it can be safely
2228 * handled by a subsequent FLR flow.
2230 vf->mcast_list_len = 0;
2231 bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF,
2233 bnx2x_vf_sp(bp, vf, mcast_rdata),
2234 bnx2x_vf_sp_map(bp, vf, mcast_rdata),
2235 BNX2X_FILTER_MCAST_PENDING,
2237 BNX2X_OBJ_TYPE_RX_TX);
2239 /* set the mailbox message addresses */
2240 BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *)
2241 (((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid *
2242 MBX_MSG_ALIGNED_SIZE);
2244 BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping +
2245 vfid * MBX_MSG_ALIGNED_SIZE;
2247 /* Enable vf mailbox */
2248 bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
2252 for_each_vf(bp, vfid) {
2253 struct bnx2x_virtf *vf = BP_VF(bp, vfid);
2255 /* fill in the BDF and bars */
2256 vf->bus = bnx2x_vf_bus(bp, vfid);
2257 vf->devfn = bnx2x_vf_devfn(bp, vfid);
2258 bnx2x_vf_set_bars(bp, vf);
2261 "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n",
2262 vf->abs_vfid, vf->bus, vf->devfn,
2263 (unsigned)vf->bars[0].bar, vf->bars[0].size,
2264 (unsigned)vf->bars[1].bar, vf->bars[1].size,
2265 (unsigned)vf->bars[2].bar, vf->bars[2].size);
2271 /* called by bnx2x_chip_cleanup */
2272 int bnx2x_iov_chip_cleanup(struct bnx2x *bp)
2279 /* release all the VFs */
2280 for_each_vf(bp, i)
2281 bnx2x_vf_release(bp, BP_VF(bp, i), true); /* blocking */
2283 return 0;
2284 }
2286 /* called by bnx2x_init_hw_func, returns the next ilt line */
2287 int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line)
2289 int i;
2290 struct bnx2x_ilt *ilt = BP_ILT(bp);
2292 if (!IS_SRIOV(bp))
2293 return line;
2295 /* set vfs ilt lines */
2296 for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
2297 struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i);
2299 ilt->lines[line+i].page = hw_cxt->addr;
2300 ilt->lines[line+i].page_mapping = hw_cxt->mapping;
2301 ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */
2302 }
2303 return line + i;
2304 }
2306 static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid)
2308 return ((cid >= BNX2X_FIRST_VF_CID) &&
2309 ((cid - BNX2X_FIRST_VF_CID) < BNX2X_VF_CIDS));
2313 void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp,
2314 struct bnx2x_vf_queue *vfq,
2315 union event_ring_elem *elem)
2317 unsigned long ramrod_flags = 0;
2318 int rc = 0;
2320 /* Always push next commands out, don't wait here */
2321 set_bit(RAMROD_CONT, &ramrod_flags);
2323 switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
2324 case BNX2X_FILTER_MAC_PENDING:
2325 rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem,
2326 &ramrod_flags);
2327 break;
2328 case BNX2X_FILTER_VLAN_PENDING:
2329 rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem,
2330 &ramrod_flags);
2331 break;
2332 default:
2333 BNX2X_ERR("Unsupported classification command: %d\n",
2334 elem->message.data.eth_event.echo);
2335 return;
2336 }
2337 if (rc < 0)
2338 BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
2339 else if (rc > 0)
2340 DP(BNX2X_MSG_IOV, "Scheduled next pending commands...\n");
2344 void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp,
2345 struct bnx2x_virtf *vf)
2347 struct bnx2x_mcast_ramrod_params rparam = {NULL};
2348 int rc;
2350 rparam.mcast_obj = &vf->mcast_obj;
2351 vf->mcast_obj.raw.clear_pending(&vf->mcast_obj.raw);
2353 /* If there are pending mcast commands - send them */
2354 if (vf->mcast_obj.check_pending(&vf->mcast_obj)) {
2355 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2356 if (rc < 0)
2357 BNX2X_ERR("Failed to send pending mcast commands: %d\n",
2358 rc);
2363 void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp,
2364 struct bnx2x_virtf *vf)
2366 smp_mb__before_clear_bit();
2367 clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
2368 smp_mb__after_clear_bit();
2369 }
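/* Illustrative sketch (not part of the driver): the
 * smp_mb__before/after_clear_bit() pair above orders the
 * BNX2X_FILTER_RX_MODE_PENDING clear against surrounding accesses. A
 * hypothetical waiter could poll the same bit like this; the helper
 * name and poll budget below are made up for illustration only.
 */
static int example_wait_rx_mode_done(struct bnx2x_virtf *vf)
{
	int poll;

	for (poll = 0; poll < 100; poll++) {
		/* pairs with the barriers in bnx2x_vf_handle_filters_eqe() */
		if (!test_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state))
			return 0;
		usleep_range(100, 200);
	}
	return -ETIMEDOUT;
}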
2371 int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
2373 struct bnx2x_virtf *vf;
2374 int qidx = 0, abs_vfid;
2375 u8 opcode;
2376 u16 cid = 0xffff;
2381 /* first get the cid - the only events we handle here are cfc-delete
2382 * and set-mac completion
2384 opcode = elem->message.opcode;
2386 switch (opcode) {
2387 case EVENT_RING_OPCODE_CFC_DEL:
2388 cid = SW_CID((__force __le32)
2389 elem->message.data.cfc_del_event.cid);
2390 DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid);
2392 case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
2393 case EVENT_RING_OPCODE_MULTICAST_RULES:
2394 case EVENT_RING_OPCODE_FILTERS_RULES:
2395 cid = (elem->message.data.eth_event.echo &
2396 BNX2X_SWCID_MASK);
2397 DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid);
2398 break;
2399 case EVENT_RING_OPCODE_VF_FLR:
2400 abs_vfid = elem->message.data.vf_flr_event.vf_id;
2401 DP(BNX2X_MSG_IOV, "Got VF FLR notification abs_vfid=%d\n",
2404 case EVENT_RING_OPCODE_MALICIOUS_VF:
2405 abs_vfid = elem->message.data.malicious_vf_event.vf_id;
2406 BNX2X_ERR("Got VF MALICIOUS notification abs_vfid=%d err_id=0x%x\n",
2407 abs_vfid,
2408 elem->message.data.malicious_vf_event.err_id);
2409 goto get_vf;
2410 default:
2411 return 1;
2412 }
2414 /* check if the cid is in the VF range */
2415 if (!bnx2x_iov_is_vf_cid(bp, cid)) {
2416 DP(BNX2X_MSG_IOV, "cid is outside vf range: %d\n", cid);
2420 /* extract vf and rxq index from vf_cid - relies on the following:
2421 * 1. vfid on cid reflects the true abs_vfid
2422 * 2. The max number of VFs (per path) is 64
2424 qidx = cid & ((1 << BNX2X_VF_CID_WND)-1);
2425 abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
2426 get_vf:
2427 vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
2429 if (!vf) {
2430 BNX2X_ERR("EQ completion for unknown VF, cid %d, abs_vfid %d\n",
2431 cid, abs_vfid);
2432 return 0;
2433 }
2435 switch (opcode) {
2436 case EVENT_RING_OPCODE_CFC_DEL:
2437 DP(BNX2X_MSG_IOV, "got VF [%d:%d] cfc delete ramrod\n",
2438 vf->abs_vfid, qidx);
2439 vfq_get(vf, qidx)->sp_obj.complete_cmd(bp,
2440 &vfq_get(vf, qidx)->sp_obj,
2442 BNX2X_Q_CMD_CFC_DEL);
2443 break;
2444 case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
2445 DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mac/vlan ramrod\n",
2446 vf->abs_vfid, qidx);
2447 bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem);
2448 break;
2449 case EVENT_RING_OPCODE_MULTICAST_RULES:
2450 DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mcast ramrod\n",
2451 vf->abs_vfid, qidx);
2452 bnx2x_vf_handle_mcast_eqe(bp, vf);
2453 break;
2454 case EVENT_RING_OPCODE_FILTERS_RULES:
2455 DP(BNX2X_MSG_IOV, "got VF [%d:%d] set rx-mode ramrod\n",
2456 vf->abs_vfid, qidx);
2457 bnx2x_vf_handle_filters_eqe(bp, vf);
2458 break;
2459 case EVENT_RING_OPCODE_VF_FLR:
2460 case EVENT_RING_OPCODE_MALICIOUS_VF:
2461 /* Do nothing for now */
2462 break;
2463 }
2464 /* SRIOV: reschedule any 'in_progress' operations */
2465 bnx2x_iov_sp_event(bp, cid, false);
2466 return 0;
2467 }
2470 static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid)
2472 /* extract the vf from vf_cid - relies on the following:
2473 * 1. vfid on cid reflects the true abs_vfid
2474 * 2. The max number of VFs (per path) is 64
2476 int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
2477 return bnx2x_vf_by_abs_fid(bp, abs_vfid);
2478 }
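/* Illustrative sketch (not part of the driver): the inverse of the
 * decode above. A VF cid carries the queue index in the low
 * BNX2X_VF_CID_WND bits and the abs_vfid above them; the
 * BNX2X_FIRST_VF_CID base is added when cids are actually programmed
 * (see bnx2x_vf_acquire()). Helper name is illustrative only.
 */
static inline int example_compose_vf_cid(int abs_vfid, int qidx)
{
	return (abs_vfid << BNX2X_VF_CID_WND) |
	       (qidx & ((1 << BNX2X_VF_CID_WND) - 1));
}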
2480 void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
2481 struct bnx2x_queue_sp_obj **q_obj)
2483 struct bnx2x_virtf *vf;
2488 vf = bnx2x_vf_by_cid(bp, vf_cid);
2490 if (vf) {
2491 /* extract queue index from vf_cid - relies on the following:
2492 * 1. vfid on cid reflects the true abs_vfid
2493 * 2. The max number of VFs (per path) is 64
2495 int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1);
2496 *q_obj = &bnx2x_vfq(vf, q_index, sp_obj);
2498 BNX2X_ERR("No vf matching cid %d\n", vf_cid);
2502 void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work)
2504 struct bnx2x_virtf *vf;
2506 /* check if the cid is in the VF range */
2507 if (!IS_SRIOV(bp) || !bnx2x_iov_is_vf_cid(bp, vf_cid))
2510 vf = bnx2x_vf_by_cid(bp, vf_cid);
2511 if (vf) {
2512 /* set in_progress flag */
2513 atomic_set(&vf->op_in_progress, 1);
2514 if (queue_work)
2515 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2516 }
2517 }
2519 void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
2522 int first_queue_query_index, num_queues_req;
2523 dma_addr_t cur_data_offset;
2524 struct stats_query_entry *cur_query_entry;
2525 u8 stats_count = 0;
2526 bool is_fcoe = false;
2534 /* fcoe adds one global request and one queue request */
2535 num_queues_req = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe;
2536 first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX -
2537 (is_fcoe ? 0 : 1);
2539 DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
2540 "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n",
2541 BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index,
2542 first_queue_query_index + num_queues_req);
2544 cur_data_offset = bp->fw_stats_data_mapping +
2545 offsetof(struct bnx2x_fw_stats_data, queue_stats) +
2546 num_queues_req * sizeof(struct per_queue_stats);
2548 cur_query_entry = &bp->fw_stats_req->
2549 query[first_queue_query_index + num_queues_req];
2551 for_each_vf(bp, i) {
2553 struct bnx2x_virtf *vf = BP_VF(bp, i);
2555 if (vf->state != VF_ENABLED) {
2556 DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
2557 "vf %d not enabled so no stats for it\n",
2562 DP(BNX2X_MSG_IOV, "add addresses for vf %d\n", vf->abs_vfid);
2563 for_each_vfq(vf, j) {
2564 struct bnx2x_vf_queue *rxq = vfq_get(vf, j);
2566 dma_addr_t q_stats_addr =
2567 vf->fw_stat_map + j * vf->stats_stride;
2569 /* collect stats from active queues only */
2570 if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) ==
2571 BNX2X_Q_LOGICAL_STATE_STOPPED)
2572 continue;
2574 /* create stats query entry for this queue */
2575 cur_query_entry->kind = STATS_TYPE_QUEUE;
2576 cur_query_entry->index = vfq_stat_id(vf, rxq);
2577 cur_query_entry->funcID =
2578 cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid));
2579 cur_query_entry->address.hi =
2580 cpu_to_le32(U64_HI(q_stats_addr));
2581 cur_query_entry->address.lo =
2582 cpu_to_le32(U64_LO(q_stats_addr));
2584 "added address %x %x for vf %d queue %d client %d\n",
2585 cur_query_entry->address.hi,
2586 cur_query_entry->address.lo, cur_query_entry->funcID,
2587 j, cur_query_entry->index);
2589 cur_data_offset += sizeof(struct per_queue_stats);
2590 cur_query_entry++;
2591 stats_count++;
2592 /* all stats are coalesced to the leading queue */
2593 if (vf->cfg_flags & VF_CFG_STATS_COALESCE)
2594 break;
2595 }
2596 }
2597 bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
2598 }
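/* Illustrative sketch (not part of the driver): where the first VF
 * stats query lands, per the index arithmetic above. With 4 ETH queues
 * and no FCoE, num_queues_req is 4 and the VF entries start at
 * BNX2X_FIRST_QUEUE_QUERY_IDX - 1 + 4. Helper name is illustrative.
 */
static inline int example_first_vf_query_index(int num_eth_queues,
					       bool is_fcoe)
{
	int num_queues_req = num_eth_queues + is_fcoe;

	return BNX2X_FIRST_QUEUE_QUERY_IDX - (is_fcoe ? 0 : 1) +
	       num_queues_req;
}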
2600 void bnx2x_iov_sp_task(struct bnx2x *bp)
2602 int i;
2604 if (!IS_SRIOV(bp))
2605 return;
2606 /* Iterate over all VFs and invoke state transition for VFs with
2607 * 'in-progress' slow-path operations
2609 DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_SP),
2610 "searching for pending vf operations\n");
2611 for_each_vf(bp, i) {
2612 struct bnx2x_virtf *vf = BP_VF(bp, i);
2614 if (!vf) {
2615 BNX2X_ERR("VF was null! skipping...\n");
2616 continue;
2617 }
2619 if (!list_empty(&vf->op_list_head) &&
2620 atomic_read(&vf->op_in_progress)) {
2621 DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i);
2622 bnx2x_vfop_cur(bp, vf)->transition(bp, vf);
2628 struct bnx2x_virtf *__vf_from_stat_id(struct bnx2x *bp, u8 stat_id)
2630 int i;
2631 struct bnx2x_virtf *vf = NULL;
2633 for_each_vf(bp, i) {
2634 vf = BP_VF(bp, i);
2635 if (stat_id >= vf->igu_base_id &&
2636 stat_id < vf->igu_base_id + vf_sb_count(vf))
2637 break;
2638 }
2639 return vf;
2640 }
2642 /* VF API helpers */
2643 static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid,
2644 bool enable)
2646 u32 reg = PXP_REG_HST_ZONE_PERMISSION_TABLE + qid * 4;
2647 u32 val = enable ? (abs_vfid | (1 << 6)) : 0;
2649 REG_WR(bp, reg, val);
2650 }
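/* Illustrative sketch (not part of the driver): the permission table
 * entry written above is just the VF id with a valid flag in bit 6
 * when enabled, or zero to revoke access.
 */
static inline u32 example_qtbl_entry(u8 abs_vfid, bool enable)
{
	return enable ? (abs_vfid | (1 << 6)) : 0;
}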
2652 static void bnx2x_vf_clr_qtbl(struct bnx2x *bp, struct bnx2x_virtf *vf)
2654 int i;
2656 for_each_vfq(vf, i)
2657 bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
2658 vfq_qzone_id(vf, vfq_get(vf, i)), false);
2661 static void bnx2x_vf_igu_disable(struct bnx2x *bp, struct bnx2x_virtf *vf)
2663 u32 val;
2665 /* clear the VF configuration - pretend */
2666 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
2667 val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
2668 val &= ~(IGU_VF_CONF_MSI_MSIX_EN | IGU_VF_CONF_SINGLE_ISR_EN |
2669 IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_PARENT_MASK);
2670 REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
2671 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
2674 u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf)
2676 return min_t(u8, min_t(u8, vf_sb_count(vf), BNX2X_CIDS_PER_VF),
2677 BNX2X_VF_MAX_QUEUES);
2681 int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
2682 struct vf_pf_resc_request *req_resc)
2684 u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
2685 u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
2687 return ((req_resc->num_rxqs <= rxq_cnt) &&
2688 (req_resc->num_txqs <= txq_cnt) &&
2689 (req_resc->num_sbs <= vf_sb_count(vf)) &&
2690 (req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
2691 (req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf)));
2692 }
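/* Illustrative sketch (not part of the driver): a request built this
 * way always passes the availability check above, since every field is
 * clamped to the corresponding per-VF limit. Helper name is
 * illustrative only.
 */
static void example_build_max_resc_request(struct bnx2x *bp,
					   struct bnx2x_virtf *vf,
					   struct vf_pf_resc_request *req)
{
	req->num_rxqs = bnx2x_vf_max_queue_cnt(bp, vf);
	req->num_txqs = bnx2x_vf_max_queue_cnt(bp, vf);
	req->num_sbs = vf_sb_count(vf);
	req->num_mac_filters = vf_mac_rules_cnt(vf);
	req->num_vlan_filters = vf_vlan_rules_cnt(vf);
}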
2695 int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
2696 struct vf_pf_resc_request *resc)
2698 int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) *
2699 BNX2X_CIDS_PER_VF;
2701 union cdu_context *base_cxt = (union cdu_context *)
2702 BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
2703 (base_vf_cid & (ILT_PAGE_CIDS-1));
2706 /* if state is 'acquired' the VF was not released or FLR'd, in
2707 * this case the returned resources match the previously
2708 * acquired resources. Verify that the requested numbers do
2709 * not exceed the already acquired numbers.
2710 */
2711 if (vf->state == VF_ACQUIRED) {
2712 DP(BNX2X_MSG_IOV, "VF[%d] Trying to re-acquire resources (VF was not released or FLR'd)\n",
2715 if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
2716 BNX2X_ERR("VF[%d] When re-acquiring resources, requested numbers must be <= the previously acquired numbers\n",
2717 vf->abs_vfid);
2718 return -EINVAL;
2719 }
2721 return 0;
2722 }
2723 /* Otherwise vf state must be 'free' or 'reset' */
2724 if (vf->state != VF_FREE && vf->state != VF_RESET) {
2725 BNX2X_ERR("VF[%d] Can not acquire a VF with state %d\n",
2726 vf->abs_vfid, vf->state);
2727 return -EINVAL;
2728 }
2730 /* static allocation:
2731 * the global maximum numbers are fixed per VF. Fail the request if
2732 * the requested numbers exceed these globals
2733 */
2734 if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
2736 "cannot fulfill vf resource request. Placing maximal available values in response\n");
2737 /* set the max resource in the vf */
2741 /* Set resources counters - 0 request means max available */
2742 vf_sb_count(vf) = resc->num_sbs;
2743 vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
2744 vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
2745 if (resc->num_mac_filters)
2746 vf_mac_rules_cnt(vf) = resc->num_mac_filters;
2747 if (resc->num_vlan_filters)
2748 vf_vlan_rules_cnt(vf) = resc->num_vlan_filters;
2751 "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
2752 vf_sb_count(vf), vf_rxq_count(vf),
2753 vf_txq_count(vf), vf_mac_rules_cnt(vf),
2754 vf_vlan_rules_cnt(vf));
2756 /* Initialize the queues */
2757 if (!vf->vfqs) {
2758 DP(BNX2X_MSG_IOV, "vf->vfqs was not allocated\n");
2759 return -EINVAL;
2760 }
2762 for_each_vfq(vf, i) {
2763 struct bnx2x_vf_queue *q = vfq_get(vf, i);
2765 if (!q) {
2766 BNX2X_ERR("q number %d was not allocated\n", i);
2767 return -EINVAL;
2768 }
2770 q->index = i;
2771 q->cxt = &((base_cxt + i)->eth);
2772 q->cid = BNX2X_FIRST_VF_CID + base_vf_cid + i;
2774 DP(BNX2X_MSG_IOV, "VFQ[%d:%d]: index %d, cid 0x%x, cxt %p\n",
2775 vf->abs_vfid, i, q->index, q->cid, q->cxt);
2777 /* init SP objects */
2778 bnx2x_vfq_init(bp, vf, q);
2780 vf->state = VF_ACQUIRED;
2781 return 0;
2782 }
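/* Illustrative sketch (not part of the driver): acquiring with a
 * zeroed request exercises the "0 request means max available" rule
 * above, so the VF ends up with its queue maximums. Helper name is
 * illustrative only.
 */
static int example_acquire_with_maximums(struct bnx2x *bp,
					 struct bnx2x_virtf *vf)
{
	struct vf_pf_resc_request resc = {0};

	/* zero rxq/txq counts are replaced by bnx2x_vf_max_queue_cnt() */
	return bnx2x_vf_acquire(bp, vf, &resc);
}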
2784 int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
2786 struct bnx2x_func_init_params func_init = {0};
2787 u16 flags = 0;
2788 int i;
2790 /* the sb resources are initialized at this point, do the
2791 * FW/HW initializations
2793 for_each_vf_sb(vf, i)
2794 bnx2x_init_sb(bp, (dma_addr_t)sb_map[i], vf->abs_vfid, true,
2795 vf_igu_sb(vf, i), vf_igu_sb(vf, i));
2798 if (vf->state != VF_ACQUIRED) {
2799 DP(BNX2X_MSG_IOV, "VF[%d] is not in VF_ACQUIRED, but %d\n",
2800 vf->abs_vfid, vf->state);
2801 return -EINVAL;
2802 }
2804 /* let FLR complete ... */
2805 msleep(100);
2807 /* FLR cleanup epilogue */
2808 if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid))
2809 return -EBUSY;
2811 /* reset IGU VF statistics: MSIX */
2812 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4, 0);
2815 if (vf->cfg_flags & VF_CFG_STATS)
2816 flags |= (FUNC_FLG_STATS | FUNC_FLG_SPQ);
2818 if (vf->cfg_flags & VF_CFG_TPA)
2819 flags |= FUNC_FLG_TPA;
2821 if (is_vf_multi(vf))
2822 flags |= FUNC_FLG_RSS;
2824 /* function setup */
2825 func_init.func_flgs = flags;
2826 func_init.pf_id = BP_FUNC(bp);
2827 func_init.func_id = FW_VF_HANDLE(vf->abs_vfid);
2828 func_init.fw_stat_map = vf->fw_stat_map;
2829 func_init.spq_map = vf->spq_map;
2830 func_init.spq_prod = 0;
2831 bnx2x_func_init(bp, &func_init);
2834 bnx2x_vf_enable_access(bp, vf->abs_vfid);
2835 bnx2x_vf_enable_traffic(bp, vf);
2837 /* queue protection table */
2838 for_each_vfq(vf, i)
2839 bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
2840 vfq_qzone_id(vf, vfq_get(vf, i)), true);
2842 vf->state = VF_ENABLED;
2844 /* update vf bulletin board */
2845 bnx2x_post_vf_bulletin(bp, vf->index);
2847 return 0;
2848 }
2850 struct set_vf_state_cookie {
2851 struct bnx2x_virtf *vf;
2852 u8 state;
2853 };
2855 static void bnx2x_set_vf_state(void *cookie)
2857 struct set_vf_state_cookie *p = (struct set_vf_state_cookie *)cookie;
2859 p->vf->state = p->state;
2860 }
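/* Illustrative sketch (not part of the driver): the cookie pattern
 * used with bnx2x_stats_safe_exec() below - bundle the arguments in a
 * struct and let the helper invoke the callback while statistics DMA
 * is quiesced. Wrapper name is illustrative only.
 */
static void example_set_state_quiesced(struct bnx2x *bp,
				       struct bnx2x_virtf *vf, u8 state)
{
	struct set_vf_state_cookie cookie = {
		.vf = vf,
		.state = state,
	};

	bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie);
}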
2862 /* VFOP close (teardown the queues, delete mcasts and close HW) */
2863 static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
2865 struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
2866 struct bnx2x_vfop_args_qx *qx = &vfop->args.qx;
2867 enum bnx2x_vfop_close_state state = vfop->state;
2868 struct bnx2x_vfop_cmd cmd = {
2869 .done = bnx2x_vfop_close,
2870 .block = false,
2871 };
2873 if (vfop->rc < 0)
2874 goto op_err;
2876 DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
2879 case BNX2X_VFOP_CLOSE_QUEUES:
2881 if (++(qx->qid) < vf_rxq_count(vf)) {
2882 vfop->rc = bnx2x_vfop_qdown_cmd(bp, vf, &cmd, qx->qid);
2883 if (vfop->rc)
2884 goto op_err;
2885 return;
2886 }
2887 vfop->state = BNX2X_VFOP_CLOSE_HW;
2888 vfop->rc = 0;
2889 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
2891 case BNX2X_VFOP_CLOSE_HW:
2893 /* disable the interrupts */
2894 DP(BNX2X_MSG_IOV, "disabling igu\n");
2895 bnx2x_vf_igu_disable(bp, vf);
2897 /* disable the VF */
2898 DP(BNX2X_MSG_IOV, "clearing qtbl\n");
2899 bnx2x_vf_clr_qtbl(bp, vf);
2901 goto op_done;
2902 default:
2903 bnx2x_vfop_default(state);
2904 }
2905 op_err:
2906 BNX2X_ERR("VF[%d] CLOSE error: rc %d\n", vf->abs_vfid, vfop->rc);
2907 op_done:
2909 /* need to make sure there are no outstanding stats ramrods which may
2910 * cause the device to access the VF's stats buffer which it will free
2911 * as soon as we return from the close flow.
2912 */
2913 {
2914 struct set_vf_state_cookie cookie;
2916 cookie.vf = vf;
2917 cookie.state = VF_ACQUIRED;
2918 bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie);
2919 }
2921 DP(BNX2X_MSG_IOV, "set state to acquired\n");
2922 bnx2x_vfop_end(bp, vf, vfop);
2923 op_pending:
2924 /* Not supported at the moment; Exists for macros only */
2925 return;
2926 }
2928 int bnx2x_vfop_close_cmd(struct bnx2x *bp,
2929 struct bnx2x_virtf *vf,
2930 struct bnx2x_vfop_cmd *cmd)
2932 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
2933 if (vfop) {
2934 vfop->args.qx.qid = -1; /* loop */
2935 bnx2x_vfop_opset(BNX2X_VFOP_CLOSE_QUEUES,
2936 bnx2x_vfop_close, cmd->done);
2937 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_close,
2938 cmd->block);
2939 }
2940 return -ENOMEM;
2941 }
2943 /* VF release can be called either: 1. The VF was acquired but
2944 * not enabled 2. the vf was enabled or in the process of being
2945 * enabled
2946 */
2947 static void bnx2x_vfop_release(struct bnx2x *bp, struct bnx2x_virtf *vf)
2949 struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
2950 struct bnx2x_vfop_cmd cmd = {
2951 .done = bnx2x_vfop_release,
2952 .block = false,
2953 };
2955 DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc);
2960 DP(BNX2X_MSG_IOV, "VF[%d] STATE: %s\n", vf->abs_vfid,
2961 vf->state == VF_FREE ? "Free" :
2962 vf->state == VF_ACQUIRED ? "Acquired" :
2963 vf->state == VF_ENABLED ? "Enabled" :
2964 vf->state == VF_RESET ? "Reset" :
2967 switch (vf->state) {
2968 case VF_ENABLED:
2969 vfop->rc = bnx2x_vfop_close_cmd(bp, vf, &cmd);
2970 if (vfop->rc)
2971 goto op_err;
2972 return;
2974 case VF_ACQUIRED:
2975 DP(BNX2X_MSG_IOV, "about to free resources\n");
2976 bnx2x_vf_free_resc(bp, vf);
2977 DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc);
2985 bnx2x_vfop_default(vf->state);
2988 BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, vfop->rc);
2990 bnx2x_vfop_end(bp, vf, vfop);
2993 static void bnx2x_vfop_rss(struct bnx2x *bp, struct bnx2x_virtf *vf)
2995 struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
2996 enum bnx2x_vfop_rss_state state;
2998 if (!vfop) {
2999 BNX2X_ERR("vfop was null\n");
3000 return;
3001 }
3003 state = vfop->state;
3004 bnx2x_vfop_reset_wq(vf);
3006 if (vfop->rc < 0)
3007 goto op_err;
3009 DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
3012 case BNX2X_VFOP_RSS_CONFIG:
3014 vfop->state = BNX2X_VFOP_RSS_DONE;
3015 bnx2x_config_rss(bp, &vfop->op_p->rss);
3016 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
3017 op_err:
3018 BNX2X_ERR("RSS error: rc %d\n", vfop->rc);
3019 op_done:
3020 case BNX2X_VFOP_RSS_DONE:
3021 bnx2x_vfop_end(bp, vf, vfop);
3022 return;
3023 default:
3024 bnx2x_vfop_default(state);
3025 }
3030 int bnx2x_vfop_release_cmd(struct bnx2x *bp,
3031 struct bnx2x_virtf *vf,
3032 struct bnx2x_vfop_cmd *cmd)
3034 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
3035 if (vfop) {
3036 bnx2x_vfop_opset(-1, /* use vf->state */
3037 bnx2x_vfop_release, cmd->done);
3038 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_release,
3039 cmd->block);
3040 }
3041 return -ENOMEM;
3042 }
3044 int bnx2x_vfop_rss_cmd(struct bnx2x *bp,
3045 struct bnx2x_virtf *vf,
3046 struct bnx2x_vfop_cmd *cmd)
3048 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
3050 if (vfop) {
3051 bnx2x_vfop_opset(BNX2X_VFOP_RSS_CONFIG, bnx2x_vfop_rss,
3052 cmd->done);
3053 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rss,
3054 cmd->block);
3055 }
3056 return -ENOMEM;
3057 }
3059 /* VFOP tpa update, send update on all queues */
3060 static void bnx2x_vfop_tpa(struct bnx2x *bp, struct bnx2x_virtf *vf)
3062 struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
3063 struct bnx2x_vfop_args_tpa *tpa_args = &vfop->args.tpa;
3064 enum bnx2x_vfop_tpa_state state = vfop->state;
3066 bnx2x_vfop_reset_wq(vf);
3068 if (vfop->rc < 0)
3069 goto op_err;
3071 DP(BNX2X_MSG_IOV, "vf[%d:%d] STATE: %d\n",
3072 vf->abs_vfid, tpa_args->qid,
3073 state);
3075 switch (state) {
3076 case BNX2X_VFOP_TPA_CONFIG:
3078 if (tpa_args->qid < vf_rxq_count(vf)) {
3079 struct bnx2x_queue_state_params *qstate =
3080 &vf->op_params.qstate;
3082 qstate->q_obj = &bnx2x_vfq(vf, tpa_args->qid, sp_obj);
3084 /* The only thing that changes for the ramrod params
3085 * between calls is the sge_map
3087 qstate->params.update_tpa.sge_map =
3088 tpa_args->sge_map[tpa_args->qid];
3090 DP(BNX2X_MSG_IOV, "sge_addr[%d] %08x:%08x\n",
3092 U64_HI(qstate->params.update_tpa.sge_map),
3093 U64_LO(qstate->params.update_tpa.sge_map));
3094 qstate->cmd = BNX2X_Q_CMD_UPDATE_TPA;
3095 vfop->rc = bnx2x_queue_state_change(bp, qstate);
3096 tpa_args->qid++;
3098 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
3099 }
3100 vfop->state = BNX2X_VFOP_TPA_DONE;
3101 vfop->rc = 0;
3102 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
3103 op_err:
3104 BNX2X_ERR("TPA update error: rc %d\n", vfop->rc);
3105 op_done:
3106 case BNX2X_VFOP_TPA_DONE:
3107 bnx2x_vfop_end(bp, vf, vfop);
3108 return;
3109 default:
3110 bnx2x_vfop_default(state);
3111 }
3116 int bnx2x_vfop_tpa_cmd(struct bnx2x *bp,
3117 struct bnx2x_virtf *vf,
3118 struct bnx2x_vfop_cmd *cmd,
3119 struct vfpf_tpa_tlv *tpa_tlv)
3121 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
3123 if (vfop) {
3124 vfop->args.qx.qid = 0; /* loop */
3125 memcpy(&vfop->args.tpa.sge_map,
3126 tpa_tlv->tpa_client_info.sge_addr,
3127 sizeof(vfop->args.tpa.sge_map));
3128 bnx2x_vfop_opset(BNX2X_VFOP_TPA_CONFIG,
3129 bnx2x_vfop_tpa, cmd->done);
3130 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_tpa,
3131 cmd->block);
3132 }
3133 return -ENOMEM;
3134 }
3136 /* VF release ~ VF close + VF release-resources
3137 * Release is the ultimate SW shutdown and is called whenever an
3138 * irrecoverable error is encountered.
3140 void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block)
3142 struct bnx2x_vfop_cmd cmd = {
3143 .done = NULL,
3144 .block = block,
3145 };
3146 int rc;
3148 DP(BNX2X_MSG_IOV, "PF releasing vf %d\n", vf->abs_vfid);
3149 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
3151 rc = bnx2x_vfop_release_cmd(bp, vf, &cmd);
3152 if (rc)
3153 WARN(rc,
3154 "VF[%d] Failed to allocate resources for release op - rc=%d\n",
3155 vf->abs_vfid, rc);
3156 }
3158 static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp,
3159 struct bnx2x_virtf *vf, u32 *sbdf)
3161 *sbdf = vf->devfn | (vf->bus << 8);
3164 void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
3165 enum channel_tlvs tlv)
3167 /* we don't lock the channel for unsupported tlvs */
3168 if (!bnx2x_tlv_supported(tlv)) {
3169 BNX2X_ERR("attempting to lock with unsupported tlv. Aborting\n");
3173 /* lock the channel */
3174 mutex_lock(&vf->op_mutex);
3176 /* record the locking op */
3177 vf->op_current = tlv;
3180 DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel locked by %d\n",
3184 void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
3185 enum channel_tlvs expected_tlv)
3187 enum channel_tlvs current_tlv;
3189 if (!vf) {
3190 BNX2X_ERR("VF was %p\n", vf);
3191 return;
3192 }
3194 current_tlv = vf->op_current;
3196 /* we don't unlock the channel for unsupported tlvs */
3197 if (!bnx2x_tlv_supported(expected_tlv))
3198 return;
3200 WARN(expected_tlv != vf->op_current,
3201 "lock mismatch: expected %d found %d", expected_tlv,
3204 /* clear the locking op */
3205 vf->op_current = CHANNEL_TLV_NONE;
3207 /* unlock the channel */
3208 mutex_unlock(&vf->op_mutex);
3210 /* log the unlock */
3211 DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n",
3212 vf->abs_vfid, current_tlv);
3213 }
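/* Illustrative sketch (not part of the driver): PF-side flows bracket
 * their work with the channel lock so a concurrent VF request carrying
 * the same TLV cannot interleave; compare bnx2x_set_vf_mac() below.
 * Helper name is illustrative only.
 */
static void example_locked_pf_operation(struct bnx2x *bp,
					struct bnx2x_virtf *vf)
{
	bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
	/* ... perform the protected configuration here ... */
	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
}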
3215 static int bnx2x_set_pf_tx_switching(struct bnx2x *bp, bool enable)
3217 struct bnx2x_queue_state_params q_params;
3218 u32 prev_flags;
3219 int i, rc;
3221 /* Verify changes are needed and record current Tx switching state */
3222 prev_flags = bp->flags;
3223 if (enable)
3224 bp->flags |= TX_SWITCHING;
3225 else
3226 bp->flags &= ~TX_SWITCHING;
3227 if (prev_flags == bp->flags)
3228 return 0;
3230 /* Verify state enables the sending of queue ramrods */
3231 if ((bp->state != BNX2X_STATE_OPEN) ||
3232 (bnx2x_get_q_logical_state(bp,
3233 &bnx2x_sp_obj(bp, &bp->fp[0]).q_obj) !=
3234 BNX2X_Q_LOGICAL_STATE_ACTIVE))
3235 return 0;
3237 /* send q. update ramrod to configure Tx switching */
3238 memset(&q_params, 0, sizeof(q_params));
3239 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
3240 q_params.cmd = BNX2X_Q_CMD_UPDATE;
3241 __set_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG,
3242 &q_params.params.update.update_flags);
3243 if (enable)
3244 __set_bit(BNX2X_Q_UPDATE_TX_SWITCHING,
3245 &q_params.params.update.update_flags);
3246 else
3247 __clear_bit(BNX2X_Q_UPDATE_TX_SWITCHING,
3248 &q_params.params.update.update_flags);
3250 /* send the ramrod on all the queues of the PF */
3251 for_each_eth_queue(bp, i) {
3252 struct bnx2x_fastpath *fp = &bp->fp[i];
3254 /* Set the appropriate Queue object */
3255 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
3257 /* Update the Queue state */
3258 rc = bnx2x_queue_state_change(bp, &q_params);
3259 if (rc) {
3260 BNX2X_ERR("Failed to configure Tx switching\n");
3261 return rc;
3262 }
3263 }
3265 DP(BNX2X_MSG_IOV, "%s Tx Switching\n", enable ? "Enabled" : "Disabled");
3266 return 0;
3267 }
3269 int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
3271 struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev));
3273 if (!IS_SRIOV(bp)) {
3274 BNX2X_ERR("failed to configure SR-IOV since vfdb was not allocated. Check dmesg for errors in probe stage\n");
3278 DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n",
3279 num_vfs_param, BNX2X_NR_VIRTFN(bp));
3281 /* HW channel is only operational when PF is up */
3282 if (bp->state != BNX2X_STATE_OPEN) {
3283 BNX2X_ERR("VF num configuration via sysfs not supported while PF is down\n");
3287 /* we are always bound by the total_vfs in the configuration space */
3288 if (num_vfs_param > BNX2X_NR_VIRTFN(bp)) {
3289 BNX2X_ERR("truncating requested number of VFs (%d) down to maximum allowed (%d)\n",
3290 num_vfs_param, BNX2X_NR_VIRTFN(bp));
3291 num_vfs_param = BNX2X_NR_VIRTFN(bp);
3294 bp->requested_nr_virtfn = num_vfs_param;
3295 if (num_vfs_param == 0) {
3296 bnx2x_set_pf_tx_switching(bp, false);
3297 pci_disable_sriov(dev);
3298 return 0;
3299 } else {
3300 return bnx2x_enable_sriov(bp);
3301 }
3302 }
3304 #define IGU_ENTRY_SIZE 4
3306 int bnx2x_enable_sriov(struct bnx2x *bp)
3308 int rc = 0, req_vfs = bp->requested_nr_virtfn;
3309 int vf_idx, sb_idx, vfq_idx, qcount, first_vf;
3310 u32 igu_entry, address;
3311 u16 num_vf_queues;
3316 first_vf = bp->vfdb->sriov.first_vf_in_pf;
3318 /* statically distribute vf sb pool between VFs */
3319 num_vf_queues = min_t(u16, BNX2X_VF_MAX_QUEUES,
3320 BP_VFDB(bp)->vf_sbs_pool / req_vfs);
3322 /* zero previous values learned from igu cam */
3323 for (vf_idx = 0; vf_idx < req_vfs; vf_idx++) {
3324 struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
3327 vf_sb_count(BP_VF(bp, vf_idx)) = 0;
3329 bp->vfdb->vf_sbs_pool = 0;
3331 /* prepare IGU cam */
3332 sb_idx = BP_VFDB(bp)->first_vf_igu_entry;
3333 address = IGU_REG_MAPPING_MEMORY + sb_idx * IGU_ENTRY_SIZE;
3334 for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
3335 for (vfq_idx = 0; vfq_idx < num_vf_queues; vfq_idx++) {
3336 igu_entry = vf_idx << IGU_REG_MAPPING_MEMORY_FID_SHIFT |
3337 vfq_idx << IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT |
3338 IGU_REG_MAPPING_MEMORY_VALID;
3339 DP(BNX2X_MSG_IOV, "assigning sb %d to vf %d\n",
3341 REG_WR(bp, address, igu_entry);
3343 address += IGU_ENTRY_SIZE;
3347 /* Reinitialize vf database according to igu cam */
3348 bnx2x_get_vf_igu_cam_info(bp);
3350 DP(BNX2X_MSG_IOV, "vf_sbs_pool %d, num_vf_queues %d\n",
3351 BP_VFDB(bp)->vf_sbs_pool, num_vf_queues);
3353 qcount = 0;
3354 for_each_vf(bp, vf_idx) {
3355 struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
3357 /* set local queue arrays */
3358 vf->vfqs = &bp->vfdb->vfqs[qcount];
3359 qcount += vf_sb_count(vf);
3360 bnx2x_iov_static_resc(bp, vf);
3363 /* prepare msix vectors in VF configuration space - the value in the
3364 * PCI configuration space should be the index of the last entry,
3365 * namely one less than the actual size of the table
3367 for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
3368 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx));
3369 REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL,
3370 num_vf_queues - 1);
3371 DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n",
3372 vf_idx, num_vf_queues - 1);
3373 }
3374 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
3376 /* enable sriov. This will probe all the VFs, and consequently cause
3377 * the "acquire" messages to appear on the VF PF channel.
3378 */
3379 DP(BNX2X_MSG_IOV, "about to call enable sriov\n");
3380 bnx2x_disable_sriov(bp);
3382 rc = bnx2x_set_pf_tx_switching(bp, true);
3383 if (rc)
3384 return rc;
3386 rc = pci_enable_sriov(bp->pdev, req_vfs);
3387 if (rc) {
3388 BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
3389 return rc;
3390 }
3391 DP(BNX2X_MSG_IOV, "sriov enabled (%d vfs)\n", req_vfs);
3392 return req_vfs;
3393 }
3395 void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp)
3397 int vfidx;
3398 struct pf_vf_bulletin_content *bulletin;
3400 DP(BNX2X_MSG_IOV, "configuring vlan for VFs from sp-task\n");
3401 for_each_vf(bp, vfidx) {
3402 bulletin = BP_VF_BULLETIN(bp, vfidx);
3403 if (BP_VF(bp, vfidx)->cfg_flags & VF_CFG_VLAN)
3404 bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0);
3408 void bnx2x_disable_sriov(struct bnx2x *bp)
3410 pci_disable_sriov(bp->pdev);
3413 static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx,
3414 struct bnx2x_virtf **vf,
3415 struct pf_vf_bulletin_content **bulletin)
3417 if (bp->state != BNX2X_STATE_OPEN) {
3418 BNX2X_ERR("vf ndo called even though PF is down\n");
3419 return -EINVAL;
3420 }
3422 if (!IS_SRIOV(bp)) {
3423 BNX2X_ERR("vf ndo called even though sriov is disabled\n");
3424 return -EINVAL;
3425 }
3427 if (vfidx >= BNX2X_NR_VIRTFN(bp)) {
3428 BNX2X_ERR("vf ndo called for uninitialized VF. vfidx was %d BNX2X_NR_VIRTFN was %d\n",
3429 vfidx, BNX2X_NR_VIRTFN(bp));
3430 return -EINVAL;
3431 }
3434 *vf = BP_VF(bp, vfidx);
3435 *bulletin = BP_VF_BULLETIN(bp, vfidx);
3438 BNX2X_ERR("vf ndo called but vf struct is null. vfidx was %d\n",
3444 BNX2X_ERR("vf ndo called but vfqs struct is null. Was ndo invoked before dynamically enabling SR-IOV? vfidx was %d\n",
3450 BNX2X_ERR("vf ndo called but Bulletin Board struct is null. vfidx was %d\n",
3458 int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
3459 struct ifla_vf_info *ivi)
3461 struct bnx2x *bp = netdev_priv(dev);
3462 struct bnx2x_virtf *vf = NULL;
3463 struct pf_vf_bulletin_content *bulletin = NULL;
3464 struct bnx2x_vlan_mac_obj *mac_obj;
3465 struct bnx2x_vlan_mac_obj *vlan_obj;
3466 int rc;
3468 /* sanity and init */
3469 rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
3470 if (rc)
3471 return rc;
3472 mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
3473 vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
3474 if (!mac_obj || !vlan_obj) {
3475 BNX2X_ERR("VF partially initialized\n");
3481 ivi->tx_rate = 10000; /* always 10G. TBA take from link struct */
3482 ivi->spoofchk = 1; /*always enabled */
3483 if (vf->state == VF_ENABLED) {
3484 /* mac and vlan are in vlan_mac objects */
3485 if (bnx2x_validate_vf_sp_objs(bp, vf, false)) {
3486 mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac,
3487 0, ETH_ALEN);
3488 vlan_obj->get_n_elements(bp, vlan_obj, 1,
3489 (u8 *)&ivi->vlan, 0,
3490 VLAN_HLEN);
3491 }
3492 } else {
3494 if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID))
3495 /* mac configured by ndo so its in bulletin board */
3496 memcpy(&ivi->mac, bulletin->mac, ETH_ALEN);
3497 else
3498 /* function has not been loaded yet. Show mac as 0s */
3499 memset(&ivi->mac, 0, ETH_ALEN);
3502 if (bulletin->valid_bitmap & (1 << VLAN_VALID))
3503 /* vlan configured by ndo so its in bulletin board */
3504 memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN);
3505 else
3506 /* function has not been loaded yet. Show vlans as 0s */
3507 memset(&ivi->vlan, 0, VLAN_HLEN);
3508 }
3510 return 0;
3511 }
3513 /* New mac for VF. Consider these cases:
3514 * 1. VF hasn't been acquired yet - save the mac in local bulletin board and
3515 * supply at acquire.
3516 * 2. VF has already been acquired but has not yet initialized - store in local
3517 * bulletin board. mac will be posted on VF bulletin board after VF init. VF
3518 * will configure this mac when it is ready.
3519 * 3. VF has already initialized but has not yet setup a queue - post the new
3520 * mac on VF's bulletin board right now. VF will configure this mac when it
3522 * 4. VF has already set a queue - delete any macs already configured for this
3523 * queue and manually config the new mac.
3524 * In any event, once this function has been called refuse any attempts by the
3525 * VF to configure any mac for itself except for this mac. In case of a race
3526 * where the VF fails to see the new post on its bulletin board before sending a
3527 * mac configuration request, the PF will simply fail the request and VF can try
3528 * again after consulting its bulletin board.
3529 */
3530 int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
3532 struct bnx2x *bp = netdev_priv(dev);
3533 int rc, q_logical_state;
3534 struct bnx2x_virtf *vf = NULL;
3535 struct pf_vf_bulletin_content *bulletin = NULL;
3537 /* sanity and init */
3538 rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
3539 if (rc)
3540 return rc;
3541 if (!is_valid_ether_addr(mac)) {
3542 BNX2X_ERR("mac address invalid\n");
3546 /* update PF's copy of the VF's bulletin. Will no longer accept mac
3547 * configuration requests from vf unless match this mac
3549 bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID;
3550 memcpy(bulletin->mac, mac, ETH_ALEN);
3552 /* Post update on VF's bulletin board */
3553 rc = bnx2x_post_vf_bulletin(bp, vfidx);
3554 if (rc) {
3555 BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx);
3556 return rc;
3557 }
3559 q_logical_state =
3560 bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj));
3561 if (vf->state == VF_ENABLED &&
3562 q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
3563 /* configure the mac in device on this vf's queue */
3564 unsigned long ramrod_flags = 0;
3565 struct bnx2x_vlan_mac_obj *mac_obj;
3567 /* User should be able to see failure reason in system logs */
3568 if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
3569 return -EINVAL;
3571 /* must lock vfpf channel to protect against vf flows */
3572 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
3574 /* remove existing eth macs */
3575 mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
3576 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true);
3577 if (rc) {
3578 BNX2X_ERR("failed to delete eth macs\n");
3579 rc = -EINVAL;
3580 goto out;
3581 }
3583 /* remove existing uc list macs */
3584 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true);
3585 if (rc) {
3586 BNX2X_ERR("failed to delete uc_list macs\n");
3587 rc = -EINVAL;
3588 goto out;
3589 }
3591 /* configure the new mac to device */
3592 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
3593 bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true,
3594 BNX2X_ETH_MAC, &ramrod_flags);
3596 out:
3597 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
3599 return rc;
3600 }
3603 int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
3605 struct bnx2x_queue_state_params q_params = {NULL};
3606 struct bnx2x_vlan_mac_ramrod_params ramrod_param;
3607 struct bnx2x_queue_update_params *update_params;
3608 struct pf_vf_bulletin_content *bulletin = NULL;
3609 struct bnx2x_rx_mode_ramrod_params rx_ramrod;
3610 struct bnx2x *bp = netdev_priv(dev);
3611 struct bnx2x_vlan_mac_obj *vlan_obj;
3612 unsigned long vlan_mac_flags = 0;
3613 unsigned long ramrod_flags = 0;
3614 struct bnx2x_virtf *vf = NULL;
3615 unsigned long accept_flags;
3616 int rc;
3618 /* sanity and init */
3619 rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
3620 if (rc)
3621 return rc;
3623 if (vlan > 4095) {
3624 BNX2X_ERR("illegal vlan value %d\n", vlan);
3625 return -EINVAL;
3626 }
3628 DP(BNX2X_MSG_IOV, "configuring VF %d with VLAN %d qos %d\n",
3631 /* update PF's copy of the VF's bulletin. No point in posting the vlan
3632 * to the VF since it doesn't have anything to do with it. But it is useful
3633 * to store it here in case the VF is not up yet and we can only
3634 * configure the vlan later when it does. Treat vlan id 0 as remove the
3635 * Host tag.
3636 */
3637 if (vlan > 0)
3638 bulletin->valid_bitmap |= 1 << VLAN_VALID;
3639 else
3640 bulletin->valid_bitmap &= ~(1 << VLAN_VALID);
3641 bulletin->vlan = vlan;
3643 /* is vf initialized and queue set up? */
3644 if (vf->state != VF_ENABLED ||
3645 bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) !=
3646 BNX2X_Q_LOGICAL_STATE_ACTIVE)
3647 return rc;
3649 /* User should be able to see error in system logs */
3650 if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
3651 return -EINVAL;
3653 /* must lock vfpf channel to protect against vf flows */
3654 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
3656 /* remove existing vlans */
3657 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
3658 vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
3659 rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
3660 &ramrod_flags);
3661 if (rc) {
3662 BNX2X_ERR("failed to delete vlans\n");
3663 rc = -EINVAL;
3664 goto out;
3665 }
3667 /* need to remove/add the VF's accept_any_vlan bit */
3668 accept_flags = bnx2x_leading_vfq(vf, accept_flags);
3669 if (vlan)
3670 clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
3671 else
3672 set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
3674 bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf,
3675 accept_flags);
3676 bnx2x_leading_vfq(vf, accept_flags) = accept_flags;
3677 bnx2x_config_rx_mode(bp, &rx_ramrod);
3679 /* configure the new vlan to device */
3680 memset(&ramrod_param, 0, sizeof(ramrod_param));
3681 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
3682 ramrod_param.vlan_mac_obj = vlan_obj;
3683 ramrod_param.ramrod_flags = ramrod_flags;
3684 set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
3685 &ramrod_param.user_req.vlan_mac_flags);
3686 ramrod_param.user_req.u.vlan.vlan = vlan;
3687 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
3688 rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
3689 if (rc) {
3690 BNX2X_ERR("failed to configure vlan\n");
3691 rc = -EINVAL;
3692 goto out;
3693 }
3695 /* send queue update ramrod to configure default vlan and silent
3696 * vlan removal
3697 */
3698 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
3699 q_params.cmd = BNX2X_Q_CMD_UPDATE;
3700 q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj);
3701 update_params = &q_params.params.update;
3702 __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
3703 &update_params->update_flags);
3704 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
3705 &update_params->update_flags);
3706 if (vlan == 0) {
3707 /* if vlan is 0 then we want to leave the VF traffic
3708 * untagged, and leave the incoming traffic untouched
3709 * (i.e. do not remove any vlan tags).
3711 __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
3712 &update_params->update_flags);
3713 __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
3714 &update_params->update_flags);
3715 } else {
3716 /* configure default vlan to vf queue and set silent
3717 * vlan removal (the vf remains unaware of this vlan).
3719 __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
3720 &update_params->update_flags);
3721 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
3722 &update_params->update_flags);
3723 update_params->def_vlan = vlan;
3724 update_params->silent_removal_value =
3725 vlan & VLAN_VID_MASK;
3726 update_params->silent_removal_mask = VLAN_VID_MASK;
3727 }
3729 /* Update the Queue state */
3730 rc = bnx2x_queue_state_change(bp, &q_params);
3731 if (rc) {
3732 BNX2X_ERR("Failed to configure default VLAN\n");
3733 goto out;
3734 }
3737 /* clear the flag indicating that this VF needs its vlan
3738 * (will only be set if the HV configured the Vlan before vf was
3739 * up and we were called because the VF came up later)
3740 */
3741 out:
3742 vf->cfg_flags &= ~VF_CFG_VLAN;
3743 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
3745 return rc;
3746 }
3748 /* crc is the first field in the bulletin board. Compute the crc over the
3749 * entire bulletin board excluding the crc field itself. Use the length field
3750 * as the Bulletin Board was posted by a PF with possibly a different version
3751 * from the vf which will sample it. Therefore, the length is computed by the
3752 * PF and then used blindly by the VF.
3753 */
3754 u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp,
3755 struct pf_vf_bulletin_content *bulletin)
3757 return crc32(BULLETIN_CRC_SEED,
3758 ((u8 *)bulletin) + sizeof(bulletin->crc),
3759 bulletin->length - sizeof(bulletin->crc));
3760 }
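/* Illustrative sketch (not part of the driver): how a consumer
 * validates a sampled bulletin against its embedded crc, as
 * bnx2x_sample_bulletin() does below. Helper name is illustrative only.
 */
static bool example_bulletin_crc_ok(struct bnx2x *bp,
				    struct pf_vf_bulletin_content *bulletin)
{
	return bulletin->crc == bnx2x_crc_vf_bulletin(bp, bulletin);
}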
3762 /* Check for new posts on the bulletin board */
3763 enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
3765 struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content;
3766 int attempts;
3768 /* bulletin board hasn't changed since last sample */
3769 if (bp->old_bulletin.version == bulletin.version)
3770 return PFVF_BULLETIN_UNCHANGED;
3772 /* validate crc of new bulletin board */
3773 if (bp->old_bulletin.version != bp->pf2vf_bulletin->content.version) {
3774 /* sampling the structure mid post may result in corrupted data;
3775 * validate crc to ensure coherency.
3776 */
3777 for (attempts = 0; attempts < BULLETIN_ATTEMPTS; attempts++) {
3778 bulletin = bp->pf2vf_bulletin->content;
3779 if (bulletin.crc == bnx2x_crc_vf_bulletin(bp,
3780 &bulletin))
3781 break;
3782 BNX2X_ERR("bad crc on bulletin board. Contained %x computed %x\n",
3783 bulletin.crc,
3784 bnx2x_crc_vf_bulletin(bp, &bulletin));
3785 }
3786 if (attempts >= BULLETIN_ATTEMPTS) {
3787 BNX2X_ERR("pf to vf bulletin board crc was wrong %d consecutive times. Aborting\n",
3789 return PFVF_BULLETIN_CRC_ERR;
3793 /* the mac address in bulletin board is valid and is new */
3794 if (bulletin.valid_bitmap & 1 << MAC_ADDR_VALID &&
3795 !ether_addr_equal(bulletin.mac, bp->old_bulletin.mac)) {
3796 /* update new mac to net device */
3797 memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN);
3800 /* the vlan in bulletin board is valid and is new */
3801 if (bulletin.valid_bitmap & 1 << VLAN_VALID)
3802 memcpy(&bulletin.vlan, &bp->old_bulletin.vlan, VLAN_HLEN);
3804 /* copy new bulletin board to bp */
3805 bp->old_bulletin = bulletin;
3807 return PFVF_BULLETIN_UPDATED;
3810 void bnx2x_timer_sriov(struct bnx2x *bp)
3812 bnx2x_sample_bulletin(bp);
3814 /* if channel is down we need to self destruct */
3815 if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) {
3816 smp_mb__before_clear_bit();
3817 set_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
3818 &bp->sp_rtnl_state);
3819 smp_mb__after_clear_bit();
3820 schedule_delayed_work(&bp->sp_rtnl_task, 0);
3824 void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
3826 /* vf doorbells are embedded within the regview */
3827 return bp->regview + PXP_VF_ADDR_DB_START;
3830 int bnx2x_vf_pci_alloc(struct bnx2x *bp)
3832 mutex_init(&bp->vf2pf_mutex);
3834 /* allocate vf2pf mailbox for vf to pf channel */
3835 BNX2X_PCI_ALLOC(bp->vf2pf_mbox, &bp->vf2pf_mbox_mapping,
3836 sizeof(struct bnx2x_vf_mbx_msg));
3838 /* allocate pf 2 vf bulletin board */
3839 BNX2X_PCI_ALLOC(bp->pf2vf_bulletin, &bp->pf2vf_bulletin_mapping,
3840 sizeof(union pf_vf_bulletin));
3842 return 0;
3844 alloc_mem_err:
3845 BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
3846 sizeof(struct bnx2x_vf_mbx_msg));
3847 BNX2X_PCI_FREE(bp->pf2vf_bulletin, bp->pf2vf_bulletin_mapping,
3848 sizeof(union pf_vf_bulletin));
3849 return -ENOMEM;
3850 }
3852 void bnx2x_iov_channel_down(struct bnx2x *bp)
3855 struct pf_vf_bulletin_content *bulletin;
3856 int vf_idx;
3860 for_each_vf(bp, vf_idx) {
3861 /* locate this VF's bulletin board and update the channel down
3862 * flag in it
3863 */
3864 bulletin = BP_VF_BULLETIN(bp, vf_idx);
3865 bulletin->valid_bitmap |= 1 << CHANNEL_DOWN;
3867 /* update vf bulletin board */
3868 bnx2x_post_vf_bulletin(bp, vf_idx);