1 /* bnx2x_vfpf.c: QLogic Everest network driver.
3 * Copyright 2009-2013 Broadcom Corporation
4 * Copyright 2014 QLogic Corporation
7 * Unless you and QLogic execute a separate written software license
8 * agreement governing use of this software, this software is licensed to you
9 * under the terms of the GNU General Public License version 2, available
10 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
12 * Notwithstanding the above, under no circumstances may you combine this
13 * software in any way with any other QLogic software provided under a
14 * license other than the GPL, without QLogic's express prior written
17 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
18 * Written by: Shmulik Ravid
19 * Ariel Elior <ariel.elior@qlogic.com>
23 #include "bnx2x_cmn.h"
24 #include <linux/crc32.h>
26 static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx);
28 /* place a given tlv on the tlv buffer at a given offset */
29 static void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list,
30 u16 offset, u16 type, u16 length)
32 struct channel_tlv *tl =
33 (struct channel_tlv *)(tlvs_list + offset);
39 /* Clear the mailbox and init the header of the first tlv */
40 static void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
43 mutex_lock(&bp->vf2pf_mutex);
45 DP(BNX2X_MSG_IOV, "preparing to send %d tlv over vf pf channel\n",
49 memset(bp->vf2pf_mbox, 0, sizeof(struct bnx2x_vf_mbx_msg));
51 /* init type and length */
52 bnx2x_add_tlv(bp, &first_tlv->tl, 0, type, length);
54 /* init first tlv header */
55 first_tlv->resp_msg_offset = sizeof(bp->vf2pf_mbox->req);
58 /* releases the mailbox */
59 static void bnx2x_vfpf_finalize(struct bnx2x *bp,
60 struct vfpf_first_tlv *first_tlv)
62 DP(BNX2X_MSG_IOV, "done sending [%d] tlv over vf pf channel\n",
65 mutex_unlock(&bp->vf2pf_mutex);
69 /* Finds a TLV by type in a TLV buffer; if found, returns a pointer to the TLV */
69 static void *bnx2x_search_tlv_list(struct bnx2x *bp, void *tlvs_list,
70 enum channel_tlvs req_tlv)
72 struct channel_tlv *tlv = (struct channel_tlv *)tlvs_list;
75 if (tlv->type == req_tlv)
79 BNX2X_ERR("Found TLV with length 0\n");
83 tlvs_list += tlv->length;
84 tlv = (struct channel_tlv *)tlvs_list;
85 } while (tlv->type != CHANNEL_TLV_LIST_END);
87 DP(BNX2X_MSG_IOV, "TLV list does not contain %d TLV\n", req_tlv);
92 /* list the types and lengths of the tlvs on the buffer */
93 static void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list)
96 struct channel_tlv *tlv = (struct channel_tlv *)tlvs_list;
98 while (tlv->type != CHANNEL_TLV_LIST_END) {
100 DP(BNX2X_MSG_IOV, "TLV number %d: type %d, length %d\n", i,
101 tlv->type, tlv->length);
103 /* advance to next tlv */
104 tlvs_list += tlv->length;
106 /* cast general tlv list pointer to channel tlv header*/
107 tlv = (struct channel_tlv *)tlvs_list;
111 /* break condition for this loop */
112 if (i > MAX_TLVS_IN_LIST) {
113 WARN(true, "corrupt tlvs");
118 /* output last tlv */
119 DP(BNX2X_MSG_IOV, "TLV number %d: type %d, length %d\n", i,
120 tlv->type, tlv->length);
123 /* test whether we support a tlv type */
124 bool bnx2x_tlv_supported(u16 tlvtype)
126 return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
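/* map a driver return code onto a PF-VF channel status code */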
129 static inline int bnx2x_pfvf_status_codes(int rc)
133 return PFVF_STATUS_SUCCESS;
135 return PFVF_STATUS_NO_RESOURCE;
137 return PFVF_STATUS_FAILURE;
141 static int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping)
143 struct cstorm_vf_zone_data __iomem *zone_data =
144 REG_ADDR(bp, PXP_VF_ADDR_CSDM_GLOBAL_START);
145 int tout = 100, interval = 100; /* wait for 10 seconds */
148 BNX2X_ERR("done was non zero before message to pf was sent\n");
153 /* if the PF indicated the channel is down, avoid sending a message. Return
154 * success so the calling flow can continue
156 bnx2x_sample_bulletin(bp);
157 if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) {
158 DP(BNX2X_MSG_IOV, "detecting channel down. Aborting message\n");
159 *done = PFVF_STATUS_SUCCESS;
163 /* Write message address */
164 writel(U64_LO(msg_mapping),
165 &zone_data->non_trigger.vf_pf_channel.msg_addr_lo);
166 writel(U64_HI(msg_mapping),
167 &zone_data->non_trigger.vf_pf_channel.msg_addr_hi);
169 /* make sure the address is written before FW accesses it */
172 /* Trigger the PF FW */
173 writeb_relaxed(1, &zone_data->trigger.vf_pf_channel.addr_valid);
175 /* Wait for PF to complete */
176 while ((tout >= 0) && (!*done)) {
180 /* progress indicator - HV can take its own sweet time in
183 DP_CONT(BNX2X_MSG_IOV, ".");
187 BNX2X_ERR("PF response has timed out\n");
190 DP(BNX2X_MSG_SP, "Got a response from PF\n");
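/* derive this VF's index from the ME register, which the PXP returns for VF
 * doorbell reads; retries while waiting for the PF driver to come up
 */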
194 static int bnx2x_get_vf_id(struct bnx2x *bp, u32 *vf_id)
197 int tout = 10, interval = 100; /* Wait for 1 sec */
200 /* pxp traps vf read of doorbells and returns me reg value */
201 me_reg = readl(bp->doorbells);
202 if (GOOD_ME_REG(me_reg))
207 BNX2X_ERR("Invalid ME register value: 0x%08x\n. Is pf driver up?",
209 } while (tout-- > 0);
211 if (!GOOD_ME_REG(me_reg)) {
212 BNX2X_ERR("Invalid ME register value: 0x%08x\n", me_reg);
216 DP(BNX2X_MSG_IOV, "valid ME register value: 0x%08x\n", me_reg);
218 *vf_id = (me_reg & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT;
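/* ACQUIRE: negotiate resources (rx/tx queues, SBs, mac/vlan/mc filters) with
 * the PF; if the PF cannot fulfill the request, retry with the amounts it
 * recommends
 */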
223 int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
225 int rc = 0, attempts = 0;
226 struct vfpf_acquire_tlv *req = &bp->vf2pf_mbox->req.acquire;
227 struct pfvf_acquire_resp_tlv *resp = &bp->vf2pf_mbox->resp.acquire_resp;
228 struct vfpf_port_phys_id_resp_tlv *phys_port_resp;
229 struct vfpf_fp_hsi_resp_tlv *fp_hsi_resp;
231 bool resources_acquired = false;
233 /* clear mailbox and prep first tlv */
234 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_ACQUIRE, sizeof(*req));
236 if (bnx2x_get_vf_id(bp, &vf_id)) {
241 req->vfdev_info.vf_id = vf_id;
242 req->vfdev_info.vf_os = 0;
243 req->vfdev_info.fp_hsi_ver = ETH_FP_HSI_VERSION;
245 req->resc_request.num_rxqs = rx_count;
246 req->resc_request.num_txqs = tx_count;
247 req->resc_request.num_sbs = bp->igu_sb_cnt;
248 req->resc_request.num_mac_filters = VF_ACQUIRE_MAC_FILTERS;
249 req->resc_request.num_mc_filters = VF_ACQUIRE_MC_FILTERS;
250 req->resc_request.num_vlan_filters = VF_ACQUIRE_VLAN_FILTERS;
252 /* pf 2 vf bulletin board address */
253 req->bulletin_addr = bp->pf2vf_bulletin_mapping;
255 /* Request physical port identifier */
256 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length,
257 CHANNEL_TLV_PHYS_PORT_ID, sizeof(struct channel_tlv));
259 /* Bulletin support for bulletin board with length > legacy length */
260 req->vfdev_info.caps |= VF_CAP_SUPPORT_EXT_BULLETIN;
261 /* vlan filtering is supported */
262 req->vfdev_info.caps |= VF_CAP_SUPPORT_VLAN_FILTER;
264 /* add list termination tlv */
265 bnx2x_add_tlv(bp, req,
266 req->first_tlv.tl.length + sizeof(struct channel_tlv),
267 CHANNEL_TLV_LIST_END,
268 sizeof(struct channel_list_end_tlv));
270 /* output tlvs list */
271 bnx2x_dp_tlv_list(bp, req);
273 while (!resources_acquired) {
274 DP(BNX2X_MSG_SP, "attempting to acquire resources\n");
276 /* send acquire request */
277 rc = bnx2x_send_msg2pf(bp,
279 bp->vf2pf_mbox_mapping);
285 /* copy acquire response from buffer to bp */
286 memcpy(&bp->acquire_resp, resp, sizeof(bp->acquire_resp));
290 /* test whether the PF accepted our request. If not, humble
291 * the request and try again.
293 if (bp->acquire_resp.hdr.status == PFVF_STATUS_SUCCESS) {
294 DP(BNX2X_MSG_SP, "resources acquired\n");
295 resources_acquired = true;
296 } else if (bp->acquire_resp.hdr.status ==
297 PFVF_STATUS_NO_RESOURCE &&
298 attempts < VF_ACQUIRE_THRESH) {
300 "PF unwilling to fulfill resource request. Try PF recommended amount\n");
302 /* humble our request */
303 req->resc_request.num_txqs =
304 min(req->resc_request.num_txqs,
305 bp->acquire_resp.resc.num_txqs);
306 req->resc_request.num_rxqs =
307 min(req->resc_request.num_rxqs,
308 bp->acquire_resp.resc.num_rxqs);
309 req->resc_request.num_sbs =
310 min(req->resc_request.num_sbs,
311 bp->acquire_resp.resc.num_sbs);
312 req->resc_request.num_mac_filters =
313 min(req->resc_request.num_mac_filters,
314 bp->acquire_resp.resc.num_mac_filters);
315 req->resc_request.num_vlan_filters =
316 min(req->resc_request.num_vlan_filters,
317 bp->acquire_resp.resc.num_vlan_filters);
318 req->resc_request.num_mc_filters =
319 min(req->resc_request.num_mc_filters,
320 bp->acquire_resp.resc.num_mc_filters);
322 /* Clear response buffer */
323 memset(&bp->vf2pf_mbox->resp, 0,
324 sizeof(union pfvf_tlvs));
326 /* Determine the reason the PF failed the acquire process */
327 fp_hsi_resp = bnx2x_search_tlv_list(bp, resp,
328 CHANNEL_TLV_FP_HSI_SUPPORT);
329 if (fp_hsi_resp && !fp_hsi_resp->is_supported)
330 BNX2X_ERR("Old hypervisor - doesn't support current fastpath HSI version; Need to downgrade VF driver [or upgrade hypervisor]\n");
332 BNX2X_ERR("Failed to get the requested amount of resources: %d. Breaking...\n",
333 bp->acquire_resp.hdr.status);
339 /* Retrieve physical port id (if possible) */
340 phys_port_resp = (struct vfpf_port_phys_id_resp_tlv *)
341 bnx2x_search_tlv_list(bp, resp,
342 CHANNEL_TLV_PHYS_PORT_ID);
343 if (phys_port_resp) {
344 memcpy(bp->phys_port_id, phys_port_resp->id, ETH_ALEN);
345 bp->flags |= HAS_PHYS_PORT_ID;
348 /* Old hypervisors might not even support the FP_HSI_SUPPORT TLV.
349 * If that's the case, we need to make certain the FW we require
350 * is one such a hypervisor supports [i.e., v0-v2].
352 fp_hsi_resp = bnx2x_search_tlv_list(bp, resp,
353 CHANNEL_TLV_FP_HSI_SUPPORT);
354 if (!fp_hsi_resp && (ETH_FP_HSI_VERSION > ETH_FP_HSI_VER_2)) {
355 BNX2X_ERR("Old hypervisor - need to downgrade VF's driver\n");
357 /* Since acquire succeeded on the PF side, we need to send a
358 * release message in order to allow future probes.
360 bnx2x_vfpf_finalize(bp, &req->first_tlv);
361 bnx2x_vfpf_release(bp);
368 bp->common.chip_id |= (bp->acquire_resp.pfdev_info.chip_num & 0xffff);
369 bp->link_params.chip_id = bp->common.chip_id;
370 bp->db_size = bp->acquire_resp.pfdev_info.db_size;
371 bp->common.int_block = INT_BLOCK_IGU;
372 bp->common.chip_port_mode = CHIP_2_PORT_MODE;
376 bp->common.flash_size = 0;
378 NO_WOL_FLAG | NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG | NO_FCOE_FLAG;
379 bp->igu_sb_cnt = bp->acquire_resp.resc.num_sbs;
380 bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id;
381 bp->vlan_credit = bp->acquire_resp.resc.num_vlan_filters;
383 strscpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver,
386 if (is_valid_ether_addr(bp->acquire_resp.resc.current_mac_addr))
387 eth_hw_addr_set(bp->dev,
388 bp->acquire_resp.resc.current_mac_addr);
391 bnx2x_vfpf_finalize(bp, &req->first_tlv);
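/* RELEASE: ask the PF to free all resources held by this VF */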
395 int bnx2x_vfpf_release(struct bnx2x *bp)
397 struct vfpf_release_tlv *req = &bp->vf2pf_mbox->req.release;
398 struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
401 /* clear mailbox and prep first tlv */
402 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_RELEASE, sizeof(*req));
404 if (bnx2x_get_vf_id(bp, &vf_id)) {
411 /* add list termination tlv */
412 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
413 sizeof(struct channel_list_end_tlv));
415 /* output tlvs list */
416 bnx2x_dp_tlv_list(bp, req);
418 /* send release request */
419 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
425 if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
427 DP(BNX2X_MSG_SP, "vf released\n");
429 /* PF reports error */
430 BNX2X_ERR("PF failed our release request - are we out of sync? Response status: %d\n",
436 bnx2x_vfpf_finalize(bp, &req->first_tlv);
441 /* Tell PF about SB addresses */
442 int bnx2x_vfpf_init(struct bnx2x *bp)
444 struct vfpf_init_tlv *req = &bp->vf2pf_mbox->req.init;
445 struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
448 /* clear mailbox and prep first tlv */
449 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_INIT, sizeof(*req));
452 for_each_eth_queue(bp, i)
453 req->sb_addr[i] = (dma_addr_t)bnx2x_fp(bp, i,
456 /* statistics - the request only supports a single queue for now */
457 req->stats_addr = bp->fw_stats_data_mapping +
458 offsetof(struct bnx2x_fw_stats_data, queue_stats);
460 req->stats_stride = sizeof(struct per_queue_stats);
462 /* add list termination tlv */
463 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
464 sizeof(struct channel_list_end_tlv));
466 /* output tlvs list */
467 bnx2x_dp_tlv_list(bp, req);
469 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
473 if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
474 BNX2X_ERR("INIT VF failed: %d. Breaking...\n",
480 DP(BNX2X_MSG_SP, "INIT VF Succeeded\n");
482 bnx2x_vfpf_finalize(bp, &req->first_tlv);
487 /* CLOSE VF - opposite to INIT_VF */
488 void bnx2x_vfpf_close_vf(struct bnx2x *bp)
490 struct vfpf_close_tlv *req = &bp->vf2pf_mbox->req.close;
491 struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
495 /* If we haven't got a valid VF id, there is no sense in
496 * continuing to send messages
498 if (bnx2x_get_vf_id(bp, &vf_id))
501 /* Close the queues */
502 for_each_queue(bp, i)
503 bnx2x_vfpf_teardown_queue(bp, i);
506 bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index, false);
508 /* clear mailbox and prep first tlv */
509 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_CLOSE, sizeof(*req));
513 /* add list termination tlv */
514 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
515 sizeof(struct channel_list_end_tlv));
517 /* output tlvs list */
518 bnx2x_dp_tlv_list(bp, req);
520 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
523 BNX2X_ERR("Sending CLOSE failed. rc was: %d\n", rc);
525 else if (resp->hdr.status != PFVF_STATUS_SUCCESS)
526 BNX2X_ERR("Sending CLOSE failed: pf response was %d\n",
529 bnx2x_vfpf_finalize(bp, &req->first_tlv);
532 if (!bp->nic_stopped) {
533 /* Disable HW interrupts, NAPI */
534 bnx2x_netif_stop(bp, 0);
535 /* Delete all NAPI objects */
536 bnx2x_del_all_napi(bp);
540 bp->nic_stopped = true;
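/* init the SP objects (mac, vlan, vlan-mac, mcast and rss) that are anchored
 * at the VF's leading queue
 */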
544 static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
545 struct bnx2x_vf_queue *q)
547 u8 cl_id = vfq_cl_id(vf, q);
548 u8 func_id = FW_VF_HANDLE(vf->abs_vfid);
551 bnx2x_init_mac_obj(bp, &q->mac_obj,
552 cl_id, q->cid, func_id,
553 bnx2x_vf_sp(bp, vf, mac_rdata),
554 bnx2x_vf_sp_map(bp, vf, mac_rdata),
555 BNX2X_FILTER_MAC_PENDING,
557 BNX2X_OBJ_TYPE_RX_TX,
560 bnx2x_init_vlan_obj(bp, &q->vlan_obj,
561 cl_id, q->cid, func_id,
562 bnx2x_vf_sp(bp, vf, vlan_rdata),
563 bnx2x_vf_sp_map(bp, vf, vlan_rdata),
564 BNX2X_FILTER_VLAN_PENDING,
566 BNX2X_OBJ_TYPE_RX_TX,
569 bnx2x_init_vlan_mac_obj(bp, &q->vlan_mac_obj,
570 cl_id, q->cid, func_id,
571 bnx2x_vf_sp(bp, vf, vlan_mac_rdata),
572 bnx2x_vf_sp_map(bp, vf, vlan_mac_rdata),
573 BNX2X_FILTER_VLAN_MAC_PENDING,
575 BNX2X_OBJ_TYPE_RX_TX,
579 bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
580 q->cid, func_id, func_id,
581 bnx2x_vf_sp(bp, vf, mcast_rdata),
582 bnx2x_vf_sp_map(bp, vf, mcast_rdata),
583 BNX2X_FILTER_MCAST_PENDING,
585 BNX2X_OBJ_TYPE_RX_TX);
588 bnx2x_init_rss_config_obj(bp, &vf->rss_conf_obj, cl_id, q->cid,
590 bnx2x_vf_sp(bp, vf, rss_rdata),
591 bnx2x_vf_sp_map(bp, vf, rss_rdata),
592 BNX2X_FILTER_RSS_CONF_PENDING,
594 BNX2X_OBJ_TYPE_RX_TX);
596 vf->leading_rss = cl_id;
597 q->is_leading = true;
598 q->sp_initialized = true;
601 /* ask the pf to open a queue for the vf */
602 int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp,
605 struct vfpf_setup_q_tlv *req = &bp->vf2pf_mbox->req.setup_q;
606 struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
607 u8 fp_idx = fp->index;
608 u16 tpa_agg_size = 0, flags = 0;
611 /* clear mailbox and prep first tlv */
612 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SETUP_Q, sizeof(*req));
614 /* select tpa mode to request */
615 if (fp->mode != TPA_MODE_DISABLED) {
616 flags |= VFPF_QUEUE_FLG_TPA;
617 flags |= VFPF_QUEUE_FLG_TPA_IPV6;
618 if (fp->mode == TPA_MODE_GRO)
619 flags |= VFPF_QUEUE_FLG_TPA_GRO;
620 tpa_agg_size = TPA_AGG_SIZE;
624 flags |= VFPF_QUEUE_FLG_LEADING_RSS;
626 /* calculate queue flags */
627 flags |= VFPF_QUEUE_FLG_STATS;
628 flags |= VFPF_QUEUE_FLG_CACHE_ALIGN;
629 flags |= VFPF_QUEUE_FLG_VLAN;
632 req->vf_qid = fp_idx;
633 req->param_valid = VFPF_RXQ_VALID | VFPF_TXQ_VALID;
636 req->rxq.rcq_addr = fp->rx_comp_mapping;
637 req->rxq.rcq_np_addr = fp->rx_comp_mapping + BCM_PAGE_SIZE;
638 req->rxq.rxq_addr = fp->rx_desc_mapping;
639 req->rxq.sge_addr = fp->rx_sge_mapping;
640 req->rxq.vf_sb = fp_idx;
641 req->rxq.sb_index = HC_INDEX_ETH_RX_CQ_CONS;
642 req->rxq.hc_rate = bp->rx_ticks ? 1000000/bp->rx_ticks : 0;
643 req->rxq.mtu = bp->dev->mtu;
644 req->rxq.buf_sz = fp->rx_buf_size;
645 req->rxq.sge_buf_sz = BCM_PAGE_SIZE * PAGES_PER_SGE;
646 req->rxq.tpa_agg_sz = tpa_agg_size;
647 req->rxq.max_sge_pkt = SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
648 req->rxq.max_sge_pkt = ((req->rxq.max_sge_pkt + PAGES_PER_SGE - 1) &
649 (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
650 req->rxq.flags = flags;
651 req->rxq.drop_flags = 0;
652 req->rxq.cache_line_log = BNX2X_RX_ALIGN_SHIFT;
653 req->rxq.stat_id = -1; /* No stats at the moment */
656 req->txq.txq_addr = fp->txdata_ptr[FIRST_TX_COS_INDEX]->tx_desc_mapping;
657 req->txq.vf_sb = fp_idx;
658 req->txq.sb_index = HC_INDEX_ETH_TX_CQ_CONS_COS0;
659 req->txq.hc_rate = bp->tx_ticks ? 1000000/bp->tx_ticks : 0;
660 req->txq.flags = flags;
661 req->txq.traffic_type = LLFC_TRAFFIC_TYPE_NW;
663 /* add list termination tlv */
664 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
665 sizeof(struct channel_list_end_tlv));
667 /* output tlvs list */
668 bnx2x_dp_tlv_list(bp, req);
670 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
672 BNX2X_ERR("Sending SETUP_Q message for queue[%d] failed!\n",
675 if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
676 BNX2X_ERR("Status of SETUP_Q for queue[%d] is %d\n",
677 fp_idx, resp->hdr.status);
681 bnx2x_vfpf_finalize(bp, &req->first_tlv);
686 static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx)
688 struct vfpf_q_op_tlv *req = &bp->vf2pf_mbox->req.q_op;
689 struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
692 /* clear mailbox and prep first tlv */
693 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_TEARDOWN_Q,
698 /* add list termination tlv */
699 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
700 sizeof(struct channel_list_end_tlv));
702 /* output tlvs list */
703 bnx2x_dp_tlv_list(bp, req);
705 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
708 BNX2X_ERR("Sending TEARDOWN for queue %d failed: %d\n", qidx,
713 /* PF failed the transaction */
714 if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
715 BNX2X_ERR("TEARDOWN for queue %d failed: %d\n", qidx,
721 bnx2x_vfpf_finalize(bp, &req->first_tlv);
726 /* request pf to add a mac for the vf */
727 int bnx2x_vfpf_config_mac(struct bnx2x *bp, const u8 *addr, u8 vf_qid, bool set)
729 struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
730 struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
731 struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content;
734 /* clear mailbox and prep first tlv */
735 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
738 req->flags = VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED;
739 req->vf_qid = vf_qid;
740 req->n_mac_vlan_filters = 1;
742 req->filters[0].flags = VFPF_Q_FILTER_DEST_MAC_VALID;
744 req->filters[0].flags |= VFPF_Q_FILTER_SET;
746 /* sample bulletin board for new mac */
747 bnx2x_sample_bulletin(bp);
749 /* copy mac from device to request */
750 memcpy(req->filters[0].mac, addr, ETH_ALEN);
752 /* add list termination tlv */
753 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
754 sizeof(struct channel_list_end_tlv));
756 /* output tlvs list */
757 bnx2x_dp_tlv_list(bp, req);
759 /* send message to pf */
760 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
762 BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
766 /* failure may mean PF was configured with a new mac for us */
767 while (resp->hdr.status == PFVF_STATUS_FAILURE) {
769 "vfpf SET MAC failed. Check bulletin board for new posts\n");
771 /* copy mac from bulletin to device */
772 eth_hw_addr_set(bp->dev, bulletin.mac);
774 /* check if bulletin board was updated */
775 if (bnx2x_sample_bulletin(bp) == PFVF_BULLETIN_UPDATED) {
776 /* copy mac from device to request */
777 memcpy(req->filters[0].mac, bp->dev->dev_addr,
780 /* send message to pf */
781 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status,
782 bp->vf2pf_mbox_mapping);
784 /* no new info in bulletin */
789 if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
790 BNX2X_ERR("vfpf SET MAC failed: %d\n", resp->hdr.status);
794 bnx2x_vfpf_finalize(bp, &req->first_tlv);
799 /* request pf to configure the rss table for the vf queues */
800 int bnx2x_vfpf_config_rss(struct bnx2x *bp,
801 struct bnx2x_config_rss_params *params)
803 struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
804 struct vfpf_rss_tlv *req = &bp->vf2pf_mbox->req.update_rss;
807 /* clear mailbox and prep first tlv */
808 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_UPDATE_RSS,
811 /* add list termination tlv */
812 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
813 sizeof(struct channel_list_end_tlv));
815 memcpy(req->ind_table, params->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
816 memcpy(req->rss_key, params->rss_key, sizeof(params->rss_key));
817 req->ind_table_size = T_ETH_INDIRECTION_TABLE_SIZE;
818 req->rss_key_size = T_ETH_RSS_KEY;
819 req->rss_result_mask = params->rss_result_mask;
821 /* flags handled individually for backward/forward compatibility */
822 if (params->rss_flags & (1 << BNX2X_RSS_MODE_DISABLED))
823 req->rss_flags |= VFPF_RSS_MODE_DISABLED;
824 if (params->rss_flags & (1 << BNX2X_RSS_MODE_REGULAR))
825 req->rss_flags |= VFPF_RSS_MODE_REGULAR;
826 if (params->rss_flags & (1 << BNX2X_RSS_SET_SRCH))
827 req->rss_flags |= VFPF_RSS_SET_SRCH;
828 if (params->rss_flags & (1 << BNX2X_RSS_IPV4))
829 req->rss_flags |= VFPF_RSS_IPV4;
830 if (params->rss_flags & (1 << BNX2X_RSS_IPV4_TCP))
831 req->rss_flags |= VFPF_RSS_IPV4_TCP;
832 if (params->rss_flags & (1 << BNX2X_RSS_IPV4_UDP))
833 req->rss_flags |= VFPF_RSS_IPV4_UDP;
834 if (params->rss_flags & (1 << BNX2X_RSS_IPV6))
835 req->rss_flags |= VFPF_RSS_IPV6;
836 if (params->rss_flags & (1 << BNX2X_RSS_IPV6_TCP))
837 req->rss_flags |= VFPF_RSS_IPV6_TCP;
838 if (params->rss_flags & (1 << BNX2X_RSS_IPV6_UDP))
839 req->rss_flags |= VFPF_RSS_IPV6_UDP;
841 DP(BNX2X_MSG_IOV, "rss flags %x\n", req->rss_flags);
843 /* output tlvs list */
844 bnx2x_dp_tlv_list(bp, req);
846 /* send message to pf */
847 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
849 BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
853 if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
854 /* Since older drivers don't support this feature (and VF has
855 * no way of knowing other than failing this), don't propagate
856 * an error in this case.
859 "Failed to send rss message to PF over VF-PF channel [%d]\n",
863 bnx2x_vfpf_finalize(bp, &req->first_tlv);
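/* request pf to update the vf's multicast MAC list */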
868 int bnx2x_vfpf_set_mcast(struct net_device *dev)
870 struct bnx2x *bp = netdev_priv(dev);
871 struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
872 struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
874 struct netdev_hw_addr *ha;
876 if (bp->state != BNX2X_STATE_OPEN) {
877 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
881 /* clear mailbox and prep first tlv */
882 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
885 /* Get Rx mode requested */
886 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
888 /* We support PFVF_MAX_MULTICAST_PER_VF mcast addresses tops */
889 if (netdev_mc_count(dev) > PFVF_MAX_MULTICAST_PER_VF) {
891 "VF supports not more than %d multicast MAC addresses\n",
892 PFVF_MAX_MULTICAST_PER_VF);
897 netdev_for_each_mc_addr(ha, dev) {
898 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
900 memcpy(req->multicast[i], bnx2x_mc_addr(ha), ETH_ALEN);
904 req->n_multicast = i;
905 req->flags |= VFPF_SET_Q_FILTERS_MULTICAST_CHANGED;
908 /* add list termination tlv */
909 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
910 sizeof(struct channel_list_end_tlv));
912 /* output tlvs list */
913 bnx2x_dp_tlv_list(bp, req);
914 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
916 BNX2X_ERR("Sending a message failed: %d\n", rc);
920 if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
921 BNX2X_ERR("Set Rx mode/multicast failed: %d\n",
926 bnx2x_vfpf_finalize(bp, &req->first_tlv);
931 /* request pf to add a vlan for the vf */
932 int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add)
934 struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
935 struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
938 if (!(bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER)) {
939 DP(BNX2X_MSG_IOV, "HV does not support vlan filtering\n");
943 /* clear mailbox and prep first tlv */
944 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
947 req->flags = VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED;
948 req->vf_qid = vf_qid;
949 req->n_mac_vlan_filters = 1;
951 req->filters[0].flags = VFPF_Q_FILTER_VLAN_TAG_VALID;
954 req->filters[0].flags |= VFPF_Q_FILTER_SET;
956 /* sample bulletin board for hypervisor vlan */
957 bnx2x_sample_bulletin(bp);
959 if (bp->shadow_bulletin.content.valid_bitmap & 1 << VLAN_VALID) {
960 BNX2X_ERR("Hypervisor will decline the request, avoiding\n");
965 req->filters[0].vlan_tag = vid;
967 /* add list termination tlv */
968 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
969 sizeof(struct channel_list_end_tlv));
971 /* output tlvs list */
972 bnx2x_dp_tlv_list(bp, req);
974 /* send message to pf */
975 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
977 BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
981 if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
982 BNX2X_ERR("vfpf %s VLAN %d failed\n", add ? "add" : "del",
987 bnx2x_vfpf_finalize(bp, &req->first_tlv);
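/* request pf to configure the vf's rx mode (accept flags) */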
992 int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
994 int mode = bp->rx_mode;
995 struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
996 struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
999 /* clear mailbox and prep first tlv */
1000 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
1003 DP(NETIF_MSG_IFUP, "Rx mode is %d\n", mode);
1005 /* Ignore everything except MODE_NONE */
1006 if (mode == BNX2X_RX_MODE_NONE) {
1007 req->rx_mask = VFPF_RX_MASK_ACCEPT_NONE;
1009 /* Current PF driver will not look at the specific flags,
1010 * but they are required when working with older drivers on hv.
1012 req->rx_mask = VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST;
1013 req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
1014 req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
1015 if (mode == BNX2X_RX_MODE_PROMISC)
1016 req->rx_mask |= VFPF_RX_MASK_ACCEPT_ANY_VLAN;
1019 if (bp->accept_any_vlan)
1020 req->rx_mask |= VFPF_RX_MASK_ACCEPT_ANY_VLAN;
1022 req->flags |= VFPF_SET_Q_FILTERS_RX_MASK_CHANGED;
1025 /* add list termination tlv */
1026 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
1027 sizeof(struct channel_list_end_tlv));
1029 /* output tlvs list */
1030 bnx2x_dp_tlv_list(bp, req);
1032 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
1034 BNX2X_ERR("Sending a message failed: %d\n", rc);
1036 if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
1037 BNX2X_ERR("Set Rx mode failed: %d\n", resp->hdr.status);
1041 bnx2x_vfpf_finalize(bp, &req->first_tlv);
1046 /* General service functions */
1047 static void storm_memset_vf_mbx_ack(struct bnx2x *bp, u16 abs_fid)
1049 u32 addr = BAR_CSTRORM_INTMEM +
1050 CSTORM_VF_PF_CHANNEL_STATE_OFFSET(abs_fid);
1052 REG_WR8(bp, addr, VF_PF_CHANNEL_STATE_READY);
1055 static void storm_memset_vf_mbx_valid(struct bnx2x *bp, u16 abs_fid)
1057 u32 addr = BAR_CSTRORM_INTMEM +
1058 CSTORM_VF_PF_CHANNEL_VALID_OFFSET(abs_fid);
1060 REG_WR8(bp, addr, 1);
1063 /* enable vf_pf mailbox (aka vf-pf-channel) */
1064 void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid)
1066 bnx2x_vf_flr_clnup_epilog(bp, abs_vfid);
1068 /* enable the mailbox in the FW */
1069 storm_memset_vf_mbx_ack(bp, abs_vfid);
1070 storm_memset_vf_mbx_valid(bp, abs_vfid);
1072 /* enable the VF access to the mailbox */
1073 bnx2x_vf_enable_access(bp, abs_vfid);
1076 /* this works only on !E1h */
1077 static int bnx2x_copy32_vf_dmae(struct bnx2x *bp, u8 from_vf,
1078 dma_addr_t pf_addr, u8 vfid, u32 vf_addr_hi,
1079 u32 vf_addr_lo, u32 len32)
1081 struct dmae_command dmae;
1083 if (CHIP_IS_E1x(bp)) {
1084 BNX2X_ERR("Chip revision does not support VFs\n");
1085 return DMAE_NOT_RDY;
1088 if (!bp->dmae_ready) {
1089 BNX2X_ERR("DMAE is not ready, can not copy\n");
1090 return DMAE_NOT_RDY;
1093 /* set opcode and fixed command fields */
1094 bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_PCI);
1097 dmae.opcode_iov = (vfid << DMAE_COMMAND_SRC_VFID_SHIFT) |
1098 (DMAE_SRC_VF << DMAE_COMMAND_SRC_VFPF_SHIFT) |
1099 (DMAE_DST_PF << DMAE_COMMAND_DST_VFPF_SHIFT);
1101 dmae.opcode |= (DMAE_C_DST << DMAE_COMMAND_C_FUNC_SHIFT);
1103 dmae.src_addr_lo = vf_addr_lo;
1104 dmae.src_addr_hi = vf_addr_hi;
1105 dmae.dst_addr_lo = U64_LO(pf_addr);
1106 dmae.dst_addr_hi = U64_HI(pf_addr);
1108 dmae.opcode_iov = (vfid << DMAE_COMMAND_DST_VFID_SHIFT) |
1109 (DMAE_DST_VF << DMAE_COMMAND_DST_VFPF_SHIFT) |
1110 (DMAE_SRC_PF << DMAE_COMMAND_SRC_VFPF_SHIFT);
1112 dmae.opcode |= (DMAE_C_SRC << DMAE_COMMAND_C_FUNC_SHIFT);
1114 dmae.src_addr_lo = U64_LO(pf_addr);
1115 dmae.src_addr_hi = U64_HI(pf_addr);
1116 dmae.dst_addr_lo = vf_addr_lo;
1117 dmae.dst_addr_hi = vf_addr_hi;
1121 /* issue the command and wait for completion */
1122 return bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
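/* prepare a single-TLV response in the PF-side mailbox: a status TLV sized
 * for the request type plus a list-end TLV
 */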
1125 static void bnx2x_vf_mbx_resp_single_tlv(struct bnx2x *bp,
1126 struct bnx2x_virtf *vf)
1128 struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);
1131 /* prepare response */
1132 type = mbx->first_tlv.tl.type;
1133 length = type == CHANNEL_TLV_ACQUIRE ?
1134 sizeof(struct pfvf_acquire_resp_tlv) :
1135 sizeof(struct pfvf_general_resp_tlv);
1136 bnx2x_add_tlv(bp, &mbx->msg->resp, 0, type, length);
1137 bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END,
1138 sizeof(struct channel_list_end_tlv));
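/* DMA the prepared response into the VF's mailbox; the first u64, which holds
 * the status/done field, is copied last so the VF never sees a partial response
 */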
1141 static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp,
1142 struct bnx2x_virtf *vf,
1145 struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);
1146 struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp;
1151 bnx2x_dp_tlv_list(bp, resp);
1152 DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",
1153 mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);
1155 resp->hdr.status = bnx2x_pfvf_status_codes(vf_rc);
1158 vf_addr = HILO_U64(mbx->vf_addr_hi, mbx->vf_addr_lo) +
1159 mbx->first_tlv.resp_msg_offset;
1160 pf_addr = mbx->msg_mapping +
1161 offsetof(struct bnx2x_vf_mbx_msg, resp);
1163 /* Copy the response buffer. The first u64 is written afterwards, as
1164 * the vf is sensitive to the header being written
1166 vf_addr += sizeof(u64);
1167 pf_addr += sizeof(u64);
1168 rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
1171 (sizeof(union pfvf_tlvs) - sizeof(u64))/4);
1173 BNX2X_ERR("Failed to copy response body to VF %d\n",
1177 vf_addr -= sizeof(u64);
1178 pf_addr -= sizeof(u64);
1181 storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
1183 /* copy the response header including status-done field,
1184 * must be last dmae, must be after FW is acked
1186 rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
1191 /* unlock channel mutex */
1192 bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);
1195 BNX2X_ERR("Failed to copy response status to VF %d\n",
1202 bnx2x_vf_release(bp, vf);
1205 static void bnx2x_vf_mbx_resp(struct bnx2x *bp,
1206 struct bnx2x_virtf *vf,
1209 bnx2x_vf_mbx_resp_single_tlv(bp, vf);
1210 bnx2x_vf_mbx_resp_send_msg(bp, vf, rc);
1213 static void bnx2x_vf_mbx_resp_phys_port(struct bnx2x *bp,
1214 struct bnx2x_virtf *vf,
1218 struct vfpf_port_phys_id_resp_tlv *port_id;
1220 if (!(bp->flags & HAS_PHYS_PORT_ID))
1223 bnx2x_add_tlv(bp, buffer, *offset, CHANNEL_TLV_PHYS_PORT_ID,
1224 sizeof(struct vfpf_port_phys_id_resp_tlv));
1226 port_id = (struct vfpf_port_phys_id_resp_tlv *)
1227 (((u8 *)buffer) + *offset);
1228 memcpy(port_id->id, bp->phys_port_id, ETH_ALEN);
1230 /* Offset should continue representing the offset to the tail
1231 * of TLV data (outside this function scope)
1233 *offset += sizeof(struct vfpf_port_phys_id_resp_tlv);
1236 static void bnx2x_vf_mbx_resp_fp_hsi_ver(struct bnx2x *bp,
1237 struct bnx2x_virtf *vf,
1241 struct vfpf_fp_hsi_resp_tlv *fp_hsi;
1243 bnx2x_add_tlv(bp, buffer, *offset, CHANNEL_TLV_FP_HSI_SUPPORT,
1244 sizeof(struct vfpf_fp_hsi_resp_tlv));
1246 fp_hsi = (struct vfpf_fp_hsi_resp_tlv *)
1247 (((u8 *)buffer) + *offset);
1248 fp_hsi->is_supported = (vf->fp_hsi > ETH_FP_HSI_VERSION) ? 0 : 1;
1250 /* Offset should continue representing the offset to the tail
1251 * of TLV data (outside this function scope)
1253 *offset += sizeof(struct vfpf_fp_hsi_resp_tlv);
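/* build the PF's reply to an ACQUIRE request - pf device info, granted
 * resource counts and optional TLVs (physical port id, fp HSI support) -
 * and send it to the VF
 */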
1256 static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
1257 struct bnx2x_vf_mbx *mbx, int vfop_status)
1260 struct pfvf_acquire_resp_tlv *resp = &mbx->msg->resp.acquire_resp;
1261 struct pf_vf_resc *resc = &resp->resc;
1262 u8 status = bnx2x_pfvf_status_codes(vfop_status);
1265 memset(resp, 0, sizeof(*resp));
1267 /* fill in pfdev info */
1268 resp->pfdev_info.chip_num = bp->common.chip_id;
1269 resp->pfdev_info.db_size = bp->db_size;
1270 resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2;
1271 resp->pfdev_info.pf_cap = (PFVF_CAP_RSS |
1273 PFVF_CAP_TPA_UPDATE |
1274 PFVF_CAP_VLAN_FILTER);
1275 bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver,
1276 sizeof(resp->pfdev_info.fw_ver));
1278 if (status == PFVF_STATUS_NO_RESOURCE ||
1279 status == PFVF_STATUS_SUCCESS) {
1280 /* set resource numbers; if status equals NO_RESOURCE these
1281 * are the max possible numbers
1283 resc->num_rxqs = vf_rxq_count(vf) ? :
1284 bnx2x_vf_max_queue_cnt(bp, vf);
1285 resc->num_txqs = vf_txq_count(vf) ? :
1286 bnx2x_vf_max_queue_cnt(bp, vf);
1287 resc->num_sbs = vf_sb_count(vf);
1288 resc->num_mac_filters = vf_mac_rules_cnt(vf);
1289 resc->num_vlan_filters = vf_vlan_rules_cnt(vf);
1290 resc->num_mc_filters = 0;
1292 if (status == PFVF_STATUS_SUCCESS) {
1293 /* fill in the allocated resources */
1294 struct pf_vf_bulletin_content *bulletin =
1295 BP_VF_BULLETIN(bp, vf->index);
1299 vfq_qzone_id(vf, vfq_get(vf, i));
1301 for_each_vf_sb(vf, i) {
1302 resc->hw_sbs[i].hw_sb_id = vf_igu_sb(vf, i);
1303 resc->hw_sbs[i].sb_qid = vf_hc_qzone(vf, i);
1306 /* if a mac has been set for this vf, supply it */
1307 if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID) {
1308 memcpy(resc->current_mac_addr, bulletin->mac,
1314 DP(BNX2X_MSG_IOV, "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%x\n"
1315 "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d, n_mcs-%d, fw_ver: '%s'\n",
1317 resp->pfdev_info.chip_num,
1318 resp->pfdev_info.db_size,
1319 resp->pfdev_info.indices_per_sb,
1320 resp->pfdev_info.pf_cap,
1324 resc->num_mac_filters,
1325 resc->num_vlan_filters,
1326 resc->num_mc_filters,
1327 resp->pfdev_info.fw_ver);
1329 DP_CONT(BNX2X_MSG_IOV, "hw_qids- [ ");
1330 for (i = 0; i < vf_rxq_count(vf); i++)
1331 DP_CONT(BNX2X_MSG_IOV, "%d ", resc->hw_qid[i]);
1332 DP_CONT(BNX2X_MSG_IOV, "], sb_info- [ ");
1333 for (i = 0; i < vf_sb_count(vf); i++)
1334 DP_CONT(BNX2X_MSG_IOV, "%d:%d ",
1335 resc->hw_sbs[i].hw_sb_id,
1336 resc->hw_sbs[i].sb_qid);
1337 DP_CONT(BNX2X_MSG_IOV, "]\n");
1339 /* prepare response */
1340 length = sizeof(struct pfvf_acquire_resp_tlv);
1341 bnx2x_add_tlv(bp, &mbx->msg->resp, 0, CHANNEL_TLV_ACQUIRE, length);
1343 /* Handle possible VF requests for physical port identifiers.
1344 * 'length' should continue to indicate the offset of the first empty
1345 * place in the buffer (i.e., where next TLV should be inserted)
1347 if (bnx2x_search_tlv_list(bp, &mbx->msg->req,
1348 CHANNEL_TLV_PHYS_PORT_ID))
1349 bnx2x_vf_mbx_resp_phys_port(bp, vf, &mbx->msg->resp, &length);
1351 /* `New' vfs will want to know if fastpath HSI is supported, since
1352 * if that's not the case they could print into system log the fact
1353 * the driver version must be updated.
1355 bnx2x_vf_mbx_resp_fp_hsi_ver(bp, vf, &mbx->msg->resp, &length);
1357 bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END,
1358 sizeof(struct channel_list_end_tlv));
1360 /* send the response */
1361 bnx2x_vf_mbx_resp_send_msg(bp, vf, vfop_status);
1364 static bool bnx2x_vf_mbx_is_windows_vm(struct bnx2x *bp,
1365 struct vfpf_acquire_tlv *acquire)
1367 /* Windows driver does one of three things:
1368 * 1. Old driver doesn't have bulletin board address set.
1369 * 2. 'Middle' driver sends mc_num == 32.
1370 * 3. New driver sets the OS field.
1372 if (!acquire->bulletin_addr ||
1373 acquire->resc_request.num_mc_filters == 32 ||
1374 ((acquire->vfdev_info.vf_os & VF_OS_MASK) ==
1381 static int bnx2x_vf_mbx_acquire_chk_dorq(struct bnx2x *bp,
1382 struct bnx2x_virtf *vf,
1383 struct bnx2x_vf_mbx *mbx)
1385 /* Linux drivers which correctly set the doorbell size also
1386 * send a physical port request
1388 if (bnx2x_search_tlv_list(bp, &mbx->msg->req,
1389 CHANNEL_TLV_PHYS_PORT_ID))
1392 /* Issue does not exist in windows VMs */
1393 if (bnx2x_vf_mbx_is_windows_vm(bp, &mbx->msg->req.acquire))
1399 static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
1400 struct bnx2x_vf_mbx *mbx)
1403 struct vfpf_acquire_tlv *acquire = &mbx->msg->req.acquire;
1405 /* log vfdev info */
1407 "VF[%d] ACQUIRE: vfdev_info- vf_id %d, vf_os %d resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d, n_mcs-%d\n",
1408 vf->abs_vfid, acquire->vfdev_info.vf_id, acquire->vfdev_info.vf_os,
1409 acquire->resc_request.num_rxqs, acquire->resc_request.num_txqs,
1410 acquire->resc_request.num_sbs, acquire->resc_request.num_mac_filters,
1411 acquire->resc_request.num_vlan_filters,
1412 acquire->resc_request.num_mc_filters);
1414 /* Prevent VFs with old drivers from loading, since they calculate
1415 * CIDs incorrectly, requiring a VF-flr [VM reboot] in order to recover
1416 * while being upgraded.
1418 rc = bnx2x_vf_mbx_acquire_chk_dorq(bp, vf, mbx);
1421 "VF [%d] - Can't support acquire request due to doorbell mismatch. Please update VM driver\n",
1426 /* Verify the VF fastpath HSI can be supported by the loaded FW.
1427 * Linux vfs should be oblivious to changes between v0 and v2.
1429 if (bnx2x_vf_mbx_is_windows_vm(bp, &mbx->msg->req.acquire))
1430 vf->fp_hsi = acquire->vfdev_info.fp_hsi_ver;
1432 vf->fp_hsi = max_t(u8, acquire->vfdev_info.fp_hsi_ver,
1434 if (vf->fp_hsi > ETH_FP_HSI_VERSION) {
1436 "VF [%d] - Can't support acquire request since VF requests a FW version which is too new [%02x > %02x]\n",
1437 vf->abs_vfid, acquire->vfdev_info.fp_hsi_ver,
1438 ETH_FP_HSI_VERSION);
1443 /* acquire the resources */
1444 rc = bnx2x_vf_acquire(bp, vf, &acquire->resc_request);
1446 /* store address of vf's bulletin board */
1447 vf->bulletin_map = acquire->bulletin_addr;
1448 if (acquire->vfdev_info.caps & VF_CAP_SUPPORT_EXT_BULLETIN) {
1449 DP(BNX2X_MSG_IOV, "VF[%d] supports long bulletin boards\n",
1451 vf->cfg_flags |= VF_CFG_EXT_BULLETIN;
1453 vf->cfg_flags &= ~VF_CFG_EXT_BULLETIN;
1456 if (acquire->vfdev_info.caps & VF_CAP_SUPPORT_VLAN_FILTER) {
1457 DP(BNX2X_MSG_IOV, "VF[%d] supports vlan filtering\n",
1459 vf->cfg_flags |= VF_CFG_VLAN_FILTER;
1461 vf->cfg_flags &= ~VF_CFG_VLAN_FILTER;
1466 bnx2x_vf_mbx_acquire_resp(bp, vf, mbx, rc);
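/* INIT_VF: record the VF's status-block and statistics addresses and
 * initialize the VF in FW
 */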
1469 static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
1470 struct bnx2x_vf_mbx *mbx)
1472 struct vfpf_init_tlv *init = &mbx->msg->req.init;
1475 /* record ghost addresses from vf message */
1476 vf->fw_stat_map = init->stats_addr;
1477 vf->stats_stride = init->stats_stride;
1478 rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr);
1480 /* set VF multiqueue statistics collection mode */
1481 if (init->flags & VFPF_INIT_FLG_STATS_COALESCE)
1482 vf->cfg_flags |= VF_CFG_STATS_COALESCE;
1484 /* Update VF's view of link state */
1485 if (vf->cfg_flags & VF_CFG_EXT_BULLETIN)
1486 bnx2x_iov_link_update_vf(bp, vf->index);
1489 bnx2x_vf_mbx_resp(bp, vf, rc);
1492 /* convert MBX queue-flags to standard SP queue-flags */
1493 static void bnx2x_vf_mbx_set_q_flags(struct bnx2x *bp, u32 mbx_q_flags,
1494 unsigned long *sp_q_flags)
1496 if (mbx_q_flags & VFPF_QUEUE_FLG_TPA)
1497 __set_bit(BNX2X_Q_FLG_TPA, sp_q_flags);
1498 if (mbx_q_flags & VFPF_QUEUE_FLG_TPA_IPV6)
1499 __set_bit(BNX2X_Q_FLG_TPA_IPV6, sp_q_flags);
1500 if (mbx_q_flags & VFPF_QUEUE_FLG_TPA_GRO)
1501 __set_bit(BNX2X_Q_FLG_TPA_GRO, sp_q_flags);
1502 if (mbx_q_flags & VFPF_QUEUE_FLG_STATS)
1503 __set_bit(BNX2X_Q_FLG_STATS, sp_q_flags);
1504 if (mbx_q_flags & VFPF_QUEUE_FLG_VLAN)
1505 __set_bit(BNX2X_Q_FLG_VLAN, sp_q_flags);
1506 if (mbx_q_flags & VFPF_QUEUE_FLG_COS)
1507 __set_bit(BNX2X_Q_FLG_COS, sp_q_flags);
1508 if (mbx_q_flags & VFPF_QUEUE_FLG_HC)
1509 __set_bit(BNX2X_Q_FLG_HC, sp_q_flags);
1510 if (mbx_q_flags & VFPF_QUEUE_FLG_DHC)
1511 __set_bit(BNX2X_Q_FLG_DHC, sp_q_flags);
1512 if (mbx_q_flags & VFPF_QUEUE_FLG_LEADING_RSS)
1513 __set_bit(BNX2X_Q_FLG_LEADING_RSS, sp_q_flags);
1515 /* outer vlan removal is set according to PF's multi function mode */
1517 __set_bit(BNX2X_Q_FLG_OV, sp_q_flags);
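/* SETUP_Q: validate the requested queue id and construct the VF queue from
 * the rx/tx parameters supplied in the request
 */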
1520 static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
1521 struct bnx2x_vf_mbx *mbx)
1523 struct vfpf_setup_q_tlv *setup_q = &mbx->msg->req.setup_q;
1524 struct bnx2x_vf_queue_construct_params qctor;
1528 if (setup_q->vf_qid >= vf_rxq_count(vf)) {
1529 BNX2X_ERR("vf_qid %d invalid, max queue count is %d\n",
1530 setup_q->vf_qid, vf_rxq_count(vf));
1535 /* tx queues must be set up alongside rx queues, so if the rx queue
1536 * is not marked as valid there's nothing to do.
1538 if (setup_q->param_valid & (VFPF_RXQ_VALID|VFPF_TXQ_VALID)) {
1539 struct bnx2x_vf_queue *q = vfq_get(vf, setup_q->vf_qid);
1540 unsigned long q_type = 0;
1542 struct bnx2x_queue_init_params *init_p;
1543 struct bnx2x_queue_setup_params *setup_p;
1545 if (bnx2x_vfq_is_leading(q))
1546 bnx2x_leading_vfq_init(bp, vf, q);
1548 /* re-init the VF operation context */
1550 sizeof(struct bnx2x_vf_queue_construct_params));
1551 setup_p = &qctor.prep_qsetup;
1552 init_p = &qctor.qstate.params.init;
1554 /* activate immediately */
1555 __set_bit(BNX2X_Q_FLG_ACTIVE, &setup_p->flags);
1557 if (setup_q->param_valid & VFPF_TXQ_VALID) {
1558 struct bnx2x_txq_setup_params *txq_params =
1559 &setup_p->txq_params;
1561 __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
1563 /* save sb resource index */
1564 q->sb_idx = setup_q->txq.vf_sb;
1567 init_p->tx.hc_rate = setup_q->txq.hc_rate;
1568 init_p->tx.sb_cq_index = setup_q->txq.sb_index;
1570 bnx2x_vf_mbx_set_q_flags(bp, setup_q->txq.flags,
1573 /* tx setup - flags */
1574 bnx2x_vf_mbx_set_q_flags(bp, setup_q->txq.flags,
1577 /* tx setup - general, nothing */
1580 txq_params->dscr_map = setup_q->txq.txq_addr;
1581 txq_params->sb_cq_index = setup_q->txq.sb_index;
1582 txq_params->traffic_type = setup_q->txq.traffic_type;
1584 bnx2x_vfop_qctor_dump_tx(bp, vf, init_p, setup_p,
1585 q->index, q->sb_idx);
1588 if (setup_q->param_valid & VFPF_RXQ_VALID) {
1589 struct bnx2x_rxq_setup_params *rxq_params =
1590 &setup_p->rxq_params;
1592 __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
1594 /* Note: there is no support for different SBs
1597 q->sb_idx = setup_q->rxq.vf_sb;
1600 init_p->rx.hc_rate = setup_q->rxq.hc_rate;
1601 init_p->rx.sb_cq_index = setup_q->rxq.sb_index;
1602 bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags,
1605 /* rx setup - flags */
1606 bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags,
1609 /* rx setup - general */
1610 setup_p->gen_params.mtu = setup_q->rxq.mtu;
1613 rxq_params->drop_flags = setup_q->rxq.drop_flags;
1614 rxq_params->dscr_map = setup_q->rxq.rxq_addr;
1615 rxq_params->sge_map = setup_q->rxq.sge_addr;
1616 rxq_params->rcq_map = setup_q->rxq.rcq_addr;
1617 rxq_params->rcq_np_map = setup_q->rxq.rcq_np_addr;
1618 rxq_params->buf_sz = setup_q->rxq.buf_sz;
1619 rxq_params->tpa_agg_sz = setup_q->rxq.tpa_agg_sz;
1620 rxq_params->max_sges_pkt = setup_q->rxq.max_sge_pkt;
1621 rxq_params->sge_buf_sz = setup_q->rxq.sge_buf_sz;
1622 rxq_params->cache_line_log =
1623 setup_q->rxq.cache_line_log;
1624 rxq_params->sb_cq_index = setup_q->rxq.sb_index;
1626 /* rx setup - multicast engine */
1627 if (bnx2x_vfq_is_leading(q)) {
1628 u8 mcast_id = FW_VF_HANDLE(vf->abs_vfid);
1630 rxq_params->mcast_engine_id = mcast_id;
1631 __set_bit(BNX2X_Q_FLG_MCAST, &setup_p->flags);
1634 bnx2x_vfop_qctor_dump_rx(bp, vf, init_p, setup_p,
1635 q->index, q->sb_idx);
1637 /* complete the preparations */
1638 bnx2x_vfop_qctor_prep(bp, vf, q, &qctor, q_type);
1640 rc = bnx2x_vf_queue_setup(bp, vf, q->index, &qctor);
1645 bnx2x_vf_mbx_resp(bp, vf, rc);
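/* build a driver-internal mac/vlan filter list from the request's filters
 * that match type_flag
 */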
1648 static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp,
1649 struct bnx2x_virtf *vf,
1650 struct vfpf_set_q_filters_tlv *tlv,
1651 struct bnx2x_vf_mac_vlan_filters **pfl,
1655 struct bnx2x_vf_mac_vlan_filters *fl = NULL;
1657 fl = kzalloc(struct_size(fl, filters, tlv->n_mac_vlan_filters),
1662 for (i = 0, j = 0; i < tlv->n_mac_vlan_filters; i++) {
1663 struct vfpf_q_mac_vlan_filter *msg_filter = &tlv->filters[i];
1665 if ((msg_filter->flags & type_flag) != type_flag)
1667 memset(&fl->filters[j], 0, sizeof(fl->filters[j]));
1668 if (type_flag & VFPF_Q_FILTER_DEST_MAC_VALID) {
1669 fl->filters[j].mac = msg_filter->mac;
1670 fl->filters[j].type |= BNX2X_VF_FILTER_MAC;
1672 if (type_flag & VFPF_Q_FILTER_VLAN_TAG_VALID) {
1673 fl->filters[j].vid = msg_filter->vlan_tag;
1674 fl->filters[j].type |= BNX2X_VF_FILTER_VLAN;
1676 fl->filters[j].add = !!(msg_filter->flags & VFPF_Q_FILTER_SET);
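/* count how many filters in the request carry all of the given flag bits */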
1688 static int bnx2x_vf_filters_contain(struct vfpf_set_q_filters_tlv *filters,
1693 for (i = 0; i < filters->n_mac_vlan_filters; i++)
1694 if ((filters->filters[i].flags & flags) == flags)
1700 static void bnx2x_vf_mbx_dp_q_filter(struct bnx2x *bp, int msglvl, int idx,
1701 struct vfpf_q_mac_vlan_filter *filter)
1703 DP(msglvl, "MAC-VLAN[%d] -- flags=0x%x\n", idx, filter->flags);
1704 if (filter->flags & VFPF_Q_FILTER_VLAN_TAG_VALID)
1705 DP_CONT(msglvl, ", vlan=%d", filter->vlan_tag);
1706 if (filter->flags & VFPF_Q_FILTER_DEST_MAC_VALID)
1707 DP_CONT(msglvl, ", MAC=%pM", filter->mac);
1708 DP_CONT(msglvl, "\n");
1711 static void bnx2x_vf_mbx_dp_q_filters(struct bnx2x *bp, int msglvl,
1712 struct vfpf_set_q_filters_tlv *filters)
1716 if (filters->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED)
1717 for (i = 0; i < filters->n_mac_vlan_filters; i++)
1718 bnx2x_vf_mbx_dp_q_filter(bp, msglvl, i,
1719 &filters->filters[i]);
1721 if (filters->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED)
1722 DP(msglvl, "RX-MASK=0x%x\n", filters->rx_mask);
1724 if (filters->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED)
1725 for (i = 0; i < filters->n_multicast; i++)
1726 DP(msglvl, "MULTICAST=%pM\n", filters->multicast[i]);
1729 #define VFPF_MAC_FILTER VFPF_Q_FILTER_DEST_MAC_VALID
1730 #define VFPF_VLAN_FILTER VFPF_Q_FILTER_VLAN_TAG_VALID
1731 #define VFPF_VLAN_MAC_FILTER (VFPF_VLAN_FILTER | VFPF_MAC_FILTER)
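/* apply all filter changes carried in a SET_Q_FILTERS request: the vlan-mac,
 * mac and vlan lists, the rx-mode mask and the multicast list
 */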
1733 static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
1737 struct vfpf_set_q_filters_tlv *msg =
1738 &BP_VF_MBX(bp, vf->index)->msg->req.set_q_filters;
1740 /* check for any mac/vlan changes */
1741 if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
1742 struct bnx2x_vf_mac_vlan_filters *fl = NULL;
1744 /* build vlan-mac list */
1745 rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
1746 VFPF_VLAN_MAC_FILTER);
1752 /* set vlan-mac list */
1753 rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
1760 /* build mac list */
1763 rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
1770 rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
1777 /* build vlan list */
1780 rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
1787 rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
1796 if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
1797 unsigned long accept = 0;
1798 struct pf_vf_bulletin_content *bulletin =
1799 BP_VF_BULLETIN(bp, vf->index);
1801 /* Ignore VF requested mode; instead set a regular mode */
1802 if (msg->rx_mask != VFPF_RX_MASK_ACCEPT_NONE) {
1803 __set_bit(BNX2X_ACCEPT_UNICAST, &accept);
1804 __set_bit(BNX2X_ACCEPT_MULTICAST, &accept);
1805 __set_bit(BNX2X_ACCEPT_BROADCAST, &accept);
1808 /* any_vlan is not configured if HV is forcing VLAN
1809 * any_vlan is configured if
1810 * 1. VF does not support vlan filtering
1812 * 2. VF supports vlan filtering and explicitly requested it
1814 if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)) &&
1815 (!(vf->cfg_flags & VF_CFG_VLAN_FILTER) ||
1816 msg->rx_mask & VFPF_RX_MASK_ACCEPT_ANY_VLAN))
1817 __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);
1820 rc = bnx2x_vf_rxmode(bp, vf, msg->vf_qid, accept);
1825 if (msg->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED) {
1827 rc = bnx2x_vf_mcast(bp, vf, msg->multicast,
1828 msg->n_multicast, false);
1834 BNX2X_ERR("QFILTERS[%d:%d] error: rc %d\n",
1835 vf->abs_vfid, msg->vf_qid, rc);
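/* if a mac was forced via the set_vf_mac ndo, only requests configuring that
 * same mac are allowed through
 */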
1839 static int bnx2x_filters_validate_mac(struct bnx2x *bp,
1840 struct bnx2x_virtf *vf,
1841 struct vfpf_set_q_filters_tlv *filters)
1843 struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index);
1846 /* if a mac was already set for this VF via the set vf mac ndo, we only
1847 * accept mac configurations of that mac. Why accept them at all?
1848 * Because the PF may have been unable to configure the mac at the time,
1849 * since the queue was not yet set up.
1851 if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID) {
1852 struct vfpf_q_mac_vlan_filter *filter = NULL;
1855 for (i = 0; i < filters->n_mac_vlan_filters; i++) {
1856 if (!(filters->filters[i].flags &
1857 VFPF_Q_FILTER_DEST_MAC_VALID))
1860 /* once a mac was set by ndo can only accept
1864 BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called [%d filters]\n",
1866 filters->n_mac_vlan_filters);
1871 filter = &filters->filters[i];
1874 /* ...and only the mac set by the ndo */
1876 !ether_addr_equal(filter->mac, bulletin->mac)) {
1877 BNX2X_ERR("VF[%d] requested the addition of a mac address not matching the one configured by set_vf_mac ndo\n",
1889 static int bnx2x_filters_validate_vlan(struct bnx2x *bp,
1890 struct bnx2x_virtf *vf,
1891 struct vfpf_set_q_filters_tlv *filters)
1893 struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index);
1896 /* if vlan was set by hypervisor we don't allow guest to config vlan */
1897 if (bulletin->valid_bitmap & 1 << VLAN_VALID) {
1898 /* search for vlan filters */
1900 if (bnx2x_vf_filters_contain(filters,
1901 VFPF_Q_FILTER_VLAN_TAG_VALID)) {
1902 BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n",
1910 if (filters->vf_qid > vf_rxq_count(vf)) {
1919 static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
1920 struct bnx2x_virtf *vf,
1921 struct bnx2x_vf_mbx *mbx)
1923 struct vfpf_set_q_filters_tlv *filters = &mbx->msg->req.set_q_filters;
1926 rc = bnx2x_filters_validate_mac(bp, vf, filters);
1930 rc = bnx2x_filters_validate_vlan(bp, vf, filters);
1934 DP(BNX2X_MSG_IOV, "VF[%d] Q_FILTERS: queue[%d]\n",
1938 /* print q_filter message */
1939 bnx2x_vf_mbx_dp_q_filters(bp, BNX2X_MSG_IOV, filters);
1941 rc = bnx2x_vf_mbx_qfilters(bp, vf);
1943 bnx2x_vf_mbx_resp(bp, vf, rc);
1946 static void bnx2x_vf_mbx_teardown_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
1947 struct bnx2x_vf_mbx *mbx)
1949 int qid = mbx->msg->req.q_op.vf_qid;
1952 DP(BNX2X_MSG_IOV, "VF[%d] Q_TEARDOWN: vf_qid=%d\n",
1955 rc = bnx2x_vf_queue_teardown(bp, vf, qid);
1956 bnx2x_vf_mbx_resp(bp, vf, rc);
1959 static void bnx2x_vf_mbx_close_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
1960 struct bnx2x_vf_mbx *mbx)
1964 DP(BNX2X_MSG_IOV, "VF[%d] VF_CLOSE\n", vf->abs_vfid);
1966 rc = bnx2x_vf_close(bp, vf);
1967 bnx2x_vf_mbx_resp(bp, vf, rc);
1970 static void bnx2x_vf_mbx_release_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
1971 struct bnx2x_vf_mbx *mbx)
1975 DP(BNX2X_MSG_IOV, "VF[%d] VF_RELEASE\n", vf->abs_vfid);
1977 rc = bnx2x_vf_free(bp, vf);
1978 bnx2x_vf_mbx_resp(bp, vf, rc);
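/* UPDATE_RSS: translate the RSS TLV into ramrod parameters and update the
 * VF's RSS configuration
 */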
1981 static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf,
1982 struct bnx2x_vf_mbx *mbx)
1984 struct bnx2x_config_rss_params rss;
1985 struct vfpf_rss_tlv *rss_tlv = &mbx->msg->req.update_rss;
1988 if (rss_tlv->ind_table_size != T_ETH_INDIRECTION_TABLE_SIZE ||
1989 rss_tlv->rss_key_size != T_ETH_RSS_KEY) {
1990 BNX2X_ERR("failing rss configuration of vf %d due to size mismatch\n",
1996 memset(&rss, 0, sizeof(struct bnx2x_config_rss_params));
1998 /* set vfop params according to rss tlv */
1999 memcpy(rss.ind_table, rss_tlv->ind_table,
2000 T_ETH_INDIRECTION_TABLE_SIZE);
2001 memcpy(rss.rss_key, rss_tlv->rss_key, sizeof(rss_tlv->rss_key));
2002 rss.rss_obj = &vf->rss_conf_obj;
2003 rss.rss_result_mask = rss_tlv->rss_result_mask;
2005 /* flags handled individually for backward/forward compatibility */
2007 rss.ramrod_flags = 0;
2009 if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED)
2010 __set_bit(BNX2X_RSS_MODE_DISABLED, &rss.rss_flags);
2011 if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR)
2012 __set_bit(BNX2X_RSS_MODE_REGULAR, &rss.rss_flags);
2013 if (rss_tlv->rss_flags & VFPF_RSS_SET_SRCH)
2014 __set_bit(BNX2X_RSS_SET_SRCH, &rss.rss_flags);
2015 if (rss_tlv->rss_flags & VFPF_RSS_IPV4)
2016 __set_bit(BNX2X_RSS_IPV4, &rss.rss_flags);
2017 if (rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP)
2018 __set_bit(BNX2X_RSS_IPV4_TCP, &rss.rss_flags);
2019 if (rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP)
2020 __set_bit(BNX2X_RSS_IPV4_UDP, &rss.rss_flags);
2021 if (rss_tlv->rss_flags & VFPF_RSS_IPV6)
2022 __set_bit(BNX2X_RSS_IPV6, &rss.rss_flags);
2023 if (rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP)
2024 __set_bit(BNX2X_RSS_IPV6_TCP, &rss.rss_flags);
2025 if (rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)
2026 __set_bit(BNX2X_RSS_IPV6_UDP, &rss.rss_flags);
2028 if ((!(rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP) &&
2029 rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP) ||
2030 (!(rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP) &&
2031 rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)) {
2032 BNX2X_ERR("about to hit a FW assert. aborting...\n");
2037 rc = bnx2x_vf_rss_update(bp, vf, &rss);
2039 bnx2x_vf_mbx_resp(bp, vf, rc);
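/* sanity-check the TPA parameters received from the VF against HW/FW limits */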
2042 static int bnx2x_validate_tpa_params(struct bnx2x *bp,
2043 struct vfpf_tpa_tlv *tpa_tlv)
2047 if (tpa_tlv->tpa_client_info.max_sges_for_packet >
2048 U_ETH_MAX_SGES_FOR_PACKET) {
2050 BNX2X_ERR("TPA update: max_sges received %d, max is %d\n",
2051 tpa_tlv->tpa_client_info.max_sges_for_packet,
2052 U_ETH_MAX_SGES_FOR_PACKET);
2055 if (tpa_tlv->tpa_client_info.max_tpa_queues > MAX_AGG_QS(bp)) {
2057 BNX2X_ERR("TPA update: max_tpa_queues received %d, max is %d\n",
2058 tpa_tlv->tpa_client_info.max_tpa_queues,
2065 static void bnx2x_vf_mbx_update_tpa(struct bnx2x *bp, struct bnx2x_virtf *vf,
2066 struct bnx2x_vf_mbx *mbx)
2068 struct bnx2x_queue_update_tpa_params vf_op_params;
2069 struct vfpf_tpa_tlv *tpa_tlv = &mbx->msg->req.update_tpa;
2072 memset(&vf_op_params, 0, sizeof(vf_op_params));
2074 if (bnx2x_validate_tpa_params(bp, tpa_tlv))
2077 vf_op_params.complete_on_both_clients =
2078 tpa_tlv->tpa_client_info.complete_on_both_clients;
2079 vf_op_params.dont_verify_thr =
2080 tpa_tlv->tpa_client_info.dont_verify_thr;
2081 vf_op_params.max_agg_sz =
2082 tpa_tlv->tpa_client_info.max_agg_size;
2083 vf_op_params.max_sges_pkt =
2084 tpa_tlv->tpa_client_info.max_sges_for_packet;
2085 vf_op_params.max_tpa_queues =
2086 tpa_tlv->tpa_client_info.max_tpa_queues;
2087 vf_op_params.sge_buff_sz =
2088 tpa_tlv->tpa_client_info.sge_buff_size;
2089 vf_op_params.sge_pause_thr_high =
2090 tpa_tlv->tpa_client_info.sge_pause_thr_high;
2091 vf_op_params.sge_pause_thr_low =
2092 tpa_tlv->tpa_client_info.sge_pause_thr_low;
2093 vf_op_params.tpa_mode =
2094 tpa_tlv->tpa_client_info.tpa_mode;
2095 vf_op_params.update_ipv4 =
2096 tpa_tlv->tpa_client_info.update_ipv4;
2097 vf_op_params.update_ipv6 =
2098 tpa_tlv->tpa_client_info.update_ipv6;
2100 rc = bnx2x_vf_tpa_update(bp, vf, tpa_tlv, &vf_op_params);
2103 bnx2x_vf_mbx_resp(bp, vf, rc);
2106 /* dispatch request */
2107 static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
2108 struct bnx2x_vf_mbx *mbx)
2112 if (vf->state == VF_LOST) {
2113 /* Just ack the FW and return if VFs are lost
2114 * in case of parity error. VFs are supposed to time out
2115 * while waiting for the PF response.
2118 "VF 0x%x lost, not handling the request\n", vf->abs_vfid);
2120 storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
2124 /* check if tlv type is known */
2125 if (bnx2x_tlv_supported(mbx->first_tlv.tl.type)) {
2126 /* Lock the per vf op mutex and note the locker's identity.
2127 * The unlock will take place in mbx response.
2129 bnx2x_lock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);
2131 /* switch on the opcode */
2132 switch (mbx->first_tlv.tl.type) {
2133 case CHANNEL_TLV_ACQUIRE:
2134 bnx2x_vf_mbx_acquire(bp, vf, mbx);
2136 case CHANNEL_TLV_INIT:
2137 bnx2x_vf_mbx_init_vf(bp, vf, mbx);
2139 case CHANNEL_TLV_SETUP_Q:
2140 bnx2x_vf_mbx_setup_q(bp, vf, mbx);
2142 case CHANNEL_TLV_SET_Q_FILTERS:
2143 bnx2x_vf_mbx_set_q_filters(bp, vf, mbx);
2145 case CHANNEL_TLV_TEARDOWN_Q:
2146 bnx2x_vf_mbx_teardown_q(bp, vf, mbx);
2148 case CHANNEL_TLV_CLOSE:
2149 bnx2x_vf_mbx_close_vf(bp, vf, mbx);
2151 case CHANNEL_TLV_RELEASE:
2152 bnx2x_vf_mbx_release_vf(bp, vf, mbx);
2154 case CHANNEL_TLV_UPDATE_RSS:
2155 bnx2x_vf_mbx_update_rss(bp, vf, mbx);
2157 case CHANNEL_TLV_UPDATE_TPA:
2158 bnx2x_vf_mbx_update_tpa(bp, vf, mbx);
2163 /* unknown TLV - this may belong to a VF driver from the future
2164 * - a version written after this PF driver was written, which
2165 * supports features unknown as of yet. Too bad since we don't
2166 * support them. Or this may be because someone wrote a crappy
2167 * VF driver and is sending garbage over the channel.
2169 BNX2X_ERR("unknown TLV. type %d length %d vf->state was %d. first 20 bytes of mailbox buffer:\n",
2170 mbx->first_tlv.tl.type, mbx->first_tlv.tl.length,
2172 for (i = 0; i < 20; i++)
2173 DP_CONT(BNX2X_MSG_IOV, "%x ",
2174 mbx->msg->req.tlv_buf_size.tlv_buffer[i]);
2177 /* can we respond to VF (do we have an address for it?) */
2178 if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) {
2179 /* notify the VF that we do not support this request */
2180 bnx2x_vf_mbx_resp(bp, vf, PFVF_STATUS_NOT_SUPPORTED);
2182 /* can't send a response since this VF is unknown to us;
2183 * just ack the FW to release the mailbox and unlock
2186 storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
2187 /* Firmware ack should be written before unlocking channel */
2188 bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);
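/* called from slow-path event handling: store the VF's mailbox address and
 * schedule the IOV task to process the request
 */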
2192 void bnx2x_vf_mbx_schedule(struct bnx2x *bp,
2193 struct vf_pf_event_data *vfpf_event)
2198 "vf pf event received: vfid %d, address_hi %x, address lo %x",
2199 vfpf_event->vf_id, vfpf_event->msg_addr_hi, vfpf_event->msg_addr_lo);
2200 /* Sanity checks; consider removing later */
2202 /* check if the vf_id is valid */
2203 if (vfpf_event->vf_id - BP_VFDB(bp)->sriov.first_vf_in_pf >
2204 BNX2X_NR_VIRTFN(bp)) {
2205 BNX2X_ERR("Illegal vf_id %d max allowed: %d\n",
2206 vfpf_event->vf_id, BNX2X_NR_VIRTFN(bp));
2210 vf_idx = bnx2x_vf_idx_by_abs_fid(bp, vfpf_event->vf_id);
2212 /* Update VFDB with current message and schedule its handling */
2213 mutex_lock(&BP_VFDB(bp)->event_mutex);
2214 BP_VF_MBX(bp, vf_idx)->vf_addr_hi =
2215 le32_to_cpu(vfpf_event->msg_addr_hi);
2216 BP_VF_MBX(bp, vf_idx)->vf_addr_lo =
2217 le32_to_cpu(vfpf_event->msg_addr_lo);
2218 BP_VFDB(bp)->event_occur |= (1ULL << vf_idx);
2219 mutex_unlock(&BP_VFDB(bp)->event_mutex);
2221 bnx2x_schedule_iov_task(bp, BNX2X_IOV_HANDLE_VF_MSG);
2224 /* handle new vf-pf messages */
2225 void bnx2x_vf_mbx(struct bnx2x *bp)
2227 struct bnx2x_vfdb *vfdb = BP_VFDB(bp);
2235 mutex_lock(&vfdb->event_mutex);
2236 events = vfdb->event_occur;
2237 vfdb->event_occur = 0;
2238 mutex_unlock(&vfdb->event_mutex);
2240 for_each_vf(bp, vf_idx) {
2241 struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf_idx);
2242 struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
2244 /* Handle VFs which have pending events */
2245 if (!(events & (1ULL << vf_idx)))
2249 "Handling vf pf event vfid %d, address: [%x:%x], resp_offset 0x%x\n",
2250 vf_idx, mbx->vf_addr_hi, mbx->vf_addr_lo,
2251 mbx->first_tlv.resp_msg_offset);
2253 /* dmae to get the VF request */
2254 rc = bnx2x_copy32_vf_dmae(bp, true, mbx->msg_mapping,
2255 vf->abs_vfid, mbx->vf_addr_hi,
2257 sizeof(union vfpf_tlvs)/4);
2259 BNX2X_ERR("Failed to copy request VF %d\n",
2261 bnx2x_vf_release(bp, vf);
2265 /* process the VF message header */
2266 mbx->first_tlv = mbx->msg->req.first_tlv;
2268 /* Clean response buffer to refrain from falsely
2271 memset(&mbx->msg->resp, 0, sizeof(union pfvf_tlvs));
2273 /* dispatch the request (will prepare the response) */
2274 bnx2x_vf_mbx_request(bp, vf, mbx);
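/* set the bulletin board length according to the VF's capabilities (legacy
 * vs. extended) and compute its CRC
 */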
2278 void bnx2x_vf_bulletin_finalize(struct pf_vf_bulletin_content *bulletin,
2281 /* Older VFs contain a bug where they can't check CRC for bulletin
2282 * boards of length greater than legacy size.
2284 bulletin->length = support_long ? BULLETIN_CONTENT_SIZE :
2285 BULLETIN_CONTENT_LEGACY_SIZE;
2286 bulletin->crc = bnx2x_crc_vf_bulletin(bulletin);
2289 /* propagate local bulletin board to vf */
2290 int bnx2x_post_vf_bulletin(struct bnx2x *bp, int vf)
2292 struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf);
2293 dma_addr_t pf_addr = BP_VF_BULLETIN_DMA(bp)->mapping +
2294 vf * BULLETIN_CONTENT_SIZE;
2295 dma_addr_t vf_addr = bnx2x_vf(bp, vf, bulletin_map);
2298 /* can only update vf after init took place */
2299 if (bnx2x_vf(bp, vf, state) != VF_ENABLED &&
2300 bnx2x_vf(bp, vf, state) != VF_ACQUIRED)
2303 /* increment bulletin board version and compute crc */
2304 bulletin->version++;
2305 bnx2x_vf_bulletin_finalize(bulletin,
2306 (bnx2x_vf(bp, vf, cfg_flags) &
2307 VF_CFG_EXT_BULLETIN) ? true : false);
2309 /* propagate bulletin board via dmae to vm memory */
2310 rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr,
2311 bnx2x_vf(bp, vf, abs_vfid), U64_HI(vf_addr),
2312 U64_LO(vf_addr), bulletin->length / 4);