bnx2x: VF RSS support - PF side
[linux-2.6-block.git] / drivers / net / ethernet / broadcom / bnx2x / bnx2x_vfpf.c
CommitLineData
be1f1ffa
AE
1/* bnx2x_vfpf.c: Broadcom Everest network driver.
2 *
247fa82b 3 * Copyright 2009-2013 Broadcom Corporation
be1f1ffa
AE
4 *
5 * Unless you and Broadcom execute a separate written software license
6 * agreement governing use of this software, this software is licensed to you
7 * under the terms of the GNU General Public License version 2, available
8 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
9 *
10 * Notwithstanding the above, under no circumstances may you combine this
11 * software in any way with any other Broadcom software provided under a
12 * license other than the GPL, without Broadcom's express prior written
13 * consent.
14 *
15 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
16 * Written by: Shmulik Ravid <shmulikr@broadcom.com>
17 * Ariel Elior <ariele@broadcom.com>
18 */
19
20#include "bnx2x.h"
6411280a 21#include "bnx2x_cmn.h"
b93288d5 22#include <linux/crc32.h>
be1f1ffa
AE
23
24/* place a given tlv on the tlv buffer at a given offset */
25void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, u16 offset, u16 type,
26 u16 length)
27{
28 struct channel_tlv *tl =
29 (struct channel_tlv *)(tlvs_list + offset);
30
31 tl->type = type;
32 tl->length = length;
33}
34
35/* Clear the mailbox and init the header of the first tlv */
36void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
37 u16 type, u16 length)
38{
1d6f3cd8
DK
39 mutex_lock(&bp->vf2pf_mutex);
40
be1f1ffa
AE
41 DP(BNX2X_MSG_IOV, "preparing to send %d tlv over vf pf channel\n",
42 type);
43
44 /* Clear mailbox */
45 memset(bp->vf2pf_mbox, 0, sizeof(struct bnx2x_vf_mbx_msg));
46
47 /* init type and length */
48 bnx2x_add_tlv(bp, &first_tlv->tl, 0, type, length);
49
50 /* init first tlv header */
51 first_tlv->resp_msg_offset = sizeof(bp->vf2pf_mbox->req);
52}
53
1d6f3cd8
DK
54/* releases the mailbox */
55void bnx2x_vfpf_finalize(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv)
56{
57 DP(BNX2X_MSG_IOV, "done sending [%d] tlv over vf pf channel\n",
58 first_tlv->tl.type);
59
60 mutex_unlock(&bp->vf2pf_mutex);
61}
62
be1f1ffa
AE
63/* list the types and lengths of the tlvs on the buffer */
64void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list)
65{
66 int i = 1;
67 struct channel_tlv *tlv = (struct channel_tlv *)tlvs_list;
68
69 while (tlv->type != CHANNEL_TLV_LIST_END) {
70 /* output tlv */
71 DP(BNX2X_MSG_IOV, "TLV number %d: type %d, length %d\n", i,
72 tlv->type, tlv->length);
73
74 /* advance to next tlv */
75 tlvs_list += tlv->length;
76
77 /* cast general tlv list pointer to channel tlv header*/
78 tlv = (struct channel_tlv *)tlvs_list;
79
80 i++;
81
82 /* break condition for this loop */
83 if (i > MAX_TLVS_IN_LIST) {
84 WARN(true, "corrupt tlvs");
85 return;
86 }
87 }
88
89 /* output last tlv */
90 DP(BNX2X_MSG_IOV, "TLV number %d: type %d, length %d\n", i,
91 tlv->type, tlv->length);
92}
b56e9670 93
fd1fc79d
AE
94/* test whether we support a tlv type */
95bool bnx2x_tlv_supported(u16 tlvtype)
96{
97 return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
98}
99
100static inline int bnx2x_pfvf_status_codes(int rc)
101{
102 switch (rc) {
103 case 0:
104 return PFVF_STATUS_SUCCESS;
105 case -ENOMEM:
106 return PFVF_STATUS_NO_RESOURCE;
107 default:
108 return PFVF_STATUS_FAILURE;
109 }
110}
111
732ac8ca 112static int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping)
6411280a
AE
113{
114 struct cstorm_vf_zone_data __iomem *zone_data =
115 REG_ADDR(bp, PXP_VF_ADDR_CSDM_GLOBAL_START);
78c3bcc5 116 int tout = 100, interval = 100; /* wait for 10 seconds */
6411280a
AE
117
118 if (*done) {
119 BNX2X_ERR("done was non zero before message to pf was sent\n");
120 WARN_ON(true);
121 return -EINVAL;
122 }
123
78c3bcc5
AE
124 /* if PF indicated channel is down avoid sending message. Return success
125 * so calling flow can continue
126 */
127 bnx2x_sample_bulletin(bp);
128 if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) {
129 DP(BNX2X_MSG_IOV, "detecting channel down. Aborting message\n");
130 *done = PFVF_STATUS_SUCCESS;
131 return 0;
132 }
133
6411280a
AE
134 /* Write message address */
135 writel(U64_LO(msg_mapping),
136 &zone_data->non_trigger.vf_pf_channel.msg_addr_lo);
137 writel(U64_HI(msg_mapping),
138 &zone_data->non_trigger.vf_pf_channel.msg_addr_hi);
139
140 /* make sure the address is written before FW accesses it */
141 wmb();
142
143 /* Trigger the PF FW */
144 writeb(1, &zone_data->trigger.vf_pf_channel.addr_valid);
145
146 /* Wait for PF to complete */
147 while ((tout >= 0) && (!*done)) {
148 msleep(interval);
149 tout -= 1;
150
151 /* progress indicator - HV can take its own sweet time in
152 * answering VFs...
153 */
154 DP_CONT(BNX2X_MSG_IOV, ".");
155 }
156
157 if (!*done) {
158 BNX2X_ERR("PF response has timed out\n");
159 return -EAGAIN;
160 }
161 DP(BNX2X_MSG_SP, "Got a response from PF\n");
162 return 0;
163}
164
732ac8ca 165static int bnx2x_get_vf_id(struct bnx2x *bp, u32 *vf_id)
6411280a
AE
166{
167 u32 me_reg;
168 int tout = 10, interval = 100; /* Wait for 1 sec */
169
170 do {
171 /* pxp traps vf read of doorbells and returns me reg value */
172 me_reg = readl(bp->doorbells);
173 if (GOOD_ME_REG(me_reg))
174 break;
175
176 msleep(interval);
177
178 BNX2X_ERR("Invalid ME register value: 0x%08x\n. Is pf driver up?",
179 me_reg);
180 } while (tout-- > 0);
181
182 if (!GOOD_ME_REG(me_reg)) {
183 BNX2X_ERR("Invalid ME register value: 0x%08x\n", me_reg);
184 return -EINVAL;
185 }
186
187 BNX2X_ERR("valid ME register value: 0x%08x\n", me_reg);
188
189 *vf_id = (me_reg & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT;
190
191 return 0;
192}
193
194int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
195{
196 int rc = 0, attempts = 0;
197 struct vfpf_acquire_tlv *req = &bp->vf2pf_mbox->req.acquire;
198 struct pfvf_acquire_resp_tlv *resp = &bp->vf2pf_mbox->resp.acquire_resp;
199 u32 vf_id;
200 bool resources_acquired = false;
201
202 /* clear mailbox and prep first tlv */
203 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_ACQUIRE, sizeof(*req));
204
1d6f3cd8
DK
205 if (bnx2x_get_vf_id(bp, &vf_id)) {
206 rc = -EAGAIN;
207 goto out;
208 }
6411280a
AE
209
210 req->vfdev_info.vf_id = vf_id;
211 req->vfdev_info.vf_os = 0;
212
213 req->resc_request.num_rxqs = rx_count;
214 req->resc_request.num_txqs = tx_count;
215 req->resc_request.num_sbs = bp->igu_sb_cnt;
216 req->resc_request.num_mac_filters = VF_ACQUIRE_MAC_FILTERS;
217 req->resc_request.num_mc_filters = VF_ACQUIRE_MC_FILTERS;
218
219 /* pf 2 vf bulletin board address */
220 req->bulletin_addr = bp->pf2vf_bulletin_mapping;
221
222 /* add list termination tlv */
223 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
224 sizeof(struct channel_list_end_tlv));
225
226 /* output tlvs list */
227 bnx2x_dp_tlv_list(bp, req);
228
229 while (!resources_acquired) {
230 DP(BNX2X_MSG_SP, "attempting to acquire resources\n");
231
232 /* send acquire request */
233 rc = bnx2x_send_msg2pf(bp,
234 &resp->hdr.status,
235 bp->vf2pf_mbox_mapping);
236
237 /* PF timeout */
238 if (rc)
1d6f3cd8 239 goto out;
6411280a
AE
240
241 /* copy acquire response from buffer to bp */
242 memcpy(&bp->acquire_resp, resp, sizeof(bp->acquire_resp));
243
244 attempts++;
245
16a5fd92 246 /* test whether the PF accepted our request. If not, humble
6411280a
AE
247 * the request and try again.
248 */
249 if (bp->acquire_resp.hdr.status == PFVF_STATUS_SUCCESS) {
250 DP(BNX2X_MSG_SP, "resources acquired\n");
251 resources_acquired = true;
252 } else if (bp->acquire_resp.hdr.status ==
253 PFVF_STATUS_NO_RESOURCE &&
254 attempts < VF_ACQUIRE_THRESH) {
255 DP(BNX2X_MSG_SP,
256 "PF unwilling to fulfill resource request. Try PF recommended amount\n");
257
258 /* humble our request */
259 req->resc_request.num_txqs =
b9871bcf
AE
260 min(req->resc_request.num_txqs,
261 bp->acquire_resp.resc.num_txqs);
6411280a 262 req->resc_request.num_rxqs =
b9871bcf
AE
263 min(req->resc_request.num_rxqs,
264 bp->acquire_resp.resc.num_rxqs);
6411280a 265 req->resc_request.num_sbs =
b9871bcf
AE
266 min(req->resc_request.num_sbs,
267 bp->acquire_resp.resc.num_sbs);
6411280a 268 req->resc_request.num_mac_filters =
b9871bcf
AE
269 min(req->resc_request.num_mac_filters,
270 bp->acquire_resp.resc.num_mac_filters);
6411280a 271 req->resc_request.num_vlan_filters =
b9871bcf
AE
272 min(req->resc_request.num_vlan_filters,
273 bp->acquire_resp.resc.num_vlan_filters);
6411280a 274 req->resc_request.num_mc_filters =
b9871bcf
AE
275 min(req->resc_request.num_mc_filters,
276 bp->acquire_resp.resc.num_mc_filters);
6411280a
AE
277
278 /* Clear response buffer */
279 memset(&bp->vf2pf_mbox->resp, 0,
280 sizeof(union pfvf_tlvs));
281 } else {
282 /* PF reports error */
283 BNX2X_ERR("Failed to get the requested amount of resources: %d. Breaking...\n",
284 bp->acquire_resp.hdr.status);
1d6f3cd8
DK
285 rc = -EAGAIN;
286 goto out;
6411280a
AE
287 }
288 }
289
290 /* get HW info */
291 bp->common.chip_id |= (bp->acquire_resp.pfdev_info.chip_num & 0xffff);
292 bp->link_params.chip_id = bp->common.chip_id;
293 bp->db_size = bp->acquire_resp.pfdev_info.db_size;
294 bp->common.int_block = INT_BLOCK_IGU;
295 bp->common.chip_port_mode = CHIP_2_PORT_MODE;
296 bp->igu_dsb_id = -1;
297 bp->mf_ov = 0;
298 bp->mf_mode = 0;
299 bp->common.flash_size = 0;
300 bp->flags |=
301 NO_WOL_FLAG | NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG | NO_FCOE_FLAG;
b9871bcf 302 bp->igu_sb_cnt = bp->acquire_resp.resc.num_sbs;
6411280a
AE
303 bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id;
304 strlcpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver,
305 sizeof(bp->fw_ver));
306
307 if (is_valid_ether_addr(bp->acquire_resp.resc.current_mac_addr))
308 memcpy(bp->dev->dev_addr,
309 bp->acquire_resp.resc.current_mac_addr,
310 ETH_ALEN);
311
1d6f3cd8
DK
312out:
313 bnx2x_vfpf_finalize(bp, &req->first_tlv);
314 return rc;
6411280a
AE
315}
316
317int bnx2x_vfpf_release(struct bnx2x *bp)
318{
319 struct vfpf_release_tlv *req = &bp->vf2pf_mbox->req.release;
320 struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
1d6f3cd8 321 u32 rc, vf_id;
6411280a
AE
322
323 /* clear mailbox and prep first tlv */
324 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_RELEASE, sizeof(*req));
325
1d6f3cd8
DK
326 if (bnx2x_get_vf_id(bp, &vf_id)) {
327 rc = -EAGAIN;
328 goto out;
329 }
6411280a
AE
330
331 req->vf_id = vf_id;
332
333 /* add list termination tlv */
334 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
335 sizeof(struct channel_list_end_tlv));
336
337 /* output tlvs list */
338 bnx2x_dp_tlv_list(bp, req);
339
340 /* send release request */
341 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
342
343 if (rc)
344 /* PF timeout */
1d6f3cd8
DK
345 goto out;
346
6411280a
AE
347 if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
348 /* PF released us */
349 DP(BNX2X_MSG_SP, "vf released\n");
350 } else {
351 /* PF reports error */
6bf07b8e 352 BNX2X_ERR("PF failed our release request - are we out of sync? Response status: %d\n",
6411280a 353 resp->hdr.status);
1d6f3cd8
DK
354 rc = -EAGAIN;
355 goto out;
6411280a 356 }
1d6f3cd8
DK
357out:
358 bnx2x_vfpf_finalize(bp, &req->first_tlv);
6411280a 359
1d6f3cd8 360 return rc;
6411280a
AE
361}
362
363/* Tell PF about SB addresses */
364int bnx2x_vfpf_init(struct bnx2x *bp)
365{
366 struct vfpf_init_tlv *req = &bp->vf2pf_mbox->req.init;
367 struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
368 int rc, i;
369
370 /* clear mailbox and prep first tlv */
371 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_INIT, sizeof(*req));
372
373 /* status blocks */
374 for_each_eth_queue(bp, i)
375 req->sb_addr[i] = (dma_addr_t)bnx2x_fp(bp, i,
376 status_blk_mapping);
377
378 /* statistics - requests only supports single queue for now */
379 req->stats_addr = bp->fw_stats_data_mapping +
380 offsetof(struct bnx2x_fw_stats_data, queue_stats);
381
382 /* add list termination tlv */
383 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
384 sizeof(struct channel_list_end_tlv));
385
386 /* output tlvs list */
387 bnx2x_dp_tlv_list(bp, req);
388
389 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
390 if (rc)
1d6f3cd8 391 goto out;
6411280a
AE
392
393 if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
394 BNX2X_ERR("INIT VF failed: %d. Breaking...\n",
395 resp->hdr.status);
1d6f3cd8
DK
396 rc = -EAGAIN;
397 goto out;
6411280a
AE
398 }
399
400 DP(BNX2X_MSG_SP, "INIT VF Succeeded\n");
1d6f3cd8
DK
401out:
402 bnx2x_vfpf_finalize(bp, &req->first_tlv);
403
404 return rc;
6411280a
AE
405}
406
407/* CLOSE VF - opposite to INIT_VF */
408void bnx2x_vfpf_close_vf(struct bnx2x *bp)
409{
410 struct vfpf_close_tlv *req = &bp->vf2pf_mbox->req.close;
411 struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
412 int i, rc;
413 u32 vf_id;
414
415 /* If we haven't got a valid VF id, there is no sense to
416 * continue with sending messages
417 */
418 if (bnx2x_get_vf_id(bp, &vf_id))
419 goto free_irq;
420
421 /* Close the queues */
422 for_each_queue(bp, i)
423 bnx2x_vfpf_teardown_queue(bp, i);
424
f8f4f61a
DK
425 /* remove mac */
426 bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index, false);
427
6411280a
AE
428 /* clear mailbox and prep first tlv */
429 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_CLOSE, sizeof(*req));
430
431 req->vf_id = vf_id;
432
433 /* add list termination tlv */
434 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
435 sizeof(struct channel_list_end_tlv));
436
437 /* output tlvs list */
438 bnx2x_dp_tlv_list(bp, req);
439
440 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
441
442 if (rc)
443 BNX2X_ERR("Sending CLOSE failed. rc was: %d\n", rc);
444
445 else if (resp->hdr.status != PFVF_STATUS_SUCCESS)
446 BNX2X_ERR("Sending CLOSE failed: pf response was %d\n",
447 resp->hdr.status);
448
1d6f3cd8
DK
449 bnx2x_vfpf_finalize(bp, &req->first_tlv);
450
6411280a
AE
451free_irq:
452 /* Disable HW interrupts, NAPI */
453 bnx2x_netif_stop(bp, 0);
454 /* Delete all NAPI objects */
455 bnx2x_del_all_napi(bp);
456
457 /* Release IRQs */
458 bnx2x_free_irq(bp);
459}
460
b9871bcf
AE
461static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
462 struct bnx2x_vf_queue *q)
463{
464 u8 cl_id = vfq_cl_id(vf, q);
465 u8 func_id = FW_VF_HANDLE(vf->abs_vfid);
466
467 /* mac */
468 bnx2x_init_mac_obj(bp, &q->mac_obj,
469 cl_id, q->cid, func_id,
470 bnx2x_vf_sp(bp, vf, mac_rdata),
471 bnx2x_vf_sp_map(bp, vf, mac_rdata),
472 BNX2X_FILTER_MAC_PENDING,
473 &vf->filter_state,
474 BNX2X_OBJ_TYPE_RX_TX,
475 &bp->macs_pool);
476 /* vlan */
477 bnx2x_init_vlan_obj(bp, &q->vlan_obj,
478 cl_id, q->cid, func_id,
479 bnx2x_vf_sp(bp, vf, vlan_rdata),
480 bnx2x_vf_sp_map(bp, vf, vlan_rdata),
481 BNX2X_FILTER_VLAN_PENDING,
482 &vf->filter_state,
483 BNX2X_OBJ_TYPE_RX_TX,
484 &bp->vlans_pool);
485
486 /* mcast */
487 bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
488 q->cid, func_id, func_id,
489 bnx2x_vf_sp(bp, vf, mcast_rdata),
490 bnx2x_vf_sp_map(bp, vf, mcast_rdata),
491 BNX2X_FILTER_MCAST_PENDING,
492 &vf->filter_state,
493 BNX2X_OBJ_TYPE_RX_TX);
494
495 /* rss */
496 bnx2x_init_rss_config_obj(bp, &vf->rss_conf_obj, cl_id, q->cid,
497 func_id, func_id,
498 bnx2x_vf_sp(bp, vf, rss_rdata),
499 bnx2x_vf_sp_map(bp, vf, rss_rdata),
500 BNX2X_FILTER_RSS_CONF_PENDING,
501 &vf->filter_state,
502 BNX2X_OBJ_TYPE_RX_TX);
503
504 vf->leading_rss = cl_id;
505 q->is_leading = true;
506}
507
6411280a
AE
508/* ask the pf to open a queue for the vf */
509int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx)
510{
511 struct vfpf_setup_q_tlv *req = &bp->vf2pf_mbox->req.setup_q;
512 struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
513 struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
514 u16 tpa_agg_size = 0, flags = 0;
515 int rc;
516
517 /* clear mailbox and prep first tlv */
518 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SETUP_Q, sizeof(*req));
519
520 /* select tpa mode to request */
521 if (!fp->disable_tpa) {
522 flags |= VFPF_QUEUE_FLG_TPA;
523 flags |= VFPF_QUEUE_FLG_TPA_IPV6;
524 if (fp->mode == TPA_MODE_GRO)
525 flags |= VFPF_QUEUE_FLG_TPA_GRO;
526 tpa_agg_size = TPA_AGG_SIZE;
527 }
528
529 /* calculate queue flags */
530 flags |= VFPF_QUEUE_FLG_STATS;
531 flags |= VFPF_QUEUE_FLG_CACHE_ALIGN;
6411280a
AE
532 flags |= VFPF_QUEUE_FLG_VLAN;
533 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
534
535 /* Common */
536 req->vf_qid = fp_idx;
537 req->param_valid = VFPF_RXQ_VALID | VFPF_TXQ_VALID;
538
539 /* Rx */
540 req->rxq.rcq_addr = fp->rx_comp_mapping;
541 req->rxq.rcq_np_addr = fp->rx_comp_mapping + BCM_PAGE_SIZE;
542 req->rxq.rxq_addr = fp->rx_desc_mapping;
543 req->rxq.sge_addr = fp->rx_sge_mapping;
544 req->rxq.vf_sb = fp_idx;
545 req->rxq.sb_index = HC_INDEX_ETH_RX_CQ_CONS;
546 req->rxq.hc_rate = bp->rx_ticks ? 1000000/bp->rx_ticks : 0;
547 req->rxq.mtu = bp->dev->mtu;
548 req->rxq.buf_sz = fp->rx_buf_size;
549 req->rxq.sge_buf_sz = BCM_PAGE_SIZE * PAGES_PER_SGE;
550 req->rxq.tpa_agg_sz = tpa_agg_size;
551 req->rxq.max_sge_pkt = SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
552 req->rxq.max_sge_pkt = ((req->rxq.max_sge_pkt + PAGES_PER_SGE - 1) &
553 (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
554 req->rxq.flags = flags;
555 req->rxq.drop_flags = 0;
556 req->rxq.cache_line_log = BNX2X_RX_ALIGN_SHIFT;
557 req->rxq.stat_id = -1; /* No stats at the moment */
558
559 /* Tx */
560 req->txq.txq_addr = fp->txdata_ptr[FIRST_TX_COS_INDEX]->tx_desc_mapping;
561 req->txq.vf_sb = fp_idx;
562 req->txq.sb_index = HC_INDEX_ETH_TX_CQ_CONS_COS0;
563 req->txq.hc_rate = bp->tx_ticks ? 1000000/bp->tx_ticks : 0;
564 req->txq.flags = flags;
565 req->txq.traffic_type = LLFC_TRAFFIC_TYPE_NW;
566
567 /* add list termination tlv */
568 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
569 sizeof(struct channel_list_end_tlv));
570
571 /* output tlvs list */
572 bnx2x_dp_tlv_list(bp, req);
573
574 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
575 if (rc)
576 BNX2X_ERR("Sending SETUP_Q message for queue[%d] failed!\n",
577 fp_idx);
578
579 if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
580 BNX2X_ERR("Status of SETUP_Q for queue[%d] is %d\n",
581 fp_idx, resp->hdr.status);
1d6f3cd8 582 rc = -EINVAL;
6411280a 583 }
1d6f3cd8
DK
584
585 bnx2x_vfpf_finalize(bp, &req->first_tlv);
586
6411280a
AE
587 return rc;
588}
589
590int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx)
591{
592 struct vfpf_q_op_tlv *req = &bp->vf2pf_mbox->req.q_op;
593 struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
594 int rc;
595
596 /* clear mailbox and prep first tlv */
597 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_TEARDOWN_Q,
598 sizeof(*req));
599
600 req->vf_qid = qidx;
601
602 /* add list termination tlv */
603 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
604 sizeof(struct channel_list_end_tlv));
605
606 /* output tlvs list */
607 bnx2x_dp_tlv_list(bp, req);
608
609 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
610
611 if (rc) {
612 BNX2X_ERR("Sending TEARDOWN for queue %d failed: %d\n", qidx,
613 rc);
1d6f3cd8 614 goto out;
6411280a
AE
615 }
616
617 /* PF failed the transaction */
618 if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
619 BNX2X_ERR("TEARDOWN for queue %d failed: %d\n", qidx,
620 resp->hdr.status);
1d6f3cd8 621 rc = -EINVAL;
6411280a
AE
622 }
623
1d6f3cd8
DK
624out:
625 bnx2x_vfpf_finalize(bp, &req->first_tlv);
626 return rc;
6411280a
AE
627}
628
629/* request pf to add a mac for the vf */
f8f4f61a 630int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set)
6411280a
AE
631{
632 struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
633 struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
f8f4f61a 634 struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content;
1d6f3cd8 635 int rc = 0;
6411280a
AE
636
637 /* clear mailbox and prep first tlv */
638 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
639 sizeof(*req));
640
641 req->flags = VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED;
f8f4f61a 642 req->vf_qid = vf_qid;
6411280a 643 req->n_mac_vlan_filters = 1;
f8f4f61a
DK
644
645 req->filters[0].flags = VFPF_Q_FILTER_DEST_MAC_VALID;
646 if (set)
647 req->filters[0].flags |= VFPF_Q_FILTER_SET_MAC;
6411280a
AE
648
649 /* sample bulletin board for new mac */
650 bnx2x_sample_bulletin(bp);
651
652 /* copy mac from device to request */
f8f4f61a 653 memcpy(req->filters[0].mac, addr, ETH_ALEN);
6411280a
AE
654
655 /* add list termination tlv */
656 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
657 sizeof(struct channel_list_end_tlv));
658
659 /* output tlvs list */
660 bnx2x_dp_tlv_list(bp, req);
661
662 /* send message to pf */
663 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
664 if (rc) {
665 BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
1d6f3cd8 666 goto out;
6411280a
AE
667 }
668
669 /* failure may mean PF was configured with a new mac for us */
670 while (resp->hdr.status == PFVF_STATUS_FAILURE) {
671 DP(BNX2X_MSG_IOV,
672 "vfpf SET MAC failed. Check bulletin board for new posts\n");
673
f8f4f61a
DK
674 /* copy mac from bulletin to device */
675 memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN);
676
6411280a
AE
677 /* check if bulletin board was updated */
678 if (bnx2x_sample_bulletin(bp) == PFVF_BULLETIN_UPDATED) {
679 /* copy mac from device to request */
680 memcpy(req->filters[0].mac, bp->dev->dev_addr,
681 ETH_ALEN);
682
683 /* send message to pf */
684 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status,
685 bp->vf2pf_mbox_mapping);
686 } else {
687 /* no new info in bulletin */
688 break;
689 }
690 }
691
692 if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
693 BNX2X_ERR("vfpf SET MAC failed: %d\n", resp->hdr.status);
1d6f3cd8 694 rc = -EINVAL;
6411280a 695 }
1d6f3cd8
DK
696out:
697 bnx2x_vfpf_finalize(bp, &req->first_tlv);
6411280a
AE
698
699 return 0;
700}
701
702int bnx2x_vfpf_set_mcast(struct net_device *dev)
703{
704 struct bnx2x *bp = netdev_priv(dev);
705 struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
706 struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
707 int rc, i = 0;
708 struct netdev_hw_addr *ha;
709
710 if (bp->state != BNX2X_STATE_OPEN) {
711 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
712 return -EINVAL;
713 }
714
715 /* clear mailbox and prep first tlv */
716 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
717 sizeof(*req));
718
719 /* Get Rx mode requested */
720 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
721
722 netdev_for_each_mc_addr(ha, dev) {
723 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
724 bnx2x_mc_addr(ha));
725 memcpy(req->multicast[i], bnx2x_mc_addr(ha), ETH_ALEN);
726 i++;
727 }
728
729 /* We support four PFVF_MAX_MULTICAST_PER_VF mcast
730 * addresses tops
731 */
732 if (i >= PFVF_MAX_MULTICAST_PER_VF) {
733 DP(NETIF_MSG_IFUP,
734 "VF supports not more than %d multicast MAC addresses\n",
735 PFVF_MAX_MULTICAST_PER_VF);
736 return -EINVAL;
737 }
738
739 req->n_multicast = i;
740 req->flags |= VFPF_SET_Q_FILTERS_MULTICAST_CHANGED;
741 req->vf_qid = 0;
742
743 /* add list termination tlv */
744 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
745 sizeof(struct channel_list_end_tlv));
746
747 /* output tlvs list */
748 bnx2x_dp_tlv_list(bp, req);
749 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
750 if (rc) {
751 BNX2X_ERR("Sending a message failed: %d\n", rc);
1d6f3cd8 752 goto out;
6411280a
AE
753 }
754
755 if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
756 BNX2X_ERR("Set Rx mode/multicast failed: %d\n",
757 resp->hdr.status);
1d6f3cd8 758 rc = -EINVAL;
6411280a 759 }
1d6f3cd8
DK
760out:
761 bnx2x_vfpf_finalize(bp, &req->first_tlv);
6411280a
AE
762
763 return 0;
764}
765
766int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
767{
768 int mode = bp->rx_mode;
769 struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
770 struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
771 int rc;
772
773 /* clear mailbox and prep first tlv */
774 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
775 sizeof(*req));
776
777 DP(NETIF_MSG_IFUP, "Rx mode is %d\n", mode);
778
779 switch (mode) {
780 case BNX2X_RX_MODE_NONE: /* no Rx */
781 req->rx_mask = VFPF_RX_MASK_ACCEPT_NONE;
782 break;
783 case BNX2X_RX_MODE_NORMAL:
784 req->rx_mask = VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST;
785 req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
786 req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
787 break;
788 case BNX2X_RX_MODE_ALLMULTI:
789 req->rx_mask = VFPF_RX_MASK_ACCEPT_ALL_MULTICAST;
790 req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
791 req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
792 break;
793 case BNX2X_RX_MODE_PROMISC:
794 req->rx_mask = VFPF_RX_MASK_ACCEPT_ALL_UNICAST;
795 req->rx_mask |= VFPF_RX_MASK_ACCEPT_ALL_MULTICAST;
796 req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
797 break;
798 default:
799 BNX2X_ERR("BAD rx mode (%d)\n", mode);
1d6f3cd8
DK
800 rc = -EINVAL;
801 goto out;
6411280a
AE
802 }
803
804 req->flags |= VFPF_SET_Q_FILTERS_RX_MASK_CHANGED;
805 req->vf_qid = 0;
806
807 /* add list termination tlv */
808 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
809 sizeof(struct channel_list_end_tlv));
810
811 /* output tlvs list */
812 bnx2x_dp_tlv_list(bp, req);
813
814 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
815 if (rc)
816 BNX2X_ERR("Sending a message failed: %d\n", rc);
817
818 if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
819 BNX2X_ERR("Set Rx mode failed: %d\n", resp->hdr.status);
1d6f3cd8 820 rc = -EINVAL;
6411280a 821 }
1d6f3cd8
DK
822out:
823 bnx2x_vfpf_finalize(bp, &req->first_tlv);
6411280a
AE
824
825 return rc;
826}
827
b56e9670
AE
828/* General service functions */
829static void storm_memset_vf_mbx_ack(struct bnx2x *bp, u16 abs_fid)
830{
831 u32 addr = BAR_CSTRORM_INTMEM +
832 CSTORM_VF_PF_CHANNEL_STATE_OFFSET(abs_fid);
833
834 REG_WR8(bp, addr, VF_PF_CHANNEL_STATE_READY);
835}
836
837static void storm_memset_vf_mbx_valid(struct bnx2x *bp, u16 abs_fid)
838{
839 u32 addr = BAR_CSTRORM_INTMEM +
840 CSTORM_VF_PF_CHANNEL_VALID_OFFSET(abs_fid);
841
842 REG_WR8(bp, addr, 1);
843}
844
845static inline void bnx2x_set_vf_mbxs_valid(struct bnx2x *bp)
846{
847 int i;
848
849 for_each_vf(bp, i)
850 storm_memset_vf_mbx_valid(bp, bnx2x_vf(bp, i, abs_vfid));
851}
852
16a5fd92 853/* enable vf_pf mailbox (aka vf-pf-channel) */
b56e9670
AE
854void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid)
855{
856 bnx2x_vf_flr_clnup_epilog(bp, abs_vfid);
857
858 /* enable the mailbox in the FW */
859 storm_memset_vf_mbx_ack(bp, abs_vfid);
860 storm_memset_vf_mbx_valid(bp, abs_vfid);
861
862 /* enable the VF access to the mailbox */
863 bnx2x_vf_enable_access(bp, abs_vfid);
864}
fd1fc79d
AE
865
866/* this works only on !E1h */
867static int bnx2x_copy32_vf_dmae(struct bnx2x *bp, u8 from_vf,
868 dma_addr_t pf_addr, u8 vfid, u32 vf_addr_hi,
869 u32 vf_addr_lo, u32 len32)
870{
871 struct dmae_command dmae;
872
873 if (CHIP_IS_E1x(bp)) {
874 BNX2X_ERR("Chip revision does not support VFs\n");
875 return DMAE_NOT_RDY;
876 }
877
878 if (!bp->dmae_ready) {
879 BNX2X_ERR("DMAE is not ready, can not copy\n");
880 return DMAE_NOT_RDY;
881 }
882
883 /* set opcode and fixed command fields */
884 bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_PCI);
885
886 if (from_vf) {
887 dmae.opcode_iov = (vfid << DMAE_COMMAND_SRC_VFID_SHIFT) |
888 (DMAE_SRC_VF << DMAE_COMMAND_SRC_VFPF_SHIFT) |
889 (DMAE_DST_PF << DMAE_COMMAND_DST_VFPF_SHIFT);
890
891 dmae.opcode |= (DMAE_C_DST << DMAE_COMMAND_C_FUNC_SHIFT);
892
893 dmae.src_addr_lo = vf_addr_lo;
894 dmae.src_addr_hi = vf_addr_hi;
895 dmae.dst_addr_lo = U64_LO(pf_addr);
896 dmae.dst_addr_hi = U64_HI(pf_addr);
897 } else {
898 dmae.opcode_iov = (vfid << DMAE_COMMAND_DST_VFID_SHIFT) |
899 (DMAE_DST_VF << DMAE_COMMAND_DST_VFPF_SHIFT) |
900 (DMAE_SRC_PF << DMAE_COMMAND_SRC_VFPF_SHIFT);
901
902 dmae.opcode |= (DMAE_C_SRC << DMAE_COMMAND_C_FUNC_SHIFT);
903
904 dmae.src_addr_lo = U64_LO(pf_addr);
905 dmae.src_addr_hi = U64_HI(pf_addr);
906 dmae.dst_addr_lo = vf_addr_lo;
907 dmae.dst_addr_hi = vf_addr_hi;
908 }
909 dmae.len = len32;
fd1fc79d
AE
910
911 /* issue the command and wait for completion */
912 return bnx2x_issue_dmae_with_comp(bp, &dmae);
913}
914
8ca5e17e
AE
915static void bnx2x_vf_mbx_resp(struct bnx2x *bp, struct bnx2x_virtf *vf)
916{
917 struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);
918 u64 vf_addr;
919 dma_addr_t pf_addr;
920 u16 length, type;
921 int rc;
922 struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp;
923
924 /* prepare response */
925 type = mbx->first_tlv.tl.type;
926 length = type == CHANNEL_TLV_ACQUIRE ?
927 sizeof(struct pfvf_acquire_resp_tlv) :
928 sizeof(struct pfvf_general_resp_tlv);
929 bnx2x_add_tlv(bp, resp, 0, type, length);
930 resp->hdr.status = bnx2x_pfvf_status_codes(vf->op_rc);
931 bnx2x_add_tlv(bp, resp, length, CHANNEL_TLV_LIST_END,
932 sizeof(struct channel_list_end_tlv));
933 bnx2x_dp_tlv_list(bp, resp);
934 DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",
935 mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);
936
937 /* send response */
938 vf_addr = HILO_U64(mbx->vf_addr_hi, mbx->vf_addr_lo) +
939 mbx->first_tlv.resp_msg_offset;
940 pf_addr = mbx->msg_mapping +
941 offsetof(struct bnx2x_vf_mbx_msg, resp);
942
943 /* copy the response body, if there is one, before the header, as the vf
944 * is sensitive to the header being written
945 */
946 if (resp->hdr.tl.length > sizeof(u64)) {
947 length = resp->hdr.tl.length - sizeof(u64);
948 vf_addr += sizeof(u64);
949 pf_addr += sizeof(u64);
950 rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
951 U64_HI(vf_addr),
952 U64_LO(vf_addr),
953 length/4);
954 if (rc) {
955 BNX2X_ERR("Failed to copy response body to VF %d\n",
956 vf->abs_vfid);
f1929b01 957 goto mbx_error;
8ca5e17e
AE
958 }
959 vf_addr -= sizeof(u64);
960 pf_addr -= sizeof(u64);
961 }
962
963 /* ack the FW */
964 storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
965 mmiowb();
966
967 /* initiate dmae to send the response */
968 mbx->flags &= ~VF_MSG_INPROCESS;
969
970 /* copy the response header including status-done field,
971 * must be last dmae, must be after FW is acked
972 */
973 rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
974 U64_HI(vf_addr),
975 U64_LO(vf_addr),
976 sizeof(u64)/4);
977
978 /* unlock channel mutex */
979 bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);
980
981 if (rc) {
982 BNX2X_ERR("Failed to copy response status to VF %d\n",
983 vf->abs_vfid);
f1929b01 984 goto mbx_error;
8ca5e17e
AE
985 }
986 return;
f1929b01
AE
987
988mbx_error:
989 bnx2x_vf_release(bp, vf, false); /* non blocking */
8ca5e17e
AE
990}
991
/* Build and send the ACQUIRE response.
 * Advertises PF/chip capabilities and, when the request succeeded (or could
 * not be met - NO_RESOURCE), the resource counts: on success the actually
 * allocated resources, on NO_RESOURCE the maximum available ones so the VF
 * can lower its request and retry.
 */
static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
				      struct bnx2x_vf_mbx *mbx, int vfop_status)
{
	int i;
	struct pfvf_acquire_resp_tlv *resp = &mbx->msg->resp.acquire_resp;
	struct pf_vf_resc *resc = &resp->resc;
	/* translate internal return code to a PF-VF channel status code */
	u8 status = bnx2x_pfvf_status_codes(vfop_status);

	memset(resp, 0, sizeof(*resp));

	/* fill in pfdev info */
	resp->pfdev_info.chip_num = bp->common.chip_id;
	resp->pfdev_info.db_size = bp->db_size;
	resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2;
	resp->pfdev_info.pf_cap = (PFVF_CAP_RSS |
				   /* PFVF_CAP_DHC |*/ PFVF_CAP_TPA);
	bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver,
			  sizeof(resp->pfdev_info.fw_ver));

	if (status == PFVF_STATUS_NO_RESOURCE ||
	    status == PFVF_STATUS_SUCCESS) {
		/* set resources numbers, if status equals NO_RESOURCE these
		 * are max possible numbers
		 */
		resc->num_rxqs = vf_rxq_count(vf) ? :
			bnx2x_vf_max_queue_cnt(bp, vf);
		resc->num_txqs = vf_txq_count(vf) ? :
			bnx2x_vf_max_queue_cnt(bp, vf);
		resc->num_sbs = vf_sb_count(vf);
		resc->num_mac_filters = vf_mac_rules_cnt(vf);
		resc->num_vlan_filters = vf_vlan_rules_cnt(vf);
		resc->num_mc_filters = 0;

		if (status == PFVF_STATUS_SUCCESS) {
			/* fill in the allocated resources */
			struct pf_vf_bulletin_content *bulletin =
				BP_VF_BULLETIN(bp, vf->index);

			/* report the hw queue-zone id backing each vf queue */
			for_each_vfq(vf, i)
				resc->hw_qid[i] =
					vfq_qzone_id(vf, vfq_get(vf, i));

			/* report igu sb ids and their hc queue zones */
			for_each_vf_sb(vf, i) {
				resc->hw_sbs[i].hw_sb_id = vf_igu_sb(vf, i);
				resc->hw_sbs[i].sb_qid = vf_hc_qzone(vf, i);
			}

			/* if a mac has been set for this vf, supply it */
			if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID) {
				memcpy(resc->current_mac_addr, bulletin->mac,
				       ETH_ALEN);
			}
		}
	}

	DP(BNX2X_MSG_IOV, "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%x\n"
	   "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d, n_mcs-%d, fw_ver: '%s'\n",
	   vf->abs_vfid,
	   resp->pfdev_info.chip_num,
	   resp->pfdev_info.db_size,
	   resp->pfdev_info.indices_per_sb,
	   resp->pfdev_info.pf_cap,
	   resc->num_rxqs,
	   resc->num_txqs,
	   resc->num_sbs,
	   resc->num_mac_filters,
	   resc->num_vlan_filters,
	   resc->num_mc_filters,
	   resp->pfdev_info.fw_ver);

	DP_CONT(BNX2X_MSG_IOV, "hw_qids- [ ");
	for (i = 0; i < vf_rxq_count(vf); i++)
		DP_CONT(BNX2X_MSG_IOV, "%d ", resc->hw_qid[i]);
	DP_CONT(BNX2X_MSG_IOV, "], sb_info- [ ");
	for (i = 0; i < vf_sb_count(vf); i++)
		DP_CONT(BNX2X_MSG_IOV, "%d:%d ",
			resc->hw_sbs[i].hw_sb_id,
			resc->hw_sbs[i].sb_qid);
	DP_CONT(BNX2X_MSG_IOV, "]\n");

	/* send the response */
	vf->op_rc = vfop_status;
	bnx2x_vf_mbx_resp(bp, vf);
}
1076
1077static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
1078 struct bnx2x_vf_mbx *mbx)
1079{
1080 int rc;
1081 struct vfpf_acquire_tlv *acquire = &mbx->msg->req.acquire;
1082
1083 /* log vfdef info */
1084 DP(BNX2X_MSG_IOV,
1085 "VF[%d] ACQUIRE: vfdev_info- vf_id %d, vf_os %d resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d, n_mcs-%d\n",
1086 vf->abs_vfid, acquire->vfdev_info.vf_id, acquire->vfdev_info.vf_os,
1087 acquire->resc_request.num_rxqs, acquire->resc_request.num_txqs,
1088 acquire->resc_request.num_sbs, acquire->resc_request.num_mac_filters,
1089 acquire->resc_request.num_vlan_filters,
1090 acquire->resc_request.num_mc_filters);
1091
1092 /* acquire the resources */
1093 rc = bnx2x_vf_acquire(bp, vf, &acquire->resc_request);
1094
abc5a021
AE
1095 /* store address of vf's bulletin board */
1096 vf->bulletin_map = acquire->bulletin_addr;
1097
8ca5e17e
AE
1098 /* response */
1099 bnx2x_vf_mbx_acquire_resp(bp, vf, mbx, rc);
1100}
1101
b93288d5
AE
/* INIT handler: record the VF-side DMA ("ghost") addresses supplied in the
 * INIT tlv, initialize the VF in HW/FW, and respond.
 */
static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
				 struct bnx2x_vf_mbx *mbx)
{
	struct vfpf_init_tlv *init = &mbx->msg->req.init;

	/* record ghost addresses from vf message */
	vf->spq_map = init->spq_addr;
	vf->fw_stat_map = init->stats_addr;
	vf->stats_stride = init->stats_stride;
	vf->op_rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr);

	/* set VF multiqueue statistics collection mode
	 * NOTE(review): this flag is set after bnx2x_vf_init() already ran -
	 * confirm that init does not consume VF_CFG_STATS_COALESCE
	 */
	if (init->flags & VFPF_INIT_FLG_STATS_COALESCE)
		vf->cfg_flags |= VF_CFG_STATS_COALESCE;

	/* response */
	bnx2x_vf_mbx_resp(bp, vf);
}
1120
8db573ba 1121/* convert MBX queue-flags to standard SP queue-flags */
21776537 1122static void bnx2x_vf_mbx_set_q_flags(struct bnx2x *bp, u32 mbx_q_flags,
8db573ba
AE
1123 unsigned long *sp_q_flags)
1124{
1125 if (mbx_q_flags & VFPF_QUEUE_FLG_TPA)
1126 __set_bit(BNX2X_Q_FLG_TPA, sp_q_flags);
1127 if (mbx_q_flags & VFPF_QUEUE_FLG_TPA_IPV6)
1128 __set_bit(BNX2X_Q_FLG_TPA_IPV6, sp_q_flags);
1129 if (mbx_q_flags & VFPF_QUEUE_FLG_TPA_GRO)
1130 __set_bit(BNX2X_Q_FLG_TPA_GRO, sp_q_flags);
1131 if (mbx_q_flags & VFPF_QUEUE_FLG_STATS)
1132 __set_bit(BNX2X_Q_FLG_STATS, sp_q_flags);
8db573ba
AE
1133 if (mbx_q_flags & VFPF_QUEUE_FLG_VLAN)
1134 __set_bit(BNX2X_Q_FLG_VLAN, sp_q_flags);
1135 if (mbx_q_flags & VFPF_QUEUE_FLG_COS)
1136 __set_bit(BNX2X_Q_FLG_COS, sp_q_flags);
1137 if (mbx_q_flags & VFPF_QUEUE_FLG_HC)
1138 __set_bit(BNX2X_Q_FLG_HC, sp_q_flags);
1139 if (mbx_q_flags & VFPF_QUEUE_FLG_DHC)
1140 __set_bit(BNX2X_Q_FLG_DHC, sp_q_flags);
b9871bcf
AE
1141 if (mbx_q_flags & VFPF_QUEUE_FLG_LEADING_RSS)
1142 __set_bit(BNX2X_Q_FLG_LEADING_RSS, sp_q_flags);
21776537 1143
16a5fd92 1144 /* outer vlan removal is set according to PF's multi function mode */
21776537
AE
1145 if (IS_MF_SD(bp))
1146 __set_bit(BNX2X_Q_FLG_OV, sp_q_flags);
8db573ba
AE
1147}
1148
/* SETUP_Q handler: validate the requested queue id, translate the VF's
 * tx/rx queue parameters into a queue-constructor op context and launch an
 * asynchronous queue setup whose completion (cmd.done) sends the response.
 * On any immediate failure the response is sent synchronously instead.
 */
static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
				 struct bnx2x_vf_mbx *mbx)
{
	struct vfpf_setup_q_tlv *setup_q = &mbx->msg->req.setup_q;
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vf_mbx_resp,	/* respond when the op ends */
		.block = false,
	};

	/* verify vf_qid */
	if (setup_q->vf_qid >= vf_rxq_count(vf)) {
		BNX2X_ERR("vf_qid %d invalid, max queue count is %d\n",
			  setup_q->vf_qid, vf_rxq_count(vf));
		vf->op_rc = -EINVAL;
		goto response;
	}

	/* tx queues must be setup alongside rx queues thus if the rx queue
	 * is not marked as valid there's nothing to do.
	 */
	if (setup_q->param_valid & (VFPF_RXQ_VALID|VFPF_TXQ_VALID)) {
		struct bnx2x_vf_queue *q = vfq_get(vf, setup_q->vf_qid);
		unsigned long q_type = 0;

		struct bnx2x_queue_init_params *init_p;
		struct bnx2x_queue_setup_params *setup_p;

		/* the leading queue carries the RSS configuration */
		if (bnx2x_vfq_is_leading(q))
			bnx2x_leading_vfq_init(bp, vf, q);

		/* re-init the VF operation context */
		memset(&vf->op_params.qctor, 0 , sizeof(vf->op_params.qctor));
		setup_p = &vf->op_params.qctor.prep_qsetup;
		init_p = &vf->op_params.qctor.qstate.params.init;

		/* activate immediately */
		__set_bit(BNX2X_Q_FLG_ACTIVE, &setup_p->flags);

		if (setup_q->param_valid & VFPF_TXQ_VALID) {
			struct bnx2x_txq_setup_params *txq_params =
				&setup_p->txq_params;

			__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);

			/* save sb resource index */
			q->sb_idx = setup_q->txq.vf_sb;

			/* tx init */
			init_p->tx.hc_rate = setup_q->txq.hc_rate;
			init_p->tx.sb_cq_index = setup_q->txq.sb_index;

			/* translate channel flags into init flags */
			bnx2x_vf_mbx_set_q_flags(bp, setup_q->txq.flags,
						 &init_p->tx.flags);

			/* tx setup - flags */
			bnx2x_vf_mbx_set_q_flags(bp, setup_q->txq.flags,
						 &setup_p->flags);

			/* tx setup - general, nothing */

			/* tx setup - tx */
			txq_params->dscr_map = setup_q->txq.txq_addr;
			txq_params->sb_cq_index = setup_q->txq.sb_index;
			txq_params->traffic_type = setup_q->txq.traffic_type;

			bnx2x_vfop_qctor_dump_tx(bp, vf, init_p, setup_p,
						 q->index, q->sb_idx);
		}

		if (setup_q->param_valid & VFPF_RXQ_VALID) {
			struct bnx2x_rxq_setup_params *rxq_params =
				&setup_p->rxq_params;

			__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);

			/* Note: there is no support for different SBs
			 * for TX and RX
			 */
			q->sb_idx = setup_q->rxq.vf_sb;

			/* rx init */
			init_p->rx.hc_rate = setup_q->rxq.hc_rate;
			init_p->rx.sb_cq_index = setup_q->rxq.sb_index;
			bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags,
						 &init_p->rx.flags);

			/* rx setup - flags */
			bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags,
						 &setup_p->flags);

			/* rx setup - general */
			setup_p->gen_params.mtu = setup_q->rxq.mtu;

			/* rx setup - rx: VF-supplied DMA addresses and sizes
			 * for the rx descriptor ring, sge ring and rcq
			 */
			rxq_params->drop_flags = setup_q->rxq.drop_flags;
			rxq_params->dscr_map = setup_q->rxq.rxq_addr;
			rxq_params->sge_map = setup_q->rxq.sge_addr;
			rxq_params->rcq_map = setup_q->rxq.rcq_addr;
			rxq_params->rcq_np_map = setup_q->rxq.rcq_np_addr;
			rxq_params->buf_sz = setup_q->rxq.buf_sz;
			rxq_params->tpa_agg_sz = setup_q->rxq.tpa_agg_sz;
			rxq_params->max_sges_pkt = setup_q->rxq.max_sge_pkt;
			rxq_params->sge_buf_sz = setup_q->rxq.sge_buf_sz;
			rxq_params->cache_line_log =
				setup_q->rxq.cache_line_log;
			rxq_params->sb_cq_index = setup_q->rxq.sb_index;

			bnx2x_vfop_qctor_dump_rx(bp, vf, init_p, setup_p,
						 q->index, q->sb_idx);
		}
		/* complete the preparations */
		bnx2x_vfop_qctor_prep(bp, vf, q, &vf->op_params.qctor, q_type);

		vf->op_rc = bnx2x_vfop_qsetup_cmd(bp, vf, &cmd, q->index);
		if (vf->op_rc)
			goto response;
		return;
	}
response:
	bnx2x_vf_mbx_resp(bp, vf);
}
1270
954ea748
AE
/* states of the Q_FILTERS op state machine (bnx2x_vfop_mbx_qfilters);
 * each state handles one filter category and falls through to the next
 * when there is nothing to configure
 */
enum bnx2x_vfop_filters_state {
	BNX2X_VFOP_MBX_Q_FILTERS_MACS,		/* configure mac list */
	BNX2X_VFOP_MBX_Q_FILTERS_VLANS,		/* configure vlan list */
	BNX2X_VFOP_MBX_Q_FILTERS_RXMODE,	/* set rx-mode accept flags */
	BNX2X_VFOP_MBX_Q_FILTERS_MCAST,		/* configure multicast list */
	BNX2X_VFOP_MBX_Q_FILTERS_DONE		/* terminal state */
};
1278
1279static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp,
1280 struct bnx2x_virtf *vf,
1281 struct vfpf_set_q_filters_tlv *tlv,
1282 struct bnx2x_vfop_filters **pfl,
1283 u32 type_flag)
1284{
1285 int i, j;
1286 struct bnx2x_vfop_filters *fl = NULL;
1287 size_t fsz;
1288
1289 fsz = tlv->n_mac_vlan_filters * sizeof(struct bnx2x_vfop_filter) +
1290 sizeof(struct bnx2x_vfop_filters);
1291
1292 fl = kzalloc(fsz, GFP_KERNEL);
1293 if (!fl)
1294 return -ENOMEM;
1295
1296 INIT_LIST_HEAD(&fl->head);
1297
1298 for (i = 0, j = 0; i < tlv->n_mac_vlan_filters; i++) {
1299 struct vfpf_q_mac_vlan_filter *msg_filter = &tlv->filters[i];
1300
1301 if ((msg_filter->flags & type_flag) != type_flag)
1302 continue;
1303 if (type_flag == VFPF_Q_FILTER_DEST_MAC_VALID) {
1304 fl->filters[j].mac = msg_filter->mac;
1305 fl->filters[j].type = BNX2X_VFOP_FILTER_MAC;
1306 } else {
1307 fl->filters[j].vid = msg_filter->vlan_tag;
1308 fl->filters[j].type = BNX2X_VFOP_FILTER_VLAN;
1309 }
1310 fl->filters[j].add =
1311 (msg_filter->flags & VFPF_Q_FILTER_SET_MAC) ?
1312 true : false;
1313 list_add_tail(&fl->filters[j++].link, &fl->head);
1314 }
1315 if (list_empty(&fl->head))
1316 kfree(fl);
1317 else
1318 *pfl = fl;
1319
1320 return 0;
1321}
1322
1323static void bnx2x_vf_mbx_dp_q_filter(struct bnx2x *bp, int msglvl, int idx,
1324 struct vfpf_q_mac_vlan_filter *filter)
1325{
1326 DP(msglvl, "MAC-VLAN[%d] -- flags=0x%x\n", idx, filter->flags);
1327 if (filter->flags & VFPF_Q_FILTER_VLAN_TAG_VALID)
1328 DP_CONT(msglvl, ", vlan=%d", filter->vlan_tag);
1329 if (filter->flags & VFPF_Q_FILTER_DEST_MAC_VALID)
1330 DP_CONT(msglvl, ", MAC=%pM", filter->mac);
1331 DP_CONT(msglvl, "\n");
1332}
1333
1334static void bnx2x_vf_mbx_dp_q_filters(struct bnx2x *bp, int msglvl,
1335 struct vfpf_set_q_filters_tlv *filters)
1336{
1337 int i;
1338
1339 if (filters->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED)
1340 for (i = 0; i < filters->n_mac_vlan_filters; i++)
1341 bnx2x_vf_mbx_dp_q_filter(bp, msglvl, i,
1342 &filters->filters[i]);
1343
1344 if (filters->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED)
1345 DP(msglvl, "RX-MASK=0x%x\n", filters->rx_mask);
1346
1347 if (filters->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED)
1348 for (i = 0; i < filters->n_multicast; i++)
1349 DP(msglvl, "MULTICAST=%pM\n", filters->multicast[i]);
1350}
1351
/* convenience aliases: the "valid" flags double as the filter-type
 * selectors passed to bnx2x_vf_mbx_macvlan_list()
 */
#define VFPF_MAC_FILTER VFPF_Q_FILTER_DEST_MAC_VALID
#define VFPF_VLAN_FILTER VFPF_Q_FILTER_VLAN_TAG_VALID
1354
/* Asynchronous state machine applying the filter configuration a VF
 * requested via SET_Q_FILTERS: macs -> vlans -> rx-mode -> mcast -> done.
 * Each state issues at most one async vfop command and returns; this
 * function is then re-entered as that command's completion callback
 * (cmd.done below) in the next state.  Fall-through between cases is
 * intentional: a state with nothing to do proceeds directly to the next.
 */
static void bnx2x_vfop_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int rc;

	struct vfpf_set_q_filters_tlv *msg =
		&BP_VF_MBX(bp, vf->index)->msg->req.set_q_filters;

	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	enum bnx2x_vfop_filters_state state = vfop->state;

	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vfop_mbx_qfilters,	/* re-enter here */
		.block = false,
	};

	DP(BNX2X_MSG_IOV, "STATE: %d\n", state);

	/* a previously issued sub-command failed - abort the whole op */
	if (vfop->rc < 0)
		goto op_err;

	switch (state) {
	case BNX2X_VFOP_MBX_Q_FILTERS_MACS:
		/* next state */
		vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_VLANS;

		/* check for any vlan/mac changes */
		if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
			/* build mac list */
			struct bnx2x_vfop_filters *fl = NULL;

			vfop->rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
							     VFPF_MAC_FILTER);
			if (vfop->rc)
				goto op_err;

			if (fl) {
				/* set mac list */
				rc = bnx2x_vfop_mac_list_cmd(bp, vf, &cmd, fl,
							     msg->vf_qid,
							     false);
				if (rc) {
					vfop->rc = rc;
					goto op_err;
				}
				/* async command issued - wait for cmd.done */
				return;
			}
		}
		/* fall through */

	case BNX2X_VFOP_MBX_Q_FILTERS_VLANS:
		/* next state */
		vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_RXMODE;

		/* check for any vlan/mac changes */
		if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
			/* build vlan list */
			struct bnx2x_vfop_filters *fl = NULL;

			vfop->rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
							     VFPF_VLAN_FILTER);
			if (vfop->rc)
				goto op_err;

			if (fl) {
				/* set vlan list */
				rc = bnx2x_vfop_vlan_list_cmd(bp, vf, &cmd, fl,
							      msg->vf_qid,
							      false);
				if (rc) {
					vfop->rc = rc;
					goto op_err;
				}
				/* async command issued - wait for cmd.done */
				return;
			}
		}
		/* fall through */

	case BNX2X_VFOP_MBX_Q_FILTERS_RXMODE:
		/* next state */
		vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_MCAST;

		if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
			unsigned long accept = 0;

			/* covert VF-PF if mask to bnx2x accept flags */
			if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST)
				__set_bit(BNX2X_ACCEPT_UNICAST, &accept);

			if (msg->rx_mask &
			    VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST)
				__set_bit(BNX2X_ACCEPT_MULTICAST, &accept);

			if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_ALL_UNICAST)
				__set_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept);

			if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_ALL_MULTICAST)
				__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept);

			if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_BROADCAST)
				__set_bit(BNX2X_ACCEPT_BROADCAST, &accept);

			/* A packet arriving the vf's mac should be accepted
			 * with any vlan
			 */
			__set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);

			/* set rx-mode */
			rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd,
						   msg->vf_qid, accept);
			if (rc) {
				vfop->rc = rc;
				goto op_err;
			}
			/* async command issued - wait for cmd.done */
			return;
		}
		/* fall through */

	case BNX2X_VFOP_MBX_Q_FILTERS_MCAST:
		/* next state */
		vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_DONE;

		if (msg->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED) {
			/* set mcasts */
			rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, msg->multicast,
						  msg->n_multicast, false);
			if (rc) {
				vfop->rc = rc;
				goto op_err;
			}
			/* async command issued - wait for cmd.done */
			return;
		}
		/* fall through */
/* note: label deliberately inside the switch so op_err can reach the
 * DONE handling and end the op with the recorded error code
 */
op_done:
	case BNX2X_VFOP_MBX_Q_FILTERS_DONE:
		bnx2x_vfop_end(bp, vf, vfop);
		return;
op_err:
	BNX2X_ERR("QFILTERS[%d:%d] error: rc %d\n",
		  vf->abs_vfid, msg->vf_qid, vfop->rc);
	goto op_done;

	default:
		bnx2x_vfop_default(state);
	}
}
1500
1501static int bnx2x_vfop_mbx_qfilters_cmd(struct bnx2x *bp,
1502 struct bnx2x_virtf *vf,
1503 struct bnx2x_vfop_cmd *cmd)
1504{
1505 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
1506 if (vfop) {
1507 bnx2x_vfop_opset(BNX2X_VFOP_MBX_Q_FILTERS_MACS,
1508 bnx2x_vfop_mbx_qfilters, cmd->done);
1509 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mbx_qfilters,
1510 cmd->block);
1511 }
1512 return -ENOMEM;
1513}
1514
/* SET_Q_FILTERS handler: police the request against a mac forced via the
 * set-vf-mac ndo (if any), then launch the asynchronous Q_FILTERS state
 * machine whose completion sends the response.
 */
static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
				       struct bnx2x_virtf *vf,
				       struct bnx2x_vf_mbx *mbx)
{
	struct vfpf_set_q_filters_tlv *filters = &mbx->msg->req.set_q_filters;
	struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index);
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vf_mbx_resp,	/* respond when op ends */
		.block = false,
	};

	/* if a mac was already set for this VF via the set vf mac ndo, we only
	 * accept mac configurations of that mac. Why accept them at all?
	 * because PF may have been unable to configure the mac at the time
	 * since queue was not set up.
	 */
	if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID) {
		/* once a mac was set by ndo can only accept a single mac... */
		if (filters->n_mac_vlan_filters > 1) {
			BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called\n",
				  vf->abs_vfid);
			vf->op_rc = -EPERM;
			goto response;
		}

		/* ...and only the mac set by the ndo */
		if (filters->n_mac_vlan_filters == 1 &&
		    memcmp(filters->filters->mac, bulletin->mac, ETH_ALEN)) {
			BNX2X_ERR("VF[%d] requested the addition of a mac address not matching the one configured by set_vf_mac ndo\n",
				  vf->abs_vfid);

			vf->op_rc = -EPERM;
			goto response;
		}
	}

	/* verify vf_qid
	 * NOTE(review): '>' lets vf_qid == vf_rxq_count(vf) through while
	 * bnx2x_vf_mbx_setup_q() rejects with '>='; also vf->op_rc is not
	 * set on this path, so the response carries whatever status was
	 * there before - confirm this is intended
	 */
	if (filters->vf_qid > vf_rxq_count(vf))
		goto response;

	DP(BNX2X_MSG_IOV, "VF[%d] Q_FILTERS: queue[%d]\n",
	   vf->abs_vfid,
	   filters->vf_qid);

	/* print q_filter message */
	bnx2x_vf_mbx_dp_q_filters(bp, BNX2X_MSG_IOV, filters);

	vf->op_rc = bnx2x_vfop_mbx_qfilters_cmd(bp, vf, &cmd);
	if (vf->op_rc)
		goto response;
	return;

response:
	bnx2x_vf_mbx_resp(bp, vf);
}
1570
463a68a7
AE
1571static void bnx2x_vf_mbx_teardown_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
1572 struct bnx2x_vf_mbx *mbx)
1573{
1574 int qid = mbx->msg->req.q_op.vf_qid;
1575 struct bnx2x_vfop_cmd cmd = {
1576 .done = bnx2x_vf_mbx_resp,
1577 .block = false,
1578 };
1579
1580 DP(BNX2X_MSG_IOV, "VF[%d] Q_TEARDOWN: vf_qid=%d\n",
1581 vf->abs_vfid, qid);
1582
1583 vf->op_rc = bnx2x_vfop_qdown_cmd(bp, vf, &cmd, qid);
1584 if (vf->op_rc)
1585 bnx2x_vf_mbx_resp(bp, vf);
1586}
1587
99e9d211
AE
1588static void bnx2x_vf_mbx_close_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
1589 struct bnx2x_vf_mbx *mbx)
1590{
1591 struct bnx2x_vfop_cmd cmd = {
1592 .done = bnx2x_vf_mbx_resp,
1593 .block = false,
1594 };
1595
1596 DP(BNX2X_MSG_IOV, "VF[%d] VF_CLOSE\n", vf->abs_vfid);
1597
1598 vf->op_rc = bnx2x_vfop_close_cmd(bp, vf, &cmd);
1599 if (vf->op_rc)
1600 bnx2x_vf_mbx_resp(bp, vf);
1601}
1602
f1929b01
AE
1603static void bnx2x_vf_mbx_release_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
1604 struct bnx2x_vf_mbx *mbx)
1605{
1606 struct bnx2x_vfop_cmd cmd = {
1607 .done = bnx2x_vf_mbx_resp,
1608 .block = false,
1609 };
1610
1611 DP(BNX2X_MSG_IOV, "VF[%d] VF_RELEASE\n", vf->abs_vfid);
1612
1613 vf->op_rc = bnx2x_vfop_release_cmd(bp, vf, &cmd);
1614 if (vf->op_rc)
1615 bnx2x_vf_mbx_resp(bp, vf);
1616}
1617
b9871bcf
AE
/* UPDATE_RSS handler: validate the VF's rss tlv, translate it into vfop
 * rss parameters and launch an asynchronous rss configuration whose
 * completion sends the response; on validation failure respond at once.
 */
static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf,
				    struct bnx2x_vf_mbx *mbx)
{
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vf_mbx_resp,	/* respond when op ends */
		.block = false,
	};
	struct bnx2x_config_rss_params *vf_op_params = &vf->op_params.rss;
	struct vfpf_rss_tlv *rss_tlv = &mbx->msg->req.update_rss;

	/* reject tables whose sizes don't match what the PF expects */
	if (rss_tlv->ind_table_size != T_ETH_INDIRECTION_TABLE_SIZE ||
	    rss_tlv->rss_key_size != T_ETH_RSS_KEY) {
		BNX2X_ERR("failing rss configuration of vf %d due to size mismatch\n",
			  vf->index);
		vf->op_rc = -EINVAL;
		goto mbx_resp;
	}

	/* set vfop params according to rss tlv */
	memcpy(vf_op_params->ind_table, rss_tlv->ind_table,
	       T_ETH_INDIRECTION_TABLE_SIZE);
	memcpy(vf_op_params->rss_key, rss_tlv->rss_key,
	       sizeof(rss_tlv->rss_key));
	vf_op_params->rss_obj = &vf->rss_conf_obj;
	vf_op_params->rss_result_mask = rss_tlv->rss_result_mask;

	/* flags handled individually for backward/forward compatibility */
	if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED)
		__set_bit(BNX2X_RSS_MODE_DISABLED, &vf_op_params->rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR)
		__set_bit(BNX2X_RSS_MODE_REGULAR, &vf_op_params->rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_SET_SRCH)
		__set_bit(BNX2X_RSS_SET_SRCH, &vf_op_params->rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_IPV4)
		__set_bit(BNX2X_RSS_IPV4, &vf_op_params->rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP)
		__set_bit(BNX2X_RSS_IPV4_TCP, &vf_op_params->rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP)
		__set_bit(BNX2X_RSS_IPV4_UDP, &vf_op_params->rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_IPV6)
		__set_bit(BNX2X_RSS_IPV6, &vf_op_params->rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP)
		__set_bit(BNX2X_RSS_IPV6_TCP, &vf_op_params->rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)
		__set_bit(BNX2X_RSS_IPV6_UDP, &vf_op_params->rss_flags);

	/* UDP rss without the matching TCP rss would trip a firmware
	 * assert - fail the request instead
	 */
	if ((!(rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP) &&
	     rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP) ||
	    (!(rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP) &&
	     rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)) {
		BNX2X_ERR("about to hit a FW assert. aborting...\n");
		vf->op_rc = -EINVAL;
		goto mbx_resp;
	}

	vf->op_rc = bnx2x_vfop_rss_cmd(bp, vf, &cmd);

mbx_resp:
	/* respond immediately only on failure; success responds via cmd */
	if (vf->op_rc)
		bnx2x_vf_mbx_resp(bp, vf);
}
1679
fd1fc79d
AE
/* dispatch request: route a validated VF message to its tlv-type handler.
 * For known tlvs the per-VF channel is locked here and released by the mbx
 * response path; unknown tlvs get a NOT_SUPPORTED response when possible,
 * otherwise only a FW ack + unlock so the mailbox is not left stuck.
 */
static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
				 struct bnx2x_vf_mbx *mbx)
{
	int i;

	/* check if tlv type is known */
	if (bnx2x_tlv_supported(mbx->first_tlv.tl.type)) {
		/* Lock the per vf op mutex and note the locker's identity.
		 * The unlock will take place in mbx response.
		 */
		bnx2x_lock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);

		/* switch on the opcode */
		switch (mbx->first_tlv.tl.type) {
		case CHANNEL_TLV_ACQUIRE:
			bnx2x_vf_mbx_acquire(bp, vf, mbx);
			break;
		case CHANNEL_TLV_INIT:
			bnx2x_vf_mbx_init_vf(bp, vf, mbx);
			break;
		case CHANNEL_TLV_SETUP_Q:
			bnx2x_vf_mbx_setup_q(bp, vf, mbx);
			break;
		case CHANNEL_TLV_SET_Q_FILTERS:
			bnx2x_vf_mbx_set_q_filters(bp, vf, mbx);
			break;
		case CHANNEL_TLV_TEARDOWN_Q:
			bnx2x_vf_mbx_teardown_q(bp, vf, mbx);
			break;
		case CHANNEL_TLV_CLOSE:
			bnx2x_vf_mbx_close_vf(bp, vf, mbx);
			break;
		case CHANNEL_TLV_RELEASE:
			bnx2x_vf_mbx_release_vf(bp, vf, mbx);
			break;
		case CHANNEL_TLV_UPDATE_RSS:
			bnx2x_vf_mbx_update_rss(bp, vf, mbx);
			break;
		}

	} else {
		/* unknown TLV - this may belong to a VF driver from the future
		 * - a version written after this PF driver was written, which
		 * supports features unknown as of yet. Too bad since we don't
		 * support them. Or this may be because someone wrote a crappy
		 * VF driver and is sending garbage over the channel.
		 */
		BNX2X_ERR("unknown TLV. type %d length %d vf->state was %d. first 20 bytes of mailbox buffer:\n",
			  mbx->first_tlv.tl.type, mbx->first_tlv.tl.length,
			  vf->state);
		for (i = 0; i < 20; i++)
			DP_CONT(BNX2X_MSG_IOV, "%x ",
				mbx->msg->req.tlv_buf_size.tlv_buffer[i]);

		/* test whether we can respond to the VF (do we have an address
		 * for it?)
		 */
		if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) {
			/* mbx_resp uses the op_rc of the VF */
			vf->op_rc = PFVF_STATUS_NOT_SUPPORTED;

			/* notify the VF that we do not support this request */
			bnx2x_vf_mbx_resp(bp, vf);
		} else {
			/* can't send a response since this VF is unknown to us
			 * just ack the FW to release the mailbox and unlock
			 * the channel.
			 */
			storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
			mmiowb();
			bnx2x_unlock_vf_pf_channel(bp, vf,
						   mbx->first_tlv.tl.type);
		}
	}
}
1756
1757/* handle new vf-pf message */
1758void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event)
1759{
1760 struct bnx2x_virtf *vf;
1761 struct bnx2x_vf_mbx *mbx;
1762 u8 vf_idx;
1763 int rc;
1764
1765 DP(BNX2X_MSG_IOV,
1766 "vf pf event received: vfid %d, address_hi %x, address lo %x",
1767 vfpf_event->vf_id, vfpf_event->msg_addr_hi, vfpf_event->msg_addr_lo);
1768 /* Sanity checks consider removing later */
1769
1770 /* check if the vf_id is valid */
1771 if (vfpf_event->vf_id - BP_VFDB(bp)->sriov.first_vf_in_pf >
1772 BNX2X_NR_VIRTFN(bp)) {
1773 BNX2X_ERR("Illegal vf_id %d max allowed: %d\n",
1774 vfpf_event->vf_id, BNX2X_NR_VIRTFN(bp));
1775 goto mbx_done;
1776 }
1777 vf_idx = bnx2x_vf_idx_by_abs_fid(bp, vfpf_event->vf_id);
1778 mbx = BP_VF_MBX(bp, vf_idx);
1779
1780 /* verify an event is not currently being processed -
1781 * debug failsafe only
1782 */
1783 if (mbx->flags & VF_MSG_INPROCESS) {
1784 BNX2X_ERR("Previous message is still being processed, vf_id %d\n",
1785 vfpf_event->vf_id);
1786 goto mbx_done;
1787 }
1788 vf = BP_VF(bp, vf_idx);
1789
1790 /* save the VF message address */
1791 mbx->vf_addr_hi = vfpf_event->msg_addr_hi;
1792 mbx->vf_addr_lo = vfpf_event->msg_addr_lo;
1793 DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",
1794 mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);
1795
1796 /* dmae to get the VF request */
1797 rc = bnx2x_copy32_vf_dmae(bp, true, mbx->msg_mapping, vf->abs_vfid,
1798 mbx->vf_addr_hi, mbx->vf_addr_lo,
1799 sizeof(union vfpf_tlvs)/4);
1800 if (rc) {
1801 BNX2X_ERR("Failed to copy request VF %d\n", vf->abs_vfid);
1802 goto mbx_error;
1803 }
1804
1805 /* process the VF message header */
1806 mbx->first_tlv = mbx->msg->req.first_tlv;
1807
1808 /* dispatch the request (will prepare the response) */
1809 bnx2x_vf_mbx_request(bp, vf, mbx);
1810 goto mbx_done;
1811
1812mbx_error:
f1929b01 1813 bnx2x_vf_release(bp, vf, false); /* non blocking */
fd1fc79d
AE
1814mbx_done:
1815 return;
1816}
abc5a021
AE
1817
1818/* propagate local bulletin board to vf */
1819int bnx2x_post_vf_bulletin(struct bnx2x *bp, int vf)
1820{
1821 struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf);
1822 dma_addr_t pf_addr = BP_VF_BULLETIN_DMA(bp)->mapping +
1823 vf * BULLETIN_CONTENT_SIZE;
1824 dma_addr_t vf_addr = bnx2x_vf(bp, vf, bulletin_map);
abc5a021
AE
1825 int rc;
1826
1827 /* can only update vf after init took place */
1828 if (bnx2x_vf(bp, vf, state) != VF_ENABLED &&
1829 bnx2x_vf(bp, vf, state) != VF_ACQUIRED)
1830 return 0;
1831
1832 /* increment bulletin board version and compute crc */
1833 bulletin->version++;
4c133c39 1834 bulletin->length = BULLETIN_CONTENT_SIZE;
abc5a021
AE
1835 bulletin->crc = bnx2x_crc_vf_bulletin(bp, bulletin);
1836
1837 /* propagate bulletin board via dmae to vm memory */
1838 rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr,
1839 bnx2x_vf(bp, vf, abs_vfid), U64_HI(vf_addr),
4c133c39 1840 U64_LO(vf_addr), bulletin->length / 4);
abc5a021
AE
1841 return rc;
1842}