/*
 * Linux network driver for QLogic BR-series Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014-2015 QLogic Corporation
 */
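
/*
 * This file implements the receive-path objects of the BNA: the RxF
 * (receive function: MAC filters, VLAN tables, RSS) and the Rx (receive
 * and completion queues), each driven by a bfa state machine and
 * configured through BFI mailbox commands posted to the IOC message queue.
 */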

bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
	ib->coalescing_timeo = coalescing_timeo;
	ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
				(u32)ib->coalescing_timeo, 0);
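
/*
 * Note: the ack word cached in door_bell.doorbell_ack embeds the
 * coalescing timeout, so each interrupt acknowledgment written to the IB
 * doorbell also re-arms the block with the currently configured timeout.
 */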

#define bna_rxf_vlan_cfg_soft_reset(rxf) \
do { \
	(rxf)->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL; \
	(rxf)->vlan_strip_pending = true; \
} while (0)

#define bna_rxf_rss_cfg_soft_reset(rxf) \
do { \
	if ((rxf)->rss_status == BNA_STATUS_T_ENABLED) \
		(rxf)->rss_pending = (BNA_RSS_F_RIT_PENDING | \
				BNA_RSS_F_CFG_PENDING | \
				BNA_RSS_F_STATUS_PENDING); \
} while (0)
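
/*
 * A "soft reset" never touches the hardware; it only marks every VLAN
 * block (and, when RSS is enabled, all three RSS sub-configurations) as
 * pending again, so they are re-downloaded the next time the RxF runs
 * through its cfg_apply path.
 */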

static int bna_rxf_cfg_apply(struct bna_rxf *rxf);
static void bna_rxf_cfg_reset(struct bna_rxf *rxf);
static int bna_rxf_fltr_clear(struct bna_rxf *rxf);
static int bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf,
				   enum bna_cleanup_type cleanup);
static int bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf,
				     enum bna_cleanup_type cleanup);
static int bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf,
				      enum bna_cleanup_type cleanup);

bfa_fsm_state_decl(bna_rxf, stopped, struct bna_rxf,
		   enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, paused, struct bna_rxf,
		   enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, cfg_wait, struct bna_rxf,
		   enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, started, struct bna_rxf,
		   enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, fltr_clr_wait, struct bna_rxf,
		   enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, last_resp_wait, struct bna_rxf,
		   enum bna_rxf_event);
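
/*
 * Each bfa_fsm_state_decl() above expands into prototypes for a
 * state-entry handler (bna_rxf_sm_<state>_entry) and an event handler
 * (bna_rxf_sm_<state>); both are defined below and driven through
 * bfa_fsm_set_state() and bfa_fsm_send_event().
 */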

bna_rxf_sm_stopped_entry(struct bna_rxf *rxf)
	call_rxf_stop_cbfn(rxf);

bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event)

	if (rxf->flags & BNA_RXF_F_PAUSED) {
		bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
		call_rxf_start_cbfn(rxf);
	} else
		bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);

	call_rxf_stop_cbfn(rxf);

	call_rxf_cam_fltr_cbfn(rxf);

	rxf->flags |= BNA_RXF_F_PAUSED;

	rxf->flags &= ~BNA_RXF_F_PAUSED;

bna_rxf_sm_paused_entry(struct bna_rxf *rxf)

bna_rxf_sm_paused(struct bna_rxf *rxf, enum bna_rxf_event event)

	bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);

	call_rxf_cam_fltr_cbfn(rxf);

	rxf->flags &= ~BNA_RXF_F_PAUSED;
	bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);

bna_rxf_sm_cfg_wait_entry(struct bna_rxf *rxf)
	if (!bna_rxf_cfg_apply(rxf)) {
		/* No more pending config updates */
		bfa_fsm_set_state(rxf, bna_rxf_sm_started);
	}

bna_rxf_sm_cfg_wait(struct bna_rxf *rxf, enum bna_rxf_event event)

	bfa_fsm_set_state(rxf, bna_rxf_sm_last_resp_wait);

	bna_rxf_cfg_reset(rxf);
	call_rxf_start_cbfn(rxf);
	call_rxf_cam_fltr_cbfn(rxf);
	bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);

	rxf->flags |= BNA_RXF_F_PAUSED;
	call_rxf_start_cbfn(rxf);
	bfa_fsm_set_state(rxf, bna_rxf_sm_fltr_clr_wait);

	if (!bna_rxf_cfg_apply(rxf)) {
		/* No more pending config updates */
		bfa_fsm_set_state(rxf, bna_rxf_sm_started);
	}

bna_rxf_sm_started_entry(struct bna_rxf *rxf)
	call_rxf_start_cbfn(rxf);
	call_rxf_cam_fltr_cbfn(rxf);

bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event)

	bna_rxf_cfg_reset(rxf);
	bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);

	bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);

	rxf->flags |= BNA_RXF_F_PAUSED;
	if (!bna_rxf_fltr_clear(rxf))
		bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
	else
		bfa_fsm_set_state(rxf, bna_rxf_sm_fltr_clr_wait);

bna_rxf_sm_fltr_clr_wait_entry(struct bna_rxf *rxf)

bna_rxf_sm_fltr_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)

	bna_rxf_cfg_reset(rxf);
	bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);

	if (!bna_rxf_fltr_clear(rxf)) {
		/* No more pending CAM entries to clear */
		bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
	}

bna_rxf_sm_last_resp_wait_entry(struct bna_rxf *rxf)

bna_rxf_sm_last_resp_wait(struct bna_rxf *rxf, enum bna_rxf_event event)

	bna_rxf_cfg_reset(rxf);
	bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);

bna_bfi_ucast_req(struct bna_rxf *rxf, struct bna_mac *mac,
		  enum bfi_enet_h2i_msgs req_type)
	struct bfi_enet_ucast_req *req = &rxf->bfi_enet_cmd.ucast_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, req_type, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_ucast_req)));
	ether_addr_copy(req->mac_addr, mac->addr);
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
			 sizeof(struct bfi_enet_ucast_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
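
/*
 * The bna_bfi_*_req() helpers share one pattern: build a BFI message
 * header addressed to this Rx function's rid, record how many mailbox
 * entries the command occupies, then attach the command to rxf->msgq_cmd
 * and post it on the IOC message queue.  The firmware's answer comes back
 * through bna_bfi_rxf_cfg_rsp() and friends as an RXF_E_FW_RESP event.
 */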

bna_bfi_mcast_add_req(struct bna_rxf *rxf, struct bna_mac *mac)
	struct bfi_enet_mcast_add_req *req =
		&rxf->bfi_enet_cmd.mcast_add_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_ADD_REQ,
			  0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_add_req)));
	ether_addr_copy(req->mac_addr, mac->addr);
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
			 sizeof(struct bfi_enet_mcast_add_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);

bna_bfi_mcast_del_req(struct bna_rxf *rxf, u16 handle)
	struct bfi_enet_mcast_del_req *req =
		&rxf->bfi_enet_cmd.mcast_del_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_DEL_REQ,
			  0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_del_req)));
	req->handle = htons(handle);
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
			 sizeof(struct bfi_enet_mcast_del_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);

bna_bfi_mcast_filter_req(struct bna_rxf *rxf, enum bna_status status)
	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
			  BFI_ENET_H2I_MAC_MCAST_FILTER_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	req->enable = status;
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
			 sizeof(struct bfi_enet_enable_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);

bna_bfi_rx_promisc_req(struct bna_rxf *rxf, enum bna_status status)
	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
			  BFI_ENET_H2I_RX_PROMISCUOUS_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	req->enable = status;
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
			 sizeof(struct bfi_enet_enable_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);

bna_bfi_rx_vlan_filter_set(struct bna_rxf *rxf, u8 block_idx)
	struct bfi_enet_rx_vlan_req *req = &rxf->bfi_enet_cmd.vlan_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
			  BFI_ENET_H2I_RX_VLAN_SET_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_vlan_req)));
	req->block_idx = block_idx;
	for (i = 0; i < (BFI_ENET_VLAN_BLOCK_SIZE / 32); i++) {
		j = (block_idx * (BFI_ENET_VLAN_BLOCK_SIZE / 32)) + i;
		if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED)
			req->bit_mask[i] =
				htonl(rxf->vlan_filter_table[j]);
		else
			req->bit_mask[i] = 0xFFFFFFFF;
	}
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
			 sizeof(struct bfi_enet_rx_vlan_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
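
/*
 * The VLAN filter table is downloaded one block at a time: the request
 * carries the BFI_ENET_VLAN_BLOCK_SIZE/32 words of vlan_filter_table[]
 * that belong to block_idx.  When filtering is disabled an all-ones mask
 * is sent instead, so every VLAN ID is accepted.
 */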

bna_bfi_vlan_strip_enable(struct bna_rxf *rxf)
	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
			  BFI_ENET_H2I_RX_VLAN_STRIP_ENABLE_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	req->enable = rxf->vlan_strip_status;
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
			 sizeof(struct bfi_enet_enable_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);

bna_bfi_rit_cfg(struct bna_rxf *rxf)
	struct bfi_enet_rit_req *req = &rxf->bfi_enet_cmd.rit_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
			  BFI_ENET_H2I_RIT_CFG_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rit_req)));
	req->size = htons(rxf->rit_size);
	memcpy(&req->table[0], rxf->rit, rxf->rit_size);
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
			 sizeof(struct bfi_enet_rit_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);

bna_bfi_rss_cfg(struct bna_rxf *rxf)
	struct bfi_enet_rss_cfg_req *req = &rxf->bfi_enet_cmd.rss_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
			  BFI_ENET_H2I_RSS_CFG_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rss_cfg_req)));
	req->cfg.type = rxf->rss_cfg.hash_type;
	req->cfg.mask = rxf->rss_cfg.hash_mask;
	for (i = 0; i < BFI_ENET_RSS_KEY_LEN; i++)
			htonl(rxf->rss_cfg.toeplitz_hash_key[i]);
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
			 sizeof(struct bfi_enet_rss_cfg_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);

bna_bfi_rss_enable(struct bna_rxf *rxf)
	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
			  BFI_ENET_H2I_RSS_ENABLE_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	req->enable = rxf->rss_status;
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
			 sizeof(struct bfi_enet_enable_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);

/* This function gets the multicast MAC that has already been added to CAM */
static struct bna_mac *
bna_rxf_mcmac_get(struct bna_rxf *rxf, u8 *mac_addr)
	struct bna_mac *mac;
	struct list_head *qe;

	list_for_each(qe, &rxf->mcast_active_q) {
		mac = (struct bna_mac *)qe;
		if (ether_addr_equal(mac->addr, mac_addr))
			return mac;
	}

	list_for_each(qe, &rxf->mcast_pending_del_q) {
		mac = (struct bna_mac *)qe;
		if (ether_addr_equal(mac->addr, mac_addr))
			return mac;
	}

	return NULL;

static struct bna_mcam_handle *
bna_rxf_mchandle_get(struct bna_rxf *rxf, int handle)
	struct bna_mcam_handle *mchandle;
	struct list_head *qe;

	list_for_each(qe, &rxf->mcast_handle_q) {
		mchandle = (struct bna_mcam_handle *)qe;
		if (mchandle->handle == handle)
			return mchandle;
	}

	return NULL;

bna_rxf_mchandle_attach(struct bna_rxf *rxf, u8 *mac_addr, int handle)
	struct bna_mac *mcmac;
	struct bna_mcam_handle *mchandle;

	mcmac = bna_rxf_mcmac_get(rxf, mac_addr);
	mchandle = bna_rxf_mchandle_get(rxf, handle);
	if (mchandle == NULL) {
		mchandle = bna_mcam_mod_handle_get(&rxf->rx->bna->mcam_mod);
		mchandle->handle = handle;
		mchandle->refcnt = 0;
		list_add_tail(&mchandle->qe, &rxf->mcast_handle_q);
	}
	mchandle->refcnt++;
	mcmac->handle = mchandle;

bna_rxf_mcast_del(struct bna_rxf *rxf, struct bna_mac *mac,
		  enum bna_cleanup_type cleanup)
	struct bna_mcam_handle *mchandle;
	int ret = 0;

	mchandle = mac->handle;
	if (mchandle == NULL)
		return ret;

	mchandle->refcnt--;
	if (mchandle->refcnt == 0) {
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_mcast_del_req(rxf, mchandle->handle);
			ret = 1;
		}
		list_del(&mchandle->qe);
		bfa_q_qe_init(&mchandle->qe);
		bna_mcam_mod_handle_put(&rxf->rx->bna->mcam_mod, mchandle);
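
/*
 * Multicast CAM entries are shared through refcounted bna_mcam_handle
 * objects: bna_rxf_mchandle_attach() takes a reference for each MAC tied
 * to a firmware handle, and bna_rxf_mcast_del() only posts the firmware
 * delete (returning 1, i.e. "response pending") once the last reference
 * is dropped during a hard cleanup.
 */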

bna_rxf_mcast_cfg_apply(struct bna_rxf *rxf)
	struct bna_mac *mac = NULL;
	struct list_head *qe;
	int ret;

	/* First delete multicast entries to maintain the count */
	while (!list_empty(&rxf->mcast_pending_del_q)) {
		bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		ret = bna_rxf_mcast_del(rxf, mac, BNA_HARD_CLEANUP);
		bna_cam_mod_mac_put(bna_mcam_mod_del_q(rxf->rx->bna), mac);
		if (ret)
			return ret;
	}

	/* Add multicast entries */
	if (!list_empty(&rxf->mcast_pending_add_q)) {
		bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		list_add_tail(&mac->qe, &rxf->mcast_active_q);
		bna_bfi_mcast_add_req(rxf, mac);
		return 1;
	}

	return 0;

bna_rxf_vlan_cfg_apply(struct bna_rxf *rxf)
	u8 vlan_pending_bitmask;
	int block_idx = 0;

	if (rxf->vlan_pending_bitmask) {
		vlan_pending_bitmask = rxf->vlan_pending_bitmask;
		while (!(vlan_pending_bitmask & 0x1)) {
			block_idx++;
			vlan_pending_bitmask >>= 1;
		}
		rxf->vlan_pending_bitmask &= ~BIT(block_idx);
		bna_bfi_rx_vlan_filter_set(rxf, block_idx);
		return 1;
	}

	return 0;

bna_rxf_mcast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
	struct list_head *qe;
	struct bna_mac *mac;
	int ret = 0;

	/* Throw away delete pending mcast entries */
	while (!list_empty(&rxf->mcast_pending_del_q)) {
		bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		ret = bna_rxf_mcast_del(rxf, mac, cleanup);
		bna_cam_mod_mac_put(bna_mcam_mod_del_q(rxf->rx->bna), mac);
		if (ret)
			return ret;
	}

	/* Move active mcast entries to pending_add_q */
	while (!list_empty(&rxf->mcast_active_q)) {
		bfa_q_deq(&rxf->mcast_active_q, &qe);
		bfa_q_qe_init(qe);
		list_add_tail(qe, &rxf->mcast_pending_add_q);
		mac = (struct bna_mac *)qe;
		if (bna_rxf_mcast_del(rxf, mac, cleanup))
			ret = 1;
	}

	return ret;

bna_rxf_rss_cfg_apply(struct bna_rxf *rxf)
	if (rxf->rss_pending) {
		if (rxf->rss_pending & BNA_RSS_F_RIT_PENDING) {
			rxf->rss_pending &= ~BNA_RSS_F_RIT_PENDING;
			bna_bfi_rit_cfg(rxf);
			return 1;
		}

		if (rxf->rss_pending & BNA_RSS_F_CFG_PENDING) {
			rxf->rss_pending &= ~BNA_RSS_F_CFG_PENDING;
			bna_bfi_rss_cfg(rxf);
			return 1;
		}

		if (rxf->rss_pending & BNA_RSS_F_STATUS_PENDING) {
			rxf->rss_pending &= ~BNA_RSS_F_STATUS_PENDING;
			bna_bfi_rss_enable(rxf);
			return 1;
		}
	}

	return 0;

bna_rxf_cfg_apply(struct bna_rxf *rxf)
	if (bna_rxf_ucast_cfg_apply(rxf))
		return 1;

	if (bna_rxf_mcast_cfg_apply(rxf))
		return 1;

	if (bna_rxf_promisc_cfg_apply(rxf))
		return 1;

	if (bna_rxf_allmulti_cfg_apply(rxf))
		return 1;

	if (bna_rxf_vlan_cfg_apply(rxf))
		return 1;

	if (bna_rxf_vlan_strip_cfg_apply(rxf))
		return 1;

	if (bna_rxf_rss_cfg_apply(rxf))
		return 1;

	return 0;
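
/*
 * Each *_cfg_apply() helper posts at most one firmware command and
 * returns nonzero while a response is outstanding.  The cfg_wait state
 * re-runs bna_rxf_cfg_apply() on every RXF_E_FW_RESP until every helper
 * reports zero, and only then moves the RxF to the started state.
 */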

/* Only software reset */
bna_rxf_fltr_clear(struct bna_rxf *rxf)
	if (bna_rxf_ucast_cfg_reset(rxf, BNA_HARD_CLEANUP))
		return 1;

	if (bna_rxf_mcast_cfg_reset(rxf, BNA_HARD_CLEANUP))
		return 1;

	if (bna_rxf_promisc_cfg_reset(rxf, BNA_HARD_CLEANUP))
		return 1;

	if (bna_rxf_allmulti_cfg_reset(rxf, BNA_HARD_CLEANUP))
		return 1;

	return 0;

bna_rxf_cfg_reset(struct bna_rxf *rxf)
	bna_rxf_ucast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
	bna_rxf_mcast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
	bna_rxf_promisc_cfg_reset(rxf, BNA_SOFT_CLEANUP);
	bna_rxf_allmulti_cfg_reset(rxf, BNA_SOFT_CLEANUP);
	bna_rxf_vlan_cfg_soft_reset(rxf);
	bna_rxf_rss_cfg_soft_reset(rxf);
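
/*
 * The difference between bna_rxf_fltr_clear() and bna_rxf_cfg_reset() is
 * the cleanup type: a hard cleanup makes the firmware forget each filter
 * (one delete request at a time), while a soft cleanup only rewinds the
 * driver state so the full configuration is replayed on the next start.
 */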

bna_rit_init(struct bna_rxf *rxf, int rit_size)
	struct bna_rx *rx = rxf->rx;
	struct bna_rxp *rxp;
	struct list_head *qe;
	int offset = 0;

	rxf->rit_size = rit_size;
	list_for_each(qe, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe;
		rxf->rit[offset] = rxp->cq.ccb->id;
		offset++;
	}

bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr)
	bfa_fsm_send_event(rxf, RXF_E_FW_RESP);

bna_bfi_rxf_ucast_set_rsp(struct bna_rxf *rxf,
			  struct bfi_msgq_mhdr *msghdr)
	struct bfi_enet_rsp *rsp =
		container_of(msghdr, struct bfi_enet_rsp, mh);

	if (rsp->error) {
		/* Clear ucast from cache */
		rxf->ucast_active_set = 0;
	}

	bfa_fsm_send_event(rxf, RXF_E_FW_RESP);

bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf,
			  struct bfi_msgq_mhdr *msghdr)
	struct bfi_enet_mcast_add_req *req =
		&rxf->bfi_enet_cmd.mcast_add_req;
	struct bfi_enet_mcast_add_rsp *rsp =
		container_of(msghdr, struct bfi_enet_mcast_add_rsp, mh);

	bna_rxf_mchandle_attach(rxf, (u8 *)&req->mac_addr,
				ntohs(rsp->handle));
	bfa_fsm_send_event(rxf, RXF_E_FW_RESP);

bna_rxf_init(struct bna_rxf *rxf,
	     struct bna_rx *rx,
	     struct bna_rx_config *q_config,
	     struct bna_res_info *res_info)
	rxf->rx = rx;

	INIT_LIST_HEAD(&rxf->ucast_pending_add_q);
	INIT_LIST_HEAD(&rxf->ucast_pending_del_q);
	rxf->ucast_pending_set = 0;
	rxf->ucast_active_set = 0;
	INIT_LIST_HEAD(&rxf->ucast_active_q);
	rxf->ucast_pending_mac = NULL;

	INIT_LIST_HEAD(&rxf->mcast_pending_add_q);
	INIT_LIST_HEAD(&rxf->mcast_pending_del_q);
	INIT_LIST_HEAD(&rxf->mcast_active_q);
	INIT_LIST_HEAD(&rxf->mcast_handle_q);

	if (q_config->paused)
		rxf->flags |= BNA_RXF_F_PAUSED;

	rxf->rit = (u8 *)
		res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info.mdl[0].kva;
	bna_rit_init(rxf, q_config->num_paths);

	rxf->rss_status = q_config->rss_status;
	if (rxf->rss_status == BNA_STATUS_T_ENABLED) {
		rxf->rss_cfg = q_config->rss_config;
		rxf->rss_pending |= BNA_RSS_F_CFG_PENDING;
		rxf->rss_pending |= BNA_RSS_F_RIT_PENDING;
		rxf->rss_pending |= BNA_RSS_F_STATUS_PENDING;
	}

	rxf->vlan_filter_status = BNA_STATUS_T_DISABLED;
	memset(rxf->vlan_filter_table, 0,
	       (sizeof(u32) * (BFI_ENET_VLAN_ID_MAX / 32)));
	rxf->vlan_filter_table[0] |= 1; /* for pure priority tagged frames */
	rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;

	rxf->vlan_strip_status = q_config->vlan_strip_status;

	bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);

bna_rxf_uninit(struct bna_rxf *rxf)
	struct bna_mac *mac;

	rxf->ucast_pending_set = 0;
	rxf->ucast_active_set = 0;

	while (!list_empty(&rxf->ucast_pending_add_q)) {
		bfa_q_deq(&rxf->ucast_pending_add_q, &mac);
		bfa_q_qe_init(&mac->qe);
		bna_cam_mod_mac_put(bna_ucam_mod_free_q(rxf->rx->bna), mac);
	}

	if (rxf->ucast_pending_mac) {
		bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
		bna_cam_mod_mac_put(bna_ucam_mod_free_q(rxf->rx->bna),
				    rxf->ucast_pending_mac);
		rxf->ucast_pending_mac = NULL;
	}

	while (!list_empty(&rxf->mcast_pending_add_q)) {
		bfa_q_deq(&rxf->mcast_pending_add_q, &mac);
		bfa_q_qe_init(&mac->qe);
		bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
	}

	rxf->rxmode_pending = 0;
	rxf->rxmode_pending_bitmask = 0;
	if (rxf->rx->bna->promisc_rid == rxf->rx->rid)
		rxf->rx->bna->promisc_rid = BFI_INVALID_RID;
	if (rxf->rx->bna->default_mode_rid == rxf->rx->rid)
		rxf->rx->bna->default_mode_rid = BFI_INVALID_RID;

	rxf->rss_pending = 0;
	rxf->vlan_strip_pending = false;

bna_rx_cb_rxf_started(struct bna_rx *rx)
	bfa_fsm_send_event(rx, RX_E_RXF_STARTED);

bna_rxf_start(struct bna_rxf *rxf)
	rxf->start_cbfn = bna_rx_cb_rxf_started;
	rxf->start_cbarg = rxf->rx;
	bfa_fsm_send_event(rxf, RXF_E_START);

bna_rx_cb_rxf_stopped(struct bna_rx *rx)
	bfa_fsm_send_event(rx, RX_E_RXF_STOPPED);

bna_rxf_stop(struct bna_rxf *rxf)
	rxf->stop_cbfn = bna_rx_cb_rxf_stopped;
	rxf->stop_cbarg = rxf->rx;
	bfa_fsm_send_event(rxf, RXF_E_STOP);

bna_rxf_fail(struct bna_rxf *rxf)
	bfa_fsm_send_event(rxf, RXF_E_FAIL);

bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac)
	struct bna_rxf *rxf = &rx->rxf;

	if (rxf->ucast_pending_mac == NULL) {
		rxf->ucast_pending_mac =
			bna_cam_mod_mac_get(bna_ucam_mod_free_q(rxf->rx->bna));
		if (rxf->ucast_pending_mac == NULL)
			return BNA_CB_UCAST_CAM_FULL;
		bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
	}

	ether_addr_copy(rxf->ucast_pending_mac->addr, ucmac);
	rxf->ucast_pending_set = 1;
	rxf->cam_fltr_cbfn = NULL;
	rxf->cam_fltr_cbarg = rx->bna->bnad;

	bfa_fsm_send_event(rxf, RXF_E_CONFIG);

	return BNA_CB_SUCCESS;
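
/*
 * Sketch of a typical caller (hypothetical code, mirroring how bnad.c
 * drives this API under its bna_lock spinlock):
 *
 *	spin_lock_irqsave(&bnad->bna_lock, flags);
 *	err = bna_rx_ucast_set(rx, netdev->dev_addr);
 *	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 *	if (err != BNA_CB_SUCCESS)
 *		netdev_err(netdev, "failed to set unicast MAC\n");
 */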

bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
		 void (*cbfn)(struct bnad *, struct bna_rx *))
	struct bna_rxf *rxf = &rx->rxf;
	struct bna_mac *mac;

	/* Check if already added or pending addition */
	if (bna_mac_find(&rxf->mcast_active_q, addr) ||
	    bna_mac_find(&rxf->mcast_pending_add_q, addr)) {
		if (cbfn)
			cbfn(rx->bna->bnad, rx);
		return BNA_CB_SUCCESS;
	}

	mac = bna_cam_mod_mac_get(bna_mcam_mod_free_q(rxf->rx->bna));
	if (mac == NULL)
		return BNA_CB_MCAST_LIST_FULL;
	bfa_q_qe_init(&mac->qe);
	ether_addr_copy(mac->addr, addr);
	list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);

	rxf->cam_fltr_cbfn = cbfn;
	rxf->cam_fltr_cbarg = rx->bna->bnad;

	bfa_fsm_send_event(rxf, RXF_E_CONFIG);

	return BNA_CB_SUCCESS;

bna_rx_ucast_listset(struct bna_rx *rx, int count, u8 *uclist)
	struct bna_ucam_mod *ucam_mod = &rx->bna->ucam_mod;
	struct bna_rxf *rxf = &rx->rxf;
	struct list_head list_head;
	struct list_head *qe;
	u8 *mcaddr;
	struct bna_mac *mac, *del_mac;
	int i;

	/* Purge the pending_add_q */
	while (!list_empty(&rxf->ucast_pending_add_q)) {
		bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
	}

	/* Schedule active_q entries for deletion */
	while (!list_empty(&rxf->ucast_active_q)) {
		bfa_q_deq(&rxf->ucast_active_q, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);

		del_mac = bna_cam_mod_mac_get(&ucam_mod->del_q);
		memcpy(del_mac, mac, sizeof(*del_mac));
		list_add_tail(&del_mac->qe, &rxf->ucast_pending_del_q);
		bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
	}

	/* Allocate nodes */
	INIT_LIST_HEAD(&list_head);
	for (i = 0, mcaddr = uclist; i < count; i++) {
		mac = bna_cam_mod_mac_get(&ucam_mod->free_q);
		if (mac == NULL)
			goto err_return;
		bfa_q_qe_init(&mac->qe);
		ether_addr_copy(mac->addr, mcaddr);
		list_add_tail(&mac->qe, &list_head);
		mcaddr += ETH_ALEN;
	}

	/* Add the new entries */
	while (!list_empty(&list_head)) {
		bfa_q_deq(&list_head, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		list_add_tail(&mac->qe, &rxf->ucast_pending_add_q);
	}

	bfa_fsm_send_event(rxf, RXF_E_CONFIG);

	return BNA_CB_SUCCESS;

err_return:
	while (!list_empty(&list_head)) {
		bfa_q_deq(&list_head, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
	}

	return BNA_CB_UCAST_CAM_FULL;
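
/*
 * Both list-set entry points use the same rebuild pattern: purge whatever
 * was queued but never applied, convert every active entry into a pending
 * delete (using a copy from the del_q pool, since the original node goes
 * back to the free pool), stage the new list on pending_add_q, and kick
 * the RxF state machine once with RXF_E_CONFIG.
 */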

bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist)
	struct bna_mcam_mod *mcam_mod = &rx->bna->mcam_mod;
	struct bna_rxf *rxf = &rx->rxf;
	struct list_head list_head;
	struct list_head *qe;
	u8 *mcaddr;
	struct bna_mac *mac, *del_mac;
	int i;

	/* Purge the pending_add_q */
	while (!list_empty(&rxf->mcast_pending_add_q)) {
		bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
	}

	/* Schedule active_q entries for deletion */
	while (!list_empty(&rxf->mcast_active_q)) {
		bfa_q_deq(&rxf->mcast_active_q, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);

		del_mac = bna_cam_mod_mac_get(&mcam_mod->del_q);
		memcpy(del_mac, mac, sizeof(*del_mac));
		list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
		mac->handle = NULL;
		bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
	}

	/* Allocate nodes */
	INIT_LIST_HEAD(&list_head);
	for (i = 0, mcaddr = mclist; i < count; i++) {
		mac = bna_cam_mod_mac_get(&mcam_mod->free_q);
		if (mac == NULL)
			goto err_return;
		bfa_q_qe_init(&mac->qe);
		ether_addr_copy(mac->addr, mcaddr);
		list_add_tail(&mac->qe, &list_head);
		mcaddr += ETH_ALEN;
	}

	/* Add the new entries */
	while (!list_empty(&list_head)) {
		bfa_q_deq(&list_head, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
	}

	bfa_fsm_send_event(rxf, RXF_E_CONFIG);

	return BNA_CB_SUCCESS;

err_return:
	while (!list_empty(&list_head)) {
		bfa_q_deq(&list_head, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
	}

	return BNA_CB_MCAST_LIST_FULL;

bna_rx_mcast_delall(struct bna_rx *rx)
	struct bna_rxf *rxf = &rx->rxf;
	struct list_head *qe;
	struct bna_mac *mac, *del_mac;
	int need_hw_config = 0;

	/* Purge all entries from pending_add_q */
	while (!list_empty(&rxf->mcast_pending_add_q)) {
		bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
	}

	/* Schedule all entries in active_q for deletion */
	while (!list_empty(&rxf->mcast_active_q)) {
		bfa_q_deq(&rxf->mcast_active_q, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);

		del_mac = bna_cam_mod_mac_get(bna_mcam_mod_del_q(rxf->rx->bna));
		memcpy(del_mac, mac, sizeof(*del_mac));
		list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
		mac->handle = NULL;
		bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
		need_hw_config = 1;
	}

	if (need_hw_config)
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);

bna_rx_vlan_add(struct bna_rx *rx, int vlan_id)
	struct bna_rxf *rxf = &rx->rxf;
	int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
	int bit = BIT((vlan_id & BFI_VLAN_WORD_MASK));
	int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);

	rxf->vlan_filter_table[index] |= bit;
	if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
		rxf->vlan_pending_bitmask |= BIT(group_id);
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
	}

bna_rx_vlan_del(struct bna_rx *rx, int vlan_id)
	struct bna_rxf *rxf = &rx->rxf;
	int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
	int bit = BIT((vlan_id & BFI_VLAN_WORD_MASK));
	int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);

	rxf->vlan_filter_table[index] &= ~bit;
	if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
		rxf->vlan_pending_bitmask |= BIT(group_id);
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
	}
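
/*
 * Worked example (assuming BFI_VLAN_WORD_SHIFT == 5, BFI_VLAN_WORD_MASK ==
 * 0x1f and BFI_VLAN_BLOCK_SHIFT == 9, i.e. 4096 VLAN IDs split into eight
 * 512-bit blocks to match the u8 vlan_pending_bitmask): vlan_id 100 sets
 * bit 4 of vlan_filter_table[3] and marks block 0 pending, so only that
 * block is re-sent by bna_rxf_vlan_cfg_apply().
 */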

bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf)
	struct bna_mac *mac = NULL;
	struct list_head *qe;

	/* Delete MAC addresses previously added */
	if (!list_empty(&rxf->ucast_pending_del_q)) {
		bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
		bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna), mac);
		return 1;
	}

	/* Set default unicast MAC */
	if (rxf->ucast_pending_set) {
		rxf->ucast_pending_set = 0;
		ether_addr_copy(rxf->ucast_active_mac.addr,
				rxf->ucast_pending_mac->addr);
		rxf->ucast_active_set = 1;
		bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
				  BFI_ENET_H2I_MAC_UCAST_SET_REQ);
		return 1;
	}

	/* Add additional MAC entries */
	if (!list_empty(&rxf->ucast_pending_add_q)) {
		bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		list_add_tail(&mac->qe, &rxf->ucast_active_q);
		bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_ADD_REQ);
		return 1;
	}

	return 0;

bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
	struct list_head *qe;
	struct bna_mac *mac;

	/* Throw away delete pending ucast entries */
	while (!list_empty(&rxf->ucast_pending_del_q)) {
		bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		if (cleanup == BNA_SOFT_CLEANUP)
			bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna),
					    mac);
		else {
			bna_bfi_ucast_req(rxf, mac,
					  BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
			bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna),
					    mac);
			return 1;
		}
	}

	/* Move active ucast entries to pending_add_q */
	while (!list_empty(&rxf->ucast_active_q)) {
		bfa_q_deq(&rxf->ucast_active_q, &qe);
		bfa_q_qe_init(qe);
		list_add_tail(qe, &rxf->ucast_pending_add_q);
		if (cleanup == BNA_HARD_CLEANUP) {
			mac = (struct bna_mac *)qe;
			bna_bfi_ucast_req(rxf, mac,
					  BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
			return 1;
		}
	}

	if (rxf->ucast_active_set) {
		rxf->ucast_pending_set = 1;
		rxf->ucast_active_set = 0;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
					  BFI_ENET_H2I_MAC_UCAST_CLR_REQ);
			return 1;
		}
	}

	return 0;

bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf)
	struct bna *bna = rxf->rx->bna;

	/* Enable/disable promiscuous mode */
	if (is_promisc_enable(rxf->rxmode_pending,
			      rxf->rxmode_pending_bitmask)) {
		/* move promisc configuration from pending -> active */
		promisc_inactive(rxf->rxmode_pending,
				 rxf->rxmode_pending_bitmask);
		rxf->rxmode_active |= BNA_RXMODE_PROMISC;
		bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_ENABLED);
		return 1;
	} else if (is_promisc_disable(rxf->rxmode_pending,
				      rxf->rxmode_pending_bitmask)) {
		/* move promisc configuration from pending -> active */
		promisc_inactive(rxf->rxmode_pending,
				 rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
		bna->promisc_rid = BFI_INVALID_RID;
		bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
		return 1;
	}

	return 0;

bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
	struct bna *bna = rxf->rx->bna;

	/* Clear pending promisc mode disable */
	if (is_promisc_disable(rxf->rxmode_pending,
			       rxf->rxmode_pending_bitmask)) {
		promisc_inactive(rxf->rxmode_pending,
				 rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
		bna->promisc_rid = BFI_INVALID_RID;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
			return 1;
		}
	}

	/* Move promisc mode config from active -> pending */
	if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
		promisc_enable(rxf->rxmode_pending,
			       rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
			return 1;
		}
	}

	return 0;

bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf)
	/* Enable/disable allmulti mode */
	if (is_allmulti_enable(rxf->rxmode_pending,
			       rxf->rxmode_pending_bitmask)) {
		/* move allmulti configuration from pending -> active */
		allmulti_inactive(rxf->rxmode_pending,
				  rxf->rxmode_pending_bitmask);
		rxf->rxmode_active |= BNA_RXMODE_ALLMULTI;
		bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_DISABLED);
		return 1;
	} else if (is_allmulti_disable(rxf->rxmode_pending,
				       rxf->rxmode_pending_bitmask)) {
		/* move allmulti configuration from pending -> active */
		allmulti_inactive(rxf->rxmode_pending,
				  rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
		bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
		return 1;
	}

	return 0;

bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
	/* Clear pending allmulti mode disable */
	if (is_allmulti_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		allmulti_inactive(rxf->rxmode_pending,
				  rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
			return 1;
		}
	}

	/* Move allmulti mode config from active -> pending */
	if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
		allmulti_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
			return 1;
		}
	}

	return 0;

bna_rxf_promisc_enable(struct bna_rxf *rxf)
	struct bna *bna = rxf->rx->bna;
	int ret = 0;

	if (is_promisc_enable(rxf->rxmode_pending,
			      rxf->rxmode_pending_bitmask) ||
	    (rxf->rxmode_active & BNA_RXMODE_PROMISC)) {
		/* Do nothing if pending enable or already enabled */
	} else if (is_promisc_disable(rxf->rxmode_pending,
				      rxf->rxmode_pending_bitmask)) {
		/* Turn off pending disable command */
		promisc_inactive(rxf->rxmode_pending,
				 rxf->rxmode_pending_bitmask);
	} else {
		/* Schedule enable */
		promisc_enable(rxf->rxmode_pending,
			       rxf->rxmode_pending_bitmask);
		bna->promisc_rid = rxf->rx->rid;
		ret = 1;
	}

	return ret;

bna_rxf_promisc_disable(struct bna_rxf *rxf)
	struct bna *bna = rxf->rx->bna;
	int ret = 0;

	if (is_promisc_disable(rxf->rxmode_pending,
			       rxf->rxmode_pending_bitmask) ||
	    (!(rxf->rxmode_active & BNA_RXMODE_PROMISC))) {
		/* Do nothing if pending disable or already disabled */
	} else if (is_promisc_enable(rxf->rxmode_pending,
				     rxf->rxmode_pending_bitmask)) {
		/* Turn off pending enable command */
		promisc_inactive(rxf->rxmode_pending,
				 rxf->rxmode_pending_bitmask);
		bna->promisc_rid = BFI_INVALID_RID;
	} else if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
		/* Schedule disable */
		promisc_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		ret = 1;
	}

	return ret;

bna_rxf_allmulti_enable(struct bna_rxf *rxf)
	int ret = 0;

	if (is_allmulti_enable(rxf->rxmode_pending,
			       rxf->rxmode_pending_bitmask) ||
	    (rxf->rxmode_active & BNA_RXMODE_ALLMULTI)) {
		/* Do nothing if pending enable or already enabled */
	} else if (is_allmulti_disable(rxf->rxmode_pending,
				       rxf->rxmode_pending_bitmask)) {
		/* Turn off pending disable command */
		allmulti_inactive(rxf->rxmode_pending,
				  rxf->rxmode_pending_bitmask);
	} else {
		/* Schedule enable */
		allmulti_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		ret = 1;
	}

	return ret;

bna_rxf_allmulti_disable(struct bna_rxf *rxf)
	int ret = 0;

	if (is_allmulti_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask) ||
	    (!(rxf->rxmode_active & BNA_RXMODE_ALLMULTI))) {
		/* Do nothing if pending disable or already disabled */
	} else if (is_allmulti_enable(rxf->rxmode_pending,
				      rxf->rxmode_pending_bitmask)) {
		/* Turn off pending enable command */
		allmulti_inactive(rxf->rxmode_pending,
				  rxf->rxmode_pending_bitmask);
	} else if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
		/* Schedule disable */
		allmulti_disable(rxf->rxmode_pending,
				 rxf->rxmode_pending_bitmask);
		ret = 1;
	}

	return ret;

bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf)
	if (rxf->vlan_strip_pending) {
		rxf->vlan_strip_pending = false;
		bna_bfi_vlan_strip_enable(rxf);
		return 1;
	}

	return 0;

#define BNA_GET_RXQS(qcfg)	(((qcfg)->rxp_type == BNA_RXP_SINGLE) ? \
	(qcfg)->num_paths : ((qcfg)->num_paths * 2))

#define SIZE_TO_PAGES(size)	(((size) >> PAGE_SHIFT) + ((((size) & \
	(PAGE_SIZE - 1)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))
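
/*
 * SIZE_TO_PAGES() rounds a byte count up to whole pages: with a 4 KiB
 * PAGE_SIZE, for example, a 10000-byte queue needs 2 full pages plus one
 * partial page, so the macro yields 3.
 */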

#define call_rx_stop_cbfn(rx) \
do { \
	if ((rx)->stop_cbfn) { \
		void (*cbfn)(void *, struct bna_rx *); \
		void *cbarg; \
		cbfn = (rx)->stop_cbfn; \
		cbarg = (rx)->stop_cbarg; \
		(rx)->stop_cbfn = NULL; \
		(rx)->stop_cbarg = NULL; \
		cbfn(cbarg, rx); \
	} \
} while (0)

#define call_rx_stall_cbfn(rx) \
do { \
	if ((rx)->rx_stall_cbfn) \
		(rx)->rx_stall_cbfn((rx)->bna->bnad, (rx)); \
} while (0)

#define bfi_enet_datapath_q_init(bfi_q, bna_qpt) \
do { \
	struct bna_dma_addr cur_q_addr = \
		*((struct bna_dma_addr *)((bna_qpt)->kv_qpt_ptr)); \
	(bfi_q)->pg_tbl.a32.addr_lo = (bna_qpt)->hw_qpt_ptr.lsb; \
	(bfi_q)->pg_tbl.a32.addr_hi = (bna_qpt)->hw_qpt_ptr.msb; \
	(bfi_q)->first_entry.a32.addr_lo = cur_q_addr.lsb; \
	(bfi_q)->first_entry.a32.addr_hi = cur_q_addr.msb; \
	(bfi_q)->pages = htons((u16)(bna_qpt)->page_count); \
	(bfi_q)->page_sz = htons((u16)(bna_qpt)->page_size); \
} while (0)
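
/*
 * bfi_enet_datapath_q_init() describes a queue to the firmware by
 * reference only: the DMA address of its queue page table (QPT), the
 * address of the first queue entry, and the page count and size.  No
 * queue contents travel in the command itself.
 */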

static void bna_bfi_rx_enet_start(struct bna_rx *rx);
static void bna_rx_enet_stop(struct bna_rx *rx);
static void bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx);

bfa_fsm_state_decl(bna_rx, stopped,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, start_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, start_stop_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, rxf_start_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, started,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, rxf_stop_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, stop_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, cleanup_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, failed,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, quiesce_wait,
	struct bna_rx, enum bna_rx_event);

static void bna_rx_sm_stopped_entry(struct bna_rx *rx)
	call_rx_stop_cbfn(rx);

static void bna_rx_sm_stopped(struct bna_rx *rx,
				enum bna_rx_event event)

	bfa_fsm_set_state(rx, bna_rx_sm_start_wait);

	call_rx_stop_cbfn(rx);

	bfa_sm_fault(event);

static void bna_rx_sm_start_wait_entry(struct bna_rx *rx)
	bna_bfi_rx_enet_start(rx);

bna_rx_sm_stop_wait_entry(struct bna_rx *rx)

bna_rx_sm_stop_wait(struct bna_rx *rx, enum bna_rx_event event)

	bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
	rx->rx_cleanup_cbfn(rx->bna->bnad, rx);

	bna_rx_enet_stop(rx);

	bfa_sm_fault(event);

static void bna_rx_sm_start_wait(struct bna_rx *rx,
				enum bna_rx_event event)

	bfa_fsm_set_state(rx, bna_rx_sm_start_stop_wait);

	bfa_fsm_set_state(rx, bna_rx_sm_stopped);

	bfa_fsm_set_state(rx, bna_rx_sm_rxf_start_wait);

	bfa_sm_fault(event);

static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx)
	rx->rx_post_cbfn(rx->bna->bnad, rx);
	bna_rxf_start(&rx->rxf);

bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx)

bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event)

	bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
	bna_rxf_fail(&rx->rxf);
	call_rx_stall_cbfn(rx);
	rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
	break;

	case RX_E_RXF_STARTED:
		bna_rxf_stop(&rx->rxf);
		break;

	case RX_E_RXF_STOPPED:
		bfa_fsm_set_state(rx, bna_rx_sm_stop_wait);
		call_rx_stall_cbfn(rx);
		bna_rx_enet_stop(rx);
		break;

	default:
		bfa_sm_fault(event);

bna_rx_sm_start_stop_wait_entry(struct bna_rx *rx)

bna_rx_sm_start_stop_wait(struct bna_rx *rx, enum bna_rx_event event)

	bfa_fsm_set_state(rx, bna_rx_sm_stopped);

	bna_rx_enet_stop(rx);

	bfa_sm_fault(event);

bna_rx_sm_started_entry(struct bna_rx *rx)
	struct bna_rxp *rxp;
	struct list_head *qe_rxp;
	int is_regular = (rx->type == BNA_RX_T_REGULAR);

	list_for_each(qe_rxp, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe_rxp;
		bna_ib_start(rx->bna, &rxp->cq.ib, is_regular);
	}

	bna_ethport_cb_rx_started(&rx->bna->ethport);

bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event)

	bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
	bna_ethport_cb_rx_stopped(&rx->bna->ethport);
	bna_rxf_stop(&rx->rxf);

	bfa_fsm_set_state(rx, bna_rx_sm_failed);
	bna_ethport_cb_rx_stopped(&rx->bna->ethport);
	bna_rxf_fail(&rx->rxf);
	call_rx_stall_cbfn(rx);
	rx->rx_cleanup_cbfn(rx->bna->bnad, rx);

	bfa_sm_fault(event);

static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx,
				enum bna_rx_event event)

	bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);

	bfa_fsm_set_state(rx, bna_rx_sm_failed);
	bna_rxf_fail(&rx->rxf);
	call_rx_stall_cbfn(rx);
	rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
	break;

	case RX_E_RXF_STARTED:
		bfa_fsm_set_state(rx, bna_rx_sm_started);
		break;

	default:
		bfa_sm_fault(event);

bna_rx_sm_cleanup_wait_entry(struct bna_rx *rx)

bna_rx_sm_cleanup_wait(struct bna_rx *rx, enum bna_rx_event event)

	case RX_E_RXF_STOPPED:

	case RX_E_CLEANUP_DONE:
		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);

bna_rx_sm_failed_entry(struct bna_rx *rx)

bna_rx_sm_failed(struct bna_rx *rx, enum bna_rx_event event)

	bfa_fsm_set_state(rx, bna_rx_sm_quiesce_wait);

	bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);

	case RX_E_RXF_STARTED:
	case RX_E_RXF_STOPPED:
		break;

	case RX_E_CLEANUP_DONE:
		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);

bna_rx_sm_quiesce_wait_entry(struct bna_rx *rx)

bna_rx_sm_quiesce_wait(struct bna_rx *rx, enum bna_rx_event event)

	bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);

	bfa_fsm_set_state(rx, bna_rx_sm_failed);

	case RX_E_CLEANUP_DONE:
		bfa_fsm_set_state(rx, bna_rx_sm_start_wait);
		break;

	default:
		bfa_sm_fault(event);

bna_bfi_rx_enet_start(struct bna_rx *rx)
	struct bfi_enet_rx_cfg_req *cfg_req = &rx->bfi_enet_cmd.cfg_req;
	struct bna_rxp *rxp = NULL;
	struct bna_rxq *q0 = NULL, *q1 = NULL;
	struct list_head *rxp_qe;
	int i;

	bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
			  BFI_ENET_H2I_RX_CFG_SET_REQ, 0, rx->rid);
	cfg_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_cfg_req)));

	cfg_req->rx_cfg.frame_size = bna_enet_mtu_get(&rx->bna->enet);
	cfg_req->num_queue_sets = rx->num_paths;
	for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
	     i < rx->num_paths;
	     i++, rxp_qe = bfa_q_next(rxp_qe)) {
		rxp = (struct bna_rxp *)rxp_qe;

		GET_RXQS(rxp, q0, q1);
		switch (rxp->type) {
		case BNA_RXP_SLR:
		case BNA_RXP_HDS:
			/* Small RxQ */
			bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].qs.q,
						 &q1->qpt);
			cfg_req->q_cfg[i].qs.rx_buffer_size =
				htons((u16)q1->buffer_size);
			/* Fall through */

		case BNA_RXP_SINGLE:
			/* Large/Single RxQ */
			bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].ql.q,
						 &q0->qpt);
			if (q0->multi_buffer)
				/* multi-buffer is enabled by allocating
				 * a new rx with new set of resources.
				 * q0->buffer_size should be initialized to
				 * fragment size.
				 */
				cfg_req->rx_cfg.multi_buffer =
						BNA_STATUS_T_ENABLED;
			else
				q0->buffer_size =
					bna_enet_mtu_get(&rx->bna->enet);
			cfg_req->q_cfg[i].ql.rx_buffer_size =
				htons((u16)q0->buffer_size);
			break;
		}

		bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].cq.q,
					 &rxp->cq.qpt);

		cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo =
			rxp->cq.ib.ib_seg_host_addr.lsb;
		cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi =
			rxp->cq.ib.ib_seg_host_addr.msb;
		cfg_req->q_cfg[i].ib.intr.msix_index =
			htons((u16)rxp->cq.ib.intr_vector);
	}

	cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED;
	cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.msix = (rxp->cq.ib.intr_type == BNA_INTR_T_MSIX)
				? BNA_STATUS_T_ENABLED :
				BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.coalescing_timeout =
		htonl((u32)rxp->cq.ib.coalescing_timeo);
	cfg_req->ib_cfg.inter_pkt_timeout =
		htonl((u32)rxp->cq.ib.interpkt_timeo);
	cfg_req->ib_cfg.inter_pkt_count = (u8)rxp->cq.ib.interpkt_count;

	switch (rxp->type) {
	case BNA_RXP_SLR:
		cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_LARGE_SMALL;
		break;

	case BNA_RXP_HDS:
		cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_HDS;
		cfg_req->rx_cfg.hds.type = rx->hds_cfg.hdr_type;
		cfg_req->rx_cfg.hds.force_offset = rx->hds_cfg.forced_offset;
		cfg_req->rx_cfg.hds.max_header_size = rx->hds_cfg.forced_offset;
		break;

	case BNA_RXP_SINGLE:
		cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_SINGLE;
		break;
	}

	cfg_req->rx_cfg.strip_vlan = rx->rxf.vlan_strip_status;

	bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL,
			 sizeof(struct bfi_enet_rx_cfg_req), &cfg_req->mh);
	bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd);

bna_bfi_rx_enet_stop(struct bna_rx *rx)
	struct bfi_enet_req *req = &rx->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
			  BFI_ENET_H2I_RX_CFG_CLR_REQ, 0, rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req)));
	bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req),
			 &req->mh);
	bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd);

bna_rx_enet_stop(struct bna_rx *rx)
	struct bna_rxp *rxp;
	struct list_head *qe_rxp;

	list_for_each(qe_rxp, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe_rxp;
		bna_ib_stop(rx->bna, &rxp->cq.ib);
	}

	bna_bfi_rx_enet_stop(rx);

bna_rx_res_check(struct bna_rx_mod *rx_mod, struct bna_rx_config *rx_cfg)
	if ((rx_mod->rx_free_count == 0) ||
	    (rx_mod->rxp_free_count == 0) ||
	    (rx_mod->rxq_free_count == 0))
		return 0;

	if (rx_cfg->rxp_type == BNA_RXP_SINGLE) {
		if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
		    (rx_mod->rxq_free_count < rx_cfg->num_paths))
			return 0;
	} else {
		if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
		    (rx_mod->rxq_free_count < (2 * rx_cfg->num_paths)))
			return 0;
	}

	return 1;

static struct bna_rxq *
bna_rxq_get(struct bna_rx_mod *rx_mod)
	struct bna_rxq *rxq = NULL;
	struct list_head *qe = NULL;

	bfa_q_deq(&rx_mod->rxq_free_q, &qe);
	rx_mod->rxq_free_count--;
	rxq = (struct bna_rxq *)qe;
	bfa_q_qe_init(&rxq->qe);

	return rxq;

bna_rxq_put(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
	bfa_q_qe_init(&rxq->qe);
	list_add_tail(&rxq->qe, &rx_mod->rxq_free_q);
	rx_mod->rxq_free_count++;

static struct bna_rxp *
bna_rxp_get(struct bna_rx_mod *rx_mod)
	struct list_head *qe = NULL;
	struct bna_rxp *rxp = NULL;

	bfa_q_deq(&rx_mod->rxp_free_q, &qe);
	rx_mod->rxp_free_count--;
	rxp = (struct bna_rxp *)qe;
	bfa_q_qe_init(&rxp->qe);

	return rxp;

bna_rxp_put(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)
	bfa_q_qe_init(&rxp->qe);
	list_add_tail(&rxp->qe, &rx_mod->rxp_free_q);
	rx_mod->rxp_free_count++;

static struct bna_rx *
bna_rx_get(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
	struct list_head *qe = NULL;
	struct bna_rx *rx = NULL;

	if (type == BNA_RX_T_REGULAR) {
		bfa_q_deq(&rx_mod->rx_free_q, &qe);
	} else {
		bfa_q_deq_tail(&rx_mod->rx_free_q, &qe);
	}

	rx_mod->rx_free_count--;
	rx = (struct bna_rx *)qe;
	bfa_q_qe_init(&rx->qe);
	list_add_tail(&rx->qe, &rx_mod->rx_active_q);

	return rx;

bna_rx_put(struct bna_rx_mod *rx_mod, struct bna_rx *rx)
	struct list_head *prev_qe = NULL;
	struct list_head *qe;

	bfa_q_qe_init(&rx->qe);

	list_for_each(qe, &rx_mod->rx_free_q) {
		if (((struct bna_rx *)qe)->rid < rx->rid)
			prev_qe = qe;
		else
			break;
	}

	if (prev_qe == NULL) {
		/* This is the first entry */
		bfa_q_enq_head(&rx_mod->rx_free_q, &rx->qe);
	} else if (bfa_q_next(prev_qe) == &rx_mod->rx_free_q) {
		/* This is the last entry */
		list_add_tail(&rx->qe, &rx_mod->rx_free_q);
	} else {
		/* Somewhere in the middle */
		bfa_q_next(&rx->qe) = bfa_q_next(prev_qe);
		bfa_q_prev(&rx->qe) = prev_qe;
		bfa_q_next(prev_qe) = &rx->qe;
		bfa_q_prev(bfa_q_next(&rx->qe)) = &rx->qe;
	}

	rx_mod->rx_free_count++;
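
/*
 * rx_free_q is kept sorted by rid: bna_rx_put() inserts in rid order, so
 * bna_rx_get() can hand out the lowest rid from the head for regular Rx
 * objects and the highest rid from the tail for loopback ones.
 */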

bna_rxp_add_rxqs(struct bna_rxp *rxp, struct bna_rxq *q0,
		 struct bna_rxq *q1)
	switch (rxp->type) {
	case BNA_RXP_SINGLE:
		rxp->rxq.single.only = q0;
		rxp->rxq.single.reserved = NULL;
		break;

	case BNA_RXP_SLR:
		rxp->rxq.slr.large = q0;
		rxp->rxq.slr.small = q1;
		break;

	case BNA_RXP_HDS:
		rxp->rxq.hds.data = q0;
		rxp->rxq.hds.hdr = q1;
		break;
	}

bna_rxq_qpt_setup(struct bna_rxq *rxq,
		  struct bna_rxp *rxp,
		  u32 page_count,
		  u32 page_size,
		  struct bna_mem_descr *qpt_mem,
		  struct bna_mem_descr *swqpt_mem,
		  struct bna_mem_descr *page_mem)
	u8 *kva;
	u64 dma;
	struct bna_dma_addr bna_dma;
	int i;

	rxq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
	rxq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
	rxq->qpt.kv_qpt_ptr = qpt_mem->kva;
	rxq->qpt.page_count = page_count;
	rxq->qpt.page_size = page_size;

	rxq->rcb->sw_qpt = (void **) swqpt_mem->kva;
	rxq->rcb->sw_q = page_mem->kva;

	kva = page_mem->kva;
	BNA_GET_DMA_ADDR(&page_mem->dma, dma);

	for (i = 0; i < rxq->qpt.page_count; i++) {
		rxq->rcb->sw_qpt[i] = kva;
		kva += PAGE_SIZE;

		BNA_SET_DMA_ADDR(dma, &bna_dma);
		((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].lsb =
			bna_dma.lsb;
		((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].msb =
			bna_dma.msb;
		dma += PAGE_SIZE;
	}
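
/*
 * The queue page table (QPT) built here is what the firmware receives via
 * bfi_enet_datapath_q_init(): kv_qpt_ptr holds one DMA address per queue
 * page, while sw_qpt[] keeps the matching kernel virtual addresses so the
 * driver can walk the same pages.
 */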

bna_rxp_cqpt_setup(struct bna_rxp *rxp,
		   u32 page_count,
		   u32 page_size,
		   struct bna_mem_descr *qpt_mem,
		   struct bna_mem_descr *swqpt_mem,
		   struct bna_mem_descr *page_mem)
	u8 *kva;
	u64 dma;
	struct bna_dma_addr bna_dma;
	int i;

	rxp->cq.qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
	rxp->cq.qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
	rxp->cq.qpt.kv_qpt_ptr = qpt_mem->kva;
	rxp->cq.qpt.page_count = page_count;
	rxp->cq.qpt.page_size = page_size;

	rxp->cq.ccb->sw_qpt = (void **) swqpt_mem->kva;
	rxp->cq.ccb->sw_q = page_mem->kva;

	kva = page_mem->kva;
	BNA_GET_DMA_ADDR(&page_mem->dma, dma);

	for (i = 0; i < rxp->cq.qpt.page_count; i++) {
		rxp->cq.ccb->sw_qpt[i] = kva;
		kva += PAGE_SIZE;

		BNA_SET_DMA_ADDR(dma, &bna_dma);
		((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].lsb =
			bna_dma.lsb;
		((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].msb =
			bna_dma.msb;
		dma += PAGE_SIZE;
	}

bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx)
	struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;

	bfa_wc_down(&rx_mod->rx_stop_wc);

bna_rx_mod_cb_rx_stopped_all(void *arg)
	struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;

	if (rx_mod->stop_cbfn)
		rx_mod->stop_cbfn(&rx_mod->bna->enet);
	rx_mod->stop_cbfn = NULL;

bna_rx_start(struct bna_rx *rx)
	rx->rx_flags |= BNA_RX_F_ENET_STARTED;
	if (rx->rx_flags & BNA_RX_F_ENABLED)
		bfa_fsm_send_event(rx, RX_E_START);

bna_rx_stop(struct bna_rx *rx)
	rx->rx_flags &= ~BNA_RX_F_ENET_STARTED;
	if (rx->fsm == (bfa_fsm_t) bna_rx_sm_stopped)
		bna_rx_mod_cb_rx_stopped(&rx->bna->rx_mod, rx);
	else {
		rx->stop_cbfn = bna_rx_mod_cb_rx_stopped;
		rx->stop_cbarg = &rx->bna->rx_mod;
		bfa_fsm_send_event(rx, RX_E_STOP);
	}

bna_rx_fail(struct bna_rx *rx)
	/* Indicate Enet is not enabled, and failed */
	rx->rx_flags &= ~BNA_RX_F_ENET_STARTED;
	bfa_fsm_send_event(rx, RX_E_FAIL);

bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
	struct bna_rx *rx;
	struct list_head *qe;

	rx_mod->flags |= BNA_RX_MOD_F_ENET_STARTED;
	if (type == BNA_RX_T_LOOPBACK)
		rx_mod->flags |= BNA_RX_MOD_F_ENET_LOOPBACK;

	list_for_each(qe, &rx_mod->rx_active_q) {
		rx = (struct bna_rx *)qe;
		if (rx->type == type)
			bna_rx_start(rx);
	}

bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
	struct bna_rx *rx;
	struct list_head *qe;

	rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
	rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;

	rx_mod->stop_cbfn = bna_enet_cb_rx_stopped;

	bfa_wc_init(&rx_mod->rx_stop_wc, bna_rx_mod_cb_rx_stopped_all, rx_mod);

	list_for_each(qe, &rx_mod->rx_active_q) {
		rx = (struct bna_rx *)qe;
		if (rx->type == type) {
			bfa_wc_up(&rx_mod->rx_stop_wc);
			bna_rx_stop(rx);
		}
	}

	bfa_wc_wait(&rx_mod->rx_stop_wc);

bna_rx_mod_fail(struct bna_rx_mod *rx_mod)
	struct bna_rx *rx;
	struct list_head *qe;

	rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
	rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;

	list_for_each(qe, &rx_mod->rx_active_q) {
		rx = (struct bna_rx *)qe;
		bna_rx_fail(rx);
	}

void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
		     struct bna_res_info *res_info)
	int index;
	struct bna_rx *rx_ptr;
	struct bna_rxp *rxp_ptr;
	struct bna_rxq *rxq_ptr;

	rx_mod->bna = bna;
	rx_mod->flags = 0;

	rx_mod->rx = (struct bna_rx *)
		res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mdl[0].kva;
	rx_mod->rxp = (struct bna_rxp *)
		res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mdl[0].kva;
	rx_mod->rxq = (struct bna_rxq *)
		res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mdl[0].kva;

	/* Initialize the queues */
	INIT_LIST_HEAD(&rx_mod->rx_free_q);
	rx_mod->rx_free_count = 0;
	INIT_LIST_HEAD(&rx_mod->rxq_free_q);
	rx_mod->rxq_free_count = 0;
	INIT_LIST_HEAD(&rx_mod->rxp_free_q);
	rx_mod->rxp_free_count = 0;
	INIT_LIST_HEAD(&rx_mod->rx_active_q);

	/* Build RX queues */
	for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
		rx_ptr = &rx_mod->rx[index];

		bfa_q_qe_init(&rx_ptr->qe);
		INIT_LIST_HEAD(&rx_ptr->rxp_q);
		rx_ptr->rid = index;
		rx_ptr->stop_cbfn = NULL;
		rx_ptr->stop_cbarg = NULL;

		list_add_tail(&rx_ptr->qe, &rx_mod->rx_free_q);
		rx_mod->rx_free_count++;
	}

	/* build RX-path queue */
	for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
		rxp_ptr = &rx_mod->rxp[index];
		bfa_q_qe_init(&rxp_ptr->qe);
		list_add_tail(&rxp_ptr->qe, &rx_mod->rxp_free_q);
		rx_mod->rxp_free_count++;
	}

	/* build RXQ queue */
	for (index = 0; index < (bna->ioceth.attr.num_rxp * 2); index++) {
		rxq_ptr = &rx_mod->rxq[index];
		bfa_q_qe_init(&rxq_ptr->qe);
		list_add_tail(&rxq_ptr->qe, &rx_mod->rxq_free_q);
		rx_mod->rxq_free_count++;
	}

bna_rx_mod_uninit(struct bna_rx_mod *rx_mod)
	struct list_head *qe;
	int i;

	i = 0;
	list_for_each(qe, &rx_mod->rx_free_q)
		i++;

	i = 0;
	list_for_each(qe, &rx_mod->rxp_free_q)
		i++;

	i = 0;
	list_for_each(qe, &rx_mod->rxq_free_q)
		i++;

	rx_mod->bna = NULL;

bna_bfi_rx_enet_start_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
	struct bfi_enet_rx_cfg_rsp *cfg_rsp = &rx->bfi_enet_cmd.cfg_rsp;
	struct bna_rxp *rxp = NULL;
	struct bna_rxq *q0 = NULL, *q1 = NULL;
	struct list_head *rxp_qe;
	int i;

	bfa_msgq_rsp_copy(&rx->bna->msgq, (u8 *)cfg_rsp,
			  sizeof(struct bfi_enet_rx_cfg_rsp));

	rx->hw_id = cfg_rsp->hw_id;

	for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
	     i < rx->num_paths;
	     i++, rxp_qe = bfa_q_next(rxp_qe)) {
		rxp = (struct bna_rxp *)rxp_qe;
		GET_RXQS(rxp, q0, q1);

		/* Setup doorbells */
		rxp->cq.ccb->i_dbell->doorbell_addr =
			rx->bna->pcidev.pci_bar_kva
			+ ntohl(cfg_rsp->q_handles[i].i_dbell);
		rxp->hw_id = cfg_rsp->q_handles[i].hw_cqid;
		q0->rcb->q_dbell =
			rx->bna->pcidev.pci_bar_kva
			+ ntohl(cfg_rsp->q_handles[i].ql_dbell);
		q0->hw_id = cfg_rsp->q_handles[i].hw_lqid;
		if (q1) {
			q1->rcb->q_dbell =
				rx->bna->pcidev.pci_bar_kva
				+ ntohl(cfg_rsp->q_handles[i].qs_dbell);
			q1->hw_id = cfg_rsp->q_handles[i].hw_sqid;
		}

		/* Initialize producer/consumer indexes */
		(*rxp->cq.ccb->hw_producer_index) = 0;
		rxp->cq.ccb->producer_index = 0;
		q0->rcb->producer_index = q0->rcb->consumer_index = 0;
		if (q1)
			q1->rcb->producer_index = q1->rcb->consumer_index = 0;
	}

	bfa_fsm_send_event(rx, RX_E_STARTED);

bna_bfi_rx_enet_stop_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
	bfa_fsm_send_event(rx, RX_E_STOPPED);

bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
	u32 cq_size, hq_size, dq_size;
	u32 cpage_count, hpage_count, dpage_count;
	struct bna_mem_info *mem_info;
	u32 cq_depth;
	u32 hq_depth;
	u32 dq_depth;

	dq_depth = q_cfg->q0_depth;
	hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q1_depth);
	cq_depth = roundup_pow_of_two(dq_depth + hq_depth);

	cq_size = cq_depth * BFI_CQ_WI_SIZE;
	cq_size = ALIGN(cq_size, PAGE_SIZE);
	cpage_count = SIZE_TO_PAGES(cq_size);

	dq_depth = roundup_pow_of_two(dq_depth);
	dq_size = dq_depth * BFI_RXQ_WI_SIZE;
	dq_size = ALIGN(dq_size, PAGE_SIZE);
	dpage_count = SIZE_TO_PAGES(dq_size);

	if (BNA_RXP_SINGLE != q_cfg->rxp_type) {
		hq_depth = roundup_pow_of_two(hq_depth);
		hq_size = hq_depth * BFI_RXQ_WI_SIZE;
		hq_size = ALIGN(hq_size, PAGE_SIZE);
		hpage_count = SIZE_TO_PAGES(hq_size);
	} else
		hpage_count = 0;
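
/*
 * The CQ must absorb completions from both the data and header queues, so
 * its depth is their sum rounded up to a power of two: q0_depth 1024 with
 * q1_depth 512 yields a 2048-entry CQ, while a SINGLE rxp (no header
 * queue) keeps the CQ at roundup_pow_of_two(q0_depth).
 */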
	res_info[BNA_RX_RES_MEM_T_CCB].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = sizeof(struct bna_ccb);
	mem_info->num = q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_RCB].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = sizeof(struct bna_rcb);
	mem_info->num = BNA_GET_RXQS(q_cfg);

	res_info[BNA_RX_RES_MEM_T_CQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = cpage_count * sizeof(struct bna_dma_addr);
	mem_info->num = q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_CSWQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = cpage_count * sizeof(void *);
	mem_info->num = q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = PAGE_SIZE * cpage_count;
	mem_info->num = q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_DQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = dpage_count * sizeof(struct bna_dma_addr);
	mem_info->num = q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_DSWQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = dpage_count * sizeof(void *);
	mem_info->num = q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_DPAGE].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = PAGE_SIZE * dpage_count;
	mem_info->num = q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_HQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = hpage_count * sizeof(struct bna_dma_addr);
	mem_info->num = (hpage_count ? q_cfg->num_paths : 0);

	res_info[BNA_RX_RES_MEM_T_HSWQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = hpage_count * sizeof(void *);
	mem_info->num = (hpage_count ? q_cfg->num_paths : 0);

	res_info[BNA_RX_RES_MEM_T_HPAGE].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = PAGE_SIZE * hpage_count;
	mem_info->num = (hpage_count ? q_cfg->num_paths : 0);

	res_info[BNA_RX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = BFI_IBIDX_SIZE;
	mem_info->num = q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_RIT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = BFI_ENET_RSS_RIT_MAX;
	mem_info->num = 1;

	res_info[BNA_RX_RES_T_INTR].res_type = BNA_RES_T_INTR;
	res_info[BNA_RX_RES_T_INTR].res_u.intr_info.intr_type = BNA_INTR_T_MSIX;
	res_info[BNA_RX_RES_T_INTR].res_u.intr_info.num = q_cfg->num_paths;
}
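/*
 * The entries filled in above only describe what is needed; nothing is
 * allocated here. The caller (bnad) is expected to satisfy each
 * BNA_RES_T_MEM / BNA_RES_T_INTR entry and then hand the same res_info
 * array to bna_rx_create(), which carves the mdl[] chunks into CCBs,
 * RCBs, queue page tables and data pages. A minimal calling sketch
 * (hypothetical locals, error handling omitted):
 *
 *	struct bna_res_info res[BNA_RX_RES_T_MAX];
 *
 *	bna_rx_res_req(&rx_cfg, res);
 *	(allocate every res[i] according to its res_type/mem_type)
 *	rx = bna_rx_create(bna, bnad, &rx_cfg, &rx_cbfn, res, priv);
 */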
struct bna_rx *
bna_rx_create(struct bna *bna, struct bnad *bnad,
		struct bna_rx_config *rx_cfg,
		const struct bna_rx_event_cbfn *rx_cbfn,
		struct bna_res_info *res_info,
		void *priv)
{
	struct bna_rx_mod *rx_mod = &bna->rx_mod;
	struct bna_rx *rx;
	struct bna_rxp *rxp;
	struct bna_rxq *q0;
	struct bna_rxq *q1;
	struct bna_intr_info *intr_info;
	struct bna_mem_descr *hqunmap_mem;
	struct bna_mem_descr *dqunmap_mem;
	struct bna_mem_descr *ccb_mem;
	struct bna_mem_descr *rcb_mem;
	struct bna_mem_descr *cqpt_mem;
	struct bna_mem_descr *cswqpt_mem;
	struct bna_mem_descr *cpage_mem;
	struct bna_mem_descr *hqpt_mem;
	struct bna_mem_descr *dqpt_mem;
	struct bna_mem_descr *hsqpt_mem;
	struct bna_mem_descr *dsqpt_mem;
	struct bna_mem_descr *hpage_mem;
	struct bna_mem_descr *dpage_mem;
	u32 dpage_count, hpage_count;
	u32 hq_idx, dq_idx, rcb_idx;
	u32 cq_depth, i;
	u32 page_count;

	if (!bna_rx_res_check(rx_mod, rx_cfg))
		return NULL;
	intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
	ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0];
	rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0];
	dqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPDQ].res_u.mem_info.mdl[0];
	hqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPHQ].res_u.mem_info.mdl[0];
	cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0];
	cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0];
	cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0];
	hqpt_mem = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info.mdl[0];
	dqpt_mem = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info.mdl[0];
	hsqpt_mem = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info.mdl[0];
	dsqpt_mem = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info.mdl[0];
	hpage_mem = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.mdl[0];
	dpage_mem = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.mdl[0];

	page_count = res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.len /
			PAGE_SIZE;

	dpage_count = res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.len /
			PAGE_SIZE;

	hpage_count = res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.len /
			PAGE_SIZE;
	rx = bna_rx_get(rx_mod, rx_cfg->rx_type);
	rx->bna = bna;
	rx->rx_flags = 0;
	INIT_LIST_HEAD(&rx->rxp_q);
	rx->stop_cbfn = NULL;
	rx->stop_cbarg = NULL;
	rx->priv = priv;

	rx->rcb_setup_cbfn = rx_cbfn->rcb_setup_cbfn;
	rx->rcb_destroy_cbfn = rx_cbfn->rcb_destroy_cbfn;
	rx->ccb_setup_cbfn = rx_cbfn->ccb_setup_cbfn;
	rx->ccb_destroy_cbfn = rx_cbfn->ccb_destroy_cbfn;
	rx->rx_stall_cbfn = rx_cbfn->rx_stall_cbfn;
	/* Following callbacks are mandatory */
	rx->rx_cleanup_cbfn = rx_cbfn->rx_cleanup_cbfn;
	rx->rx_post_cbfn = rx_cbfn->rx_post_cbfn;

	if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_STARTED) {
		switch (rx->type) {
		case BNA_RX_T_REGULAR:
			if (!(rx->bna->rx_mod.flags &
				BNA_RX_MOD_F_ENET_LOOPBACK))
				rx->rx_flags |= BNA_RX_F_ENET_STARTED;
			break;
		case BNA_RX_T_LOOPBACK:
			if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_LOOPBACK)
				rx->rx_flags |= BNA_RX_F_ENET_STARTED;
			break;
		}
	}
	rx->num_paths = rx_cfg->num_paths;
	for (i = 0, hq_idx = 0, dq_idx = 0, rcb_idx = 0;
			i < rx->num_paths; i++) {
		rxp = bna_rxp_get(rx_mod);
		list_add_tail(&rxp->qe, &rx->rxp_q);
		rxp->type = rx_cfg->rxp_type;
		rxp->rx = rx;
		rxp->cq.rx = rx;

		q0 = bna_rxq_get(rx_mod);
		if (BNA_RXP_SINGLE == rx_cfg->rxp_type)
			q1 = NULL;
		else
			q1 = bna_rxq_get(rx_mod);

		if (1 == intr_info->num)
			rxp->vector = intr_info->idl[0].vector;
		else
			rxp->vector = intr_info->idl[i].vector;

		/* Setup IB */

		rxp->cq.ib.ib_seg_host_addr.lsb =
		res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
		rxp->cq.ib.ib_seg_host_addr.msb =
		res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
		rxp->cq.ib.ib_seg_host_addr_kva =
		res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
		rxp->cq.ib.intr_type = intr_info->intr_type;
		if (intr_info->intr_type == BNA_INTR_T_MSIX)
			rxp->cq.ib.intr_vector = rxp->vector;
		else
			rxp->cq.ib.intr_vector = BIT(rxp->vector);
		rxp->cq.ib.coalescing_timeo = rx_cfg->coalescing_timeo;
		rxp->cq.ib.interpkt_count = BFI_RX_INTERPKT_COUNT;
		rxp->cq.ib.interpkt_timeo = BFI_RX_INTERPKT_TIMEO;

		bna_rxp_add_rxqs(rxp, q0, q1);
		/* Setup large Q */

		q0->rx = rx;
		q0->rxp = rxp;

		q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
		q0->rcb->unmap_q = (void *)dqunmap_mem[dq_idx].kva;
		rcb_idx++; dq_idx++;
		q0->rcb->q_depth = rx_cfg->q0_depth;
		q0->q_depth = rx_cfg->q0_depth;
		q0->multi_buffer = rx_cfg->q0_multi_buf;
		q0->buffer_size = rx_cfg->q0_buf_size;
		q0->num_vecs = rx_cfg->q0_num_vecs;
		q0->rcb->rxq = q0;
		q0->rcb->bnad = bna->bnad;
		q0->rcb->id = 0;
		q0->rx_packets = q0->rx_bytes = 0;
		q0->rx_packets_with_error = q0->rxbuf_alloc_failed = 0;

		bna_rxq_qpt_setup(q0, rxp, dpage_count, PAGE_SIZE,
			&dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[i]);

		if (rx->rcb_setup_cbfn)
			rx->rcb_setup_cbfn(bnad, q0->rcb);
		/* Setup small Q */

		if (q1) {
			q1->rx = rx;
			q1->rxp = rxp;

			q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
			q1->rcb->unmap_q = (void *)hqunmap_mem[hq_idx].kva;
			rcb_idx++; hq_idx++;
			q1->rcb->q_depth = rx_cfg->q1_depth;
			q1->q_depth = rx_cfg->q1_depth;
			q1->multi_buffer = BNA_STATUS_T_DISABLED;
			q1->num_vecs = 1;
			q1->rcb->rxq = q1;
			q1->rcb->bnad = bna->bnad;
			q1->rcb->id = 1;
			q1->buffer_size = (rx_cfg->rxp_type == BNA_RXP_HDS) ?
					rx_cfg->hds_config.forced_offset
					: rx_cfg->q1_buf_size;
			q1->rx_packets = q1->rx_bytes = 0;
			q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0;

			bna_rxq_qpt_setup(q1, rxp, hpage_count, PAGE_SIZE,
				&hqpt_mem[i], &hsqpt_mem[i],
				&hpage_mem[i]);

			if (rx->rcb_setup_cbfn)
				rx->rcb_setup_cbfn(bnad, q1->rcb);
		}
		/* Setup CQ */

		rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva;
		cq_depth = rx_cfg->q0_depth +
			((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
			 0 : rx_cfg->q1_depth);
		/* if multi-buffer is enabled, the sum of q0_depth
		 * and q1_depth need not be a power of 2
		 */
		cq_depth = roundup_pow_of_two(cq_depth);
		rxp->cq.ccb->q_depth = cq_depth;
		rxp->cq.ccb->cq = &rxp->cq;
		rxp->cq.ccb->rcb[0] = q0->rcb;
		q0->rcb->ccb = rxp->cq.ccb;
		if (q1) {
			rxp->cq.ccb->rcb[1] = q1->rcb;
			q1->rcb->ccb = rxp->cq.ccb;
		}
		rxp->cq.ccb->hw_producer_index =
			(u32 *)rxp->cq.ib.ib_seg_host_addr_kva;
		rxp->cq.ccb->i_dbell = &rxp->cq.ib.door_bell;
		rxp->cq.ccb->intr_type = rxp->cq.ib.intr_type;
		rxp->cq.ccb->intr_vector = rxp->cq.ib.intr_vector;
		rxp->cq.ccb->rx_coalescing_timeo =
			rxp->cq.ib.coalescing_timeo;
		rxp->cq.ccb->pkt_rate.small_pkt_cnt = 0;
		rxp->cq.ccb->pkt_rate.large_pkt_cnt = 0;
		rxp->cq.ccb->bnad = bna->bnad;
		rxp->cq.ccb->id = i;

		bna_rxp_cqpt_setup(rxp, page_count, PAGE_SIZE,
			&cqpt_mem[i], &cswqpt_mem[i], &cpage_mem[i]);

		if (rx->ccb_setup_cbfn)
			rx->ccb_setup_cbfn(bnad, rxp->cq.ccb);
	}
	rx->hds_cfg = rx_cfg->hds_config;

	bna_rxf_init(&rx->rxf, rx, rx_cfg, res_info);

	bfa_fsm_set_state(rx, bna_rx_sm_stopped);

	rx_mod->rid_mask |= BIT(rx->rid);

	return rx;
}
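/*
 * Lifecycle sketch (illustrative): a freshly created rx sits in the
 * stopped state until bna_rx_enable() raises RX_E_START, and it must
 * be brought back to the stopped state before the memory behind its
 * ccb/rcb/qpt structures may be released:
 *
 *	bna_rx_enable(rx);
 *	...
 *	bna_rx_disable(rx, BNA_HARD_CLEANUP, rx_stopped_cb);
 *	(from rx_stopped_cb, or after it has fired:)
 *	bna_rx_destroy(rx);
 */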
void
bna_rx_destroy(struct bna_rx *rx)
{
	struct bna_rx_mod *rx_mod = &rx->bna->rx_mod;
	struct bna_rxq *q0 = NULL;
	struct bna_rxq *q1 = NULL;
	struct bna_rxp *rxp;
	struct list_head *qe;

	bna_rxf_uninit(&rx->rxf);

	while (!list_empty(&rx->rxp_q)) {
		bfa_q_deq(&rx->rxp_q, &rxp);
		GET_RXQS(rxp, q0, q1);
		if (rx->rcb_destroy_cbfn)
			rx->rcb_destroy_cbfn(rx->bna->bnad, q0->rcb);
		q0->rcb = NULL;
		q0->rxp = NULL;
		q0->rx = NULL;
		bna_rxq_put(rx_mod, q0);

		if (q1) {
			if (rx->rcb_destroy_cbfn)
				rx->rcb_destroy_cbfn(rx->bna->bnad, q1->rcb);
			q1->rcb = NULL;
			q1->rxp = NULL;
			q1->rx = NULL;
			bna_rxq_put(rx_mod, q1);
		}
		rxp->rxq.slr.large = NULL;
		rxp->rxq.slr.small = NULL;

		if (rx->ccb_destroy_cbfn)
			rx->ccb_destroy_cbfn(rx->bna->bnad, rxp->cq.ccb);
		rxp->cq.ccb = NULL;
		rxp->rx = NULL;
		bna_rxp_put(rx_mod, rxp);
	}

	list_for_each(qe, &rx_mod->rx_active_q) {
		if (qe == &rx->qe) {
			list_del(&rx->qe);
			bfa_q_qe_init(&rx->qe);
			break;
		}
	}

	rx_mod->rid_mask &= ~BIT(rx->rid);

	rx->bna = NULL;
	rx->priv = NULL;
	bna_rx_put(rx_mod, rx);
}
void
bna_rx_enable(struct bna_rx *rx)
{
	if (rx->fsm != (bfa_sm_t)bna_rx_sm_stopped)
		return;

	rx->rx_flags |= BNA_RX_F_ENABLED;
	if (rx->rx_flags & BNA_RX_F_ENET_STARTED)
		bfa_fsm_send_event(rx, RX_E_START);
}

void
bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
		void (*cbfn)(void *, struct bna_rx *))
{
	if (type == BNA_SOFT_CLEANUP) {
		/* h/w should not be accessed. Treat it as stopped. */
		(*cbfn)(rx->bna->bnad, rx);
	} else {
		rx->stop_cbfn = cbfn;
		rx->stop_cbarg = rx->bna->bnad;

		rx->rx_flags &= ~BNA_RX_F_ENABLED;

		bfa_fsm_send_event(rx, RX_E_STOP);
	}
}
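/*
 * BNA_SOFT_CLEANUP invokes the callback synchronously and leaves the
 * FSM untouched; it is meant for the case where the hardware can no
 * longer be accessed (e.g. after an IOC failure). BNA_HARD_CLEANUP
 * defers the callback until the rx FSM has walked back to the stopped
 * state.
 */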
void
bna_rx_cleanup_complete(struct bna_rx *rx)
{
	bfa_fsm_send_event(rx, RX_E_CLEANUP_DONE);
}

void
bna_rx_vlan_strip_enable(struct bna_rx *rx)
{
	struct bna_rxf *rxf = &rx->rxf;

	if (rxf->vlan_strip_status == BNA_STATUS_T_DISABLED) {
		rxf->vlan_strip_status = BNA_STATUS_T_ENABLED;
		rxf->vlan_strip_pending = true;
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
	}
}

void
bna_rx_vlan_strip_disable(struct bna_rx *rx)
{
	struct bna_rxf *rxf = &rx->rxf;

	if (rxf->vlan_strip_status != BNA_STATUS_T_DISABLED) {
		rxf->vlan_strip_status = BNA_STATUS_T_DISABLED;
		rxf->vlan_strip_pending = true;
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
	}
}
enum bna_cb_status
bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
		enum bna_rxmode bitmask)
{
	struct bna_rxf *rxf = &rx->rxf;
	int need_hw_config = 0;

	/* Error checks */

	if (is_promisc_enable(new_mode, bitmask)) {
		/* If promisc mode is already enabled elsewhere in the system */
		if ((rx->bna->promisc_rid != BFI_INVALID_RID) &&
			(rx->bna->promisc_rid != rxf->rx->rid))
			goto err_return;

		/* If default mode is already enabled in the system */
		if (rx->bna->default_mode_rid != BFI_INVALID_RID)
			goto err_return;

		/* Trying to enable promiscuous and default mode together */
		if (is_default_enable(new_mode, bitmask))
			goto err_return;
	}

	if (is_default_enable(new_mode, bitmask)) {
		/* If default mode is already enabled elsewhere in the system */
		if ((rx->bna->default_mode_rid != BFI_INVALID_RID) &&
			(rx->bna->default_mode_rid != rxf->rx->rid)) {
				goto err_return;
		}

		/* If promiscuous mode is already enabled in the system */
		if (rx->bna->promisc_rid != BFI_INVALID_RID)
			goto err_return;
	}

	/* Process the commands */

	if (is_promisc_enable(new_mode, bitmask)) {
		if (bna_rxf_promisc_enable(rxf))
			need_hw_config = 1;
	} else if (is_promisc_disable(new_mode, bitmask)) {
		if (bna_rxf_promisc_disable(rxf))
			need_hw_config = 1;
	}

	if (is_allmulti_enable(new_mode, bitmask)) {
		if (bna_rxf_allmulti_enable(rxf))
			need_hw_config = 1;
	} else if (is_allmulti_disable(new_mode, bitmask)) {
		if (bna_rxf_allmulti_disable(rxf))
			need_hw_config = 1;
	}

	/* Trigger h/w if needed */

	if (need_hw_config) {
		rxf->cam_fltr_cbfn = NULL;
		rxf->cam_fltr_cbarg = rx->bna->bnad;
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
	}

	return BNA_CB_SUCCESS;

err_return:
	return BNA_CB_FAIL;
}
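/*
 * Usage sketch (illustrative): enabling promiscuous mode on one rx.
 * The rid checks above make promiscuous and default (VLAN promiscuous)
 * modes exclusive across the whole adapter, so the call can fail even
 * when the mode/bitmask pair itself is well formed:
 *
 *	if (bna_rx_mode_set(rx, BNA_RXMODE_PROMISC,
 *			    BNA_RXMODE_PROMISC) != BNA_CB_SUCCESS)
 *		(another rx already owns promiscuous or default mode)
 */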
void
bna_rx_vlanfilter_enable(struct bna_rx *rx)
{
	struct bna_rxf *rxf = &rx->rxf;

	if (rxf->vlan_filter_status == BNA_STATUS_T_DISABLED) {
		rxf->vlan_filter_status = BNA_STATUS_T_ENABLED;
		rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
	}
}
void
bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo)
{
	struct bna_rxp *rxp;
	struct list_head *qe;

	list_for_each(qe, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe;
		rxp->cq.ccb->rx_coalescing_timeo = coalescing_timeo;
		bna_ib_coalescing_timeo_set(&rxp->cq.ib, coalescing_timeo);
	}
}

void
bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX])
{
	int i, j;

	for (i = 0; i < BNA_LOAD_T_MAX; i++)
		for (j = 0; j < BNA_BIAS_T_MAX; j++)
			bna->rx_mod.dim_vector[i][j] = vector[i][j];
}
void
bna_rx_dim_update(struct bna_ccb *ccb)
{
	struct bna *bna = ccb->cq->rx->bna;
	u32 load, bias;
	u32 pkt_rt, small_rt, large_rt;
	u8 coalescing_timeo;

	if ((ccb->pkt_rate.small_pkt_cnt == 0) &&
		(ccb->pkt_rate.large_pkt_cnt == 0))
		return;

	/* Arrive at preconfigured coalescing timeo value based on pkt rate */

	small_rt = ccb->pkt_rate.small_pkt_cnt;
	large_rt = ccb->pkt_rate.large_pkt_cnt;

	pkt_rt = small_rt + large_rt;

	if (pkt_rt < BNA_PKT_RATE_10K)
		load = BNA_LOAD_T_LOW_4;
	else if (pkt_rt < BNA_PKT_RATE_20K)
		load = BNA_LOAD_T_LOW_3;
	else if (pkt_rt < BNA_PKT_RATE_30K)
		load = BNA_LOAD_T_LOW_2;
	else if (pkt_rt < BNA_PKT_RATE_40K)
		load = BNA_LOAD_T_LOW_1;
	else if (pkt_rt < BNA_PKT_RATE_50K)
		load = BNA_LOAD_T_HIGH_1;
	else if (pkt_rt < BNA_PKT_RATE_60K)
		load = BNA_LOAD_T_HIGH_2;
	else if (pkt_rt < BNA_PKT_RATE_80K)
		load = BNA_LOAD_T_HIGH_3;
	else
		load = BNA_LOAD_T_HIGH_4;

	if (small_rt > (large_rt << 1))
		bias = 0;
	else
		bias = 1;

	ccb->pkt_rate.small_pkt_cnt = 0;
	ccb->pkt_rate.large_pkt_cnt = 0;

	coalescing_timeo = bna->rx_mod.dim_vector[load][bias];
	ccb->rx_coalescing_timeo = coalescing_timeo;

	/* Set it to IB */
	bna_ib_coalescing_timeo_set(&ccb->cq->ib, coalescing_timeo);
}
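/*
 * Worked example (illustrative, taking the BNA_PKT_RATE_* thresholds
 * as packets per sampling interval): a poll that counted 25K packets,
 * mostly large ones, classifies as load BNA_LOAD_T_LOW_2 with bias 1,
 * so the new timeout is dim_vector[BNA_LOAD_T_LOW_2][1]; a burst that
 * is small-packet heavy (small_rt > 2 * large_rt) flips the bias to 0.
 */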
const u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
	{12, 12},
	{6, 10},
	{5, 10},
	{4, 8},
	{3, 6},
	{3, 6},
	{2, 4},
	{1, 2},
};

/* TX */
#define call_tx_stop_cbfn(tx)						\
do {									\
	if ((tx)->stop_cbfn) {						\
		void (*cbfn)(void *, struct bna_tx *);			\
		void *cbarg;						\
		cbfn = (tx)->stop_cbfn;					\
		cbarg = (tx)->stop_cbarg;				\
		(tx)->stop_cbfn = NULL;					\
		(tx)->stop_cbarg = NULL;				\
		cbfn(cbarg, (tx));					\
	}								\
} while (0)

#define call_tx_prio_change_cbfn(tx)					\
do {									\
	if ((tx)->prio_change_cbfn) {					\
		void (*cbfn)(struct bnad *, struct bna_tx *);		\
		cbfn = (tx)->prio_change_cbfn;				\
		(tx)->prio_change_cbfn = NULL;				\
		cbfn((tx)->bna->bnad, (tx));				\
	}								\
} while (0)
static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx);
static void bna_bfi_tx_enet_start(struct bna_tx *tx);
static void bna_tx_enet_stop(struct bna_tx *tx);

enum bna_tx_event {
	TX_E_START		= 1,
	TX_E_STOP		= 2,
	TX_E_FAIL		= 3,
	TX_E_STARTED		= 4,
	TX_E_STOPPED		= 5,
	TX_E_PRIO_CHANGE	= 6,
	TX_E_CLEANUP_DONE	= 7,
	TX_E_BW_UPDATE		= 8,
};

bfa_fsm_state_decl(bna_tx, stopped, struct bna_tx, enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, start_wait, struct bna_tx, enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, started, struct bna_tx, enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, stop_wait, struct bna_tx, enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, cleanup_wait, struct bna_tx,
			enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, prio_stop_wait, struct bna_tx,
			enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, prio_cleanup_wait, struct bna_tx,
			enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, failed, struct bna_tx, enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, quiesce_wait, struct bna_tx,
			enum bna_tx_event);
static void
bna_tx_sm_stopped_entry(struct bna_tx *tx)
{
	call_tx_stop_cbfn(tx);
}

static void
bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_START:
		bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
		break;

	case TX_E_STOP:
		call_tx_stop_cbfn(tx);
		break;

	case TX_E_FAIL:
		/* No-op */
		break;

	case TX_E_PRIO_CHANGE:
		call_tx_prio_change_cbfn(tx);
		break;

	case TX_E_BW_UPDATE:
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_tx_sm_start_wait_entry(struct bna_tx *tx)
{
	bna_bfi_tx_enet_start(tx);
}

static void
bna_tx_sm_start_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STOP:
		tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED);
		bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
		break;

	case TX_E_FAIL:
		tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED);
		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
		break;

	case TX_E_STARTED:
		if (tx->flags & (BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED)) {
			tx->flags &= ~(BNA_TX_F_PRIO_CHANGED |
				BNA_TX_F_BW_UPDATED);
			bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
		} else
			bfa_fsm_set_state(tx, bna_tx_sm_started);
		break;

	case TX_E_PRIO_CHANGE:
		tx->flags |= BNA_TX_F_PRIO_CHANGED;
		break;

	case TX_E_BW_UPDATE:
		tx->flags |= BNA_TX_F_BW_UPDATED;
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_tx_sm_started_entry(struct bna_tx *tx)
{
	struct bna_txq *txq;
	struct list_head *qe;
	int is_regular = (tx->type == BNA_TX_T_REGULAR);

	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		txq->tcb->priority = txq->priority;
		/* Start IB */
		bna_ib_start(tx->bna, &txq->ib, is_regular);
	}
	tx->tx_resume_cbfn(tx->bna->bnad, tx);
}

static void
bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
		tx->tx_stall_cbfn(tx->bna->bnad, tx);
		bna_tx_enet_stop(tx);
		break;

	case TX_E_FAIL:
		bfa_fsm_set_state(tx, bna_tx_sm_failed);
		tx->tx_stall_cbfn(tx->bna->bnad, tx);
		tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
		break;

	case TX_E_PRIO_CHANGE:
	case TX_E_BW_UPDATE:
		bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_tx_sm_stop_wait_entry(struct bna_tx *tx)
{
}

static void
bna_tx_sm_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_FAIL:
	case TX_E_STOPPED:
		bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
		tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
		break;

	case TX_E_STARTED:
		/*
		 * We are here due to a start_wait -> stop_wait transition on
		 * the TX_E_STOP event
		 */
		bna_tx_enet_stop(tx);
		break;

	case TX_E_PRIO_CHANGE:
	case TX_E_BW_UPDATE:
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_tx_sm_cleanup_wait_entry(struct bna_tx *tx)
{
}

static void
bna_tx_sm_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_FAIL:
	case TX_E_PRIO_CHANGE:
	case TX_E_BW_UPDATE:
		/* No-op */
		break;

	case TX_E_CLEANUP_DONE:
		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_tx_sm_prio_stop_wait_entry(struct bna_tx *tx)
{
	tx->tx_stall_cbfn(tx->bna->bnad, tx);
	bna_tx_enet_stop(tx);
}

static void
bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
		break;

	case TX_E_FAIL:
		bfa_fsm_set_state(tx, bna_tx_sm_failed);
		call_tx_prio_change_cbfn(tx);
		tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
		break;

	case TX_E_STOPPED:
		bfa_fsm_set_state(tx, bna_tx_sm_prio_cleanup_wait);
		break;

	case TX_E_PRIO_CHANGE:
	case TX_E_BW_UPDATE:
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_tx_sm_prio_cleanup_wait_entry(struct bna_tx *tx)
{
	call_tx_prio_change_cbfn(tx);
	tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
}

static void
bna_tx_sm_prio_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
		break;

	case TX_E_FAIL:
		bfa_fsm_set_state(tx, bna_tx_sm_failed);
		break;

	case TX_E_PRIO_CHANGE:
	case TX_E_BW_UPDATE:
		/* No-op */
		break;

	case TX_E_CLEANUP_DONE:
		bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_tx_sm_failed_entry(struct bna_tx *tx)
{
}

static void
bna_tx_sm_failed(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_START:
		bfa_fsm_set_state(tx, bna_tx_sm_quiesce_wait);
		break;

	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
		break;

	case TX_E_FAIL:
		/* No-op */
		break;

	case TX_E_CLEANUP_DONE:
		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_tx_sm_quiesce_wait_entry(struct bna_tx *tx)
{
}

static void
bna_tx_sm_quiesce_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
		break;

	case TX_E_FAIL:
		bfa_fsm_set_state(tx, bna_tx_sm_failed);
		break;

	case TX_E_CLEANUP_DONE:
		bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
		break;

	case TX_E_BW_UPDATE:
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_bfi_tx_enet_start(struct bna_tx *tx)
{
	struct bfi_enet_tx_cfg_req *cfg_req = &tx->bfi_enet_cmd.cfg_req;
	struct bna_txq *txq = NULL;
	struct list_head *qe;
	int i;

	bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_TX_CFG_SET_REQ, 0, tx->rid);
	cfg_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_tx_cfg_req)));

	cfg_req->num_queues = tx->num_txq;
	for (i = 0, qe = bfa_q_first(&tx->txq_q);
		i < tx->num_txq;
		i++, qe = bfa_q_next(qe)) {
		txq = (struct bna_txq *)qe;

		bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].q.q, &txq->qpt);
		cfg_req->q_cfg[i].q.priority = txq->priority;

		cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo =
			txq->ib.ib_seg_host_addr.lsb;
		cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi =
			txq->ib.ib_seg_host_addr.msb;
		cfg_req->q_cfg[i].ib.intr.msix_index =
			htons((u16)txq->ib.intr_vector);
	}

	cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_ENABLED;
	cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED;
	cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_ENABLED;
	cfg_req->ib_cfg.msix = (txq->ib.intr_type == BNA_INTR_T_MSIX)
				? BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.coalescing_timeout =
			htonl((u32)txq->ib.coalescing_timeo);
	cfg_req->ib_cfg.inter_pkt_timeout =
			htonl((u32)txq->ib.interpkt_timeo);
	cfg_req->ib_cfg.inter_pkt_count = (u8)txq->ib.interpkt_count;

	cfg_req->tx_cfg.vlan_mode = BFI_ENET_TX_VLAN_WI;
	cfg_req->tx_cfg.vlan_id = htons((u16)tx->txf_vlan_id);
	cfg_req->tx_cfg.admit_tagged_frame = BNA_STATUS_T_ENABLED;
	cfg_req->tx_cfg.apply_vlan_filter = BNA_STATUS_T_DISABLED;

	bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_tx_cfg_req), &cfg_req->mh);
	bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
}
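/*
 * The cfg_req is posted to the IOC firmware through the message queue;
 * the completion arrives asynchronously and is handled by
 * bna_bfi_tx_enet_start_rsp(), which copies the doorbell offsets out
 * of the response and then feeds TX_E_STARTED into the tx FSM.
 */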
static void
bna_bfi_tx_enet_stop(struct bna_tx *tx)
{
	struct bfi_enet_req *req = &tx->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_TX_CFG_CLR_REQ, 0, tx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req)));
	bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req),
		&req->mh);
	bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
}

static void
bna_tx_enet_stop(struct bna_tx *tx)
{
	struct bna_txq *txq;
	struct list_head *qe;

	/* Stop IB */
	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		bna_ib_stop(tx->bna, &txq->ib);
	}

	bna_bfi_tx_enet_stop(tx);
}
static void
bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size,
		struct bna_mem_descr *qpt_mem,
		struct bna_mem_descr *swqpt_mem,
		struct bna_mem_descr *page_mem)
{
	u8 *kva;
	u64 dma;
	struct bna_dma_addr bna_dma;
	int i;

	txq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
	txq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
	txq->qpt.kv_qpt_ptr = qpt_mem->kva;
	txq->qpt.page_count = page_count;
	txq->qpt.page_size = page_size;

	txq->tcb->sw_qpt = (void **) swqpt_mem->kva;
	txq->tcb->sw_q = page_mem->kva;

	kva = page_mem->kva;
	BNA_GET_DMA_ADDR(&page_mem->dma, dma);

	for (i = 0; i < page_count; i++) {
		txq->tcb->sw_qpt[i] = kva;
		kva += PAGE_SIZE;

		BNA_SET_DMA_ADDR(dma, &bna_dma);
		((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].lsb =
			bna_dma.lsb;
		((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].msb =
			bna_dma.msb;
		dma += PAGE_SIZE;
	}
}
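/*
 * Layout note: the QPT handed to the hardware (kv_qpt_ptr) is a flat
 * array of bna_dma_addr entries, one per ring page, while
 * tcb->sw_qpt[] shadows the same pages with their kernel virtual
 * addresses for the driver's own indexing. With page_count = 4, for
 * instance, the ring spans four PAGE_SIZE chunks that need not be
 * physically contiguous with one another.
 */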
static struct bna_tx *
bna_tx_get(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
	struct list_head *qe = NULL;
	struct bna_tx *tx = NULL;

	if (list_empty(&tx_mod->tx_free_q))
		return NULL;
	if (type == BNA_TX_T_REGULAR) {
		bfa_q_deq(&tx_mod->tx_free_q, &qe);
	} else {
		bfa_q_deq_tail(&tx_mod->tx_free_q, &qe);
	}
	tx = (struct bna_tx *)qe;
	bfa_q_qe_init(&tx->qe);
	tx->type = type;

	return tx;
}
static void
bna_tx_free(struct bna_tx *tx)
{
	struct bna_tx_mod *tx_mod = &tx->bna->tx_mod;
	struct bna_txq *txq;
	struct list_head *prev_qe;
	struct list_head *qe;

	while (!list_empty(&tx->txq_q)) {
		bfa_q_deq(&tx->txq_q, &txq);
		bfa_q_qe_init(&txq->qe);
		txq->tcb = NULL;
		txq->tx = NULL;
		list_add_tail(&txq->qe, &tx_mod->txq_free_q);
	}

	list_for_each(qe, &tx_mod->tx_active_q) {
		if (qe == &tx->qe) {
			list_del(&tx->qe);
			bfa_q_qe_init(&tx->qe);
			break;
		}
	}

	tx->bna = NULL;
	tx->priv = NULL;

	prev_qe = NULL;
	list_for_each(qe, &tx_mod->tx_free_q) {
		if (((struct bna_tx *)qe)->rid < tx->rid)
			prev_qe = qe;
		else
			break;
	}

	if (prev_qe == NULL) {
		/* This is the first entry */
		bfa_q_enq_head(&tx_mod->tx_free_q, &tx->qe);
	} else if (bfa_q_next(prev_qe) == &tx_mod->tx_free_q) {
		/* This is the last entry */
		list_add_tail(&tx->qe, &tx_mod->tx_free_q);
	} else {
		/* Somewhere in the middle */
		bfa_q_next(&tx->qe) = bfa_q_next(prev_qe);
		bfa_q_prev(&tx->qe) = prev_qe;
		bfa_q_next(prev_qe) = &tx->qe;
		bfa_q_prev(bfa_q_next(&tx->qe)) = &tx->qe;
	}
}
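/*
 * The free list is kept sorted by rid so that bna_tx_get() can serve
 * BNA_TX_T_REGULAR allocations from the head (lowest rid) and
 * BNA_TX_T_LOOPBACK allocations from the tail (highest rid), keeping
 * the two types at opposite ends of the id space.
 */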
static void
bna_tx_start(struct bna_tx *tx)
{
	tx->flags |= BNA_TX_F_ENET_STARTED;
	if (tx->flags & BNA_TX_F_ENABLED)
		bfa_fsm_send_event(tx, TX_E_START);
}

static void
bna_tx_stop(struct bna_tx *tx)
{
	tx->stop_cbfn = bna_tx_mod_cb_tx_stopped;
	tx->stop_cbarg = &tx->bna->tx_mod;

	tx->flags &= ~BNA_TX_F_ENET_STARTED;
	bfa_fsm_send_event(tx, TX_E_STOP);
}

static void
bna_tx_fail(struct bna_tx *tx)
{
	tx->flags &= ~BNA_TX_F_ENET_STARTED;
	bfa_fsm_send_event(tx, TX_E_FAIL);
}
void
bna_bfi_tx_enet_start_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_tx_cfg_rsp *cfg_rsp = &tx->bfi_enet_cmd.cfg_rsp;
	struct bna_txq *txq = NULL;
	struct list_head *qe;
	int i;

	bfa_msgq_rsp_copy(&tx->bna->msgq, (u8 *)cfg_rsp,
		sizeof(struct bfi_enet_tx_cfg_rsp));

	tx->hw_id = cfg_rsp->hw_id;

	for (i = 0, qe = bfa_q_first(&tx->txq_q);
		i < tx->num_txq; i++, qe = bfa_q_next(qe)) {
		txq = (struct bna_txq *)qe;

		/* Setup doorbells */
		txq->tcb->i_dbell->doorbell_addr =
			tx->bna->pcidev.pci_bar_kva
			+ ntohl(cfg_rsp->q_handles[i].i_dbell);
		txq->tcb->q_dbell =
			tx->bna->pcidev.pci_bar_kva
			+ ntohl(cfg_rsp->q_handles[i].q_dbell);
		txq->hw_id = cfg_rsp->q_handles[i].hw_qid;

		/* Initialize producer/consumer indexes */
		(*txq->tcb->hw_consumer_index) = 0;
		txq->tcb->producer_index = txq->tcb->consumer_index = 0;
	}

	bfa_fsm_send_event(tx, TX_E_STARTED);
}
void
bna_bfi_tx_enet_stop_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
{
	bfa_fsm_send_event(tx, TX_E_STOPPED);
}

void
bna_bfi_bw_update_aen(struct bna_tx_mod *tx_mod)
{
	struct bna_tx *tx;
	struct list_head *qe;

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		bfa_fsm_send_event(tx, TX_E_BW_UPDATE);
	}
}
void
bna_tx_res_req(int num_txq, int txq_depth, struct bna_res_info *res_info)
{
	u32 q_size;
	u32 page_count;
	struct bna_mem_info *mem_info;

	res_info[BNA_TX_RES_MEM_T_TCB].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = sizeof(struct bna_tcb);
	mem_info->num = num_txq;

	q_size = txq_depth * BFI_TXQ_WI_SIZE;
	q_size = ALIGN(q_size, PAGE_SIZE);
	page_count = q_size >> PAGE_SHIFT;

	res_info[BNA_TX_RES_MEM_T_QPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = page_count * sizeof(struct bna_dma_addr);
	mem_info->num = num_txq;

	res_info[BNA_TX_RES_MEM_T_SWQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = page_count * sizeof(void *);
	mem_info->num = num_txq;

	res_info[BNA_TX_RES_MEM_T_PAGE].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = PAGE_SIZE * page_count;
	mem_info->num = num_txq;

	res_info[BNA_TX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = BFI_IBIDX_SIZE;
	mem_info->num = num_txq;

	res_info[BNA_TX_RES_INTR_T_TXCMPL].res_type = BNA_RES_T_INTR;
	res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.intr_type =
			BNA_INTR_T_MSIX;
	res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.num = num_txq;
}
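/*
 * Worked example (illustrative, 4 KiB pages): with txq_depth = 2048,
 * q_size = 2048 * BFI_TXQ_WI_SIZE bytes rounded up to whole pages, and
 * page_count = q_size >> PAGE_SHIFT. That single page count sizes the
 * DMA-able QPT (page_count addresses per txq), the KVA shadow QPT
 * (page_count pointers per txq) and the data pages themselves.
 */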
struct bna_tx *
bna_tx_create(struct bna *bna, struct bnad *bnad,
		struct bna_tx_config *tx_cfg,
		const struct bna_tx_event_cbfn *tx_cbfn,
		struct bna_res_info *res_info, void *priv)
{
	struct bna_intr_info *intr_info;
	struct bna_tx_mod *tx_mod = &bna->tx_mod;
	struct bna_tx *tx;
	struct bna_txq *txq;
	struct list_head *qe;
	int page_count;
	int i;

	intr_info = &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
	page_count = (res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.len) /
					PAGE_SIZE;

	/*
	 * Get resources
	 */

	if ((intr_info->num != 1) && (intr_info->num != tx_cfg->num_txq))
		return NULL;

	/* Tx */

	tx = bna_tx_get(tx_mod, tx_cfg->tx_type);
	if (!tx)
		return NULL;
	tx->bna = bna;
	tx->priv = priv;

	/* TxQs */

	INIT_LIST_HEAD(&tx->txq_q);
	for (i = 0; i < tx_cfg->num_txq; i++) {
		if (list_empty(&tx_mod->txq_free_q))
			goto err_return;

		bfa_q_deq(&tx_mod->txq_free_q, &txq);
		bfa_q_qe_init(&txq->qe);
		list_add_tail(&txq->qe, &tx->txq_q);
		txq->tx = tx;
	}
	/*
	 * Initialize
	 */

	/* Tx */

	tx->tcb_setup_cbfn = tx_cbfn->tcb_setup_cbfn;
	tx->tcb_destroy_cbfn = tx_cbfn->tcb_destroy_cbfn;
	/* Following callbacks are mandatory */
	tx->tx_stall_cbfn = tx_cbfn->tx_stall_cbfn;
	tx->tx_resume_cbfn = tx_cbfn->tx_resume_cbfn;
	tx->tx_cleanup_cbfn = tx_cbfn->tx_cleanup_cbfn;

	list_add_tail(&tx->qe, &tx_mod->tx_active_q);

	tx->num_txq = tx_cfg->num_txq;

	tx->flags = 0;
	if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_STARTED) {
		switch (tx->type) {
		case BNA_TX_T_REGULAR:
			if (!(tx->bna->tx_mod.flags &
				BNA_TX_MOD_F_ENET_LOOPBACK))
				tx->flags |= BNA_TX_F_ENET_STARTED;
			break;
		case BNA_TX_T_LOOPBACK:
			if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_LOOPBACK)
				tx->flags |= BNA_TX_F_ENET_STARTED;
			break;
		}
	}
	/* TxQ */

	i = 0;
	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		txq->tcb = (struct bna_tcb *)
		res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info.mdl[i].kva;
		txq->tx_packets = 0;
		txq->tx_bytes = 0;

		/* IB */
		txq->ib.ib_seg_host_addr.lsb =
		res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
		txq->ib.ib_seg_host_addr.msb =
		res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
		txq->ib.ib_seg_host_addr_kva =
		res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
		txq->ib.intr_type = intr_info->intr_type;
		txq->ib.intr_vector = (intr_info->num == 1) ?
					intr_info->idl[0].vector :
					intr_info->idl[i].vector;
		if (intr_info->intr_type == BNA_INTR_T_INTX)
			txq->ib.intr_vector = BIT(txq->ib.intr_vector);
		txq->ib.coalescing_timeo = tx_cfg->coalescing_timeo;
		txq->ib.interpkt_timeo = BFI_TX_INTERPKT_TIMEO;
		txq->ib.interpkt_count = BFI_TX_INTERPKT_COUNT;

		/* TCB */

		txq->tcb->q_depth = tx_cfg->txq_depth;
		txq->tcb->unmap_q = (void *)
		res_info[BNA_TX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[i].kva;
		txq->tcb->hw_consumer_index =
			(u32 *)txq->ib.ib_seg_host_addr_kva;
		txq->tcb->i_dbell = &txq->ib.door_bell;
		txq->tcb->intr_type = txq->ib.intr_type;
		txq->tcb->intr_vector = txq->ib.intr_vector;
		txq->tcb->txq = txq;
		txq->tcb->bnad = bnad;
		txq->tcb->id = i;

		/* QPT, SWQPT, Pages */
		bna_txq_qpt_setup(txq, page_count, PAGE_SIZE,
			&res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info.mdl[i],
			&res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info.mdl[i],
			&res_info[BNA_TX_RES_MEM_T_PAGE].
				  res_u.mem_info.mdl[i]);

		/* Callback to bnad for setting up TCB */
		if (tx->tcb_setup_cbfn)
			(tx->tcb_setup_cbfn)(bna->bnad, txq->tcb);

		if (tx_cfg->num_txq == BFI_TX_MAX_PRIO)
			txq->priority = txq->tcb->id;
		else
			txq->priority = tx_mod->default_prio;

		i++;
	}

	tx->txf_vlan_id = 0;

	bfa_fsm_set_state(tx, bna_tx_sm_stopped);

	tx_mod->rid_mask |= BIT(tx->rid);

	return tx;

err_return:
	bna_tx_free(tx);
	return NULL;
}
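/*
 * Calling sketch (illustrative, error handling omitted) mirroring the
 * rx side: the same res_info array must first be filled in by
 * bna_tx_res_req() and allocated by the caller:
 *
 *	struct bna_res_info res[BNA_TX_RES_T_MAX];
 *
 *	bna_tx_res_req(num_txq, txq_depth, res);
 *	(allocate every res[i] according to its res_type/mem_type)
 *	tx = bna_tx_create(bna, bnad, &tx_cfg, &tx_cbfn, res, priv);
 */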
void
bna_tx_destroy(struct bna_tx *tx)
{
	struct bna_txq *txq;
	struct list_head *qe;

	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		if (tx->tcb_destroy_cbfn)
			(tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb);
	}

	tx->bna->tx_mod.rid_mask &= ~BIT(tx->rid);
	bna_tx_free(tx);
}
void
bna_tx_enable(struct bna_tx *tx)
{
	if (tx->fsm != (bfa_sm_t)bna_tx_sm_stopped)
		return;

	tx->flags |= BNA_TX_F_ENABLED;

	if (tx->flags & BNA_TX_F_ENET_STARTED)
		bfa_fsm_send_event(tx, TX_E_START);
}

void
bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
		void (*cbfn)(void *, struct bna_tx *))
{
	if (type == BNA_SOFT_CLEANUP) {
		(*cbfn)(tx->bna->bnad, tx);
		return;
	}

	tx->stop_cbfn = cbfn;
	tx->stop_cbarg = tx->bna->bnad;

	tx->flags &= ~BNA_TX_F_ENABLED;

	bfa_fsm_send_event(tx, TX_E_STOP);
}
void
bna_tx_cleanup_complete(struct bna_tx *tx)
{
	bfa_fsm_send_event(tx, TX_E_CLEANUP_DONE);
}
static void
bna_tx_mod_cb_tx_stopped(void *arg, struct bna_tx *tx)
{
	struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;

	bfa_wc_down(&tx_mod->tx_stop_wc);
}

static void
bna_tx_mod_cb_tx_stopped_all(void *arg)
{
	struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;

	if (tx_mod->stop_cbfn)
		tx_mod->stop_cbfn(&tx_mod->bna->enet);
	tx_mod->stop_cbfn = NULL;
}
void
bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
		struct bna_res_info *res_info)
{
	int i;

	tx_mod->bna = bna;
	tx_mod->flags = 0;

	tx_mod->tx = (struct bna_tx *)
		res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mdl[0].kva;
	tx_mod->txq = (struct bna_txq *)
		res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&tx_mod->tx_free_q);
	INIT_LIST_HEAD(&tx_mod->tx_active_q);

	INIT_LIST_HEAD(&tx_mod->txq_free_q);

	for (i = 0; i < bna->ioceth.attr.num_txq; i++) {
		tx_mod->tx[i].rid = i;
		bfa_q_qe_init(&tx_mod->tx[i].qe);
		list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q);
		bfa_q_qe_init(&tx_mod->txq[i].qe);
		list_add_tail(&tx_mod->txq[i].qe, &tx_mod->txq_free_q);
	}

	tx_mod->prio_map = BFI_TX_PRIO_MAP_ALL;
	tx_mod->default_prio = 0;
	tx_mod->iscsi_over_cee = BNA_STATUS_T_DISABLED;
	tx_mod->iscsi_prio = -1;
}
void
bna_tx_mod_uninit(struct bna_tx_mod *tx_mod)
{
	struct list_head *qe;
	int i;

	i = 0;
	list_for_each(qe, &tx_mod->tx_free_q)
		i++;

	i = 0;
	list_for_each(qe, &tx_mod->txq_free_q)
		i++;

	tx_mod->bna = NULL;
}
void
bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
	struct bna_tx *tx;
	struct list_head *qe;

	tx_mod->flags |= BNA_TX_MOD_F_ENET_STARTED;
	if (type == BNA_TX_T_LOOPBACK)
		tx_mod->flags |= BNA_TX_MOD_F_ENET_LOOPBACK;

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		if (tx->type == type)
			bna_tx_start(tx);
	}
}
void
bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
	struct bna_tx *tx;
	struct list_head *qe;

	tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
	tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;

	tx_mod->stop_cbfn = bna_enet_cb_tx_stopped;

	bfa_wc_init(&tx_mod->tx_stop_wc, bna_tx_mod_cb_tx_stopped_all, tx_mod);

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		if (tx->type == type) {
			bfa_wc_up(&tx_mod->tx_stop_wc);
			bna_tx_stop(tx);
		}
	}

	bfa_wc_wait(&tx_mod->tx_stop_wc);
}
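/*
 * Wait-counter pattern: bfa_wc_init() arms the resume callback and
 * takes an initial reference; every matching tx takes another via
 * bfa_wc_up() before its stop is issued, and bna_tx_mod_cb_tx_stopped()
 * drops one as each tx reaches the stopped state. bfa_wc_wait()
 * releases the initial reference, so bna_tx_mod_cb_tx_stopped_all()
 * fires exactly once - after the last tx stops, or immediately if no
 * tx matched the requested type.
 */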
void
bna_tx_mod_fail(struct bna_tx_mod *tx_mod)
{
	struct bna_tx *tx;
	struct list_head *qe;

	tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
	tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		bna_tx_fail(tx);
	}
}
void
bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo)
{
	struct bna_txq *txq;
	struct list_head *qe;

	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		bna_ib_coalescing_timeo_set(&txq->ib, coalescing_timeo);
	}
}