1 /* bnx2x_sp.c: Broadcom Everest network driver.
3 * Copyright (c) 2011-2013 Broadcom Corporation
5 * Unless you and Broadcom execute a separate written software license
6 * agreement governing use of this software, this software is licensed to you
7 * under the terms of the GNU General Public License version 2, available
8 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
10 * Notwithstanding the above, under no circumstances may you combine this
11 * software in any way with any other Broadcom software provided under a
12 * license other than the GPL, without Broadcom's express prior written
13 * consent.
15 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
16 * Written by: Vladislav Zolotarov
20 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22 #include <linux/module.h>
23 #include <linux/crc32.h>
24 #include <linux/netdevice.h>
25 #include <linux/etherdevice.h>
26 #include <linux/crc32c.h>
28 #include "bnx2x_cmn.h"
31 #define BNX2X_MAX_EMUL_MULTI 16
33 /**** Exe Queue interfaces ****/
36 * bnx2x_exe_queue_init - init the Exe Queue object
38 * @o: pointer to the object
40 * @owner: pointer to the owner
41 * @validate: validate function pointer
42 * @optimize: optimize function pointer
43 * @exec: execute function pointer
44 * @get: get function pointer
46 static inline void bnx2x_exe_queue_init(struct bnx2x *bp,
47 struct bnx2x_exe_queue_obj *o,
49 union bnx2x_qable_obj *owner,
50 exe_q_validate validate,
52 exe_q_optimize optimize,
56 memset(o, 0, sizeof(*o));
58 INIT_LIST_HEAD(&o->exe_queue);
59 INIT_LIST_HEAD(&o->pending_comp);
61 spin_lock_init(&o->lock);
63 o->exe_chunk_len = exe_len;
66 /* Owner specific callbacks */
67 o->validate = validate;
69 o->optimize = optimize;
73 DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk length of %d\n",
77 static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp,
78 struct bnx2x_exeq_elem *elem)
80 DP(BNX2X_MSG_SP, "Deleting an exe_queue element\n");
84 static inline int bnx2x_exe_queue_length(struct bnx2x_exe_queue_obj *o)
86 struct bnx2x_exeq_elem *elem;
89 spin_lock_bh(&o->lock);
91 list_for_each_entry(elem, &o->exe_queue, link)
94 spin_unlock_bh(&o->lock);
100 * bnx2x_exe_queue_add - add a new element to the execution queue
104 * @cmd: new command to add
105 * @restore: true - do not optimize the command
107 * If the element is optimized or is illegal, frees it.
109 static inline int bnx2x_exe_queue_add(struct bnx2x *bp,
110 struct bnx2x_exe_queue_obj *o,
111 struct bnx2x_exeq_elem *elem,
116 spin_lock_bh(&o->lock);
119 /* Try to cancel this element queue */
120 rc = o->optimize(bp, o->owner, elem);
124 /* Check if this request is ok */
125 rc = o->validate(bp, o->owner, elem);
127 DP(BNX2X_MSG_SP, "Preamble failed: %d\n", rc);
132 /* If so, add it to the execution queue */
133 list_add_tail(&elem->link, &o->exe_queue);
135 spin_unlock_bh(&o->lock);
140 bnx2x_exe_queue_free_elem(bp, elem);
142 spin_unlock_bh(&o->lock);
147 static inline void __bnx2x_exe_queue_reset_pending(
149 struct bnx2x_exe_queue_obj *o)
151 struct bnx2x_exeq_elem *elem;
153 while (!list_empty(&o->pending_comp)) {
154 elem = list_first_entry(&o->pending_comp,
155 struct bnx2x_exeq_elem, link);
157 list_del(&elem->link);
158 bnx2x_exe_queue_free_elem(bp, elem);
162 static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp,
163 struct bnx2x_exe_queue_obj *o)
165 spin_lock_bh(&o->lock);
167 __bnx2x_exe_queue_reset_pending(bp, o);
169 spin_unlock_bh(&o->lock);
173 * bnx2x_exe_queue_step - execute one execution chunk atomically
177 * @ramrod_flags: flags
179 * (Atomicity is ensured using the exe_queue->lock).
181 static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
182 struct bnx2x_exe_queue_obj *o,
183 unsigned long *ramrod_flags)
185 struct bnx2x_exeq_elem *elem, spacer;
188 memset(&spacer, 0, sizeof(spacer));
190 spin_lock_bh(&o->lock);
192 /* Next step should not be performed until the current is finished,
193 * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
194 * properly clear object internals without sending any command to the FW
195 * which also implies there won't be any completion to clear the
198 if (!list_empty(&o->pending_comp)) {
199 if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
200 DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n");
201 __bnx2x_exe_queue_reset_pending(bp, o);
203 spin_unlock_bh(&o->lock);
208 /* Run through the pending commands list and create a next
209 * execution chunk.
211 while (!list_empty(&o->exe_queue)) {
212 elem = list_first_entry(&o->exe_queue, struct bnx2x_exeq_elem,
214 WARN_ON(!elem->cmd_len);
216 if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
217 cur_len += elem->cmd_len;
218 /* Prevent from both lists being empty when moving an
219 * element. This will allow the call of
220 * bnx2x_exe_queue_empty() without locking.
222 list_add_tail(&spacer.link, &o->pending_comp);
224 list_move_tail(&elem->link, &o->pending_comp);
225 list_del(&spacer.link);
232 spin_unlock_bh(&o->lock);
236 rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
238 /* In case of an error return the commands back to the queue
239 * and reset the pending_comp.
241 list_splice_init(&o->pending_comp, &o->exe_queue);
243 /* If zero is returned, it means there are no outstanding pending
244 * completions and we may dismiss the pending list.
246 __bnx2x_exe_queue_reset_pending(bp, o);
248 spin_unlock_bh(&o->lock);
252 static inline bool bnx2x_exe_queue_empty(struct bnx2x_exe_queue_obj *o)
254 bool empty = list_empty(&o->exe_queue);
256 /* Don't reorder!!! */
259 return empty && list_empty(&o->pending_comp);
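/* Editor's note -- an illustrative sketch, not part of the driver: the
 * "spacer" element used in bnx2x_exe_queue_step() keeps at least one of the
 * two lists non-empty while an element migrates from exe_queue to
 * pending_comp, so the lock-free bnx2x_exe_queue_empty() above can never
 * observe both lists empty at the same time. A minimal stand-alone model of
 * the same move, assuming plain list_head lists rather than the driver's
 * objects:
 *
 *	static void move_with_spacer(struct list_head *exe_queue,
 *				     struct list_head *pending_comp,
 *				     struct list_head *elem)
 *	{
 *		LIST_HEAD(spacer);			// hypothetical local placeholder
 *
 *		list_add_tail(&spacer, pending_comp);	// dst is now non-empty
 *		list_move_tail(elem, pending_comp);	// migrate the real element
 *		list_del(&spacer);			// drop the placeholder
 *	}
 */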
262 static inline struct bnx2x_exeq_elem *bnx2x_exe_queue_alloc_elem(
265 DP(BNX2X_MSG_SP, "Allocating a new exe_queue element\n");
266 return kzalloc(sizeof(struct bnx2x_exeq_elem), GFP_ATOMIC);
269 /************************ raw_obj functions ***********************************/
270 static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o)
272 return !!test_bit(o->state, o->pstate);
275 static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o)
277 smp_mb__before_clear_bit();
278 clear_bit(o->state, o->pstate);
279 smp_mb__after_clear_bit();
282 static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)
284 smp_mb__before_clear_bit();
285 set_bit(o->state, o->pstate);
286 smp_mb__after_clear_bit();
290 * bnx2x_state_wait - wait until the given bit(state) is cleared
293 * @state: state which is to be cleared
294 * @state_p: state buffer
297 static inline int bnx2x_state_wait(struct bnx2x *bp, int state,
298 unsigned long *pstate)
300 /* can take a while if any port is running */
303 if (CHIP_REV_IS_EMUL(bp))
306 DP(BNX2X_MSG_SP, "waiting for state to become %d\n", state);
310 if (!test_bit(state, pstate)) {
311 #ifdef BNX2X_STOP_ON_ERROR
312 DP(BNX2X_MSG_SP, "exit (cnt %d)\n", 5000 - cnt);
317 usleep_range(1000, 2000);
324 BNX2X_ERR("timeout waiting for state %d\n", state);
325 #ifdef BNX2X_STOP_ON_ERROR
332 static int bnx2x_raw_wait(struct bnx2x *bp, struct bnx2x_raw_obj *raw)
334 return bnx2x_state_wait(bp, raw->state, raw->pstate);
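/* Editor's note -- an illustrative sketch, not part of the driver: the raw
 * object's pending bit is the handshake between the submitter and the
 * completion path. Assuming a caller holding an already initialised
 * struct bnx2x_raw_obj *r, the expected lifecycle is roughly:
 *
 *	r->set_pending(r);		// mark the state bit before posting
 *	// ... post the ramrod to the FW ...
 *	rc = r->wait_comp(bp, r);	// bnx2x_raw_wait()/bnx2x_state_wait()
 *					// poll until the completion handler
 *					// calls r->clear_pending(r)
 */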
337 /***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
338 /* credit handling callbacks */
339 static bool bnx2x_get_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int *offset)
341 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
345 return mp->get_entry(mp, offset);
348 static bool bnx2x_get_credit_mac(struct bnx2x_vlan_mac_obj *o)
350 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
354 return mp->get(mp, 1);
357 static bool bnx2x_get_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int *offset)
359 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
363 return vp->get_entry(vp, offset);
366 static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
368 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
372 return vp->get(vp, 1);
375 static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
377 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
378 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
383 if (!vp->get(vp, 1)) {
391 static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
393 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
395 return mp->put_entry(mp, offset);
398 static bool bnx2x_put_credit_mac(struct bnx2x_vlan_mac_obj *o)
400 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
402 return mp->put(mp, 1);
405 static bool bnx2x_put_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int offset)
407 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
409 return vp->put_entry(vp, offset);
412 static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
414 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
416 return vp->put(vp, 1);
419 static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
421 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
422 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
427 if (!vp->put(vp, 1)) {
435 static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
436 int n, u8 *base, u8 stride, u8 size)
438 struct bnx2x_vlan_mac_registry_elem *pos;
443 list_for_each_entry(pos, &o->head, link) {
445 memcpy(next, &pos->u, size);
447 DP(BNX2X_MSG_SP, "copied element number %d to address %p element was:\n",
449 next += stride + size;
452 return counter * ETH_ALEN;
455 /* check_add() callbacks */
456 static int bnx2x_check_mac_add(struct bnx2x *bp,
457 struct bnx2x_vlan_mac_obj *o,
458 union bnx2x_classification_ramrod_data *data)
460 struct bnx2x_vlan_mac_registry_elem *pos;
462 DP(BNX2X_MSG_SP, "Checking MAC %pM for ADD command\n", data->mac.mac);
464 if (!is_valid_ether_addr(data->mac.mac))
467 /* Check if a requested MAC already exists */
468 list_for_each_entry(pos, &o->head, link)
469 if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN) &&
470 (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
476 static int bnx2x_check_vlan_add(struct bnx2x *bp,
477 struct bnx2x_vlan_mac_obj *o,
478 union bnx2x_classification_ramrod_data *data)
480 struct bnx2x_vlan_mac_registry_elem *pos;
482 DP(BNX2X_MSG_SP, "Checking VLAN %d for ADD command\n", data->vlan.vlan);
484 list_for_each_entry(pos, &o->head, link)
485 if (data->vlan.vlan == pos->u.vlan.vlan)
491 static int bnx2x_check_vlan_mac_add(struct bnx2x *bp,
492 struct bnx2x_vlan_mac_obj *o,
493 union bnx2x_classification_ramrod_data *data)
495 struct bnx2x_vlan_mac_registry_elem *pos;
497 DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for ADD command\n",
498 data->vlan_mac.mac, data->vlan_mac.vlan);
500 list_for_each_entry(pos, &o->head, link)
501 if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
502 (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
504 (data->vlan_mac.is_inner_mac ==
505 pos->u.vlan_mac.is_inner_mac))
511 /* check_del() callbacks */
512 static struct bnx2x_vlan_mac_registry_elem *
513 bnx2x_check_mac_del(struct bnx2x *bp,
514 struct bnx2x_vlan_mac_obj *o,
515 union bnx2x_classification_ramrod_data *data)
517 struct bnx2x_vlan_mac_registry_elem *pos;
519 DP(BNX2X_MSG_SP, "Checking MAC %pM for DEL command\n", data->mac.mac);
521 list_for_each_entry(pos, &o->head, link)
522 if ((!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) &&
523 (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
529 static struct bnx2x_vlan_mac_registry_elem *
530 bnx2x_check_vlan_del(struct bnx2x *bp,
531 struct bnx2x_vlan_mac_obj *o,
532 union bnx2x_classification_ramrod_data *data)
534 struct bnx2x_vlan_mac_registry_elem *pos;
536 DP(BNX2X_MSG_SP, "Checking VLAN %d for DEL command\n", data->vlan.vlan);
538 list_for_each_entry(pos, &o->head, link)
539 if (data->vlan.vlan == pos->u.vlan.vlan)
545 static struct bnx2x_vlan_mac_registry_elem *
546 bnx2x_check_vlan_mac_del(struct bnx2x *bp,
547 struct bnx2x_vlan_mac_obj *o,
548 union bnx2x_classification_ramrod_data *data)
550 struct bnx2x_vlan_mac_registry_elem *pos;
552 DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for DEL command\n",
553 data->vlan_mac.mac, data->vlan_mac.vlan);
555 list_for_each_entry(pos, &o->head, link)
556 if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
557 (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
559 (data->vlan_mac.is_inner_mac ==
560 pos->u.vlan_mac.is_inner_mac))
566 /* check_move() callback */
567 static bool bnx2x_check_move(struct bnx2x *bp,
568 struct bnx2x_vlan_mac_obj *src_o,
569 struct bnx2x_vlan_mac_obj *dst_o,
570 union bnx2x_classification_ramrod_data *data)
572 struct bnx2x_vlan_mac_registry_elem *pos;
575 /* Check if we can delete the requested configuration from the first
578 pos = src_o->check_del(bp, src_o, data);
580 /* check if configuration can be added */
581 rc = dst_o->check_add(bp, dst_o, data);
583 /* If this classification can not be added (is already set)
584 * or can't be deleted - return an error.
592 static bool bnx2x_check_move_always_err(
594 struct bnx2x_vlan_mac_obj *src_o,
595 struct bnx2x_vlan_mac_obj *dst_o,
596 union bnx2x_classification_ramrod_data *data)
601 static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
603 struct bnx2x_raw_obj *raw = &o->raw;
606 if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
607 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
608 rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;
610 if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
611 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
612 rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;
617 void bnx2x_set_mac_in_nig(struct bnx2x *bp,
618 bool add, unsigned char *dev_addr, int index)
621 u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
622 NIG_REG_LLH0_FUNC_MEM;
624 if (!IS_MF_SI(bp) && !IS_MF_AFEX(bp))
627 if (index > BNX2X_LLH_CAM_MAX_PF_LINE)
630 DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n",
631 (add ? "ADD" : "DELETE"), index);
634 /* LLH_FUNC_MEM is a u64 WB register */
635 reg_offset += 8*index;
637 wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
638 (dev_addr[4] << 8) | dev_addr[5]);
639 wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]);
641 REG_WR_DMAE(bp, reg_offset, wb_data, 2);
644 REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
645 NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add);
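/* Editor's note (illustrative): the two write-back words above pack the
 * 6-byte MAC with the low four bytes in wb_data[0] and the high two bytes in
 * wb_data[1]. For a made-up address 00:11:22:33:44:55:
 *
 *	wb_data[0] = 0x22334455;	// dev_addr[2..5]
 *	wb_data[1] = 0x00000011;	// dev_addr[0..1]
 */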
649 * bnx2x_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
652 * @o: queue for which we want to configure this rule
653 * @add: if true the command is an ADD command, DEL otherwise
654 * @opcode: CLASSIFY_RULE_OPCODE_XXX
655 * @hdr: pointer to a header to setup
658 static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp,
659 struct bnx2x_vlan_mac_obj *o, bool add, int opcode,
660 struct eth_classify_cmd_header *hdr)
662 struct bnx2x_raw_obj *raw = &o->raw;
664 hdr->client_id = raw->cl_id;
665 hdr->func_id = raw->func_id;
667 /* Rx or/and Tx (internal switching) configuration ? */
668 hdr->cmd_general_data |=
669 bnx2x_vlan_mac_get_rx_tx_flag(o);
672 hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;
674 hdr->cmd_general_data |=
675 (opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
679 * bnx2x_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
681 * @cid: connection id
682 * @type: BNX2X_FILTER_XXX_PENDING
683 * @hdr: pointer to header to setup
686 * currently we always configure one rule and echo field to contain a CID and an
687 * opcode type.
689 static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type,
690 struct eth_classify_header *hdr, int rule_cnt)
692 hdr->echo = cpu_to_le32((cid & BNX2X_SWCID_MASK) |
693 (type << BNX2X_SWCID_SHIFT));
694 hdr->rule_cnt = (u8)rule_cnt;
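/* Editor's note -- an illustrative sketch, not part of the driver: the echo
 * field multiplexes the software CID and the pending-state type so the
 * completion handler can recover both. A hypothetical decode of an echo value
 * read back from the CQE simply mirrors the encoding above:
 *
 *	u32 echo = le32_to_cpu(hdr->echo);
 *	u32 cid  = echo & BNX2X_SWCID_MASK;
 *	int type = echo >> BNX2X_SWCID_SHIFT;
 */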
697 /* hw_config() callbacks */
698 static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
699 struct bnx2x_vlan_mac_obj *o,
700 struct bnx2x_exeq_elem *elem, int rule_idx,
703 struct bnx2x_raw_obj *raw = &o->raw;
704 struct eth_classify_rules_ramrod_data *data =
705 (struct eth_classify_rules_ramrod_data *)(raw->rdata);
706 int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
707 union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
708 bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
709 unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
710 u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac;
712 /* Set LLH CAM entry: currently only iSCSI and ETH macs are
713 * relevant. In addition, the current implementation is tuned for a
714 * single ETH MAC.
716 * When multiple unicast ETH MACs PF configuration in switch
717 * independent mode is required (NetQ, multiple netdev MACs,
718 * etc.), consider better utilisation of 8 per function MAC
719 * entries in the LLH register. There are also
720 * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that bring the
721 * total number of CAM entries to 16.
723 * Currently we won't configure NIG for MACs other than a primary ETH
724 * MAC and iSCSI L2 MAC.
726 * If this MAC is moving from one Queue to another, no need to change
729 if (cmd != BNX2X_VLAN_MAC_MOVE) {
730 if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags))
731 bnx2x_set_mac_in_nig(bp, add, mac,
732 BNX2X_LLH_CAM_ISCSI_ETH_LINE);
733 else if (test_bit(BNX2X_ETH_MAC, vlan_mac_flags))
734 bnx2x_set_mac_in_nig(bp, add, mac,
735 BNX2X_LLH_CAM_ETH_LINE);
738 /* Reset the ramrod data buffer for the first rule */
740 memset(data, 0, sizeof(*data));
742 /* Setup a command header */
743 bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC,
744 &rule_entry->mac.header);
746 DP(BNX2X_MSG_SP, "About to %s MAC %pM for Queue %d\n",
747 (add ? "add" : "delete"), mac, raw->cl_id);
749 /* Set a MAC itself */
750 bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
751 &rule_entry->mac.mac_mid,
752 &rule_entry->mac.mac_lsb, mac);
753 rule_entry->mac.inner_mac =
754 cpu_to_le16(elem->cmd_data.vlan_mac.u.mac.is_inner_mac);
756 /* MOVE: Add a rule that will add this MAC to the target Queue */
757 if (cmd == BNX2X_VLAN_MAC_MOVE) {
761 /* Setup ramrod data */
762 bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
763 elem->cmd_data.vlan_mac.target_obj,
764 true, CLASSIFY_RULE_OPCODE_MAC,
765 &rule_entry->mac.header);
767 /* Set a MAC itself */
768 bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
769 &rule_entry->mac.mac_mid,
770 &rule_entry->mac.mac_lsb, mac);
771 rule_entry->mac.inner_mac =
772 cpu_to_le16(elem->cmd_data.vlan_mac.
776 /* Set the ramrod data header */
777 /* TODO: take this to the higher level in order to prevent multiple
779 bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
784 * bnx2x_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod
789 * @cam_offset: offset in cam memory
790 * @hdr: pointer to a header to setup
794 static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp,
795 struct bnx2x_vlan_mac_obj *o, int type, int cam_offset,
796 struct mac_configuration_hdr *hdr)
798 struct bnx2x_raw_obj *r = &o->raw;
801 hdr->offset = (u8)cam_offset;
802 hdr->client_id = cpu_to_le16(0xff);
803 hdr->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
804 (type << BNX2X_SWCID_SHIFT));
807 static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp,
808 struct bnx2x_vlan_mac_obj *o, bool add, int opcode, u8 *mac,
809 u16 vlan_id, struct mac_configuration_entry *cfg_entry)
811 struct bnx2x_raw_obj *r = &o->raw;
812 u32 cl_bit_vec = (1 << r->cl_id);
814 cfg_entry->clients_bit_vector = cpu_to_le32(cl_bit_vec);
815 cfg_entry->pf_id = r->func_id;
816 cfg_entry->vlan_id = cpu_to_le16(vlan_id);
819 SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
820 T_ETH_MAC_COMMAND_SET);
821 SET_FLAG(cfg_entry->flags,
822 MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, opcode);
824 /* Set a MAC in a ramrod data */
825 bnx2x_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
826 &cfg_entry->middle_mac_addr,
827 &cfg_entry->lsb_mac_addr, mac);
829 SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
830 T_ETH_MAC_COMMAND_INVALIDATE);
833 static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp,
834 struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, bool add,
835 u8 *mac, u16 vlan_id, int opcode, struct mac_configuration_cmd *config)
837 struct mac_configuration_entry *cfg_entry = &config->config_table[0];
838 struct bnx2x_raw_obj *raw = &o->raw;
840 bnx2x_vlan_mac_set_rdata_hdr_e1x(bp, o, type, cam_offset,
842 bnx2x_vlan_mac_set_cfg_entry_e1x(bp, o, add, opcode, mac, vlan_id,
845 DP(BNX2X_MSG_SP, "%s MAC %pM CLID %d CAM offset %d\n",
846 (add ? "setting" : "clearing"),
847 mac, raw->cl_id, cam_offset);
851 * bnx2x_set_one_mac_e1x - fill a single MAC rule ramrod data
854 * @o: bnx2x_vlan_mac_obj
855 * @elem: bnx2x_exeq_elem
856 * @rule_idx: rule_idx
857 * @cam_offset: cam_offset
859 static void bnx2x_set_one_mac_e1x(struct bnx2x *bp,
860 struct bnx2x_vlan_mac_obj *o,
861 struct bnx2x_exeq_elem *elem, int rule_idx,
864 struct bnx2x_raw_obj *raw = &o->raw;
865 struct mac_configuration_cmd *config =
866 (struct mac_configuration_cmd *)(raw->rdata);
867 /* 57710 and 57711 do not support MOVE command,
868 * so it's either ADD or DEL
870 bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
873 /* Reset the ramrod data buffer */
874 memset(config, 0, sizeof(*config));
876 bnx2x_vlan_mac_set_rdata_e1x(bp, o, raw->state,
878 elem->cmd_data.vlan_mac.u.mac.mac, 0,
879 ETH_VLAN_FILTER_ANY_VLAN, config);
882 static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
883 struct bnx2x_vlan_mac_obj *o,
884 struct bnx2x_exeq_elem *elem, int rule_idx,
887 struct bnx2x_raw_obj *raw = &o->raw;
888 struct eth_classify_rules_ramrod_data *data =
889 (struct eth_classify_rules_ramrod_data *)(raw->rdata);
890 int rule_cnt = rule_idx + 1;
891 union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
892 enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
893 bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
894 u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;
896 /* Reset the ramrod data buffer for the first rule */
898 memset(data, 0, sizeof(*data));
900 /* Set a rule header */
901 bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_VLAN,
902 &rule_entry->vlan.header);
904 DP(BNX2X_MSG_SP, "About to %s VLAN %d\n", (add ? "add" : "delete"),
907 /* Set a VLAN itself */
908 rule_entry->vlan.vlan = cpu_to_le16(vlan);
910 /* MOVE: Add a rule that will add this MAC to the target Queue */
911 if (cmd == BNX2X_VLAN_MAC_MOVE) {
915 /* Setup ramrod data */
916 bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
917 elem->cmd_data.vlan_mac.target_obj,
918 true, CLASSIFY_RULE_OPCODE_VLAN,
919 &rule_entry->vlan.header);
921 /* Set a VLAN itself */
922 rule_entry->vlan.vlan = cpu_to_le16(vlan);
925 /* Set the ramrod data header */
926 /* TODO: take this to the higher level in order to prevent multiple
928 bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
932 static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
933 struct bnx2x_vlan_mac_obj *o,
934 struct bnx2x_exeq_elem *elem,
935 int rule_idx, int cam_offset)
937 struct bnx2x_raw_obj *raw = &o->raw;
938 struct eth_classify_rules_ramrod_data *data =
939 (struct eth_classify_rules_ramrod_data *)(raw->rdata);
940 int rule_cnt = rule_idx + 1;
941 union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
942 enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
943 bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
944 u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
945 u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;
947 /* Reset the ramrod data buffer for the first rule */
949 memset(data, 0, sizeof(*data));
951 /* Set a rule header */
952 bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
953 &rule_entry->pair.header);
955 /* Set VLAN and MAC themselves */
956 rule_entry->pair.vlan = cpu_to_le16(vlan);
957 bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
958 &rule_entry->pair.mac_mid,
959 &rule_entry->pair.mac_lsb, mac);
960 rule_entry->pair.inner_mac =
961 cpu_to_le16(elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac);
962 /* MOVE: Add a rule that will add this MAC to the target Queue */
963 if (cmd == BNX2X_VLAN_MAC_MOVE) {
967 /* Setup ramrod data */
968 bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
969 elem->cmd_data.vlan_mac.target_obj,
970 true, CLASSIFY_RULE_OPCODE_PAIR,
971 &rule_entry->pair.header);
973 /* Set a VLAN itself */
974 rule_entry->pair.vlan = cpu_to_le16(vlan);
975 bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
976 &rule_entry->pair.mac_mid,
977 &rule_entry->pair.mac_lsb, mac);
978 rule_entry->pair.inner_mac =
979 cpu_to_le16(elem->cmd_data.vlan_mac.u.
980 vlan_mac.is_inner_mac);
983 /* Set the ramrod data header */
984 /* TODO: take this to the higher level in order to prevent multiple
986 bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
991 * bnx2x_set_one_vlan_mac_e1h - fill a single VLAN-MAC rule ramrod data
994 * @o: bnx2x_vlan_mac_obj
995 * @elem: bnx2x_exeq_elem
996 * @rule_idx: rule_idx
997 * @cam_offset: cam_offset
999 static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
1000 struct bnx2x_vlan_mac_obj *o,
1001 struct bnx2x_exeq_elem *elem,
1002 int rule_idx, int cam_offset)
1004 struct bnx2x_raw_obj *raw = &o->raw;
1005 struct mac_configuration_cmd *config =
1006 (struct mac_configuration_cmd *)(raw->rdata);
1007 /* 57710 and 57711 do not support MOVE command,
1008 * so it's either ADD or DEL
1010 bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
1013 /* Reset the ramrod data buffer */
1014 memset(config, 0, sizeof(*config));
1016 bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
1018 elem->cmd_data.vlan_mac.u.vlan_mac.mac,
1019 elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
1020 ETH_VLAN_FILTER_CLASSIFY, config);
1023 #define list_next_entry(pos, member) \
1024 list_entry((pos)->member.next, typeof(*(pos)), member)
1027 * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
1029 * @bp: device handle
1030 * @p: command parameters
1031 * @ppos: pointer to the cookie
1033 * reconfigure next MAC/VLAN/VLAN-MAC element from the
1034 * previously configured elements list.
1036 * from the command parameters only the RAMROD_COMP_WAIT bit in ramrod_flags is
1037 * taken into account.
1039 * pointer to the cookie - that should be given back in the next call to make
1040 * the function handle the next element. If *ppos is set to NULL it will restart the
1041 * iterator. If the returned *ppos == NULL this means that the last element has been
1042 * handled.
1045 static int bnx2x_vlan_mac_restore(struct bnx2x *bp,
1046 struct bnx2x_vlan_mac_ramrod_params *p,
1047 struct bnx2x_vlan_mac_registry_elem **ppos)
1049 struct bnx2x_vlan_mac_registry_elem *pos;
1050 struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1052 /* If list is empty - there is nothing to do here */
1053 if (list_empty(&o->head)) {
1058 /* make a step... */
1060 *ppos = list_first_entry(&o->head,
1061 struct bnx2x_vlan_mac_registry_elem,
1064 *ppos = list_next_entry(*ppos, link);
1068 /* If it's the last step - return NULL */
1069 if (list_is_last(&pos->link, &o->head))
1072 /* Prepare a 'user_req' */
1073 memcpy(&p->user_req.u, &pos->u, sizeof(pos->u));
1075 /* Set the command */
1076 p->user_req.cmd = BNX2X_VLAN_MAC_ADD;
1078 /* Set vlan_mac_flags */
1079 p->user_req.vlan_mac_flags = pos->vlan_mac_flags;
1081 /* Set a restore bit */
1082 __set_bit(RAMROD_RESTORE, &p->ramrod_flags);
1084 return bnx2x_config_vlan_mac(bp, p);
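/* Editor's note -- an illustrative sketch, not part of the driver: per the
 * kernel-doc above, a restore flow would drive this iterator roughly as
 * follows, assuming a prepared struct bnx2x_vlan_mac_ramrod_params p with
 * RAMROD_COMP_WAIT set in p.ramrod_flags:
 *
 *	struct bnx2x_vlan_mac_registry_elem *pos = NULL;
 *	int rc = 0;
 *
 *	do {
 *		rc = bnx2x_vlan_mac_restore(bp, &p, &pos);
 *	} while (!rc && pos);	// returned *ppos == NULL means the last
 *				// element has been handled
 */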
1087 /* bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a
1088 * pointer to an element with a specific criteria and NULL if such an element
1089 * hasn't been found.
1091 static struct bnx2x_exeq_elem *bnx2x_exeq_get_mac(
1092 struct bnx2x_exe_queue_obj *o,
1093 struct bnx2x_exeq_elem *elem)
1095 struct bnx2x_exeq_elem *pos;
1096 struct bnx2x_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;
1098 /* Check pending for execution commands */
1099 list_for_each_entry(pos, &o->exe_queue, link)
1100 if (!memcmp(&pos->cmd_data.vlan_mac.u.mac, data,
1102 (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1108 static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan(
1109 struct bnx2x_exe_queue_obj *o,
1110 struct bnx2x_exeq_elem *elem)
1112 struct bnx2x_exeq_elem *pos;
1113 struct bnx2x_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan;
1115 /* Check pending for execution commands */
1116 list_for_each_entry(pos, &o->exe_queue, link)
1117 if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan, data,
1119 (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1125 static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac(
1126 struct bnx2x_exe_queue_obj *o,
1127 struct bnx2x_exeq_elem *elem)
1129 struct bnx2x_exeq_elem *pos;
1130 struct bnx2x_vlan_mac_ramrod_data *data =
1131 &elem->cmd_data.vlan_mac.u.vlan_mac;
1133 /* Check pending for execution commands */
1134 list_for_each_entry(pos, &o->exe_queue, link)
1135 if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
1137 (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1144 * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed
1146 * @bp: device handle
1147 * @qo: bnx2x_qable_obj
1148 * @elem: bnx2x_exeq_elem
1150 * Checks that the requested configuration can be added. If yes and if
1151 * requested, consume CAM credit.
1153 * The 'validate' is run after the 'optimize'.
1156 static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp,
1157 union bnx2x_qable_obj *qo,
1158 struct bnx2x_exeq_elem *elem)
1160 struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1161 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1164 /* Check the registry */
1165 rc = o->check_add(bp, o, &elem->cmd_data.vlan_mac.u);
1167 DP(BNX2X_MSG_SP, "ADD command is not allowed considering current registry state.\n");
1171 /* Check if there is a pending ADD command for this
1172 * MAC/VLAN/VLAN-MAC. Return an error if there is.
1174 if (exeq->get(exeq, elem)) {
1175 DP(BNX2X_MSG_SP, "There is a pending ADD command already\n");
1179 /* TODO: Check the pending MOVE from other objects where this
1180 * object is a destination object.
1183 /* Consume the credit if not requested not to */
1184 if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1185 &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1193 * bnx2x_validate_vlan_mac_del - check if the DEL command can be executed
1195 * @bp: device handle
1196 * @qo: queueable object to check
1197 * @elem: element that needs to be deleted
1199 * Checks that the requested configuration can be deleted. If yes and if
1200 * requested, returns a CAM credit.
1202 * The 'validate' is run after the 'optimize'.
1204 static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp,
1205 union bnx2x_qable_obj *qo,
1206 struct bnx2x_exeq_elem *elem)
1208 struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1209 struct bnx2x_vlan_mac_registry_elem *pos;
1210 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1211 struct bnx2x_exeq_elem query_elem;
1213 /* If this classification can not be deleted (doesn't exist)
1214 * - return a BNX2X_EXIST.
1216 pos = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);
1218 DP(BNX2X_MSG_SP, "DEL command is not allowed considering current registry state\n");
1222 /* Check if there are pending DEL or MOVE commands for this
1223 * MAC/VLAN/VLAN-MAC. Return an error if so.
1225 memcpy(&query_elem, elem, sizeof(query_elem));
1227 /* Check for MOVE commands */
1228 query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_MOVE;
1229 if (exeq->get(exeq, &query_elem)) {
1230 BNX2X_ERR("There is a pending MOVE command already\n");
1234 /* Check for DEL commands */
1235 if (exeq->get(exeq, elem)) {
1236 DP(BNX2X_MSG_SP, "There is a pending DEL command already\n");
1240 /* Return the credit to the credit pool if not requested not to */
1241 if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1242 &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1243 o->put_credit(o))) {
1244 BNX2X_ERR("Failed to return a credit\n");
1252 * bnx2x_validate_vlan_mac_move - check if the MOVE command can be executed
1254 * @bp: device handle
1255 * @qo: queueable object to check (source)
1256 * @elem: element that needs to be moved
1258 * Checks that the requested configuration can be moved. If yes and if
1259 * requested, returns a CAM credit.
1261 * The 'validate' is run after the 'optimize'.
1263 static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
1264 union bnx2x_qable_obj *qo,
1265 struct bnx2x_exeq_elem *elem)
1267 struct bnx2x_vlan_mac_obj *src_o = &qo->vlan_mac;
1268 struct bnx2x_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
1269 struct bnx2x_exeq_elem query_elem;
1270 struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue;
1271 struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue;
1273 /* Check if we can perform this operation based on the current registry
1276 if (!src_o->check_move(bp, src_o, dest_o,
1277 &elem->cmd_data.vlan_mac.u)) {
1278 DP(BNX2X_MSG_SP, "MOVE command is not allowed considering current registry state\n");
1282 /* Check if there is an already pending DEL or MOVE command for the
1283 * source object or ADD command for a destination object. Return an
1286 memcpy(&query_elem, elem, sizeof(query_elem));
1288 /* Check DEL on source */
1289 query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
1290 if (src_exeq->get(src_exeq, &query_elem)) {
1291 BNX2X_ERR("There is a pending DEL command on the source queue already\n");
1295 /* Check MOVE on source */
1296 if (src_exeq->get(src_exeq, elem)) {
1297 DP(BNX2X_MSG_SP, "There is a pending MOVE command already\n");
1301 /* Check ADD on destination */
1302 query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
1303 if (dest_exeq->get(dest_exeq, &query_elem)) {
1304 BNX2X_ERR("There is a pending ADD command on the destination queue already\n");
1308 /* Consume the credit if not requested not to */
1309 if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
1310 &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1311 dest_o->get_credit(dest_o)))
1314 if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1315 &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1316 src_o->put_credit(src_o))) {
1317 /* return the credit taken from dest... */
1318 dest_o->put_credit(dest_o);
1325 static int bnx2x_validate_vlan_mac(struct bnx2x *bp,
1326 union bnx2x_qable_obj *qo,
1327 struct bnx2x_exeq_elem *elem)
1329 switch (elem->cmd_data.vlan_mac.cmd) {
1330 case BNX2X_VLAN_MAC_ADD:
1331 return bnx2x_validate_vlan_mac_add(bp, qo, elem);
1332 case BNX2X_VLAN_MAC_DEL:
1333 return bnx2x_validate_vlan_mac_del(bp, qo, elem);
1334 case BNX2X_VLAN_MAC_MOVE:
1335 return bnx2x_validate_vlan_mac_move(bp, qo, elem);
1341 static int bnx2x_remove_vlan_mac(struct bnx2x *bp,
1342 union bnx2x_qable_obj *qo,
1343 struct bnx2x_exeq_elem *elem)
1347 /* If consumption wasn't required, nothing to do */
1348 if (test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1349 &elem->cmd_data.vlan_mac.vlan_mac_flags))
1352 switch (elem->cmd_data.vlan_mac.cmd) {
1353 case BNX2X_VLAN_MAC_ADD:
1354 case BNX2X_VLAN_MAC_MOVE:
1355 rc = qo->vlan_mac.put_credit(&qo->vlan_mac);
1357 case BNX2X_VLAN_MAC_DEL:
1358 rc = qo->vlan_mac.get_credit(&qo->vlan_mac);
1371 * bnx2x_wait_vlan_mac - passively wait for 5 seconds until all work completes.
1373 * @bp: device handle
1374 * @o: bnx2x_vlan_mac_obj
1377 static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
1378 struct bnx2x_vlan_mac_obj *o)
1381 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1382 struct bnx2x_raw_obj *raw = &o->raw;
1385 /* Wait for the current command to complete */
1386 rc = raw->wait_comp(bp, raw);
1390 /* Wait until there are no pending commands */
1391 if (!bnx2x_exe_queue_empty(exeq))
1392 usleep_range(1000, 2000);
1401 * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod
1403 * @bp: device handle
1404 * @o: bnx2x_vlan_mac_obj
1406 * @cont: if true schedule next execution chunk
1409 static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
1410 struct bnx2x_vlan_mac_obj *o,
1411 union event_ring_elem *cqe,
1412 unsigned long *ramrod_flags)
1414 struct bnx2x_raw_obj *r = &o->raw;
1417 /* Reset pending list */
1418 bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);
1421 r->clear_pending(r);
1423 /* If ramrod failed this is most likely a SW bug */
1424 if (cqe->message.error)
1427 /* Run the next bulk of pending commands if requested */
1428 if (test_bit(RAMROD_CONT, ramrod_flags)) {
1429 rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
1434 /* If there is more work to do return PENDING */
1435 if (!bnx2x_exe_queue_empty(&o->exe_queue))
1442 * bnx2x_optimize_vlan_mac - optimize ADD and DEL commands.
1444 * @bp: device handle
1445 * @o: bnx2x_qable_obj
1446 * @elem: bnx2x_exeq_elem
1448 static int bnx2x_optimize_vlan_mac(struct bnx2x *bp,
1449 union bnx2x_qable_obj *qo,
1450 struct bnx2x_exeq_elem *elem)
1452 struct bnx2x_exeq_elem query, *pos;
1453 struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1454 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1456 memcpy(&query, elem, sizeof(query));
1458 switch (elem->cmd_data.vlan_mac.cmd) {
1459 case BNX2X_VLAN_MAC_ADD:
1460 query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
1462 case BNX2X_VLAN_MAC_DEL:
1463 query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
1466 /* Don't handle anything other than ADD or DEL */
1470 /* If we found the appropriate element - delete it */
1471 pos = exeq->get(exeq, &query);
1474 /* Return the credit of the optimized command */
1475 if (!test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1476 &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
1477 if ((query.cmd_data.vlan_mac.cmd ==
1478 BNX2X_VLAN_MAC_ADD) && !o->put_credit(o)) {
1479 BNX2X_ERR("Failed to return the credit for the optimized ADD command\n");
1481 } else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
1482 BNX2X_ERR("Failed to recover the credit from the optimized DEL command\n");
1487 DP(BNX2X_MSG_SP, "Optimizing %s command\n",
1488 (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
1491 list_del(&pos->link);
1492 bnx2x_exe_queue_free_elem(bp, pos);
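/* Editor's note (illustrative): the net effect of the optimization above is
 * that queueing BNX2X_VLAN_MAC_ADD for an address and then BNX2X_VLAN_MAC_DEL
 * for the same address before the queue is executed sends no ramrod at all:
 * the pending ADD is removed from exe_queue, its CAM credit is returned, and
 * the non-zero return makes bnx2x_exe_queue_add() free the new DEL element
 * instead of queueing it.
 */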
1500 * bnx2x_vlan_mac_get_registry_elem - prepare a registry element
1502 * @bp: device handle
1508 * prepare a registry element according to the current command request.
1510 static inline int bnx2x_vlan_mac_get_registry_elem(
1512 struct bnx2x_vlan_mac_obj *o,
1513 struct bnx2x_exeq_elem *elem,
1515 struct bnx2x_vlan_mac_registry_elem **re)
1517 enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
1518 struct bnx2x_vlan_mac_registry_elem *reg_elem;
1520 /* Allocate a new registry element if needed. */
1522 ((cmd == BNX2X_VLAN_MAC_ADD) || (cmd == BNX2X_VLAN_MAC_MOVE))) {
1523 reg_elem = kzalloc(sizeof(*reg_elem), GFP_ATOMIC);
1527 /* Get a new CAM offset */
1528 if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
1529 /* This shall never happen, because we have checked the
1530 * CAM availability in the 'validate'.
1537 DP(BNX2X_MSG_SP, "Got cam offset %d\n", reg_elem->cam_offset);
1539 /* Set a VLAN-MAC data */
1540 memcpy(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
1541 sizeof(reg_elem->u));
1543 /* Copy the flags (needed for DEL and RESTORE flows) */
1544 reg_elem->vlan_mac_flags =
1545 elem->cmd_data.vlan_mac.vlan_mac_flags;
1546 } else /* DEL, RESTORE */
1547 reg_elem = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);
1554 * bnx2x_execute_vlan_mac - execute vlan mac command
1556 * @bp: device handle
1561 * go and send a ramrod!
1563 static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
1564 union bnx2x_qable_obj *qo,
1565 struct list_head *exe_chunk,
1566 unsigned long *ramrod_flags)
1568 struct bnx2x_exeq_elem *elem;
1569 struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
1570 struct bnx2x_raw_obj *r = &o->raw;
1572 bool restore = test_bit(RAMROD_RESTORE, ramrod_flags);
1573 bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags);
1574 struct bnx2x_vlan_mac_registry_elem *reg_elem;
1575 enum bnx2x_vlan_mac_cmd cmd;
1577 /* If DRIVER_ONLY execution is requested, cleanup a registry
1578 * and exit. Otherwise send a ramrod to FW.
1581 WARN_ON(r->check_pending(r));
1586 /* Fill the ramrod data */
1587 list_for_each_entry(elem, exe_chunk, link) {
1588 cmd = elem->cmd_data.vlan_mac.cmd;
1589 /* We will add to the target object in MOVE command, so
1590 * change the object for a CAM search.
1592 if (cmd == BNX2X_VLAN_MAC_MOVE)
1593 cam_obj = elem->cmd_data.vlan_mac.target_obj;
1597 rc = bnx2x_vlan_mac_get_registry_elem(bp, cam_obj,
1605 /* Push a new entry into the registry */
1607 ((cmd == BNX2X_VLAN_MAC_ADD) ||
1608 (cmd == BNX2X_VLAN_MAC_MOVE)))
1609 list_add(&reg_elem->link, &cam_obj->head);
1611 /* Configure a single command in a ramrod data buffer */
1612 o->set_one_rule(bp, o, elem, idx,
1613 reg_elem->cam_offset);
1615 /* MOVE command consumes 2 entries in the ramrod data */
1616 if (cmd == BNX2X_VLAN_MAC_MOVE)
1622 /* No need for an explicit memory barrier here as long as we
1623 * ensure the ordering of writing to the SPQ element
1624 * and updating of the SPQ producer which involves a memory
1625 * read. If the memory read is removed we will have to put a
1626 * full memory barrier there (inside bnx2x_sp_post()).
1629 rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid,
1630 U64_HI(r->rdata_mapping),
1631 U64_LO(r->rdata_mapping),
1632 ETH_CONNECTION_TYPE);
1637 /* Now, when we are done with the ramrod - clean up the registry */
1638 list_for_each_entry(elem, exe_chunk, link) {
1639 cmd = elem->cmd_data.vlan_mac.cmd;
1640 if ((cmd == BNX2X_VLAN_MAC_DEL) ||
1641 (cmd == BNX2X_VLAN_MAC_MOVE)) {
1642 reg_elem = o->check_del(bp, o,
1643 &elem->cmd_data.vlan_mac.u);
1647 o->put_cam_offset(o, reg_elem->cam_offset);
1648 list_del(&reg_elem->link);
1659 r->clear_pending(r);
1661 /* Cleanup a registry in case of a failure */
1662 list_for_each_entry(elem, exe_chunk, link) {
1663 cmd = elem->cmd_data.vlan_mac.cmd;
1665 if (cmd == BNX2X_VLAN_MAC_MOVE)
1666 cam_obj = elem->cmd_data.vlan_mac.target_obj;
1670 /* Delete all newly added above entries */
1672 ((cmd == BNX2X_VLAN_MAC_ADD) ||
1673 (cmd == BNX2X_VLAN_MAC_MOVE))) {
1674 reg_elem = o->check_del(bp, cam_obj,
1675 &elem->cmd_data.vlan_mac.u);
1677 list_del(&reg_elem->link);
1686 static inline int bnx2x_vlan_mac_push_new_cmd(
1688 struct bnx2x_vlan_mac_ramrod_params *p)
1690 struct bnx2x_exeq_elem *elem;
1691 struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1692 bool restore = test_bit(RAMROD_RESTORE, &p->ramrod_flags);
1694 /* Allocate the execution queue element */
1695 elem = bnx2x_exe_queue_alloc_elem(bp);
1699 /* Set the command 'length' */
1700 switch (p->user_req.cmd) {
1701 case BNX2X_VLAN_MAC_MOVE:
1708 /* Fill the object specific info */
1709 memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));
1711 /* Try to add a new command to the pending list */
1712 return bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore);
1716 * bnx2x_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
1718 * @bp: device handle
1722 int bnx2x_config_vlan_mac(
1724 struct bnx2x_vlan_mac_ramrod_params *p)
1727 struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1728 unsigned long *ramrod_flags = &p->ramrod_flags;
1729 bool cont = test_bit(RAMROD_CONT, ramrod_flags);
1730 struct bnx2x_raw_obj *raw = &o->raw;
1733 * Add new elements to the execution list for commands that require it.
1736 rc = bnx2x_vlan_mac_push_new_cmd(bp, p);
1741 /* If nothing will be executed further in this iteration we want to
1742 * return PENDING if there are pending commands
1744 if (!bnx2x_exe_queue_empty(&o->exe_queue))
1747 if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
1748 DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.\n");
1749 raw->clear_pending(raw);
1752 /* Execute commands if required */
1753 if (cont || test_bit(RAMROD_EXEC, ramrod_flags) ||
1754 test_bit(RAMROD_COMP_WAIT, ramrod_flags)) {
1755 rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
1760 /* RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
1761 * then the user wants to wait until the last command is done.
1763 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
1764 /* Wait maximum for the current exe_queue length iterations plus
1765 * one (for the current pending command).
1767 int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1;
1769 while (!bnx2x_exe_queue_empty(&o->exe_queue) &&
1772 /* Wait for the current command to complete */
1773 rc = raw->wait_comp(bp, raw);
1777 /* Make a next step */
1778 rc = bnx2x_exe_queue_step(bp, &o->exe_queue,
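/* Editor's note -- a minimal usage sketch, not part of the driver: a caller
 * adding one MAC through an object initialised by bnx2x_init_mac_obj() below,
 * assuming mac[] holds the address and the caller may sleep:
 *
 *	struct bnx2x_vlan_mac_ramrod_params p;
 *
 *	memset(&p, 0, sizeof(p));
 *	p.vlan_mac_obj = mac_obj;
 *	p.user_req.cmd = BNX2X_VLAN_MAC_ADD;
 *	memcpy(p.user_req.u.mac.mac, mac, ETH_ALEN);
 *	__set_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
 *
 *	rc = bnx2x_config_vlan_mac(bp, &p);	// 0 on success, negative on error
 */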
1791 * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
1793 * @bp: device handle
1796 * @ramrod_flags: execution flags to be used for this deletion
1798 * Returns 0 if the last operation has completed successfully and there are no
1799 * more elements left, a positive value if the last operation has completed
1800 * successfully and there are more previously configured elements, and a negative
1801 * value if the current operation has failed.
1803 static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
1804 struct bnx2x_vlan_mac_obj *o,
1805 unsigned long *vlan_mac_flags,
1806 unsigned long *ramrod_flags)
1808 struct bnx2x_vlan_mac_registry_elem *pos = NULL;
1810 struct bnx2x_vlan_mac_ramrod_params p;
1811 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1812 struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
1814 /* Clear pending commands first */
1816 spin_lock_bh(&exeq->lock);
1818 list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
1819 if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
1821 rc = exeq->remove(bp, exeq->owner, exeq_pos);
1823 BNX2X_ERR("Failed to remove command\n");
1824 spin_unlock_bh(&exeq->lock);
1827 list_del(&exeq_pos->link);
1828 bnx2x_exe_queue_free_elem(bp, exeq_pos);
1832 spin_unlock_bh(&exeq->lock);
1834 /* Prepare a command request */
1835 memset(&p, 0, sizeof(p));
1837 p.ramrod_flags = *ramrod_flags;
1838 p.user_req.cmd = BNX2X_VLAN_MAC_DEL;
1840 /* Add all but the last VLAN-MAC to the execution queue without actually
1841 * executing anything.
1843 __clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
1844 __clear_bit(RAMROD_EXEC, &p.ramrod_flags);
1845 __clear_bit(RAMROD_CONT, &p.ramrod_flags);
1847 list_for_each_entry(pos, &o->head, link) {
1848 if (pos->vlan_mac_flags == *vlan_mac_flags) {
1849 p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
1850 memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
1851 rc = bnx2x_config_vlan_mac(bp, &p);
1853 BNX2X_ERR("Failed to add a new DEL command\n");
1859 p.ramrod_flags = *ramrod_flags;
1860 __set_bit(RAMROD_CONT, &p.ramrod_flags);
1862 return bnx2x_config_vlan_mac(bp, &p);
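/* Editor's note -- an illustrative sketch, not part of the driver: deleting
 * every element registered under a given flags combination through the
 * delete_all callback set up below, assuming vlan_mac_flags has e.g.
 * BNX2X_ETH_MAC set and the caller may sleep:
 *
 *	unsigned long ramrod_flags = 0;
 *
 *	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
 *	rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags);
 */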
1865 static inline void bnx2x_init_raw_obj(struct bnx2x_raw_obj *raw, u8 cl_id,
1866 u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, int state,
1867 unsigned long *pstate, bnx2x_obj_type type)
1869 raw->func_id = func_id;
1873 raw->rdata_mapping = rdata_mapping;
1875 raw->pstate = pstate;
1876 raw->obj_type = type;
1877 raw->check_pending = bnx2x_raw_check_pending;
1878 raw->clear_pending = bnx2x_raw_clear_pending;
1879 raw->set_pending = bnx2x_raw_set_pending;
1880 raw->wait_comp = bnx2x_raw_wait;
1883 static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
1884 u8 cl_id, u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping,
1885 int state, unsigned long *pstate, bnx2x_obj_type type,
1886 struct bnx2x_credit_pool_obj *macs_pool,
1887 struct bnx2x_credit_pool_obj *vlans_pool)
1889 INIT_LIST_HEAD(&o->head);
1891 o->macs_pool = macs_pool;
1892 o->vlans_pool = vlans_pool;
1894 o->delete_all = bnx2x_vlan_mac_del_all;
1895 o->restore = bnx2x_vlan_mac_restore;
1896 o->complete = bnx2x_complete_vlan_mac;
1897 o->wait = bnx2x_wait_vlan_mac;
1899 bnx2x_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
1900 state, pstate, type);
1903 void bnx2x_init_mac_obj(struct bnx2x *bp,
1904 struct bnx2x_vlan_mac_obj *mac_obj,
1905 u8 cl_id, u32 cid, u8 func_id, void *rdata,
1906 dma_addr_t rdata_mapping, int state,
1907 unsigned long *pstate, bnx2x_obj_type type,
1908 struct bnx2x_credit_pool_obj *macs_pool)
1910 union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)mac_obj;
1912 bnx2x_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
1913 rdata_mapping, state, pstate, type,
1916 /* CAM credit pool handling */
1917 mac_obj->get_credit = bnx2x_get_credit_mac;
1918 mac_obj->put_credit = bnx2x_put_credit_mac;
1919 mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
1920 mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
1922 if (CHIP_IS_E1x(bp)) {
1923 mac_obj->set_one_rule = bnx2x_set_one_mac_e1x;
1924 mac_obj->check_del = bnx2x_check_mac_del;
1925 mac_obj->check_add = bnx2x_check_mac_add;
1926 mac_obj->check_move = bnx2x_check_move_always_err;
1927 mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
1930 bnx2x_exe_queue_init(bp,
1931 &mac_obj->exe_queue, 1, qable_obj,
1932 bnx2x_validate_vlan_mac,
1933 bnx2x_remove_vlan_mac,
1934 bnx2x_optimize_vlan_mac,
1935 bnx2x_execute_vlan_mac,
1936 bnx2x_exeq_get_mac);
1938 mac_obj->set_one_rule = bnx2x_set_one_mac_e2;
1939 mac_obj->check_del = bnx2x_check_mac_del;
1940 mac_obj->check_add = bnx2x_check_mac_add;
1941 mac_obj->check_move = bnx2x_check_move;
1942 mac_obj->ramrod_cmd =
1943 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
1944 mac_obj->get_n_elements = bnx2x_get_n_elements;
1947 bnx2x_exe_queue_init(bp,
1948 &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
1949 qable_obj, bnx2x_validate_vlan_mac,
1950 bnx2x_remove_vlan_mac,
1951 bnx2x_optimize_vlan_mac,
1952 bnx2x_execute_vlan_mac,
1953 bnx2x_exeq_get_mac);
1957 void bnx2x_init_vlan_obj(struct bnx2x *bp,
1958 struct bnx2x_vlan_mac_obj *vlan_obj,
1959 u8 cl_id, u32 cid, u8 func_id, void *rdata,
1960 dma_addr_t rdata_mapping, int state,
1961 unsigned long *pstate, bnx2x_obj_type type,
1962 struct bnx2x_credit_pool_obj *vlans_pool)
1964 union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)vlan_obj;
1966 bnx2x_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata,
1967 rdata_mapping, state, pstate, type, NULL,
1970 vlan_obj->get_credit = bnx2x_get_credit_vlan;
1971 vlan_obj->put_credit = bnx2x_put_credit_vlan;
1972 vlan_obj->get_cam_offset = bnx2x_get_cam_offset_vlan;
1973 vlan_obj->put_cam_offset = bnx2x_put_cam_offset_vlan;
1975 if (CHIP_IS_E1x(bp)) {
1976 BNX2X_ERR("Do not support chips other than E2 and newer\n");
1979 vlan_obj->set_one_rule = bnx2x_set_one_vlan_e2;
1980 vlan_obj->check_del = bnx2x_check_vlan_del;
1981 vlan_obj->check_add = bnx2x_check_vlan_add;
1982 vlan_obj->check_move = bnx2x_check_move;
1983 vlan_obj->ramrod_cmd =
1984 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
1985 vlan_obj->get_n_elements = bnx2x_get_n_elements;
1988 bnx2x_exe_queue_init(bp,
1989 &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
1990 qable_obj, bnx2x_validate_vlan_mac,
1991 bnx2x_remove_vlan_mac,
1992 bnx2x_optimize_vlan_mac,
1993 bnx2x_execute_vlan_mac,
1994 bnx2x_exeq_get_vlan);
1998 void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
1999 struct bnx2x_vlan_mac_obj *vlan_mac_obj,
2000 u8 cl_id, u32 cid, u8 func_id, void *rdata,
2001 dma_addr_t rdata_mapping, int state,
2002 unsigned long *pstate, bnx2x_obj_type type,
2003 struct bnx2x_credit_pool_obj *macs_pool,
2004 struct bnx2x_credit_pool_obj *vlans_pool)
2006 union bnx2x_qable_obj *qable_obj =
2007 (union bnx2x_qable_obj *)vlan_mac_obj;
2009 bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
2010 rdata_mapping, state, pstate, type,
2011 macs_pool, vlans_pool);
2013 /* CAM pool handling */
2014 vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
2015 vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;
2016 /* CAM offset is relevant for 57710 and 57711 chips only which have a
2017 * single CAM for both MACs and VLAN-MAC pairs. So the offset
2018 * will be taken from MACs' pool object only.
2020 vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
2021 vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
2023 if (CHIP_IS_E1(bp)) {
2024 BNX2X_ERR("Do not support chips other than E2\n");
2026 } else if (CHIP_IS_E1H(bp)) {
2027 vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e1h;
2028 vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
2029 vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
2030 vlan_mac_obj->check_move = bnx2x_check_move_always_err;
2031 vlan_mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
2034 bnx2x_exe_queue_init(bp,
2035 &vlan_mac_obj->exe_queue, 1, qable_obj,
2036 bnx2x_validate_vlan_mac,
2037 bnx2x_remove_vlan_mac,
2038 bnx2x_optimize_vlan_mac,
2039 bnx2x_execute_vlan_mac,
2040 bnx2x_exeq_get_vlan_mac);
2042 vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e2;
2043 vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
2044 vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
2045 vlan_mac_obj->check_move = bnx2x_check_move;
2046 vlan_mac_obj->ramrod_cmd =
2047 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2050 bnx2x_exe_queue_init(bp,
2051 &vlan_mac_obj->exe_queue,
2052 CLASSIFY_RULES_COUNT,
2053 qable_obj, bnx2x_validate_vlan_mac,
2054 bnx2x_remove_vlan_mac,
2055 bnx2x_optimize_vlan_mac,
2056 bnx2x_execute_vlan_mac,
2057 bnx2x_exeq_get_vlan_mac);
2061 /* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
2062 static inline void __storm_memset_mac_filters(struct bnx2x *bp,
2063 struct tstorm_eth_mac_filter_config *mac_filters,
2066 size_t size = sizeof(struct tstorm_eth_mac_filter_config);
2068 u32 addr = BAR_TSTRORM_INTMEM +
2069 TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
2071 __storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
2074 static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
2075 struct bnx2x_rx_mode_ramrod_params *p)
2077 /* update the bp MAC filter structure */
2078 u32 mask = (1 << p->cl_id);
2080 struct tstorm_eth_mac_filter_config *mac_filters =
2081 (struct tstorm_eth_mac_filter_config *)p->rdata;
2083 /* initial setting is drop-all */
2084 u8 drop_all_ucast = 1, drop_all_mcast = 1;
2085 u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2086 u8 unmatched_unicast = 0;
2088 /* In e1x we only take into account the rx accept flag since tx switching
2089 * isn't enabled
2090 if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags))
2091 /* accept matched ucast */
2094 if (test_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags))
2095 /* accept matched mcast */
2098 if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
2099 /* accept all ucast */
2103 if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
2104 /* accept all mcast */
2108 if (test_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags))
2109 /* accept (all) bcast */
2111 if (test_bit(BNX2X_ACCEPT_UNMATCHED, &p->rx_accept_flags))
2112 /* accept unmatched unicasts */
2113 unmatched_unicast = 1;
2115 mac_filters->ucast_drop_all = drop_all_ucast ?
2116 mac_filters->ucast_drop_all | mask :
2117 mac_filters->ucast_drop_all & ~mask;
2119 mac_filters->mcast_drop_all = drop_all_mcast ?
2120 mac_filters->mcast_drop_all | mask :
2121 mac_filters->mcast_drop_all & ~mask;
2123 mac_filters->ucast_accept_all = accp_all_ucast ?
2124 mac_filters->ucast_accept_all | mask :
2125 mac_filters->ucast_accept_all & ~mask;
2127 mac_filters->mcast_accept_all = accp_all_mcast ?
2128 mac_filters->mcast_accept_all | mask :
2129 mac_filters->mcast_accept_all & ~mask;
2131 mac_filters->bcast_accept_all = accp_all_bcast ?
2132 mac_filters->bcast_accept_all | mask :
2133 mac_filters->bcast_accept_all & ~mask;
2135 mac_filters->unmatched_unicast = unmatched_unicast ?
2136 mac_filters->unmatched_unicast | mask :
2137 mac_filters->unmatched_unicast & ~mask;
2139 DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\n accp_ucast 0x%x\n"
2140 "accp_mcast 0x%x\naccp_bcast 0x%x\n",
2141 mac_filters->ucast_drop_all, mac_filters->mcast_drop_all,
2142 mac_filters->ucast_accept_all, mac_filters->mcast_accept_all,
2143 mac_filters->bcast_accept_all);
2145 /* write the MAC filter structure*/
2146 __storm_memset_mac_filters(bp, mac_filters, p->func_id);
2148 /* The operation is completed */
2149 clear_bit(p->state, p->pstate);
2150 smp_mb__after_clear_bit();
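/* Editor's note (illustrative): each filter field above keeps one bit per
 * client, so the ternary updates only touch this client's bit. For example,
 * with cl_id == 3 (mask == 0x8) and ucast_drop_all currently 0x6:
 *
 *	drop_all_ucast == 1  ->  ucast_drop_all = 0x6 | 0x8  = 0xe
 *	drop_all_ucast == 0  ->  ucast_drop_all = 0x6 & ~0x8 = 0x6
 */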
2155 /* Setup ramrod data */
2156 static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid,
2157 struct eth_classify_header *hdr,
2160 hdr->echo = cpu_to_le32(cid);
2161 hdr->rule_cnt = rule_cnt;
2164 static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp,
2165 unsigned long *accept_flags,
2166 struct eth_filter_rules_cmd *cmd,
2167 bool clear_accept_all)
2171 /* start with 'drop-all' */
2172 state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
2173 ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2175 if (test_bit(BNX2X_ACCEPT_UNICAST, accept_flags))
2176 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2178 if (test_bit(BNX2X_ACCEPT_MULTICAST, accept_flags))
2179 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2181 if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, accept_flags)) {
2182 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2183 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2186 if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, accept_flags)) {
2187 state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2188 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2191 if (test_bit(BNX2X_ACCEPT_BROADCAST, accept_flags))
2192 state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2194 if (test_bit(BNX2X_ACCEPT_UNMATCHED, accept_flags)) {
2195 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2196 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2199 if (test_bit(BNX2X_ACCEPT_ANY_VLAN, accept_flags))
2200 state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
2202 /* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
2203 if (clear_accept_all) {
2204 state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2205 state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2206 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2207 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2210 cmd->state = cpu_to_le16(state);
2213 static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
2214 struct bnx2x_rx_mode_ramrod_params *p)
2216 struct eth_filter_rules_ramrod_data *data = p->rdata;
2220 /* Reset the ramrod data buffer */
2221 memset(data, 0, sizeof(*data));
2223 /* Setup ramrod data */
2225 /* Tx (internal switching) */
2226 if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2227 data->rules[rule_idx].client_id = p->cl_id;
2228 data->rules[rule_idx].func_id = p->func_id;
2230 data->rules[rule_idx].cmd_general_data =
2231 ETH_FILTER_RULES_CMD_TX_CMD;
2233 bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
2234 &(data->rules[rule_idx++]),
2239 if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2240 data->rules[rule_idx].client_id = p->cl_id;
2241 data->rules[rule_idx].func_id = p->func_id;
2243 data->rules[rule_idx].cmd_general_data =
2244 ETH_FILTER_RULES_CMD_RX_CMD;
2246 bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
2247 &(data->rules[rule_idx++]),
2251 /* If FCoE Queue configuration has been requested configure the Rx and
2252 * internal switching modes for this queue in separate rules.
2254 * The FCoE queue shall never be set to ACCEPT_ALL packets of any sort:
2255 * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
2257 if (test_bit(BNX2X_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
2258 /* Tx (internal switching) */
2259 if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2260 data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2261 data->rules[rule_idx].func_id = p->func_id;
2263 data->rules[rule_idx].cmd_general_data =
2264 ETH_FILTER_RULES_CMD_TX_CMD;
2266 bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
2267 &(data->rules[rule_idx]),
2273 if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2274 data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2275 data->rules[rule_idx].func_id = p->func_id;
2277 data->rules[rule_idx].cmd_general_data =
2278 ETH_FILTER_RULES_CMD_RX_CMD;
2280 bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
2281 &(data->rules[rule_idx]),
2287 /* Set the ramrod header (most importantly - the number of rules to configure). */
2290 bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
2292 DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx\n",
2293 data->header.rule_cnt, p->rx_accept_flags,
2294 p->tx_accept_flags);
2296 /* No need for an explicit memory barrier here as long as we ensure
2297 * the ordering of writing to the SPQ element and updating of the
2298 * SPQ producer, which involves a memory read. If the memory read is
2299 * ever removed we will have to put a full memory barrier there
2300 * (inside bnx2x_sp_post()).
2304 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_FILTER_RULES, p->cid,
2305 U64_HI(p->rdata_mapping),
2306 U64_LO(p->rdata_mapping),
2307 ETH_CONNECTION_TYPE);
2311 /* Ramrod completion is pending */
2315 static int bnx2x_wait_rx_mode_comp_e2(struct bnx2x *bp,
2316 struct bnx2x_rx_mode_ramrod_params *p)
2318 return bnx2x_state_wait(bp, p->state, p->pstate);
2321 static int bnx2x_empty_rx_mode_wait(struct bnx2x *bp,
2322 struct bnx2x_rx_mode_ramrod_params *p)
2328 int bnx2x_config_rx_mode(struct bnx2x *bp,
2329 struct bnx2x_rx_mode_ramrod_params *p)
2333 /* Configure the new classification in the chip */
2334 rc = p->rx_mode_obj->config_rx_mode(bp, p);
2338 /* Wait for a ramrod completion if it was requested */
2339 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
2340 rc = p->rx_mode_obj->wait_comp(bp, p);
2348 void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
2349 struct bnx2x_rx_mode_obj *o)
2351 if (CHIP_IS_E1x(bp)) {
2352 o->wait_comp = bnx2x_empty_rx_mode_wait;
2353 o->config_rx_mode = bnx2x_set_rx_mode_e1x;
2355 o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
2356 o->config_rx_mode = bnx2x_set_rx_mode_e2;
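/* Hypothetical usage sketch (not part of the driver): build the accept flags
 * for a "normal" Rx mode and submit them through the object initialized
 * above. Only the fields actually referenced by bnx2x_config_rx_mode() and
 * the e1x/e2 handlers are shown; rx_mode_obj, cl_id, cid, func_id, the DMA
 * buffer and the state tracking fields are assumed to be filled in elsewhere
 * by the caller.
 */
static int bnx2x_rx_mode_normal_sketch(struct bnx2x *bp,
				       struct bnx2x_rx_mode_ramrod_params *p)
{
	set_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags);
	set_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags);
	set_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags);

	set_bit(BNX2X_ACCEPT_UNICAST, &p->tx_accept_flags);
	set_bit(BNX2X_ACCEPT_MULTICAST, &p->tx_accept_flags);
	set_bit(BNX2X_ACCEPT_BROADCAST, &p->tx_accept_flags);

	set_bit(RAMROD_RX, &p->ramrod_flags);
	set_bit(RAMROD_TX, &p->ramrod_flags);
	set_bit(RAMROD_COMP_WAIT, &p->ramrod_flags);

	return bnx2x_config_rx_mode(bp, p);
}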
2360 /********************* Multicast verbs: SET, CLEAR ****************************/
2361 static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac)
2363 return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff;
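/* Illustrative sketch (not part of the driver): the top CRC32c byte selects
 * one of 256 "bins" in the approximate match registry. Assuming 64-bit
 * vector elements (BIT_VEC64_ELEM_SZ == 64), setting a bin reduces to the
 * word/bit split below - the same effect BIT_VEC64_SET_BIT is used for in
 * bnx2x_mcast_set_one_rule_e2().
 */
static inline void bnx2x_mcast_set_bin_sketch(u64 *vec, u8 *mac)
{
	u8 bin = (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff;

	vec[bin / 64] |= 1ULL << (bin % 64);
}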
2366 struct bnx2x_mcast_mac_elem {
2367 struct list_head link;
2369 u8 pad[2]; /* For a natural alignment of the following buffer */
2372 struct bnx2x_pending_mcast_cmd {
2373 struct list_head link;
2374 int type; /* BNX2X_MCAST_CMD_X */
2376 struct list_head macs_head;
2377 u32 macs_num; /* Needed for DEL command */
2378 int next_bin; /* Needed for RESTORE flow with aprox match */
2381 bool done; /* set to true when the command has been handled. In practice
2382 * this is only used for 57712 handling, where one pending
2383 * command may be handled in several operations. On the other
2384 * chips every command is completed in a single ramrod, so
2385 * there is no need for this field.
2389 static int bnx2x_mcast_wait(struct bnx2x *bp,
2390 struct bnx2x_mcast_obj *o)
2392 if (bnx2x_state_wait(bp, o->sched_state, o->raw.pstate) ||
2393 o->raw.wait_comp(bp, &o->raw))
2399 static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
2400 struct bnx2x_mcast_obj *o,
2401 struct bnx2x_mcast_ramrod_params *p,
2402 enum bnx2x_mcast_cmd cmd)
2405 struct bnx2x_pending_mcast_cmd *new_cmd;
2406 struct bnx2x_mcast_mac_elem *cur_mac = NULL;
2407 struct bnx2x_mcast_list_elem *pos;
2408 int macs_list_len = ((cmd == BNX2X_MCAST_CMD_ADD) ?
2409 p->mcast_list_len : 0);
2411 /* If the command is empty ("handle pending commands only"), break */
2412 if (!p->mcast_list_len)
2415 total_sz = sizeof(*new_cmd) +
2416 macs_list_len * sizeof(struct bnx2x_mcast_mac_elem);
2418 /* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
2419 new_cmd = kzalloc(total_sz, GFP_ATOMIC);
2424 DP(BNX2X_MSG_SP, "About to enqueue a new %d command. macs_list_len=%d\n",
2425 cmd, macs_list_len);
2427 INIT_LIST_HEAD(&new_cmd->data.macs_head);
2429 new_cmd->type = cmd;
2430 new_cmd->done = false;
2433 case BNX2X_MCAST_CMD_ADD:
2434 cur_mac = (struct bnx2x_mcast_mac_elem *)
2435 ((u8 *)new_cmd + sizeof(*new_cmd));
2437 /* Push the MACs of the current command into the pending command's MAC list (FIFO). */
2440 list_for_each_entry(pos, &p->mcast_list, link) {
2441 memcpy(cur_mac->mac, pos->mac, ETH_ALEN);
2442 list_add_tail(&cur_mac->link, &new_cmd->data.macs_head);
2448 case BNX2X_MCAST_CMD_DEL:
2449 new_cmd->data.macs_num = p->mcast_list_len;
2452 case BNX2X_MCAST_CMD_RESTORE:
2453 new_cmd->data.next_bin = 0;
2458 BNX2X_ERR("Unknown command: %d\n", cmd);
2462 /* Push the new pending command to the tail of the pending list: FIFO */
2463 list_add_tail(&new_cmd->link, &o->pending_cmds_head);
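/* Hypothetical helper (not part of the driver) that makes the allocation
 * layout built above explicit: the pending command header and its MAC
 * elements share a single kzalloc() buffer, with element 0 starting right
 * after the header (this is what the cast in the ADD case relies on).
 */
static inline struct bnx2x_mcast_mac_elem *
bnx2x_mcast_pending_cmd_elem(struct bnx2x_pending_mcast_cmd *cmd, int i)
{
	return (struct bnx2x_mcast_mac_elem *)((u8 *)cmd + sizeof(*cmd)) + i;
}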
2471 * bnx2x_mcast_get_next_bin - get the next set bin (index)
2474 * @last: index to start looking from (including)
2476 * returns the next found (set) bin or a negative value if none is found.
2478 static inline int bnx2x_mcast_get_next_bin(struct bnx2x_mcast_obj *o, int last)
2480 int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
2482 for (i = last / BIT_VEC64_ELEM_SZ; i < BNX2X_MCAST_VEC_SZ; i++) {
2483 if (o->registry.aprox_match.vec[i])
2484 for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
2485 int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
2486 if (BIT_VEC64_TEST_BIT(o->registry.aprox_match.
2499 * bnx2x_mcast_clear_first_bin - find the first set bin and clear it
2503 * returns the index of the found bin or -1 if none is found
2505 static inline int bnx2x_mcast_clear_first_bin(struct bnx2x_mcast_obj *o)
2507 int cur_bit = bnx2x_mcast_get_next_bin(o, 0);
2510 BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
2515 static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o)
2517 struct bnx2x_raw_obj *raw = &o->raw;
2520 if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
2521 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2522 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
2524 if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
2525 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2526 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
2531 static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp,
2532 struct bnx2x_mcast_obj *o, int idx,
2533 union bnx2x_mcast_config_data *cfg_data,
2534 enum bnx2x_mcast_cmd cmd)
2536 struct bnx2x_raw_obj *r = &o->raw;
2537 struct eth_multicast_rules_ramrod_data *data =
2538 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2539 u8 func_id = r->func_id;
2540 u8 rx_tx_add_flag = bnx2x_mcast_get_rx_tx_flag(o);
2543 if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE))
2544 rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;
2546 data->rules[idx].cmd_general_data |= rx_tx_add_flag;
2548 /* Get a bin and update a bins' vector */
2550 case BNX2X_MCAST_CMD_ADD:
2551 bin = bnx2x_mcast_bin_from_mac(cfg_data->mac);
2552 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
2555 case BNX2X_MCAST_CMD_DEL:
2556 /* If there were no more bins to clear
2557 * (bnx2x_mcast_clear_first_bin() returns -1) then we would
2558 * clear any (0xff) bin.
2559 * See bnx2x_mcast_validate_e2() for an explanation of when this may happen.
2562 bin = bnx2x_mcast_clear_first_bin(o);
2565 case BNX2X_MCAST_CMD_RESTORE:
2566 bin = cfg_data->bin;
2570 BNX2X_ERR("Unknown command: %d\n", cmd);
2574 DP(BNX2X_MSG_SP, "%s bin %d\n",
2575 ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
2576 "Setting" : "Clearing"), bin);
2578 data->rules[idx].bin_id = (u8)bin;
2579 data->rules[idx].func_id = func_id;
2580 data->rules[idx].engine_id = o->engine_id;
2584 * bnx2x_mcast_handle_restore_cmd_e2 - restore configuration from the registry
2586 * @bp: device handle
2588 * @start_bin: index in the registry to start from (including)
2589 * @rdata_idx: index in the ramrod data to start from
2591 * returns last handled bin index or -1 if all bins have been handled
2593 static inline int bnx2x_mcast_handle_restore_cmd_e2(
2594 struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_bin,
2597 int cur_bin, cnt = *rdata_idx;
2598 union bnx2x_mcast_config_data cfg_data = {NULL};
2600 /* go through the registry and configure the bins from it */
2601 for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
2602 cur_bin = bnx2x_mcast_get_next_bin(o, cur_bin + 1)) {
2604 cfg_data.bin = (u8)cur_bin;
2605 o->set_one_rule(bp, o, cnt, &cfg_data,
2606 BNX2X_MCAST_CMD_RESTORE);
2610 DP(BNX2X_MSG_SP, "About to configure a bin %d\n", cur_bin);
2612 /* Break if we reached the maximum number of rules. */
2615 if (cnt >= o->max_cmd_len)
2624 static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp,
2625 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2628 struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n;
2629 int cnt = *line_idx;
2630 union bnx2x_mcast_config_data cfg_data = {NULL};
2632 list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head,
2635 cfg_data.mac = &pmac_pos->mac[0];
2636 o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
2640 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
2643 list_del(&pmac_pos->link);
2645 /* Break if we reached the maximum number of rules. */
2648 if (cnt >= o->max_cmd_len)
2654 /* if no more MACs to configure - we are done */
2655 if (list_empty(&cmd_pos->data.macs_head))
2656 cmd_pos->done = true;
2659 static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp,
2660 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2663 int cnt = *line_idx;
2665 while (cmd_pos->data.macs_num) {
2666 o->set_one_rule(bp, o, cnt, NULL, cmd_pos->type);
2670 cmd_pos->data.macs_num--;
2672 DP(BNX2X_MSG_SP, "Deleting MAC. %d left,cnt is %d\n",
2673 cmd_pos->data.macs_num, cnt);
2675 /* Break if we reached the maximum number of rules. */
2678 if (cnt >= o->max_cmd_len)
2684 /* If we cleared all bins - we are done */
2685 if (!cmd_pos->data.macs_num)
2686 cmd_pos->done = true;
2689 static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp,
2690 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2693 cmd_pos->data.next_bin = o->hdl_restore(bp, o, cmd_pos->data.next_bin,
2696 if (cmd_pos->data.next_bin < 0)
2697 /* If o->hdl_restore returned -1 we are done */
2698 cmd_pos->done = true;
2700 /* Start from the next bin next time */
2701 cmd_pos->data.next_bin++;
2704 static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp,
2705 struct bnx2x_mcast_ramrod_params *p)
2707 struct bnx2x_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
2709 struct bnx2x_mcast_obj *o = p->mcast_obj;
2711 list_for_each_entry_safe(cmd_pos, cmd_pos_n, &o->pending_cmds_head,
2713 switch (cmd_pos->type) {
2714 case BNX2X_MCAST_CMD_ADD:
2715 bnx2x_mcast_hdl_pending_add_e2(bp, o, cmd_pos, &cnt);
2718 case BNX2X_MCAST_CMD_DEL:
2719 bnx2x_mcast_hdl_pending_del_e2(bp, o, cmd_pos, &cnt);
2722 case BNX2X_MCAST_CMD_RESTORE:
2723 bnx2x_mcast_hdl_pending_restore_e2(bp, o, cmd_pos,
2728 BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
2732 /* If the command has been completed - remove it from the list
2733 * and free the memory
2735 if (cmd_pos->done) {
2736 list_del(&cmd_pos->link);
2740 /* Break if we reached the maximum number of rules */
2741 if (cnt >= o->max_cmd_len)
2748 static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp,
2749 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2752 struct bnx2x_mcast_list_elem *mlist_pos;
2753 union bnx2x_mcast_config_data cfg_data = {NULL};
2754 int cnt = *line_idx;
2756 list_for_each_entry(mlist_pos, &p->mcast_list, link) {
2757 cfg_data.mac = mlist_pos->mac;
2758 o->set_one_rule(bp, o, cnt, &cfg_data, BNX2X_MCAST_CMD_ADD);
2762 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
2769 static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp,
2770 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2773 int cnt = *line_idx, i;
2775 for (i = 0; i < p->mcast_list_len; i++) {
2776 o->set_one_rule(bp, o, cnt, NULL, BNX2X_MCAST_CMD_DEL);
2780 DP(BNX2X_MSG_SP, "Deleting MAC. %d left\n",
2781 p->mcast_list_len - i - 1);
2788 * bnx2x_mcast_handle_current_cmd - handle the current (not yet pending) command
2790 * @bp: device handle
2793 * @start_cnt: first line in the ramrod data that may be used
2795 * This function is called iff there is enough room for the current command in the ramrod data.
2797 * Returns number of lines filled in the ramrod data in total.
2799 static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp,
2800 struct bnx2x_mcast_ramrod_params *p,
2801 enum bnx2x_mcast_cmd cmd,
2804 struct bnx2x_mcast_obj *o = p->mcast_obj;
2805 int cnt = start_cnt;
2807 DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
2810 case BNX2X_MCAST_CMD_ADD:
2811 bnx2x_mcast_hdl_add(bp, o, p, &cnt);
2814 case BNX2X_MCAST_CMD_DEL:
2815 bnx2x_mcast_hdl_del(bp, o, p, &cnt);
2818 case BNX2X_MCAST_CMD_RESTORE:
2819 o->hdl_restore(bp, o, 0, &cnt);
2823 BNX2X_ERR("Unknown command: %d\n", cmd);
2827 /* The current command has been handled */
2828 p->mcast_list_len = 0;
2833 static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
2834 struct bnx2x_mcast_ramrod_params *p,
2835 enum bnx2x_mcast_cmd cmd)
2837 struct bnx2x_mcast_obj *o = p->mcast_obj;
2838 int reg_sz = o->get_registry_size(o);
2841 /* DEL command deletes all currently configured MACs */
2842 case BNX2X_MCAST_CMD_DEL:
2843 o->set_registry_size(o, 0);
2846 /* RESTORE command will restore the entire multicast configuration */
2847 case BNX2X_MCAST_CMD_RESTORE:
2848 /* Here we set the approximate amount of work to do, which in
2849 * fact may be less, since some MACs in postponed ADD
2850 * command(s) scheduled before this command may fall into
2851 * the same bin, so the actual number of bins set in the
2852 * registry would be less than we estimated here. See
2853 * bnx2x_mcast_set_one_rule_e2() for further details.
2855 p->mcast_list_len = reg_sz;
2858 case BNX2X_MCAST_CMD_ADD:
2859 case BNX2X_MCAST_CMD_CONT:
2860 /* Here we assume that all new MACs will fall into new bins.
2861 * However we will correct the real registry size after we
2862 * handle all pending commands.
2864 o->set_registry_size(o, reg_sz + p->mcast_list_len);
2868 BNX2X_ERR("Unknown command: %d\n", cmd);
2872 /* Increase the total number of MACs pending to be configured */
2873 o->total_pending_num += p->mcast_list_len;
2878 static void bnx2x_mcast_revert_e2(struct bnx2x *bp,
2879 struct bnx2x_mcast_ramrod_params *p,
2882 struct bnx2x_mcast_obj *o = p->mcast_obj;
2884 o->set_registry_size(o, old_num_bins);
2885 o->total_pending_num -= p->mcast_list_len;
2889 * bnx2x_mcast_set_rdata_hdr_e2 - sets the header values
2891 * @bp: device handle
2893 * @len: number of rules to handle
2895 static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp,
2896 struct bnx2x_mcast_ramrod_params *p,
2899 struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
2900 struct eth_multicast_rules_ramrod_data *data =
2901 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2903 data->header.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
2904 (BNX2X_FILTER_MCAST_PENDING <<
2905 BNX2X_SWCID_SHIFT));
2906 data->header.rule_cnt = len;
2910 * bnx2x_mcast_refresh_registry_e2 - recalculate the actual number of set bins
2912 * @bp: device handle
2915 * Recalculate the actual number of set bins in the registry using Brian
2916 * Kernighan's algorithm: its execution complexity is proportional to the number of set bins.
2918 * returns 0 for compliance with bnx2x_mcast_refresh_registry_e1().
2920 static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp,
2921 struct bnx2x_mcast_obj *o)
2926 for (i = 0; i < BNX2X_MCAST_VEC_SZ; i++) {
2927 elem = o->registry.aprox_match.vec[i];
2932 o->set_registry_size(o, cnt);
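/* Minimal sketch (not part of the driver) of the counting step referred to
 * above: Kernighan's trick clears the lowest set bit on every iteration, so
 * counting one 64-bit registry element costs one iteration per set bin.
 */
static inline int bnx2x_mcast_count_bins_sketch(u64 elem)
{
	int cnt = 0;

	while (elem) {
		elem &= elem - 1;	/* clear the lowest set bit */
		cnt++;
	}

	return cnt;
}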
2937 static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
2938 struct bnx2x_mcast_ramrod_params *p,
2939 enum bnx2x_mcast_cmd cmd)
2941 struct bnx2x_raw_obj *raw = &p->mcast_obj->raw;
2942 struct bnx2x_mcast_obj *o = p->mcast_obj;
2943 struct eth_multicast_rules_ramrod_data *data =
2944 (struct eth_multicast_rules_ramrod_data *)(raw->rdata);
2947 /* Reset the ramrod data buffer */
2948 memset(data, 0, sizeof(*data));
2950 cnt = bnx2x_mcast_handle_pending_cmds_e2(bp, p);
2952 /* If there are no more pending commands - clear SCHEDULED state */
2953 if (list_empty(&o->pending_cmds_head))
2956 /* The below may be true iff there was enough room in ramrod
2957 * data for all pending commands and for the current
2958 * command. Otherwise the current command would have been added
2959 * to the pending commands and p->mcast_list_len would have been zeroed.
2962 if (p->mcast_list_len > 0)
2963 cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, cnt);
2965 /* We've pulled out some MACs - update the total number of outstanding MACs. */
2968 o->total_pending_num -= cnt;
2971 WARN_ON(o->total_pending_num < 0);
2972 WARN_ON(cnt > o->max_cmd_len);
2974 bnx2x_mcast_set_rdata_hdr_e2(bp, p, (u8)cnt);
2976 /* Update the registry size if there are no more pending operations.
2978 * We don't want to change the value of the registry size if there are
2979 * pending operations because we want it to always be equal to the
2980 * exact or the approximate number (see bnx2x_mcast_validate_e2()) of
2981 * set bins after the last requested operation in order to properly
2982 * evaluate the size of the next DEL/RESTORE operation.
2984 * Note that we update the registry itself during command(s) handling
2985 * - see bnx2x_mcast_set_one_rule_e2(). That's because for 57712 we
2986 * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
2987 * with a limited amount of update commands (per MAC/bin) and we don't
2988 * know in this scope what the actual state of bins configuration is
2989 * going to be after this ramrod.
2991 if (!o->total_pending_num)
2992 bnx2x_mcast_refresh_registry_e2(bp, o);
2994 /* If CLEAR_ONLY was requested - don't send a ramrod and clear
2995 * RAMROD_PENDING status immediately.
2997 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
2998 raw->clear_pending(raw);
3001 /* No need for an explicit memory barrier here as long as we ensure
3002 * the ordering of writing to the SPQ element and updating of the
3003 * SPQ producer, which involves a memory read. If the memory read is
3004 * ever removed we will have to put a full memory barrier there
3005 * (inside bnx2x_sp_post()).
3009 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_MULTICAST_RULES,
3010 raw->cid, U64_HI(raw->rdata_mapping),
3011 U64_LO(raw->rdata_mapping),
3012 ETH_CONNECTION_TYPE);
3016 /* Ramrod completion is pending */
3021 static int bnx2x_mcast_validate_e1h(struct bnx2x *bp,
3022 struct bnx2x_mcast_ramrod_params *p,
3023 enum bnx2x_mcast_cmd cmd)
3025 /* Mark that there is work to do */
3026 if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE))
3027 p->mcast_list_len = 1;
3032 static void bnx2x_mcast_revert_e1h(struct bnx2x *bp,
3033 struct bnx2x_mcast_ramrod_params *p,
3039 #define BNX2X_57711_SET_MC_FILTER(filter, bit) \
	do { \
		(filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
	} while (0)
3044 static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp,
3045 struct bnx2x_mcast_obj *o,
3046 struct bnx2x_mcast_ramrod_params *p,
3049 struct bnx2x_mcast_list_elem *mlist_pos;
3052 list_for_each_entry(mlist_pos, &p->mcast_list, link) {
3053 bit = bnx2x_mcast_bin_from_mac(mlist_pos->mac);
3054 BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3056 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC, bin %d\n",
3057 mlist_pos->mac, bit);
3059 /* bookkeeping... */
3060 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
3065 static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp,
3066 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
3071 for (bit = bnx2x_mcast_get_next_bin(o, 0); bit >= 0;
3073 bit = bnx2x_mcast_get_next_bin(o, bit + 1)) {
3074 BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3075 DP(BNX2X_MSG_SP, "About to set bin %d\n", bit);
3079 /* On 57711 we write the multicast MACs' approximate match
3080 * table directly into the TSTORM's internal RAM, so we don't
3081 * really need to handle any tricks to make it work.
3083 static int bnx2x_mcast_setup_e1h(struct bnx2x *bp,
3084 struct bnx2x_mcast_ramrod_params *p,
3085 enum bnx2x_mcast_cmd cmd)
3088 struct bnx2x_mcast_obj *o = p->mcast_obj;
3089 struct bnx2x_raw_obj *r = &o->raw;
3091 /* If CLEAR_ONLY has been requested - clear the registry
3092 * and clear a pending bit.
3094 if (!test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3095 u32 mc_filter[MC_HASH_SIZE] = {0};
3097 /* Set the multicast filter bits before writing it into
3098 * the internal memory.
3101 case BNX2X_MCAST_CMD_ADD:
3102 bnx2x_mcast_hdl_add_e1h(bp, o, p, mc_filter);
3105 case BNX2X_MCAST_CMD_DEL:
3107 "Invalidating multicast MACs configuration\n");
3109 /* clear the registry */
3110 memset(o->registry.aprox_match.vec, 0,
3111 sizeof(o->registry.aprox_match.vec));
3114 case BNX2X_MCAST_CMD_RESTORE:
3115 bnx2x_mcast_hdl_restore_e1h(bp, o, p, mc_filter);
3119 BNX2X_ERR("Unknown command: %d\n", cmd);
3123 /* Set the mcast filter in the internal memory */
3124 for (i = 0; i < MC_HASH_SIZE; i++)
3125 REG_WR(bp, MC_HASH_OFFSET(bp, i), mc_filter[i]);
3127 /* clear the registry */
3128 memset(o->registry.aprox_match.vec, 0,
3129 sizeof(o->registry.aprox_match.vec));
3132 r->clear_pending(r);
3137 static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
3138 struct bnx2x_mcast_ramrod_params *p,
3139 enum bnx2x_mcast_cmd cmd)
3141 struct bnx2x_mcast_obj *o = p->mcast_obj;
3142 int reg_sz = o->get_registry_size(o);
3145 /* DEL command deletes all currently configured MACs */
3146 case BNX2X_MCAST_CMD_DEL:
3147 o->set_registry_size(o, 0);
3150 /* RESTORE command will restore the entire multicast configuration */
3151 case BNX2X_MCAST_CMD_RESTORE:
3152 p->mcast_list_len = reg_sz;
3153 DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n",
3154 cmd, p->mcast_list_len);
3157 case BNX2X_MCAST_CMD_ADD:
3158 case BNX2X_MCAST_CMD_CONT:
3159 /* Multicast MACs on 57710 are configured as unicast MACs and
3160 * there is only a limited number of CAM entries for that purpose.
3163 if (p->mcast_list_len > o->max_cmd_len) {
3164 BNX2X_ERR("Can't configure more than %d multicast MACs on 57710\n",
3168 /* Every configured MAC should be cleared if DEL command is
3169 * called. Only the last ADD command is relevant, since
3170 * every ADD command overrides the previous configuration.
3172 DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
3173 if (p->mcast_list_len > 0)
3174 o->set_registry_size(o, p->mcast_list_len);
3179 BNX2X_ERR("Unknown command: %d\n", cmd);
3183 /* We want to ensure that commands are executed one by one for 57710.
3184 * Therefore each non-empty command will consume o->max_cmd_len.
3186 if (p->mcast_list_len)
3187 o->total_pending_num += o->max_cmd_len;
3192 static void bnx2x_mcast_revert_e1(struct bnx2x *bp,
3193 struct bnx2x_mcast_ramrod_params *p,
3196 struct bnx2x_mcast_obj *o = p->mcast_obj;
3198 o->set_registry_size(o, old_num_macs);
3200 /* If the current command hasn't been handled yet and we are here,
3201 * it means it's meant to be dropped, so we have to
3202 * update the number of outstanding MACs accordingly.
3204 if (p->mcast_list_len)
3205 o->total_pending_num -= o->max_cmd_len;
3208 static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp,
3209 struct bnx2x_mcast_obj *o, int idx,
3210 union bnx2x_mcast_config_data *cfg_data,
3211 enum bnx2x_mcast_cmd cmd)
3213 struct bnx2x_raw_obj *r = &o->raw;
3214 struct mac_configuration_cmd *data =
3215 (struct mac_configuration_cmd *)(r->rdata);
3218 if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) {
3219 bnx2x_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr,
3220 &data->config_table[idx].middle_mac_addr,
3221 &data->config_table[idx].lsb_mac_addr,
3224 data->config_table[idx].vlan_id = 0;
3225 data->config_table[idx].pf_id = r->func_id;
3226 data->config_table[idx].clients_bit_vector =
3227 cpu_to_le32(1 << r->cl_id);
3229 SET_FLAG(data->config_table[idx].flags,
3230 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3231 T_ETH_MAC_COMMAND_SET);
3236 * bnx2x_mcast_set_rdata_hdr_e1 - set header values in mac_configuration_cmd
3238 * @bp: device handle
3240 * @len: number of rules to handle
3242 static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp,
3243 struct bnx2x_mcast_ramrod_params *p,
3246 struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
3247 struct mac_configuration_cmd *data =
3248 (struct mac_configuration_cmd *)(r->rdata);
3250 u8 offset = (CHIP_REV_IS_SLOW(bp) ?
3251 BNX2X_MAX_EMUL_MULTI*(1 + r->func_id) :
3252 BNX2X_MAX_MULTICAST*(1 + r->func_id));
3254 data->hdr.offset = offset;
3255 data->hdr.client_id = cpu_to_le16(0xff);
3256 data->hdr.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
3257 (BNX2X_FILTER_MCAST_PENDING <<
3258 BNX2X_SWCID_SHIFT));
3259 data->hdr.length = len;
3263 * bnx2x_mcast_handle_restore_cmd_e1 - restore command for 57710
3265 * @bp: device handle
3267 * @start_idx: index in the registry to start from
3268 * @rdata_idx: index in the ramrod data to start from
3270 * restore command for 57710 is like all other commands - always a stand alone
3271 * command - start_idx and rdata_idx will always be 0. This function always
3273 * succeeds and returns -1 to comply with the 57712 variant.
3275 static inline int bnx2x_mcast_handle_restore_cmd_e1(
3276 struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_idx,
3279 struct bnx2x_mcast_mac_elem *elem;
3281 union bnx2x_mcast_config_data cfg_data = {NULL};
3283 /* go through the registry and configure the MACs from it. */
3284 list_for_each_entry(elem, &o->registry.exact_match.macs, link) {
3285 cfg_data.mac = &elem->mac[0];
3286 o->set_one_rule(bp, o, i, &cfg_data, BNX2X_MCAST_CMD_RESTORE);
3290 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
3299 static inline int bnx2x_mcast_handle_pending_cmds_e1(
3300 struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p)
3302 struct bnx2x_pending_mcast_cmd *cmd_pos;
3303 struct bnx2x_mcast_mac_elem *pmac_pos;
3304 struct bnx2x_mcast_obj *o = p->mcast_obj;
3305 union bnx2x_mcast_config_data cfg_data = {NULL};
3308 /* If nothing to be done - return */
3309 if (list_empty(&o->pending_cmds_head))
3312 /* Handle the first command */
3313 cmd_pos = list_first_entry(&o->pending_cmds_head,
3314 struct bnx2x_pending_mcast_cmd, link);
3316 switch (cmd_pos->type) {
3317 case BNX2X_MCAST_CMD_ADD:
3318 list_for_each_entry(pmac_pos, &cmd_pos->data.macs_head, link) {
3319 cfg_data.mac = &pmac_pos->mac[0];
3320 o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
3324 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
3329 case BNX2X_MCAST_CMD_DEL:
3330 cnt = cmd_pos->data.macs_num;
3331 DP(BNX2X_MSG_SP, "About to delete %d multicast MACs\n", cnt);
3334 case BNX2X_MCAST_CMD_RESTORE:
3335 o->hdl_restore(bp, o, 0, &cnt);
3339 BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
3343 list_del(&cmd_pos->link);
3350 * bnx2x_get_fw_mac_addr - the reverse of bnx2x_set_fw_mac_addr().
3357 static inline void bnx2x_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
3358 __le16 *fw_lo, u8 *mac)
3360 mac[1] = ((u8 *)fw_hi)[0];
3361 mac[0] = ((u8 *)fw_hi)[1];
3362 mac[3] = ((u8 *)fw_mid)[0];
3363 mac[2] = ((u8 *)fw_mid)[1];
3364 mac[5] = ((u8 *)fw_lo)[0];
3365 mac[4] = ((u8 *)fw_lo)[1];
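/* Minimal sanity-check sketch (hypothetical, not part of the driver): with
 * the byte layout decoded above, firmware halves holding 0x0011, 0x2233 and
 * 0x4455 (stored as little-endian 16-bit words) map back to
 * 00:11:22:33:44:55.
 */
static inline void bnx2x_get_fw_mac_addr_selftest(void)
{
	__le16 fw_hi = cpu_to_le16(0x0011);
	__le16 fw_mid = cpu_to_le16(0x2233);
	__le16 fw_lo = cpu_to_le16(0x4455);
	u8 mac[ETH_ALEN];

	bnx2x_get_fw_mac_addr(&fw_hi, &fw_mid, &fw_lo, mac);
	WARN_ON(mac[0] != 0x00 || mac[1] != 0x11 || mac[2] != 0x22 ||
		mac[3] != 0x33 || mac[4] != 0x44 || mac[5] != 0x55);
}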
3369 * bnx2x_mcast_refresh_registry_e1 - update the exact-match registry from the last ramrod data
3371 * @bp: device handle
3374 * Check the ramrod data first entry flag to see if it's a DELETE or ADD command
3375 * and update the registry correspondingly: if ADD - allocate memory and add
3376 * the entries to the registry (list); if DELETE - clear the registry and free the memory.
3379 static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp,
3380 struct bnx2x_mcast_obj *o)
3382 struct bnx2x_raw_obj *raw = &o->raw;
3383 struct bnx2x_mcast_mac_elem *elem;
3384 struct mac_configuration_cmd *data =
3385 (struct mac_configuration_cmd *)(raw->rdata);
3387 /* If first entry contains a SET bit - the command was ADD,
3388 * otherwise - DEL_ALL
3390 if (GET_FLAG(data->config_table[0].flags,
3391 MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) {
3392 int i, len = data->hdr.length;
3394 /* Break if it was a RESTORE command */
3395 if (!list_empty(&o->registry.exact_match.macs))
3398 elem = kcalloc(len, sizeof(*elem), GFP_ATOMIC);
3400 BNX2X_ERR("Failed to allocate registry memory\n");
3404 for (i = 0; i < len; i++, elem++) {
3405 bnx2x_get_fw_mac_addr(
3406 &data->config_table[i].msb_mac_addr,
3407 &data->config_table[i].middle_mac_addr,
3408 &data->config_table[i].lsb_mac_addr,
3410 DP(BNX2X_MSG_SP, "Adding registry entry for [%pM]\n",
3412 list_add_tail(&elem->link,
3413 &o->registry.exact_match.macs);
3416 elem = list_first_entry(&o->registry.exact_match.macs,
3417 struct bnx2x_mcast_mac_elem, link);
3418 DP(BNX2X_MSG_SP, "Deleting a registry\n");
3420 INIT_LIST_HEAD(&o->registry.exact_match.macs);
3426 static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
3427 struct bnx2x_mcast_ramrod_params *p,
3428 enum bnx2x_mcast_cmd cmd)
3430 struct bnx2x_mcast_obj *o = p->mcast_obj;
3431 struct bnx2x_raw_obj *raw = &o->raw;
3432 struct mac_configuration_cmd *data =
3433 (struct mac_configuration_cmd *)(raw->rdata);
3436 /* Reset the ramrod data buffer */
3437 memset(data, 0, sizeof(*data));
3439 /* First set all entries as invalid */
3440 for (i = 0; i < o->max_cmd_len ; i++)
3441 SET_FLAG(data->config_table[i].flags,
3442 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3443 T_ETH_MAC_COMMAND_INVALIDATE);
3445 /* Handle pending commands first */
3446 cnt = bnx2x_mcast_handle_pending_cmds_e1(bp, p);
3448 /* If there are no more pending commands - clear SCHEDULED state */
3449 if (list_empty(&o->pending_cmds_head))
3452 /* The below may be true iff there were no pending commands */
3454 cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, 0);
3456 /* For 57710 every command has o->max_cmd_len length to ensure that
3457 * commands are done one at a time.
3459 o->total_pending_num -= o->max_cmd_len;
3463 WARN_ON(cnt > o->max_cmd_len);
3465 /* Set ramrod header (in particular, a number of entries to update) */
3466 bnx2x_mcast_set_rdata_hdr_e1(bp, p, (u8)cnt);
3468 /* Update the registry: we need the registry contents to be always up
3469 * to date in order to be able to execute a RESTORE opcode. Here
3470 * we use the fact that for 57710 we send one command at a time,
3471 * hence we may take the registry update out of the command handling
3472 * and do it in a simpler way here.
3474 rc = bnx2x_mcast_refresh_registry_e1(bp, o);
3478 /* If CLEAR_ONLY was requested - don't send a ramrod and clear
3479 * RAMROD_PENDING status immediately.
3481 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3482 raw->clear_pending(raw);
3485 /* No need for an explicit memory barrier here as long as we ensure
3486 * the ordering of writing to the SPQ element and updating of the
3487 * SPQ producer, which involves a memory read. If the memory read is
3488 * ever removed we will have to put a full memory barrier there
3489 * (inside bnx2x_sp_post()).
3493 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, raw->cid,
3494 U64_HI(raw->rdata_mapping),
3495 U64_LO(raw->rdata_mapping),
3496 ETH_CONNECTION_TYPE);
3500 /* Ramrod completion is pending */
3505 static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o)
3507 return o->registry.exact_match.num_macs_set;
3510 static int bnx2x_mcast_get_registry_size_aprox(struct bnx2x_mcast_obj *o)
3512 return o->registry.aprox_match.num_bins_set;
3515 static void bnx2x_mcast_set_registry_size_exact(struct bnx2x_mcast_obj *o,
3518 o->registry.exact_match.num_macs_set = n;
3521 static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o,
3524 o->registry.aprox_match.num_bins_set = n;
3527 int bnx2x_config_mcast(struct bnx2x *bp,
3528 struct bnx2x_mcast_ramrod_params *p,
3529 enum bnx2x_mcast_cmd cmd)
3531 struct bnx2x_mcast_obj *o = p->mcast_obj;
3532 struct bnx2x_raw_obj *r = &o->raw;
3533 int rc = 0, old_reg_size;
3535 /* This is needed to recover the number of currently configured mcast MACs
3536 * in case of failure.
3538 old_reg_size = o->get_registry_size(o);
3540 /* Do some calculations and checks */
3541 rc = o->validate(bp, p, cmd);
3545 /* Return if there is no work to do */
3546 if ((!p->mcast_list_len) && (!o->check_sched(o)))
3549 DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d\n",
3550 o->total_pending_num, p->mcast_list_len, o->max_cmd_len);
3552 /* Enqueue the current command to the pending list if we can't complete
3553 * it in the current iteration
3555 if (r->check_pending(r) ||
3556 ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
3557 rc = o->enqueue_cmd(bp, p->mcast_obj, p, cmd);
3561 /* Since the current command is now on the pending command list, we
3562 * don't need to handle it separately.
3564 p->mcast_list_len = 0;
3567 if (!r->check_pending(r)) {
3569 /* Set 'pending' state */
3572 /* Configure the new classification in the chip */
3573 rc = o->config_mcast(bp, p, cmd);
3577 /* Wait for a ramrod completion if it was requested */
3578 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
3579 rc = o->wait_comp(bp, o);
3585 r->clear_pending(r);
3588 o->revert(bp, p, old_reg_size);
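/* Hypothetical usage sketch (not part of the driver): add a single multicast
 * MAC through the API above. Field names follow the accesses made in
 * bnx2x_config_mcast() and bnx2x_mcast_enqueue_cmd(); it is assumed that
 * bnx2x_mcast_list_elem.mac is a pointer to the address bytes, as its uses
 * above suggest. Error handling is elided.
 */
static int bnx2x_mcast_add_one_sketch(struct bnx2x *bp,
				      struct bnx2x_mcast_obj *mcast_obj,
				      u8 *mac)
{
	struct bnx2x_mcast_ramrod_params rparam;
	struct bnx2x_mcast_list_elem elem;

	memset(&rparam, 0, sizeof(rparam));

	rparam.mcast_obj = mcast_obj;
	INIT_LIST_HEAD(&rparam.mcast_list);

	elem.mac = mac;
	list_add_tail(&elem.link, &rparam.mcast_list);
	rparam.mcast_list_len = 1;

	set_bit(RAMROD_COMP_WAIT, &rparam.ramrod_flags);

	return bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_ADD);
}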
3593 static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o)
3595 smp_mb__before_clear_bit();
3596 clear_bit(o->sched_state, o->raw.pstate);
3597 smp_mb__after_clear_bit();
3600 static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o)
3602 smp_mb__before_clear_bit();
3603 set_bit(o->sched_state, o->raw.pstate);
3604 smp_mb__after_clear_bit();
3607 static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o)
3609 return !!test_bit(o->sched_state, o->raw.pstate);
3612 static bool bnx2x_mcast_check_pending(struct bnx2x_mcast_obj *o)
3614 return o->raw.check_pending(&o->raw) || o->check_sched(o);
3617 void bnx2x_init_mcast_obj(struct bnx2x *bp,
3618 struct bnx2x_mcast_obj *mcast_obj,
3619 u8 mcast_cl_id, u32 mcast_cid, u8 func_id,
3620 u8 engine_id, void *rdata, dma_addr_t rdata_mapping,
3621 int state, unsigned long *pstate, bnx2x_obj_type type)
3623 memset(mcast_obj, 0, sizeof(*mcast_obj));
3625 bnx2x_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
3626 rdata, rdata_mapping, state, pstate, type);
3628 mcast_obj->engine_id = engine_id;
3630 INIT_LIST_HEAD(&mcast_obj->pending_cmds_head);
3632 mcast_obj->sched_state = BNX2X_FILTER_MCAST_SCHED;
3633 mcast_obj->check_sched = bnx2x_mcast_check_sched;
3634 mcast_obj->set_sched = bnx2x_mcast_set_sched;
3635 mcast_obj->clear_sched = bnx2x_mcast_clear_sched;
3637 if (CHIP_IS_E1(bp)) {
3638 mcast_obj->config_mcast = bnx2x_mcast_setup_e1;
3639 mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd;
3640 mcast_obj->hdl_restore =
3641 bnx2x_mcast_handle_restore_cmd_e1;
3642 mcast_obj->check_pending = bnx2x_mcast_check_pending;
3644 if (CHIP_REV_IS_SLOW(bp))
3645 mcast_obj->max_cmd_len = BNX2X_MAX_EMUL_MULTI;
3647 mcast_obj->max_cmd_len = BNX2X_MAX_MULTICAST;
3649 mcast_obj->wait_comp = bnx2x_mcast_wait;
3650 mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e1;
3651 mcast_obj->validate = bnx2x_mcast_validate_e1;
3652 mcast_obj->revert = bnx2x_mcast_revert_e1;
3653 mcast_obj->get_registry_size =
3654 bnx2x_mcast_get_registry_size_exact;
3655 mcast_obj->set_registry_size =
3656 bnx2x_mcast_set_registry_size_exact;
3658 /* 57710 is the only chip that uses the exact match for mcast at the moment. */
3661 INIT_LIST_HEAD(&mcast_obj->registry.exact_match.macs);
3663 } else if (CHIP_IS_E1H(bp)) {
3664 mcast_obj->config_mcast = bnx2x_mcast_setup_e1h;
3665 mcast_obj->enqueue_cmd = NULL;
3666 mcast_obj->hdl_restore = NULL;
3667 mcast_obj->check_pending = bnx2x_mcast_check_pending;
3669 /* 57711 doesn't send a ramrod, so it has unlimited credit for one command. */
3672 mcast_obj->max_cmd_len = -1;
3673 mcast_obj->wait_comp = bnx2x_mcast_wait;
3674 mcast_obj->set_one_rule = NULL;
3675 mcast_obj->validate = bnx2x_mcast_validate_e1h;
3676 mcast_obj->revert = bnx2x_mcast_revert_e1h;
3677 mcast_obj->get_registry_size =
3678 bnx2x_mcast_get_registry_size_aprox;
3679 mcast_obj->set_registry_size =
3680 bnx2x_mcast_set_registry_size_aprox;
3682 mcast_obj->config_mcast = bnx2x_mcast_setup_e2;
3683 mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd;
3684 mcast_obj->hdl_restore =
3685 bnx2x_mcast_handle_restore_cmd_e2;
3686 mcast_obj->check_pending = bnx2x_mcast_check_pending;
3687 /* TODO: There should be a proper HSI define for this number!!! */
3689 mcast_obj->max_cmd_len = 16;
3690 mcast_obj->wait_comp = bnx2x_mcast_wait;
3691 mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e2;
3692 mcast_obj->validate = bnx2x_mcast_validate_e2;
3693 mcast_obj->revert = bnx2x_mcast_revert_e2;
3694 mcast_obj->get_registry_size =
3695 bnx2x_mcast_get_registry_size_aprox;
3696 mcast_obj->set_registry_size =
3697 bnx2x_mcast_set_registry_size_aprox;
3701 /*************************** Credit handling **********************************/
3704 * atomic_add_ifless - add if the result is less than a given value.
3706 * @v: pointer of type atomic_t
3707 * @a: the amount to add to v...
3708 * @u: ...if (v + a) is less than u.
3710 * returns true if (v + a) was less than u, and false otherwise.
3713 static inline bool __atomic_add_ifless(atomic_t *v, int a, int u)
3719 if (unlikely(c + a >= u))
3722 old = atomic_cmpxchg((v), c, c + a);
3723 if (likely(old == c))
3732 * atomic_dec_ifmoe - decrement if the result is greater than or equal to a given value.
3734 * @v: pointer of type atomic_t
3735 * @a: the amount to dec from v...
3736 * @u: ...if (v - a) is greater than or equal to u.
3738 * returns true if (v - a) was greater than or equal to u, and false
3741 static inline bool __atomic_dec_ifmoe(atomic_t *v, int a, int u)
3747 if (unlikely(c - a < u))
3750 old = atomic_cmpxchg((v), c, c - a);
3751 if (likely(old == c))
3759 static bool bnx2x_credit_pool_get(struct bnx2x_credit_pool_obj *o, int cnt)
3764 rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
3770 static bool bnx2x_credit_pool_put(struct bnx2x_credit_pool_obj *o, int cnt)
3776 /* Don't allow a refill if credit + cnt > pool_sz */
3777 rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);
3784 static int bnx2x_credit_pool_check(struct bnx2x_credit_pool_obj *o)
3789 cur_credit = atomic_read(&o->credit);
3794 static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o,
3800 static bool bnx2x_credit_pool_get_entry(
3801 struct bnx2x_credit_pool_obj *o,
3808 /* Find "internal cam-offset" then add to base for this object... */
3809 for (vec = 0; vec < BNX2X_POOL_VEC_SIZE; vec++) {
3811 /* Skip the current vector if there are no free entries in it */
3812 if (!o->pool_mirror[vec])
3815 /* If we've got here we are going to find a free entry */
3816 for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
3817 i < BIT_VEC64_ELEM_SZ; idx++, i++)
3819 if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
3821 BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
3822 *offset = o->base_pool_offset + idx;
3830 static bool bnx2x_credit_pool_put_entry(
3831 struct bnx2x_credit_pool_obj *o,
3834 if (offset < o->base_pool_offset)
3837 offset -= o->base_pool_offset;
3839 if (offset >= o->pool_sz)
3842 /* Return the entry to the pool */
3843 BIT_VEC64_SET_BIT(o->pool_mirror, offset);
3848 static bool bnx2x_credit_pool_put_entry_always_true(
3849 struct bnx2x_credit_pool_obj *o,
3855 static bool bnx2x_credit_pool_get_entry_always_true(
3856 struct bnx2x_credit_pool_obj *o,
3863 * bnx2x_init_credit_pool - initialize credit pool internals.
3866 * @base: Base entry in the CAM to use.
3867 * @credit: pool size.
3869 * If base is negative, no CAM entries handling will be performed.
3870 * If credit is negative, pool operations will always succeed (unlimited pool).
3873 static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
3874 int base, int credit)
3876 /* Zero the object first */
3877 memset(p, 0, sizeof(*p));
3879 /* Set the table to all 1s */
3880 memset(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));
3882 /* Init a pool as full */
3883 atomic_set(&p->credit, credit);
3885 /* The total pool size */
3886 p->pool_sz = credit;
3888 p->base_pool_offset = base;
3890 /* Commit the change */
3893 p->check = bnx2x_credit_pool_check;
3895 /* if pool credit is negative - disable the checks */
3897 p->put = bnx2x_credit_pool_put;
3898 p->get = bnx2x_credit_pool_get;
3899 p->put_entry = bnx2x_credit_pool_put_entry;
3900 p->get_entry = bnx2x_credit_pool_get_entry;
3902 p->put = bnx2x_credit_pool_always_true;
3903 p->get = bnx2x_credit_pool_always_true;
3904 p->put_entry = bnx2x_credit_pool_put_entry_always_true;
3905 p->get_entry = bnx2x_credit_pool_get_entry_always_true;
3908 /* If base is negative - disable entries handling */
3910 p->put_entry = bnx2x_credit_pool_put_entry_always_true;
3911 p->get_entry = bnx2x_credit_pool_get_entry_always_true;
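/* Hypothetical usage sketch (not part of the driver): take one credit unit
 * and one CAM entry from a pool initialized above, then give them back.
 * When the pool was initialized with a negative base or credit the
 * corresponding callbacks are the *_always_true() stubs and never fail.
 */
static bool bnx2x_credit_pool_roundtrip_sketch(struct bnx2x_credit_pool_obj *p)
{
	int offset;

	if (!p->get(p, 1))
		return false;			/* out of credit */

	if (!p->get_entry(p, &offset)) {
		p->put(p, 1);
		return false;			/* no free CAM entry */
	}

	/* ... a real caller would program the CAM entry at 'offset' here ... */

	p->put_entry(p, offset);
	return p->put(p, 1);
}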
3915 void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
3916 struct bnx2x_credit_pool_obj *p, u8 func_id,
3919 /* TODO: this will be defined in consts as well... */
3920 #define BNX2X_CAM_SIZE_EMUL 5
3924 if (CHIP_IS_E1(bp)) {
3925 /* In E1, Multicast is saved in cam... */
3926 if (!CHIP_REV_IS_SLOW(bp))
3927 cam_sz = (MAX_MAC_CREDIT_E1 / 2) - BNX2X_MAX_MULTICAST;
3929 cam_sz = BNX2X_CAM_SIZE_EMUL - BNX2X_MAX_EMUL_MULTI;
3931 bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
3933 } else if (CHIP_IS_E1H(bp)) {
3934 /* CAM credit is equally divided between all active functions
3937 if ((func_num > 0)) {
3938 if (!CHIP_REV_IS_SLOW(bp))
3939 cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num));
3941 cam_sz = BNX2X_CAM_SIZE_EMUL;
3942 bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
3944 /* this should never happen! Block MAC operations. */
3945 bnx2x_init_credit_pool(p, 0, 0);
3950 /* CAM credit is equally divided between all active functions
3953 if ((func_num > 0)) {
3954 if (!CHIP_REV_IS_SLOW(bp))
3955 cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
3957 cam_sz = BNX2X_CAM_SIZE_EMUL;
3959 /* No need for CAM entries handling for 57712 and newer. */
3962 bnx2x_init_credit_pool(p, -1, cam_sz);
3964 /* this should never happen! Block MAC operations. */
3965 bnx2x_init_credit_pool(p, 0, 0);
3970 void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
3971 struct bnx2x_credit_pool_obj *p,
3975 if (CHIP_IS_E1x(bp)) {
3976 /* There is no VLAN credit in HW on 57710 and 57711; only
3977 * MAC / MAC-VLAN can be set
3979 bnx2x_init_credit_pool(p, 0, -1);
3981 /* CAM credit is equally divided between all active functions
3985 int credit = MAX_VLAN_CREDIT_E2 / func_num;
3986 bnx2x_init_credit_pool(p, func_id * credit, credit);
3988 /* this should never happen! Block VLAN operations. */
3989 bnx2x_init_credit_pool(p, 0, 0);
3993 /****************** RSS Configuration ******************/
3995 * bnx2x_debug_print_ind_table - prints the indirection table configuration.
3997 * @bp: driver handle
3998 * @p: pointer to rss configuration
4000 * Prints it when NETIF_MSG_IFUP debug level is configured.
4002 static inline void bnx2x_debug_print_ind_table(struct bnx2x *bp,
4003 struct bnx2x_config_rss_params *p)
4007 DP(BNX2X_MSG_SP, "Setting indirection table to:\n");
4008 DP(BNX2X_MSG_SP, "0x0000: ");
4009 for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
4010 DP_CONT(BNX2X_MSG_SP, "0x%02x ", p->ind_table[i]);
4012 /* Print 4 bytes in a line */
4013 if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) &&
4014 (((i + 1) & 0x3) == 0)) {
4015 DP_CONT(BNX2X_MSG_SP, "\n");
4016 DP(BNX2X_MSG_SP, "0x%04x: ", i + 1);
4020 DP_CONT(BNX2X_MSG_SP, "\n");
4024 * bnx2x_setup_rss - configure RSS
4026 * @bp: device handle
4027 * @p: rss configuration
4029 * Sends an UPDATE ramrod for that matter.
4031 static int bnx2x_setup_rss(struct bnx2x *bp,
4032 struct bnx2x_config_rss_params *p)
4034 struct bnx2x_rss_config_obj *o = p->rss_obj;
4035 struct bnx2x_raw_obj *r = &o->raw;
4036 struct eth_rss_update_ramrod_data *data =
4037 (struct eth_rss_update_ramrod_data *)(r->rdata);
4041 memset(data, 0, sizeof(*data));
4043 DP(BNX2X_MSG_SP, "Configuring RSS\n");
4045 /* Set an echo field */
4046 data->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
4047 (r->state << BNX2X_SWCID_SHIFT));
4050 if (test_bit(BNX2X_RSS_MODE_DISABLED, &p->rss_flags))
4051 rss_mode = ETH_RSS_MODE_DISABLED;
4052 else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags))
4053 rss_mode = ETH_RSS_MODE_REGULAR;
4055 data->rss_mode = rss_mode;
4057 DP(BNX2X_MSG_SP, "rss_mode=%d\n", rss_mode);
4059 /* RSS capabilities */
4060 if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags))
4061 data->capabilities |=
4062 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
4064 if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags))
4065 data->capabilities |=
4066 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
4068 if (test_bit(BNX2X_RSS_IPV4_UDP, &p->rss_flags))
4069 data->capabilities |=
4070 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;
4072 if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags))
4073 data->capabilities |=
4074 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
4076 if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags))
4077 data->capabilities |=
4078 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
4080 if (test_bit(BNX2X_RSS_IPV6_UDP, &p->rss_flags))
4081 data->capabilities |=
4082 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;
4085 data->rss_result_mask = p->rss_result_mask;
4088 data->rss_engine_id = o->engine_id;
4090 DP(BNX2X_MSG_SP, "rss_engine_id=%d\n", data->rss_engine_id);
4092 /* Indirection table */
4093 memcpy(data->indirection_table, p->ind_table,
4094 T_ETH_INDIRECTION_TABLE_SIZE);
4096 /* Remember the last configuration */
4097 memcpy(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
4099 /* Print the indirection table */
4100 if (netif_msg_ifup(bp))
4101 bnx2x_debug_print_ind_table(bp, p);
4104 if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
4105 memcpy(&data->rss_key[0], &p->rss_key[0],
4106 sizeof(data->rss_key));
4107 data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
4110 /* No need for an explicit memory barrier here as long as we ensure
4111 * the ordering of writing to the SPQ element and updating of the
4112 * SPQ producer, which involves a memory read. If the memory read is
4113 * ever removed we will have to put a full memory barrier there
4114 * (inside bnx2x_sp_post()).
4118 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_RSS_UPDATE, r->cid,
4119 U64_HI(r->rdata_mapping),
4120 U64_LO(r->rdata_mapping),
4121 ETH_CONNECTION_TYPE);
4129 void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
4132 memcpy(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table));
4135 int bnx2x_config_rss(struct bnx2x *bp,
4136 struct bnx2x_config_rss_params *p)
4139 struct bnx2x_rss_config_obj *o = p->rss_obj;
4140 struct bnx2x_raw_obj *r = &o->raw;
4142 /* Do nothing if only driver cleanup was requested */
4143 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
4148 rc = o->config_rss(bp, p);
4150 r->clear_pending(r);
4154 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
4155 rc = r->wait_comp(bp, r);
4160 void bnx2x_init_rss_config_obj(struct bnx2x *bp,
4161 struct bnx2x_rss_config_obj *rss_obj,
4162 u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
4163 void *rdata, dma_addr_t rdata_mapping,
4164 int state, unsigned long *pstate,
4165 bnx2x_obj_type type)
4167 bnx2x_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
4168 rdata_mapping, state, pstate, type);
4170 rss_obj->engine_id = engine_id;
4171 rss_obj->config_rss = bnx2x_setup_rss;
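/* Hypothetical usage sketch (not part of the driver): request a regular
 * IPv4/IPv6 RSS configuration through the object initialized above. Field
 * names follow the accesses made in bnx2x_setup_rss(); the indirection
 * table, hash key and the 0x7f result mask are placeholder values chosen by
 * the caller.
 */
static int bnx2x_config_rss_sketch(struct bnx2x *bp,
				   struct bnx2x_rss_config_obj *rss_obj)
{
	struct bnx2x_config_rss_params params;

	memset(&params, 0, sizeof(params));

	params.rss_obj = rss_obj;
	set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);

	set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
	set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
	set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
	set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
	set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);

	params.rss_result_mask = 0x7f;

	/* params.ind_table[] and params.rss_key[] would be filled here */

	return bnx2x_config_rss(bp, &params);
}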
4174 /********************** Queue state object ***********************************/
4177 * bnx2x_queue_state_change - perform Queue state change transition
4179 * @bp: device handle
4180 * @params: parameters to perform the transition
4182 * returns 0 in case of successfully completed transition, negative error
4183 * code in case of failure, or a positive (EBUSY) value if a completion
4184 * is still pending (possible only if RAMROD_COMP_WAIT is
4185 * not set in params->ramrod_flags for asynchronous commands).
4188 int bnx2x_queue_state_change(struct bnx2x *bp,
4189 struct bnx2x_queue_state_params *params)
4191 struct bnx2x_queue_sp_obj *o = params->q_obj;
4192 int rc, pending_bit;
4193 unsigned long *pending = &o->pending;
4195 /* Check that the requested transition is legal */
4196 rc = o->check_transition(bp, o, params);
4198 BNX2X_ERR("check transition returned an error. rc %d\n", rc);
4202 /* Set "pending" bit */
4203 DP(BNX2X_MSG_SP, "pending bit was=%lx\n", o->pending);
4204 pending_bit = o->set_pending(o, params);
4205 DP(BNX2X_MSG_SP, "pending bit now=%lx\n", o->pending);
4207 /* Don't send a command if only driver cleanup was requested */
4208 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
4209 o->complete_cmd(bp, o, pending_bit);
4212 rc = o->send_cmd(bp, params);
4214 o->next_state = BNX2X_Q_STATE_MAX;
4215 clear_bit(pending_bit, pending);
4216 smp_mb__after_clear_bit();
4220 if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
4221 rc = o->wait_comp(bp, o, pending_bit);
4229 return !!test_bit(pending_bit, pending);
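/* Hypothetical usage sketch (not part of the driver): synchronously issue a
 * single state-change command against a queue object. ACTIVATE is used only
 * as an example; bnx2x_queue_set_pending() below maps it onto the UPDATE
 * pending bit. Command-specific parameters in params.params are assumed to
 * be filled by the caller where the command requires them.
 */
static int bnx2x_queue_activate_sketch(struct bnx2x *bp,
				       struct bnx2x_queue_sp_obj *q_obj)
{
	struct bnx2x_queue_state_params params;

	memset(&params, 0, sizeof(params));

	params.q_obj = q_obj;
	params.cmd = BNX2X_Q_CMD_ACTIVATE;
	set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);

	return bnx2x_queue_state_change(bp, &params);
}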
4232 static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj,
4233 struct bnx2x_queue_state_params *params)
4235 enum bnx2x_queue_cmd cmd = params->cmd, bit;
4237 /* ACTIVATE and DEACTIVATE commands are implemented on top of the UPDATE command. */
4240 if ((cmd == BNX2X_Q_CMD_ACTIVATE) ||
4241 (cmd == BNX2X_Q_CMD_DEACTIVATE))
4242 bit = BNX2X_Q_CMD_UPDATE;
4246 set_bit(bit, &obj->pending);
4250 static int bnx2x_queue_wait_comp(struct bnx2x *bp,
4251 struct bnx2x_queue_sp_obj *o,
4252 enum bnx2x_queue_cmd cmd)
4254 return bnx2x_state_wait(bp, cmd, &o->pending);
4258 * bnx2x_queue_comp_cmd - complete the state change command.
4260 * @bp: device handle
4264 * Checks that the arrived completion is expected.
4266 static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
4267 struct bnx2x_queue_sp_obj *o,
4268 enum bnx2x_queue_cmd cmd)
4270 unsigned long cur_pending = o->pending;
4272 if (!test_and_clear_bit(cmd, &cur_pending)) {
4273 BNX2X_ERR("Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d\n",
4274 cmd, o->cids[BNX2X_PRIMARY_CID_INDEX],
4275 o->state, cur_pending, o->next_state);
4279 if (o->next_tx_only >= o->max_cos)
4280 /* >= because tx only must always be smaller than cos since the
4281 * primary connection supports COS 0
4283 BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d",
4284 o->next_tx_only, o->max_cos);
4287 "Completing command %d for queue %d, setting state to %d\n",
4288 cmd, o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_state);
4290 if (o->next_tx_only) /* print num tx-only if any exist */
4291 DP(BNX2X_MSG_SP, "primary cid %d: num tx-only cons %d\n",
4292 o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_tx_only);
4294 o->state = o->next_state;
4295 o->num_tx_only = o->next_tx_only;
4296 o->next_state = BNX2X_Q_STATE_MAX;
4298 /* It's important that o->state and o->next_state are
4299 * updated before o->pending.
4303 clear_bit(cmd, &o->pending);
4304 smp_mb__after_clear_bit();
4309 static void bnx2x_q_fill_setup_data_e2(struct bnx2x *bp,
4310 struct bnx2x_queue_state_params *cmd_params,
4311 struct client_init_ramrod_data *data)
4313 struct bnx2x_queue_setup_params *params = &cmd_params->params.setup;
4317 /* IPv6 TPA supported for E2 and above only */
4318 data->rx.tpa_en |= test_bit(BNX2X_Q_FLG_TPA_IPV6, ¶ms->flags) *
4319 CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
4322 static void bnx2x_q_fill_init_general_data(struct bnx2x *bp,
4323 struct bnx2x_queue_sp_obj *o,
4324 struct bnx2x_general_setup_params *params,
4325 struct client_init_general_data *gen_data,
4326 unsigned long *flags)
4328 gen_data->client_id = o->cl_id;
4330 if (test_bit(BNX2X_Q_FLG_STATS, flags)) {
4331 gen_data->statistics_counter_id =
4333 gen_data->statistics_en_flg = 1;
4334 gen_data->statistics_zero_flg =
4335 test_bit(BNX2X_Q_FLG_ZERO_STATS, flags);
4337 gen_data->statistics_counter_id =
4338 DISABLE_STATISTIC_COUNTER_ID_VALUE;
4340 gen_data->is_fcoe_flg = test_bit(BNX2X_Q_FLG_FCOE, flags);
4341 gen_data->activate_flg = test_bit(BNX2X_Q_FLG_ACTIVE, flags);
4342 gen_data->sp_client_id = params->spcl_id;
4343 gen_data->mtu = cpu_to_le16(params->mtu);
4344 gen_data->func_id = o->func_id;
4346 gen_data->cos = params->cos;
4348 gen_data->traffic_type =
4349 test_bit(BNX2X_Q_FLG_FCOE, flags) ?
4350 LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
4352 DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d\n",
4353 gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg);
4356 static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
4357 struct bnx2x_txq_setup_params *params,
4358 struct client_init_tx_data *tx_data,
4359 unsigned long *flags)
4361 tx_data->enforce_security_flg =
4362 test_bit(BNX2X_Q_FLG_TX_SEC, flags);
4363 tx_data->default_vlan =
4364 cpu_to_le16(params->default_vlan);
4365 tx_data->default_vlan_flg =
4366 test_bit(BNX2X_Q_FLG_DEF_VLAN, flags);
4367 tx_data->tx_switching_flg =
4368 test_bit(BNX2X_Q_FLG_TX_SWITCH, flags);
4369 tx_data->anti_spoofing_flg =
4370 test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags);
4371 tx_data->force_default_pri_flg =
4372 test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags);
4374 tx_data->tunnel_lso_inc_ip_id =
4375 test_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, flags);
4376 tx_data->tunnel_non_lso_pcsum_location =
4377 test_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, flags) ? PCSUM_ON_PKT :
4380 tx_data->tx_status_block_id = params->fw_sb_id;
4381 tx_data->tx_sb_index_number = params->sb_cq_index;
4382 tx_data->tss_leading_client_id = params->tss_leading_cl_id;
4384 tx_data->tx_bd_page_base.lo =
4385 cpu_to_le32(U64_LO(params->dscr_map));
4386 tx_data->tx_bd_page_base.hi =
4387 cpu_to_le32(U64_HI(params->dscr_map));
4389 /* Don't configure any Tx switching mode during queue SETUP */
4393 static void bnx2x_q_fill_init_pause_data(struct bnx2x_queue_sp_obj *o,
4394 struct rxq_pause_params *params,
4395 struct client_init_rx_data *rx_data)
4397 /* flow control data */
4398 rx_data->cqe_pause_thr_low = cpu_to_le16(params->rcq_th_lo);
4399 rx_data->cqe_pause_thr_high = cpu_to_le16(params->rcq_th_hi);
4400 rx_data->bd_pause_thr_low = cpu_to_le16(params->bd_th_lo);
4401 rx_data->bd_pause_thr_high = cpu_to_le16(params->bd_th_hi);
4402 rx_data->sge_pause_thr_low = cpu_to_le16(params->sge_th_lo);
4403 rx_data->sge_pause_thr_high = cpu_to_le16(params->sge_th_hi);
4404 rx_data->rx_cos_mask = cpu_to_le16(params->pri_map);
4407 static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o,
4408 struct bnx2x_rxq_setup_params *params,
4409 struct client_init_rx_data *rx_data,
4410 unsigned long *flags)
4412 rx_data->tpa_en = test_bit(BNX2X_Q_FLG_TPA, flags) *
4413 CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
4414 rx_data->tpa_en |= test_bit(BNX2X_Q_FLG_TPA_GRO, flags) *
4415 CLIENT_INIT_RX_DATA_TPA_MODE;
4416 rx_data->vmqueue_mode_en_flg = 0;
4418 rx_data->cache_line_alignment_log_size =
4419 params->cache_line_log;
4420 rx_data->enable_dynamic_hc =
4421 test_bit(BNX2X_Q_FLG_DHC, flags);
4422 rx_data->max_sges_for_packet = params->max_sges_pkt;
4423 rx_data->client_qzone_id = params->cl_qzone_id;
4424 rx_data->max_agg_size = cpu_to_le16(params->tpa_agg_sz);
4426 /* Always start in DROP_ALL mode */
4427 rx_data->state = cpu_to_le16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
4428 CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);
4430 /* We don't set drop flags */
4431 rx_data->drop_ip_cs_err_flg = 0;
4432 rx_data->drop_tcp_cs_err_flg = 0;
4433 rx_data->drop_ttl0_flg = 0;
4434 rx_data->drop_udp_cs_err_flg = 0;
4435 rx_data->inner_vlan_removal_enable_flg =
4436 test_bit(BNX2X_Q_FLG_VLAN, flags);
4437 rx_data->outer_vlan_removal_enable_flg =
4438 test_bit(BNX2X_Q_FLG_OV, flags);
4439 rx_data->status_block_id = params->fw_sb_id;
4440 rx_data->rx_sb_index_number = params->sb_cq_index;
4441 rx_data->max_tpa_queues = params->max_tpa_queues;
4442 rx_data->max_bytes_on_bd = cpu_to_le16(params->buf_sz);
4443 rx_data->sge_buff_size = cpu_to_le16(params->sge_buf_sz);
4444 rx_data->bd_page_base.lo =
4445 cpu_to_le32(U64_LO(params->dscr_map));
4446 rx_data->bd_page_base.hi =
4447 cpu_to_le32(U64_HI(params->dscr_map));
4448 rx_data->sge_page_base.lo =
4449 cpu_to_le32(U64_LO(params->sge_map));
4450 rx_data->sge_page_base.hi =
4451 cpu_to_le32(U64_HI(params->sge_map));
4452 rx_data->cqe_page_base.lo =
4453 cpu_to_le32(U64_LO(params->rcq_map));
4454 rx_data->cqe_page_base.hi =
4455 cpu_to_le32(U64_HI(params->rcq_map));
4456 rx_data->is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS, flags);
4458 if (test_bit(BNX2X_Q_FLG_MCAST, flags)) {
4459 rx_data->approx_mcast_engine_id = params->mcast_engine_id;
4460 rx_data->is_approx_mcast = 1;
4463 rx_data->rss_engine_id = params->rss_engine_id;
4465 /* silent vlan removal */
4466 rx_data->silent_vlan_removal_flg =
4467 test_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, flags);
4468 rx_data->silent_vlan_value =
4469 cpu_to_le16(params->silent_removal_value);
4470 rx_data->silent_vlan_mask =
4471 cpu_to_le16(params->silent_removal_mask);
4474 /* initialize the general, tx and rx parts of a queue object */
4475 static void bnx2x_q_fill_setup_data_cmn(struct bnx2x *bp,
4476 struct bnx2x_queue_state_params *cmd_params,
4477 struct client_init_ramrod_data *data)
4479 bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4480 &cmd_params->params.setup.gen_params,
4482 &data->general, &cmd_params->params.setup.flags);
4484 bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4485 &cmd_params->params.setup.txq_params,
4487 &data->tx, &cmd_params->params.setup.flags);
4489 bnx2x_q_fill_init_rx_data(cmd_params->q_obj,
4490 &cmd_params->params.setup.rxq_params,
4492 &data->rx, &cmd_params->params.setup.flags);
4494 bnx2x_q_fill_init_pause_data(cmd_params->q_obj,
4495 &cmd_params->params.setup.pause_params, &data->rx);
4499 /* initialize the general and tx parts of a tx-only queue object */
4500 static void bnx2x_q_fill_setup_tx_only(struct bnx2x *bp,
4501 struct bnx2x_queue_state_params *cmd_params,
4502 struct tx_queue_init_ramrod_data *data)
4504 bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4505 &cmd_params->params.tx_only.gen_params,
4507 &data->general, &cmd_params->params.tx_only.flags);
4509 bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4510 &cmd_params->params.tx_only.txq_params,
4512 &data->tx, &cmd_params->params.tx_only.flags);
4514 DP(BNX2X_MSG_SP, "cid %d, tx bd page lo %x hi %x",
4515 cmd_params->q_obj->cids[0],
4516 data->tx.tx_bd_page_base.lo,
4517 data->tx.tx_bd_page_base.hi);
4521 * bnx2x_q_init - init HW/FW queue
4523 * @bp: device handle
4526 * HW/FW initial Queue configuration:
4528 * - CDU context validation
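* - host coalescing (HC) timeout programming for the Tx and Rx status
*   blocks, when the corresponding BNX2X_Q_FLG_HC flags are set
*
* No ramrod is sent; the INIT command is completed immediately.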
4531 static inline int bnx2x_q_init(struct bnx2x *bp,
4532 struct bnx2x_queue_state_params *params)
4534 struct bnx2x_queue_sp_obj *o = params->q_obj;
4534 struct bnx2x_queue_init_params *init = &params->params.init;
4539 /* Tx HC configuration */
4540 if (test_bit(BNX2X_Q_TYPE_HAS_TX, &o->type) &&
4541 test_bit(BNX2X_Q_FLG_HC, &init->tx.flags)) {
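/* Convert the requested interrupt rate (interrupts/sec) into the coalescing
 * timeout in usec passed to bnx2x_update_coalesce_sb_index();
 * e.g. an hc_rate of 50000 yields a 20 usec timeout.
 */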
4542 hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;
4544 bnx2x_update_coalesce_sb_index(bp, init->tx.fw_sb_id,
4545 init->tx.sb_cq_index,
4546 !test_bit(BNX2X_Q_FLG_HC_EN, &init->tx.flags), hc_usec);
4550 /* Rx HC configuration */
4551 if (test_bit(BNX2X_Q_TYPE_HAS_RX, &o->type) &&
4552 test_bit(BNX2X_Q_FLG_HC, &init->rx.flags)) {
4553 hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;
4555 bnx2x_update_coalesce_sb_index(bp, init->rx.fw_sb_id,
4556 init->rx.sb_cq_index,
4557 !test_bit(BNX2X_Q_FLG_HC_EN, &init->rx.flags), hc_usec);
4561 /* Set CDU context validation values */
4562 for (cos = 0; cos < o->max_cos; cos++) {
4563 DP(BNX2X_MSG_SP, "setting context validation. cid %d, cos %d\n", o->cids[cos], cos);
4565 DP(BNX2X_MSG_SP, "context pointer %p\n", init->cxts[cos]);
4566 bnx2x_set_ctx_validation(bp, init->cxts[cos], o->cids[cos]);
4569 /* As no ramrod is sent, complete the command immediately */
4570 o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT);
4578 static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
4579 struct bnx2x_queue_state_params *params)
4581 struct bnx2x_queue_sp_obj *o = params->q_obj;
4582 struct client_init_ramrod_data *rdata =
4583 (struct client_init_ramrod_data *)o->rdata;
4584 dma_addr_t data_mapping = o->rdata_mapping;
4585 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4587 /* Clear the ramrod data */
4588 memset(rdata, 0, sizeof(*rdata));
4590 /* Fill the ramrod data */
4591 bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4593 /* No explicit memory barrier is needed here: the ordering between
4594 * writing the SPQ element and updating the SPQ producer is ensured
4595 * by the memory read performed as part of the producer update. If
4596 * that read is ever removed, a full memory barrier will have to be
4597 * added there (inside bnx2x_sp_post()).
4600 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4601 U64_HI(data_mapping),
4602 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
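/* The E2/E3 variant below fills the same common ramrod data and then adds
 * the chip-specific fields via bnx2x_q_fill_setup_data_e2() before posting
 * the same CLIENT_SETUP ramrod.
 */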
4605 static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
4606 struct bnx2x_queue_state_params *params)
4608 struct bnx2x_queue_sp_obj *o = params->q_obj;
4609 struct client_init_ramrod_data *rdata =
4610 (struct client_init_ramrod_data *)o->rdata;
4611 dma_addr_t data_mapping = o->rdata_mapping;
4612 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4614 /* Clear the ramrod data */
4615 memset(rdata, 0, sizeof(*rdata));
4617 /* Fill the ramrod data */
4618 bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4619 bnx2x_q_fill_setup_data_e2(bp, params, rdata);
4621 /* No explicit memory barrier is needed here: the ordering between
4622 * writing the SPQ element and updating the SPQ producer is ensured
4623 * by the memory read performed as part of the producer update. If
4624 * that read is ever removed, a full memory barrier will have to be
4625 * added there (inside bnx2x_sp_post()).
4628 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4629 U64_HI(data_mapping),
4630 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4633 static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
4634 struct bnx2x_queue_state_params *params)
4636 struct bnx2x_queue_sp_obj *o = params->q_obj;
4637 struct tx_queue_init_ramrod_data *rdata =
4638 (struct tx_queue_init_ramrod_data *)o->rdata;
4639 dma_addr_t data_mapping = o->rdata_mapping;
4640 int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
4641 struct bnx2x_queue_setup_tx_only_params *tx_only_params =
4642 &params->params.tx_only;
4643 u8 cid_index = tx_only_params->cid_index;
4645 if (cid_index >= o->max_cos) {
4646 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4647 o->cl_id, cid_index);
4651 DP(BNX2X_MSG_SP, "parameters received: cos: %d sp-id: %d\n",
4652 tx_only_params->gen_params.cos,
4653 tx_only_params->gen_params.spcl_id);
4655 /* Clear the ramrod data */
4656 memset(rdata, 0, sizeof(*rdata));
4658 /* Fill the ramrod data */
4659 bnx2x_q_fill_setup_tx_only(bp, params, rdata);
4661 DP(BNX2X_MSG_SP, "sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d\n",
4662 o->cids[cid_index], rdata->general.client_id,
4663 rdata->general.sp_client_id, rdata->general.cos);
4665 /* No explicit memory barrier is needed here: the ordering between
4666 * writing the SPQ element and updating the SPQ producer is ensured
4667 * by the memory read performed as part of the producer update. If
4668 * that read is ever removed, a full memory barrier will have to be
4669 * added there (inside bnx2x_sp_post()).
4672 return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
4673 U64_HI(data_mapping),
4674 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4677 static void bnx2x_q_fill_update_data(struct bnx2x *bp,
4678 struct bnx2x_queue_sp_obj *obj,
4679 struct bnx2x_queue_update_params *params,
4680 struct client_update_ramrod_data *data)
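/* Each attribute below is written as a value flag paired with a
 * *_change_flg bit taken from params->update_flags; the firmware is
 * expected to apply only the attributes whose change flag is set.
 */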
4682 /* Client ID of the client to update */
4683 data->client_id = obj->cl_id;
4685 /* Function ID of the client to update */
4686 data->func_id = obj->func_id;
4688 /* Default VLAN value */
4689 data->default_vlan = cpu_to_le16(params->def_vlan);
4691 /* Inner VLAN stripping */
4692 data->inner_vlan_removal_enable_flg =
4693 test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM, &params->update_flags);
4694 data->inner_vlan_removal_change_flg =
4695 test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
4696 &params->update_flags);
4698 /* Outer VLAN stripping */
4699 data->outer_vlan_removal_enable_flg =
4700 test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, &params->update_flags);
4701 data->outer_vlan_removal_change_flg =
4702 test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG,
4703 &params->update_flags);
4705 /* Drop packets that have a source MAC that doesn't belong to this Queue. */
4708 data->anti_spoofing_enable_flg =
4709 test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, &params->update_flags);
4710 data->anti_spoofing_change_flg =
4711 test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, &params->update_flags);
4713 /* Activate/Deactivate */
4714 data->activate_flg =
4715 test_bit(BNX2X_Q_UPDATE_ACTIVATE, &params->update_flags);
4716 data->activate_change_flg =
4717 test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &params->update_flags);
4719 /* Enable default VLAN */
4720 data->default_vlan_enable_flg =
4721 test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, &params->update_flags);
4722 data->default_vlan_change_flg =
4723 test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
4724 &params->update_flags);
4726 /* silent vlan removal */
4727 data->silent_vlan_change_flg =
4728 test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
4729 &params->update_flags);
4730 data->silent_vlan_removal_flg =
4731 test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, &params->update_flags);
4732 data->silent_vlan_value = cpu_to_le16(params->silent_removal_value);
4733 data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask);
4736 static inline int bnx2x_q_send_update(struct bnx2x *bp,
4737 struct bnx2x_queue_state_params *params)
4739 struct bnx2x_queue_sp_obj *o = params->q_obj;
4740 struct client_update_ramrod_data *rdata =
4741 (struct client_update_ramrod_data *)o->rdata;
4742 dma_addr_t data_mapping = o->rdata_mapping;
4743 struct bnx2x_queue_update_params *update_params =
4744 &params->params.update;
4745 u8 cid_index = update_params->cid_index;
4747 if (cid_index >= o->max_cos) {
4748 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4749 o->cl_id, cid_index);
4753 /* Clear the ramrod data */
4754 memset(rdata, 0, sizeof(*rdata));
4756 /* Fill the ramrod data */
4757 bnx2x_q_fill_update_data(bp, o, update_params, rdata);
4759 /* No explicit memory barrier is needed here: the ordering between
4760 * writing the SPQ element and updating the SPQ producer is ensured
4761 * by the memory read performed as part of the producer update. If
4762 * that read is ever removed, a full memory barrier will have to be
4763 * added there (inside bnx2x_sp_post()).
4766 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
4767 o->cids[cid_index], U64_HI(data_mapping),
4768 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4772 * bnx2x_q_send_deactivate - send DEACTIVATE command
4774 * @bp: device handle
4777 * implemented using the UPDATE command.
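* (an UPDATE carrying only BNX2X_Q_UPDATE_ACTIVATE_CHNG with the
* ACTIVATE bit left clear - there is no dedicated DEACTIVATE ramrod).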
4779 static inline int bnx2x_q_send_deactivate(struct bnx2x *bp,
4780 struct bnx2x_queue_state_params *params)
4782 struct bnx2x_queue_update_params *update = &params->params.update;
4784 memset(update, 0, sizeof(*update));
4786 __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4788 return bnx2x_q_send_update(bp, params);
4792 * bnx2x_q_send_activate - send ACTIVATE command
4794 * @bp: device handle
4797 * implemented using the UPDATE command.
4799 static inline int bnx2x_q_send_activate(struct bnx2x *bp,
4800 struct bnx2x_queue_state_params *params)
4802 struct bnx2x_queue_update_params *update = &params->params.update;
4804 memset(update, 0, sizeof(*update));
4806 __set_bit(BNX2X_Q_UPDATE_ACTIVATE, &update->update_flags);
4807 __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4809 return bnx2x_q_send_update(bp, params);
4812 static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
4813 struct bnx2x_queue_state_params *params)
4815 /* TODO: Not implemented yet. */
4819 static inline int bnx2x_q_send_halt(struct bnx2x *bp,
4820 struct bnx2x_queue_state_params *params)
4822 struct bnx2x_queue_sp_obj *o = params->q_obj;
4824 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT,
4825 o->cids[BNX2X_PRIMARY_CID_INDEX], 0, o->cl_id,
4826 ETH_CONNECTION_TYPE);
4829 static inline int bnx2x_q_send_cfc_del(struct bnx2x *bp,
4830 struct bnx2x_queue_state_params *params)
4832 struct bnx2x_queue_sp_obj *o = params->q_obj;
4833 u8 cid_idx = params->params.cfc_del.cid_index;
4835 if (cid_idx >= o->max_cos) {
4836 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n", o->cl_id, cid_idx);
4841 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL,
4842 o->cids[cid_idx], 0, 0, NONE_CONNECTION_TYPE);
4845 static inline int bnx2x_q_send_terminate(struct bnx2x *bp,
4846 struct bnx2x_queue_state_params *params)
4848 struct bnx2x_queue_sp_obj *o = params->q_obj;
4849 u8 cid_index = params->params.terminate.cid_index;
4851 if (cid_index >= o->max_cos) {
4852 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4853 o->cl_id, cid_index);
4857 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE,
4858 o->cids[cid_index], 0, 0, ETH_CONNECTION_TYPE);
4861 static inline int bnx2x_q_send_empty(struct bnx2x *bp,
4862 struct bnx2x_queue_state_params *params)
4864 struct bnx2x_queue_sp_obj *o = params->q_obj;
4866 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_EMPTY,
4867 o->cids[BNX2X_PRIMARY_CID_INDEX], 0, 0,
4868 ETH_CONNECTION_TYPE);
4871 static inline int bnx2x_queue_send_cmd_cmn(struct bnx2x *bp,
4872 struct bnx2x_queue_state_params *params)
4874 switch (params->cmd) {
4875 case BNX2X_Q_CMD_INIT:
4876 return bnx2x_q_init(bp, params);
4877 case BNX2X_Q_CMD_SETUP_TX_ONLY:
4878 return bnx2x_q_send_setup_tx_only(bp, params);
4879 case BNX2X_Q_CMD_DEACTIVATE:
4880 return bnx2x_q_send_deactivate(bp, params);
4881 case BNX2X_Q_CMD_ACTIVATE:
4882 return bnx2x_q_send_activate(bp, params);
4883 case BNX2X_Q_CMD_UPDATE:
4884 return bnx2x_q_send_update(bp, params);
4885 case BNX2X_Q_CMD_UPDATE_TPA:
4886 return bnx2x_q_send_update_tpa(bp, params);
4887 case BNX2X_Q_CMD_HALT:
4888 return bnx2x_q_send_halt(bp, params);
4889 case BNX2X_Q_CMD_CFC_DEL:
4890 return bnx2x_q_send_cfc_del(bp, params);
4891 case BNX2X_Q_CMD_TERMINATE:
4892 return bnx2x_q_send_terminate(bp, params);
4893 case BNX2X_Q_CMD_EMPTY:
4894 return bnx2x_q_send_empty(bp, params);
4896 BNX2X_ERR("Unknown command: %d\n", params->cmd);
4901 static int bnx2x_queue_send_cmd_e1x(struct bnx2x *bp,
4902 struct bnx2x_queue_state_params *params)
4904 switch (params->cmd) {
4905 case BNX2X_Q_CMD_SETUP:
4906 return bnx2x_q_send_setup_e1x(bp, params);
4907 case BNX2X_Q_CMD_INIT:
4908 case BNX2X_Q_CMD_SETUP_TX_ONLY:
4909 case BNX2X_Q_CMD_DEACTIVATE:
4910 case BNX2X_Q_CMD_ACTIVATE:
4911 case BNX2X_Q_CMD_UPDATE:
4912 case BNX2X_Q_CMD_UPDATE_TPA:
4913 case BNX2X_Q_CMD_HALT:
4914 case BNX2X_Q_CMD_CFC_DEL:
4915 case BNX2X_Q_CMD_TERMINATE:
4916 case BNX2X_Q_CMD_EMPTY:
4917 return bnx2x_queue_send_cmd_cmn(bp, params);
4919 BNX2X_ERR("Unknown command: %d\n", params->cmd);
4924 static int bnx2x_queue_send_cmd_e2(struct bnx2x *bp,
4925 struct bnx2x_queue_state_params *params)
4927 switch (params->cmd) {
4928 case BNX2X_Q_CMD_SETUP:
4929 return bnx2x_q_send_setup_e2(bp, params);
4930 case BNX2X_Q_CMD_INIT:
4931 case BNX2X_Q_CMD_SETUP_TX_ONLY:
4932 case BNX2X_Q_CMD_DEACTIVATE:
4933 case BNX2X_Q_CMD_ACTIVATE:
4934 case BNX2X_Q_CMD_UPDATE:
4935 case BNX2X_Q_CMD_UPDATE_TPA:
4936 case BNX2X_Q_CMD_HALT:
4937 case BNX2X_Q_CMD_CFC_DEL:
4938 case BNX2X_Q_CMD_TERMINATE:
4939 case BNX2X_Q_CMD_EMPTY:
4940 return bnx2x_queue_send_cmd_cmn(bp, params);
4942 BNX2X_ERR("Unknown command: %d\n", params->cmd);
4948 * bnx2x_queue_chk_transition - check state machine of a regular Queue
4950 * @bp: device handle
4955 * It both checks if the requested command is legal in a current
4956 * state and, if it's legal, sets a `next_state' in the object
4957 * that will be used in the completion flow to set the `state' of the object.
4960 * returns 0 if a requested command is a legal transition,
4961 * -EINVAL otherwise.
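*
* Illustrative summary of the regular-queue life cycle coded below:
*   RESET -> INITIALIZED -> ACTIVE <-> INACTIVE -> STOPPED ->
*   TERMINATED -> RESET, with MULTI_COS/MCOS_TERMINATED entered while
*   tx-only companion connections exist.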
4963 static int bnx2x_queue_chk_transition(struct bnx2x *bp,
4964 struct bnx2x_queue_sp_obj *o,
4965 struct bnx2x_queue_state_params *params)
4967 enum bnx2x_q_state state = o->state, next_state = BNX2X_Q_STATE_MAX;
4968 enum bnx2x_queue_cmd cmd = params->cmd;
4969 struct bnx2x_queue_update_params *update_params =
4970 &params->params.update;
4971 u8 next_tx_only = o->num_tx_only;
4973 /* Forget all pending for completion commands if a driver only state
4974 * transition has been requested.
4976 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
4978 o->next_state = BNX2X_Q_STATE_MAX;
4981 /* Don't allow a next state transition if we are in the middle of the previous one. */
4985 BNX2X_ERR("Blocking transition since pending was %lx\n", o->pending);
4991 case BNX2X_Q_STATE_RESET:
4992 if (cmd == BNX2X_Q_CMD_INIT)
4993 next_state = BNX2X_Q_STATE_INITIALIZED;
4996 case BNX2X_Q_STATE_INITIALIZED:
4997 if (cmd == BNX2X_Q_CMD_SETUP) {
4998 if (test_bit(BNX2X_Q_FLG_ACTIVE,
4999 &params->params.setup.flags))
5000 next_state = BNX2X_Q_STATE_ACTIVE;
5002 next_state = BNX2X_Q_STATE_INACTIVE;
5006 case BNX2X_Q_STATE_ACTIVE:
5007 if (cmd == BNX2X_Q_CMD_DEACTIVATE)
5008 next_state = BNX2X_Q_STATE_INACTIVE;
5010 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5011 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5012 next_state = BNX2X_Q_STATE_ACTIVE;
5014 else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
5015 next_state = BNX2X_Q_STATE_MULTI_COS;
5019 else if (cmd == BNX2X_Q_CMD_HALT)
5020 next_state = BNX2X_Q_STATE_STOPPED;
5022 else if (cmd == BNX2X_Q_CMD_UPDATE) {
5023 /* If "active" state change is requested, update the
5024 * state accordingly.
5026 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5027 &update_params->update_flags) &&
5028 !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5029 &update_params->update_flags))
5030 next_state = BNX2X_Q_STATE_INACTIVE;
5032 next_state = BNX2X_Q_STATE_ACTIVE;
5036 case BNX2X_Q_STATE_MULTI_COS:
5037 if (cmd == BNX2X_Q_CMD_TERMINATE)
5038 next_state = BNX2X_Q_STATE_MCOS_TERMINATED;
5040 else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
5041 next_state = BNX2X_Q_STATE_MULTI_COS;
5042 next_tx_only = o->num_tx_only + 1;
5045 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5046 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5047 next_state = BNX2X_Q_STATE_MULTI_COS;
5049 else if (cmd == BNX2X_Q_CMD_UPDATE) {
5050 /* If "active" state change is requested, update the
5051 * state accordingly.
5053 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5054 &update_params->update_flags) &&
5055 !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5056 &update_params->update_flags))
5057 next_state = BNX2X_Q_STATE_INACTIVE;
5059 next_state = BNX2X_Q_STATE_MULTI_COS;
5063 case BNX2X_Q_STATE_MCOS_TERMINATED:
5064 if (cmd == BNX2X_Q_CMD_CFC_DEL) {
5065 next_tx_only = o->num_tx_only - 1;
5066 if (next_tx_only == 0)
5067 next_state = BNX2X_Q_STATE_ACTIVE;
5069 next_state = BNX2X_Q_STATE_MULTI_COS;
5073 case BNX2X_Q_STATE_INACTIVE:
5074 if (cmd == BNX2X_Q_CMD_ACTIVATE)
5075 next_state = BNX2X_Q_STATE_ACTIVE;
5077 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5078 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5079 next_state = BNX2X_Q_STATE_INACTIVE;
5081 else if (cmd == BNX2X_Q_CMD_HALT)
5082 next_state = BNX2X_Q_STATE_STOPPED;
5084 else if (cmd == BNX2X_Q_CMD_UPDATE) {
5085 /* If "active" state change is requested, update the
5086 * state accordingly.
5088 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5089 &update_params->update_flags) &&
5090 test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5091 &update_params->update_flags)){
5092 if (o->num_tx_only == 0)
5093 next_state = BNX2X_Q_STATE_ACTIVE;
5094 else /* tx only queues exist for this queue */
5095 next_state = BNX2X_Q_STATE_MULTI_COS;
5097 next_state = BNX2X_Q_STATE_INACTIVE;
5101 case BNX2X_Q_STATE_STOPPED:
5102 if (cmd == BNX2X_Q_CMD_TERMINATE)
5103 next_state = BNX2X_Q_STATE_TERMINATED;
5106 case BNX2X_Q_STATE_TERMINATED:
5107 if (cmd == BNX2X_Q_CMD_CFC_DEL)
5108 next_state = BNX2X_Q_STATE_RESET;
5112 BNX2X_ERR("Illegal state: %d\n", state);
5115 /* Transition is assured */
5116 if (next_state != BNX2X_Q_STATE_MAX) {
5117 DP(BNX2X_MSG_SP, "Good state transition: %d(%d)->%d\n",
5118 state, cmd, next_state);
5119 o->next_state = next_state;
5120 o->next_tx_only = next_tx_only;
5124 DP(BNX2X_MSG_SP, "Bad state transition request: %d %d\n", state, cmd);
5129 void bnx2x_init_queue_obj(struct bnx2x *bp,
5130 struct bnx2x_queue_sp_obj *obj,
5131 u8 cl_id, u32 *cids, u8 cid_cnt, u8 func_id,
5133 dma_addr_t rdata_mapping, unsigned long type)
5135 memset(obj, 0, sizeof(*obj));
5137 /* We support only BNX2X_MULTI_TX_COS Tx CoS at the moment */
5138 BUG_ON(BNX2X_MULTI_TX_COS < cid_cnt);
5140 memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
5141 obj->max_cos = cid_cnt;
5143 obj->func_id = func_id;
5145 obj->rdata_mapping = rdata_mapping;
5147 obj->next_state = BNX2X_Q_STATE_MAX;
5149 if (CHIP_IS_E1x(bp))
5150 obj->send_cmd = bnx2x_queue_send_cmd_e1x;
5152 obj->send_cmd = bnx2x_queue_send_cmd_e2;
5154 obj->check_transition = bnx2x_queue_chk_transition;
5156 obj->complete_cmd = bnx2x_queue_comp_cmd;
5157 obj->wait_comp = bnx2x_queue_wait_comp;
5158 obj->set_pending = bnx2x_queue_set_pending;
5161 /* return a queue object's logical state*/
5162 int bnx2x_get_q_logical_state(struct bnx2x *bp,
5163 struct bnx2x_queue_sp_obj *obj)
5165 switch (obj->state) {
5166 case BNX2X_Q_STATE_ACTIVE:
5167 case BNX2X_Q_STATE_MULTI_COS:
5168 return BNX2X_Q_LOGICAL_STATE_ACTIVE;
5169 case BNX2X_Q_STATE_RESET:
5170 case BNX2X_Q_STATE_INITIALIZED:
5171 case BNX2X_Q_STATE_MCOS_TERMINATED:
5172 case BNX2X_Q_STATE_INACTIVE:
5173 case BNX2X_Q_STATE_STOPPED:
5174 case BNX2X_Q_STATE_TERMINATED:
5175 case BNX2X_Q_STATE_FLRED:
5176 return BNX2X_Q_LOGICAL_STATE_STOPPED;
5182 /********************** Function state object *********************************/
5183 enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
5184 struct bnx2x_func_sp_obj *o)
5186 /* in the middle of a transaction - return INVALID state */
5188 return BNX2X_F_STATE_MAX;
5190 /* ensure the order of reading of o->pending and o->state:
5191 * o->pending should be read first
5198 static int bnx2x_func_wait_comp(struct bnx2x *bp,
5199 struct bnx2x_func_sp_obj *o,
5200 enum bnx2x_func_cmd cmd)
5202 return bnx2x_state_wait(bp, cmd, &o->pending);
5206 * bnx2x_func_state_change_comp - complete the state machine transition
5208 * @bp: device handle
5212 * Called on state change transition. Completes the state
5213 * machine transition only - no HW interaction.
5215 static inline int bnx2x_func_state_change_comp(struct bnx2x *bp,
5216 struct bnx2x_func_sp_obj *o,
5217 enum bnx2x_func_cmd cmd)
5219 unsigned long cur_pending = o->pending;
5221 if (!test_and_clear_bit(cmd, &cur_pending)) {
5222 BNX2X_ERR("Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d\n",
5223 cmd, BP_FUNC(bp), o->state,
5224 cur_pending, o->next_state);
5229 "Completing command %d for func %d, setting state to %d\n",
5230 cmd, BP_FUNC(bp), o->next_state);
5232 o->state = o->next_state;
5233 o->next_state = BNX2X_F_STATE_MAX;
5235 /* It's important that o->state and o->next_state are
5236 * updated before o->pending.
5240 clear_bit(cmd, &o->pending);
5241 smp_mb__after_clear_bit();
5247 * bnx2x_func_comp_cmd - complete the state change command
5249 * @bp: device handle
5253 * Checks that the arrived completion is expected.
5255 static int bnx2x_func_comp_cmd(struct bnx2x *bp,
5256 struct bnx2x_func_sp_obj *o,
5257 enum bnx2x_func_cmd cmd)
5259 /* Complete the state machine part first, check if it's a
5262 int rc = bnx2x_func_state_change_comp(bp, o, cmd);
5267 * bnx2x_func_chk_transition - perform function state machine transition
5269 * @bp: device handle
5273 * It both checks if the requested command is legal in a current
5274 * state and, if it's legal, sets a `next_state' in the object
5275 * that will be used in the completion flow to set the `state' of the object.
5278 * returns 0 if a requested command is a legal transition,
5279 * -EINVAL otherwise.
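*
* Illustrative summary of the function life cycle coded below:
*   RESET -> INITIALIZED -> STARTED <-> TX_STOPPED, with STOP returning
*   the function to INITIALIZED and HW_RESET returning it to RESET.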
5281 static int bnx2x_func_chk_transition(struct bnx2x *bp,
5282 struct bnx2x_func_sp_obj *o,
5283 struct bnx2x_func_state_params *params)
5285 enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX;
5286 enum bnx2x_func_cmd cmd = params->cmd;
5288 /* Forget all pending for completion commands if a driver only state
5289 * transition has been requested.
5291 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5293 o->next_state = BNX2X_F_STATE_MAX;
5296 /* Don't allow a next state transition if we are in the middle of the previous one. */
5303 case BNX2X_F_STATE_RESET:
5304 if (cmd == BNX2X_F_CMD_HW_INIT)
5305 next_state = BNX2X_F_STATE_INITIALIZED;
5308 case BNX2X_F_STATE_INITIALIZED:
5309 if (cmd == BNX2X_F_CMD_START)
5310 next_state = BNX2X_F_STATE_STARTED;
5312 else if (cmd == BNX2X_F_CMD_HW_RESET)
5313 next_state = BNX2X_F_STATE_RESET;
5316 case BNX2X_F_STATE_STARTED:
5317 if (cmd == BNX2X_F_CMD_STOP)
5318 next_state = BNX2X_F_STATE_INITIALIZED;
5319 /* afex ramrods can be sent only in the STARTED state, and only
5320 * if a function_stop ramrod completion is not pending;
5321 * for these events the next state remains STARTED.
5323 else if ((cmd == BNX2X_F_CMD_AFEX_UPDATE) &&
5324 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5325 next_state = BNX2X_F_STATE_STARTED;
5327 else if ((cmd == BNX2X_F_CMD_AFEX_VIFLISTS) &&
5328 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5329 next_state = BNX2X_F_STATE_STARTED;
5331 /* Switch_update ramrod can be sent in either started or
5332 * tx_stopped state, and it doesn't change the state.
5334 else if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) &&
5335 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5336 next_state = BNX2X_F_STATE_STARTED;
5338 else if (cmd == BNX2X_F_CMD_TX_STOP)
5339 next_state = BNX2X_F_STATE_TX_STOPPED;
5342 case BNX2X_F_STATE_TX_STOPPED:
5343 if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) &&
5344 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5345 next_state = BNX2X_F_STATE_TX_STOPPED;
5347 else if (cmd == BNX2X_F_CMD_TX_START)
5348 next_state = BNX2X_F_STATE_STARTED;
5352 BNX2X_ERR("Unknown state: %d\n", state);
5355 /* Transition is assured */
5356 if (next_state != BNX2X_F_STATE_MAX) {
5357 DP(BNX2X_MSG_SP, "Good function state transition: %d(%d)->%d\n",
5358 state, cmd, next_state);
5359 o->next_state = next_state;
5363 DP(BNX2X_MSG_SP, "Bad function state transition request: %d %d\n", state, cmd);
5370 * bnx2x_func_init_func - performs HW init at function stage
5372 * @bp: device handle
5375 * Init HW when the current phase is
5376 * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only HW blocks.
5379 static inline int bnx2x_func_init_func(struct bnx2x *bp,
5380 const struct bnx2x_func_sp_drv_ops *drv)
5382 return drv->init_hw_func(bp);
5386 * bnx2x_func_init_port - performs HW init at port stage
5388 * @bp: device handle
5391 * Init HW when the current phase is
5392 * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
5393 * FUNCTION-only HW blocks.
5396 static inline int bnx2x_func_init_port(struct bnx2x *bp,
5397 const struct bnx2x_func_sp_drv_ops *drv)
5399 int rc = drv->init_hw_port(bp);
5403 return bnx2x_func_init_func(bp, drv);
5407 * bnx2x_func_init_cmn_chip - performs HW init at chip-common stage
5409 * @bp: device handle
5412 * Init HW when the current phase is
5413 * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
5414 * PORT-only and FUNCTION-only HW blocks.
5416 static inline int bnx2x_func_init_cmn_chip(struct bnx2x *bp,
5417 const struct bnx2x_func_sp_drv_ops *drv)
5419 int rc = drv->init_hw_cmn_chip(bp);
5423 return bnx2x_func_init_port(bp, drv);
5427 * bnx2x_func_init_cmn - performs HW init at common stage
5429 * @bp: device handle
5432 * Init HW when the current phase is
5433 * FW_MSG_CODE_DRV_LOAD_COMMON: initialize COMMON,
5434 * PORT-only and FUNCTION-only HW blocks.
5436 static inline int bnx2x_func_init_cmn(struct bnx2x *bp,
5437 const struct bnx2x_func_sp_drv_ops *drv)
5439 int rc = drv->init_hw_cmn(bp);
5443 return bnx2x_func_init_port(bp, drv);
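/* The init helpers above cascade from the widest scope to the narrowest:
 * the common-chip and common stages fall through to the port stage, which
 * in turn falls through to the function stage. bnx2x_func_hw_init() below
 * selects the entry point from the load_code returned by the MCP.
 */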
5446 static int bnx2x_func_hw_init(struct bnx2x *bp,
5447 struct bnx2x_func_state_params *params)
5449 u32 load_code = params->params.hw_init.load_phase;
5450 struct bnx2x_func_sp_obj *o = params->f_obj;
5451 const struct bnx2x_func_sp_drv_ops *drv = o->drv;
5454 DP(BNX2X_MSG_SP, "function %d load_code %x\n",
5455 BP_ABS_FUNC(bp), load_code);
5457 /* Prepare buffers for unzipping the FW */
5458 rc = drv->gunzip_init(bp);
5463 rc = drv->init_fw(bp);
5465 BNX2X_ERR("Error loading firmware\n");
5469 /* Handle the beginning of the COMMON_XXX phases separately... */
5470 switch (load_code) {
5471 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
5472 rc = bnx2x_func_init_cmn_chip(bp, drv);
5477 case FW_MSG_CODE_DRV_LOAD_COMMON:
5478 rc = bnx2x_func_init_cmn(bp, drv);
5483 case FW_MSG_CODE_DRV_LOAD_PORT:
5484 rc = bnx2x_func_init_port(bp, drv);
5489 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5490 rc = bnx2x_func_init_func(bp, drv);
5496 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5501 drv->gunzip_end(bp);
5503 /* In case of success, complete the command immediately: no ramrods have been sent. */
5507 o->complete_cmd(bp, o, BNX2X_F_CMD_HW_INIT);
5513 * bnx2x_func_reset_func - reset HW at function stage
5515 * @bp: device handle
5518 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
5519 * FUNCTION-only HW blocks.
5521 static inline void bnx2x_func_reset_func(struct bnx2x *bp,
5522 const struct bnx2x_func_sp_drv_ops *drv)
5524 drv->reset_hw_func(bp);
5528 * bnx2x_func_reset_port - reset HW at port stage
5530 * @bp: device handle
5533 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
5534 * FUNCTION-only and PORT-only HW blocks.
5538 * It's important to call reset_port before reset_func() because the last
5539 * thing reset_func() does is pf_disable(), which disables PGLUE_B and
5540 * makes any further DMAE transactions impossible.
5542 static inline void bnx2x_func_reset_port(struct bnx2x *bp,
5543 const struct bnx2x_func_sp_drv_ops *drv)
5545 drv->reset_hw_port(bp);
5546 bnx2x_func_reset_func(bp, drv);
5550 * bnx2x_func_reset_cmn - reset HW at common stage
5552 * @bp: device handle
5555 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
5556 * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
5557 * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
5559 static inline void bnx2x_func_reset_cmn(struct bnx2x *bp,
5560 const struct bnx2x_func_sp_drv_ops *drv)
5562 bnx2x_func_reset_port(bp, drv);
5563 drv->reset_hw_cmn(bp);
5566 static inline int bnx2x_func_hw_reset(struct bnx2x *bp,
5567 struct bnx2x_func_state_params *params)
5569 u32 reset_phase = params->params.hw_reset.reset_phase;
5570 struct bnx2x_func_sp_obj *o = params->f_obj;
5571 const struct bnx2x_func_sp_drv_ops *drv = o->drv;
5573 DP(BNX2X_MSG_SP, "function %d reset_phase %x\n", BP_ABS_FUNC(bp), reset_phase);
5576 switch (reset_phase) {
5577 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5578 bnx2x_func_reset_cmn(bp, drv);
5580 case FW_MSG_CODE_DRV_UNLOAD_PORT:
5581 bnx2x_func_reset_port(bp, drv);
5583 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5584 bnx2x_func_reset_func(bp, drv);
5587 BNX2X_ERR("Unknown reset_phase (0x%x) from MCP\n",
5592 /* Complete the command immediately: no ramrods have been sent. */
5593 o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET);
5598 static inline int bnx2x_func_send_start(struct bnx2x *bp,
5599 struct bnx2x_func_state_params *params)
5601 struct bnx2x_func_sp_obj *o = params->f_obj;
5602 struct function_start_data *rdata =
5603 (struct function_start_data *)o->rdata;
5604 dma_addr_t data_mapping = o->rdata_mapping;
5605 struct bnx2x_func_start_params *start_params = &params->params.start;
5607 memset(rdata, 0, sizeof(*rdata));
5609 /* Fill the ramrod data with provided parameters */
5610 rdata->function_mode = (u8)start_params->mf_mode;
5611 rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag);
5612 rdata->path_id = BP_PATH(bp);
5613 rdata->network_cos_mode = start_params->network_cos_mode;
5614 rdata->gre_tunnel_mode = start_params->gre_tunnel_mode;
5615 rdata->gre_tunnel_rss = start_params->gre_tunnel_rss;
5617 /* No explicit memory barrier is needed here: the ordering between
5618 * writing the SPQ element and updating the SPQ producer is ensured
5619 * by the memory read performed as part of the producer update. If
5620 * that read is ever removed, a full memory barrier will have to be
5621 * added there (inside bnx2x_sp_post()).
5624 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
5625 U64_HI(data_mapping),
5626 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5629 static inline int bnx2x_func_send_switch_update(struct bnx2x *bp,
5630 struct bnx2x_func_state_params *params)
5632 struct bnx2x_func_sp_obj *o = params->f_obj;
5633 struct function_update_data *rdata =
5634 (struct function_update_data *)o->rdata;
5635 dma_addr_t data_mapping = o->rdata_mapping;
5636 struct bnx2x_func_switch_update_params *switch_update_params =
5637 &params->params.switch_update;
5639 memset(rdata, 0, sizeof(*rdata));
5641 /* Fill the ramrod data with provided parameters */
5642 rdata->tx_switch_suspend_change_flg = 1;
5643 rdata->tx_switch_suspend = switch_update_params->suspend;
5644 rdata->echo = SWITCH_UPDATE;
5646 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
5647 U64_HI(data_mapping),
5648 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5651 static inline int bnx2x_func_send_afex_update(struct bnx2x *bp,
5652 struct bnx2x_func_state_params *params)
5654 struct bnx2x_func_sp_obj *o = params->f_obj;
5655 struct function_update_data *rdata =
5656 (struct function_update_data *)o->afex_rdata;
5657 dma_addr_t data_mapping = o->afex_rdata_mapping;
5658 struct bnx2x_func_afex_update_params *afex_update_params =
5659 &params->params.afex_update;
5661 memset(rdata, 0, sizeof(*rdata));
5663 /* Fill the ramrod data with provided parameters */
5664 rdata->vif_id_change_flg = 1;
5665 rdata->vif_id = cpu_to_le16(afex_update_params->vif_id);
5666 rdata->afex_default_vlan_change_flg = 1;
5667 rdata->afex_default_vlan =
5668 cpu_to_le16(afex_update_params->afex_default_vlan);
5669 rdata->allowed_priorities_change_flg = 1;
5670 rdata->allowed_priorities = afex_update_params->allowed_priorities;
5671 rdata->echo = AFEX_UPDATE;
5673 /* No explicit memory barrier is needed here: the ordering between
5674 * writing the SPQ element and updating the SPQ producer is ensured
5675 * by the memory read performed as part of the producer update. If
5676 * that read is ever removed, a full memory barrier will have to be
5677 * added there (inside bnx2x_sp_post()).
5680 DP(BNX2X_MSG_SP, "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n",
5682 rdata->vif_id, rdata->afex_default_vlan, rdata->allowed_priorities);
5684 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
5685 U64_HI(data_mapping),
5686 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5690 inline int bnx2x_func_send_afex_viflists(struct bnx2x *bp,
5691 struct bnx2x_func_state_params *params)
5693 struct bnx2x_func_sp_obj *o = params->f_obj;
5694 struct afex_vif_list_ramrod_data *rdata =
5695 (struct afex_vif_list_ramrod_data *)o->afex_rdata;
5696 struct bnx2x_func_afex_viflists_params *afex_vif_params =
5697 &params->params.afex_viflists;
5698 u64 *p_rdata = (u64 *)rdata;
5700 memset(rdata, 0, sizeof(*rdata));
5702 /* Fill the ramrod data with provided parameters */
5703 rdata->vif_list_index = cpu_to_le16(afex_vif_params->vif_list_index);
5704 rdata->func_bit_map = afex_vif_params->func_bit_map;
5705 rdata->afex_vif_list_command = afex_vif_params->afex_vif_list_command;
5706 rdata->func_to_clear = afex_vif_params->func_to_clear;
5708 /* send in echo type of sub command */
5709 rdata->echo = afex_vif_params->afex_vif_list_command;
5711 /* No explicit memory barrier is needed here: the ordering between
5712 * writing the SPQ element and updating the SPQ producer is ensured
5713 * by the memory read performed as part of the producer update. If
5714 * that read is ever removed, a full memory barrier will have to be
5715 * added there (inside bnx2x_sp_post()).
5718 DP(BNX2X_MSG_SP, "afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x\n",
5719 rdata->afex_vif_list_command, rdata->vif_list_index,
5720 rdata->func_bit_map, rdata->func_to_clear);
5722 /* this ramrod sends data directly and not through DMA mapping */
5723 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0,
5724 U64_HI(*p_rdata), U64_LO(*p_rdata),
5725 NONE_CONNECTION_TYPE);
5728 static inline int bnx2x_func_send_stop(struct bnx2x *bp,
5729 struct bnx2x_func_state_params *params)
5731 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0,
5732 NONE_CONNECTION_TYPE);
5735 static inline int bnx2x_func_send_tx_stop(struct bnx2x *bp,
5736 struct bnx2x_func_state_params *params)
5738 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0, 0,
5739 NONE_CONNECTION_TYPE);
5741 static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
5742 struct bnx2x_func_state_params *params)
5744 struct bnx2x_func_sp_obj *o = params->f_obj;
5745 struct flow_control_configuration *rdata =
5746 (struct flow_control_configuration *)o->rdata;
5747 dma_addr_t data_mapping = o->rdata_mapping;
5748 struct bnx2x_func_tx_start_params *tx_start_params =
5749 &params->params.tx_start;
5752 memset(rdata, 0, sizeof(*rdata));
5754 rdata->dcb_enabled = tx_start_params->dcb_enabled;
5755 rdata->dcb_version = tx_start_params->dcb_version;
5756 rdata->dont_add_pri_0_en = tx_start_params->dont_add_pri_0_en;
5758 for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
5759 rdata->traffic_type_to_priority_cos[i] =
5760 tx_start_params->traffic_type_to_priority_cos[i];
5762 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
5763 U64_HI(data_mapping),
5764 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5767 static int bnx2x_func_send_cmd(struct bnx2x *bp,
5768 struct bnx2x_func_state_params *params)
5770 switch (params->cmd) {
5771 case BNX2X_F_CMD_HW_INIT:
5772 return bnx2x_func_hw_init(bp, params);
5773 case BNX2X_F_CMD_START:
5774 return bnx2x_func_send_start(bp, params);
5775 case BNX2X_F_CMD_STOP:
5776 return bnx2x_func_send_stop(bp, params);
5777 case BNX2X_F_CMD_HW_RESET:
5778 return bnx2x_func_hw_reset(bp, params);
5779 case BNX2X_F_CMD_AFEX_UPDATE:
5780 return bnx2x_func_send_afex_update(bp, params);
5781 case BNX2X_F_CMD_AFEX_VIFLISTS:
5782 return bnx2x_func_send_afex_viflists(bp, params);
5783 case BNX2X_F_CMD_TX_STOP:
5784 return bnx2x_func_send_tx_stop(bp, params);
5785 case BNX2X_F_CMD_TX_START:
5786 return bnx2x_func_send_tx_start(bp, params);
5787 case BNX2X_F_CMD_SWITCH_UPDATE:
5788 return bnx2x_func_send_switch_update(bp, params);
5790 BNX2X_ERR("Unknown command: %d\n", params->cmd);
5795 void bnx2x_init_func_obj(struct bnx2x *bp,
5796 struct bnx2x_func_sp_obj *obj,
5797 void *rdata, dma_addr_t rdata_mapping,
5798 void *afex_rdata, dma_addr_t afex_rdata_mapping,
5799 struct bnx2x_func_sp_drv_ops *drv_iface)
5801 memset(obj, 0, sizeof(*obj));
5803 mutex_init(&obj->one_pending_mutex);
5806 obj->rdata_mapping = rdata_mapping;
5807 obj->afex_rdata = afex_rdata;
5808 obj->afex_rdata_mapping = afex_rdata_mapping;
5809 obj->send_cmd = bnx2x_func_send_cmd;
5810 obj->check_transition = bnx2x_func_chk_transition;
5811 obj->complete_cmd = bnx2x_func_comp_cmd;
5812 obj->wait_comp = bnx2x_func_wait_comp;
5814 obj->drv = drv_iface;
5818 * bnx2x_func_state_change - perform Function state change transition
5820 * @bp: device handle
5821 * @params: parameters to perform the transaction
5823 * returns 0 in case of a successfully completed transition,
5824 * a negative error code in case of failure, or a positive
5825 * (EBUSY) value if a completion is still pending (possible only
5826 * if RAMROD_COMP_WAIT is not set in params->ramrod_flags for
5827 * asynchronous commands).
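*
* Illustrative caller sketch (assumed usage pattern, not code taken from
* this file; the params sub-struct to fill depends on the command):
*
*	struct bnx2x_func_state_params func_params = {NULL};
*
*	func_params.f_obj = &bp->func_obj;
*	func_params.cmd = BNX2X_F_CMD_START;
*	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
*	(fill func_params.params.start here)
*	rc = bnx2x_func_state_change(bp, &func_params);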
5830 int bnx2x_func_state_change(struct bnx2x *bp,
5831 struct bnx2x_func_state_params *params)
5833 struct bnx2x_func_sp_obj *o = params->f_obj;
5835 enum bnx2x_func_cmd cmd = params->cmd;
5836 unsigned long *pending = &o->pending;
5838 mutex_lock(&o->one_pending_mutex);
5840 /* Check that the requested transition is legal */
5841 rc = o->check_transition(bp, o, params);
5842 if ((rc == -EBUSY) &&
5843 (test_bit(RAMROD_RETRY, &params->ramrod_flags))) {
5844 while ((rc == -EBUSY) && (--cnt > 0)) {
5845 mutex_unlock(&o->one_pending_mutex);
5847 mutex_lock(&o->one_pending_mutex);
5848 rc = o->check_transition(bp, o, params);
5851 mutex_unlock(&o->one_pending_mutex);
5852 BNX2X_ERR("timeout waiting for previous ramrod completion\n");
5856 mutex_unlock(&o->one_pending_mutex);
5860 /* Set "pending" bit */
5861 set_bit(cmd, pending);
5863 /* Don't send a command if only driver cleanup was requested */
5864 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5865 bnx2x_func_state_change_comp(bp, o, cmd);
5866 mutex_unlock(&o->one_pending_mutex);
5869 rc = o->send_cmd(bp, params);
5871 mutex_unlock(&o->one_pending_mutex);
5874 o->next_state = BNX2X_F_STATE_MAX;
5875 clear_bit(cmd, pending);
5876 smp_mb__after_clear_bit();
5880 if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
5881 rc = o->wait_comp(bp, o, cmd);
5889 return !!test_bit(cmd, pending);