1 /* bnx2x_sp.c: Broadcom Everest network driver.
3 * Copyright (c) 2011-2013 Broadcom Corporation
5 * Unless you and Broadcom execute a separate written software license
6 * agreement governing use of this software, this software is licensed to you
7 * under the terms of the GNU General Public License version 2, available
8 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
10 * Notwithstanding the above, under no circumstances may you combine this
11 * software in any way with any other Broadcom software provided under a
12 * license other than the GPL, without Broadcom's express prior written
15 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
16 * Written by: Vladislav Zolotarov
20 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22 #include <linux/module.h>
23 #include <linux/crc32.h>
24 #include <linux/netdevice.h>
25 #include <linux/etherdevice.h>
26 #include <linux/crc32c.h>
28 #include "bnx2x_cmn.h"
31 #define BNX2X_MAX_EMUL_MULTI 16
33 /**** Exe Queue interfaces ****/
36 * bnx2x_exe_queue_init - init the Exe Queue object
38 * @o: pointer to the object
40 * @owner: pointer to the owner
41 * @validate: validate function pointer
42 * @optimize: optimize function pointer
43 * @exec: execute function pointer
44 * @get: get function pointer
46 static inline void bnx2x_exe_queue_init(struct bnx2x *bp,
47 struct bnx2x_exe_queue_obj *o,
49 union bnx2x_qable_obj *owner,
50 exe_q_validate validate,
52 exe_q_optimize optimize,
56 memset(o, 0, sizeof(*o));
58 INIT_LIST_HEAD(&o->exe_queue);
59 INIT_LIST_HEAD(&o->pending_comp);
61 spin_lock_init(&o->lock);
63 o->exe_chunk_len = exe_len;
66 /* Owner specific callbacks */
67 o->validate = validate;
69 o->optimize = optimize;
73 DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk length of %d\n",
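/* Execution queue model (summary of the callbacks wired above): a command is
 * first passed to optimize(), which may cancel it against an opposite pending
 * command, and then to validate() when it is added; commands are later
 * executed in chunks of at most exe_chunk_len, and elements whose ramrod is
 * in flight sit on the pending_comp list until completion.
 */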
77 static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp,
78 struct bnx2x_exeq_elem *elem)
80 DP(BNX2X_MSG_SP, "Deleting an exe_queue element\n");
84 static inline int bnx2x_exe_queue_length(struct bnx2x_exe_queue_obj *o)
86 struct bnx2x_exeq_elem *elem;
89 spin_lock_bh(&o->lock);
91 list_for_each_entry(elem, &o->exe_queue, link)
94 spin_unlock_bh(&o->lock);
100 * bnx2x_exe_queue_add - add a new element to the execution queue
104 * @cmd: new command to add
105 * @restore: true - do not optimize the command
107 * If the element is optimized or is illegal, frees it.
109 static inline int bnx2x_exe_queue_add(struct bnx2x *bp,
110 struct bnx2x_exe_queue_obj *o,
111 struct bnx2x_exeq_elem *elem,
116 spin_lock_bh(&o->lock);
119 /* Try to cancel (optimize out) this element against an opposite command already in the queue */
120 rc = o->optimize(bp, o->owner, elem);
124 /* Check if this request is ok */
125 rc = o->validate(bp, o->owner, elem);
127 DP(BNX2X_MSG_SP, "Preamble failed: %d\n", rc);
132 /* If so, add it to the execution queue */
133 list_add_tail(&elem->link, &o->exe_queue);
135 spin_unlock_bh(&o->lock);
140 bnx2x_exe_queue_free_elem(bp, elem);
142 spin_unlock_bh(&o->lock);
147 static inline void __bnx2x_exe_queue_reset_pending(
149 struct bnx2x_exe_queue_obj *o)
151 struct bnx2x_exeq_elem *elem;
153 while (!list_empty(&o->pending_comp)) {
154 elem = list_first_entry(&o->pending_comp,
155 struct bnx2x_exeq_elem, link);
157 list_del(&elem->link);
158 bnx2x_exe_queue_free_elem(bp, elem);
163 * bnx2x_exe_queue_step - execute one execution chunk atomically
167 * @ramrod_flags: flags
169 * (Should be called while holding the exe_queue->lock).
171 static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
172 struct bnx2x_exe_queue_obj *o,
173 unsigned long *ramrod_flags)
175 struct bnx2x_exeq_elem *elem, spacer;
178 memset(&spacer, 0, sizeof(spacer));
180 /* Next step should not be performed until the current is finished,
181 * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
182 * properly clear object internals without sending any command to the FW
183 * which also implies there won't be any completion to clear the pending list.
186 if (!list_empty(&o->pending_comp)) {
187 if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
188 DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n");
189 __bnx2x_exe_queue_reset_pending(bp, o);
195 /* Run through the pending commands list and create the next execution chunk.
198 while (!list_empty(&o->exe_queue)) {
199 elem = list_first_entry(&o->exe_queue, struct bnx2x_exeq_elem,
201 WARN_ON(!elem->cmd_len);
203 if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
204 cur_len += elem->cmd_len;
205 /* Prevent both lists from being empty when moving an
206 * element. This will allow the call of
207 * bnx2x_exe_queue_empty() without locking.
209 list_add_tail(&spacer.link, &o->pending_comp);
211 list_move_tail(&elem->link, &o->pending_comp);
212 list_del(&spacer.link);
221 rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
223 /* In case of an error return the commands back to the queue
224 * and reset the pending_comp.
226 list_splice_init(&o->pending_comp, &o->exe_queue);
228 /* If zero is returned, it means there are no outstanding pending
229 * completions and we may dismiss the pending list.
231 __bnx2x_exe_queue_reset_pending(bp, o);
236 static inline bool bnx2x_exe_queue_empty(struct bnx2x_exe_queue_obj *o)
238 bool empty = list_empty(&o->exe_queue);
240 /* Don't reorder!!! exe_queue must be checked before pending_comp: the spacer used in bnx2x_exe_queue_step() keeps pending_comp non-empty while an element is in transit between the lists, so this lockless check never sees both lists empty during a move. */
243 return empty && list_empty(&o->pending_comp);
246 static inline struct bnx2x_exeq_elem *bnx2x_exe_queue_alloc_elem(
249 DP(BNX2X_MSG_SP, "Allocating a new exe_queue element\n");
250 return kzalloc(sizeof(struct bnx2x_exeq_elem), GFP_ATOMIC);
253 /************************ raw_obj functions ***********************************/
254 static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o)
256 return !!test_bit(o->state, o->pstate);
259 static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o)
261 smp_mb__before_atomic();
262 clear_bit(o->state, o->pstate);
263 smp_mb__after_atomic();
266 static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)
268 smp_mb__before_atomic();
269 set_bit(o->state, o->pstate);
270 smp_mb__after_atomic();
274 * bnx2x_state_wait - wait until the given bit(state) is cleared
277 * @state: state which is to be cleared
278 * @state_p: state buffer
281 static inline int bnx2x_state_wait(struct bnx2x *bp, int state,
282 unsigned long *pstate)
284 /* can take a while if any port is running */
287 if (CHIP_REV_IS_EMUL(bp))
290 DP(BNX2X_MSG_SP, "waiting for state to become %d\n", state);
294 if (!test_bit(state, pstate)) {
295 #ifdef BNX2X_STOP_ON_ERROR
296 DP(BNX2X_MSG_SP, "exit (cnt %d)\n", 5000 - cnt);
301 usleep_range(1000, 2000);
308 BNX2X_ERR("timeout waiting for state %d\n", state);
309 #ifdef BNX2X_STOP_ON_ERROR
316 static int bnx2x_raw_wait(struct bnx2x *bp, struct bnx2x_raw_obj *raw)
318 return bnx2x_state_wait(bp, raw->state, raw->pstate);
321 /***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
322 /* credit handling callbacks */
323 static bool bnx2x_get_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int *offset)
325 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
329 return mp->get_entry(mp, offset);
332 static bool bnx2x_get_credit_mac(struct bnx2x_vlan_mac_obj *o)
334 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
338 return mp->get(mp, 1);
341 static bool bnx2x_get_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int *offset)
343 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
347 return vp->get_entry(vp, offset);
350 static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
352 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
356 return vp->get(vp, 1);
358 static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
360 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
362 return mp->put_entry(mp, offset);
365 static bool bnx2x_put_credit_mac(struct bnx2x_vlan_mac_obj *o)
367 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
369 return mp->put(mp, 1);
372 static bool bnx2x_put_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int offset)
374 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
376 return vp->put_entry(vp, offset);
379 static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
381 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
383 return vp->put(vp, 1);
387 * __bnx2x_vlan_mac_h_write_trylock - try getting the vlan mac writer lock
390 * @o: vlan_mac object
392 * @details: Non-blocking implementation; should be called under the execution queue lock.
395 static int __bnx2x_vlan_mac_h_write_trylock(struct bnx2x *bp,
396 struct bnx2x_vlan_mac_obj *o)
398 if (o->head_reader) {
399 DP(BNX2X_MSG_SP, "vlan_mac_lock writer - There are readers; Busy\n");
403 DP(BNX2X_MSG_SP, "vlan_mac_lock writer - Taken\n");
408 * __bnx2x_vlan_mac_h_exec_pending - execute a previously pended step
411 * @o: vlan_mac object
413 * @details Should be called under execution queue lock; notice it might release
414 * and reclaim it during its run.
416 static void __bnx2x_vlan_mac_h_exec_pending(struct bnx2x *bp,
417 struct bnx2x_vlan_mac_obj *o)
420 unsigned long ramrod_flags = o->saved_ramrod_flags;
422 DP(BNX2X_MSG_SP, "vlan_mac_lock execute pending command with ramrod flags %lu\n",
424 o->head_exe_request = false;
425 o->saved_ramrod_flags = 0;
426 rc = bnx2x_exe_queue_step(bp, &o->exe_queue, &ramrod_flags);
428 BNX2X_ERR("execution of pending commands failed with rc %d\n",
430 #ifdef BNX2X_STOP_ON_ERROR
437 * __bnx2x_vlan_mac_h_pend - Pend an execution step which couldn't run
440 * @o: vlan_mac object
441 * @ramrod_flags: ramrod flags of missed execution
443 * @details Should be called under execution queue lock.
445 static void __bnx2x_vlan_mac_h_pend(struct bnx2x *bp,
446 struct bnx2x_vlan_mac_obj *o,
447 unsigned long ramrod_flags)
449 o->head_exe_request = true;
450 o->saved_ramrod_flags = ramrod_flags;
451 DP(BNX2X_MSG_SP, "Placing pending execution with ramrod flags %lu\n",
456 * __bnx2x_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
459 * @o: vlan_mac object
461 * @details Should be called under execution queue lock. Notice if a pending
462 * execution exists, it will be performed - possibly releasing and
463 * reclaiming the execution queue lock.
465 static void __bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp,
466 struct bnx2x_vlan_mac_obj *o)
468 /* It's possible a new pending execution was added since this writer
469 * executed. If so, execute again. [Ad infinitum]
471 while (o->head_exe_request) {
472 DP(BNX2X_MSG_SP, "vlan_mac_lock - writer release encountered a pending request\n");
473 __bnx2x_vlan_mac_h_exec_pending(bp, o);
479 * __bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
482 * @o: vlan_mac object
484 * @details Should be called under the execution queue lock. May sleep. May
485 * release and reclaim execution queue lock during its run.
487 static int __bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
488 struct bnx2x_vlan_mac_obj *o)
490 /* If we got here, we're holding lock --> no WRITER exists */
492 DP(BNX2X_MSG_SP, "vlan_mac_lock - locked reader - number %d\n",
499 * bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
502 * @o: vlan_mac object
504 * @details May sleep. Claims and releases execution queue lock during its run.
506 int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
507 struct bnx2x_vlan_mac_obj *o)
511 spin_lock_bh(&o->exe_queue.lock);
512 rc = __bnx2x_vlan_mac_h_read_lock(bp, o);
513 spin_unlock_bh(&o->exe_queue.lock);
519 * __bnx2x_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
522 * @o: vlan_mac object
524 * @details Should be called under execution queue lock. Notice if a pending
525 * execution exists, it will be performed if this was the last
526 * reader, possibly releasing and reclaiming the execution queue lock.
528 static void __bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
529 struct bnx2x_vlan_mac_obj *o)
531 if (!o->head_reader) {
532 BNX2X_ERR("Need to release vlan mac reader lock, but lock isn't taken\n");
533 #ifdef BNX2X_STOP_ON_ERROR
538 DP(BNX2X_MSG_SP, "vlan_mac_lock - decreased readers to %d\n",
542 /* It's possible a new pending execution was added, and that this reader
543 * was last - if so we need to execute the command.
545 if (!o->head_reader && o->head_exe_request) {
546 DP(BNX2X_MSG_SP, "vlan_mac_lock - reader release encountered a pending request\n");
548 /* Writer release will do the trick */
549 __bnx2x_vlan_mac_h_write_unlock(bp, o);
554 * bnx2x_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
557 * @o: vlan_mac object
559 * @details Notice if a pending execution exists, it would be performed if this
560 * was the last reader. Claims and releases the execution queue lock during its run.
563 void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
564 struct bnx2x_vlan_mac_obj *o)
566 spin_lock_bh(&o->exe_queue.lock);
567 __bnx2x_vlan_mac_h_read_unlock(bp, o);
568 spin_unlock_bh(&o->exe_queue.lock);
571 static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
572 int n, u8 *base, u8 stride, u8 size)
574 struct bnx2x_vlan_mac_registry_elem *pos;
579 DP(BNX2X_MSG_SP, "get_n_elements - taking vlan_mac_lock (reader)\n");
580 read_lock = bnx2x_vlan_mac_h_read_lock(bp, o);
582 BNX2X_ERR("get_n_elements failed to get vlan mac reader lock; Access without lock\n");
585 list_for_each_entry(pos, &o->head, link) {
587 memcpy(next, &pos->u, size);
589 DP(BNX2X_MSG_SP, "copied element number %d to address %p element was:\n",
591 next += stride + size;
595 if (read_lock == 0) {
596 DP(BNX2X_MSG_SP, "get_n_elements - releasing vlan_mac_lock (reader)\n");
597 bnx2x_vlan_mac_h_read_unlock(bp, o);
600 return counter * ETH_ALEN;
603 /* check_add() callbacks */
604 static int bnx2x_check_mac_add(struct bnx2x *bp,
605 struct bnx2x_vlan_mac_obj *o,
606 union bnx2x_classification_ramrod_data *data)
608 struct bnx2x_vlan_mac_registry_elem *pos;
610 DP(BNX2X_MSG_SP, "Checking MAC %pM for ADD command\n", data->mac.mac);
612 if (!is_valid_ether_addr(data->mac.mac))
615 /* Check if a requested MAC already exists */
616 list_for_each_entry(pos, &o->head, link)
617 if (ether_addr_equal(data->mac.mac, pos->u.mac.mac) &&
618 (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
624 static int bnx2x_check_vlan_add(struct bnx2x *bp,
625 struct bnx2x_vlan_mac_obj *o,
626 union bnx2x_classification_ramrod_data *data)
628 struct bnx2x_vlan_mac_registry_elem *pos;
630 DP(BNX2X_MSG_SP, "Checking VLAN %d for ADD command\n", data->vlan.vlan);
632 list_for_each_entry(pos, &o->head, link)
633 if (data->vlan.vlan == pos->u.vlan.vlan)
639 /* check_del() callbacks */
640 static struct bnx2x_vlan_mac_registry_elem *
641 bnx2x_check_mac_del(struct bnx2x *bp,
642 struct bnx2x_vlan_mac_obj *o,
643 union bnx2x_classification_ramrod_data *data)
645 struct bnx2x_vlan_mac_registry_elem *pos;
647 DP(BNX2X_MSG_SP, "Checking MAC %pM for DEL command\n", data->mac.mac);
649 list_for_each_entry(pos, &o->head, link)
650 if (ether_addr_equal(data->mac.mac, pos->u.mac.mac) &&
651 (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
657 static struct bnx2x_vlan_mac_registry_elem *
658 bnx2x_check_vlan_del(struct bnx2x *bp,
659 struct bnx2x_vlan_mac_obj *o,
660 union bnx2x_classification_ramrod_data *data)
662 struct bnx2x_vlan_mac_registry_elem *pos;
664 DP(BNX2X_MSG_SP, "Checking VLAN %d for DEL command\n", data->vlan.vlan);
666 list_for_each_entry(pos, &o->head, link)
667 if (data->vlan.vlan == pos->u.vlan.vlan)
673 /* check_move() callback */
674 static bool bnx2x_check_move(struct bnx2x *bp,
675 struct bnx2x_vlan_mac_obj *src_o,
676 struct bnx2x_vlan_mac_obj *dst_o,
677 union bnx2x_classification_ramrod_data *data)
679 struct bnx2x_vlan_mac_registry_elem *pos;
682 /* Check if we can delete the requested configuration from the first object.
685 pos = src_o->check_del(bp, src_o, data);
687 /* check if configuration can be added */
688 rc = dst_o->check_add(bp, dst_o, data);
690 /* If this classification can not be added (is already set)
691 * or can't be deleted - return an error.
699 static bool bnx2x_check_move_always_err(
701 struct bnx2x_vlan_mac_obj *src_o,
702 struct bnx2x_vlan_mac_obj *dst_o,
703 union bnx2x_classification_ramrod_data *data)
708 static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
710 struct bnx2x_raw_obj *raw = &o->raw;
713 if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
714 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
715 rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;
717 if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
718 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
719 rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;
724 static void bnx2x_set_mac_in_nig(struct bnx2x *bp,
725 bool add, unsigned char *dev_addr, int index)
728 u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
729 NIG_REG_LLH0_FUNC_MEM;
731 if (!IS_MF_SI(bp) && !IS_MF_AFEX(bp))
734 if (index > BNX2X_LLH_CAM_MAX_PF_LINE)
737 DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n",
738 (add ? "ADD" : "DELETE"), index);
741 /* LLH_FUNC_MEM is a u64 WB register */
742 reg_offset += 8*index;
744 wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
745 (dev_addr[4] << 8) | dev_addr[5]);
746 wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]);
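/* Layout of the 64-bit LLH entry written below: wb_data[0] carries MAC bytes
 * 2..5 and wb_data[1] carries MAC bytes 0..1 of the device address.
 */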
748 REG_WR_DMAE(bp, reg_offset, wb_data, 2);
751 REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
752 NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add);
756 * bnx2x_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
759 * @o: queue for which we want to configure this rule
760 * @add: if true the command is an ADD command, DEL otherwise
761 * @opcode: CLASSIFY_RULE_OPCODE_XXX
762 * @hdr: pointer to a header to setup
765 static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp,
766 struct bnx2x_vlan_mac_obj *o, bool add, int opcode,
767 struct eth_classify_cmd_header *hdr)
769 struct bnx2x_raw_obj *raw = &o->raw;
771 hdr->client_id = raw->cl_id;
772 hdr->func_id = raw->func_id;
774 /* Rx and/or Tx (internal switching) configuration? */
775 hdr->cmd_general_data |=
776 bnx2x_vlan_mac_get_rx_tx_flag(o);
779 hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;
781 hdr->cmd_general_data |=
782 (opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
786 * bnx2x_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
788 * @cid: connection id
789 * @type: BNX2X_FILTER_XXX_PENDING
790 * @hdr: pointer to header to setup
793 * currently we always configure one rule and the echo field to contain a CID and an opcode type.
796 static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type,
797 struct eth_classify_header *hdr, int rule_cnt)
799 hdr->echo = cpu_to_le32((cid & BNX2X_SWCID_MASK) |
800 (type << BNX2X_SWCID_SHIFT));
801 hdr->rule_cnt = (u8)rule_cnt;
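/* Note: the echo value set above comes back in the ramrod completion, where
 * the low bits are parsed into the SW connection id and the high bits into
 * the BNX2X_FILTER_XXX_PENDING type (handled outside this file).
 */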
804 /* hw_config() callbacks */
805 static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
806 struct bnx2x_vlan_mac_obj *o,
807 struct bnx2x_exeq_elem *elem, int rule_idx,
810 struct bnx2x_raw_obj *raw = &o->raw;
811 struct eth_classify_rules_ramrod_data *data =
812 (struct eth_classify_rules_ramrod_data *)(raw->rdata);
813 int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
814 union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
815 bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
816 unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
817 u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac;
819 /* Set LLH CAM entry: currently only iSCSI and ETH macs are
820 * relevant. In addition, the current implementation is tuned for a single ETH MAC.
823 * When multiple unicast ETH MACs PF configuration in switch
824 * independent mode is required (NetQ, multiple netdev MACs,
825 * etc.), consider better utilisation of 8 per function MAC
826 * entries in the LLH register. There is also
827 * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that complete the
828 * total number of CAM entries to 16.
830 * Currently we won't configure NIG for MACs other than a primary ETH
831 * MAC and iSCSI L2 MAC.
833 * If this MAC is moving from one Queue to another, no need to change the NIG configuration.
836 if (cmd != BNX2X_VLAN_MAC_MOVE) {
837 if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags))
838 bnx2x_set_mac_in_nig(bp, add, mac,
839 BNX2X_LLH_CAM_ISCSI_ETH_LINE);
840 else if (test_bit(BNX2X_ETH_MAC, vlan_mac_flags))
841 bnx2x_set_mac_in_nig(bp, add, mac,
842 BNX2X_LLH_CAM_ETH_LINE);
845 /* Reset the ramrod data buffer for the first rule */
847 memset(data, 0, sizeof(*data));
849 /* Setup a command header */
850 bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC,
851 &rule_entry->mac.header);
853 DP(BNX2X_MSG_SP, "About to %s MAC %pM for Queue %d\n",
854 (add ? "add" : "delete"), mac, raw->cl_id);
856 /* Set a MAC itself */
857 bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
858 &rule_entry->mac.mac_mid,
859 &rule_entry->mac.mac_lsb, mac);
860 rule_entry->mac.inner_mac =
861 cpu_to_le16(elem->cmd_data.vlan_mac.u.mac.is_inner_mac);
863 /* MOVE: Add a rule that will add this MAC to the target Queue */
864 if (cmd == BNX2X_VLAN_MAC_MOVE) {
868 /* Setup ramrod data */
869 bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
870 elem->cmd_data.vlan_mac.target_obj,
871 true, CLASSIFY_RULE_OPCODE_MAC,
872 &rule_entry->mac.header);
874 /* Set a MAC itself */
875 bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
876 &rule_entry->mac.mac_mid,
877 &rule_entry->mac.mac_lsb, mac);
878 rule_entry->mac.inner_mac =
879 cpu_to_le16(elem->cmd_data.vlan_mac.
883 /* Set the ramrod data header */
884 /* TODO: take this to the higher level in order to prevent writing the header multiple times.
886 bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
891 * bnx2x_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod
896 * @cam_offset: offset in cam memory
897 * @hdr: pointer to a header to setup
901 static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp,
902 struct bnx2x_vlan_mac_obj *o, int type, int cam_offset,
903 struct mac_configuration_hdr *hdr)
905 struct bnx2x_raw_obj *r = &o->raw;
908 hdr->offset = (u8)cam_offset;
909 hdr->client_id = cpu_to_le16(0xff);
910 hdr->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
911 (type << BNX2X_SWCID_SHIFT));
914 static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp,
915 struct bnx2x_vlan_mac_obj *o, bool add, int opcode, u8 *mac,
916 u16 vlan_id, struct mac_configuration_entry *cfg_entry)
918 struct bnx2x_raw_obj *r = &o->raw;
919 u32 cl_bit_vec = (1 << r->cl_id);
921 cfg_entry->clients_bit_vector = cpu_to_le32(cl_bit_vec);
922 cfg_entry->pf_id = r->func_id;
923 cfg_entry->vlan_id = cpu_to_le16(vlan_id);
926 SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
927 T_ETH_MAC_COMMAND_SET);
928 SET_FLAG(cfg_entry->flags,
929 MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, opcode);
931 /* Set a MAC in a ramrod data */
932 bnx2x_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
933 &cfg_entry->middle_mac_addr,
934 &cfg_entry->lsb_mac_addr, mac);
936 SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
937 T_ETH_MAC_COMMAND_INVALIDATE);
940 static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp,
941 struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, bool add,
942 u8 *mac, u16 vlan_id, int opcode, struct mac_configuration_cmd *config)
944 struct mac_configuration_entry *cfg_entry = &config->config_table[0];
945 struct bnx2x_raw_obj *raw = &o->raw;
947 bnx2x_vlan_mac_set_rdata_hdr_e1x(bp, o, type, cam_offset,
949 bnx2x_vlan_mac_set_cfg_entry_e1x(bp, o, add, opcode, mac, vlan_id,
952 DP(BNX2X_MSG_SP, "%s MAC %pM CLID %d CAM offset %d\n",
953 (add ? "setting" : "clearing"),
954 mac, raw->cl_id, cam_offset);
958 * bnx2x_set_one_mac_e1x - fill a single MAC rule ramrod data
961 * @o: bnx2x_vlan_mac_obj
962 * @elem: bnx2x_exeq_elem
963 * @rule_idx: rule_idx
964 * @cam_offset: cam_offset
966 static void bnx2x_set_one_mac_e1x(struct bnx2x *bp,
967 struct bnx2x_vlan_mac_obj *o,
968 struct bnx2x_exeq_elem *elem, int rule_idx,
971 struct bnx2x_raw_obj *raw = &o->raw;
972 struct mac_configuration_cmd *config =
973 (struct mac_configuration_cmd *)(raw->rdata);
974 /* 57710 and 57711 do not support MOVE command,
975 * so it's either ADD or DEL
977 bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
980 /* Reset the ramrod data buffer */
981 memset(config, 0, sizeof(*config));
983 bnx2x_vlan_mac_set_rdata_e1x(bp, o, raw->state,
985 elem->cmd_data.vlan_mac.u.mac.mac, 0,
986 ETH_VLAN_FILTER_ANY_VLAN, config);
989 static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
990 struct bnx2x_vlan_mac_obj *o,
991 struct bnx2x_exeq_elem *elem, int rule_idx,
994 struct bnx2x_raw_obj *raw = &o->raw;
995 struct eth_classify_rules_ramrod_data *data =
996 (struct eth_classify_rules_ramrod_data *)(raw->rdata);
997 int rule_cnt = rule_idx + 1;
998 union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
999 enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
1000 bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
1001 u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;
1003 /* Reset the ramrod data buffer for the first rule */
1005 memset(data, 0, sizeof(*data));
1007 /* Set a rule header */
1008 bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_VLAN,
1009 &rule_entry->vlan.header);
1011 DP(BNX2X_MSG_SP, "About to %s VLAN %d\n", (add ? "add" : "delete"),
1014 /* Set a VLAN itself */
1015 rule_entry->vlan.vlan = cpu_to_le16(vlan);
1017 /* MOVE: Add a rule that will add this VLAN to the target Queue */
1018 if (cmd == BNX2X_VLAN_MAC_MOVE) {
1022 /* Setup ramrod data */
1023 bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
1024 elem->cmd_data.vlan_mac.target_obj,
1025 true, CLASSIFY_RULE_OPCODE_VLAN,
1026 &rule_entry->vlan.header);
1028 /* Set a VLAN itself */
1029 rule_entry->vlan.vlan = cpu_to_le16(vlan);
1032 /* Set the ramrod data header */
1033 /* TODO: take this to the higher level in order to prevent writing the header multiple times.
1035 bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
1040 * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
1042 * @bp: device handle
1043 * @p: command parameters
1044 * @ppos: pointer to the cookie
1046 * reconfigure next MAC/VLAN/VLAN-MAC element from the
1047 * previously configured elements list.
1049 * From the command parameters, only the RAMROD_COMP_WAIT bit in ramrod_flags is taken into account.
1052 * pointer to the cookie - it should be given back in the next call to make the
1053 * function handle the next element. If *ppos is set to NULL it will restart the
1054 * iterator. If the returned *ppos == NULL, the last element has been handled.
1058 static int bnx2x_vlan_mac_restore(struct bnx2x *bp,
1059 struct bnx2x_vlan_mac_ramrod_params *p,
1060 struct bnx2x_vlan_mac_registry_elem **ppos)
1062 struct bnx2x_vlan_mac_registry_elem *pos;
1063 struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1065 /* If list is empty - there is nothing to do here */
1066 if (list_empty(&o->head)) {
1071 /* make a step... */
1073 *ppos = list_first_entry(&o->head,
1074 struct bnx2x_vlan_mac_registry_elem,
1077 *ppos = list_next_entry(*ppos, link);
1081 /* If it's the last step - return NULL */
1082 if (list_is_last(&pos->link, &o->head))
1085 /* Prepare a 'user_req' */
1086 memcpy(&p->user_req.u, &pos->u, sizeof(pos->u));
1088 /* Set the command */
1089 p->user_req.cmd = BNX2X_VLAN_MAC_ADD;
1091 /* Set vlan_mac_flags */
1092 p->user_req.vlan_mac_flags = pos->vlan_mac_flags;
1094 /* Set a restore bit */
1095 __set_bit(RAMROD_RESTORE, &p->ramrod_flags);
1097 return bnx2x_config_vlan_mac(bp, p);
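/* Illustrative-only usage sketch for the restore iterator above (an
 * assumption about a typical caller, not a verbatim copy of one): with a
 * prepared bnx2x_vlan_mac_ramrod_params p, start with a NULL cookie and keep
 * calling until it comes back NULL:
 *
 *	struct bnx2x_vlan_mac_registry_elem *pos = NULL;
 *	int rc;
 *	do {
 *		rc = o->restore(bp, &p, &pos);
 *	} while (rc >= 0 && pos);
 */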
1100 /* bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a
1101 * pointer to an element matching specific criteria and NULL if such an element
1102 * hasn't been found.
1104 static struct bnx2x_exeq_elem *bnx2x_exeq_get_mac(
1105 struct bnx2x_exe_queue_obj *o,
1106 struct bnx2x_exeq_elem *elem)
1108 struct bnx2x_exeq_elem *pos;
1109 struct bnx2x_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;
1111 /* Check pending for execution commands */
1112 list_for_each_entry(pos, &o->exe_queue, link)
1113 if (!memcmp(&pos->cmd_data.vlan_mac.u.mac, data,
1115 (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1121 static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan(
1122 struct bnx2x_exe_queue_obj *o,
1123 struct bnx2x_exeq_elem *elem)
1125 struct bnx2x_exeq_elem *pos;
1126 struct bnx2x_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan;
1128 /* Check pending for execution commands */
1129 list_for_each_entry(pos, &o->exe_queue, link)
1130 if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan, data,
1132 (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1139 * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed
1141 * @bp: device handle
1142 * @qo: bnx2x_qable_obj
1143 * @elem: bnx2x_exeq_elem
1145 * Checks that the requested configuration can be added. If yes and if
1146 * requested, consumes a CAM credit.
1148 * The 'validate' is run after the 'optimize'.
1151 static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp,
1152 union bnx2x_qable_obj *qo,
1153 struct bnx2x_exeq_elem *elem)
1155 struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1156 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1159 /* Check the registry */
1160 rc = o->check_add(bp, o, &elem->cmd_data.vlan_mac.u);
1162 DP(BNX2X_MSG_SP, "ADD command is not allowed considering current registry state.\n");
1166 /* Check if there is a pending ADD command for this
1167 * MAC/VLAN/VLAN-MAC. Return an error if there is.
1169 if (exeq->get(exeq, elem)) {
1170 DP(BNX2X_MSG_SP, "There is a pending ADD command already\n");
1174 /* TODO: Check the pending MOVE from other objects where this
1175 * object is a destination object.
1178 /* Consume the credit if not requested not to */
1179 if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1180 &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1188 * bnx2x_validate_vlan_mac_del - check if the DEL command can be executed
1190 * @bp: device handle
1191 * @qo: quable object to check
1192 * @elem: element that needs to be deleted
1194 * Checks that the requested configuration can be deleted. If yes and if
1195 * requested, returns a CAM credit.
1197 * The 'validate' is run after the 'optimize'.
1199 static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp,
1200 union bnx2x_qable_obj *qo,
1201 struct bnx2x_exeq_elem *elem)
1203 struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1204 struct bnx2x_vlan_mac_registry_elem *pos;
1205 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1206 struct bnx2x_exeq_elem query_elem;
1208 /* If this classification can not be deleted (doesn't exist)
1209 * - return a BNX2X_EXIST.
1211 pos = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);
1213 DP(BNX2X_MSG_SP, "DEL command is not allowed considering current registry state\n");
1217 /* Check if there are pending DEL or MOVE commands for this
1218 * MAC/VLAN/VLAN-MAC. Return an error if so.
1220 memcpy(&query_elem, elem, sizeof(query_elem));
1222 /* Check for MOVE commands */
1223 query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_MOVE;
1224 if (exeq->get(exeq, &query_elem)) {
1225 BNX2X_ERR("There is a pending MOVE command already\n");
1229 /* Check for DEL commands */
1230 if (exeq->get(exeq, elem)) {
1231 DP(BNX2X_MSG_SP, "There is a pending DEL command already\n");
1235 /* Return the credit to the credit pool if not requested not to */
1236 if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1237 &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1238 o->put_credit(o))) {
1239 BNX2X_ERR("Failed to return a credit\n");
1247 * bnx2x_validate_vlan_mac_move - check if the MOVE command can be executed
1249 * @bp: device handle
1250 * @qo: quable object to check (source)
1251 * @elem: element that needs to be moved
1253 * Checks that the requested configuration can be moved. If yes and if
1254 * requested, returns a CAM credit.
1256 * The 'validate' is run after the 'optimize'.
1258 static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
1259 union bnx2x_qable_obj *qo,
1260 struct bnx2x_exeq_elem *elem)
1262 struct bnx2x_vlan_mac_obj *src_o = &qo->vlan_mac;
1263 struct bnx2x_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
1264 struct bnx2x_exeq_elem query_elem;
1265 struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue;
1266 struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue;
1268 /* Check if we can perform this operation based on the current registry state.
1271 if (!src_o->check_move(bp, src_o, dest_o,
1272 &elem->cmd_data.vlan_mac.u)) {
1273 DP(BNX2X_MSG_SP, "MOVE command is not allowed considering current registry state\n");
1277 /* Check if there is an already pending DEL or MOVE command for the
1278 * source object or an ADD command for the destination object. Return an error if so.
1281 memcpy(&query_elem, elem, sizeof(query_elem));
1283 /* Check DEL on source */
1284 query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
1285 if (src_exeq->get(src_exeq, &query_elem)) {
1286 BNX2X_ERR("There is a pending DEL command on the source queue already\n");
1290 /* Check MOVE on source */
1291 if (src_exeq->get(src_exeq, elem)) {
1292 DP(BNX2X_MSG_SP, "There is a pending MOVE command already\n");
1296 /* Check ADD on destination */
1297 query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
1298 if (dest_exeq->get(dest_exeq, &query_elem)) {
1299 BNX2X_ERR("There is a pending ADD command on the destination queue already\n");
1303 /* Consume the credit if not requested not to */
1304 if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
1305 &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1306 dest_o->get_credit(dest_o)))
1309 if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1310 &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1311 src_o->put_credit(src_o))) {
1312 /* return the credit taken from dest... */
1313 dest_o->put_credit(dest_o);
1320 static int bnx2x_validate_vlan_mac(struct bnx2x *bp,
1321 union bnx2x_qable_obj *qo,
1322 struct bnx2x_exeq_elem *elem)
1324 switch (elem->cmd_data.vlan_mac.cmd) {
1325 case BNX2X_VLAN_MAC_ADD:
1326 return bnx2x_validate_vlan_mac_add(bp, qo, elem);
1327 case BNX2X_VLAN_MAC_DEL:
1328 return bnx2x_validate_vlan_mac_del(bp, qo, elem);
1329 case BNX2X_VLAN_MAC_MOVE:
1330 return bnx2x_validate_vlan_mac_move(bp, qo, elem);
1336 static int bnx2x_remove_vlan_mac(struct bnx2x *bp,
1337 union bnx2x_qable_obj *qo,
1338 struct bnx2x_exeq_elem *elem)
1342 /* If consumption wasn't required, nothing to do */
1343 if (test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1344 &elem->cmd_data.vlan_mac.vlan_mac_flags))
1347 switch (elem->cmd_data.vlan_mac.cmd) {
1348 case BNX2X_VLAN_MAC_ADD:
1349 case BNX2X_VLAN_MAC_MOVE:
1350 rc = qo->vlan_mac.put_credit(&qo->vlan_mac);
1352 case BNX2X_VLAN_MAC_DEL:
1353 rc = qo->vlan_mac.get_credit(&qo->vlan_mac);
1366 * bnx2x_wait_vlan_mac - passively wait for 5 seconds until all work completes.
1368 * @bp: device handle
1369 * @o: bnx2x_vlan_mac_obj
1372 static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
1373 struct bnx2x_vlan_mac_obj *o)
1376 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1377 struct bnx2x_raw_obj *raw = &o->raw;
1380 /* Wait for the current command to complete */
1381 rc = raw->wait_comp(bp, raw);
1385 /* Wait until there are no pending commands */
1386 if (!bnx2x_exe_queue_empty(exeq))
1387 usleep_range(1000, 2000);
1395 static int __bnx2x_vlan_mac_execute_step(struct bnx2x *bp,
1396 struct bnx2x_vlan_mac_obj *o,
1397 unsigned long *ramrod_flags)
1401 spin_lock_bh(&o->exe_queue.lock);
1403 DP(BNX2X_MSG_SP, "vlan_mac_execute_step - trying to take writer lock\n");
1404 rc = __bnx2x_vlan_mac_h_write_trylock(bp, o);
1407 __bnx2x_vlan_mac_h_pend(bp, o, *ramrod_flags);
1409 /* The calling function should not differentiate between this case
1410 * and the case in which there is already a pending ramrod.
1414 rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
1416 spin_unlock_bh(&o->exe_queue.lock);
1422 * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod
1424 * @bp: device handle
1425 * @o: bnx2x_vlan_mac_obj
1427 * @ramrod_flags: if RAMROD_CONT is set, schedules the next execution chunk
1430 static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
1431 struct bnx2x_vlan_mac_obj *o,
1432 union event_ring_elem *cqe,
1433 unsigned long *ramrod_flags)
1435 struct bnx2x_raw_obj *r = &o->raw;
1438 /* Clearing the pending list & raw state should be done
1439 * atomically (the execution flow assumes they represent the same state).
1441 spin_lock_bh(&o->exe_queue.lock);
1443 /* Reset pending list */
1444 __bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);
1447 r->clear_pending(r);
1449 spin_unlock_bh(&o->exe_queue.lock);
1451 /* If ramrod failed this is most likely a SW bug */
1452 if (cqe->message.error)
1455 /* Run the next bulk of pending commands if requested */
1456 if (test_bit(RAMROD_CONT, ramrod_flags)) {
1457 rc = __bnx2x_vlan_mac_execute_step(bp, o, ramrod_flags);
1463 /* If there is more work to do return PENDING */
1464 if (!bnx2x_exe_queue_empty(&o->exe_queue))
1471 * bnx2x_optimize_vlan_mac - optimize ADD and DEL commands.
1473 * @bp: device handle
1474 * @qo: bnx2x_qable_obj
1475 * @elem: bnx2x_exeq_elem
1477 static int bnx2x_optimize_vlan_mac(struct bnx2x *bp,
1478 union bnx2x_qable_obj *qo,
1479 struct bnx2x_exeq_elem *elem)
1481 struct bnx2x_exeq_elem query, *pos;
1482 struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1483 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1485 memcpy(&query, elem, sizeof(query));
1487 switch (elem->cmd_data.vlan_mac.cmd) {
1488 case BNX2X_VLAN_MAC_ADD:
1489 query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
1491 case BNX2X_VLAN_MAC_DEL:
1492 query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
1495 /* Don't handle anything other than ADD or DEL */
1499 /* If we found the appropriate element - delete it */
1500 pos = exeq->get(exeq, &query);
1503 /* Return the credit of the optimized command */
1504 if (!test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1505 &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
1506 if ((query.cmd_data.vlan_mac.cmd ==
1507 BNX2X_VLAN_MAC_ADD) && !o->put_credit(o)) {
1508 BNX2X_ERR("Failed to return the credit for the optimized ADD command\n");
1510 } else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
1511 BNX2X_ERR("Failed to recover the credit from the optimized DEL command\n");
1516 DP(BNX2X_MSG_SP, "Optimizing %s command\n",
1517 (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
1520 list_del(&pos->link);
1521 bnx2x_exe_queue_free_elem(bp, pos);
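/* At this point the previously queued opposite command has been dropped and,
 * as described for bnx2x_exe_queue_add() above, the newly submitted element
 * is freed by the caller as well: an ADD followed by a DEL of the same entry
 * (or vice versa) is a no-op.
 */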
1529 * bnx2x_vlan_mac_get_registry_elem - prepare a registry element
1531 * @bp: device handle
1537 * prepare a registry element according to the current command request.
1539 static inline int bnx2x_vlan_mac_get_registry_elem(
1541 struct bnx2x_vlan_mac_obj *o,
1542 struct bnx2x_exeq_elem *elem,
1544 struct bnx2x_vlan_mac_registry_elem **re)
1546 enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
1547 struct bnx2x_vlan_mac_registry_elem *reg_elem;
1549 /* Allocate a new registry element if needed. */
1551 ((cmd == BNX2X_VLAN_MAC_ADD) || (cmd == BNX2X_VLAN_MAC_MOVE))) {
1552 reg_elem = kzalloc(sizeof(*reg_elem), GFP_ATOMIC);
1556 /* Get a new CAM offset */
1557 if (!o->get_cam_offset(o, ®_elem->cam_offset)) {
1558 /* This shall never happen, because we have checked the
1559 * CAM availability in the 'validate'.
1566 DP(BNX2X_MSG_SP, "Got cam offset %d\n", reg_elem->cam_offset);
1568 /* Set a VLAN-MAC data */
1569 memcpy(®_elem->u, &elem->cmd_data.vlan_mac.u,
1570 sizeof(reg_elem->u));
1572 /* Copy the flags (needed for DEL and RESTORE flows) */
1573 reg_elem->vlan_mac_flags =
1574 elem->cmd_data.vlan_mac.vlan_mac_flags;
1575 } else /* DEL, RESTORE */
1576 reg_elem = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);
1583 * bnx2x_execute_vlan_mac - execute vlan mac command
1585 * @bp: device handle
1590 * go and send a ramrod!
1592 static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
1593 union bnx2x_qable_obj *qo,
1594 struct list_head *exe_chunk,
1595 unsigned long *ramrod_flags)
1597 struct bnx2x_exeq_elem *elem;
1598 struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
1599 struct bnx2x_raw_obj *r = &o->raw;
1601 bool restore = test_bit(RAMROD_RESTORE, ramrod_flags);
1602 bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags);
1603 struct bnx2x_vlan_mac_registry_elem *reg_elem;
1604 enum bnx2x_vlan_mac_cmd cmd;
1606 /* If DRIVER_ONLY execution is requested, clean up the registry
1607 * and exit. Otherwise send a ramrod to FW.
1610 WARN_ON(r->check_pending(r));
1615 /* Fill the ramrod data */
1616 list_for_each_entry(elem, exe_chunk, link) {
1617 cmd = elem->cmd_data.vlan_mac.cmd;
1618 /* We will add to the target object in MOVE command, so
1619 * change the object for a CAM search.
1621 if (cmd == BNX2X_VLAN_MAC_MOVE)
1622 cam_obj = elem->cmd_data.vlan_mac.target_obj;
1626 rc = bnx2x_vlan_mac_get_registry_elem(bp, cam_obj,
1634 /* Push a new entry into the registry */
1636 ((cmd == BNX2X_VLAN_MAC_ADD) ||
1637 (cmd == BNX2X_VLAN_MAC_MOVE)))
1638 list_add(®_elem->link, &cam_obj->head);
1640 /* Configure a single command in a ramrod data buffer */
1641 o->set_one_rule(bp, o, elem, idx,
1642 reg_elem->cam_offset);
1644 /* MOVE command consumes 2 entries in the ramrod data */
1645 if (cmd == BNX2X_VLAN_MAC_MOVE)
1651 /* No need for an explicit memory barrier here as long as we
1652 * ensure the ordering of writing to the SPQ element
1653 * and updating of the SPQ producer which involves a memory
1654 * read. If the memory read is removed we will have to put a
1655 * full memory barrier there (inside bnx2x_sp_post()).
1658 rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid,
1659 U64_HI(r->rdata_mapping),
1660 U64_LO(r->rdata_mapping),
1661 ETH_CONNECTION_TYPE);
1666 /* Now, when we are done with the ramrod - clean up the registry */
1667 list_for_each_entry(elem, exe_chunk, link) {
1668 cmd = elem->cmd_data.vlan_mac.cmd;
1669 if ((cmd == BNX2X_VLAN_MAC_DEL) ||
1670 (cmd == BNX2X_VLAN_MAC_MOVE)) {
1671 reg_elem = o->check_del(bp, o,
1672 &elem->cmd_data.vlan_mac.u);
1676 o->put_cam_offset(o, reg_elem->cam_offset);
1677 list_del(®_elem->link);
1688 r->clear_pending(r);
1690 /* Cleanup a registry in case of a failure */
1691 list_for_each_entry(elem, exe_chunk, link) {
1692 cmd = elem->cmd_data.vlan_mac.cmd;
1694 if (cmd == BNX2X_VLAN_MAC_MOVE)
1695 cam_obj = elem->cmd_data.vlan_mac.target_obj;
1699 /* Delete all the entries that were newly added above */
1701 ((cmd == BNX2X_VLAN_MAC_ADD) ||
1702 (cmd == BNX2X_VLAN_MAC_MOVE))) {
1703 reg_elem = o->check_del(bp, cam_obj,
1704 &elem->cmd_data.vlan_mac.u);
1706 list_del(®_elem->link);
1715 static inline int bnx2x_vlan_mac_push_new_cmd(
1717 struct bnx2x_vlan_mac_ramrod_params *p)
1719 struct bnx2x_exeq_elem *elem;
1720 struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1721 bool restore = test_bit(RAMROD_RESTORE, &p->ramrod_flags);
1723 /* Allocate the execution queue element */
1724 elem = bnx2x_exe_queue_alloc_elem(bp);
1728 /* Set the command 'length' */
1729 switch (p->user_req.cmd) {
1730 case BNX2X_VLAN_MAC_MOVE:
1737 /* Fill the object specific info */
1738 memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));
1740 /* Try to add a new command to the pending list */
1741 return bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore);
1745 * bnx2x_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
1747 * @bp: device handle
1751 int bnx2x_config_vlan_mac(struct bnx2x *bp,
1752 struct bnx2x_vlan_mac_ramrod_params *p)
1755 struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1756 unsigned long *ramrod_flags = &p->ramrod_flags;
1757 bool cont = test_bit(RAMROD_CONT, ramrod_flags);
1758 struct bnx2x_raw_obj *raw = &o->raw;
1761 * Add new elements to the execution list for commands that require it.
1764 rc = bnx2x_vlan_mac_push_new_cmd(bp, p);
1769 /* If nothing will be executed further in this iteration we want to
1770 * return PENDING if there are pending commands
1772 if (!bnx2x_exe_queue_empty(&o->exe_queue))
1775 if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
1776 DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.\n");
1777 raw->clear_pending(raw);
1780 /* Execute commands if required */
1781 if (cont || test_bit(RAMROD_EXEC, ramrod_flags) ||
1782 test_bit(RAMROD_COMP_WAIT, ramrod_flags)) {
1783 rc = __bnx2x_vlan_mac_execute_step(bp, p->vlan_mac_obj,
1789 /* RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
1790 * then the user wants to wait until the last command is done.
1792 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
1793 /* Wait maximum for the current exe_queue length iterations plus
1794 * one (for the current pending command).
1796 int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1;
1798 while (!bnx2x_exe_queue_empty(&o->exe_queue) &&
1801 /* Wait for the current command to complete */
1802 rc = raw->wait_comp(bp, raw);
1806 /* Make a next step */
1807 rc = __bnx2x_vlan_mac_execute_step(bp,
1821 * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
1823 * @bp: device handle
1826 * @ramrod_flags: execution flags to be used for this deletion
1828 * Returns 0 if the last operation has completed successfully and there are no
1829 * more elements left, a positive value if the last operation has completed
1830 * successfully and there are more previously configured elements, and a negative
1831 * value if the current operation has failed.
1833 static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
1834 struct bnx2x_vlan_mac_obj *o,
1835 unsigned long *vlan_mac_flags,
1836 unsigned long *ramrod_flags)
1838 struct bnx2x_vlan_mac_registry_elem *pos = NULL;
1839 struct bnx2x_vlan_mac_ramrod_params p;
1840 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1841 struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
1842 unsigned long flags;
1846 /* Clear pending commands first */
1848 spin_lock_bh(&exeq->lock);
1850 list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
1851 flags = exeq_pos->cmd_data.vlan_mac.vlan_mac_flags;
1852 if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) ==
1853 BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {
1854 rc = exeq->remove(bp, exeq->owner, exeq_pos);
1856 BNX2X_ERR("Failed to remove command\n");
1857 spin_unlock_bh(&exeq->lock);
1860 list_del(&exeq_pos->link);
1861 bnx2x_exe_queue_free_elem(bp, exeq_pos);
1865 spin_unlock_bh(&exeq->lock);
1867 /* Prepare a command request */
1868 memset(&p, 0, sizeof(p));
1870 p.ramrod_flags = *ramrod_flags;
1871 p.user_req.cmd = BNX2X_VLAN_MAC_DEL;
1873 /* Add all but the last VLAN-MAC to the execution queue without actually
1874 * executing anything.
1876 __clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
1877 __clear_bit(RAMROD_EXEC, &p.ramrod_flags);
1878 __clear_bit(RAMROD_CONT, &p.ramrod_flags);
1880 DP(BNX2X_MSG_SP, "vlan_mac_del_all -- taking vlan_mac_lock (reader)\n");
1881 read_lock = bnx2x_vlan_mac_h_read_lock(bp, o);
1885 list_for_each_entry(pos, &o->head, link) {
1886 flags = pos->vlan_mac_flags;
1887 if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) ==
1888 BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {
1889 p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
1890 memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
1891 rc = bnx2x_config_vlan_mac(bp, &p);
1893 BNX2X_ERR("Failed to add a new DEL command\n");
1894 bnx2x_vlan_mac_h_read_unlock(bp, o);
1900 DP(BNX2X_MSG_SP, "vlan_mac_del_all -- releasing vlan_mac_lock (reader)\n");
1901 bnx2x_vlan_mac_h_read_unlock(bp, o);
1903 p.ramrod_flags = *ramrod_flags;
1904 __set_bit(RAMROD_CONT, &p.ramrod_flags);
1906 return bnx2x_config_vlan_mac(bp, &p);
1909 static inline void bnx2x_init_raw_obj(struct bnx2x_raw_obj *raw, u8 cl_id,
1910 u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, int state,
1911 unsigned long *pstate, bnx2x_obj_type type)
1913 raw->func_id = func_id;
1917 raw->rdata_mapping = rdata_mapping;
1919 raw->pstate = pstate;
1920 raw->obj_type = type;
1921 raw->check_pending = bnx2x_raw_check_pending;
1922 raw->clear_pending = bnx2x_raw_clear_pending;
1923 raw->set_pending = bnx2x_raw_set_pending;
1924 raw->wait_comp = bnx2x_raw_wait;
1927 static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
1928 u8 cl_id, u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping,
1929 int state, unsigned long *pstate, bnx2x_obj_type type,
1930 struct bnx2x_credit_pool_obj *macs_pool,
1931 struct bnx2x_credit_pool_obj *vlans_pool)
1933 INIT_LIST_HEAD(&o->head);
1935 o->head_exe_request = false;
1936 o->saved_ramrod_flags = 0;
1938 o->macs_pool = macs_pool;
1939 o->vlans_pool = vlans_pool;
1941 o->delete_all = bnx2x_vlan_mac_del_all;
1942 o->restore = bnx2x_vlan_mac_restore;
1943 o->complete = bnx2x_complete_vlan_mac;
1944 o->wait = bnx2x_wait_vlan_mac;
1946 bnx2x_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
1947 state, pstate, type);
1950 void bnx2x_init_mac_obj(struct bnx2x *bp,
1951 struct bnx2x_vlan_mac_obj *mac_obj,
1952 u8 cl_id, u32 cid, u8 func_id, void *rdata,
1953 dma_addr_t rdata_mapping, int state,
1954 unsigned long *pstate, bnx2x_obj_type type,
1955 struct bnx2x_credit_pool_obj *macs_pool)
1957 union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)mac_obj;
1959 bnx2x_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
1960 rdata_mapping, state, pstate, type,
1963 /* CAM credit pool handling */
1964 mac_obj->get_credit = bnx2x_get_credit_mac;
1965 mac_obj->put_credit = bnx2x_put_credit_mac;
1966 mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
1967 mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
1969 if (CHIP_IS_E1x(bp)) {
1970 mac_obj->set_one_rule = bnx2x_set_one_mac_e1x;
1971 mac_obj->check_del = bnx2x_check_mac_del;
1972 mac_obj->check_add = bnx2x_check_mac_add;
1973 mac_obj->check_move = bnx2x_check_move_always_err;
1974 mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
1977 bnx2x_exe_queue_init(bp,
1978 &mac_obj->exe_queue, 1, qable_obj,
1979 bnx2x_validate_vlan_mac,
1980 bnx2x_remove_vlan_mac,
1981 bnx2x_optimize_vlan_mac,
1982 bnx2x_execute_vlan_mac,
1983 bnx2x_exeq_get_mac);
1985 mac_obj->set_one_rule = bnx2x_set_one_mac_e2;
1986 mac_obj->check_del = bnx2x_check_mac_del;
1987 mac_obj->check_add = bnx2x_check_mac_add;
1988 mac_obj->check_move = bnx2x_check_move;
1989 mac_obj->ramrod_cmd =
1990 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
1991 mac_obj->get_n_elements = bnx2x_get_n_elements;
1994 bnx2x_exe_queue_init(bp,
1995 &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
1996 qable_obj, bnx2x_validate_vlan_mac,
1997 bnx2x_remove_vlan_mac,
1998 bnx2x_optimize_vlan_mac,
1999 bnx2x_execute_vlan_mac,
2000 bnx2x_exeq_get_mac);
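/* Illustrative-only sketch (an assumption, not a verbatim caller): with a MAC
 * object initialized as above, a single unicast MAC is typically configured
 * by filling a struct bnx2x_vlan_mac_ramrod_params with this object in
 * vlan_mac_obj, the address in user_req.u.mac.mac, user_req.cmd set to
 * BNX2X_VLAN_MAC_ADD and RAMROD_COMP_WAIT set in ramrod_flags, and then
 * calling bnx2x_config_vlan_mac().
 */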
2004 void bnx2x_init_vlan_obj(struct bnx2x *bp,
2005 struct bnx2x_vlan_mac_obj *vlan_obj,
2006 u8 cl_id, u32 cid, u8 func_id, void *rdata,
2007 dma_addr_t rdata_mapping, int state,
2008 unsigned long *pstate, bnx2x_obj_type type,
2009 struct bnx2x_credit_pool_obj *vlans_pool)
2011 union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)vlan_obj;
2013 bnx2x_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata,
2014 rdata_mapping, state, pstate, type, NULL,
2017 vlan_obj->get_credit = bnx2x_get_credit_vlan;
2018 vlan_obj->put_credit = bnx2x_put_credit_vlan;
2019 vlan_obj->get_cam_offset = bnx2x_get_cam_offset_vlan;
2020 vlan_obj->put_cam_offset = bnx2x_put_cam_offset_vlan;
2022 if (CHIP_IS_E1x(bp)) {
2023 BNX2X_ERR("Do not support chips other than E2 and newer\n");
2026 vlan_obj->set_one_rule = bnx2x_set_one_vlan_e2;
2027 vlan_obj->check_del = bnx2x_check_vlan_del;
2028 vlan_obj->check_add = bnx2x_check_vlan_add;
2029 vlan_obj->check_move = bnx2x_check_move;
2030 vlan_obj->ramrod_cmd =
2031 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2032 vlan_obj->get_n_elements = bnx2x_get_n_elements;
2035 bnx2x_exe_queue_init(bp,
2036 &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
2037 qable_obj, bnx2x_validate_vlan_mac,
2038 bnx2x_remove_vlan_mac,
2039 bnx2x_optimize_vlan_mac,
2040 bnx2x_execute_vlan_mac,
2041 bnx2x_exeq_get_vlan);
2045 /* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
2046 static inline void __storm_memset_mac_filters(struct bnx2x *bp,
2047 struct tstorm_eth_mac_filter_config *mac_filters,
2050 size_t size = sizeof(struct tstorm_eth_mac_filter_config);
2052 u32 addr = BAR_TSTRORM_INTMEM +
2053 TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
2055 __storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
2058 static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
2059 struct bnx2x_rx_mode_ramrod_params *p)
2061 /* update the bp MAC filter structure */
2062 u32 mask = (1 << p->cl_id);
2064 struct tstorm_eth_mac_filter_config *mac_filters =
2065 (struct tstorm_eth_mac_filter_config *)p->rdata;
2067 /* initial setting is drop-all */
2068 u8 drop_all_ucast = 1, drop_all_mcast = 1;
2069 u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2070 u8 unmatched_unicast = 0;
2072 /* In e1x we only take into account the rx accept flag since tx switching isn't enabled.
2074 if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags))
2075 /* accept matched ucast */
2078 if (test_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags))
2079 /* accept matched mcast */
2082 if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
2083 /* accept all ucast */
2087 if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
2088 /* accept all mcast */
2092 if (test_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags))
2093 /* accept (all) bcast */
2095 if (test_bit(BNX2X_ACCEPT_UNMATCHED, &p->rx_accept_flags))
2096 /* accept unmatched unicasts */
2097 unmatched_unicast = 1;
2099 mac_filters->ucast_drop_all = drop_all_ucast ?
2100 mac_filters->ucast_drop_all | mask :
2101 mac_filters->ucast_drop_all & ~mask;
2103 mac_filters->mcast_drop_all = drop_all_mcast ?
2104 mac_filters->mcast_drop_all | mask :
2105 mac_filters->mcast_drop_all & ~mask;
2107 mac_filters->ucast_accept_all = accp_all_ucast ?
2108 mac_filters->ucast_accept_all | mask :
2109 mac_filters->ucast_accept_all & ~mask;
2111 mac_filters->mcast_accept_all = accp_all_mcast ?
2112 mac_filters->mcast_accept_all | mask :
2113 mac_filters->mcast_accept_all & ~mask;
2115 mac_filters->bcast_accept_all = accp_all_bcast ?
2116 mac_filters->bcast_accept_all | mask :
2117 mac_filters->bcast_accept_all & ~mask;
2119 mac_filters->unmatched_unicast = unmatched_unicast ?
2120 mac_filters->unmatched_unicast | mask :
2121 mac_filters->unmatched_unicast & ~mask;
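/* Each mac_filters field updated above is a per-client bitmask: the ternary
 * pattern sets or clears this client's bit (mask == 1 << p->cl_id) according
 * to the accept/drop decision derived from the rx_accept_flags.
 */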
2123 DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\n accp_ucast 0x%x\n"
2124 "accp_mcast 0x%x\naccp_bcast 0x%x\n",
2125 mac_filters->ucast_drop_all, mac_filters->mcast_drop_all,
2126 mac_filters->ucast_accept_all, mac_filters->mcast_accept_all,
2127 mac_filters->bcast_accept_all);
2129 /* write the MAC filter structure */
2130 __storm_memset_mac_filters(bp, mac_filters, p->func_id);
2132 /* The operation is completed */
2133 clear_bit(p->state, p->pstate);
2134 smp_mb__after_atomic();
2139 /* Setup ramrod data */
2140 static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid,
2141 struct eth_classify_header *hdr,
2144 hdr->echo = cpu_to_le32(cid);
2145 hdr->rule_cnt = rule_cnt;
2148 static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp,
2149 unsigned long *accept_flags,
2150 struct eth_filter_rules_cmd *cmd,
2151 bool clear_accept_all)
2155 /* start with 'drop-all' */
2156 state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
2157 ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2159 if (test_bit(BNX2X_ACCEPT_UNICAST, accept_flags))
2160 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2162 if (test_bit(BNX2X_ACCEPT_MULTICAST, accept_flags))
2163 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2165 if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, accept_flags)) {
2166 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2167 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2170 if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, accept_flags)) {
2171 state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2172 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2175 if (test_bit(BNX2X_ACCEPT_BROADCAST, accept_flags))
2176 state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2178 if (test_bit(BNX2X_ACCEPT_UNMATCHED, accept_flags)) {
2179 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2180 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2183 if (test_bit(BNX2X_ACCEPT_ANY_VLAN, accept_flags))
2184 state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
2186 /* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
2187 if (clear_accept_all) {
2188 state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2189 state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2190 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2191 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2194 cmd->state = cpu_to_le16(state);
2197 static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
2198 struct bnx2x_rx_mode_ramrod_params *p)
2200 struct eth_filter_rules_ramrod_data *data = p->rdata;
2204 /* Reset the ramrod data buffer */
2205 memset(data, 0, sizeof(*data));
2207 /* Setup ramrod data */
2209 /* Tx (internal switching) */
2210 if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2211 data->rules[rule_idx].client_id = p->cl_id;
2212 data->rules[rule_idx].func_id = p->func_id;
2214 data->rules[rule_idx].cmd_general_data =
2215 ETH_FILTER_RULES_CMD_TX_CMD;
2217 bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
2218 &(data->rules[rule_idx++]),
2223 if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2224 data->rules[rule_idx].client_id = p->cl_id;
2225 data->rules[rule_idx].func_id = p->func_id;
2227 data->rules[rule_idx].cmd_general_data =
2228 ETH_FILTER_RULES_CMD_RX_CMD;
2230 bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
2231 &(data->rules[rule_idx++]),
2235 /* If FCoE Queue configuration has been requested, configure the Rx and
2236 * internal switching modes for this queue in separate rules.
2238 * The FCoE queue shall never be set to ACCEPT_ALL packets of any sort:
2239 * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
2241 if (test_bit(BNX2X_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
2242 /* Tx (internal switching) */
2243 if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2244 data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2245 data->rules[rule_idx].func_id = p->func_id;
2247 data->rules[rule_idx].cmd_general_data =
2248 ETH_FILTER_RULES_CMD_TX_CMD;
2250 bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
2251 &(data->rules[rule_idx]),
2257 if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2258 data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2259 data->rules[rule_idx].func_id = p->func_id;
2261 data->rules[rule_idx].cmd_general_data =
2262 ETH_FILTER_RULES_CMD_RX_CMD;
2264 bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
2265 &(data->rules[rule_idx]),
2271 /* Set the ramrod header (most importantly, the number of rules to configure). */
2274 bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
2276 DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx\n",
2277 data->header.rule_cnt, p->rx_accept_flags,
2278 p->tx_accept_flags);
2280 /* No need for an explicit memory barrier here as long as we
2281 * ensure the ordering of writing to the SPQ element
2282 * and updating of the SPQ producer which involves a memory
2283 * read. If the memory read is removed we will have to put a
2284 * full memory barrier there (inside bnx2x_sp_post()).
2288 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_FILTER_RULES, p->cid,
2289 U64_HI(p->rdata_mapping),
2290 U64_LO(p->rdata_mapping),
2291 ETH_CONNECTION_TYPE);
2295 /* Ramrod completion is pending */
2299 static int bnx2x_wait_rx_mode_comp_e2(struct bnx2x *bp,
2300 struct bnx2x_rx_mode_ramrod_params *p)
2302 return bnx2x_state_wait(bp, p->state, p->pstate);
2305 static int bnx2x_empty_rx_mode_wait(struct bnx2x *bp,
2306 struct bnx2x_rx_mode_ramrod_params *p)
2312 int bnx2x_config_rx_mode(struct bnx2x *bp,
2313 struct bnx2x_rx_mode_ramrod_params *p)
2317 /* Configure the new classification in the chip */
2318 rc = p->rx_mode_obj->config_rx_mode(bp, p);
2322 /* Wait for a ramrod completion if was requested */
2323 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
2324 rc = p->rx_mode_obj->wait_comp(bp, p);
2332 void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
2333 struct bnx2x_rx_mode_obj *o)
2335 if (CHIP_IS_E1x(bp)) {
2336 o->wait_comp = bnx2x_empty_rx_mode_wait;
2337 o->config_rx_mode = bnx2x_set_rx_mode_e1x;
2339 o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
2340 o->config_rx_mode = bnx2x_set_rx_mode_e2;
2344 /********************* Multicast verbs: SET, CLEAR ****************************/
2345 static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac)
2347 return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff;
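/* Illustrative note (not part of the driver): the bin is simply bits 31:24 of
 * the CRC32C over the six MAC bytes, i.e. one of 256 bins tracked in the
 * registry's approximate match bit vector. A user-space sketch of the same
 * computation, assuming a crc32c(seed, buf, len) helper is available:
 *
 *	static unsigned char mcast_bin(const unsigned char mac[6])
 *	{
 *		return (crc32c(0, mac, 6) >> 24) & 0xff;
 *	}
 */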
2350 struct bnx2x_mcast_mac_elem {
2351 struct list_head link;
2353 u8 pad[2]; /* For a natural alignment of the following buffer */
2356 struct bnx2x_pending_mcast_cmd {
2357 struct list_head link;
2358 int type; /* BNX2X_MCAST_CMD_X */
2360 struct list_head macs_head;
2361 u32 macs_num; /* Needed for DEL command */
2362 int next_bin; /* Needed for RESTORE flow with approximate match */
2365 bool done; /* Set to true when the command has been handled. In
2366 * practice this is used only in the 57712 flow, where a single
2367 * pending command may be handled in a few operations. On the
2368 * other chips every command is completed in a single ramrod,
2369 * so there is no need to utilize this field there.
2373 static int bnx2x_mcast_wait(struct bnx2x *bp,
2374 struct bnx2x_mcast_obj *o)
2376 if (bnx2x_state_wait(bp, o->sched_state, o->raw.pstate) ||
2377 o->raw.wait_comp(bp, &o->raw))
2383 static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
2384 struct bnx2x_mcast_obj *o,
2385 struct bnx2x_mcast_ramrod_params *p,
2386 enum bnx2x_mcast_cmd cmd)
2389 struct bnx2x_pending_mcast_cmd *new_cmd;
2390 struct bnx2x_mcast_mac_elem *cur_mac = NULL;
2391 struct bnx2x_mcast_list_elem *pos;
2392 int macs_list_len = ((cmd == BNX2X_MCAST_CMD_ADD) ?
2393 p->mcast_list_len : 0);
2395 /* If the command is empty ("handle pending commands only"), break */
2396 if (!p->mcast_list_len)
2399 total_sz = sizeof(*new_cmd) +
2400 macs_list_len * sizeof(struct bnx2x_mcast_mac_elem);
2402 /* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
2403 new_cmd = kzalloc(total_sz, GFP_ATOMIC);
2408 DP(BNX2X_MSG_SP, "About to enqueue a new %d command. macs_list_len=%d\n",
2409 cmd, macs_list_len);
2411 INIT_LIST_HEAD(&new_cmd->data.macs_head);
2413 new_cmd->type = cmd;
2414 new_cmd->done = false;
2417 case BNX2X_MCAST_CMD_ADD:
2418 cur_mac = (struct bnx2x_mcast_mac_elem *)
2419 ((u8 *)new_cmd + sizeof(*new_cmd));
2421 /* Push the MACs of the current command into the pending command's MACs list (FIFO). */
2424 list_for_each_entry(pos, &p->mcast_list, link) {
2425 memcpy(cur_mac->mac, pos->mac, ETH_ALEN);
2426 list_add_tail(&cur_mac->link, &new_cmd->data.macs_head);
2432 case BNX2X_MCAST_CMD_DEL:
2433 new_cmd->data.macs_num = p->mcast_list_len;
2436 case BNX2X_MCAST_CMD_RESTORE:
2437 new_cmd->data.next_bin = 0;
2442 BNX2X_ERR("Unknown command: %d\n", cmd);
2446 /* Push the new pending command to the tail of the pending list: FIFO */
2447 list_add_tail(&new_cmd->link, &o->pending_cmds_head);
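/* Illustrative note (not part of the driver): for an ADD command the pending
 * command header and its MAC elements are carved out of a single allocation,
 * with cur_mac starting right behind the struct:
 *
 *	+---------------------------------+------------+------------+-- ...
 *	| struct bnx2x_pending_mcast_cmd  | MAC elem 0 | MAC elem 1 |
 *	+---------------------------------+------------+------------+-- ...
 *
 * Each element is linked into data.macs_head, and a single kfree() of the
 * command later releases the header and all of its MAC elements at once.
 */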
2455 * bnx2x_mcast_get_next_bin - get the next set bin (index)
2458 * @last: index to start looking from (inclusive)
2460 * returns the next found (set) bin or a negative value if none is found.
2462 static inline int bnx2x_mcast_get_next_bin(struct bnx2x_mcast_obj *o, int last)
2464 int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
2466 for (i = last / BIT_VEC64_ELEM_SZ; i < BNX2X_MCAST_VEC_SZ; i++) {
2467 if (o->registry.aprox_match.vec[i])
2468 for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
2469 int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
2470 if (BIT_VEC64_TEST_BIT(o->registry.aprox_match.
2483 * bnx2x_mcast_clear_first_bin - find the first set bin and clear it
2487 * returns the index of the found bin or -1 if none is found
2489 static inline int bnx2x_mcast_clear_first_bin(struct bnx2x_mcast_obj *o)
2491 int cur_bit = bnx2x_mcast_get_next_bin(o, 0);
2494 BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
2499 static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o)
2501 struct bnx2x_raw_obj *raw = &o->raw;
2504 if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
2505 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2506 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
2508 if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
2509 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2510 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
2515 static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp,
2516 struct bnx2x_mcast_obj *o, int idx,
2517 union bnx2x_mcast_config_data *cfg_data,
2518 enum bnx2x_mcast_cmd cmd)
2520 struct bnx2x_raw_obj *r = &o->raw;
2521 struct eth_multicast_rules_ramrod_data *data =
2522 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2523 u8 func_id = r->func_id;
2524 u8 rx_tx_add_flag = bnx2x_mcast_get_rx_tx_flag(o);
2527 if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE))
2528 rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;
2530 data->rules[idx].cmd_general_data |= rx_tx_add_flag;
2532 /* Get a bin and update the bins' vector */
2534 case BNX2X_MCAST_CMD_ADD:
2535 bin = bnx2x_mcast_bin_from_mac(cfg_data->mac);
2536 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
2539 case BNX2X_MCAST_CMD_DEL:
2540 /* If there were no more bins to clear
2541 * (bnx2x_mcast_clear_first_bin() returns -1) then we would
2542 * clear any (0xff) bin.
2543 * See bnx2x_mcast_validate_e2() for an explanation of when this may happen. */
2546 bin = bnx2x_mcast_clear_first_bin(o);
2549 case BNX2X_MCAST_CMD_RESTORE:
2550 bin = cfg_data->bin;
2554 BNX2X_ERR("Unknown command: %d\n", cmd);
2558 DP(BNX2X_MSG_SP, "%s bin %d\n",
2559 ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
2560 "Setting" : "Clearing"), bin);
2562 data->rules[idx].bin_id = (u8)bin;
2563 data->rules[idx].func_id = func_id;
2564 data->rules[idx].engine_id = o->engine_id;
2568 * bnx2x_mcast_handle_restore_cmd_e2 - restore configuration from the registry
2570 * @bp: device handle
2572 * @start_bin: index in the registry to start from (inclusive)
2573 * @rdata_idx: index in the ramrod data to start from
2575 * returns last handled bin index or -1 if all bins have been handled
2577 static inline int bnx2x_mcast_handle_restore_cmd_e2(
2578 struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_bin,
2581 int cur_bin, cnt = *rdata_idx;
2582 union bnx2x_mcast_config_data cfg_data = {NULL};
2584 /* go through the registry and configure the bins from it */
2585 for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
2586 cur_bin = bnx2x_mcast_get_next_bin(o, cur_bin + 1)) {
2588 cfg_data.bin = (u8)cur_bin;
2589 o->set_one_rule(bp, o, cnt, &cfg_data,
2590 BNX2X_MCAST_CMD_RESTORE);
2594 DP(BNX2X_MSG_SP, "About to configure a bin %d\n", cur_bin);
2596 /* Break if we reached the maximum number of rules. */
2599 if (cnt >= o->max_cmd_len)
2608 static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp,
2609 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2612 struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n;
2613 int cnt = *line_idx;
2614 union bnx2x_mcast_config_data cfg_data = {NULL};
2616 list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head,
2619 cfg_data.mac = &pmac_pos->mac[0];
2620 o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
2624 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
2627 list_del(&pmac_pos->link);
2629 /* Break if we reached the maximum number of rules. */
2632 if (cnt >= o->max_cmd_len)
2638 /* if no more MACs to configure - we are done */
2639 if (list_empty(&cmd_pos->data.macs_head))
2640 cmd_pos->done = true;
2643 static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp,
2644 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2647 int cnt = *line_idx;
2649 while (cmd_pos->data.macs_num) {
2650 o->set_one_rule(bp, o, cnt, NULL, cmd_pos->type);
2654 cmd_pos->data.macs_num--;
2656 DP(BNX2X_MSG_SP, "Deleting MAC. %d left,cnt is %d\n",
2657 cmd_pos->data.macs_num, cnt);
2659 /* Break if we reached the maximum number of rules. */
2662 if (cnt >= o->max_cmd_len)
2668 /* If we cleared all bins - we are done */
2669 if (!cmd_pos->data.macs_num)
2670 cmd_pos->done = true;
2673 static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp,
2674 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2677 cmd_pos->data.next_bin = o->hdl_restore(bp, o, cmd_pos->data.next_bin,
2680 if (cmd_pos->data.next_bin < 0)
2681 /* If o->set_restore returned -1 we are done */
2682 cmd_pos->done = true;
2684 /* Start from the next bin next time */
2685 cmd_pos->data.next_bin++;
2688 static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp,
2689 struct bnx2x_mcast_ramrod_params *p)
2691 struct bnx2x_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
2693 struct bnx2x_mcast_obj *o = p->mcast_obj;
2695 list_for_each_entry_safe(cmd_pos, cmd_pos_n, &o->pending_cmds_head,
2697 switch (cmd_pos->type) {
2698 case BNX2X_MCAST_CMD_ADD:
2699 bnx2x_mcast_hdl_pending_add_e2(bp, o, cmd_pos, &cnt);
2702 case BNX2X_MCAST_CMD_DEL:
2703 bnx2x_mcast_hdl_pending_del_e2(bp, o, cmd_pos, &cnt);
2706 case BNX2X_MCAST_CMD_RESTORE:
2707 bnx2x_mcast_hdl_pending_restore_e2(bp, o, cmd_pos,
2712 BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
2716 /* If the command has been completed - remove it from the list
2717 * and free the memory
2719 if (cmd_pos->done) {
2720 list_del(&cmd_pos->link);
2724 /* Break if we reached the maximum number of rules */
2725 if (cnt >= o->max_cmd_len)
2732 static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp,
2733 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2736 struct bnx2x_mcast_list_elem *mlist_pos;
2737 union bnx2x_mcast_config_data cfg_data = {NULL};
2738 int cnt = *line_idx;
2740 list_for_each_entry(mlist_pos, &p->mcast_list, link) {
2741 cfg_data.mac = mlist_pos->mac;
2742 o->set_one_rule(bp, o, cnt, &cfg_data, BNX2X_MCAST_CMD_ADD);
2746 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
2753 static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp,
2754 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2757 int cnt = *line_idx, i;
2759 for (i = 0; i < p->mcast_list_len; i++) {
2760 o->set_one_rule(bp, o, cnt, NULL, BNX2X_MCAST_CMD_DEL);
2764 DP(BNX2X_MSG_SP, "Deleting MAC. %d left\n",
2765 p->mcast_list_len - i - 1);
2772 * bnx2x_mcast_handle_current_cmd - handle the current (not yet pending) command
2774 * @bp: device handle
2777 * @start_cnt: first line in the ramrod data that may be used
2779 * This function is called iff there is enough room for the current command in the ramrod data.
2781 * Returns number of lines filled in the ramrod data in total.
2783 static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp,
2784 struct bnx2x_mcast_ramrod_params *p,
2785 enum bnx2x_mcast_cmd cmd,
2788 struct bnx2x_mcast_obj *o = p->mcast_obj;
2789 int cnt = start_cnt;
2791 DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
2794 case BNX2X_MCAST_CMD_ADD:
2795 bnx2x_mcast_hdl_add(bp, o, p, &cnt);
2798 case BNX2X_MCAST_CMD_DEL:
2799 bnx2x_mcast_hdl_del(bp, o, p, &cnt);
2802 case BNX2X_MCAST_CMD_RESTORE:
2803 o->hdl_restore(bp, o, 0, &cnt);
2807 BNX2X_ERR("Unknown command: %d\n", cmd);
2811 /* The current command has been handled */
2812 p->mcast_list_len = 0;
2817 static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
2818 struct bnx2x_mcast_ramrod_params *p,
2819 enum bnx2x_mcast_cmd cmd)
2821 struct bnx2x_mcast_obj *o = p->mcast_obj;
2822 int reg_sz = o->get_registry_size(o);
2825 /* DEL command deletes all currently configured MACs */
2826 case BNX2X_MCAST_CMD_DEL:
2827 o->set_registry_size(o, 0);
2830 /* RESTORE command will restore the entire multicast configuration */
2831 case BNX2X_MCAST_CMD_RESTORE:
2832 /* Here we set the approximate amount of work to do, which may
2833 * in fact turn out to be less: some MACs in postponed ADD
2834 * command(s) scheduled before this command may fall into
2835 * the same bin, so the actual number of bins set in the
2836 * registry would be smaller than estimated here. See
2837 * bnx2x_mcast_set_one_rule_e2() for further details.
2839 p->mcast_list_len = reg_sz;
2842 case BNX2X_MCAST_CMD_ADD:
2843 case BNX2X_MCAST_CMD_CONT:
2844 /* Here we assume that all new MACs will fall into new bins.
2845 * However we will correct the real registry size after we
2846 * handle all pending commands.
2848 o->set_registry_size(o, reg_sz + p->mcast_list_len);
2852 BNX2X_ERR("Unknown command: %d\n", cmd);
2856 /* Increase the total number of MACs pending to be configured */
2857 o->total_pending_num += p->mcast_list_len;
2862 static void bnx2x_mcast_revert_e2(struct bnx2x *bp,
2863 struct bnx2x_mcast_ramrod_params *p,
2866 struct bnx2x_mcast_obj *o = p->mcast_obj;
2868 o->set_registry_size(o, old_num_bins);
2869 o->total_pending_num -= p->mcast_list_len;
2873 * bnx2x_mcast_set_rdata_hdr_e2 - sets a header values
2875 * @bp: device handle
2877 * @len: number of rules to handle
2879 static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp,
2880 struct bnx2x_mcast_ramrod_params *p,
2883 struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
2884 struct eth_multicast_rules_ramrod_data *data =
2885 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2887 data->header.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
2888 (BNX2X_FILTER_MCAST_PENDING <<
2889 BNX2X_SWCID_SHIFT));
2890 data->header.rule_cnt = len;
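/* Illustrative note (not part of the driver): the echo field packs the
 * software CID into the low bits (BNX2X_SWCID_MASK) and the pending filter
 * state above BNX2X_SWCID_SHIFT, so a completion handler can recover both,
 * for example:
 *
 *	u32 echo  = le32_to_cpu(data->header.echo);
 *	u32 cid   = echo & BNX2X_SWCID_MASK;
 *	u32 state = echo >> BNX2X_SWCID_SHIFT;	// BNX2X_FILTER_MCAST_PENDING here
 */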
2894 * bnx2x_mcast_refresh_registry_e2 - recalculate the actual number of set bins
2896 * @bp: device handle
2899 * Recalculate the actual number of set bins in the registry using Brian
2900 * Kernighan's algorithm: its execution complexity is proportional to the number of set bins.
2902 * returns 0 for compliance with bnx2x_mcast_refresh_registry_e1().
2904 static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp,
2905 struct bnx2x_mcast_obj *o)
2910 for (i = 0; i < BNX2X_MCAST_VEC_SZ; i++) {
2911 elem = o->registry.aprox_match.vec[i];
2916 o->set_registry_size(o, cnt);
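/* Illustrative sketch (not part of the driver): Kernighan's trick clears the
 * lowest set bit on every iteration, so the counting loop runs once per set
 * bin rather than once per vector bit. A stand-alone version over a single
 * 64-bit vector element:
 *
 *	static int count_set_bins(u64 elem)
 *	{
 *		int cnt = 0;
 *
 *		while (elem) {
 *			elem &= elem - 1;	// clear the lowest set bit
 *			cnt++;
 *		}
 *		return cnt;
 *	}
 */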
2921 static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
2922 struct bnx2x_mcast_ramrod_params *p,
2923 enum bnx2x_mcast_cmd cmd)
2925 struct bnx2x_raw_obj *raw = &p->mcast_obj->raw;
2926 struct bnx2x_mcast_obj *o = p->mcast_obj;
2927 struct eth_multicast_rules_ramrod_data *data =
2928 (struct eth_multicast_rules_ramrod_data *)(raw->rdata);
2931 /* Reset the ramrod data buffer */
2932 memset(data, 0, sizeof(*data));
2934 cnt = bnx2x_mcast_handle_pending_cmds_e2(bp, p);
2936 /* If there are no more pending commands - clear SCHEDULED state */
2937 if (list_empty(&o->pending_cmds_head))
2940 /* The below may be true iff there was enough room in the ramrod
2941 * data for all pending commands and for the current
2942 * command. Otherwise the current command would have been added
2943 * to the pending commands and p->mcast_list_len would have been zeroed.
2946 if (p->mcast_list_len > 0)
2947 cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, cnt);
2949 /* We've pulled out some MACs - update the total number of outstanding MACs. */
2952 o->total_pending_num -= cnt;
2955 WARN_ON(o->total_pending_num < 0);
2956 WARN_ON(cnt > o->max_cmd_len);
2958 bnx2x_mcast_set_rdata_hdr_e2(bp, p, (u8)cnt);
2960 /* Update a registry size if there are no more pending operations.
2962 * We don't want to change the value of the registry size if there are
2963 * pending operations because we want it to always be equal to the
2964 * exact or the approximate number (see bnx2x_mcast_validate_e2()) of
2965 * set bins after the last requested operation in order to properly
2966 * evaluate the size of the next DEL/RESTORE operation.
2968 * Note that we update the registry itself during command(s) handling
2969 * - see bnx2x_mcast_set_one_rule_e2(). That's because for 57712 we
2970 * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
2971 * with a limited amount of update commands (per MAC/bin) and we don't
2972 * know in this scope what the actual state of bins configuration is
2973 * going to be after this ramrod.
2975 if (!o->total_pending_num)
2976 bnx2x_mcast_refresh_registry_e2(bp, o);
2978 /* If CLEAR_ONLY was requested - don't send a ramrod and clear
2979 * RAMROD_PENDING status immediately.
2981 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
2982 raw->clear_pending(raw);
2985 /* No need for an explicit memory barrier here as long as we
2986 * ensure the ordering of writing to the SPQ element
2987 * and updating of the SPQ producer which involves a memory
2988 * read. If the memory read is removed we will have to put a
2989 * full memory barrier there (inside bnx2x_sp_post()).
2993 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_MULTICAST_RULES,
2994 raw->cid, U64_HI(raw->rdata_mapping),
2995 U64_LO(raw->rdata_mapping),
2996 ETH_CONNECTION_TYPE);
3000 /* Ramrod completion is pending */
3005 static int bnx2x_mcast_validate_e1h(struct bnx2x *bp,
3006 struct bnx2x_mcast_ramrod_params *p,
3007 enum bnx2x_mcast_cmd cmd)
3009 /* Mark that there is work to do */
3010 if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE))
3011 p->mcast_list_len = 1;
3016 static void bnx2x_mcast_revert_e1h(struct bnx2x *bp,
3017 struct bnx2x_mcast_ramrod_params *p,
3023 #define BNX2X_57711_SET_MC_FILTER(filter, bit) \
3025 (filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
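/* Illustrative note (not part of the driver): the filter is an array of
 * 32-bit words, so (bit) >> 5 selects the word and (bit) & 0x1f the bit
 * inside it. For example, setting bin 70 touches bit 6 of word 2:
 *
 *	u32 mc_filter[MC_HASH_SIZE] = {0};
 *
 *	BNX2X_57711_SET_MC_FILTER(mc_filter, 70);
 *	// mc_filter[2] == (1 << 6)
 */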
3028 static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp,
3029 struct bnx2x_mcast_obj *o,
3030 struct bnx2x_mcast_ramrod_params *p,
3033 struct bnx2x_mcast_list_elem *mlist_pos;
3036 list_for_each_entry(mlist_pos, &p->mcast_list, link) {
3037 bit = bnx2x_mcast_bin_from_mac(mlist_pos->mac);
3038 BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3040 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC, bin %d\n",
3041 mlist_pos->mac, bit);
3043 /* bookkeeping... */
3044 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
3049 static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp,
3050 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
3055 for (bit = bnx2x_mcast_get_next_bin(o, 0);
3057 bit = bnx2x_mcast_get_next_bin(o, bit + 1)) {
3058 BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3059 DP(BNX2X_MSG_SP, "About to set bin %d\n", bit);
3063 /* On 57711 we write the multicast MACs' approximate match
3064 * table directly into the TSTORM internal RAM, so we don't
3065 * need any special tricks to make it work.
3067 static int bnx2x_mcast_setup_e1h(struct bnx2x *bp,
3068 struct bnx2x_mcast_ramrod_params *p,
3069 enum bnx2x_mcast_cmd cmd)
3072 struct bnx2x_mcast_obj *o = p->mcast_obj;
3073 struct bnx2x_raw_obj *r = &o->raw;
3075 /* If CLEAR_ONLY has been requested we only clear the registry
3076 * and the pending bit; otherwise program the approximate match filter.
3078 if (!test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3079 u32 mc_filter[MC_HASH_SIZE] = {0};
3081 /* Set the multicast filter bits before writing them into
3082 * the internal memory.
3085 case BNX2X_MCAST_CMD_ADD:
3086 bnx2x_mcast_hdl_add_e1h(bp, o, p, mc_filter);
3089 case BNX2X_MCAST_CMD_DEL:
3091 "Invalidating multicast MACs configuration\n");
3093 /* clear the registry */
3094 memset(o->registry.aprox_match.vec, 0,
3095 sizeof(o->registry.aprox_match.vec));
3098 case BNX2X_MCAST_CMD_RESTORE:
3099 bnx2x_mcast_hdl_restore_e1h(bp, o, p, mc_filter);
3103 BNX2X_ERR("Unknown command: %d\n", cmd);
3107 /* Set the mcast filter in the internal memory */
3108 for (i = 0; i < MC_HASH_SIZE; i++)
3109 REG_WR(bp, MC_HASH_OFFSET(bp, i), mc_filter[i]);
3111 /* clear the registry */
3112 memset(o->registry.aprox_match.vec, 0,
3113 sizeof(o->registry.aprox_match.vec));
3116 r->clear_pending(r);
3121 static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
3122 struct bnx2x_mcast_ramrod_params *p,
3123 enum bnx2x_mcast_cmd cmd)
3125 struct bnx2x_mcast_obj *o = p->mcast_obj;
3126 int reg_sz = o->get_registry_size(o);
3129 /* DEL command deletes all currently configured MACs */
3130 case BNX2X_MCAST_CMD_DEL:
3131 o->set_registry_size(o, 0);
3134 /* RESTORE command will restore the entire multicast configuration */
3135 case BNX2X_MCAST_CMD_RESTORE:
3136 p->mcast_list_len = reg_sz;
3137 DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n",
3138 cmd, p->mcast_list_len);
3141 case BNX2X_MCAST_CMD_ADD:
3142 case BNX2X_MCAST_CMD_CONT:
3143 /* Multicast MACs on 57710 are configured as unicast MACs and
3144 * there is only a limited number of CAM entries for that purpose.
3147 if (p->mcast_list_len > o->max_cmd_len) {
3148 BNX2X_ERR("Can't configure more than %d multicast MACs on 57710\n",
3152 /* Every configured MAC should be cleared if a DEL command is
3153 * called. Only the last ADD command is relevant since
3154 * every ADD command overrides the previous configuration.
3156 DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
3157 if (p->mcast_list_len > 0)
3158 o->set_registry_size(o, p->mcast_list_len);
3163 BNX2X_ERR("Unknown command: %d\n", cmd);
3167 /* We want to ensure that commands are executed one by one for 57710.
3168 * Therefore each non-empty command will consume o->max_cmd_len.
3170 if (p->mcast_list_len)
3171 o->total_pending_num += o->max_cmd_len;
3176 static void bnx2x_mcast_revert_e1(struct bnx2x *bp,
3177 struct bnx2x_mcast_ramrod_params *p,
3180 struct bnx2x_mcast_obj *o = p->mcast_obj;
3182 o->set_registry_size(o, old_num_macs);
3184 /* If the current command hasn't been handled yet and we got
3185 * here, it means that it is meant to be dropped and we have to
3186 * update the number of outstanding MACs accordingly.
3188 if (p->mcast_list_len)
3189 o->total_pending_num -= o->max_cmd_len;
3192 static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp,
3193 struct bnx2x_mcast_obj *o, int idx,
3194 union bnx2x_mcast_config_data *cfg_data,
3195 enum bnx2x_mcast_cmd cmd)
3197 struct bnx2x_raw_obj *r = &o->raw;
3198 struct mac_configuration_cmd *data =
3199 (struct mac_configuration_cmd *)(r->rdata);
3202 if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) {
3203 bnx2x_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr,
3204 &data->config_table[idx].middle_mac_addr,
3205 &data->config_table[idx].lsb_mac_addr,
3208 data->config_table[idx].vlan_id = 0;
3209 data->config_table[idx].pf_id = r->func_id;
3210 data->config_table[idx].clients_bit_vector =
3211 cpu_to_le32(1 << r->cl_id);
3213 SET_FLAG(data->config_table[idx].flags,
3214 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3215 T_ETH_MAC_COMMAND_SET);
3220 * bnx2x_mcast_set_rdata_hdr_e1 - set header values in mac_configuration_cmd
3222 * @bp: device handle
3224 * @len: number of rules to handle
3226 static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp,
3227 struct bnx2x_mcast_ramrod_params *p,
3230 struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
3231 struct mac_configuration_cmd *data =
3232 (struct mac_configuration_cmd *)(r->rdata);
3234 u8 offset = (CHIP_REV_IS_SLOW(bp) ?
3235 BNX2X_MAX_EMUL_MULTI*(1 + r->func_id) :
3236 BNX2X_MAX_MULTICAST*(1 + r->func_id));
3238 data->hdr.offset = offset;
3239 data->hdr.client_id = cpu_to_le16(0xff);
3240 data->hdr.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
3241 (BNX2X_FILTER_MCAST_PENDING <<
3242 BNX2X_SWCID_SHIFT));
3243 data->hdr.length = len;
3247 * bnx2x_mcast_handle_restore_cmd_e1 - restore command for 57710
3249 * @bp: device handle
3251 * @start_idx: index in the registry to start from
3252 * @rdata_idx: index in the ramrod data to start from
3254 * The restore command for 57710 is like all other commands - always a standalone
3255 * command - start_idx and rdata_idx will always be 0. This function will always succeed.
3257 * returns -1 to comply with the 57712 variant.
3259 static inline int bnx2x_mcast_handle_restore_cmd_e1(
3260 struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_idx,
3263 struct bnx2x_mcast_mac_elem *elem;
3265 union bnx2x_mcast_config_data cfg_data = {NULL};
3267 /* go through the registry and configure the MACs from it. */
3268 list_for_each_entry(elem, &o->registry.exact_match.macs, link) {
3269 cfg_data.mac = &elem->mac[0];
3270 o->set_one_rule(bp, o, i, &cfg_data, BNX2X_MCAST_CMD_RESTORE);
3274 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
3283 static inline int bnx2x_mcast_handle_pending_cmds_e1(
3284 struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p)
3286 struct bnx2x_pending_mcast_cmd *cmd_pos;
3287 struct bnx2x_mcast_mac_elem *pmac_pos;
3288 struct bnx2x_mcast_obj *o = p->mcast_obj;
3289 union bnx2x_mcast_config_data cfg_data = {NULL};
3292 /* If nothing to be done - return */
3293 if (list_empty(&o->pending_cmds_head))
3296 /* Handle the first command */
3297 cmd_pos = list_first_entry(&o->pending_cmds_head,
3298 struct bnx2x_pending_mcast_cmd, link);
3300 switch (cmd_pos->type) {
3301 case BNX2X_MCAST_CMD_ADD:
3302 list_for_each_entry(pmac_pos, &cmd_pos->data.macs_head, link) {
3303 cfg_data.mac = &pmac_pos->mac[0];
3304 o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
3308 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
3313 case BNX2X_MCAST_CMD_DEL:
3314 cnt = cmd_pos->data.macs_num;
3315 DP(BNX2X_MSG_SP, "About to delete %d multicast MACs\n", cnt);
3318 case BNX2X_MCAST_CMD_RESTORE:
3319 o->hdl_restore(bp, o, 0, &cnt);
3323 BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
3327 list_del(&cmd_pos->link);
3334 * bnx2x_get_fw_mac_addr - revert the bnx2x_set_fw_mac_addr().
3341 static inline void bnx2x_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
3342 __le16 *fw_lo, u8 *mac)
3344 mac[1] = ((u8 *)fw_hi)[0];
3345 mac[0] = ((u8 *)fw_hi)[1];
3346 mac[3] = ((u8 *)fw_mid)[0];
3347 mac[2] = ((u8 *)fw_mid)[1];
3348 mac[5] = ((u8 *)fw_lo)[0];
3349 mac[4] = ((u8 *)fw_lo)[1];
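/* Illustrative note (not part of the driver): the firmware keeps the MAC as
 * three 16-bit words with the two octets of each pair swapped, so for the
 * address 00:11:22:33:44:55 the bytes in memory are
 *
 *	fw_hi  = { 0x11, 0x00 }
 *	fw_mid = { 0x33, 0x22 }
 *	fw_lo  = { 0x55, 0x44 }
 *
 * and the swizzle above simply reverses bnx2x_set_fw_mac_addr().
 */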
3353 * bnx2x_mcast_refresh_registry_e1 - update the exact match multicast registry
3355 * @bp: device handle
3358 * Check the ramrod data's first entry flag to see if it's a DELETE or ADD command
3359 * and update the registry correspondingly: if ADD - allocate memory and add
3360 * the entries to the registry (list), if DELETE - clear the registry and free the memory.
3363 static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp,
3364 struct bnx2x_mcast_obj *o)
3366 struct bnx2x_raw_obj *raw = &o->raw;
3367 struct bnx2x_mcast_mac_elem *elem;
3368 struct mac_configuration_cmd *data =
3369 (struct mac_configuration_cmd *)(raw->rdata);
3371 /* If first entry contains a SET bit - the command was ADD,
3372 * otherwise - DEL_ALL
3374 if (GET_FLAG(data->config_table[0].flags,
3375 MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) {
3376 int i, len = data->hdr.length;
3378 /* Break if it was a RESTORE command */
3379 if (!list_empty(&o->registry.exact_match.macs))
3382 elem = kcalloc(len, sizeof(*elem), GFP_ATOMIC);
3384 BNX2X_ERR("Failed to allocate registry memory\n");
3388 for (i = 0; i < len; i++, elem++) {
3389 bnx2x_get_fw_mac_addr(
3390 &data->config_table[i].msb_mac_addr,
3391 &data->config_table[i].middle_mac_addr,
3392 &data->config_table[i].lsb_mac_addr,
3394 DP(BNX2X_MSG_SP, "Adding registry entry for [%pM]\n",
3396 list_add_tail(&elem->link,
3397 &o->registry.exact_match.macs);
3400 elem = list_first_entry(&o->registry.exact_match.macs,
3401 struct bnx2x_mcast_mac_elem, link);
3402 DP(BNX2X_MSG_SP, "Deleting a registry\n");
3404 INIT_LIST_HEAD(&o->registry.exact_match.macs);
3410 static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
3411 struct bnx2x_mcast_ramrod_params *p,
3412 enum bnx2x_mcast_cmd cmd)
3414 struct bnx2x_mcast_obj *o = p->mcast_obj;
3415 struct bnx2x_raw_obj *raw = &o->raw;
3416 struct mac_configuration_cmd *data =
3417 (struct mac_configuration_cmd *)(raw->rdata);
3420 /* Reset the ramrod data buffer */
3421 memset(data, 0, sizeof(*data));
3423 /* First set all entries as invalid */
3424 for (i = 0; i < o->max_cmd_len ; i++)
3425 SET_FLAG(data->config_table[i].flags,
3426 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3427 T_ETH_MAC_COMMAND_INVALIDATE);
3429 /* Handle pending commands first */
3430 cnt = bnx2x_mcast_handle_pending_cmds_e1(bp, p);
3432 /* If there are no more pending commands - clear SCHEDULED state */
3433 if (list_empty(&o->pending_cmds_head))
3436 /* The below may be true iff there were no pending commands */
3438 cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, 0);
3440 /* For 57710 every command has o->max_cmd_len length to ensure that
3441 * commands are done one at a time.
3443 o->total_pending_num -= o->max_cmd_len;
3447 WARN_ON(cnt > o->max_cmd_len);
3449 /* Set ramrod header (in particular, a number of entries to update) */
3450 bnx2x_mcast_set_rdata_hdr_e1(bp, p, (u8)cnt);
3452 /* Update the registry: the registry contents must always be up
3453 * to date in order to be able to execute a RESTORE opcode. Here
3454 * we use the fact that for 57710 we send one command at a time,
3455 * hence we may take the registry update out of the command handling
3456 * and do it in a simpler way here.
3458 rc = bnx2x_mcast_refresh_registry_e1(bp, o);
3462 /* If CLEAR_ONLY was requested - don't send a ramrod and clear
3463 * RAMROD_PENDING status immediately.
3465 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3466 raw->clear_pending(raw);
3469 /* No need for an explicit memory barrier here as long as we
3470 * ensure the ordering of writing to the SPQ element
3471 * and updating of the SPQ producer which involves a memory
3472 * read. If the memory read is removed we will have to put a
3473 * full memory barrier there (inside bnx2x_sp_post()).
3477 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, raw->cid,
3478 U64_HI(raw->rdata_mapping),
3479 U64_LO(raw->rdata_mapping),
3480 ETH_CONNECTION_TYPE);
3484 /* Ramrod completion is pending */
3489 static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o)
3491 return o->registry.exact_match.num_macs_set;
3494 static int bnx2x_mcast_get_registry_size_aprox(struct bnx2x_mcast_obj *o)
3496 return o->registry.aprox_match.num_bins_set;
3499 static void bnx2x_mcast_set_registry_size_exact(struct bnx2x_mcast_obj *o,
3502 o->registry.exact_match.num_macs_set = n;
3505 static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o,
3508 o->registry.aprox_match.num_bins_set = n;
3511 int bnx2x_config_mcast(struct bnx2x *bp,
3512 struct bnx2x_mcast_ramrod_params *p,
3513 enum bnx2x_mcast_cmd cmd)
3515 struct bnx2x_mcast_obj *o = p->mcast_obj;
3516 struct bnx2x_raw_obj *r = &o->raw;
3517 int rc = 0, old_reg_size;
3519 /* This is needed to recover number of currently configured mcast macs
3520 * in case of failure.
3522 old_reg_size = o->get_registry_size(o);
3524 /* Do some calculations and checks */
3525 rc = o->validate(bp, p, cmd);
3529 /* Return if there is no work to do */
3530 if ((!p->mcast_list_len) && (!o->check_sched(o)))
3533 DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d\n",
3534 o->total_pending_num, p->mcast_list_len, o->max_cmd_len);
3536 /* Enqueue the current command to the pending list if we can't complete
3537 * it in the current iteration
3539 if (r->check_pending(r) ||
3540 ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
3541 rc = o->enqueue_cmd(bp, p->mcast_obj, p, cmd);
3545 /* As long as the current command is in a command list we
3546 * don't need to handle it separately.
3548 p->mcast_list_len = 0;
3551 if (!r->check_pending(r)) {
3553 /* Set 'pending' state */
3556 /* Configure the new classification in the chip */
3557 rc = o->config_mcast(bp, p, cmd);
3561 /* Wait for a ramrod completion if was requested */
3562 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
3563 rc = o->wait_comp(bp, o);
3569 r->clear_pending(r);
3572 o->revert(bp, p, old_reg_size);
3577 static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o)
3579 smp_mb__before_atomic();
3580 clear_bit(o->sched_state, o->raw.pstate);
3581 smp_mb__after_atomic();
3584 static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o)
3586 smp_mb__before_atomic();
3587 set_bit(o->sched_state, o->raw.pstate);
3588 smp_mb__after_atomic();
3591 static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o)
3593 return !!test_bit(o->sched_state, o->raw.pstate);
3596 static bool bnx2x_mcast_check_pending(struct bnx2x_mcast_obj *o)
3598 return o->raw.check_pending(&o->raw) || o->check_sched(o);
3601 void bnx2x_init_mcast_obj(struct bnx2x *bp,
3602 struct bnx2x_mcast_obj *mcast_obj,
3603 u8 mcast_cl_id, u32 mcast_cid, u8 func_id,
3604 u8 engine_id, void *rdata, dma_addr_t rdata_mapping,
3605 int state, unsigned long *pstate, bnx2x_obj_type type)
3607 memset(mcast_obj, 0, sizeof(*mcast_obj));
3609 bnx2x_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
3610 rdata, rdata_mapping, state, pstate, type);
3612 mcast_obj->engine_id = engine_id;
3614 INIT_LIST_HEAD(&mcast_obj->pending_cmds_head);
3616 mcast_obj->sched_state = BNX2X_FILTER_MCAST_SCHED;
3617 mcast_obj->check_sched = bnx2x_mcast_check_sched;
3618 mcast_obj->set_sched = bnx2x_mcast_set_sched;
3619 mcast_obj->clear_sched = bnx2x_mcast_clear_sched;
3621 if (CHIP_IS_E1(bp)) {
3622 mcast_obj->config_mcast = bnx2x_mcast_setup_e1;
3623 mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd;
3624 mcast_obj->hdl_restore =
3625 bnx2x_mcast_handle_restore_cmd_e1;
3626 mcast_obj->check_pending = bnx2x_mcast_check_pending;
3628 if (CHIP_REV_IS_SLOW(bp))
3629 mcast_obj->max_cmd_len = BNX2X_MAX_EMUL_MULTI;
3631 mcast_obj->max_cmd_len = BNX2X_MAX_MULTICAST;
3633 mcast_obj->wait_comp = bnx2x_mcast_wait;
3634 mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e1;
3635 mcast_obj->validate = bnx2x_mcast_validate_e1;
3636 mcast_obj->revert = bnx2x_mcast_revert_e1;
3637 mcast_obj->get_registry_size =
3638 bnx2x_mcast_get_registry_size_exact;
3639 mcast_obj->set_registry_size =
3640 bnx2x_mcast_set_registry_size_exact;
3642 /* 57710 is the only chip that uses the exact match for mcast at the moment. */
3645 INIT_LIST_HEAD(&mcast_obj->registry.exact_match.macs);
3647 } else if (CHIP_IS_E1H(bp)) {
3648 mcast_obj->config_mcast = bnx2x_mcast_setup_e1h;
3649 mcast_obj->enqueue_cmd = NULL;
3650 mcast_obj->hdl_restore = NULL;
3651 mcast_obj->check_pending = bnx2x_mcast_check_pending;
3653 /* 57711 doesn't send a ramrod, so it has unlimited credit for one command. */
3656 mcast_obj->max_cmd_len = -1;
3657 mcast_obj->wait_comp = bnx2x_mcast_wait;
3658 mcast_obj->set_one_rule = NULL;
3659 mcast_obj->validate = bnx2x_mcast_validate_e1h;
3660 mcast_obj->revert = bnx2x_mcast_revert_e1h;
3661 mcast_obj->get_registry_size =
3662 bnx2x_mcast_get_registry_size_aprox;
3663 mcast_obj->set_registry_size =
3664 bnx2x_mcast_set_registry_size_aprox;
3666 mcast_obj->config_mcast = bnx2x_mcast_setup_e2;
3667 mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd;
3668 mcast_obj->hdl_restore =
3669 bnx2x_mcast_handle_restore_cmd_e2;
3670 mcast_obj->check_pending = bnx2x_mcast_check_pending;
3671 /* TODO: There should be a proper HSI define for this number!!!
3673 mcast_obj->max_cmd_len = 16;
3674 mcast_obj->wait_comp = bnx2x_mcast_wait;
3675 mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e2;
3676 mcast_obj->validate = bnx2x_mcast_validate_e2;
3677 mcast_obj->revert = bnx2x_mcast_revert_e2;
3678 mcast_obj->get_registry_size =
3679 bnx2x_mcast_get_registry_size_aprox;
3680 mcast_obj->set_registry_size =
3681 bnx2x_mcast_set_registry_size_aprox;
3685 /*************************** Credit handling **********************************/
3688 * atomic_add_ifless - add if the result is less than a given value.
3690 * @v: pointer of type atomic_t
3691 * @a: the amount to add to v...
3692 * @u: ...if (v + a) is less than u.
3694 * returns true if (v + a) was less than u, and false otherwise.
3697 static inline bool __atomic_add_ifless(atomic_t *v, int a, int u)
3703 if (unlikely(c + a >= u))
3706 old = atomic_cmpxchg((v), c, c + a);
3707 if (likely(old == c))
3716 * atomic_dec_ifmoe - decrement if the result is greater than or equal to a given value.
3718 * @v: pointer of type atomic_t
3719 * @a: the amount to subtract from v...
3720 * @u: ...if (v - a) is greater than or equal to u.
3722 * returns true if (v - a) was greater than or equal to u, and false
3725 static inline bool __atomic_dec_ifmoe(atomic_t *v, int a, int u)
3731 if (unlikely(c - a < u))
3734 old = atomic_cmpxchg((v), c, c - a);
3735 if (likely(old == c))
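/* Illustrative sketch (not part of the driver): both helpers above use the
 * classic lock-free read/check/cmpxchg retry loop. A stand-alone variant that
 * decrements only while the result stays non-negative would look like:
 *
 *	static bool dec_if_ge_zero(atomic_t *v, int a)
 *	{
 *		int c, old;
 *
 *		for (;;) {
 *			c = atomic_read(v);
 *			if (c - a < 0)
 *				return false;	// would drop below zero
 *			old = atomic_cmpxchg(v, c, c - a);
 *			if (old == c)
 *				return true;	// no one raced with us
 *			// v changed under us - re-read and retry
 *		}
 *	}
 *
 * The credit pool code below relies on exactly this pattern instead of a
 * spinlock.
 */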
3743 static bool bnx2x_credit_pool_get(struct bnx2x_credit_pool_obj *o, int cnt)
3748 rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
3754 static bool bnx2x_credit_pool_put(struct bnx2x_credit_pool_obj *o, int cnt)
3760 /* Don't allow a refill if credit + cnt > pool_sz */
3761 rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);
3768 static int bnx2x_credit_pool_check(struct bnx2x_credit_pool_obj *o)
3773 cur_credit = atomic_read(&o->credit);
3778 static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o,
3784 static bool bnx2x_credit_pool_get_entry(
3785 struct bnx2x_credit_pool_obj *o,
3792 /* Find "internal cam-offset" then add to base for this object... */
3793 for (vec = 0; vec < BNX2X_POOL_VEC_SIZE; vec++) {
3795 /* Skip the current vector if there are no free entries in it */
3796 if (!o->pool_mirror[vec])
3799 /* If we've got here we are going to find a free entry */
3800 for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
3801 i < BIT_VEC64_ELEM_SZ; idx++, i++)
3803 if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
3805 BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
3806 *offset = o->base_pool_offset + idx;
3814 static bool bnx2x_credit_pool_put_entry(
3815 struct bnx2x_credit_pool_obj *o,
3818 if (offset < o->base_pool_offset)
3821 offset -= o->base_pool_offset;
3823 if (offset >= o->pool_sz)
3826 /* Return the entry to the pool */
3827 BIT_VEC64_SET_BIT(o->pool_mirror, offset);
3832 static bool bnx2x_credit_pool_put_entry_always_true(
3833 struct bnx2x_credit_pool_obj *o,
3839 static bool bnx2x_credit_pool_get_entry_always_true(
3840 struct bnx2x_credit_pool_obj *o,
3847 * bnx2x_init_credit_pool - initialize credit pool internals.
3850 * @base: Base entry in the CAM to use.
3851 * @credit: pool size.
3853 * If base is negative, no CAM entry handling will be performed.
3854 * If credit is negative, pool operations will always succeed (unlimited pool).
3857 static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
3858 int base, int credit)
3860 /* Zero the object first */
3861 memset(p, 0, sizeof(*p));
3863 /* Set the table to all 1s */
3864 memset(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));
3866 /* Init a pool as full */
3867 atomic_set(&p->credit, credit);
3869 /* The total pool size */
3870 p->pool_sz = credit;
3872 p->base_pool_offset = base;
3874 /* Commit the change */
3877 p->check = bnx2x_credit_pool_check;
3879 /* if pool credit is negative - disable the checks */
3881 p->put = bnx2x_credit_pool_put;
3882 p->get = bnx2x_credit_pool_get;
3883 p->put_entry = bnx2x_credit_pool_put_entry;
3884 p->get_entry = bnx2x_credit_pool_get_entry;
3886 p->put = bnx2x_credit_pool_always_true;
3887 p->get = bnx2x_credit_pool_always_true;
3888 p->put_entry = bnx2x_credit_pool_put_entry_always_true;
3889 p->get_entry = bnx2x_credit_pool_get_entry_always_true;
3892 /* If base is negative - disable entries handling */
3894 p->put_entry = bnx2x_credit_pool_put_entry_always_true;
3895 p->get_entry = bnx2x_credit_pool_get_entry_always_true;
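/* Illustrative usage sketch (not part of the driver): a classification flow
 * would typically take one credit and, when the pool also manages CAM
 * offsets, one entry before sending a ramrod, and return both when the rule
 * is removed (or on failure). 'p' stands for an initialised pool object:
 *
 *	int offset;
 *
 *	if (!p->get(p, 1))
 *		return -ENOSPC;			// pool exhausted
 *	if (!p->get_entry(p, &offset)) {
 *		p->put(p, 1);
 *		return -ENOSPC;
 *	}
 *	// ... 'offset' is the CAM index to program ...
 *
 *	p->put_entry(p, offset);		// on removal
 *	p->put(p, 1);
 */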
3899 void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
3900 struct bnx2x_credit_pool_obj *p, u8 func_id,
3903 /* TODO: this will be defined in consts as well... */
3904 #define BNX2X_CAM_SIZE_EMUL 5
3908 if (CHIP_IS_E1(bp)) {
3909 /* In E1, Multicast is saved in cam... */
3910 if (!CHIP_REV_IS_SLOW(bp))
3911 cam_sz = (MAX_MAC_CREDIT_E1 / 2) - BNX2X_MAX_MULTICAST;
3913 cam_sz = BNX2X_CAM_SIZE_EMUL - BNX2X_MAX_EMUL_MULTI;
3915 bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
3917 } else if (CHIP_IS_E1H(bp)) {
3918 /* CAM credit is equally divided between all active functions on the port. */
3921 if ((func_num > 0)) {
3922 if (!CHIP_REV_IS_SLOW(bp))
3923 cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num));
3925 cam_sz = BNX2X_CAM_SIZE_EMUL;
3926 bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
3928 /* this should never happen! Block MAC operations. */
3929 bnx2x_init_credit_pool(p, 0, 0);
3934 /* CAM credit is equally divided between all active functions on the path. */
3937 if ((func_num > 0)) {
3938 if (!CHIP_REV_IS_SLOW(bp))
3939 cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
3941 cam_sz = BNX2X_CAM_SIZE_EMUL;
3943 /* No need for CAM entries handling for 57712 and newer. */
3946 bnx2x_init_credit_pool(p, -1, cam_sz);
3948 /* this should never happen! Block MAC operations. */
3949 bnx2x_init_credit_pool(p, 0, 0);
3954 void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
3955 struct bnx2x_credit_pool_obj *p,
3959 if (CHIP_IS_E1x(bp)) {
3960 /* There is no VLAN credit in HW on 57710 and 57711; only
3961 * MAC / MAC-VLAN can be set.
3963 bnx2x_init_credit_pool(p, 0, -1);
3965 /* CAM credit is equally divided between all active functions
3969 int credit = MAX_VLAN_CREDIT_E2 / func_num;
3970 bnx2x_init_credit_pool(p, func_id * credit, credit);
3972 /* this should never happen! Block VLAN operations. */
3973 bnx2x_init_credit_pool(p, 0, 0);
3977 /****************** RSS Configuration ******************/
3979 * bnx2x_debug_print_ind_table - prints the indirection table configuration.
3981 * @bp: driver handle
3982 * @p: pointer to rss configuration
3984 * Prints it when NETIF_MSG_IFUP debug level is configured.
3986 static inline void bnx2x_debug_print_ind_table(struct bnx2x *bp,
3987 struct bnx2x_config_rss_params *p)
3991 DP(BNX2X_MSG_SP, "Setting indirection table to:\n");
3992 DP(BNX2X_MSG_SP, "0x0000: ");
3993 for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
3994 DP_CONT(BNX2X_MSG_SP, "0x%02x ", p->ind_table[i]);
3996 /* Print 4 bytes in a line */
3997 if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) &&
3998 (((i + 1) & 0x3) == 0)) {
3999 DP_CONT(BNX2X_MSG_SP, "\n");
4000 DP(BNX2X_MSG_SP, "0x%04x: ", i + 1);
4004 DP_CONT(BNX2X_MSG_SP, "\n");
4008 * bnx2x_setup_rss - configure RSS
4010 * @bp: device handle
4011 * @p: rss configuration
4013 * sends an UPDATE ramrod for that matter.
4015 static int bnx2x_setup_rss(struct bnx2x *bp,
4016 struct bnx2x_config_rss_params *p)
4018 struct bnx2x_rss_config_obj *o = p->rss_obj;
4019 struct bnx2x_raw_obj *r = &o->raw;
4020 struct eth_rss_update_ramrod_data *data =
4021 (struct eth_rss_update_ramrod_data *)(r->rdata);
4025 memset(data, 0, sizeof(*data));
4027 DP(BNX2X_MSG_SP, "Configuring RSS\n");
4029 /* Set an echo field */
4030 data->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
4031 (r->state << BNX2X_SWCID_SHIFT));
4034 if (test_bit(BNX2X_RSS_MODE_DISABLED, &p->rss_flags))
4035 rss_mode = ETH_RSS_MODE_DISABLED;
4036 else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags))
4037 rss_mode = ETH_RSS_MODE_REGULAR;
4039 data->rss_mode = rss_mode;
4041 DP(BNX2X_MSG_SP, "rss_mode=%d\n", rss_mode);
4043 /* RSS capabilities */
4044 if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags))
4045 data->capabilities |=
4046 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
4048 if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags))
4049 data->capabilities |=
4050 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
4052 if (test_bit(BNX2X_RSS_IPV4_UDP, &p->rss_flags))
4053 data->capabilities |=
4054 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;
4056 if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags))
4057 data->capabilities |=
4058 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
4060 if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags))
4061 data->capabilities |=
4062 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
4064 if (test_bit(BNX2X_RSS_IPV6_UDP, &p->rss_flags))
4065 data->capabilities |=
4066 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;
4069 data->rss_result_mask = p->rss_result_mask;
4072 data->rss_engine_id = o->engine_id;
4074 DP(BNX2X_MSG_SP, "rss_engine_id=%d\n", data->rss_engine_id);
4076 /* Indirection table */
4077 memcpy(data->indirection_table, p->ind_table,
4078 T_ETH_INDIRECTION_TABLE_SIZE);
4080 /* Remember the last configuration */
4081 memcpy(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
4083 /* Print the indirection table */
4084 if (netif_msg_ifup(bp))
4085 bnx2x_debug_print_ind_table(bp, p);
4088 if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
4089 memcpy(&data->rss_key[0], &p->rss_key[0],
4090 sizeof(data->rss_key));
4091 data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
4094 /* No need for an explicit memory barrier here as long as we
4095 * ensure the ordering of writing to the SPQ element
4096 * and updating of the SPQ producer which involves a memory
4097 * read. If the memory read is removed we will have to put a
4098 * full memory barrier there (inside bnx2x_sp_post()).
4102 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_RSS_UPDATE, r->cid,
4103 U64_HI(r->rdata_mapping),
4104 U64_LO(r->rdata_mapping),
4105 ETH_CONNECTION_TYPE);
4113 void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
4116 memcpy(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table));
4119 int bnx2x_config_rss(struct bnx2x *bp,
4120 struct bnx2x_config_rss_params *p)
4123 struct bnx2x_rss_config_obj *o = p->rss_obj;
4124 struct bnx2x_raw_obj *r = &o->raw;
4126 /* Do nothing if only driver cleanup was requested */
4127 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
4128 DP(BNX2X_MSG_SP, "Not configuring RSS ramrod_flags=%lx\n",
4135 rc = o->config_rss(bp, p);
4137 r->clear_pending(r);
4141 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
4142 rc = r->wait_comp(bp, r);
4147 void bnx2x_init_rss_config_obj(struct bnx2x *bp,
4148 struct bnx2x_rss_config_obj *rss_obj,
4149 u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
4150 void *rdata, dma_addr_t rdata_mapping,
4151 int state, unsigned long *pstate,
4152 bnx2x_obj_type type)
4154 bnx2x_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
4155 rdata_mapping, state, pstate, type);
4157 rss_obj->engine_id = engine_id;
4158 rss_obj->config_rss = bnx2x_setup_rss;
4161 /********************** Queue state object ***********************************/
4164 * bnx2x_queue_state_change - perform Queue state change transition
4166 * @bp: device handle
4167 * @params: parameters to perform the transition
4169 * returns 0 in case of a successfully completed transition, a negative error
4170 * code in case of failure, or a positive (EBUSY) value if there is a completion
4171 * that is still pending (possible only if RAMROD_COMP_WAIT is
4172 * not set in params->ramrod_flags for asynchronous commands).
4175 int bnx2x_queue_state_change(struct bnx2x *bp,
4176 struct bnx2x_queue_state_params *params)
4178 struct bnx2x_queue_sp_obj *o = params->q_obj;
4179 int rc, pending_bit;
4180 unsigned long *pending = &o->pending;
4182 /* Check that the requested transition is legal */
4183 rc = o->check_transition(bp, o, params);
4185 BNX2X_ERR("check transition returned an error. rc %d\n", rc);
4189 /* Set "pending" bit */
4190 DP(BNX2X_MSG_SP, "pending bit was=%lx\n", o->pending);
4191 pending_bit = o->set_pending(o, params);
4192 DP(BNX2X_MSG_SP, "pending bit now=%lx\n", o->pending);
4194 /* Don't send a command if only driver cleanup was requested */
4195 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
4196 o->complete_cmd(bp, o, pending_bit);
4199 rc = o->send_cmd(bp, params);
4201 o->next_state = BNX2X_Q_STATE_MAX;
4202 clear_bit(pending_bit, pending);
4203 smp_mb__after_atomic();
4207 if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
4208 rc = o->wait_comp(bp, o, pending_bit);
4216 return !!test_bit(pending_bit, pending);
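/* Illustrative usage sketch (not part of the driver): callers fill a
 * bnx2x_queue_state_params structure and let the state machine validate and
 * send the ramrod. Assuming 'q_obj' points to an already initialised queue
 * object, a synchronous EMPTY transition might look like:
 *
 *	struct bnx2x_queue_state_params q_params = {NULL};
 *	int rc;
 *
 *	q_params.q_obj = q_obj;
 *	q_params.cmd = BNX2X_Q_CMD_EMPTY;
 *	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
 *
 *	rc = bnx2x_queue_state_change(bp, &q_params);
 */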
4219 static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj,
4220 struct bnx2x_queue_state_params *params)
4222 enum bnx2x_queue_cmd cmd = params->cmd, bit;
4224 /* ACTIVATE and DEACTIVATE commands are implemented on top of the UPDATE command. */
4227 if ((cmd == BNX2X_Q_CMD_ACTIVATE) ||
4228 (cmd == BNX2X_Q_CMD_DEACTIVATE))
4229 bit = BNX2X_Q_CMD_UPDATE;
4233 set_bit(bit, &obj->pending);
4237 static int bnx2x_queue_wait_comp(struct bnx2x *bp,
4238 struct bnx2x_queue_sp_obj *o,
4239 enum bnx2x_queue_cmd cmd)
4241 return bnx2x_state_wait(bp, cmd, &o->pending);
4245 * bnx2x_queue_comp_cmd - complete the state change command.
4247 * @bp: device handle
4251 * Checks that the arrived completion is expected.
4253 static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
4254 struct bnx2x_queue_sp_obj *o,
4255 enum bnx2x_queue_cmd cmd)
4257 unsigned long cur_pending = o->pending;
4259 if (!test_and_clear_bit(cmd, &cur_pending)) {
4260 BNX2X_ERR("Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d\n",
4261 cmd, o->cids[BNX2X_PRIMARY_CID_INDEX],
4262 o->state, cur_pending, o->next_state);
4266 if (o->next_tx_only >= o->max_cos)
4267 /* >= because tx only must always be smaller than cos since the
4268 * primary connection supports COS 0
4270 BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d",
4271 o->next_tx_only, o->max_cos);
4274 "Completing command %d for queue %d, setting state to %d\n",
4275 cmd, o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_state);
4277 if (o->next_tx_only) /* print num tx-only if any exist */
4278 DP(BNX2X_MSG_SP, "primary cid %d: num tx-only cons %d\n",
4279 o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_tx_only);
4281 o->state = o->next_state;
4282 o->num_tx_only = o->next_tx_only;
4283 o->next_state = BNX2X_Q_STATE_MAX;
4285 /* It's important that o->state and o->next_state are
4286 * updated before o->pending.
4290 clear_bit(cmd, &o->pending);
4291 smp_mb__after_atomic();
4296 static void bnx2x_q_fill_setup_data_e2(struct bnx2x *bp,
4297 struct bnx2x_queue_state_params *cmd_params,
4298 struct client_init_ramrod_data *data)
4300 struct bnx2x_queue_setup_params *params = &cmd_params->params.setup;
4304 /* IPv6 TPA supported for E2 and above only */
4305 data->rx.tpa_en |= test_bit(BNX2X_Q_FLG_TPA_IPV6, ¶ms->flags) *
4306 CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
4309 static void bnx2x_q_fill_init_general_data(struct bnx2x *bp,
4310 struct bnx2x_queue_sp_obj *o,
4311 struct bnx2x_general_setup_params *params,
4312 struct client_init_general_data *gen_data,
4313 unsigned long *flags)
4315 gen_data->client_id = o->cl_id;
4317 if (test_bit(BNX2X_Q_FLG_STATS, flags)) {
4318 gen_data->statistics_counter_id =
4320 gen_data->statistics_en_flg = 1;
4321 gen_data->statistics_zero_flg =
4322 test_bit(BNX2X_Q_FLG_ZERO_STATS, flags);
4324 gen_data->statistics_counter_id =
4325 DISABLE_STATISTIC_COUNTER_ID_VALUE;
4327 gen_data->is_fcoe_flg = test_bit(BNX2X_Q_FLG_FCOE, flags);
4328 gen_data->activate_flg = test_bit(BNX2X_Q_FLG_ACTIVE, flags);
4329 gen_data->sp_client_id = params->spcl_id;
4330 gen_data->mtu = cpu_to_le16(params->mtu);
4331 gen_data->func_id = o->func_id;
4333 gen_data->cos = params->cos;
4335 gen_data->traffic_type =
4336 test_bit(BNX2X_Q_FLG_FCOE, flags) ?
4337 LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
4339 DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d\n",
4340 gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg);
4343 static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
4344 struct bnx2x_txq_setup_params *params,
4345 struct client_init_tx_data *tx_data,
4346 unsigned long *flags)
4348 tx_data->enforce_security_flg =
4349 test_bit(BNX2X_Q_FLG_TX_SEC, flags);
4350 tx_data->default_vlan =
4351 cpu_to_le16(params->default_vlan);
4352 tx_data->default_vlan_flg =
4353 test_bit(BNX2X_Q_FLG_DEF_VLAN, flags);
4354 tx_data->tx_switching_flg =
4355 test_bit(BNX2X_Q_FLG_TX_SWITCH, flags);
4356 tx_data->anti_spoofing_flg =
4357 test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags);
4358 tx_data->force_default_pri_flg =
4359 test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags);
4361 tx_data->tunnel_lso_inc_ip_id =
4362 test_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, flags);
4363 tx_data->tunnel_non_lso_pcsum_location =
4364 test_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, flags) ? PCSUM_ON_PKT :
4367 tx_data->tx_status_block_id = params->fw_sb_id;
4368 tx_data->tx_sb_index_number = params->sb_cq_index;
4369 tx_data->tss_leading_client_id = params->tss_leading_cl_id;
4371 tx_data->tx_bd_page_base.lo =
4372 cpu_to_le32(U64_LO(params->dscr_map));
4373 tx_data->tx_bd_page_base.hi =
4374 cpu_to_le32(U64_HI(params->dscr_map));
4376 /* Don't configure any Tx switching mode during queue SETUP */
4380 static void bnx2x_q_fill_init_pause_data(struct bnx2x_queue_sp_obj *o,
4381 struct rxq_pause_params *params,
4382 struct client_init_rx_data *rx_data)
4384 /* flow control data */
4385 rx_data->cqe_pause_thr_low = cpu_to_le16(params->rcq_th_lo);
4386 rx_data->cqe_pause_thr_high = cpu_to_le16(params->rcq_th_hi);
4387 rx_data->bd_pause_thr_low = cpu_to_le16(params->bd_th_lo);
4388 rx_data->bd_pause_thr_high = cpu_to_le16(params->bd_th_hi);
4389 rx_data->sge_pause_thr_low = cpu_to_le16(params->sge_th_lo);
4390 rx_data->sge_pause_thr_high = cpu_to_le16(params->sge_th_hi);
4391 rx_data->rx_cos_mask = cpu_to_le16(params->pri_map);
4394 static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o,
4395 struct bnx2x_rxq_setup_params *params,
4396 struct client_init_rx_data *rx_data,
4397 unsigned long *flags)
4399 rx_data->tpa_en = test_bit(BNX2X_Q_FLG_TPA, flags) *
4400 CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
4401 rx_data->tpa_en |= test_bit(BNX2X_Q_FLG_TPA_GRO, flags) *
4402 CLIENT_INIT_RX_DATA_TPA_MODE;
4403 rx_data->vmqueue_mode_en_flg = 0;
4405 rx_data->cache_line_alignment_log_size =
4406 params->cache_line_log;
4407 rx_data->enable_dynamic_hc =
4408 test_bit(BNX2X_Q_FLG_DHC, flags);
4409 rx_data->max_sges_for_packet = params->max_sges_pkt;
4410 rx_data->client_qzone_id = params->cl_qzone_id;
4411 rx_data->max_agg_size = cpu_to_le16(params->tpa_agg_sz);
4413 /* Always start in DROP_ALL mode */
4414 rx_data->state = cpu_to_le16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
4415 CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);
4417 /* We don't set drop flags */
4418 rx_data->drop_ip_cs_err_flg = 0;
4419 rx_data->drop_tcp_cs_err_flg = 0;
4420 rx_data->drop_ttl0_flg = 0;
4421 rx_data->drop_udp_cs_err_flg = 0;
4422 rx_data->inner_vlan_removal_enable_flg =
4423 test_bit(BNX2X_Q_FLG_VLAN, flags);
4424 rx_data->outer_vlan_removal_enable_flg =
4425 test_bit(BNX2X_Q_FLG_OV, flags);
4426 rx_data->status_block_id = params->fw_sb_id;
4427 rx_data->rx_sb_index_number = params->sb_cq_index;
4428 rx_data->max_tpa_queues = params->max_tpa_queues;
4429 rx_data->max_bytes_on_bd = cpu_to_le16(params->buf_sz);
4430 rx_data->sge_buff_size = cpu_to_le16(params->sge_buf_sz);
4431 rx_data->bd_page_base.lo =
4432 cpu_to_le32(U64_LO(params->dscr_map));
4433 rx_data->bd_page_base.hi =
4434 cpu_to_le32(U64_HI(params->dscr_map));
4435 rx_data->sge_page_base.lo =
4436 cpu_to_le32(U64_LO(params->sge_map));
4437 rx_data->sge_page_base.hi =
4438 cpu_to_le32(U64_HI(params->sge_map));
4439 rx_data->cqe_page_base.lo =
4440 cpu_to_le32(U64_LO(params->rcq_map));
4441 rx_data->cqe_page_base.hi =
4442 cpu_to_le32(U64_HI(params->rcq_map));
4443 rx_data->is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS, flags);
4445 if (test_bit(BNX2X_Q_FLG_MCAST, flags)) {
4446 rx_data->approx_mcast_engine_id = params->mcast_engine_id;
4447 rx_data->is_approx_mcast = 1;
4448 }
4450 rx_data->rss_engine_id = params->rss_engine_id;
4452 /* silent vlan removal */
4453 rx_data->silent_vlan_removal_flg =
4454 test_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, flags);
4455 rx_data->silent_vlan_value =
4456 cpu_to_le16(params->silent_removal_value);
4457 rx_data->silent_vlan_mask =
4458 cpu_to_le16(params->silent_removal_mask);
4461 /* initialize the general, tx and rx parts of a queue object */
4462 static void bnx2x_q_fill_setup_data_cmn(struct bnx2x *bp,
4463 struct bnx2x_queue_state_params *cmd_params,
4464 struct client_init_ramrod_data *data)
4466 bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4467 &cmd_params->params.setup.gen_params,
4468 &data->general,
4469 &cmd_params->params.setup.flags);
4471 bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4472 &cmd_params->params.setup.txq_params,
4473 &data->tx,
4474 &cmd_params->params.setup.flags);
4476 bnx2x_q_fill_init_rx_data(cmd_params->q_obj,
4477 &cmd_params->params.setup.rxq_params,
4478 &data->rx,
4479 &cmd_params->params.setup.flags);
4481 bnx2x_q_fill_init_pause_data(cmd_params->q_obj,
4482 &cmd_params->params.setup.pause_params,
4483 &data->rx);
4484 }
4486 /* initialize the general and tx parts of a tx-only queue object */
4487 static void bnx2x_q_fill_setup_tx_only(struct bnx2x *bp,
4488 struct bnx2x_queue_state_params *cmd_params,
4489 struct tx_queue_init_ramrod_data *data)
4491 bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4492 &cmd_params->params.tx_only.gen_params,
4493 &data->general,
4494 &cmd_params->params.tx_only.flags);
4496 bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4497 &cmd_params->params.tx_only.txq_params,
4498 &data->tx,
4499 &cmd_params->params.tx_only.flags);
4501 DP(BNX2X_MSG_SP, "cid %d, tx bd page lo %x hi %x",
4502 cmd_params->q_obj->cids[0],
4503 data->tx.tx_bd_page_base.lo,
4504 data->tx.tx_bd_page_base.hi);
4508 * bnx2x_q_init - init HW/FW queue
4510 * @bp: device handle
4513 * HW/FW initial Queue configuration:
4514 * - HC: Rx and Tx
4515 * - CDU context validation
4518 static inline int bnx2x_q_init(struct bnx2x *bp,
4519 struct bnx2x_queue_state_params *params)
4521 struct bnx2x_queue_sp_obj *o = params->q_obj;
4522 struct bnx2x_queue_init_params *init = &params->params.init;
4523 u16 hc_usec;
4524 u8 cos;
4526 /* Tx HC configuration */
4527 if (test_bit(BNX2X_Q_TYPE_HAS_TX, &o->type) &&
4528 test_bit(BNX2X_Q_FLG_HC, &init->tx.flags)) {
4529 hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;
4531 bnx2x_update_coalesce_sb_index(bp, init->tx.fw_sb_id,
4532 init->tx.sb_cq_index,
4533 !test_bit(BNX2X_Q_FLG_HC_EN, &init->tx.flags),
4534 hc_usec);
4535 }
4537 /* Rx HC configuration */
4538 if (test_bit(BNX2X_Q_TYPE_HAS_RX, &o->type) &&
4539 test_bit(BNX2X_Q_FLG_HC, &init->rx.flags)) {
4540 hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;
4542 bnx2x_update_coalesce_sb_index(bp, init->rx.fw_sb_id,
4543 init->rx.sb_cq_index,
4544 !test_bit(BNX2X_Q_FLG_HC_EN, &init->rx.flags),
4545 hc_usec);
4546 }
4548 /* Set CDU context validation values */
4549 for (cos = 0; cos < o->max_cos; cos++) {
4550 DP(BNX2X_MSG_SP, "setting context validation. cid %d, cos %d\n",
4551 o->cids[cos], cos);
4552 DP(BNX2X_MSG_SP, "context pointer %p\n", init->cxts[cos]);
4553 bnx2x_set_ctx_validation(bp, init->cxts[cos], o->cids[cos]);
4554 }
4556 /* As no ramrod is sent, complete the command immediately */
4557 o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT);
4559 return 0;
4560 }
4565 static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
4566 struct bnx2x_queue_state_params *params)
4568 struct bnx2x_queue_sp_obj *o = params->q_obj;
4569 struct client_init_ramrod_data *rdata =
4570 (struct client_init_ramrod_data *)o->rdata;
4571 dma_addr_t data_mapping = o->rdata_mapping;
4572 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4574 /* Clear the ramrod data */
4575 memset(rdata, 0, sizeof(*rdata));
4577 /* Fill the ramrod data */
4578 bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4580 /* No need for an explicit memory barrier here as long as we
4581 * ensure the ordering of writing to the SPQ element
4582 * and updating of the SPQ producer which involves a memory
4583 * read. If the memory read is removed we will have to put a
4584 * full memory barrier there (inside bnx2x_sp_post()).
4585 */
4586 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4587 U64_HI(data_mapping),
4588 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4591 static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
4592 struct bnx2x_queue_state_params *params)
4594 struct bnx2x_queue_sp_obj *o = params->q_obj;
4595 struct client_init_ramrod_data *rdata =
4596 (struct client_init_ramrod_data *)o->rdata;
4597 dma_addr_t data_mapping = o->rdata_mapping;
4598 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4600 /* Clear the ramrod data */
4601 memset(rdata, 0, sizeof(*rdata));
4603 /* Fill the ramrod data */
4604 bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4605 bnx2x_q_fill_setup_data_e2(bp, params, rdata);
4607 /* No need for an explicit memory barrier here as long as we
4608 * ensure the ordering of writing to the SPQ element
4609 * and updating of the SPQ producer which involves a memory
4610 * read. If the memory read is removed we will have to put a
4611 * full memory barrier there (inside bnx2x_sp_post()).
4612 */
4613 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4614 U64_HI(data_mapping),
4615 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4618 static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
4619 struct bnx2x_queue_state_params *params)
4621 struct bnx2x_queue_sp_obj *o = params->q_obj;
4622 struct tx_queue_init_ramrod_data *rdata =
4623 (struct tx_queue_init_ramrod_data *)o->rdata;
4624 dma_addr_t data_mapping = o->rdata_mapping;
4625 int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
4626 struct bnx2x_queue_setup_tx_only_params *tx_only_params =
4627 &params->params.tx_only;
4628 u8 cid_index = tx_only_params->cid_index;
4630 if (cid_index >= o->max_cos) {
4631 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4632 o->cl_id, cid_index);
4633 return -EINVAL;
4634 }
4636 DP(BNX2X_MSG_SP, "parameters received: cos: %d sp-id: %d\n",
4637 tx_only_params->gen_params.cos,
4638 tx_only_params->gen_params.spcl_id);
4640 /* Clear the ramrod data */
4641 memset(rdata, 0, sizeof(*rdata));
4643 /* Fill the ramrod data */
4644 bnx2x_q_fill_setup_tx_only(bp, params, rdata);
4646 DP(BNX2X_MSG_SP, "sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d\n",
4647 o->cids[cid_index], rdata->general.client_id,
4648 rdata->general.sp_client_id, rdata->general.cos);
4650 /* No need for an explicit memory barrier here as long as we
4651 * ensure the ordering of writing to the SPQ element
4652 * and updating of the SPQ producer which involves a memory
4653 * read. If the memory read is removed we will have to put a
4654 * full memory barrier there (inside bnx2x_sp_post()).
4655 */
4656 return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
4657 U64_HI(data_mapping),
4658 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4661 static void bnx2x_q_fill_update_data(struct bnx2x *bp,
4662 struct bnx2x_queue_sp_obj *obj,
4663 struct bnx2x_queue_update_params *params,
4664 struct client_update_ramrod_data *data)
4666 /* Client ID of the client to update */
4667 data->client_id = obj->cl_id;
4669 /* Function ID of the client to update */
4670 data->func_id = obj->func_id;
4672 /* Default VLAN value */
4673 data->default_vlan = cpu_to_le16(params->def_vlan);
4675 /* Inner VLAN stripping */
4676 data->inner_vlan_removal_enable_flg =
4677 test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM, &params->update_flags);
4678 data->inner_vlan_removal_change_flg =
4679 test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
4680 &params->update_flags);
4682 /* Outer VLAN stripping */
4683 data->outer_vlan_removal_enable_flg =
4684 test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, &params->update_flags);
4685 data->outer_vlan_removal_change_flg =
4686 test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG,
4687 &params->update_flags);
4689 /* Drop packets that have source MAC that doesn't belong to this
4690 * Queue.
4691 */
4692 data->anti_spoofing_enable_flg =
4693 test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, &params->update_flags);
4694 data->anti_spoofing_change_flg =
4695 test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, &params->update_flags);
4697 /* Activate/Deactivate */
4698 data->activate_flg =
4699 test_bit(BNX2X_Q_UPDATE_ACTIVATE, &params->update_flags);
4700 data->activate_change_flg =
4701 test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &params->update_flags);
4703 /* Enable default VLAN */
4704 data->default_vlan_enable_flg =
4705 test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, &params->update_flags);
4706 data->default_vlan_change_flg =
4707 test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
4708 &params->update_flags);
4710 /* silent vlan removal */
4711 data->silent_vlan_change_flg =
4712 test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
4713 &params->update_flags);
4714 data->silent_vlan_removal_flg =
4715 test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, &params->update_flags);
4716 data->silent_vlan_value = cpu_to_le16(params->silent_removal_value);
4717 data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask);
4719 /* tx switching */
4720 data->tx_switching_flg =
4721 test_bit(BNX2X_Q_UPDATE_TX_SWITCHING, &params->update_flags);
4722 data->tx_switching_change_flg =
4723 test_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG,
4724 &params->update_flags);
4725 }
4727 static inline int bnx2x_q_send_update(struct bnx2x *bp,
4728 struct bnx2x_queue_state_params *params)
4730 struct bnx2x_queue_sp_obj *o = params->q_obj;
4731 struct client_update_ramrod_data *rdata =
4732 (struct client_update_ramrod_data *)o->rdata;
4733 dma_addr_t data_mapping = o->rdata_mapping;
4734 struct bnx2x_queue_update_params *update_params =
4735 &params->params.update;
4736 u8 cid_index = update_params->cid_index;
4738 if (cid_index >= o->max_cos) {
4739 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4740 o->cl_id, cid_index);
4741 return -EINVAL;
4742 }
4744 /* Clear the ramrod data */
4745 memset(rdata, 0, sizeof(*rdata));
4747 /* Fill the ramrod data */
4748 bnx2x_q_fill_update_data(bp, o, update_params, rdata);
4750 /* No need for an explicit memory barrier here as long as we
4751 * ensure the ordering of writing to the SPQ element
4752 * and updating of the SPQ producer which involves a memory
4753 * read. If the memory read is removed we will have to put a
4754 * full memory barrier there (inside bnx2x_sp_post()).
4755 */
4756 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
4757 o->cids[cid_index], U64_HI(data_mapping),
4758 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4762 * bnx2x_q_send_deactivate - send DEACTIVATE command
4764 * @bp: device handle
4767 * implemented using the UPDATE command.
4769 static inline int bnx2x_q_send_deactivate(struct bnx2x *bp,
4770 struct bnx2x_queue_state_params *params)
4772 struct bnx2x_queue_update_params *update = &params->params.update;
4774 memset(update, 0, sizeof(*update));
4776 __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4778 return bnx2x_q_send_update(bp, params);
4782 * bnx2x_q_send_activate - send ACTIVATE command
4784 * @bp: device handle
4787 * implemented using the UPDATE command.
4789 static inline int bnx2x_q_send_activate(struct bnx2x *bp,
4790 struct bnx2x_queue_state_params *params)
4792 struct bnx2x_queue_update_params *update = &params->params.update;
4794 memset(update, 0, sizeof(*update));
4796 __set_bit(BNX2X_Q_UPDATE_ACTIVATE, &update->update_flags);
4797 __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4799 return bnx2x_q_send_update(bp, params);
4800 }
4802 static void bnx2x_q_fill_update_tpa_data(struct bnx2x *bp,
4803 struct bnx2x_queue_sp_obj *obj,
4804 struct bnx2x_queue_update_tpa_params *params,
4805 struct tpa_update_ramrod_data *data)
4807 data->client_id = obj->cl_id;
4808 data->complete_on_both_clients = params->complete_on_both_clients;
4809 data->dont_verify_rings_pause_thr_flg =
4810 params->dont_verify_thr;
4811 data->max_agg_size = cpu_to_le16(params->max_agg_sz);
4812 data->max_sges_for_packet = params->max_sges_pkt;
4813 data->max_tpa_queues = params->max_tpa_queues;
4814 data->sge_buff_size = cpu_to_le16(params->sge_buff_sz);
4815 data->sge_page_base_hi = cpu_to_le32(U64_HI(params->sge_map));
4816 data->sge_page_base_lo = cpu_to_le32(U64_LO(params->sge_map));
4817 data->sge_pause_thr_high = cpu_to_le16(params->sge_pause_thr_high);
4818 data->sge_pause_thr_low = cpu_to_le16(params->sge_pause_thr_low);
4819 data->tpa_mode = params->tpa_mode;
4820 data->update_ipv4 = params->update_ipv4;
4821 data->update_ipv6 = params->update_ipv6;
4824 static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
4825 struct bnx2x_queue_state_params *params)
4827 struct bnx2x_queue_sp_obj *o = params->q_obj;
4828 struct tpa_update_ramrod_data *rdata =
4829 (struct tpa_update_ramrod_data *)o->rdata;
4830 dma_addr_t data_mapping = o->rdata_mapping;
4831 struct bnx2x_queue_update_tpa_params *update_tpa_params =
4832 &params->params.update_tpa;
4833 u16 type;
4835 /* Clear the ramrod data */
4836 memset(rdata, 0, sizeof(*rdata));
4838 /* Fill the ramrod data */
4839 bnx2x_q_fill_update_tpa_data(bp, o, update_tpa_params, rdata);
4841 /* Add the function id inside the type, so that sp post function
4842 * doesn't automatically add the PF func-id, this is required
4843 * for operations done by PFs on behalf of their VFs
4844 */
4845 type = ETH_CONNECTION_TYPE |
4846 ((o->func_id) << SPE_HDR_FUNCTION_ID_SHIFT);
4848 /* No need for an explicit memory barrier here as long as we
4849 * ensure the ordering of writing to the SPQ element
4850 * and updating of the SPQ producer which involves a memory
4851 * read. If the memory read is removed we will have to put a
4852 * full memory barrier there (inside bnx2x_sp_post()).
4853 */
4854 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TPA_UPDATE,
4855 o->cids[BNX2X_PRIMARY_CID_INDEX],
4856 U64_HI(data_mapping),
4857 U64_LO(data_mapping), type);
4860 static inline int bnx2x_q_send_halt(struct bnx2x *bp,
4861 struct bnx2x_queue_state_params *params)
4863 struct bnx2x_queue_sp_obj *o = params->q_obj;
4865 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT,
4866 o->cids[BNX2X_PRIMARY_CID_INDEX], 0, o->cl_id,
4867 ETH_CONNECTION_TYPE);
4870 static inline int bnx2x_q_send_cfc_del(struct bnx2x *bp,
4871 struct bnx2x_queue_state_params *params)
4873 struct bnx2x_queue_sp_obj *o = params->q_obj;
4874 u8 cid_idx = params->params.cfc_del.cid_index;
4876 if (cid_idx >= o->max_cos) {
4877 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4878 o->cl_id, cid_idx);
4879 return -EINVAL;
4880 }
4882 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL,
4883 o->cids[cid_idx], 0, 0, NONE_CONNECTION_TYPE);
4886 static inline int bnx2x_q_send_terminate(struct bnx2x *bp,
4887 struct bnx2x_queue_state_params *params)
4889 struct bnx2x_queue_sp_obj *o = params->q_obj;
4890 u8 cid_index = params->params.terminate.cid_index;
4892 if (cid_index >= o->max_cos) {
4893 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4894 o->cl_id, cid_index);
4895 return -EINVAL;
4896 }
4898 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE,
4899 o->cids[cid_index], 0, 0, ETH_CONNECTION_TYPE);
4902 static inline int bnx2x_q_send_empty(struct bnx2x *bp,
4903 struct bnx2x_queue_state_params *params)
4905 struct bnx2x_queue_sp_obj *o = params->q_obj;
4907 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_EMPTY,
4908 o->cids[BNX2X_PRIMARY_CID_INDEX], 0, 0,
4909 ETH_CONNECTION_TYPE);
4912 static inline int bnx2x_queue_send_cmd_cmn(struct bnx2x *bp,
4913 struct bnx2x_queue_state_params *params)
4915 switch (params->cmd) {
4916 case BNX2X_Q_CMD_INIT:
4917 return bnx2x_q_init(bp, params);
4918 case BNX2X_Q_CMD_SETUP_TX_ONLY:
4919 return bnx2x_q_send_setup_tx_only(bp, params);
4920 case BNX2X_Q_CMD_DEACTIVATE:
4921 return bnx2x_q_send_deactivate(bp, params);
4922 case BNX2X_Q_CMD_ACTIVATE:
4923 return bnx2x_q_send_activate(bp, params);
4924 case BNX2X_Q_CMD_UPDATE:
4925 return bnx2x_q_send_update(bp, params);
4926 case BNX2X_Q_CMD_UPDATE_TPA:
4927 return bnx2x_q_send_update_tpa(bp, params);
4928 case BNX2X_Q_CMD_HALT:
4929 return bnx2x_q_send_halt(bp, params);
4930 case BNX2X_Q_CMD_CFC_DEL:
4931 return bnx2x_q_send_cfc_del(bp, params);
4932 case BNX2X_Q_CMD_TERMINATE:
4933 return bnx2x_q_send_terminate(bp, params);
4934 case BNX2X_Q_CMD_EMPTY:
4935 return bnx2x_q_send_empty(bp, params);
4936 default:
4937 BNX2X_ERR("Unknown command: %d\n", params->cmd);
4938 return -EINVAL;
4939 }
4940 }
4942 static int bnx2x_queue_send_cmd_e1x(struct bnx2x *bp,
4943 struct bnx2x_queue_state_params *params)
4945 switch (params->cmd) {
4946 case BNX2X_Q_CMD_SETUP:
4947 return bnx2x_q_send_setup_e1x(bp, params);
4948 case BNX2X_Q_CMD_INIT:
4949 case BNX2X_Q_CMD_SETUP_TX_ONLY:
4950 case BNX2X_Q_CMD_DEACTIVATE:
4951 case BNX2X_Q_CMD_ACTIVATE:
4952 case BNX2X_Q_CMD_UPDATE:
4953 case BNX2X_Q_CMD_UPDATE_TPA:
4954 case BNX2X_Q_CMD_HALT:
4955 case BNX2X_Q_CMD_CFC_DEL:
4956 case BNX2X_Q_CMD_TERMINATE:
4957 case BNX2X_Q_CMD_EMPTY:
4958 return bnx2x_queue_send_cmd_cmn(bp, params);
4959 default:
4960 BNX2X_ERR("Unknown command: %d\n", params->cmd);
4961 return -EINVAL;
4962 }
4963 }
4965 static int bnx2x_queue_send_cmd_e2(struct bnx2x *bp,
4966 struct bnx2x_queue_state_params *params)
4968 switch (params->cmd) {
4969 case BNX2X_Q_CMD_SETUP:
4970 return bnx2x_q_send_setup_e2(bp, params);
4971 case BNX2X_Q_CMD_INIT:
4972 case BNX2X_Q_CMD_SETUP_TX_ONLY:
4973 case BNX2X_Q_CMD_DEACTIVATE:
4974 case BNX2X_Q_CMD_ACTIVATE:
4975 case BNX2X_Q_CMD_UPDATE:
4976 case BNX2X_Q_CMD_UPDATE_TPA:
4977 case BNX2X_Q_CMD_HALT:
4978 case BNX2X_Q_CMD_CFC_DEL:
4979 case BNX2X_Q_CMD_TERMINATE:
4980 case BNX2X_Q_CMD_EMPTY:
4981 return bnx2x_queue_send_cmd_cmn(bp, params);
4982 default:
4983 BNX2X_ERR("Unknown command: %d\n", params->cmd);
4984 return -EINVAL;
4985 }
4986 }
4989 * bnx2x_queue_chk_transition - check state machine of a regular Queue
4991 * @bp: device handle
4996 * It both checks if the requested command is legal in a current
4997 * state and, if it's legal, sets a `next_state' in the object
4998 * that will be used in the completion flow to set the `state'
4999 * accordingly.
5001 * returns 0 if a requested command is a legal transition,
5002 * -EINVAL otherwise.
5004 static int bnx2x_queue_chk_transition(struct bnx2x *bp,
5005 struct bnx2x_queue_sp_obj *o,
5006 struct bnx2x_queue_state_params *params)
5008 enum bnx2x_q_state state = o->state, next_state = BNX2X_Q_STATE_MAX;
5009 enum bnx2x_queue_cmd cmd = params->cmd;
5010 struct bnx2x_queue_update_params *update_params =
5011 &params->params.update;
5012 u8 next_tx_only = o->num_tx_only;
5014 /* Forget all pending for completion commands if a driver only state
5015 * transition has been requested.
5016 */
5017 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5018 o->pending = 0;
5019 o->next_state = BNX2X_Q_STATE_MAX;
5020 }
5022 /* Don't allow a next state transition if we are in the middle of
5023 * the previous one.
5024 */
5025 if (o->pending) {
5026 BNX2X_ERR("Blocking transition since pending was %lx\n",
5027 o->pending);
5028 return -EBUSY;
5029 }
5031 switch (state) {
5032 case BNX2X_Q_STATE_RESET:
5033 if (cmd == BNX2X_Q_CMD_INIT)
5034 next_state = BNX2X_Q_STATE_INITIALIZED;
5037 case BNX2X_Q_STATE_INITIALIZED:
5038 if (cmd == BNX2X_Q_CMD_SETUP) {
5039 if (test_bit(BNX2X_Q_FLG_ACTIVE,
5040 ¶ms->params.setup.flags))
5041 next_state = BNX2X_Q_STATE_ACTIVE;
5043 next_state = BNX2X_Q_STATE_INACTIVE;
5047 case BNX2X_Q_STATE_ACTIVE:
5048 if (cmd == BNX2X_Q_CMD_DEACTIVATE)
5049 next_state = BNX2X_Q_STATE_INACTIVE;
5051 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5052 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5053 next_state = BNX2X_Q_STATE_ACTIVE;
5055 else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
5056 next_state = BNX2X_Q_STATE_MULTI_COS;
5060 else if (cmd == BNX2X_Q_CMD_HALT)
5061 next_state = BNX2X_Q_STATE_STOPPED;
5063 else if (cmd == BNX2X_Q_CMD_UPDATE) {
5064 /* If "active" state change is requested, update the
5065 * state accordingly.
5067 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5068 &update_params->update_flags) &&
5069 !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5070 &update_params->update_flags))
5071 next_state = BNX2X_Q_STATE_INACTIVE;
5073 next_state = BNX2X_Q_STATE_ACTIVE;
5077 case BNX2X_Q_STATE_MULTI_COS:
5078 if (cmd == BNX2X_Q_CMD_TERMINATE)
5079 next_state = BNX2X_Q_STATE_MCOS_TERMINATED;
5081 else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
5082 next_state = BNX2X_Q_STATE_MULTI_COS;
5083 next_tx_only = o->num_tx_only + 1;
5086 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5087 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5088 next_state = BNX2X_Q_STATE_MULTI_COS;
5090 else if (cmd == BNX2X_Q_CMD_UPDATE) {
5091 /* If "active" state change is requested, update the
5092 * state accordingly.
5094 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5095 &update_params->update_flags) &&
5096 !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5097 &update_params->update_flags))
5098 next_state = BNX2X_Q_STATE_INACTIVE;
5100 next_state = BNX2X_Q_STATE_MULTI_COS;
5104 case BNX2X_Q_STATE_MCOS_TERMINATED:
5105 if (cmd == BNX2X_Q_CMD_CFC_DEL) {
5106 next_tx_only = o->num_tx_only - 1;
5107 if (next_tx_only == 0)
5108 next_state = BNX2X_Q_STATE_ACTIVE;
5110 next_state = BNX2X_Q_STATE_MULTI_COS;
5114 case BNX2X_Q_STATE_INACTIVE:
5115 if (cmd == BNX2X_Q_CMD_ACTIVATE)
5116 next_state = BNX2X_Q_STATE_ACTIVE;
5118 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5119 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5120 next_state = BNX2X_Q_STATE_INACTIVE;
5122 else if (cmd == BNX2X_Q_CMD_HALT)
5123 next_state = BNX2X_Q_STATE_STOPPED;
5125 else if (cmd == BNX2X_Q_CMD_UPDATE) {
5126 /* If "active" state change is requested, update the
5127 * state accordingly.
5129 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5130 &update_params->update_flags) &&
5131 test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5132 &update_params->update_flags)){
5133 if (o->num_tx_only == 0)
5134 next_state = BNX2X_Q_STATE_ACTIVE;
5135 else /* tx only queues exist for this queue */
5136 next_state = BNX2X_Q_STATE_MULTI_COS;
5138 next_state = BNX2X_Q_STATE_INACTIVE;
5142 case BNX2X_Q_STATE_STOPPED:
5143 if (cmd == BNX2X_Q_CMD_TERMINATE)
5144 next_state = BNX2X_Q_STATE_TERMINATED;
5147 case BNX2X_Q_STATE_TERMINATED:
5148 if (cmd == BNX2X_Q_CMD_CFC_DEL)
5149 next_state = BNX2X_Q_STATE_RESET;
5152 default:
5153 BNX2X_ERR("Illegal state: %d\n", state);
5154 }
5156 /* Transition is assured */
5157 if (next_state != BNX2X_Q_STATE_MAX) {
5158 DP(BNX2X_MSG_SP, "Good state transition: %d(%d)->%d\n",
5159 state, cmd, next_state);
5160 o->next_state = next_state;
5161 o->next_tx_only = next_tx_only;
5162 return 0;
5163 }
5165 DP(BNX2X_MSG_SP, "Bad state transition request: %d %d\n", state, cmd);
5166 return -EINVAL;
5167 }
5170 void bnx2x_init_queue_obj(struct bnx2x *bp,
5171 struct bnx2x_queue_sp_obj *obj,
5172 u8 cl_id, u32 *cids, u8 cid_cnt, u8 func_id,
5173 void *rdata,
5174 dma_addr_t rdata_mapping, unsigned long type)
5175 {
5176 memset(obj, 0, sizeof(*obj));
5178 /* We support only BNX2X_MULTI_TX_COS Tx CoS at the moment */
5179 BUG_ON(BNX2X_MULTI_TX_COS < cid_cnt);
5181 memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
5182 obj->max_cos = cid_cnt;
5183 obj->cl_id = cl_id;
5184 obj->func_id = func_id;
5185 obj->rdata = rdata;
5186 obj->rdata_mapping = rdata_mapping;
5187 obj->type = type;
5188 obj->next_state = BNX2X_Q_STATE_MAX;
5190 if (CHIP_IS_E1x(bp))
5191 obj->send_cmd = bnx2x_queue_send_cmd_e1x;
5193 obj->send_cmd = bnx2x_queue_send_cmd_e2;
5195 obj->check_transition = bnx2x_queue_chk_transition;
5197 obj->complete_cmd = bnx2x_queue_comp_cmd;
5198 obj->wait_comp = bnx2x_queue_wait_comp;
5199 obj->set_pending = bnx2x_queue_set_pending;
5200 }
5202 /* return a queue object's logical state*/
5203 int bnx2x_get_q_logical_state(struct bnx2x *bp,
5204 struct bnx2x_queue_sp_obj *obj)
5206 switch (obj->state) {
5207 case BNX2X_Q_STATE_ACTIVE:
5208 case BNX2X_Q_STATE_MULTI_COS:
5209 return BNX2X_Q_LOGICAL_STATE_ACTIVE;
5210 case BNX2X_Q_STATE_RESET:
5211 case BNX2X_Q_STATE_INITIALIZED:
5212 case BNX2X_Q_STATE_MCOS_TERMINATED:
5213 case BNX2X_Q_STATE_INACTIVE:
5214 case BNX2X_Q_STATE_STOPPED:
5215 case BNX2X_Q_STATE_TERMINATED:
5216 case BNX2X_Q_STATE_FLRED:
5217 return BNX2X_Q_LOGICAL_STATE_STOPPED;
5218 default:
5219 return -EINVAL;
5220 }
5221 }
5223 /********************** Function state object *********************************/
5224 enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
5225 struct bnx2x_func_sp_obj *o)
5227 /* in the middle of transaction - return INVALID state */
5228 if (o->pending)
5229 return BNX2X_F_STATE_MAX;
5231 /* ensure the order of reading of o->pending and o->state
5232 * o->pending should be read first
5233 */
5234 rmb();
5235 return o->state;
5236 }
5239 static int bnx2x_func_wait_comp(struct bnx2x *bp,
5240 struct bnx2x_func_sp_obj *o,
5241 enum bnx2x_func_cmd cmd)
5243 return bnx2x_state_wait(bp, cmd, &o->pending);
5247 * bnx2x_func_state_change_comp - complete the state machine transition
5249 * @bp: device handle
5253 * Called on state change transition. Completes the state
5254 * machine transition only - no HW interaction.
5256 static inline int bnx2x_func_state_change_comp(struct bnx2x *bp,
5257 struct bnx2x_func_sp_obj *o,
5258 enum bnx2x_func_cmd cmd)
5260 unsigned long cur_pending = o->pending;
5262 if (!test_and_clear_bit(cmd, &cur_pending)) {
5263 BNX2X_ERR("Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d\n",
5264 cmd, BP_FUNC(bp), o->state,
5265 cur_pending, o->next_state);
5266 return -EINVAL;
5267 }
5269 DP(BNX2X_MSG_SP,
5270 "Completing command %d for func %d, setting state to %d\n",
5271 cmd, BP_FUNC(bp), o->next_state);
5273 o->state = o->next_state;
5274 o->next_state = BNX2X_F_STATE_MAX;
5276 /* It's important that o->state and o->next_state are
5277 * updated before o->pending.
5278 */
5281 clear_bit(cmd, &o->pending);
5282 smp_mb__after_atomic();
5284 return 0;
5285 }
5287 /**
5288 * bnx2x_func_comp_cmd - complete the state change command
5290 * @bp: device handle
5294 * Checks that the arrived completion is expected.
5296 static int bnx2x_func_comp_cmd(struct bnx2x *bp,
5297 struct bnx2x_func_sp_obj *o,
5298 enum bnx2x_func_cmd cmd)
5300 /* Complete the state machine part first, check if it's a
5301 * legal completion.
5302 */
5303 int rc = bnx2x_func_state_change_comp(bp, o, cmd);
5304 return rc;
5305 }
5308 * bnx2x_func_chk_transition - perform function state machine transition
5310 * @bp: device handle
5314 * It both checks if the requested command is legal in a current
5315 * state and, if it's legal, sets a `next_state' in the object
5316 * that will be used in the completion flow to set the `state'
5317 * accordingly.
5319 * returns 0 if a requested command is a legal transition,
5320 * -EINVAL otherwise.
5322 static int bnx2x_func_chk_transition(struct bnx2x *bp,
5323 struct bnx2x_func_sp_obj *o,
5324 struct bnx2x_func_state_params *params)
5326 enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX;
5327 enum bnx2x_func_cmd cmd = params->cmd;
5329 /* Forget all pending for completion commands if a driver only state
5330 * transition has been requested.
5331 */
5332 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5333 o->pending = 0;
5334 o->next_state = BNX2X_F_STATE_MAX;
5335 }
5337 /* Don't allow a next state transition if we are in the middle of
5338 * the previous one.
5339 */
5340 if (o->pending)
5341 return -EBUSY;
5343 switch (state) {
5344 case BNX2X_F_STATE_RESET:
5345 if (cmd == BNX2X_F_CMD_HW_INIT)
5346 next_state = BNX2X_F_STATE_INITIALIZED;
5349 case BNX2X_F_STATE_INITIALIZED:
5350 if (cmd == BNX2X_F_CMD_START)
5351 next_state = BNX2X_F_STATE_STARTED;
5353 else if (cmd == BNX2X_F_CMD_HW_RESET)
5354 next_state = BNX2X_F_STATE_RESET;
5357 case BNX2X_F_STATE_STARTED:
5358 if (cmd == BNX2X_F_CMD_STOP)
5359 next_state = BNX2X_F_STATE_INITIALIZED;
5360 /* afex ramrods can be sent only in started mode, and only
5361 * if not pending for function_stop ramrod completion
5362 * for these events - next state remained STARTED.
5364 else if ((cmd == BNX2X_F_CMD_AFEX_UPDATE) &&
5365 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5366 next_state = BNX2X_F_STATE_STARTED;
5368 else if ((cmd == BNX2X_F_CMD_AFEX_VIFLISTS) &&
5369 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5370 next_state = BNX2X_F_STATE_STARTED;
5372 /* Switch_update ramrod can be sent in either started or
5373 * tx_stopped state, and it doesn't change the state.
5375 else if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) &&
5376 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5377 next_state = BNX2X_F_STATE_STARTED;
5379 else if (cmd == BNX2X_F_CMD_TX_STOP)
5380 next_state = BNX2X_F_STATE_TX_STOPPED;
5383 case BNX2X_F_STATE_TX_STOPPED:
5384 if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) &&
5385 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5386 next_state = BNX2X_F_STATE_TX_STOPPED;
5388 else if (cmd == BNX2X_F_CMD_TX_START)
5389 next_state = BNX2X_F_STATE_STARTED;
5392 default:
5393 BNX2X_ERR("Unknown state: %d\n", state);
5394 }
5396 /* Transition is assured */
5397 if (next_state != BNX2X_F_STATE_MAX) {
5398 DP(BNX2X_MSG_SP, "Good function state transition: %d(%d)->%d\n",
5399 state, cmd, next_state);
5400 o->next_state = next_state;
5401 return 0;
5402 }
5404 DP(BNX2X_MSG_SP, "Bad function state transition request: %d %d\n",
5405 state, cmd);
5406 return -EINVAL;
5407 }
5411 * bnx2x_func_init_func - performs HW init at function stage
5413 * @bp: device handle
5416 * Init HW when the current phase is
5417 * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only
5418 * HW blocks.
5419 */
5420 static inline int bnx2x_func_init_func(struct bnx2x *bp,
5421 const struct bnx2x_func_sp_drv_ops *drv)
5423 return drv->init_hw_func(bp);
5427 * bnx2x_func_init_port - performs HW init at port stage
5429 * @bp: device handle
5432 * Init HW when the current phase is
5433 * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
5434 * FUNCTION-only HW blocks.
5437 static inline int bnx2x_func_init_port(struct bnx2x *bp,
5438 const struct bnx2x_func_sp_drv_ops *drv)
5440 int rc = drv->init_hw_port(bp);
5441 if (rc)
5442 return rc;
5444 return bnx2x_func_init_func(bp, drv);
5448 * bnx2x_func_init_cmn_chip - performs HW init at chip-common stage
5450 * @bp: device handle
5453 * Init HW when the current phase is
5454 * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
5455 * PORT-only and FUNCTION-only HW blocks.
5457 static inline int bnx2x_func_init_cmn_chip(struct bnx2x *bp,
5458 const struct bnx2x_func_sp_drv_ops *drv)
5460 int rc = drv->init_hw_cmn_chip(bp);
5461 if (rc)
5462 return rc;
5464 return bnx2x_func_init_port(bp, drv);
5468 * bnx2x_func_init_cmn - performs HW init at common stage
5470 * @bp: device handle
5473 * Init HW when the current phase is
5474 * FW_MSG_CODE_DRV_LOAD_COMMON: initialize COMMON,
5475 * PORT-only and FUNCTION-only HW blocks.
5477 static inline int bnx2x_func_init_cmn(struct bnx2x *bp,
5478 const struct bnx2x_func_sp_drv_ops *drv)
5480 int rc = drv->init_hw_cmn(bp);
5481 if (rc)
5482 return rc;
5484 return bnx2x_func_init_port(bp, drv);
5487 static int bnx2x_func_hw_init(struct bnx2x *bp,
5488 struct bnx2x_func_state_params *params)
5490 u32 load_code = params->params.hw_init.load_phase;
5491 struct bnx2x_func_sp_obj *o = params->f_obj;
5492 const struct bnx2x_func_sp_drv_ops *drv = o->drv;
5493 int rc = 0;
5495 DP(BNX2X_MSG_SP, "function %d load_code %x\n",
5496 BP_ABS_FUNC(bp), load_code);
5498 /* Prepare buffers for unzipping the FW */
5499 rc = drv->gunzip_init(bp);
5500 if (rc)
5501 return rc;
5503 /* Prepare FW */
5504 rc = drv->init_fw(bp);
5505 if (rc) {
5506 BNX2X_ERR("Error loading firmware\n");
5507 goto init_err;
5508 }
5510 /* Handle the beginning of COMMON_XXX phases separately... */
5511 switch (load_code) {
5512 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
5513 rc = bnx2x_func_init_cmn_chip(bp, drv);
5518 case FW_MSG_CODE_DRV_LOAD_COMMON:
5519 rc = bnx2x_func_init_cmn(bp, drv);
5524 case FW_MSG_CODE_DRV_LOAD_PORT:
5525 rc = bnx2x_func_init_port(bp, drv);
5530 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5531 rc = bnx2x_func_init_func(bp, drv);
5537 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5538 rc = -EINVAL;
5539 }
5541 init_err:
5542 drv->gunzip_end(bp);
5544 /* In case of success, complete the command immediately: no ramrods
5545 * have been sent.
5546 */
5547 if (!rc)
5548 o->complete_cmd(bp, o, BNX2X_F_CMD_HW_INIT);
5550 return rc;
5551 }
5554 * bnx2x_func_reset_func - reset HW at function stage
5556 * @bp: device handle
5559 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
5560 * FUNCTION-only HW blocks.
5562 static inline void bnx2x_func_reset_func(struct bnx2x *bp,
5563 const struct bnx2x_func_sp_drv_ops *drv)
5565 drv->reset_hw_func(bp);
5569 * bnx2x_func_reset_port - reset HW at port stage
5571 * @bp: device handle
5574 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
5575 * FUNCTION-only and PORT-only HW blocks.
5579 * It's important to call reset_port before reset_func() as the last thing
5580 * reset_func does is pf_disable() thus disabling PGLUE_B, which
5581 * makes impossible any DMAE transactions.
5583 static inline void bnx2x_func_reset_port(struct bnx2x *bp,
5584 const struct bnx2x_func_sp_drv_ops *drv)
5586 drv->reset_hw_port(bp);
5587 bnx2x_func_reset_func(bp, drv);
5591 * bnx2x_func_reset_cmn - reset HW at common stage
5593 * @bp: device handle
5596 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
5597 * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
5598 * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
5600 static inline void bnx2x_func_reset_cmn(struct bnx2x *bp,
5601 const struct bnx2x_func_sp_drv_ops *drv)
5603 bnx2x_func_reset_port(bp, drv);
5604 drv->reset_hw_cmn(bp);
5607 static inline int bnx2x_func_hw_reset(struct bnx2x *bp,
5608 struct bnx2x_func_state_params *params)
5610 u32 reset_phase = params->params.hw_reset.reset_phase;
5611 struct bnx2x_func_sp_obj *o = params->f_obj;
5612 const struct bnx2x_func_sp_drv_ops *drv = o->drv;
5614 DP(BNX2X_MSG_SP, "function %d reset_phase %x\n", BP_ABS_FUNC(bp),
5615 reset_phase);
5617 switch (reset_phase) {
5618 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5619 bnx2x_func_reset_cmn(bp, drv);
5621 case FW_MSG_CODE_DRV_UNLOAD_PORT:
5622 bnx2x_func_reset_port(bp, drv);
5624 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5625 bnx2x_func_reset_func(bp, drv);
5628 BNX2X_ERR("Unknown reset_phase (0x%x) from MCP\n",
5629 reset_phase);
5630 }
5633 /* Complete the command immediately: no ramrods have been sent. */
5634 o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET);
5636 return 0;
5637 }
5639 static inline int bnx2x_func_send_start(struct bnx2x *bp,
5640 struct bnx2x_func_state_params *params)
5642 struct bnx2x_func_sp_obj *o = params->f_obj;
5643 struct function_start_data *rdata =
5644 (struct function_start_data *)o->rdata;
5645 dma_addr_t data_mapping = o->rdata_mapping;
5646 struct bnx2x_func_start_params *start_params = &params->params.start;
5648 memset(rdata, 0, sizeof(*rdata));
5650 /* Fill the ramrod data with provided parameters */
5651 rdata->function_mode = (u8)start_params->mf_mode;
5652 rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag);
5653 rdata->path_id = BP_PATH(bp);
5654 rdata->network_cos_mode = start_params->network_cos_mode;
5655 rdata->gre_tunnel_mode = start_params->gre_tunnel_mode;
5656 rdata->gre_tunnel_rss = start_params->gre_tunnel_rss;
5658 /* No need for an explicit memory barrier here as long as we
5659 * ensure the ordering of writing to the SPQ element
5660 * and updating of the SPQ producer which involves a memory
5661 * read. If the memory read is removed we will have to put a
5662 * full memory barrier there (inside bnx2x_sp_post()).
5663 */
5665 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
5666 U64_HI(data_mapping),
5667 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5670 static inline int bnx2x_func_send_switch_update(struct bnx2x *bp,
5671 struct bnx2x_func_state_params *params)
5673 struct bnx2x_func_sp_obj *o = params->f_obj;
5674 struct function_update_data *rdata =
5675 (struct function_update_data *)o->rdata;
5676 dma_addr_t data_mapping = o->rdata_mapping;
5677 struct bnx2x_func_switch_update_params *switch_update_params =
5678 &params->params.switch_update;
5680 memset(rdata, 0, sizeof(*rdata));
5682 /* Fill the ramrod data with provided parameters */
5683 rdata->tx_switch_suspend_change_flg = 1;
5684 rdata->tx_switch_suspend = switch_update_params->suspend;
5685 rdata->echo = SWITCH_UPDATE;
5687 /* No need for an explicit memory barrier here as long as we
5688 * ensure the ordering of writing to the SPQ element
5689 * and updating of the SPQ producer which involves a memory
5690 * read. If the memory read is removed we will have to put a
5691 * full memory barrier there (inside bnx2x_sp_post()).
5692 */
5693 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
5694 U64_HI(data_mapping),
5695 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5698 static inline int bnx2x_func_send_afex_update(struct bnx2x *bp,
5699 struct bnx2x_func_state_params *params)
5701 struct bnx2x_func_sp_obj *o = params->f_obj;
5702 struct function_update_data *rdata =
5703 (struct function_update_data *)o->afex_rdata;
5704 dma_addr_t data_mapping = o->afex_rdata_mapping;
5705 struct bnx2x_func_afex_update_params *afex_update_params =
5706 &params->params.afex_update;
5708 memset(rdata, 0, sizeof(*rdata));
5710 /* Fill the ramrod data with provided parameters */
5711 rdata->vif_id_change_flg = 1;
5712 rdata->vif_id = cpu_to_le16(afex_update_params->vif_id);
5713 rdata->afex_default_vlan_change_flg = 1;
5714 rdata->afex_default_vlan =
5715 cpu_to_le16(afex_update_params->afex_default_vlan);
5716 rdata->allowed_priorities_change_flg = 1;
5717 rdata->allowed_priorities = afex_update_params->allowed_priorities;
5718 rdata->echo = AFEX_UPDATE;
5720 /* No need for an explicit memory barrier here as long as we
5721 * ensure the ordering of writing to the SPQ element
5722 * and updating of the SPQ producer which involves a memory
5723 * read. If the memory read is removed we will have to put a
5724 * full memory barrier there (inside bnx2x_sp_post()).
5725 */
5726 DP(BNX2X_MSG_SP,
5727 "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n",
5728 rdata->vif_id,
5729 rdata->afex_default_vlan, rdata->allowed_priorities);
5731 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
5732 U64_HI(data_mapping),
5733 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5736 static
5737 inline int bnx2x_func_send_afex_viflists(struct bnx2x *bp,
5738 struct bnx2x_func_state_params *params)
5740 struct bnx2x_func_sp_obj *o = params->f_obj;
5741 struct afex_vif_list_ramrod_data *rdata =
5742 (struct afex_vif_list_ramrod_data *)o->afex_rdata;
5743 struct bnx2x_func_afex_viflists_params *afex_vif_params =
5744 &params->params.afex_viflists;
5745 u64 *p_rdata = (u64 *)rdata;
5747 memset(rdata, 0, sizeof(*rdata));
5749 /* Fill the ramrod data with provided parameters */
5750 rdata->vif_list_index = cpu_to_le16(afex_vif_params->vif_list_index);
5751 rdata->func_bit_map = afex_vif_params->func_bit_map;
5752 rdata->afex_vif_list_command = afex_vif_params->afex_vif_list_command;
5753 rdata->func_to_clear = afex_vif_params->func_to_clear;
5755 /* send in echo type of sub command */
5756 rdata->echo = afex_vif_params->afex_vif_list_command;
5758 /* No need for an explicit memory barrier here as long as we
5759 * ensure the ordering of writing to the SPQ element
5760 * and updating of the SPQ producer which involves a memory
5761 * read. If the memory read is removed we will have to put a
5762 * full memory barrier there (inside bnx2x_sp_post()).
5763 */
5765 DP(BNX2X_MSG_SP, "afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x\n",
5766 rdata->afex_vif_list_command, rdata->vif_list_index,
5767 rdata->func_bit_map, rdata->func_to_clear);
5769 /* this ramrod sends data directly and not through DMA mapping */
5770 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0,
5771 U64_HI(*p_rdata), U64_LO(*p_rdata),
5772 NONE_CONNECTION_TYPE);
5775 static inline int bnx2x_func_send_stop(struct bnx2x *bp,
5776 struct bnx2x_func_state_params *params)
5778 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0,
5779 NONE_CONNECTION_TYPE);
5782 static inline int bnx2x_func_send_tx_stop(struct bnx2x *bp,
5783 struct bnx2x_func_state_params *params)
5785 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0, 0,
5786 NONE_CONNECTION_TYPE);
5788 static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
5789 struct bnx2x_func_state_params *params)
5791 struct bnx2x_func_sp_obj *o = params->f_obj;
5792 struct flow_control_configuration *rdata =
5793 (struct flow_control_configuration *)o->rdata;
5794 dma_addr_t data_mapping = o->rdata_mapping;
5795 struct bnx2x_func_tx_start_params *tx_start_params =
5796 &params->params.tx_start;
5797 int i;
5799 memset(rdata, 0, sizeof(*rdata));
5801 rdata->dcb_enabled = tx_start_params->dcb_enabled;
5802 rdata->dcb_version = tx_start_params->dcb_version;
5803 rdata->dont_add_pri_0_en = tx_start_params->dont_add_pri_0_en;
5805 for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
5806 rdata->traffic_type_to_priority_cos[i] =
5807 tx_start_params->traffic_type_to_priority_cos[i];
5809 /* No need for an explicit memory barrier here as long as we
5810 * ensure the ordering of writing to the SPQ element
5811 * and updating of the SPQ producer which involves a memory
5812 * read. If the memory read is removed we will have to put a
5813 * full memory barrier there (inside bnx2x_sp_post()).
5814 */
5815 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
5816 U64_HI(data_mapping),
5817 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5820 static int bnx2x_func_send_cmd(struct bnx2x *bp,
5821 struct bnx2x_func_state_params *params)
5823 switch (params->cmd) {
5824 case BNX2X_F_CMD_HW_INIT:
5825 return bnx2x_func_hw_init(bp, params);
5826 case BNX2X_F_CMD_START:
5827 return bnx2x_func_send_start(bp, params);
5828 case BNX2X_F_CMD_STOP:
5829 return bnx2x_func_send_stop(bp, params);
5830 case BNX2X_F_CMD_HW_RESET:
5831 return bnx2x_func_hw_reset(bp, params);
5832 case BNX2X_F_CMD_AFEX_UPDATE:
5833 return bnx2x_func_send_afex_update(bp, params);
5834 case BNX2X_F_CMD_AFEX_VIFLISTS:
5835 return bnx2x_func_send_afex_viflists(bp, params);
5836 case BNX2X_F_CMD_TX_STOP:
5837 return bnx2x_func_send_tx_stop(bp, params);
5838 case BNX2X_F_CMD_TX_START:
5839 return bnx2x_func_send_tx_start(bp, params);
5840 case BNX2X_F_CMD_SWITCH_UPDATE:
5841 return bnx2x_func_send_switch_update(bp, params);
5842 default:
5843 BNX2X_ERR("Unknown command: %d\n", params->cmd);
5844 return -EINVAL;
5845 }
5846 }
5848 void bnx2x_init_func_obj(struct bnx2x *bp,
5849 struct bnx2x_func_sp_obj *obj,
5850 void *rdata, dma_addr_t rdata_mapping,
5851 void *afex_rdata, dma_addr_t afex_rdata_mapping,
5852 struct bnx2x_func_sp_drv_ops *drv_iface)
5854 memset(obj, 0, sizeof(*obj));
5856 mutex_init(&obj->one_pending_mutex);
5859 obj->rdata_mapping = rdata_mapping;
5860 obj->afex_rdata = afex_rdata;
5861 obj->afex_rdata_mapping = afex_rdata_mapping;
5862 obj->send_cmd = bnx2x_func_send_cmd;
5863 obj->check_transition = bnx2x_func_chk_transition;
5864 obj->complete_cmd = bnx2x_func_comp_cmd;
5865 obj->wait_comp = bnx2x_func_wait_comp;
5867 obj->drv = drv_iface;
5871 * bnx2x_func_state_change - perform Function state change transition
5873 * @bp: device handle
5874 * @params: parameters to perform the transaction
5876 * returns 0 in case of successfully completed transition,
5877 * negative error code in case of failure, positive
5878 * (EBUSY) value if there is a completion that is
5879 * still pending (possible only if RAMROD_COMP_WAIT is
5880 * not set in params->ramrod_flags for asynchronous
5881 * commands).
5882 */
5883 int bnx2x_func_state_change(struct bnx2x *bp,
5884 struct bnx2x_func_state_params *params)
5886 struct bnx2x_func_sp_obj *o = params->f_obj;
5887 int rc, cnt = 300;
5888 enum bnx2x_func_cmd cmd = params->cmd;
5889 unsigned long *pending = &o->pending;
5891 mutex_lock(&o->one_pending_mutex);
5893 /* Check that the requested transition is legal */
5894 rc = o->check_transition(bp, o, params);
5895 if ((rc == -EBUSY) &&
5896 (test_bit(RAMROD_RETRY, &params->ramrod_flags))) {
5897 while ((rc == -EBUSY) && (--cnt > 0)) {
5898 mutex_unlock(&o->one_pending_mutex);
5899 msleep(10);
5900 mutex_lock(&o->one_pending_mutex);
5901 rc = o->check_transition(bp, o, params);
5902 }
5903 if (rc == -EBUSY) {
5904 mutex_unlock(&o->one_pending_mutex);
5905 BNX2X_ERR("timeout waiting for previous ramrod completion\n");
5906 return rc;
5907 }
5908 } else if (rc) {
5909 mutex_unlock(&o->one_pending_mutex);
5910 return rc;
5911 }
5913 /* Set "pending" bit */
5914 set_bit(cmd, pending);
5916 /* Don't send a command if only driver cleanup was requested */
5917 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5918 bnx2x_func_state_change_comp(bp, o, cmd);
5919 mutex_unlock(&o->one_pending_mutex);
5920 } else {
5922 rc = o->send_cmd(bp, params);
5924 mutex_unlock(&o->one_pending_mutex);
5926 if (rc) {
5927 o->next_state = BNX2X_F_STATE_MAX;
5928 clear_bit(cmd, pending);
5929 smp_mb__after_atomic();
5930 return rc;
5931 }
5933 if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
5934 rc = o->wait_comp(bp, o, cmd);
5935 if (rc)
5936 return rc;
5938 return 0;
5939 }
5940 }
5942 return !!test_bit(cmd, pending);
5943 }