bnx2x: Revise comments and alignment
[linux-2.6-block.git] / drivers / net / ethernet / broadcom / bnx2x / bnx2x_sp.c
1 /* bnx2x_sp.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2011-2013 Broadcom Corporation
4  *
5  * Unless you and Broadcom execute a separate written software license
6  * agreement governing use of this software, this software is licensed to you
7  * under the terms of the GNU General Public License version 2, available
8  * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
9  *
10  * Notwithstanding the above, under no circumstances may you combine this
11  * software in any way with any other Broadcom software provided under a
12  * license other than the GPL, without Broadcom's express prior written
13  * consent.
14  *
15  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
16  * Written by: Vladislav Zolotarov
17  *
18  */
19
20 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21
22 #include <linux/module.h>
23 #include <linux/crc32.h>
24 #include <linux/netdevice.h>
25 #include <linux/etherdevice.h>
26 #include <linux/crc32c.h>
27 #include "bnx2x.h"
28 #include "bnx2x_cmn.h"
29 #include "bnx2x_sp.h"
30
31 #define BNX2X_MAX_EMUL_MULTI            16
32
33 /**** Exe Queue interfaces ****/
34
/**
 * bnx2x_exe_queue_init - init the Exe Queue object
 *
 * @bp:         driver handle
 * @o:          pointer to the object
 * @exe_len:    length
 * @owner:      pointer to the owner
 * @validate:   validate function pointer
 * @remove:     remove function pointer
 * @optimize:   optimize function pointer
 * @exec:       execute function pointer
 * @get:        get function pointer
 */
static inline void bnx2x_exe_queue_init(struct bnx2x *bp,
					struct bnx2x_exe_queue_obj *o,
					int exe_len,
					union bnx2x_qable_obj *owner,
					exe_q_validate validate,
					exe_q_remove remove,
					exe_q_optimize optimize,
					exe_q_execute exec,
					exe_q_get get)
{
	memset(o, 0, sizeof(*o));

	INIT_LIST_HEAD(&o->exe_queue);
	INIT_LIST_HEAD(&o->pending_comp);

	spin_lock_init(&o->lock);

	o->exe_chunk_len = exe_len;
	o->owner         = owner;

	/* Owner specific callbacks */
	o->validate      = validate;
	o->remove        = remove;
	o->optimize      = optimize;
	o->execute       = exec;
	o->get           = get;

	DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk length of %d\n",
	   exe_len);
}
76
/* Free a single execution queue element (allocated by
 * bnx2x_exe_queue_alloc_elem()).
 */
static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp,
					     struct bnx2x_exeq_elem *elem)
{
	DP(BNX2X_MSG_SP, "Deleting an exe_queue element\n");
	kfree(elem);
}
83
84 static inline int bnx2x_exe_queue_length(struct bnx2x_exe_queue_obj *o)
85 {
86         struct bnx2x_exeq_elem *elem;
87         int cnt = 0;
88
89         spin_lock_bh(&o->lock);
90
91         list_for_each_entry(elem, &o->exe_queue, link)
92                 cnt++;
93
94         spin_unlock_bh(&o->lock);
95
96         return cnt;
97 }
98
/**
 * bnx2x_exe_queue_add - add a new element to the execution queue
 *
 * @bp:         driver handle
 * @o:          queue
 * @elem:       new command to add
 * @restore:    true - do not optimize the command
 *
 * If the element is optimized or is illegal, frees it.
 */
static inline int bnx2x_exe_queue_add(struct bnx2x *bp,
				      struct bnx2x_exe_queue_obj *o,
				      struct bnx2x_exeq_elem *elem,
				      bool restore)
{
	int rc;

	spin_lock_bh(&o->lock);

	if (!restore) {
		/* Try to cancel this element queue */
		rc = o->optimize(bp, o->owner, elem);
		if (rc)
			goto free_and_exit;

		/* Check if this request is ok */
		rc = o->validate(bp, o->owner, elem);
		if (rc) {
			DP(BNX2X_MSG_SP, "Preamble failed: %d\n", rc);
			goto free_and_exit;
		}
	}

	/* If so, add it to the execution queue */
	list_add_tail(&elem->link, &o->exe_queue);

	spin_unlock_bh(&o->lock);

	return 0;

free_and_exit:
	bnx2x_exe_queue_free_elem(bp, elem);

	spin_unlock_bh(&o->lock);

	return rc;
}
146
147 static inline void __bnx2x_exe_queue_reset_pending(
148         struct bnx2x *bp,
149         struct bnx2x_exe_queue_obj *o)
150 {
151         struct bnx2x_exeq_elem *elem;
152
153         while (!list_empty(&o->pending_comp)) {
154                 elem = list_first_entry(&o->pending_comp,
155                                         struct bnx2x_exeq_elem, link);
156
157                 list_del(&elem->link);
158                 bnx2x_exe_queue_free_elem(bp, elem);
159         }
160 }
161
/* Locked wrapper: flush the pending-completion list while holding the
 * exe-queue lock.
 */
static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp,
						 struct bnx2x_exe_queue_obj *o)
{
	spin_lock_bh(&o->lock);

	__bnx2x_exe_queue_reset_pending(bp, o);

	spin_unlock_bh(&o->lock);
}
171
/**
 * bnx2x_exe_queue_step - execute one execution chunk atomically
 *
 * @bp:                 driver handle
 * @o:                  queue
 * @ramrod_flags:       flags
 *
 * (Atomicity is ensured using the exe_queue->lock).
 *
 * Return: 1 if a previous chunk is still pending completion, 0 if nothing
 * was executed or o->execute() completed synchronously, the positive value
 * from o->execute() when completions are outstanding, or a negative error
 * code from o->execute() (in which case the commands are returned to the
 * queue).
 */
static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
				       struct bnx2x_exe_queue_obj *o,
				       unsigned long *ramrod_flags)
{
	struct bnx2x_exeq_elem *elem, spacer;
	int cur_len = 0, rc;

	memset(&spacer, 0, sizeof(spacer));

	spin_lock_bh(&o->lock);

	/* Next step should not be performed until the current is finished,
	 * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
	 * properly clear object internals without sending any command to the FW
	 * which also implies there won't be any completion to clear the
	 * 'pending' list.
	 */
	if (!list_empty(&o->pending_comp)) {
		if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
			DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n");
			__bnx2x_exe_queue_reset_pending(bp, o);
		} else {
			spin_unlock_bh(&o->lock);
			return 1;
		}
	}

	/* Run through the pending commands list and create a next
	 * execution chunk.
	 */
	while (!list_empty(&o->exe_queue)) {
		elem = list_first_entry(&o->exe_queue, struct bnx2x_exeq_elem,
					link);
		WARN_ON(!elem->cmd_len);

		if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
			cur_len += elem->cmd_len;
			/* Prevent from both lists being empty when moving an
			 * element. This will allow the call of
			 * bnx2x_exe_queue_empty() without locking.
			 */
			list_add_tail(&spacer.link, &o->pending_comp);
			mb();
			list_move_tail(&elem->link, &o->pending_comp);
			list_del(&spacer.link);
		} else
			break;
	}

	/* Sanity check */
	if (!cur_len) {
		spin_unlock_bh(&o->lock);
		return 0;
	}

	rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
	if (rc < 0)
		/* In case of an error return the commands back to the queue
		 * and reset the pending_comp.
		 */
		list_splice_init(&o->pending_comp, &o->exe_queue);
	else if (!rc)
		/* If zero is returned, means there are no outstanding pending
		 * completions and we may dismiss the pending list.
		 */
		__bnx2x_exe_queue_reset_pending(bp, o);

	spin_unlock_bh(&o->lock);
	return rc;
}
251
/* Lockless check that both the exe_queue and pending_comp lists are empty.
 * bnx2x_exe_queue_step() inserts a spacer so that the two lists are never
 * momentarily both empty while an element is being moved between them,
 * which is what makes this unlocked check safe.
 */
static inline bool bnx2x_exe_queue_empty(struct bnx2x_exe_queue_obj *o)
{
	bool empty = list_empty(&o->exe_queue);

	/* Don't reorder!!! */
	mb();

	return empty && list_empty(&o->pending_comp);
}
261
262 static inline struct bnx2x_exeq_elem *bnx2x_exe_queue_alloc_elem(
263         struct bnx2x *bp)
264 {
265         DP(BNX2X_MSG_SP, "Allocating a new exe_queue element\n");
266         return kzalloc(sizeof(struct bnx2x_exeq_elem), GFP_ATOMIC);
267 }
268
269 /************************ raw_obj functions ***********************************/
270 static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o)
271 {
272         return !!test_bit(o->state, o->pstate);
273 }
274
/* Clear the object's pending-state bit with memory barriers on both sides
 * so waiters in bnx2x_state_wait() observe the transition in order.
 */
static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o)
{
	smp_mb__before_clear_bit();
	clear_bit(o->state, o->pstate);
	smp_mb__after_clear_bit();
}
281
/* Set the object's pending-state bit with memory barriers on both sides.
 * NOTE(review): the smp_mb__{before,after}_clear_bit() helpers are used
 * here around a set_bit() — this matches this kernel version's barrier
 * API; confirm naming when porting to kernels that provide
 * smp_mb__{before,after}_atomic().
 */
static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)
{
	smp_mb__before_clear_bit();
	set_bit(o->state, o->pstate);
	smp_mb__after_clear_bit();
}
288
/**
 * bnx2x_state_wait - wait until the given bit(state) is cleared
 *
 * @bp:         device handle
 * @state:      state which is to be cleared
 * @pstate:     state buffer
 *
 * Polls up to ~5-10 seconds (longer on emulation). Returns 0 once the bit
 * clears, -EIO if the driver panicked meanwhile, -EBUSY on timeout.
 */
static inline int bnx2x_state_wait(struct bnx2x *bp, int state,
				   unsigned long *pstate)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	if (CHIP_REV_IS_EMUL(bp))
		cnt *= 20;

	DP(BNX2X_MSG_SP, "waiting for state to become %d\n", state);

	might_sleep();
	while (cnt--) {
		if (!test_bit(state, pstate)) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(BNX2X_MSG_SP, "exit  (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		usleep_range(1000, 2000);

		if (bp->panic)
			return -EIO;
	}

	/* timeout! */
	BNX2X_ERR("timeout waiting for state %d\n", state);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}
331
/* Wait until the raw object's pending state bit is cleared. */
static int bnx2x_raw_wait(struct bnx2x *bp, struct bnx2x_raw_obj *raw)
{
	return bnx2x_state_wait(bp, raw->state, raw->pstate);
}
336
337 /***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
338 /* credit handling callbacks */
339 static bool bnx2x_get_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int *offset)
340 {
341         struct bnx2x_credit_pool_obj *mp = o->macs_pool;
342
343         WARN_ON(!mp);
344
345         return mp->get_entry(mp, offset);
346 }
347
348 static bool bnx2x_get_credit_mac(struct bnx2x_vlan_mac_obj *o)
349 {
350         struct bnx2x_credit_pool_obj *mp = o->macs_pool;
351
352         WARN_ON(!mp);
353
354         return mp->get(mp, 1);
355 }
356
357 static bool bnx2x_get_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int *offset)
358 {
359         struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
360
361         WARN_ON(!vp);
362
363         return vp->get_entry(vp, offset);
364 }
365
366 static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
367 {
368         struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
369
370         WARN_ON(!vp);
371
372         return vp->get(vp, 1);
373 }
374
375 static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
376 {
377         struct bnx2x_credit_pool_obj *mp = o->macs_pool;
378         struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
379
380         if (!mp->get(mp, 1))
381                 return false;
382
383         if (!vp->get(vp, 1)) {
384                 mp->put(mp, 1);
385                 return false;
386         }
387
388         return true;
389 }
390
391 static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
392 {
393         struct bnx2x_credit_pool_obj *mp = o->macs_pool;
394
395         return mp->put_entry(mp, offset);
396 }
397
398 static bool bnx2x_put_credit_mac(struct bnx2x_vlan_mac_obj *o)
399 {
400         struct bnx2x_credit_pool_obj *mp = o->macs_pool;
401
402         return mp->put(mp, 1);
403 }
404
405 static bool bnx2x_put_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int offset)
406 {
407         struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
408
409         return vp->put_entry(vp, offset);
410 }
411
412 static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
413 {
414         struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
415
416         return vp->put(vp, 1);
417 }
418
419 static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
420 {
421         struct bnx2x_credit_pool_obj *mp = o->macs_pool;
422         struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
423
424         if (!mp->put(mp, 1))
425                 return false;
426
427         if (!vp->put(vp, 1)) {
428                 mp->get(mp, 1);
429                 return false;
430         }
431
432         return true;
433 }
434
/* Copy up to @n registry entries of object @o into a caller buffer.
 *
 * @base:   destination buffer
 * @stride: extra bytes skipped between consecutive copies (in addition
 *          to @size)
 * @size:   bytes copied per element from the registry union
 *
 * NOTE(review): the return value is counter * ETH_ALEN regardless of
 * @size — callers appear to interpret it in MAC-address units; confirm
 * before reusing this helper for non-MAC registries.
 */
static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
				int n, u8 *base, u8 stride, u8 size)
{
	struct bnx2x_vlan_mac_registry_elem *pos;
	u8 *next = base;
	int counter = 0;

	/* traverse list */
	list_for_each_entry(pos, &o->head, link) {
		if (counter < n) {
			memcpy(next, &pos->u, size);
			counter++;
			DP(BNX2X_MSG_SP, "copied element number %d to address %p element was:\n",
			   counter, next);
			next += stride + size;
		}
	}
	return counter * ETH_ALEN;
}
454
455 /* check_add() callbacks */
456 static int bnx2x_check_mac_add(struct bnx2x *bp,
457                                struct bnx2x_vlan_mac_obj *o,
458                                union bnx2x_classification_ramrod_data *data)
459 {
460         struct bnx2x_vlan_mac_registry_elem *pos;
461
462         DP(BNX2X_MSG_SP, "Checking MAC %pM for ADD command\n", data->mac.mac);
463
464         if (!is_valid_ether_addr(data->mac.mac))
465                 return -EINVAL;
466
467         /* Check if a requested MAC already exists */
468         list_for_each_entry(pos, &o->head, link)
469                 if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN) &&
470                     (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
471                         return -EEXIST;
472
473         return 0;
474 }
475
476 static int bnx2x_check_vlan_add(struct bnx2x *bp,
477                                 struct bnx2x_vlan_mac_obj *o,
478                                 union bnx2x_classification_ramrod_data *data)
479 {
480         struct bnx2x_vlan_mac_registry_elem *pos;
481
482         DP(BNX2X_MSG_SP, "Checking VLAN %d for ADD command\n", data->vlan.vlan);
483
484         list_for_each_entry(pos, &o->head, link)
485                 if (data->vlan.vlan == pos->u.vlan.vlan)
486                         return -EEXIST;
487
488         return 0;
489 }
490
491 static int bnx2x_check_vlan_mac_add(struct bnx2x *bp,
492                                     struct bnx2x_vlan_mac_obj *o,
493                                    union bnx2x_classification_ramrod_data *data)
494 {
495         struct bnx2x_vlan_mac_registry_elem *pos;
496
497         DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for ADD command\n",
498            data->vlan_mac.mac, data->vlan_mac.vlan);
499
500         list_for_each_entry(pos, &o->head, link)
501                 if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
502                     (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
503                                   ETH_ALEN)) &&
504                     (data->vlan_mac.is_inner_mac ==
505                      pos->u.vlan_mac.is_inner_mac))
506                         return -EEXIST;
507
508         return 0;
509 }
510
511 /* check_del() callbacks */
512 static struct bnx2x_vlan_mac_registry_elem *
513         bnx2x_check_mac_del(struct bnx2x *bp,
514                             struct bnx2x_vlan_mac_obj *o,
515                             union bnx2x_classification_ramrod_data *data)
516 {
517         struct bnx2x_vlan_mac_registry_elem *pos;
518
519         DP(BNX2X_MSG_SP, "Checking MAC %pM for DEL command\n", data->mac.mac);
520
521         list_for_each_entry(pos, &o->head, link)
522                 if ((!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) &&
523                     (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
524                         return pos;
525
526         return NULL;
527 }
528
529 static struct bnx2x_vlan_mac_registry_elem *
530         bnx2x_check_vlan_del(struct bnx2x *bp,
531                              struct bnx2x_vlan_mac_obj *o,
532                              union bnx2x_classification_ramrod_data *data)
533 {
534         struct bnx2x_vlan_mac_registry_elem *pos;
535
536         DP(BNX2X_MSG_SP, "Checking VLAN %d for DEL command\n", data->vlan.vlan);
537
538         list_for_each_entry(pos, &o->head, link)
539                 if (data->vlan.vlan == pos->u.vlan.vlan)
540                         return pos;
541
542         return NULL;
543 }
544
545 static struct bnx2x_vlan_mac_registry_elem *
546         bnx2x_check_vlan_mac_del(struct bnx2x *bp,
547                                  struct bnx2x_vlan_mac_obj *o,
548                                  union bnx2x_classification_ramrod_data *data)
549 {
550         struct bnx2x_vlan_mac_registry_elem *pos;
551
552         DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for DEL command\n",
553            data->vlan_mac.mac, data->vlan_mac.vlan);
554
555         list_for_each_entry(pos, &o->head, link)
556                 if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
557                     (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
558                              ETH_ALEN)) &&
559                     (data->vlan_mac.is_inner_mac ==
560                      pos->u.vlan_mac.is_inner_mac))
561                         return pos;
562
563         return NULL;
564 }
565
566 /* check_move() callback */
567 static bool bnx2x_check_move(struct bnx2x *bp,
568                              struct bnx2x_vlan_mac_obj *src_o,
569                              struct bnx2x_vlan_mac_obj *dst_o,
570                              union bnx2x_classification_ramrod_data *data)
571 {
572         struct bnx2x_vlan_mac_registry_elem *pos;
573         int rc;
574
575         /* Check if we can delete the requested configuration from the first
576          * object.
577          */
578         pos = src_o->check_del(bp, src_o, data);
579
580         /*  check if configuration can be added */
581         rc = dst_o->check_add(bp, dst_o, data);
582
583         /* If this classification can not be added (is already set)
584          * or can't be deleted - return an error.
585          */
586         if (rc || !pos)
587                 return false;
588
589         return true;
590 }
591
/* check_move() callback that always fails, rejecting any MOVE command. */
static bool bnx2x_check_move_always_err(
	struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *src_o,
	struct bnx2x_vlan_mac_obj *dst_o,
	union bnx2x_classification_ramrod_data *data)
{
	return false;
}
600
601 static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
602 {
603         struct bnx2x_raw_obj *raw = &o->raw;
604         u8 rx_tx_flag = 0;
605
606         if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
607             (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
608                 rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;
609
610         if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
611             (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
612                 rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;
613
614         return rx_tx_flag;
615 }
616
/**
 * bnx2x_set_mac_in_nig - program or disable an LLH CAM MAC entry in the NIG
 *
 * @bp:       device handle
 * @add:      true to program the entry, false to only disable it
 * @dev_addr: MAC address to program (used only when @add is true)
 * @index:    LLH CAM line to use
 *
 * Silently returns unless in switch-independent (MF_SI) or AFEX mode, or
 * when @index exceeds the PF's CAM line range.
 */
void bnx2x_set_mac_in_nig(struct bnx2x *bp,
			  bool add, unsigned char *dev_addr, int index)
{
	u32 wb_data[2];
	u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
			 NIG_REG_LLH0_FUNC_MEM;

	if (!IS_MF_SI(bp) && !IS_MF_AFEX(bp))
		return;

	if (index > BNX2X_LLH_CAM_MAX_PF_LINE)
		return;

	DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n",
			 (add ? "ADD" : "DELETE"), index);

	if (add) {
		/* LLH_FUNC_MEM is a u64 WB register */
		reg_offset += 8*index;

		/* Pack the 6 MAC bytes into the two 32-bit WB halves */
		wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
			      (dev_addr[4] <<  8) |  dev_addr[5]);
		wb_data[1] = ((dev_addr[0] <<  8) |  dev_addr[1]);

		REG_WR_DMAE(bp, reg_offset, wb_data, 2);
	}

	/* Enable (or disable) the CAM line after the data is in place */
	REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
				  NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add);
}
647
648 /**
649  * bnx2x_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
650  *
651  * @bp:         device handle
652  * @o:          queue for which we want to configure this rule
653  * @add:        if true the command is an ADD command, DEL otherwise
654  * @opcode:     CLASSIFY_RULE_OPCODE_XXX
655  * @hdr:        pointer to a header to setup
656  *
657  */
658 static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp,
659         struct bnx2x_vlan_mac_obj *o, bool add, int opcode,
660         struct eth_classify_cmd_header *hdr)
661 {
662         struct bnx2x_raw_obj *raw = &o->raw;
663
664         hdr->client_id = raw->cl_id;
665         hdr->func_id = raw->func_id;
666
667         /* Rx or/and Tx (internal switching) configuration ? */
668         hdr->cmd_general_data |=
669                 bnx2x_vlan_mac_get_rx_tx_flag(o);
670
671         if (add)
672                 hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;
673
674         hdr->cmd_general_data |=
675                 (opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
676 }
677
/**
 * bnx2x_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
 *
 * @cid:        connection id
 * @type:       BNX2X_FILTER_XXX_PENDING
 * @hdr:        pointer to header to setup
 * @rule_cnt:   number of rules carried by this ramrod (stored as u8)
 *
 * currently we always configure one rule and echo field to contain a CID and an
 * opcode type.
 */
static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type,
				struct eth_classify_header *hdr, int rule_cnt)
{
	hdr->echo = cpu_to_le32((cid & BNX2X_SWCID_MASK) |
				(type << BNX2X_SWCID_SHIFT));
	hdr->rule_cnt = (u8)rule_cnt;
}
696
/* hw_config() callbacks */
/**
 * bnx2x_set_one_mac_e2 - fill MAC rule(s) into the E2 classify ramrod data
 *
 * @bp:         device handle
 * @o:          vlan_mac object the rule belongs to
 * @elem:       exe-queue element carrying the command, flags and MAC
 * @rule_idx:   index of the first rule to fill in the ramrod data
 * @cam_offset: unused on this path (FW manages the E2 CAM)
 *
 * For a MOVE command two rules are written: a DEL-style rule for this
 * queue followed by an ADD rule for the target queue.
 */
static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
				 struct bnx2x_vlan_mac_obj *o,
				 struct bnx2x_exeq_elem *elem, int rule_idx,
				 int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct eth_classify_rules_ramrod_data *data =
		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
	int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
	unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
	u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac;

	/* Set LLH CAM entry: currently only iSCSI and ETH macs are
	 * relevant. In addition, current implementation is tuned for a
	 * single ETH MAC.
	 *
	 * When multiple unicast ETH MACs PF configuration in switch
	 * independent mode is required (NetQ, multiple netdev MACs,
	 * etc.), consider better utilisation of 8 per function MAC
	 * entries in the LLH register. There is also
	 * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that complete the
	 * total number of CAM entries to 16.
	 *
	 * Currently we won't configure NIG for MACs other than a primary ETH
	 * MAC and iSCSI L2 MAC.
	 *
	 * If this MAC is moving from one Queue to another, no need to change
	 * NIG configuration.
	 */
	if (cmd != BNX2X_VLAN_MAC_MOVE) {
		if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags))
			bnx2x_set_mac_in_nig(bp, add, mac,
					     BNX2X_LLH_CAM_ISCSI_ETH_LINE);
		else if (test_bit(BNX2X_ETH_MAC, vlan_mac_flags))
			bnx2x_set_mac_in_nig(bp, add, mac,
					     BNX2X_LLH_CAM_ETH_LINE);
	}

	/* Reset the ramrod data buffer for the first rule */
	if (rule_idx == 0)
		memset(data, 0, sizeof(*data));

	/* Setup a command header */
	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC,
				      &rule_entry->mac.header);

	DP(BNX2X_MSG_SP, "About to %s MAC %pM for Queue %d\n",
	   (add ? "add" : "delete"), mac, raw->cl_id);

	/* Set a MAC itself */
	bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
			      &rule_entry->mac.mac_mid,
			      &rule_entry->mac.mac_lsb, mac);
	rule_entry->mac.inner_mac =
		cpu_to_le16(elem->cmd_data.vlan_mac.u.mac.is_inner_mac);

	/* MOVE: Add a rule that will add this MAC to the target Queue */
	if (cmd == BNX2X_VLAN_MAC_MOVE) {
		rule_entry++;
		rule_cnt++;

		/* Setup ramrod data */
		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
					elem->cmd_data.vlan_mac.target_obj,
					      true, CLASSIFY_RULE_OPCODE_MAC,
					      &rule_entry->mac.header);

		/* Set a MAC itself */
		bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
				      &rule_entry->mac.mac_mid,
				      &rule_entry->mac.mac_lsb, mac);
		rule_entry->mac.inner_mac =
			cpu_to_le16(elem->cmd_data.vlan_mac.
						u.mac.is_inner_mac);
	}

	/* Set the ramrod data header */
	/* TODO: take this to the higher level in order to prevent multiple
		 writing */
	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
					rule_cnt);
}
782
/**
 * bnx2x_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod
 *
 * @bp:         device handle
 * @o:          queue
 * @type:       BNX2X_FILTER_XXX_PENDING value encoded into the echo field
 * @cam_offset: offset in cam memory
 * @hdr:        pointer to a header to setup
 *
 * E1/E1H: always describes a single configuration entry (length = 1).
 */
static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o, int type, int cam_offset,
	struct mac_configuration_hdr *hdr)
{
	struct bnx2x_raw_obj *r = &o->raw;

	hdr->length = 1;
	hdr->offset = (u8)cam_offset;
	hdr->client_id = cpu_to_le16(0xff);
	hdr->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
				(type << BNX2X_SWCID_SHIFT));
}
806
807 static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp,
808         struct bnx2x_vlan_mac_obj *o, bool add, int opcode, u8 *mac,
809         u16 vlan_id, struct mac_configuration_entry *cfg_entry)
810 {
811         struct bnx2x_raw_obj *r = &o->raw;
812         u32 cl_bit_vec = (1 << r->cl_id);
813
814         cfg_entry->clients_bit_vector = cpu_to_le32(cl_bit_vec);
815         cfg_entry->pf_id = r->func_id;
816         cfg_entry->vlan_id = cpu_to_le16(vlan_id);
817
818         if (add) {
819                 SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
820                          T_ETH_MAC_COMMAND_SET);
821                 SET_FLAG(cfg_entry->flags,
822                          MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, opcode);
823
824                 /* Set a MAC in a ramrod data */
825                 bnx2x_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
826                                       &cfg_entry->middle_mac_addr,
827                                       &cfg_entry->lsb_mac_addr, mac);
828         } else
829                 SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
830                          T_ETH_MAC_COMMAND_INVALIDATE);
831 }
832
833 static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp,
834         struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, bool add,
835         u8 *mac, u16 vlan_id, int opcode, struct mac_configuration_cmd *config)
836 {
837         struct mac_configuration_entry *cfg_entry = &config->config_table[0];
838         struct bnx2x_raw_obj *raw = &o->raw;
839
840         bnx2x_vlan_mac_set_rdata_hdr_e1x(bp, o, type, cam_offset,
841                                          &config->hdr);
842         bnx2x_vlan_mac_set_cfg_entry_e1x(bp, o, add, opcode, mac, vlan_id,
843                                          cfg_entry);
844
845         DP(BNX2X_MSG_SP, "%s MAC %pM CLID %d CAM offset %d\n",
846                          (add ? "setting" : "clearing"),
847                          mac, raw->cl_id, cam_offset);
848 }
849
850 /**
851  * bnx2x_set_one_mac_e1x - fill a single MAC rule ramrod data
852  *
853  * @bp:         device handle
854  * @o:          bnx2x_vlan_mac_obj
855  * @elem:       bnx2x_exeq_elem
856  * @rule_idx:   rule_idx
857  * @cam_offset: cam_offset
858  */
859 static void bnx2x_set_one_mac_e1x(struct bnx2x *bp,
860                                   struct bnx2x_vlan_mac_obj *o,
861                                   struct bnx2x_exeq_elem *elem, int rule_idx,
862                                   int cam_offset)
863 {
864         struct bnx2x_raw_obj *raw = &o->raw;
865         struct mac_configuration_cmd *config =
866                 (struct mac_configuration_cmd *)(raw->rdata);
867         /* 57710 and 57711 do not support MOVE command,
868          * so it's either ADD or DEL
869          */
870         bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
871                 true : false;
872
873         /* Reset the ramrod data buffer */
874         memset(config, 0, sizeof(*config));
875
876         bnx2x_vlan_mac_set_rdata_e1x(bp, o, raw->state,
877                                      cam_offset, add,
878                                      elem->cmd_data.vlan_mac.u.mac.mac, 0,
879                                      ETH_VLAN_FILTER_ANY_VLAN, config);
880 }
881
/**
 * bnx2x_set_one_vlan_e2 - fill a single VLAN classification rule ramrod data
 *
 * @bp:         device handle
 * @o:          bnx2x_vlan_mac_obj
 * @elem:       execution queue element describing the VLAN and the command
 * @rule_idx:   index of this rule in the shared ramrod data buffer
 * @cam_offset: unused here - E2 classification is rule based, not CAM based
 *
 * Writes one rule (two for MOVE) into the raw object's ramrod data buffer
 * and updates the buffer header's rule count.
 */
static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
                                  struct bnx2x_vlan_mac_obj *o,
                                  struct bnx2x_exeq_elem *elem, int rule_idx,
                                  int cam_offset)
{
        struct bnx2x_raw_obj *raw = &o->raw;
        struct eth_classify_rules_ramrod_data *data =
                (struct eth_classify_rules_ramrod_data *)(raw->rdata);
        int rule_cnt = rule_idx + 1;
        union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
        enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
        bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
        u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;

        /* Reset the ramrod data buffer for the first rule */
        if (rule_idx == 0)
                memset(data, 0, sizeof(*data));

        /* Set a rule header */
        bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_VLAN,
                                      &rule_entry->vlan.header);

        DP(BNX2X_MSG_SP, "About to %s VLAN %d\n", (add ? "add" : "delete"),
                         vlan);

        /* Set a VLAN itself */
        rule_entry->vlan.vlan = cpu_to_le16(vlan);

        /* MOVE: add a second rule that adds this VLAN to the target Queue */
        if (cmd == BNX2X_VLAN_MAC_MOVE) {
                rule_entry++;
                rule_cnt++;

                /* Setup ramrod data: an ADD rule on the destination object */
                bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
                                        elem->cmd_data.vlan_mac.target_obj,
                                              true, CLASSIFY_RULE_OPCODE_VLAN,
                                              &rule_entry->vlan.header);

                /* Set a VLAN itself */
                rule_entry->vlan.vlan = cpu_to_le16(vlan);
        }

        /* Set the ramrod data header */
        /* TODO: take this to the higher level in order to prevent multiple
         * writing
         */
        bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
                                        rule_cnt);
}
931
/**
 * bnx2x_set_one_vlan_mac_e2 - fill a single VLAN-MAC pair rule ramrod data
 *
 * @bp:         device handle
 * @o:          bnx2x_vlan_mac_obj
 * @elem:       execution queue element describing the pair and the command
 * @rule_idx:   index of this rule in the shared ramrod data buffer
 * @cam_offset: unused here - E2 classification is rule based, not CAM based
 *
 * Writes one pair rule (two for MOVE) into the raw object's ramrod data
 * buffer and updates the buffer header's rule count.
 */
static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
                                      struct bnx2x_vlan_mac_obj *o,
                                      struct bnx2x_exeq_elem *elem,
                                      int rule_idx, int cam_offset)
{
        struct bnx2x_raw_obj *raw = &o->raw;
        struct eth_classify_rules_ramrod_data *data =
                (struct eth_classify_rules_ramrod_data *)(raw->rdata);
        int rule_cnt = rule_idx + 1;
        union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
        enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
        bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
        u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
        u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;

        /* Reset the ramrod data buffer for the first rule */
        if (rule_idx == 0)
                memset(data, 0, sizeof(*data));

        /* Set a rule header */
        bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
                                      &rule_entry->pair.header);

        /* Set VLAN and MAC themselves */
        rule_entry->pair.vlan = cpu_to_le16(vlan);
        bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
                              &rule_entry->pair.mac_mid,
                              &rule_entry->pair.mac_lsb, mac);
        rule_entry->pair.inner_mac =
                cpu_to_le16(elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac);
        /* MOVE: add a second rule that adds this pair to the target Queue */
        if (cmd == BNX2X_VLAN_MAC_MOVE) {
                rule_entry++;
                rule_cnt++;

                /* Setup ramrod data: an ADD rule on the destination object */
                bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
                                        elem->cmd_data.vlan_mac.target_obj,
                                              true, CLASSIFY_RULE_OPCODE_PAIR,
                                              &rule_entry->pair.header);

                /* Set VLAN and MAC for the second rule as well */
                rule_entry->pair.vlan = cpu_to_le16(vlan);
                bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
                                      &rule_entry->pair.mac_mid,
                                      &rule_entry->pair.mac_lsb, mac);
                rule_entry->pair.inner_mac =
                        cpu_to_le16(elem->cmd_data.vlan_mac.u.
                                                vlan_mac.is_inner_mac);
        }

        /* Set the ramrod data header */
        /* TODO: take this to the higher level in order to prevent multiple
         * writing
         */
        bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
                                        rule_cnt);
}
989
/**
 * bnx2x_set_one_vlan_mac_e1h - fill a single VLAN-MAC rule ramrod data (E1H)
 *
 * @bp:         device handle
 * @o:          bnx2x_vlan_mac_obj
 * @elem:       execution queue element describing the pair and the command
 * @rule_idx:   rule index (unused here - an E1H command carries one entry)
 * @cam_offset: CAM offset to configure
 */
static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
                                       struct bnx2x_vlan_mac_obj *o,
                                       struct bnx2x_exeq_elem *elem,
                                       int rule_idx, int cam_offset)
{
        struct bnx2x_raw_obj *raw = &o->raw;
        struct mac_configuration_cmd *config =
                (struct mac_configuration_cmd *)(raw->rdata);
        /* 57710 and 57711 do not support MOVE command,
         * so it's either ADD or DEL
         */
        bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
                true : false;

        /* Reset the ramrod data buffer */
        memset(config, 0, sizeof(*config));

        bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
                                     cam_offset, add,
                                     elem->cmd_data.vlan_mac.u.vlan_mac.mac,
                                     elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
                                     ETH_VLAN_FILTER_CLASSIFY, config);
}
1022
/* Step a registry iterator to the next element linked through @member.
 * NOTE(review): newer kernels provide list_next_entry() in <linux/list.h>;
 * this local copy should be dropped once that header version is available.
 */
#define list_next_entry(pos, member) \
        list_entry((pos)->member.next, typeof(*(pos)), member)
1025
1026 /**
1027  * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
1028  *
1029  * @bp:         device handle
1030  * @p:          command parameters
1031  * @ppos:       pointer to the cookie
1032  *
1033  * reconfigure next MAC/VLAN/VLAN-MAC element from the
1034  * previously configured elements list.
1035  *
1036  * from command parameters only RAMROD_COMP_WAIT bit in ramrod_flags is taken
1037  * into an account
1038  *
1039  * pointer to the cookie  - that should be given back in the next call to make
1040  * function handle the next element. If *ppos is set to NULL it will restart the
1041  * iterator. If returned *ppos == NULL this means that the last element has been
1042  * handled.
1043  *
1044  */
1045 static int bnx2x_vlan_mac_restore(struct bnx2x *bp,
1046                            struct bnx2x_vlan_mac_ramrod_params *p,
1047                            struct bnx2x_vlan_mac_registry_elem **ppos)
1048 {
1049         struct bnx2x_vlan_mac_registry_elem *pos;
1050         struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1051
1052         /* If list is empty - there is nothing to do here */
1053         if (list_empty(&o->head)) {
1054                 *ppos = NULL;
1055                 return 0;
1056         }
1057
1058         /* make a step... */
1059         if (*ppos == NULL)
1060                 *ppos = list_first_entry(&o->head,
1061                                          struct bnx2x_vlan_mac_registry_elem,
1062                                          link);
1063         else
1064                 *ppos = list_next_entry(*ppos, link);
1065
1066         pos = *ppos;
1067
1068         /* If it's the last step - return NULL */
1069         if (list_is_last(&pos->link, &o->head))
1070                 *ppos = NULL;
1071
1072         /* Prepare a 'user_req' */
1073         memcpy(&p->user_req.u, &pos->u, sizeof(pos->u));
1074
1075         /* Set the command */
1076         p->user_req.cmd = BNX2X_VLAN_MAC_ADD;
1077
1078         /* Set vlan_mac_flags */
1079         p->user_req.vlan_mac_flags = pos->vlan_mac_flags;
1080
1081         /* Set a restore bit */
1082         __set_bit(RAMROD_RESTORE, &p->ramrod_flags);
1083
1084         return bnx2x_config_vlan_mac(bp, p);
1085 }
1086
1087 /* bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a
1088  * pointer to an element with a specific criteria and NULL if such an element
1089  * hasn't been found.
1090  */
1091 static struct bnx2x_exeq_elem *bnx2x_exeq_get_mac(
1092         struct bnx2x_exe_queue_obj *o,
1093         struct bnx2x_exeq_elem *elem)
1094 {
1095         struct bnx2x_exeq_elem *pos;
1096         struct bnx2x_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;
1097
1098         /* Check pending for execution commands */
1099         list_for_each_entry(pos, &o->exe_queue, link)
1100                 if (!memcmp(&pos->cmd_data.vlan_mac.u.mac, data,
1101                               sizeof(*data)) &&
1102                     (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1103                         return pos;
1104
1105         return NULL;
1106 }
1107
1108 static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan(
1109         struct bnx2x_exe_queue_obj *o,
1110         struct bnx2x_exeq_elem *elem)
1111 {
1112         struct bnx2x_exeq_elem *pos;
1113         struct bnx2x_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan;
1114
1115         /* Check pending for execution commands */
1116         list_for_each_entry(pos, &o->exe_queue, link)
1117                 if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan, data,
1118                               sizeof(*data)) &&
1119                     (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1120                         return pos;
1121
1122         return NULL;
1123 }
1124
1125 static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac(
1126         struct bnx2x_exe_queue_obj *o,
1127         struct bnx2x_exeq_elem *elem)
1128 {
1129         struct bnx2x_exeq_elem *pos;
1130         struct bnx2x_vlan_mac_ramrod_data *data =
1131                 &elem->cmd_data.vlan_mac.u.vlan_mac;
1132
1133         /* Check pending for execution commands */
1134         list_for_each_entry(pos, &o->exe_queue, link)
1135                 if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
1136                               sizeof(*data)) &&
1137                     (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1138                         return pos;
1139
1140         return NULL;
1141 }
1142
/**
 * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed
 *
 * @bp:         device handle
 * @qo:         bnx2x_qable_obj
 * @elem:       bnx2x_exeq_elem
 *
 * Checks that the requested configuration can be added. If yes and if
 * requested, consume CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 *
 * Returns 0 on success, -EEXIST if an identical ADD is already pending,
 * -EINVAL if a CAM credit could not be obtained, or the registry check's
 * error code.
 */
static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp,
                                              union bnx2x_qable_obj *qo,
                                              struct bnx2x_exeq_elem *elem)
{
        struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
        struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
        int rc;

        /* Check the registry */
        rc = o->check_add(bp, o, &elem->cmd_data.vlan_mac.u);
        if (rc) {
                DP(BNX2X_MSG_SP, "ADD command is not allowed considering current registry state.\n");
                return rc;
        }

        /* Check if there is a pending ADD command for this
         * MAC/VLAN/VLAN-MAC. Return an error if there is.
         */
        if (exeq->get(exeq, elem)) {
                DP(BNX2X_MSG_SP, "There is a pending ADD command already\n");
                return -EEXIST;
        }

        /* TODO: Check the pending MOVE from other objects where this
         * object is a destination object.
         */

        /* Consume the credit if not requested not to.
         * NOTE: short-circuit evaluation - get_credit() runs only when
         * BNX2X_DONT_CONSUME_CAM_CREDIT is clear.
         */
        if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
                       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
            o->get_credit(o)))
                return -EINVAL;

        return 0;
}
1191
/**
 * bnx2x_validate_vlan_mac_del - check if the DEL command can be executed
 *
 * @bp:         device handle
 * @qo:         quable object to check
 * @elem:       element that needs to be deleted
 *
 * Checks that the requested configuration can be deleted. If yes and if
 * requested, returns a CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 *
 * Returns 0 on success, -EEXIST if the entry isn't registered or a DEL is
 * already pending, -EINVAL on a pending MOVE or a credit-pool failure.
 */
static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp,
                                              union bnx2x_qable_obj *qo,
                                              struct bnx2x_exeq_elem *elem)
{
        struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
        struct bnx2x_vlan_mac_registry_elem *pos;
        struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
        struct bnx2x_exeq_elem query_elem;

        /* If this classification can not be deleted (doesn't exist)
         * - return a BNX2X_EXIST.
         */
        pos = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);
        if (!pos) {
                DP(BNX2X_MSG_SP, "DEL command is not allowed considering current registry state\n");
                return -EEXIST;
        }

        /* Check if there are pending DEL or MOVE commands for this
         * MAC/VLAN/VLAN-MAC. Return an error if so.
         */
        memcpy(&query_elem, elem, sizeof(query_elem));

        /* Check for MOVE commands */
        query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_MOVE;
        if (exeq->get(exeq, &query_elem)) {
                BNX2X_ERR("There is a pending MOVE command already\n");
                return -EINVAL;
        }

        /* Check for DEL commands */
        if (exeq->get(exeq, elem)) {
                DP(BNX2X_MSG_SP, "There is a pending DEL command already\n");
                return -EEXIST;
        }

        /* Return the credit to the credit pool if not requested not to.
         * NOTE: short-circuit - put_credit() runs only when
         * BNX2X_DONT_CONSUME_CAM_CREDIT is clear.
         */
        if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
                       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
            o->put_credit(o))) {
                BNX2X_ERR("Failed to return a credit\n");
                return -EINVAL;
        }

        return 0;
}
1250
/**
 * bnx2x_validate_vlan_mac_move - check if the MOVE command can be executed
 *
 * @bp:         device handle
 * @qo:         quable object to check (source)
 * @elem:       element that needs to be moved
 *
 * Checks that the requested configuration can be moved. If yes and if
 * requested, returns a CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 *
 * On success a credit has been consumed from the destination object's pool
 * and one returned to the source object's pool.
 */
static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
                                               union bnx2x_qable_obj *qo,
                                               struct bnx2x_exeq_elem *elem)
{
        struct bnx2x_vlan_mac_obj *src_o = &qo->vlan_mac;
        struct bnx2x_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
        struct bnx2x_exeq_elem query_elem;
        struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue;
        struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue;

        /* Check if we can perform this operation based on the current registry
         * state.
         */
        if (!src_o->check_move(bp, src_o, dest_o,
                               &elem->cmd_data.vlan_mac.u)) {
                DP(BNX2X_MSG_SP, "MOVE command is not allowed considering current registry state\n");
                return -EINVAL;
        }

        /* Check if there is an already pending DEL or MOVE command for the
         * source object or ADD command for a destination object. Return an
         * error if so.
         */
        memcpy(&query_elem, elem, sizeof(query_elem));

        /* Check DEL on source */
        query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
        if (src_exeq->get(src_exeq, &query_elem)) {
                BNX2X_ERR("There is a pending DEL command on the source queue already\n");
                return -EINVAL;
        }

        /* Check MOVE on source */
        if (src_exeq->get(src_exeq, elem)) {
                DP(BNX2X_MSG_SP, "There is a pending MOVE command already\n");
                return -EEXIST;
        }

        /* Check ADD on destination */
        query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
        if (dest_exeq->get(dest_exeq, &query_elem)) {
                BNX2X_ERR("There is a pending ADD command on the destination queue already\n");
                return -EINVAL;
        }

        /* Consume the credit if not requested not to */
        if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
                       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
            dest_o->get_credit(dest_o)))
                return -EINVAL;

        if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
                       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
            src_o->put_credit(src_o))) {
                /* return the credit taken from dest... */
                dest_o->put_credit(dest_o);
                return -EINVAL;
        }

        return 0;
}
1324
1325 static int bnx2x_validate_vlan_mac(struct bnx2x *bp,
1326                                    union bnx2x_qable_obj *qo,
1327                                    struct bnx2x_exeq_elem *elem)
1328 {
1329         switch (elem->cmd_data.vlan_mac.cmd) {
1330         case BNX2X_VLAN_MAC_ADD:
1331                 return bnx2x_validate_vlan_mac_add(bp, qo, elem);
1332         case BNX2X_VLAN_MAC_DEL:
1333                 return bnx2x_validate_vlan_mac_del(bp, qo, elem);
1334         case BNX2X_VLAN_MAC_MOVE:
1335                 return bnx2x_validate_vlan_mac_move(bp, qo, elem);
1336         default:
1337                 return -EINVAL;
1338         }
1339 }
1340
1341 static int bnx2x_remove_vlan_mac(struct bnx2x *bp,
1342                                   union bnx2x_qable_obj *qo,
1343                                   struct bnx2x_exeq_elem *elem)
1344 {
1345         int rc = 0;
1346
1347         /* If consumption wasn't required, nothing to do */
1348         if (test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1349                      &elem->cmd_data.vlan_mac.vlan_mac_flags))
1350                 return 0;
1351
1352         switch (elem->cmd_data.vlan_mac.cmd) {
1353         case BNX2X_VLAN_MAC_ADD:
1354         case BNX2X_VLAN_MAC_MOVE:
1355                 rc = qo->vlan_mac.put_credit(&qo->vlan_mac);
1356                 break;
1357         case BNX2X_VLAN_MAC_DEL:
1358                 rc = qo->vlan_mac.get_credit(&qo->vlan_mac);
1359                 break;
1360         default:
1361                 return -EINVAL;
1362         }
1363
1364         if (rc != true)
1365                 return -EINVAL;
1366
1367         return 0;
1368 }
1369
1370 /**
1371  * bnx2x_wait_vlan_mac - passively wait for 5 seconds until all work completes.
1372  *
1373  * @bp:         device handle
1374  * @o:          bnx2x_vlan_mac_obj
1375  *
1376  */
1377 static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
1378                                struct bnx2x_vlan_mac_obj *o)
1379 {
1380         int cnt = 5000, rc;
1381         struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1382         struct bnx2x_raw_obj *raw = &o->raw;
1383
1384         while (cnt--) {
1385                 /* Wait for the current command to complete */
1386                 rc = raw->wait_comp(bp, raw);
1387                 if (rc)
1388                         return rc;
1389
1390                 /* Wait until there are no pending commands */
1391                 if (!bnx2x_exe_queue_empty(exeq))
1392                         usleep_range(1000, 2000);
1393                 else
1394                         return 0;
1395         }
1396
1397         return -EBUSY;
1398 }
1399
/**
 * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod
 *
 * @bp:           device handle
 * @o:            bnx2x_vlan_mac_obj
 * @cqe:          completion element from the event ring
 * @ramrod_flags: if RAMROD_CONT is set, schedule the next execution chunk
 *
 * Returns a positive value when more commands remain queued, 0 when the
 * queue has drained, or a negative error code.
 */
static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
                                   struct bnx2x_vlan_mac_obj *o,
                                   union event_ring_elem *cqe,
                                   unsigned long *ramrod_flags)
{
        struct bnx2x_raw_obj *r = &o->raw;
        int rc;

        /* Reset pending list */
        bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);

        /* Clear pending */
        r->clear_pending(r);

        /* If ramrod failed this is most likely a SW bug */
        if (cqe->message.error)
                return -EINVAL;

        /* Run the next bulk of pending commands if requested */
        if (test_bit(RAMROD_CONT, ramrod_flags)) {
                rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
                if (rc < 0)
                        return rc;
        }

        /* If there is more work to do return PENDING */
        if (!bnx2x_exe_queue_empty(&o->exe_queue))
                return 1;

        return 0;
}
1440
/**
 * bnx2x_optimize_vlan_mac - optimize ADD and DEL commands.
 *
 * @bp:   device handle
 * @qo:   bnx2x_qable_obj
 * @elem: bnx2x_exeq_elem
 *
 * If the pending queue already holds the opposite command (ADD vs. DEL) for
 * the same classification, both commands cancel out: the queued one is
 * removed and its credit accounting is reversed.
 *
 * Returns 1 if the command was optimized out, 0 if it should still run,
 * or -EINVAL on a credit-pool failure.
 */
static int bnx2x_optimize_vlan_mac(struct bnx2x *bp,
                                   union bnx2x_qable_obj *qo,
                                   struct bnx2x_exeq_elem *elem)
{
        struct bnx2x_exeq_elem query, *pos;
        struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
        struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;

        /* Look for the opposite command in the execution queue */
        memcpy(&query, elem, sizeof(query));

        switch (elem->cmd_data.vlan_mac.cmd) {
        case BNX2X_VLAN_MAC_ADD:
                query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
                break;
        case BNX2X_VLAN_MAC_DEL:
                query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
                break;
        default:
                /* Don't handle anything other than ADD or DEL */
                return 0;
        }

        /* If we found the appropriate element - delete it */
        pos = exeq->get(exeq, &query);
        if (pos) {

                /* Return the credit of the optimized command */
                if (!test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
                              &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
                        if ((query.cmd_data.vlan_mac.cmd ==
                             BNX2X_VLAN_MAC_ADD) && !o->put_credit(o)) {
                                BNX2X_ERR("Failed to return the credit for the optimized ADD command\n");
                                return -EINVAL;
                        } else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
                                BNX2X_ERR("Failed to recover the credit from the optimized DEL command\n");
                                return -EINVAL;
                        }
                }

                DP(BNX2X_MSG_SP, "Optimizing %s command\n",
                           (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
                           "ADD" : "DEL");

                list_del(&pos->link);
                bnx2x_exe_queue_free_elem(bp, pos);
                return 1;
        }

        return 0;
}
1498
/**
 * bnx2x_vlan_mac_get_registry_elem - prepare a registry element
 *
 * @bp:      device handle
 * @o:       vlan_mac object whose registry/CAM the element belongs to
 * @elem:    execution queue element describing the command
 * @restore: true when re-configuring a previously registered element
 * @re:      output - registry element to use for this command
 *
 * prepare a registry element according to the current command request:
 * a non-restore ADD/MOVE allocates a fresh element and a CAM offset,
 * while DEL/RESTORE looks the existing element up in the registry.
 * Returns 0 on success, negative errno otherwise.
 */
static inline int bnx2x_vlan_mac_get_registry_elem(
        struct bnx2x *bp,
        struct bnx2x_vlan_mac_obj *o,
        struct bnx2x_exeq_elem *elem,
        bool restore,
        struct bnx2x_vlan_mac_registry_elem **re)
{
        enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
        struct bnx2x_vlan_mac_registry_elem *reg_elem;

        /* Allocate a new registry element if needed. */
        if (!restore &&
            ((cmd == BNX2X_VLAN_MAC_ADD) || (cmd == BNX2X_VLAN_MAC_MOVE))) {
                reg_elem = kzalloc(sizeof(*reg_elem), GFP_ATOMIC);
                if (!reg_elem)
                        return -ENOMEM;

                /* Get a new CAM offset */
                if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
                        /* This shall never happen, because we have checked the
                         * CAM availability in the 'validate'.
                         */
                        WARN_ON(1);
                        kfree(reg_elem);
                        return -EINVAL;
                }

                DP(BNX2X_MSG_SP, "Got cam offset %d\n", reg_elem->cam_offset);

                /* Set a VLAN-MAC data */
                memcpy(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
                          sizeof(reg_elem->u));

                /* Copy the flags (needed for DEL and RESTORE flows) */
                reg_elem->vlan_mac_flags =
                        elem->cmd_data.vlan_mac.vlan_mac_flags;
        } else /* DEL, RESTORE */
                reg_elem = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);

        *re = reg_elem;
        return 0;
}
1552
/**
 * bnx2x_execute_vlan_mac - execute vlan mac command
 *
 * @bp:           device handle
 * @qo:           queueable vlan_mac object the chunk belongs to
 * @exe_chunk:    list of command elements to execute in a single ramrod
 * @ramrod_flags: RAMROD_RESTORE and RAMROD_DRV_CLR_ONLY are honoured here
 *
 * go and send a ramrod!
 *
 * Returns 1 when a ramrod has been posted (completion pending), 0 when only
 * the driver registry was updated (DRV_CLR_ONLY), negative errno on failure.
 */
static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
                                  union bnx2x_qable_obj *qo,
                                  struct list_head *exe_chunk,
                                  unsigned long *ramrod_flags)
{
        struct bnx2x_exeq_elem *elem;
        struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
        struct bnx2x_raw_obj *r = &o->raw;
        int rc, idx = 0;
        bool restore = test_bit(RAMROD_RESTORE, ramrod_flags);
        bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags);
        struct bnx2x_vlan_mac_registry_elem *reg_elem;
        enum bnx2x_vlan_mac_cmd cmd;

        /* If DRIVER_ONLY execution is requested, cleanup a registry
         * and exit. Otherwise send a ramrod to FW.
         */
        if (!drv_only) {
                WARN_ON(r->check_pending(r));

                /* Set pending */
                r->set_pending(r);

                /* Fill the ramrod data */
                list_for_each_entry(elem, exe_chunk, link) {
                        cmd = elem->cmd_data.vlan_mac.cmd;
                        /* We will add to the target object in MOVE command, so
                         * change the object for a CAM search.
                         */
                        if (cmd == BNX2X_VLAN_MAC_MOVE)
                                cam_obj = elem->cmd_data.vlan_mac.target_obj;
                        else
                                cam_obj = o;

                        rc = bnx2x_vlan_mac_get_registry_elem(bp, cam_obj,
                                                              elem, restore,
                                                              &reg_elem);
                        if (rc)
                                goto error_exit;

                        WARN_ON(!reg_elem);

                        /* Push a new entry into the registry */
                        if (!restore &&
                            ((cmd == BNX2X_VLAN_MAC_ADD) ||
                            (cmd == BNX2X_VLAN_MAC_MOVE)))
                                list_add(&reg_elem->link, &cam_obj->head);

                        /* Configure a single command in a ramrod data buffer */
                        o->set_one_rule(bp, o, elem, idx,
                                        reg_elem->cam_offset);

                        /* MOVE command consumes 2 entries in the ramrod data */
                        if (cmd == BNX2X_VLAN_MAC_MOVE)
                                idx += 2;
                        else
                                idx++;
                }

                /* No need for an explicit memory barrier here as long we would
                 * need to ensure the ordering of writing to the SPQ element
                 * and updating of the SPQ producer which involves a memory
                 * read and we will have to put a full memory barrier there
                 * (inside bnx2x_sp_post()).
                 */

                rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid,
                                   U64_HI(r->rdata_mapping),
                                   U64_LO(r->rdata_mapping),
                                   ETH_CONNECTION_TYPE);
                if (rc)
                        goto error_exit;
        }

        /* Now, when we are done with the ramrod - clean up the registry:
         * DEL removes its own entry, MOVE removes the source object's one
         * (the destination entry was pushed above / by get_registry_elem).
         */
        list_for_each_entry(elem, exe_chunk, link) {
                cmd = elem->cmd_data.vlan_mac.cmd;
                if ((cmd == BNX2X_VLAN_MAC_DEL) ||
                    (cmd == BNX2X_VLAN_MAC_MOVE)) {
                        reg_elem = o->check_del(bp, o,
                                                &elem->cmd_data.vlan_mac.u);

                        WARN_ON(!reg_elem);

                        o->put_cam_offset(o, reg_elem->cam_offset);
                        list_del(&reg_elem->link);
                        kfree(reg_elem);
                }
        }

        if (!drv_only)
                return 1;
        else
                return 0;

error_exit:
        r->clear_pending(r);

        /* Cleanup a registry in case of a failure */
        list_for_each_entry(elem, exe_chunk, link) {
                cmd = elem->cmd_data.vlan_mac.cmd;

                if (cmd == BNX2X_VLAN_MAC_MOVE)
                        cam_obj = elem->cmd_data.vlan_mac.target_obj;
                else
                        cam_obj = o;

                /* Delete all newly added above entries */
                if (!restore &&
                    ((cmd == BNX2X_VLAN_MAC_ADD) ||
                    (cmd == BNX2X_VLAN_MAC_MOVE))) {
                        reg_elem = o->check_del(bp, cam_obj,
                                                &elem->cmd_data.vlan_mac.u);
                        if (reg_elem) {
                                list_del(&reg_elem->link);
                                kfree(reg_elem);
                        }
                }
        }

        return rc;
}
1685
1686 static inline int bnx2x_vlan_mac_push_new_cmd(
1687         struct bnx2x *bp,
1688         struct bnx2x_vlan_mac_ramrod_params *p)
1689 {
1690         struct bnx2x_exeq_elem *elem;
1691         struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1692         bool restore = test_bit(RAMROD_RESTORE, &p->ramrod_flags);
1693
1694         /* Allocate the execution queue element */
1695         elem = bnx2x_exe_queue_alloc_elem(bp);
1696         if (!elem)
1697                 return -ENOMEM;
1698
1699         /* Set the command 'length' */
1700         switch (p->user_req.cmd) {
1701         case BNX2X_VLAN_MAC_MOVE:
1702                 elem->cmd_len = 2;
1703                 break;
1704         default:
1705                 elem->cmd_len = 1;
1706         }
1707
1708         /* Fill the object specific info */
1709         memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));
1710
1711         /* Try to add a new command to the pending list */
1712         return bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore);
1713 }
1714
1715 /**
1716  * bnx2x_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
1717  *
1718  * @bp:   device handle
1719  * @p:
1720  *
1721  */
1722 int bnx2x_config_vlan_mac(
1723         struct bnx2x *bp,
1724         struct bnx2x_vlan_mac_ramrod_params *p)
1725 {
1726         int rc = 0;
1727         struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1728         unsigned long *ramrod_flags = &p->ramrod_flags;
1729         bool cont = test_bit(RAMROD_CONT, ramrod_flags);
1730         struct bnx2x_raw_obj *raw = &o->raw;
1731
1732         /*
1733          * Add new elements to the execution list for commands that require it.
1734          */
1735         if (!cont) {
1736                 rc = bnx2x_vlan_mac_push_new_cmd(bp, p);
1737                 if (rc)
1738                         return rc;
1739         }
1740
1741         /* If nothing will be executed further in this iteration we want to
1742          * return PENDING if there are pending commands
1743          */
1744         if (!bnx2x_exe_queue_empty(&o->exe_queue))
1745                 rc = 1;
1746
1747         if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags))  {
1748                 DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.\n");
1749                 raw->clear_pending(raw);
1750         }
1751
1752         /* Execute commands if required */
1753         if (cont || test_bit(RAMROD_EXEC, ramrod_flags) ||
1754             test_bit(RAMROD_COMP_WAIT, ramrod_flags)) {
1755                 rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
1756                 if (rc < 0)
1757                         return rc;
1758         }
1759
1760         /* RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
1761          * then user want to wait until the last command is done.
1762          */
1763         if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
1764                 /* Wait maximum for the current exe_queue length iterations plus
1765                  * one (for the current pending command).
1766                  */
1767                 int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1;
1768
1769                 while (!bnx2x_exe_queue_empty(&o->exe_queue) &&
1770                        max_iterations--) {
1771
1772                         /* Wait for the current command to complete */
1773                         rc = raw->wait_comp(bp, raw);
1774                         if (rc)
1775                                 return rc;
1776
1777                         /* Make a next step */
1778                         rc = bnx2x_exe_queue_step(bp, &o->exe_queue,
1779                                                   ramrod_flags);
1780                         if (rc < 0)
1781                                 return rc;
1782                 }
1783
1784                 return 0;
1785         }
1786
1787         return rc;
1788 }
1789
1790 /**
1791  * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
1792  *
1793  * @bp:                 device handle
1794  * @o:
1795  * @vlan_mac_flags:
1796  * @ramrod_flags:       execution flags to be used for this deletion
1797  *
1798  * if the last operation has completed successfully and there are no
1799  * more elements left, positive value if the last operation has completed
1800  * successfully and there are more previously configured elements, negative
1801  * value is current operation has failed.
1802  */
1803 static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
1804                                   struct bnx2x_vlan_mac_obj *o,
1805                                   unsigned long *vlan_mac_flags,
1806                                   unsigned long *ramrod_flags)
1807 {
1808         struct bnx2x_vlan_mac_registry_elem *pos = NULL;
1809         int rc = 0;
1810         struct bnx2x_vlan_mac_ramrod_params p;
1811         struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1812         struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
1813
1814         /* Clear pending commands first */
1815
1816         spin_lock_bh(&exeq->lock);
1817
1818         list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
1819                 if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
1820                     *vlan_mac_flags) {
1821                         rc = exeq->remove(bp, exeq->owner, exeq_pos);
1822                         if (rc) {
1823                                 BNX2X_ERR("Failed to remove command\n");
1824                                 spin_unlock_bh(&exeq->lock);
1825                                 return rc;
1826                         }
1827                         list_del(&exeq_pos->link);
1828                         bnx2x_exe_queue_free_elem(bp, exeq_pos);
1829                 }
1830         }
1831
1832         spin_unlock_bh(&exeq->lock);
1833
1834         /* Prepare a command request */
1835         memset(&p, 0, sizeof(p));
1836         p.vlan_mac_obj = o;
1837         p.ramrod_flags = *ramrod_flags;
1838         p.user_req.cmd = BNX2X_VLAN_MAC_DEL;
1839
1840         /* Add all but the last VLAN-MAC to the execution queue without actually
1841          * execution anything.
1842          */
1843         __clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
1844         __clear_bit(RAMROD_EXEC, &p.ramrod_flags);
1845         __clear_bit(RAMROD_CONT, &p.ramrod_flags);
1846
1847         list_for_each_entry(pos, &o->head, link) {
1848                 if (pos->vlan_mac_flags == *vlan_mac_flags) {
1849                         p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
1850                         memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
1851                         rc = bnx2x_config_vlan_mac(bp, &p);
1852                         if (rc < 0) {
1853                                 BNX2X_ERR("Failed to add a new DEL command\n");
1854                                 return rc;
1855                         }
1856                 }
1857         }
1858
1859         p.ramrod_flags = *ramrod_flags;
1860         __set_bit(RAMROD_CONT, &p.ramrod_flags);
1861
1862         return bnx2x_config_vlan_mac(bp, &p);
1863 }
1864
1865 static inline void bnx2x_init_raw_obj(struct bnx2x_raw_obj *raw, u8 cl_id,
1866         u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, int state,
1867         unsigned long *pstate, bnx2x_obj_type type)
1868 {
1869         raw->func_id = func_id;
1870         raw->cid = cid;
1871         raw->cl_id = cl_id;
1872         raw->rdata = rdata;
1873         raw->rdata_mapping = rdata_mapping;
1874         raw->state = state;
1875         raw->pstate = pstate;
1876         raw->obj_type = type;
1877         raw->check_pending = bnx2x_raw_check_pending;
1878         raw->clear_pending = bnx2x_raw_clear_pending;
1879         raw->set_pending = bnx2x_raw_set_pending;
1880         raw->wait_comp = bnx2x_raw_wait;
1881 }
1882
1883 static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
1884         u8 cl_id, u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping,
1885         int state, unsigned long *pstate, bnx2x_obj_type type,
1886         struct bnx2x_credit_pool_obj *macs_pool,
1887         struct bnx2x_credit_pool_obj *vlans_pool)
1888 {
1889         INIT_LIST_HEAD(&o->head);
1890
1891         o->macs_pool = macs_pool;
1892         o->vlans_pool = vlans_pool;
1893
1894         o->delete_all = bnx2x_vlan_mac_del_all;
1895         o->restore = bnx2x_vlan_mac_restore;
1896         o->complete = bnx2x_complete_vlan_mac;
1897         o->wait = bnx2x_wait_vlan_mac;
1898
1899         bnx2x_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
1900                            state, pstate, type);
1901 }
1902
1903 void bnx2x_init_mac_obj(struct bnx2x *bp,
1904                         struct bnx2x_vlan_mac_obj *mac_obj,
1905                         u8 cl_id, u32 cid, u8 func_id, void *rdata,
1906                         dma_addr_t rdata_mapping, int state,
1907                         unsigned long *pstate, bnx2x_obj_type type,
1908                         struct bnx2x_credit_pool_obj *macs_pool)
1909 {
1910         union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)mac_obj;
1911
1912         bnx2x_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
1913                                    rdata_mapping, state, pstate, type,
1914                                    macs_pool, NULL);
1915
1916         /* CAM credit pool handling */
1917         mac_obj->get_credit = bnx2x_get_credit_mac;
1918         mac_obj->put_credit = bnx2x_put_credit_mac;
1919         mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
1920         mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
1921
1922         if (CHIP_IS_E1x(bp)) {
1923                 mac_obj->set_one_rule      = bnx2x_set_one_mac_e1x;
1924                 mac_obj->check_del         = bnx2x_check_mac_del;
1925                 mac_obj->check_add         = bnx2x_check_mac_add;
1926                 mac_obj->check_move        = bnx2x_check_move_always_err;
1927                 mac_obj->ramrod_cmd        = RAMROD_CMD_ID_ETH_SET_MAC;
1928
1929                 /* Exe Queue */
1930                 bnx2x_exe_queue_init(bp,
1931                                      &mac_obj->exe_queue, 1, qable_obj,
1932                                      bnx2x_validate_vlan_mac,
1933                                      bnx2x_remove_vlan_mac,
1934                                      bnx2x_optimize_vlan_mac,
1935                                      bnx2x_execute_vlan_mac,
1936                                      bnx2x_exeq_get_mac);
1937         } else {
1938                 mac_obj->set_one_rule      = bnx2x_set_one_mac_e2;
1939                 mac_obj->check_del         = bnx2x_check_mac_del;
1940                 mac_obj->check_add         = bnx2x_check_mac_add;
1941                 mac_obj->check_move        = bnx2x_check_move;
1942                 mac_obj->ramrod_cmd        =
1943                         RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
1944                 mac_obj->get_n_elements    = bnx2x_get_n_elements;
1945
1946                 /* Exe Queue */
1947                 bnx2x_exe_queue_init(bp,
1948                                      &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
1949                                      qable_obj, bnx2x_validate_vlan_mac,
1950                                      bnx2x_remove_vlan_mac,
1951                                      bnx2x_optimize_vlan_mac,
1952                                      bnx2x_execute_vlan_mac,
1953                                      bnx2x_exeq_get_mac);
1954         }
1955 }
1956
1957 void bnx2x_init_vlan_obj(struct bnx2x *bp,
1958                          struct bnx2x_vlan_mac_obj *vlan_obj,
1959                          u8 cl_id, u32 cid, u8 func_id, void *rdata,
1960                          dma_addr_t rdata_mapping, int state,
1961                          unsigned long *pstate, bnx2x_obj_type type,
1962                          struct bnx2x_credit_pool_obj *vlans_pool)
1963 {
1964         union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)vlan_obj;
1965
1966         bnx2x_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata,
1967                                    rdata_mapping, state, pstate, type, NULL,
1968                                    vlans_pool);
1969
1970         vlan_obj->get_credit = bnx2x_get_credit_vlan;
1971         vlan_obj->put_credit = bnx2x_put_credit_vlan;
1972         vlan_obj->get_cam_offset = bnx2x_get_cam_offset_vlan;
1973         vlan_obj->put_cam_offset = bnx2x_put_cam_offset_vlan;
1974
1975         if (CHIP_IS_E1x(bp)) {
1976                 BNX2X_ERR("Do not support chips others than E2 and newer\n");
1977                 BUG();
1978         } else {
1979                 vlan_obj->set_one_rule      = bnx2x_set_one_vlan_e2;
1980                 vlan_obj->check_del         = bnx2x_check_vlan_del;
1981                 vlan_obj->check_add         = bnx2x_check_vlan_add;
1982                 vlan_obj->check_move        = bnx2x_check_move;
1983                 vlan_obj->ramrod_cmd        =
1984                         RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
1985                 vlan_obj->get_n_elements    = bnx2x_get_n_elements;
1986
1987                 /* Exe Queue */
1988                 bnx2x_exe_queue_init(bp,
1989                                      &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
1990                                      qable_obj, bnx2x_validate_vlan_mac,
1991                                      bnx2x_remove_vlan_mac,
1992                                      bnx2x_optimize_vlan_mac,
1993                                      bnx2x_execute_vlan_mac,
1994                                      bnx2x_exeq_get_vlan);
1995         }
1996 }
1997
1998 void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
1999                              struct bnx2x_vlan_mac_obj *vlan_mac_obj,
2000                              u8 cl_id, u32 cid, u8 func_id, void *rdata,
2001                              dma_addr_t rdata_mapping, int state,
2002                              unsigned long *pstate, bnx2x_obj_type type,
2003                              struct bnx2x_credit_pool_obj *macs_pool,
2004                              struct bnx2x_credit_pool_obj *vlans_pool)
2005 {
2006         union bnx2x_qable_obj *qable_obj =
2007                 (union bnx2x_qable_obj *)vlan_mac_obj;
2008
2009         bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
2010                                    rdata_mapping, state, pstate, type,
2011                                    macs_pool, vlans_pool);
2012
2013         /* CAM pool handling */
2014         vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
2015         vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;
2016         /* CAM offset is relevant for 57710 and 57711 chips only which have a
2017          * single CAM for both MACs and VLAN-MAC pairs. So the offset
2018          * will be taken from MACs' pool object only.
2019          */
2020         vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
2021         vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
2022
2023         if (CHIP_IS_E1(bp)) {
2024                 BNX2X_ERR("Do not support chips others than E2\n");
2025                 BUG();
2026         } else if (CHIP_IS_E1H(bp)) {
2027                 vlan_mac_obj->set_one_rule      = bnx2x_set_one_vlan_mac_e1h;
2028                 vlan_mac_obj->check_del         = bnx2x_check_vlan_mac_del;
2029                 vlan_mac_obj->check_add         = bnx2x_check_vlan_mac_add;
2030                 vlan_mac_obj->check_move        = bnx2x_check_move_always_err;
2031                 vlan_mac_obj->ramrod_cmd        = RAMROD_CMD_ID_ETH_SET_MAC;
2032
2033                 /* Exe Queue */
2034                 bnx2x_exe_queue_init(bp,
2035                                      &vlan_mac_obj->exe_queue, 1, qable_obj,
2036                                      bnx2x_validate_vlan_mac,
2037                                      bnx2x_remove_vlan_mac,
2038                                      bnx2x_optimize_vlan_mac,
2039                                      bnx2x_execute_vlan_mac,
2040                                      bnx2x_exeq_get_vlan_mac);
2041         } else {
2042                 vlan_mac_obj->set_one_rule      = bnx2x_set_one_vlan_mac_e2;
2043                 vlan_mac_obj->check_del         = bnx2x_check_vlan_mac_del;
2044                 vlan_mac_obj->check_add         = bnx2x_check_vlan_mac_add;
2045                 vlan_mac_obj->check_move        = bnx2x_check_move;
2046                 vlan_mac_obj->ramrod_cmd        =
2047                         RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2048
2049                 /* Exe Queue */
2050                 bnx2x_exe_queue_init(bp,
2051                                      &vlan_mac_obj->exe_queue,
2052                                      CLASSIFY_RULES_COUNT,
2053                                      qable_obj, bnx2x_validate_vlan_mac,
2054                                      bnx2x_remove_vlan_mac,
2055                                      bnx2x_optimize_vlan_mac,
2056                                      bnx2x_execute_vlan_mac,
2057                                      bnx2x_exeq_get_vlan_mac);
2058         }
2059 }
2060
2061 /* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
2062 static inline void __storm_memset_mac_filters(struct bnx2x *bp,
2063                         struct tstorm_eth_mac_filter_config *mac_filters,
2064                         u16 pf_id)
2065 {
2066         size_t size = sizeof(struct tstorm_eth_mac_filter_config);
2067
2068         u32 addr = BAR_TSTRORM_INTMEM +
2069                         TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
2070
2071         __storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
2072 }
2073
/* Program the E1x per-function TSTORM MAC filter configuration from the
 * requested rx accept flags and mark the operation as completed (these
 * chips take the configuration via internal memory, not a ramrod).
 */
static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
				 struct bnx2x_rx_mode_ramrod_params *p)
{
	/* update the bp MAC filter structure */
	u32 mask = (1 << p->cl_id);

	struct tstorm_eth_mac_filter_config *mac_filters =
		(struct tstorm_eth_mac_filter_config *)p->rdata;

	/* initial setting is drop-all */
	u8 drop_all_ucast = 1, drop_all_mcast = 1;
	u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
	u8 unmatched_unicast = 0;

	/* In e1x we only take the rx accept flags into account since tx
	 * switching isn't enabled.
	 */
	if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags))
		/* accept matched ucast */
		drop_all_ucast = 0;

	if (test_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags))
		/* accept matched mcast */
		drop_all_mcast = 0;

	if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
		/* accept all ucast */
		drop_all_ucast = 0;
		accp_all_ucast = 1;
	}
	if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
		/* accept all mcast */
		drop_all_mcast = 0;
		accp_all_mcast = 1;
	}
	if (test_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags))
		/* accept (all) bcast */
		accp_all_bcast = 1;
	if (test_bit(BNX2X_ACCEPT_UNMATCHED, &p->rx_accept_flags))
		/* accept unmatched unicasts */
		unmatched_unicast = 1;

	/* Set or clear this function's bit in each per-PF filter mask */
	mac_filters->ucast_drop_all = drop_all_ucast ?
		mac_filters->ucast_drop_all | mask :
		mac_filters->ucast_drop_all & ~mask;

	mac_filters->mcast_drop_all = drop_all_mcast ?
		mac_filters->mcast_drop_all | mask :
		mac_filters->mcast_drop_all & ~mask;

	mac_filters->ucast_accept_all = accp_all_ucast ?
		mac_filters->ucast_accept_all | mask :
		mac_filters->ucast_accept_all & ~mask;

	mac_filters->mcast_accept_all = accp_all_mcast ?
		mac_filters->mcast_accept_all | mask :
		mac_filters->mcast_accept_all & ~mask;

	mac_filters->bcast_accept_all = accp_all_bcast ?
		mac_filters->bcast_accept_all | mask :
		mac_filters->bcast_accept_all & ~mask;

	mac_filters->unmatched_unicast = unmatched_unicast ?
		mac_filters->unmatched_unicast | mask :
		mac_filters->unmatched_unicast & ~mask;

	DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\n accp_ucast 0x%x\n"
			 "accp_mcast 0x%x\naccp_bcast 0x%x\n",
	   mac_filters->ucast_drop_all, mac_filters->mcast_drop_all,
	   mac_filters->ucast_accept_all, mac_filters->mcast_accept_all,
	   mac_filters->bcast_accept_all);

	/* write the MAC filter structure */
	__storm_memset_mac_filters(bp, mac_filters, p->func_id);

	/* The operation is completed */
	clear_bit(p->state, p->pstate);
	smp_mb__after_clear_bit();

	return 0;
}
2154
2155 /* Setup ramrod data */
2156 static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid,
2157                                 struct eth_classify_header *hdr,
2158                                 u8 rule_cnt)
2159 {
2160         hdr->echo = cpu_to_le32(cid);
2161         hdr->rule_cnt = rule_cnt;
2162 }
2163
2164 static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp,
2165                                 unsigned long *accept_flags,
2166                                 struct eth_filter_rules_cmd *cmd,
2167                                 bool clear_accept_all)
2168 {
2169         u16 state;
2170
2171         /* start with 'drop-all' */
2172         state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
2173                 ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2174
2175         if (test_bit(BNX2X_ACCEPT_UNICAST, accept_flags))
2176                 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2177
2178         if (test_bit(BNX2X_ACCEPT_MULTICAST, accept_flags))
2179                 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2180
2181         if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, accept_flags)) {
2182                 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2183                 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2184         }
2185
2186         if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, accept_flags)) {
2187                 state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2188                 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2189         }
2190
2191         if (test_bit(BNX2X_ACCEPT_BROADCAST, accept_flags))
2192                 state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2193
2194         if (test_bit(BNX2X_ACCEPT_UNMATCHED, accept_flags)) {
2195                 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2196                 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2197         }
2198
2199         if (test_bit(BNX2X_ACCEPT_ANY_VLAN, accept_flags))
2200                 state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
2201
2202         /* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
2203         if (clear_accept_all) {
2204                 state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2205                 state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2206                 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2207                 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2208         }
2209
2210         cmd->state = cpu_to_le16(state);
2211 }
2212
/* Build and post an ETH_FILTER_RULES ramrod carrying up to four rules:
 * Tx and Rx rules for the L2 client and, when requested, separate Tx/Rx
 * rules for the FCoE L2 queue. Returns 1 (completion pending) on success,
 * negative on failure.
 */
static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
				struct bnx2x_rx_mode_ramrod_params *p)
{
	struct eth_filter_rules_ramrod_data *data = p->rdata;
	int rc;
	u8 rule_idx = 0;

	/* Reset the ramrod data buffer */
	memset(data, 0, sizeof(*data));

	/* Setup ramrod data */

	/* Tx (internal switching) */
	if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
		data->rules[rule_idx].client_id = p->cl_id;
		data->rules[rule_idx].func_id = p->func_id;

		data->rules[rule_idx].cmd_general_data =
			ETH_FILTER_RULES_CMD_TX_CMD;

		bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
					       &(data->rules[rule_idx++]),
					       false);
	}

	/* Rx */
	if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
		data->rules[rule_idx].client_id = p->cl_id;
		data->rules[rule_idx].func_id = p->func_id;

		data->rules[rule_idx].cmd_general_data =
			ETH_FILTER_RULES_CMD_RX_CMD;

		bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
					       &(data->rules[rule_idx++]),
					       false);
	}

	/* If FCoE Queue configuration has been requested configure the Rx and
	 * internal switching modes for this queue in separate rules.
	 *
	 * FCoE queue shall never be set to ACCEPT_ALL packets of any sort:
	 * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
	 */
	if (test_bit(BNX2X_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
		/*  Tx (internal switching) */
		if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
			data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
			data->rules[rule_idx].func_id = p->func_id;

			data->rules[rule_idx].cmd_general_data =
						ETH_FILTER_RULES_CMD_TX_CMD;

			bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
						       &(data->rules[rule_idx]),
						       true);
			rule_idx++;
		}

		/* Rx */
		if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
			data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
			data->rules[rule_idx].func_id = p->func_id;

			data->rules[rule_idx].cmd_general_data =
						ETH_FILTER_RULES_CMD_RX_CMD;

			bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
						       &(data->rules[rule_idx]),
						       true);
			rule_idx++;
		}
	}

	/* Set the ramrod header (most importantly - number of rules to
	 * configure).
	 */
	bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);

	DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx\n",
			 data->header.rule_cnt, p->rx_accept_flags,
			 p->tx_accept_flags);

	/* No need for an explicit memory barrier here as long we would
	 * need to ensure the ordering of writing to the SPQ element
	 * and updating of the SPQ producer which involves a memory
	 * read and we will have to put a full memory barrier there
	 * (inside bnx2x_sp_post()).
	 */

	/* Send a ramrod */
	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_FILTER_RULES, p->cid,
			   U64_HI(p->rdata_mapping),
			   U64_LO(p->rdata_mapping),
			   ETH_CONNECTION_TYPE);
	if (rc)
		return rc;

	/* Ramrod completion is pending */
	return 1;
}
2314
/* E2 rx_mode completion: wait for the pending-state bit to be cleared by
 * the ramrod completion handler.
 */
static int bnx2x_wait_rx_mode_comp_e2(struct bnx2x *bp,
				      struct bnx2x_rx_mode_ramrod_params *p)
{
	return bnx2x_state_wait(bp, p->state, p->pstate);
}
2320
/* E1x rx_mode completion stub: configuration completes synchronously on
 * these chips, so there is nothing to wait for.
 */
static int bnx2x_empty_rx_mode_wait(struct bnx2x *bp,
				    struct bnx2x_rx_mode_ramrod_params *p)
{
	/* Do nothing */
	return 0;
}
2327
2328 int bnx2x_config_rx_mode(struct bnx2x *bp,
2329                          struct bnx2x_rx_mode_ramrod_params *p)
2330 {
2331         int rc;
2332
2333         /* Configure the new classification in the chip */
2334         rc = p->rx_mode_obj->config_rx_mode(bp, p);
2335         if (rc < 0)
2336                 return rc;
2337
2338         /* Wait for a ramrod completion if was requested */
2339         if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
2340                 rc = p->rx_mode_obj->wait_comp(bp, p);
2341                 if (rc)
2342                         return rc;
2343         }
2344
2345         return rc;
2346 }
2347
2348 void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
2349                             struct bnx2x_rx_mode_obj *o)
2350 {
2351         if (CHIP_IS_E1x(bp)) {
2352                 o->wait_comp      = bnx2x_empty_rx_mode_wait;
2353                 o->config_rx_mode = bnx2x_set_rx_mode_e1x;
2354         } else {
2355                 o->wait_comp      = bnx2x_wait_rx_mode_comp_e2;
2356                 o->config_rx_mode = bnx2x_set_rx_mode_e2;
2357         }
2358 }
2359
2360 /********************* Multicast verbs: SET, CLEAR ****************************/
/* Map a multicast MAC to its approximate-match bin (0..255): the top byte
 * of the little-endian CRC32c of the address.
 */
static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac)
{
	return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff;
}
2365
/* One MAC address belonging to a pending ADD command; element storage is
 * carved out of the memory allocated right after the pending command
 * structure (see bnx2x_mcast_enqueue_cmd()).
 */
struct bnx2x_mcast_mac_elem {
	struct list_head link;
	u8 mac[ETH_ALEN];
	u8 pad[2]; /* For a natural alignment of the following buffer */
};
2371
/* A multicast command (ADD/DEL/RESTORE) that could not be executed right
 * away and was queued on bnx2x_mcast_obj's pending_cmds_head list.
 */
struct bnx2x_pending_mcast_cmd {
	struct list_head link;	/* entry in the object's pending-commands list */
	int type; /* BNX2X_MCAST_CMD_X */
	union {
		struct list_head macs_head;	/* MACs queued by an ADD command */
		u32 macs_num; /* Needed for DEL command */
		int next_bin; /* Needed for RESTORE flow with aprox match */
	} data;

	bool done; /* set to true, when the command has been handled,
		    * practically used in 57712 handling only, where one pending
		    * command may be handled in a few operations. As long as for
		    * other chips every operation handling is completed in a
		    * single ramrod, there is no need to utilize this field.
		    */
};
2388
2389 static int bnx2x_mcast_wait(struct bnx2x *bp,
2390                             struct bnx2x_mcast_obj *o)
2391 {
2392         if (bnx2x_state_wait(bp, o->sched_state, o->raw.pstate) ||
2393                         o->raw.wait_comp(bp, &o->raw))
2394                 return -EBUSY;
2395
2396         return 0;
2397 }
2398
/* Queue @cmd (with the MACs from @p, for ADD) on @o's pending-commands list
 * for execution by a later ramrod and mark the object as scheduled.
 *
 * Returns 1 when a command was enqueued, 0 when there was nothing to do,
 * negative errno on failure.
 */
static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
				   struct bnx2x_mcast_obj *o,
				   struct bnx2x_mcast_ramrod_params *p,
				   enum bnx2x_mcast_cmd cmd)
{
	int total_sz;
	struct bnx2x_pending_mcast_cmd *new_cmd;
	struct bnx2x_mcast_mac_elem *cur_mac = NULL;
	struct bnx2x_mcast_list_elem *pos;
	/* Only ADD carries an explicit MAC list; DEL/RESTORE need no storage */
	int macs_list_len = ((cmd == BNX2X_MCAST_CMD_ADD) ?
			     p->mcast_list_len : 0);

	/* If the command is empty ("handle pending commands only"), break */
	if (!p->mcast_list_len)
		return 0;

	/* Single allocation: the command header plus its trailing MAC array */
	total_sz = sizeof(*new_cmd) +
		macs_list_len * sizeof(struct bnx2x_mcast_mac_elem);

	/* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
	new_cmd = kzalloc(total_sz, GFP_ATOMIC);

	if (!new_cmd)
		return -ENOMEM;

	DP(BNX2X_MSG_SP, "About to enqueue a new %d command. macs_list_len=%d\n",
	   cmd, macs_list_len);

	INIT_LIST_HEAD(&new_cmd->data.macs_head);

	new_cmd->type = cmd;
	new_cmd->done = false;

	switch (cmd) {
	case BNX2X_MCAST_CMD_ADD:
		/* cur_mac points at the element storage allocated right after
		 * the command structure itself (see total_sz above).
		 */
		cur_mac = (struct bnx2x_mcast_mac_elem *)
			  ((u8 *)new_cmd + sizeof(*new_cmd));

		/* Push the MACs of the current command into the pending command
		 * MACs list: FIFO
		 */
		list_for_each_entry(pos, &p->mcast_list, link) {
			memcpy(cur_mac->mac, pos->mac, ETH_ALEN);
			list_add_tail(&cur_mac->link, &new_cmd->data.macs_head);
			cur_mac++;
		}

		break;

	case BNX2X_MCAST_CMD_DEL:
		new_cmd->data.macs_num = p->mcast_list_len;
		break;

	case BNX2X_MCAST_CMD_RESTORE:
		new_cmd->data.next_bin = 0;
		break;

	default:
		kfree(new_cmd);
		BNX2X_ERR("Unknown command: %d\n", cmd);
		return -EINVAL;
	}

	/* Push the new pending command to the tail of the pending list: FIFO */
	list_add_tail(&new_cmd->link, &o->pending_cmds_head);

	o->set_sched(o);

	return 1;
}
2469
2470 /**
2471  * bnx2x_mcast_get_next_bin - get the next set bin (index)
2472  *
2473  * @o:
2474  * @last:       index to start looking from (including)
2475  *
2476  * returns the next found (set) bin or a negative value if none is found.
2477  */
2478 static inline int bnx2x_mcast_get_next_bin(struct bnx2x_mcast_obj *o, int last)
2479 {
2480         int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
2481
2482         for (i = last / BIT_VEC64_ELEM_SZ; i < BNX2X_MCAST_VEC_SZ; i++) {
2483                 if (o->registry.aprox_match.vec[i])
2484                         for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
2485                                 int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
2486                                 if (BIT_VEC64_TEST_BIT(o->registry.aprox_match.
2487                                                        vec, cur_bit)) {
2488                                         return cur_bit;
2489                                 }
2490                         }
2491                 inner_start = 0;
2492         }
2493
2494         /* None found */
2495         return -1;
2496 }
2497
/**
 * bnx2x_mcast_clear_first_bin - find the first set bin and clear it
 *
 * @o:          multicast object whose approximate-match registry is updated
 *
 * returns the index of the found bin or -1 if none is found
 */
static inline int bnx2x_mcast_clear_first_bin(struct bnx2x_mcast_obj *o)
{
	int cur_bit = bnx2x_mcast_get_next_bin(o, 0);

	/* Clear the found bin so successive calls walk the registry */
	if (cur_bit >= 0)
		BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);

	return cur_bit;
}
2514
2515 static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o)
2516 {
2517         struct bnx2x_raw_obj *raw = &o->raw;
2518         u8 rx_tx_flag = 0;
2519
2520         if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
2521             (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2522                 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
2523
2524         if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
2525             (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2526                 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
2527
2528         return rx_tx_flag;
2529 }
2530
/* Fill rule @idx in the E2 multicast ramrod data for @cmd and keep the
 * approximate-match registry in sync: a bin is set on ADD, cleared on DEL,
 * and taken as-is from @cfg_data on RESTORE.
 */
static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp,
					struct bnx2x_mcast_obj *o, int idx,
					union bnx2x_mcast_config_data *cfg_data,
					enum bnx2x_mcast_cmd cmd)
{
	struct bnx2x_raw_obj *r = &o->raw;
	struct eth_multicast_rules_ramrod_data *data =
		(struct eth_multicast_rules_ramrod_data *)(r->rdata);
	u8 func_id = r->func_id;
	u8 rx_tx_add_flag = bnx2x_mcast_get_rx_tx_flag(o);
	int bin;

	/* ADD and RESTORE both program a bin; DEL clears one */
	if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE))
		rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;

	data->rules[idx].cmd_general_data |= rx_tx_add_flag;

	/* Get a bin and update a bins' vector */
	switch (cmd) {
	case BNX2X_MCAST_CMD_ADD:
		bin = bnx2x_mcast_bin_from_mac(cfg_data->mac);
		BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
		break;

	case BNX2X_MCAST_CMD_DEL:
		/* If there were no more bins to clear
		 * (bnx2x_mcast_clear_first_bin() returns -1) then we would
		 * clear any (0xff) bin.
		 * See bnx2x_mcast_validate_e2() for explanation when it may
		 * happen.
		 */
		bin = bnx2x_mcast_clear_first_bin(o);
		break;

	case BNX2X_MCAST_CMD_RESTORE:
		bin = cfg_data->bin;
		break;

	default:
		BNX2X_ERR("Unknown command: %d\n", cmd);
		return;
	}

	DP(BNX2X_MSG_SP, "%s bin %d\n",
			 ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
			 "Setting"  : "Clearing"), bin);

	data->rules[idx].bin_id    = (u8)bin;
	data->rules[idx].func_id   = func_id;
	data->rules[idx].engine_id = o->engine_id;
}
2582
2583 /**
2584  * bnx2x_mcast_handle_restore_cmd_e2 - restore configuration from the registry
2585  *
2586  * @bp:         device handle
2587  * @o:
2588  * @start_bin:  index in the registry to start from (including)
2589  * @rdata_idx:  index in the ramrod data to start from
2590  *
2591  * returns last handled bin index or -1 if all bins have been handled
2592  */
2593 static inline int bnx2x_mcast_handle_restore_cmd_e2(
2594         struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_bin,
2595         int *rdata_idx)
2596 {
2597         int cur_bin, cnt = *rdata_idx;
2598         union bnx2x_mcast_config_data cfg_data = {NULL};
2599
2600         /* go through the registry and configure the bins from it */
2601         for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
2602             cur_bin = bnx2x_mcast_get_next_bin(o, cur_bin + 1)) {
2603
2604                 cfg_data.bin = (u8)cur_bin;
2605                 o->set_one_rule(bp, o, cnt, &cfg_data,
2606                                 BNX2X_MCAST_CMD_RESTORE);
2607
2608                 cnt++;
2609
2610                 DP(BNX2X_MSG_SP, "About to configure a bin %d\n", cur_bin);
2611
2612                 /* Break if we reached the maximum number
2613                  * of rules.
2614                  */
2615                 if (cnt >= o->max_cmd_len)
2616                         break;
2617         }
2618
2619         *rdata_idx = cnt;
2620
2621         return cur_bin;
2622 }
2623
/* Write as many MACs of a pending ADD command as fit into the ramrod data,
 * starting at line *@line_idx, and advance *@line_idx accordingly. The
 * command is marked done once its MAC list has been drained.
 */
static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp,
	struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
	int *line_idx)
{
	struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n;
	int cnt = *line_idx;
	union bnx2x_mcast_config_data cfg_data = {NULL};

	/* Safe iterator: each handled entry is unlinked inside the loop */
	list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head,
				 link) {

		cfg_data.mac = &pmac_pos->mac[0];
		o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);

		cnt++;

		DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
		   pmac_pos->mac);

		list_del(&pmac_pos->link);

		/* Break if we reached the maximum number
		 * of rules.
		 */
		if (cnt >= o->max_cmd_len)
			break;
	}

	*line_idx = cnt;

	/* if no more MACs to configure - we are done */
	if (list_empty(&cmd_pos->data.macs_head))
		cmd_pos->done = true;
}
2658
2659 static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp,
2660         struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2661         int *line_idx)
2662 {
2663         int cnt = *line_idx;
2664
2665         while (cmd_pos->data.macs_num) {
2666                 o->set_one_rule(bp, o, cnt, NULL, cmd_pos->type);
2667
2668                 cnt++;
2669
2670                 cmd_pos->data.macs_num--;
2671
2672                   DP(BNX2X_MSG_SP, "Deleting MAC. %d left,cnt is %d\n",
2673                                    cmd_pos->data.macs_num, cnt);
2674
2675                 /* Break if we reached the maximum
2676                  * number of rules.
2677                  */
2678                 if (cnt >= o->max_cmd_len)
2679                         break;
2680         }
2681
2682         *line_idx = cnt;
2683
2684         /* If we cleared all bins - we are done */
2685         if (!cmd_pos->data.macs_num)
2686                 cmd_pos->done = true;
2687 }
2688
2689 static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp,
2690         struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2691         int *line_idx)
2692 {
2693         cmd_pos->data.next_bin = o->hdl_restore(bp, o, cmd_pos->data.next_bin,
2694                                                 line_idx);
2695
2696         if (cmd_pos->data.next_bin < 0)
2697                 /* If o->set_restore returned -1 we are done */
2698                 cmd_pos->done = true;
2699         else
2700                 /* Start from the next bin next time */
2701                 cmd_pos->data.next_bin++;
2702 }
2703
/* Drain previously queued commands into the ramrod data, as many as fit.
 * Returns the number of ramrod data lines filled, or -EINVAL on an unknown
 * command type.
 */
static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp,
				struct bnx2x_mcast_ramrod_params *p)
{
	struct bnx2x_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
	int cnt = 0;
	struct bnx2x_mcast_obj *o = p->mcast_obj;

	/* Safe iteration: fully handled commands are unlinked and freed */
	list_for_each_entry_safe(cmd_pos, cmd_pos_n, &o->pending_cmds_head,
				 link) {
		switch (cmd_pos->type) {
		case BNX2X_MCAST_CMD_ADD:
			bnx2x_mcast_hdl_pending_add_e2(bp, o, cmd_pos, &cnt);
			break;

		case BNX2X_MCAST_CMD_DEL:
			bnx2x_mcast_hdl_pending_del_e2(bp, o, cmd_pos, &cnt);
			break;

		case BNX2X_MCAST_CMD_RESTORE:
			bnx2x_mcast_hdl_pending_restore_e2(bp, o, cmd_pos,
							   &cnt);
			break;

		default:
			BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
			return -EINVAL;
		}

		/* If the command has been completed - remove it from the list
		 * and free the memory
		 */
		if (cmd_pos->done) {
			list_del(&cmd_pos->link);
			kfree(cmd_pos);
		}

		/* Break if we reached the maximum number of rules */
		if (cnt >= o->max_cmd_len)
			break;
	}

	return cnt;
}
2747
2748 static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp,
2749         struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2750         int *line_idx)
2751 {
2752         struct bnx2x_mcast_list_elem *mlist_pos;
2753         union bnx2x_mcast_config_data cfg_data = {NULL};
2754         int cnt = *line_idx;
2755
2756         list_for_each_entry(mlist_pos, &p->mcast_list, link) {
2757                 cfg_data.mac = mlist_pos->mac;
2758                 o->set_one_rule(bp, o, cnt, &cfg_data, BNX2X_MCAST_CMD_ADD);
2759
2760                 cnt++;
2761
2762                 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
2763                    mlist_pos->mac);
2764         }
2765
2766         *line_idx = cnt;
2767 }
2768
2769 static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp,
2770         struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2771         int *line_idx)
2772 {
2773         int cnt = *line_idx, i;
2774
2775         for (i = 0; i < p->mcast_list_len; i++) {
2776                 o->set_one_rule(bp, o, cnt, NULL, BNX2X_MCAST_CMD_DEL);
2777
2778                 cnt++;
2779
2780                 DP(BNX2X_MSG_SP, "Deleting MAC. %d left\n",
2781                                  p->mcast_list_len - i - 1);
2782         }
2783
2784         *line_idx = cnt;
2785 }
2786
/**
 * bnx2x_mcast_handle_current_cmd - write the current command to ramrod data
 *
 * @bp:         device handle
 * @p:          ramrod parameters; p->mcast_obj is the object operated on
 * @cmd:        command to handle (ADD/DEL/RESTORE)
 * @start_cnt:  first line in the ramrod data that may be used
 *
 * This function is called iff there is enough place for the current command in
 * the ramrod data.
 * Returns number of lines filled in the ramrod data in total.
 */
static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp,
			struct bnx2x_mcast_ramrod_params *p,
			enum bnx2x_mcast_cmd cmd,
			int start_cnt)
{
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	int cnt = start_cnt;

	DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);

	switch (cmd) {
	case BNX2X_MCAST_CMD_ADD:
		bnx2x_mcast_hdl_add(bp, o, p, &cnt);
		break;

	case BNX2X_MCAST_CMD_DEL:
		bnx2x_mcast_hdl_del(bp, o, p, &cnt);
		break;

	case BNX2X_MCAST_CMD_RESTORE:
		o->hdl_restore(bp, o, 0, &cnt);
		break;

	default:
		BNX2X_ERR("Unknown command: %d\n", cmd);
		return -EINVAL;
	}

	/* The current command has been handled */
	p->mcast_list_len = 0;

	return cnt;
}
2832
/* Pre-flight for an E2 multicast command: estimate the amount of work
 * (p->mcast_list_len) and update the registry-size bookkeeping. Note the
 * intentional fallthrough from DEL to RESTORE below.
 */
static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
				   struct bnx2x_mcast_ramrod_params *p,
				   enum bnx2x_mcast_cmd cmd)
{
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	int reg_sz = o->get_registry_size(o);

	switch (cmd) {
	/* DEL command deletes all currently configured MACs */
	case BNX2X_MCAST_CMD_DEL:
		o->set_registry_size(o, 0);
		/* Don't break */

	/* RESTORE command will restore the entire multicast configuration */
	case BNX2X_MCAST_CMD_RESTORE:
		/* Here we set the approximate amount of work to do, which in
		 * fact may be only less as some MACs in postponed ADD
		 * command(s) scheduled before this command may fall into
		 * the same bin and the actual number of bins set in the
		 * registry would be less than we estimated here. See
		 * bnx2x_mcast_set_one_rule_e2() for further details.
		 */
		p->mcast_list_len = reg_sz;
		break;

	case BNX2X_MCAST_CMD_ADD:
	case BNX2X_MCAST_CMD_CONT:
		/* Here we assume that all new MACs will fall into new bins.
		 * However we will correct the real registry size after we
		 * handle all pending commands.
		 */
		o->set_registry_size(o, reg_sz + p->mcast_list_len);
		break;

	default:
		BNX2X_ERR("Unknown command: %d\n", cmd);
		return -EINVAL;
	}

	/* Increase the total number of MACs pending to be configured */
	o->total_pending_num += p->mcast_list_len;

	return 0;
}
2877
2878 static void bnx2x_mcast_revert_e2(struct bnx2x *bp,
2879                                       struct bnx2x_mcast_ramrod_params *p,
2880                                       int old_num_bins)
2881 {
2882         struct bnx2x_mcast_obj *o = p->mcast_obj;
2883
2884         o->set_registry_size(o, old_num_bins);
2885         o->total_pending_num -= p->mcast_list_len;
2886 }
2887
2888 /**
2889  * bnx2x_mcast_set_rdata_hdr_e2 - sets a header values
2890  *
2891  * @bp:         device handle
2892  * @p:
2893  * @len:        number of rules to handle
2894  */
2895 static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp,
2896                                         struct bnx2x_mcast_ramrod_params *p,
2897                                         u8 len)
2898 {
2899         struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
2900         struct eth_multicast_rules_ramrod_data *data =
2901                 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2902
2903         data->header.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
2904                                         (BNX2X_FILTER_MCAST_PENDING <<
2905                                          BNX2X_SWCID_SHIFT));
2906         data->header.rule_cnt = len;
2907 }
2908
2909 /**
2910  * bnx2x_mcast_refresh_registry_e2 - recalculate the actual number of set bins
2911  *
2912  * @bp:         device handle
2913  * @o:
2914  *
2915  * Recalculate the actual number of set bins in the registry using Brian
2916  * Kernighan's algorithm: it's execution complexity is as a number of set bins.
2917  *
2918  * returns 0 for the compliance with bnx2x_mcast_refresh_registry_e1().
2919  */
2920 static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp,
2921                                                   struct bnx2x_mcast_obj *o)
2922 {
2923         int i, cnt = 0;
2924         u64 elem;
2925
2926         for (i = 0; i < BNX2X_MCAST_VEC_SZ; i++) {
2927                 elem = o->registry.aprox_match.vec[i];
2928                 for (; elem; cnt++)
2929                         elem &= elem - 1;
2930         }
2931
2932         o->set_registry_size(o, cnt);
2933
2934         return 0;
2935 }
2936
/* Build and (unless CLEAR_ONLY) send one E2 multicast ramrod: pending
 * commands first, then the current command if room is left.
 *
 * Returns 1 when a ramrod was posted (completion pending), 0 when nothing
 * was sent (CLEAR_ONLY), negative errno on failure.
 */
static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
				struct bnx2x_mcast_ramrod_params *p,
				enum bnx2x_mcast_cmd cmd)
{
	struct bnx2x_raw_obj *raw = &p->mcast_obj->raw;
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	struct eth_multicast_rules_ramrod_data *data =
		(struct eth_multicast_rules_ramrod_data *)(raw->rdata);
	int cnt = 0, rc;

	/* Reset the ramrod data buffer */
	memset(data, 0, sizeof(*data));

	/* Previously queued commands get priority over the current one */
	cnt = bnx2x_mcast_handle_pending_cmds_e2(bp, p);

	/* If there are no more pending commands - clear SCHEDULED state */
	if (list_empty(&o->pending_cmds_head))
		o->clear_sched(o);

	/* The below may be true iff there was enough room in ramrod
	 * data for all pending commands and for the current
	 * command. Otherwise the current command would have been added
	 * to the pending commands and p->mcast_list_len would have been
	 * zeroed.
	 */
	if (p->mcast_list_len > 0)
		cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, cnt);

	/* We've pulled out some MACs - update the total number of
	 * outstanding.
	 */
	o->total_pending_num -= cnt;

	/* send a ramrod */
	WARN_ON(o->total_pending_num < 0);
	WARN_ON(cnt > o->max_cmd_len);

	bnx2x_mcast_set_rdata_hdr_e2(bp, p, (u8)cnt);

	/* Update a registry size if there are no more pending operations.
	 *
	 * We don't want to change the value of the registry size if there are
	 * pending operations because we want it to always be equal to the
	 * exact or the approximate number (see bnx2x_mcast_validate_e2()) of
	 * set bins after the last requested operation in order to properly
	 * evaluate the size of the next DEL/RESTORE operation.
	 *
	 * Note that we update the registry itself during command(s) handling
	 * - see bnx2x_mcast_set_one_rule_e2(). That's because for 57712 we
	 * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
	 * with a limited amount of update commands (per MAC/bin) and we don't
	 * know in this scope what the actual state of bins configuration is
	 * going to be after this ramrod.
	 */
	if (!o->total_pending_num)
		bnx2x_mcast_refresh_registry_e2(bp, o);

	/* If CLEAR_ONLY was requested - don't send a ramrod and clear
	 * RAMROD_PENDING status immediately.
	 */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
		raw->clear_pending(raw);
		return 0;
	} else {
		/* No need for an explicit memory barrier here as long we would
		 * need to ensure the ordering of writing to the SPQ element
		 * and updating of the SPQ producer which involves a memory
		 * read and we will have to put a full memory barrier there
		 * (inside bnx2x_sp_post()).
		 */

		/* Send a ramrod */
		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_MULTICAST_RULES,
				   raw->cid, U64_HI(raw->rdata_mapping),
				   U64_LO(raw->rdata_mapping),
				   ETH_CONNECTION_TYPE);
		if (rc)
			return rc;

		/* Ramrod completion is pending */
		return 1;
	}
}
3020
/* E1h (57711) handles every command in a single shot, so no real
 * validation is needed; just flag that there is work to do for commands
 * that carry no explicit MAC list.
 */
static int bnx2x_mcast_validate_e1h(struct bnx2x *bp,
				    struct bnx2x_mcast_ramrod_params *p,
				    enum bnx2x_mcast_cmd cmd)
{
	/* Mark, that there is a work to do */
	if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE))
		p->mcast_list_len = 1;

	return 0;
}
3031
/* bnx2x_mcast_validate_e1h() changes no persistent state, so there is
 * nothing to undo here.
 */
static void bnx2x_mcast_revert_e1h(struct bnx2x *bp,
				       struct bnx2x_mcast_ramrod_params *p,
				       int old_num_bins)
{
	/* Do nothing */
}
3038
/* Set bit @bit in the u32-based 57711 multicast hash filter array */
#define BNX2X_57711_SET_MC_FILTER(filter, bit) \
do { \
	(filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
} while (0)
3043
3044 static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp,
3045                                            struct bnx2x_mcast_obj *o,
3046                                            struct bnx2x_mcast_ramrod_params *p,
3047                                            u32 *mc_filter)
3048 {
3049         struct bnx2x_mcast_list_elem *mlist_pos;
3050         int bit;
3051
3052         list_for_each_entry(mlist_pos, &p->mcast_list, link) {
3053                 bit = bnx2x_mcast_bin_from_mac(mlist_pos->mac);
3054                 BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3055
3056                 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC, bin %d\n",
3057                    mlist_pos->mac, bit);
3058
3059                 /* bookkeeping... */
3060                 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
3061                                   bit);
3062         }
3063 }
3064
3065 static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp,
3066         struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
3067         u32 *mc_filter)
3068 {
3069         int bit;
3070
3071         for (bit = bnx2x_mcast_get_next_bin(o, 0);
3072              bit >= 0;
3073              bit = bnx2x_mcast_get_next_bin(o, bit + 1)) {
3074                 BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3075                 DP(BNX2X_MSG_SP, "About to set bin %d\n", bit);
3076         }
3077 }
3078
/* On 57711 we write the multicast MACs' approximate match
 * table directly into the TSTORM's internal RAM. So we don't
 * really need to handle any tricks to make it work.
 */
static int bnx2x_mcast_setup_e1h(struct bnx2x *bp,
				 struct bnx2x_mcast_ramrod_params *p,
				 enum bnx2x_mcast_cmd cmd)
{
	int i;
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	struct bnx2x_raw_obj *r = &o->raw;

	/* If CLEAR_ONLY has been requested - only clear the registry (the
	 * "else" branch below); otherwise build the filter and write it into
	 * the chip's internal memory.
	 */
	if (!test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
		u32 mc_filter[MC_HASH_SIZE] = {0};

		/* Set the multicast filter bits before writing it into
		 * the internal memory.
		 */
		switch (cmd) {
		case BNX2X_MCAST_CMD_ADD:
			bnx2x_mcast_hdl_add_e1h(bp, o, p, mc_filter);
			break;

		case BNX2X_MCAST_CMD_DEL:
			DP(BNX2X_MSG_SP,
			   "Invalidating multicast MACs configuration\n");

			/* clear the registry */
			memset(o->registry.aprox_match.vec, 0,
			       sizeof(o->registry.aprox_match.vec));
			break;

		case BNX2X_MCAST_CMD_RESTORE:
			bnx2x_mcast_hdl_restore_e1h(bp, o, p, mc_filter);
			break;

		default:
			BNX2X_ERR("Unknown command: %d\n", cmd);
			return -EINVAL;
		}

		/* Set the mcast filter in the internal memory */
		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), mc_filter[i]);
	} else
		/* clear the registry */
		memset(o->registry.aprox_match.vec, 0,
		       sizeof(o->registry.aprox_match.vec));

	/* We are done */
	r->clear_pending(r);

	return 0;
}
3136
3137 static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
3138                                    struct bnx2x_mcast_ramrod_params *p,
3139                                    enum bnx2x_mcast_cmd cmd)
3140 {
3141         struct bnx2x_mcast_obj *o = p->mcast_obj;
3142         int reg_sz = o->get_registry_size(o);
3143
3144         switch (cmd) {
3145         /* DEL command deletes all currently configured MACs */
3146         case BNX2X_MCAST_CMD_DEL:
3147                 o->set_registry_size(o, 0);
3148                 /* Don't break */
3149
3150         /* RESTORE command will restore the entire multicast configuration */
3151         case BNX2X_MCAST_CMD_RESTORE:
3152                 p->mcast_list_len = reg_sz;
3153                   DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n",
3154                                    cmd, p->mcast_list_len);
3155                 break;
3156
3157         case BNX2X_MCAST_CMD_ADD:
3158         case BNX2X_MCAST_CMD_CONT:
3159                 /* Multicast MACs on 57710 are configured as unicast MACs and
3160                  * there is only a limited number of CAM entries for that
3161                  * matter.
3162                  */
3163                 if (p->mcast_list_len > o->max_cmd_len) {
3164                         BNX2X_ERR("Can't configure more than %d multicast MACs on 57710\n",
3165                                   o->max_cmd_len);
3166                         return -EINVAL;
3167                 }
3168                 /* Every configured MAC should be cleared if DEL command is
3169                  * called. Only the last ADD command is relevant as long as
3170                  * every ADD commands overrides the previous configuration.
3171                  */
3172                 DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
3173                 if (p->mcast_list_len > 0)
3174                         o->set_registry_size(o, p->mcast_list_len);
3175
3176                 break;
3177
3178         default:
3179                 BNX2X_ERR("Unknown command: %d\n", cmd);
3180                 return -EINVAL;
3181         }
3182
3183         /* We want to ensure that commands are executed one by one for 57710.
3184          * Therefore each none-empty command will consume o->max_cmd_len.
3185          */
3186         if (p->mcast_list_len)
3187                 o->total_pending_num += o->max_cmd_len;
3188
3189         return 0;
3190 }
3191
3192 static void bnx2x_mcast_revert_e1(struct bnx2x *bp,
3193                                       struct bnx2x_mcast_ramrod_params *p,
3194                                       int old_num_macs)
3195 {
3196         struct bnx2x_mcast_obj *o = p->mcast_obj;
3197
3198         o->set_registry_size(o, old_num_macs);
3199
3200         /* If current command hasn't been handled yet and we are
3201          * here means that it's meant to be dropped and we have to
3202          * update the number of outstanding MACs accordingly.
3203          */
3204         if (p->mcast_list_len)
3205                 o->total_pending_num -= o->max_cmd_len;
3206 }
3207
3208 static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp,
3209                                         struct bnx2x_mcast_obj *o, int idx,
3210                                         union bnx2x_mcast_config_data *cfg_data,
3211                                         enum bnx2x_mcast_cmd cmd)
3212 {
3213         struct bnx2x_raw_obj *r = &o->raw;
3214         struct mac_configuration_cmd *data =
3215                 (struct mac_configuration_cmd *)(r->rdata);
3216
3217         /* copy mac */
3218         if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) {
3219                 bnx2x_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr,
3220                                       &data->config_table[idx].middle_mac_addr,
3221                                       &data->config_table[idx].lsb_mac_addr,
3222                                       cfg_data->mac);
3223
3224                 data->config_table[idx].vlan_id = 0;
3225                 data->config_table[idx].pf_id = r->func_id;
3226                 data->config_table[idx].clients_bit_vector =
3227                         cpu_to_le32(1 << r->cl_id);
3228
3229                 SET_FLAG(data->config_table[idx].flags,
3230                          MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3231                          T_ETH_MAC_COMMAND_SET);
3232         }
3233 }
3234
/**
 * bnx2x_mcast_set_rdata_hdr_e1  - set header values in mac_configuration_cmd
 *
 * @bp:         device handle
 * @p:          multicast ramrod parameters; supplies the mcast object whose
 *              raw rdata buffer holds the mac_configuration_cmd to stamp
 * @len:        number of rules to handle
 */
static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp,
					struct bnx2x_mcast_ramrod_params *p,
					u8 len)
{
	struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
	struct mac_configuration_cmd *data =
		(struct mac_configuration_cmd *)(r->rdata);

	/* Per-function offset into the mcast CAM region; emulation (slow)
	 * chips use a smaller per-function slice than real silicon.
	 */
	u8 offset = (CHIP_REV_IS_SLOW(bp) ?
		     BNX2X_MAX_EMUL_MULTI*(1 + r->func_id) :
		     BNX2X_MAX_MULTICAST*(1 + r->func_id));

	data->hdr.offset = offset;
	/* NOTE(review): 0xff appears to be a wildcard client id here -
	 * confirm against the HSI definition of mac_configuration_hdr.
	 */
	data->hdr.client_id = cpu_to_le16(0xff);
	/* Encode CID + pending-command type so the completion handler can
	 * match this ramrod back to the mcast object.
	 */
	data->hdr.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
				     (BNX2X_FILTER_MCAST_PENDING <<
				      BNX2X_SWCID_SHIFT));
	data->hdr.length = len;
}
3261
3262 /**
3263  * bnx2x_mcast_handle_restore_cmd_e1 - restore command for 57710
3264  *
3265  * @bp:         device handle
3266  * @o:
3267  * @start_idx:  index in the registry to start from
3268  * @rdata_idx:  index in the ramrod data to start from
3269  *
3270  * restore command for 57710 is like all other commands - always a stand alone
3271  * command - start_idx and rdata_idx will always be 0. This function will always
3272  * succeed.
3273  * returns -1 to comply with 57712 variant.
3274  */
3275 static inline int bnx2x_mcast_handle_restore_cmd_e1(
3276         struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_idx,
3277         int *rdata_idx)
3278 {
3279         struct bnx2x_mcast_mac_elem *elem;
3280         int i = 0;
3281         union bnx2x_mcast_config_data cfg_data = {NULL};
3282
3283         /* go through the registry and configure the MACs from it. */
3284         list_for_each_entry(elem, &o->registry.exact_match.macs, link) {
3285                 cfg_data.mac = &elem->mac[0];
3286                 o->set_one_rule(bp, o, i, &cfg_data, BNX2X_MCAST_CMD_RESTORE);
3287
3288                 i++;
3289
3290                   DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
3291                      cfg_data.mac);
3292         }
3293
3294         *rdata_idx = i;
3295
3296         return -1;
3297 }
3298
3299 static inline int bnx2x_mcast_handle_pending_cmds_e1(
3300         struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p)
3301 {
3302         struct bnx2x_pending_mcast_cmd *cmd_pos;
3303         struct bnx2x_mcast_mac_elem *pmac_pos;
3304         struct bnx2x_mcast_obj *o = p->mcast_obj;
3305         union bnx2x_mcast_config_data cfg_data = {NULL};
3306         int cnt = 0;
3307
3308         /* If nothing to be done - return */
3309         if (list_empty(&o->pending_cmds_head))
3310                 return 0;
3311
3312         /* Handle the first command */
3313         cmd_pos = list_first_entry(&o->pending_cmds_head,
3314                                    struct bnx2x_pending_mcast_cmd, link);
3315
3316         switch (cmd_pos->type) {
3317         case BNX2X_MCAST_CMD_ADD:
3318                 list_for_each_entry(pmac_pos, &cmd_pos->data.macs_head, link) {
3319                         cfg_data.mac = &pmac_pos->mac[0];
3320                         o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
3321
3322                         cnt++;
3323
3324                         DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
3325                            pmac_pos->mac);
3326                 }
3327                 break;
3328
3329         case BNX2X_MCAST_CMD_DEL:
3330                 cnt = cmd_pos->data.macs_num;
3331                 DP(BNX2X_MSG_SP, "About to delete %d multicast MACs\n", cnt);
3332                 break;
3333
3334         case BNX2X_MCAST_CMD_RESTORE:
3335                 o->hdl_restore(bp, o, 0, &cnt);
3336                 break;
3337
3338         default:
3339                 BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
3340                 return -EINVAL;
3341         }
3342
3343         list_del(&cmd_pos->link);
3344         kfree(cmd_pos);
3345
3346         return cnt;
3347 }
3348
3349 /**
3350  * bnx2x_get_fw_mac_addr - revert the bnx2x_set_fw_mac_addr().
3351  *
3352  * @fw_hi:
3353  * @fw_mid:
3354  * @fw_lo:
3355  * @mac:
3356  */
3357 static inline void bnx2x_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
3358                                          __le16 *fw_lo, u8 *mac)
3359 {
3360         mac[1] = ((u8 *)fw_hi)[0];
3361         mac[0] = ((u8 *)fw_hi)[1];
3362         mac[3] = ((u8 *)fw_mid)[0];
3363         mac[2] = ((u8 *)fw_mid)[1];
3364         mac[5] = ((u8 *)fw_lo)[0];
3365         mac[4] = ((u8 *)fw_lo)[1];
3366 }
3367
/**
 * bnx2x_mcast_refresh_registry_e1 - update the registry after an E1 ramrod
 *
 * @bp:         device handle
 * @o:          multicast object whose exact-match registry is refreshed
 *
 * Check the ramrod data first entry flag to see if it's a DELETE or ADD command
 * and update the registry correspondingly: if ADD - allocate a memory and add
 * the entries to the registry (list), if DELETE - clear the registry and free
 * the memory.
 */
static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp,
						  struct bnx2x_mcast_obj *o)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct bnx2x_mcast_mac_elem *elem;
	struct mac_configuration_cmd *data =
			(struct mac_configuration_cmd *)(raw->rdata);

	/* If first entry contains a SET bit - the command was ADD,
	 * otherwise - DEL_ALL
	 */
	if (GET_FLAG(data->config_table[0].flags,
			MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) {
		int i, len = data->hdr.length;

		/* Break if it was a RESTORE command */
		if (!list_empty(&o->registry.exact_match.macs))
			return 0;

		/* All registry entries live in ONE kcalloc'ed array; the
		 * list below links into slots of this single allocation.
		 */
		elem = kcalloc(len, sizeof(*elem), GFP_ATOMIC);
		if (!elem) {
			BNX2X_ERR("Failed to allocate registry memory\n");
			return -ENOMEM;
		}

		for (i = 0; i < len; i++, elem++) {
			bnx2x_get_fw_mac_addr(
				&data->config_table[i].msb_mac_addr,
				&data->config_table[i].middle_mac_addr,
				&data->config_table[i].lsb_mac_addr,
				elem->mac);
			DP(BNX2X_MSG_SP, "Adding registry entry for [%pM]\n",
			   elem->mac);
			list_add_tail(&elem->link,
				      &o->registry.exact_match.macs);
		}
	} else {
		/* The first list entry is also the base pointer of the
		 * kcalloc'ed array above, so freeing it releases the whole
		 * registry in one go.
		 * NOTE(review): assumes the registry is non-empty on the
		 * DEL path - confirm callers guarantee this.
		 */
		elem = list_first_entry(&o->registry.exact_match.macs,
					struct bnx2x_mcast_mac_elem, link);
		DP(BNX2X_MSG_SP, "Deleting a registry\n");
		kfree(elem);
		INIT_LIST_HEAD(&o->registry.exact_match.macs);
	}

	return 0;
}
3425
/* Build and (unless CLEAR_ONLY) post the E1 multicast ramrod.
 * Handles one queued command per invocation: pending commands take
 * precedence over the current one. Returns 1 when a ramrod completion
 * is pending, 0 when done, negative errno on failure.
 */
static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
				struct bnx2x_mcast_ramrod_params *p,
				enum bnx2x_mcast_cmd cmd)
{
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	struct bnx2x_raw_obj *raw = &o->raw;
	struct mac_configuration_cmd *data =
		(struct mac_configuration_cmd *)(raw->rdata);
	int cnt = 0, i, rc;

	/* Reset the ramrod data buffer */
	memset(data, 0, sizeof(*data));

	/* First set all entries as invalid */
	for (i = 0; i < o->max_cmd_len ; i++)
		SET_FLAG(data->config_table[i].flags,
			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_INVALIDATE);

	/* Handle pending commands first */
	cnt = bnx2x_mcast_handle_pending_cmds_e1(bp, p);

	/* If there are no more pending commands - clear SCHEDULED state */
	if (list_empty(&o->pending_cmds_head))
		o->clear_sched(o);

	/* The below may be true iff there were no pending commands */
	if (!cnt)
		cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, 0);

	/* For 57710 every command has o->max_cmd_len length to ensure that
	 * commands are done one at a time.
	 */
	o->total_pending_num -= o->max_cmd_len;

	/* send a ramrod */

	WARN_ON(cnt > o->max_cmd_len);

	/* Set ramrod header (in particular, a number of entries to update) */
	bnx2x_mcast_set_rdata_hdr_e1(bp, p, (u8)cnt);

	/* update a registry: we need the registry contents to be always up
	 * to date in order to be able to execute a RESTORE opcode. Here
	 * we use the fact that for 57710 we sent one command at a time
	 * hence we may take the registry update out of the command handling
	 * and do it in a simpler way here.
	 */
	rc = bnx2x_mcast_refresh_registry_e1(bp, o);
	if (rc)
		return rc;

	/* If CLEAR_ONLY was requested - don't send a ramrod and clear
	 * RAMROD_PENDING status immediately.
	 */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
		raw->clear_pending(raw);
		return 0;
	} else {
		/* No need for an explicit memory barrier here as long we would
		 * need to ensure the ordering of writing to the SPQ element
		 * and updating of the SPQ producer which involves a memory
		 * read and we will have to put a full memory barrier there
		 * (inside bnx2x_sp_post()).
		 */

		/* Send a ramrod */
		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, raw->cid,
				   U64_HI(raw->rdata_mapping),
				   U64_LO(raw->rdata_mapping),
				   ETH_CONNECTION_TYPE);
		if (rc)
			return rc;

		/* Ramrod completion is pending */
		return 1;
	}
}
3504
/* Registry size accessor for chips using the exact-match registry (E1) */
static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o)
{
	return o->registry.exact_match.num_macs_set;
}
3509
/* Registry size accessor for chips using the approximate-match (bin) registry */
static int bnx2x_mcast_get_registry_size_aprox(struct bnx2x_mcast_obj *o)
{
	return o->registry.aprox_match.num_bins_set;
}
3514
/* Registry size setter for the exact-match registry (E1) */
static void bnx2x_mcast_set_registry_size_exact(struct bnx2x_mcast_obj *o,
						int n)
{
	o->registry.exact_match.num_macs_set = n;
}
3520
/* Registry size setter for the approximate-match (bin) registry */
static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o,
						int n)
{
	o->registry.aprox_match.num_bins_set = n;
}
3526
/**
 * bnx2x_config_mcast - top-level entry point for multicast configuration
 *
 * @bp:		device handle
 * @p:		multicast ramrod parameters
 * @cmd:	command to execute (ADD/DEL/RESTORE/CONT)
 *
 * Validates the command, queues it if another one is in flight or the
 * credit is exhausted, and otherwise executes it via the chip-specific
 * config_mcast callback. On failure both the registry size and the
 * pending counter are rolled back via o->revert().
 */
int bnx2x_config_mcast(struct bnx2x *bp,
		       struct bnx2x_mcast_ramrod_params *p,
		       enum bnx2x_mcast_cmd cmd)
{
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	struct bnx2x_raw_obj *r = &o->raw;
	int rc = 0, old_reg_size;

	/* This is needed to recover number of currently configured mcast macs
	 * in case of failure.
	 */
	old_reg_size = o->get_registry_size(o);

	/* Do some calculations and checks */
	rc = o->validate(bp, p, cmd);
	if (rc)
		return rc;

	/* Return if there is no work to do */
	if ((!p->mcast_list_len) && (!o->check_sched(o)))
		return 0;

	DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d\n",
	   o->total_pending_num, p->mcast_list_len, o->max_cmd_len);

	/* Enqueue the current command to the pending list if we can't complete
	 * it in the current iteration
	 */
	if (r->check_pending(r) ||
	    ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
		rc = o->enqueue_cmd(bp, p->mcast_obj, p, cmd);
		if (rc < 0)
			goto error_exit1;

		/* As long as the current command is in a command list we
		 * don't need to handle it separately.
		 */
		p->mcast_list_len = 0;
	}

	if (!r->check_pending(r)) {

		/* Set 'pending' state */
		r->set_pending(r);

		/* Configure the new classification in the chip */
		rc = o->config_mcast(bp, p, cmd);
		if (rc < 0)
			goto error_exit2;

		/* Wait for a ramrod completion if was requested */
		if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
			rc = o->wait_comp(bp, o);
	}

	return rc;

error_exit2:
	/* config_mcast failed after we marked the object pending - unmark */
	r->clear_pending(r);

error_exit1:
	/* Roll back the registry size / pending counter updates */
	o->revert(bp, p, old_reg_size);

	return rc;
}
3592
/* Atomically clear the SCHEDULED bit of the mcast object, with full
 * barriers so the state change is visible to other CPUs.
 */
static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o)
{
	smp_mb__before_clear_bit();
	clear_bit(o->sched_state, o->raw.pstate);
	smp_mb__after_clear_bit();
}
3599
/* Atomically set the SCHEDULED bit of the mcast object.
 * NOTE(review): the *_clear_bit() barrier helpers are used around a
 * set_bit() here - they appear to act as generic before/after-atomic-op
 * barriers, but confirm against the arch barrier API.
 */
static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o)
{
	smp_mb__before_clear_bit();
	set_bit(o->sched_state, o->raw.pstate);
	smp_mb__after_clear_bit();
}
3606
3607 static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o)
3608 {
3609         return !!test_bit(o->sched_state, o->raw.pstate);
3610 }
3611
/* True when a ramrod is in flight OR a command is scheduled for later */
static bool bnx2x_mcast_check_pending(struct bnx2x_mcast_obj *o)
{
	return o->raw.check_pending(&o->raw) || o->check_sched(o);
}
3616
/**
 * bnx2x_init_mcast_obj - initialize a multicast object
 *
 * @bp:			device handle
 * @mcast_obj:		object to initialize
 * @mcast_cl_id:	client id for mcast traffic
 * @mcast_cid:		connection id for the mcast ramrods
 * @func_id:		function id
 * @engine_id:		engine id
 * @rdata:		ramrod data buffer (virtual address)
 * @rdata_mapping:	ramrod data buffer (DMA address)
 * @state:		pending-state bit managed via @pstate
 * @pstate:		pointer to the state bitmap
 * @type:		object type
 *
 * Selects the chip-specific callback set: exact-match CAM handling for
 * 57710 (E1), registry-only approximation for 57711 (E1H), and the
 * rule-based approach for 57712 and newer (E2+).
 */
void bnx2x_init_mcast_obj(struct bnx2x *bp,
			  struct bnx2x_mcast_obj *mcast_obj,
			  u8 mcast_cl_id, u32 mcast_cid, u8 func_id,
			  u8 engine_id, void *rdata, dma_addr_t rdata_mapping,
			  int state, unsigned long *pstate, bnx2x_obj_type type)
{
	memset(mcast_obj, 0, sizeof(*mcast_obj));

	bnx2x_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
			   rdata, rdata_mapping, state, pstate, type);

	mcast_obj->engine_id = engine_id;

	INIT_LIST_HEAD(&mcast_obj->pending_cmds_head);

	/* Scheduling helpers are chip-independent */
	mcast_obj->sched_state = BNX2X_FILTER_MCAST_SCHED;
	mcast_obj->check_sched = bnx2x_mcast_check_sched;
	mcast_obj->set_sched = bnx2x_mcast_set_sched;
	mcast_obj->clear_sched = bnx2x_mcast_clear_sched;

	if (CHIP_IS_E1(bp)) {
		mcast_obj->config_mcast      = bnx2x_mcast_setup_e1;
		mcast_obj->enqueue_cmd       = bnx2x_mcast_enqueue_cmd;
		mcast_obj->hdl_restore       =
			bnx2x_mcast_handle_restore_cmd_e1;
		mcast_obj->check_pending     = bnx2x_mcast_check_pending;

		/* Emulation has a smaller per-function CAM slice */
		if (CHIP_REV_IS_SLOW(bp))
			mcast_obj->max_cmd_len = BNX2X_MAX_EMUL_MULTI;
		else
			mcast_obj->max_cmd_len = BNX2X_MAX_MULTICAST;

		mcast_obj->wait_comp         = bnx2x_mcast_wait;
		mcast_obj->set_one_rule      = bnx2x_mcast_set_one_rule_e1;
		mcast_obj->validate          = bnx2x_mcast_validate_e1;
		mcast_obj->revert            = bnx2x_mcast_revert_e1;
		mcast_obj->get_registry_size =
			bnx2x_mcast_get_registry_size_exact;
		mcast_obj->set_registry_size =
			bnx2x_mcast_set_registry_size_exact;

		/* 57710 is the only chip that uses the exact match for mcast
		 * at the moment.
		 */
		INIT_LIST_HEAD(&mcast_obj->registry.exact_match.macs);

	} else if (CHIP_IS_E1H(bp)) {
		mcast_obj->config_mcast  = bnx2x_mcast_setup_e1h;
		mcast_obj->enqueue_cmd   = NULL;
		mcast_obj->hdl_restore   = NULL;
		mcast_obj->check_pending = bnx2x_mcast_check_pending;

		/* 57711 doesn't send a ramrod, so it has unlimited credit
		 * for one command.
		 */
		mcast_obj->max_cmd_len       = -1;
		mcast_obj->wait_comp         = bnx2x_mcast_wait;
		mcast_obj->set_one_rule      = NULL;
		mcast_obj->validate          = bnx2x_mcast_validate_e1h;
		mcast_obj->revert            = bnx2x_mcast_revert_e1h;
		mcast_obj->get_registry_size =
			bnx2x_mcast_get_registry_size_aprox;
		mcast_obj->set_registry_size =
			bnx2x_mcast_set_registry_size_aprox;
	} else {
		mcast_obj->config_mcast      = bnx2x_mcast_setup_e2;
		mcast_obj->enqueue_cmd       = bnx2x_mcast_enqueue_cmd;
		mcast_obj->hdl_restore       =
			bnx2x_mcast_handle_restore_cmd_e2;
		mcast_obj->check_pending     = bnx2x_mcast_check_pending;
		/* TODO: There should be a proper HSI define for this number!!!
		 */
		mcast_obj->max_cmd_len       = 16;
		mcast_obj->wait_comp         = bnx2x_mcast_wait;
		mcast_obj->set_one_rule      = bnx2x_mcast_set_one_rule_e2;
		mcast_obj->validate          = bnx2x_mcast_validate_e2;
		mcast_obj->revert            = bnx2x_mcast_revert_e2;
		mcast_obj->get_registry_size =
			bnx2x_mcast_get_registry_size_aprox;
		mcast_obj->set_registry_size =
			bnx2x_mcast_set_registry_size_aprox;
	}
}
3700
3701 /*************************** Credit handling **********************************/
3702
/**
 * __atomic_add_ifless - add if the result is less than a given value.
 *
 * @v:  pointer of type atomic_t
 * @a:  the amount to add to v...
 * @u:  ...if (v + a) is less than u.
 *
 * returns true if (v + a) was less than u, and false otherwise.
 *
 */
static inline bool __atomic_add_ifless(atomic_t *v, int a, int u)
{
	int c, old;

	/* Classic cmpxchg loop: retry until the value is unchanged between
	 * the read and the swap, bailing out if the bound would be hit.
	 */
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c + a >= u))
			return false;

		old = atomic_cmpxchg((v), c, c + a);
		if (likely(old == c))
			break;
		/* Someone else changed v - retry with the fresh value */
		c = old;
	}

	return true;
}
3730
/**
 * __atomic_dec_ifmoe - dec if the result is more or equal than a given value.
 *
 * @v:  pointer of type atomic_t
 * @a:  the amount to dec from v...
 * @u:  ...if (v - a) is more or equal than u.
 *
 * returns true if (v - a) was more or equal than u, and false
 * otherwise.
 */
static inline bool __atomic_dec_ifmoe(atomic_t *v, int a, int u)
{
	int c, old;

	/* cmpxchg loop: retry until the swap succeeds, bailing out if the
	 * lower bound would be crossed.
	 */
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c - a < u))
			return false;

		old = atomic_cmpxchg((v), c, c - a);
		if (likely(old == c))
			break;
		/* Someone else changed v - retry with the fresh value */
		c = old;
	}

	return true;
}
3758
/* Take @cnt credits from the pool; returns false (without changing the
 * pool) when fewer than @cnt credits remain.
 */
static bool bnx2x_credit_pool_get(struct bnx2x_credit_pool_obj *o, int cnt)
{
	bool rc;

	smp_mb();
	rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
	smp_mb();

	return rc;
}
3769
/* Return @cnt credits to the pool; returns false (without changing the
 * pool) if that would exceed the pool size.
 */
static bool bnx2x_credit_pool_put(struct bnx2x_credit_pool_obj *o, int cnt)
{
	bool rc;

	smp_mb();

	/* Don't let to refill if credit + cnt > pool_sz */
	rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);

	smp_mb();

	return rc;
}
3783
3784 static int bnx2x_credit_pool_check(struct bnx2x_credit_pool_obj *o)
3785 {
3786         int cur_credit;
3787
3788         smp_mb();
3789         cur_credit = atomic_read(&o->credit);
3790
3791         return cur_credit;
3792 }
3793
/* get/put stub for unlimited pools - always succeeds */
static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o,
					  int cnt)
{
	return true;
}
3799
3800 static bool bnx2x_credit_pool_get_entry(
3801         struct bnx2x_credit_pool_obj *o,
3802         int *offset)
3803 {
3804         int idx, vec, i;
3805
3806         *offset = -1;
3807
3808         /* Find "internal cam-offset" then add to base for this object... */
3809         for (vec = 0; vec < BNX2X_POOL_VEC_SIZE; vec++) {
3810
3811                 /* Skip the current vector if there are no free entries in it */
3812                 if (!o->pool_mirror[vec])
3813                         continue;
3814
3815                 /* If we've got here we are going to find a free entry */
3816                 for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
3817                       i < BIT_VEC64_ELEM_SZ; idx++, i++)
3818
3819                         if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
3820                                 /* Got one!! */
3821                                 BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
3822                                 *offset = o->base_pool_offset + idx;
3823                                 return true;
3824                         }
3825         }
3826
3827         return false;
3828 }
3829
3830 static bool bnx2x_credit_pool_put_entry(
3831         struct bnx2x_credit_pool_obj *o,
3832         int offset)
3833 {
3834         if (offset < o->base_pool_offset)
3835                 return false;
3836
3837         offset -= o->base_pool_offset;
3838
3839         if (offset >= o->pool_sz)
3840                 return false;
3841
3842         /* Return the entry to the pool */
3843         BIT_VEC64_SET_BIT(o->pool_mirror, offset);
3844
3845         return true;
3846 }
3847
/* put_entry stub used when CAM entry handling is disabled - always succeeds */
static bool bnx2x_credit_pool_put_entry_always_true(
	struct bnx2x_credit_pool_obj *o,
	int offset)
{
	return true;
}
3854
/* get_entry stub used when CAM entry handling is disabled - always
 * succeeds but reports no real offset (-1).
 */
static bool bnx2x_credit_pool_get_entry_always_true(
	struct bnx2x_credit_pool_obj *o,
	int *offset)
{
	*offset = -1;
	return true;
}
/**
 * bnx2x_init_credit_pool - initialize credit pool internals.
 *
 * @p:          credit pool object to initialize
 * @base:       Base entry in the CAM to use.
 * @credit:     pool size.
 *
 * If base is negative no CAM entries handling will be performed.
 * If credit is negative pool operations will always succeed (unlimited pool).
 *
 */
static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
					  int base, int credit)
{
	/* Zero the object first */
	memset(p, 0, sizeof(*p));

	/* Set the table to all 1s (all entries free) */
	memset(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));

	/* Init a pool as full */
	atomic_set(&p->credit, credit);

	/* The total pool size */
	p->pool_sz = credit;

	p->base_pool_offset = base;

	/* Commit the change */
	smp_mb();

	p->check = bnx2x_credit_pool_check;

	/* if pool credit is negative - disable the checks */
	if (credit >= 0) {
		p->put      = bnx2x_credit_pool_put;
		p->get      = bnx2x_credit_pool_get;
		p->put_entry = bnx2x_credit_pool_put_entry;
		p->get_entry = bnx2x_credit_pool_get_entry;
	} else {
		p->put      = bnx2x_credit_pool_always_true;
		p->get      = bnx2x_credit_pool_always_true;
		p->put_entry = bnx2x_credit_pool_put_entry_always_true;
		p->get_entry = bnx2x_credit_pool_get_entry_always_true;
	}

	/* If base is negative - disable entries handling */
	if (base < 0) {
		p->put_entry = bnx2x_credit_pool_put_entry_always_true;
		p->get_entry = bnx2x_credit_pool_get_entry_always_true;
	}
}
3914
/* Initialize the MAC CAM credit pool according to the chip family and
 * the number of active functions sharing the CAM.
 */
void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
				struct bnx2x_credit_pool_obj *p, u8 func_id,
				u8 func_num)
{
/* TODO: this will be defined in consts as well... */
#define BNX2X_CAM_SIZE_EMUL 5

	int cam_sz;

	if (CHIP_IS_E1(bp)) {
		/* In E1, Multicast is saved in cam... */
		if (!CHIP_REV_IS_SLOW(bp))
			cam_sz = (MAX_MAC_CREDIT_E1 / 2) - BNX2X_MAX_MULTICAST;
		else
			cam_sz = BNX2X_CAM_SIZE_EMUL - BNX2X_MAX_EMUL_MULTI;

		bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);

	} else if (CHIP_IS_E1H(bp)) {
		/* CAM credit is equally divided between all active functions
		 * on the PORT.
		 */
		if ((func_num > 0)) {
			if (!CHIP_REV_IS_SLOW(bp))
				cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num));
			else
				cam_sz = BNX2X_CAM_SIZE_EMUL;
			bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
		} else {
			/* this should never happen! Block MAC operations. */
			bnx2x_init_credit_pool(p, 0, 0);
		}

	} else {

		/* CAM credit is equally divided between all active functions
		 * on the PATH.
		 */
		if ((func_num > 0)) {
			if (!CHIP_REV_IS_SLOW(bp))
				cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
			else
				cam_sz = BNX2X_CAM_SIZE_EMUL;

			/* No need for CAM entries handling for 57712 and
			 * newer.
			 */
			bnx2x_init_credit_pool(p, -1, cam_sz);
		} else {
			/* this should never happen! Block MAC operations. */
			bnx2x_init_credit_pool(p, 0, 0);
		}
	}
}
3969
3970 void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
3971                                  struct bnx2x_credit_pool_obj *p,
3972                                  u8 func_id,
3973                                  u8 func_num)
3974 {
3975         if (CHIP_IS_E1x(bp)) {
3976                 /* There is no VLAN credit in HW on 57710 and 57711 only
3977                  * MAC / MAC-VLAN can be set
3978                  */
3979                 bnx2x_init_credit_pool(p, 0, -1);
3980         } else {
3981                 /* CAM credit is equally divided between all active functions
3982                  * on the PATH.
3983                  */
3984                 if (func_num > 0) {
3985                         int credit = MAX_VLAN_CREDIT_E2 / func_num;
3986                         bnx2x_init_credit_pool(p, func_id * credit, credit);
3987                 } else
3988                         /* this should never happen! Block VLAN operations. */
3989                         bnx2x_init_credit_pool(p, 0, 0);
3990         }
3991 }
3992
3993 /****************** RSS Configuration ******************/
3994 /**
3995  * bnx2x_debug_print_ind_table - prints the indirection table configuration.
3996  *
3997  * @bp:         driver handle
3998  * @p:          pointer to rss configuration
3999  *
4000  * Prints it when NETIF_MSG_IFUP debug level is configured.
4001  */
4002 static inline void bnx2x_debug_print_ind_table(struct bnx2x *bp,
4003                                         struct bnx2x_config_rss_params *p)
4004 {
4005         int i;
4006
4007         DP(BNX2X_MSG_SP, "Setting indirection table to:\n");
4008         DP(BNX2X_MSG_SP, "0x0000: ");
4009         for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
4010                 DP_CONT(BNX2X_MSG_SP, "0x%02x ", p->ind_table[i]);
4011
4012                 /* Print 4 bytes in a line */
4013                 if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) &&
4014                     (((i + 1) & 0x3) == 0)) {
4015                         DP_CONT(BNX2X_MSG_SP, "\n");
4016                         DP(BNX2X_MSG_SP, "0x%04x: ", i + 1);
4017                 }
4018         }
4019
4020         DP_CONT(BNX2X_MSG_SP, "\n");
4021 }
4022
/**
 * bnx2x_setup_rss - configure RSS
 *
 * @bp:         device handle
 * @p:          rss configuration
 *
 * Fills an eth_rss_update_ramrod_data buffer from @p and posts an RSS
 * UPDATE ramrod on the slow-path queue. Returns a negative error code
 * if posting fails, otherwise 1 - a completion is still pending.
 */
static int bnx2x_setup_rss(struct bnx2x *bp,
			   struct bnx2x_config_rss_params *p)
{
	struct bnx2x_rss_config_obj *o = p->rss_obj;
	struct bnx2x_raw_obj *r = &o->raw;
	struct eth_rss_update_ramrod_data *data =
		(struct eth_rss_update_ramrod_data *)(r->rdata);
	u8 rss_mode = 0;
	int rc;

	memset(data, 0, sizeof(*data));

	DP(BNX2X_MSG_SP, "Configuring RSS\n");

	/* Set an echo field: CID + state, returned as-is in the completion */
	data->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
				 (r->state << BNX2X_SWCID_SHIFT));

	/* RSS mode */
	if (test_bit(BNX2X_RSS_MODE_DISABLED, &p->rss_flags))
		rss_mode = ETH_RSS_MODE_DISABLED;
	else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags))
		rss_mode = ETH_RSS_MODE_REGULAR;

	data->rss_mode = rss_mode;

	DP(BNX2X_MSG_SP, "rss_mode=%d\n", rss_mode);

	/* RSS capabilities: translate each requested hash type into the
	 * corresponding FW capability bit.
	 */
	if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags))
		data->capabilities |=
			ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;

	if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags))
		data->capabilities |=
			ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;

	if (test_bit(BNX2X_RSS_IPV4_UDP, &p->rss_flags))
		data->capabilities |=
			ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;

	if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags))
		data->capabilities |=
			ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;

	if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags))
		data->capabilities |=
			ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;

	if (test_bit(BNX2X_RSS_IPV6_UDP, &p->rss_flags))
		data->capabilities |=
			ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;

	/* Hashing mask */
	data->rss_result_mask = p->rss_result_mask;

	/* RSS engine ID */
	data->rss_engine_id = o->engine_id;

	DP(BNX2X_MSG_SP, "rss_engine_id=%d\n", data->rss_engine_id);

	/* Indirection table */
	memcpy(data->indirection_table, p->ind_table,
		  T_ETH_INDIRECTION_TABLE_SIZE);

	/* Remember the last configuration so it can be read back later */
	memcpy(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);

	/* Print the indirection table */
	if (netif_msg_ifup(bp))
		bnx2x_debug_print_ind_table(bp, p);

	/* RSS keys: only written when the caller asked for a key update */
	if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
		memcpy(&data->rss_key[0], &p->rss_key[0],
		       sizeof(data->rss_key));
		data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
	}

	/* No need for an explicit memory barrier here as long we would
	 * need to ensure the ordering of writing to the SPQ element
	 * and updating of the SPQ producer which involves a memory
	 * read and we will have to put a full memory barrier there
	 * (inside bnx2x_sp_post()).
	 */

	/* Send a ramrod */
	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_RSS_UPDATE, r->cid,
			   U64_HI(r->rdata_mapping),
			   U64_LO(r->rdata_mapping),
			   ETH_CONNECTION_TYPE);

	if (rc < 0)
		return rc;

	/* Positive return: the ramrod completion is still pending */
	return 1;
}
4128
/* Copy the last RSS indirection table that was programmed through
 * @rss_obj into the caller-provided buffer @ind_table, which must hold
 * at least sizeof(rss_obj->ind_table) bytes.
 */
void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
			     u8 *ind_table)
{
	memcpy(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table));
}
4134
4135 int bnx2x_config_rss(struct bnx2x *bp,
4136                      struct bnx2x_config_rss_params *p)
4137 {
4138         int rc;
4139         struct bnx2x_rss_config_obj *o = p->rss_obj;
4140         struct bnx2x_raw_obj *r = &o->raw;
4141
4142         /* Do nothing if only driver cleanup was requested */
4143         if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
4144                 return 0;
4145
4146         r->set_pending(r);
4147
4148         rc = o->config_rss(bp, p);
4149         if (rc < 0) {
4150                 r->clear_pending(r);
4151                 return rc;
4152         }
4153
4154         if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
4155                 rc = r->wait_comp(bp, r);
4156
4157         return rc;
4158 }
4159
/* Initialize an RSS configuration object: set up the embedded raw object
 * and install bnx2x_setup_rss() as the ramrod-sending callback.
 * @engine_id selects which RSS engine this function's queues use.
 */
void bnx2x_init_rss_config_obj(struct bnx2x *bp,
			       struct bnx2x_rss_config_obj *rss_obj,
			       u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
			       void *rdata, dma_addr_t rdata_mapping,
			       int state, unsigned long *pstate,
			       bnx2x_obj_type type)
{
	bnx2x_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
			   rdata_mapping, state, pstate, type);

	rss_obj->engine_id  = engine_id;
	rss_obj->config_rss = bnx2x_setup_rss;
}
4173
4174 /********************** Queue state object ***********************************/
4175
4176 /**
4177  * bnx2x_queue_state_change - perform Queue state change transition
4178  *
4179  * @bp:         device handle
4180  * @params:     parameters to perform the transition
4181  *
4182  * returns 0 in case of successfully completed transition, negative error
4183  * code in case of failure, positive (EBUSY) value if there is a completion
4184  * to that is still pending (possible only if RAMROD_COMP_WAIT is
4185  * not set in params->ramrod_flags for asynchronous commands).
4186  *
4187  */
4188 int bnx2x_queue_state_change(struct bnx2x *bp,
4189                              struct bnx2x_queue_state_params *params)
4190 {
4191         struct bnx2x_queue_sp_obj *o = params->q_obj;
4192         int rc, pending_bit;
4193         unsigned long *pending = &o->pending;
4194
4195         /* Check that the requested transition is legal */
4196         rc = o->check_transition(bp, o, params);
4197         if (rc) {
4198                 BNX2X_ERR("check transition returned an error. rc %d\n", rc);
4199                 return -EINVAL;
4200         }
4201
4202         /* Set "pending" bit */
4203         DP(BNX2X_MSG_SP, "pending bit was=%lx\n", o->pending);
4204         pending_bit = o->set_pending(o, params);
4205         DP(BNX2X_MSG_SP, "pending bit now=%lx\n", o->pending);
4206
4207         /* Don't send a command if only driver cleanup was requested */
4208         if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
4209                 o->complete_cmd(bp, o, pending_bit);
4210         else {
4211                 /* Send a ramrod */
4212                 rc = o->send_cmd(bp, params);
4213                 if (rc) {
4214                         o->next_state = BNX2X_Q_STATE_MAX;
4215                         clear_bit(pending_bit, pending);
4216                         smp_mb__after_clear_bit();
4217                         return rc;
4218                 }
4219
4220                 if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
4221                         rc = o->wait_comp(bp, o, pending_bit);
4222                         if (rc)
4223                                 return rc;
4224
4225                         return 0;
4226                 }
4227         }
4228
4229         return !!test_bit(pending_bit, pending);
4230 }
4231
4232 static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj,
4233                                    struct bnx2x_queue_state_params *params)
4234 {
4235         enum bnx2x_queue_cmd cmd = params->cmd, bit;
4236
4237         /* ACTIVATE and DEACTIVATE commands are implemented on top of
4238          * UPDATE command.
4239          */
4240         if ((cmd == BNX2X_Q_CMD_ACTIVATE) ||
4241             (cmd == BNX2X_Q_CMD_DEACTIVATE))
4242                 bit = BNX2X_Q_CMD_UPDATE;
4243         else
4244                 bit = cmd;
4245
4246         set_bit(bit, &obj->pending);
4247         return bit;
4248 }
4249
/* Block until the @cmd pending bit of @o has been cleared by the
 * completion flow (or until bnx2x_state_wait() gives up).
 */
static int bnx2x_queue_wait_comp(struct bnx2x *bp,
				 struct bnx2x_queue_sp_obj *o,
				 enum bnx2x_queue_cmd cmd)
{
	return bnx2x_state_wait(bp, cmd, &o->pending);
}
4256
4257 /**
4258  * bnx2x_queue_comp_cmd - complete the state change command.
4259  *
4260  * @bp:         device handle
4261  * @o:
4262  * @cmd:
4263  *
4264  * Checks that the arrived completion is expected.
4265  */
4266 static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
4267                                 struct bnx2x_queue_sp_obj *o,
4268                                 enum bnx2x_queue_cmd cmd)
4269 {
4270         unsigned long cur_pending = o->pending;
4271
4272         if (!test_and_clear_bit(cmd, &cur_pending)) {
4273                 BNX2X_ERR("Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d\n",
4274                           cmd, o->cids[BNX2X_PRIMARY_CID_INDEX],
4275                           o->state, cur_pending, o->next_state);
4276                 return -EINVAL;
4277         }
4278
4279         if (o->next_tx_only >= o->max_cos)
4280                 /* >= because tx only must always be smaller than cos since the
4281                  * primary connection supports COS 0
4282                  */
4283                 BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d",
4284                            o->next_tx_only, o->max_cos);
4285
4286         DP(BNX2X_MSG_SP,
4287            "Completing command %d for queue %d, setting state to %d\n",
4288            cmd, o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_state);
4289
4290         if (o->next_tx_only)  /* print num tx-only if any exist */
4291                 DP(BNX2X_MSG_SP, "primary cid %d: num tx-only cons %d\n",
4292                    o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_tx_only);
4293
4294         o->state = o->next_state;
4295         o->num_tx_only = o->next_tx_only;
4296         o->next_state = BNX2X_Q_STATE_MAX;
4297
4298         /* It's important that o->state and o->next_state are
4299          * updated before o->pending.
4300          */
4301         wmb();
4302
4303         clear_bit(cmd, &o->pending);
4304         smp_mb__after_clear_bit();
4305
4306         return 0;
4307 }
4308
4309 static void bnx2x_q_fill_setup_data_e2(struct bnx2x *bp,
4310                                 struct bnx2x_queue_state_params *cmd_params,
4311                                 struct client_init_ramrod_data *data)
4312 {
4313         struct bnx2x_queue_setup_params *params = &cmd_params->params.setup;
4314
4315         /* Rx data */
4316
4317         /* IPv6 TPA supported for E2 and above only */
4318         data->rx.tpa_en |= test_bit(BNX2X_Q_FLG_TPA_IPV6, &params->flags) *
4319                                 CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
4320 }
4321
4322 static void bnx2x_q_fill_init_general_data(struct bnx2x *bp,
4323                                 struct bnx2x_queue_sp_obj *o,
4324                                 struct bnx2x_general_setup_params *params,
4325                                 struct client_init_general_data *gen_data,
4326                                 unsigned long *flags)
4327 {
4328         gen_data->client_id = o->cl_id;
4329
4330         if (test_bit(BNX2X_Q_FLG_STATS, flags)) {
4331                 gen_data->statistics_counter_id =
4332                                         params->stat_id;
4333                 gen_data->statistics_en_flg = 1;
4334                 gen_data->statistics_zero_flg =
4335                         test_bit(BNX2X_Q_FLG_ZERO_STATS, flags);
4336         } else
4337                 gen_data->statistics_counter_id =
4338                                         DISABLE_STATISTIC_COUNTER_ID_VALUE;
4339
4340         gen_data->is_fcoe_flg = test_bit(BNX2X_Q_FLG_FCOE, flags);
4341         gen_data->activate_flg = test_bit(BNX2X_Q_FLG_ACTIVE, flags);
4342         gen_data->sp_client_id = params->spcl_id;
4343         gen_data->mtu = cpu_to_le16(params->mtu);
4344         gen_data->func_id = o->func_id;
4345
4346         gen_data->cos = params->cos;
4347
4348         gen_data->traffic_type =
4349                 test_bit(BNX2X_Q_FLG_FCOE, flags) ?
4350                 LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
4351
4352         DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d\n",
4353            gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg);
4354 }
4355
4356 static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
4357                                 struct bnx2x_txq_setup_params *params,
4358                                 struct client_init_tx_data *tx_data,
4359                                 unsigned long *flags)
4360 {
4361         tx_data->enforce_security_flg =
4362                 test_bit(BNX2X_Q_FLG_TX_SEC, flags);
4363         tx_data->default_vlan =
4364                 cpu_to_le16(params->default_vlan);
4365         tx_data->default_vlan_flg =
4366                 test_bit(BNX2X_Q_FLG_DEF_VLAN, flags);
4367         tx_data->tx_switching_flg =
4368                 test_bit(BNX2X_Q_FLG_TX_SWITCH, flags);
4369         tx_data->anti_spoofing_flg =
4370                 test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags);
4371         tx_data->force_default_pri_flg =
4372                 test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags);
4373
4374         tx_data->tunnel_lso_inc_ip_id =
4375                 test_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, flags);
4376         tx_data->tunnel_non_lso_pcsum_location =
4377                 test_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, flags) ? PCSUM_ON_PKT :
4378                                                                   PCSUM_ON_BD;
4379
4380         tx_data->tx_status_block_id = params->fw_sb_id;
4381         tx_data->tx_sb_index_number = params->sb_cq_index;
4382         tx_data->tss_leading_client_id = params->tss_leading_cl_id;
4383
4384         tx_data->tx_bd_page_base.lo =
4385                 cpu_to_le32(U64_LO(params->dscr_map));
4386         tx_data->tx_bd_page_base.hi =
4387                 cpu_to_le32(U64_HI(params->dscr_map));
4388
4389         /* Don't configure any Tx switching mode during queue SETUP */
4390         tx_data->state = 0;
4391 }
4392
/* Fill the flow-control (pause) section of a client-init ramrod:
 * low/high pause thresholds for the RCQ, BD and SGE rings plus the Rx
 * COS mask. @o is unused here; kept for a uniform fill-helper signature.
 */
static void bnx2x_q_fill_init_pause_data(struct bnx2x_queue_sp_obj *o,
				struct rxq_pause_params *params,
				struct client_init_rx_data *rx_data)
{
	/* flow control data */
	rx_data->cqe_pause_thr_low = cpu_to_le16(params->rcq_th_lo);
	rx_data->cqe_pause_thr_high = cpu_to_le16(params->rcq_th_hi);
	rx_data->bd_pause_thr_low = cpu_to_le16(params->bd_th_lo);
	rx_data->bd_pause_thr_high = cpu_to_le16(params->bd_th_hi);
	rx_data->sge_pause_thr_low = cpu_to_le16(params->sge_th_lo);
	rx_data->sge_pause_thr_high = cpu_to_le16(params->sge_th_hi);
	rx_data->rx_cos_mask = cpu_to_le16(params->pri_map);
}
4406
4407 static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o,
4408                                 struct bnx2x_rxq_setup_params *params,
4409                                 struct client_init_rx_data *rx_data,
4410                                 unsigned long *flags)
4411 {
4412         rx_data->tpa_en = test_bit(BNX2X_Q_FLG_TPA, flags) *
4413                                 CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
4414         rx_data->tpa_en |= test_bit(BNX2X_Q_FLG_TPA_GRO, flags) *
4415                                 CLIENT_INIT_RX_DATA_TPA_MODE;
4416         rx_data->vmqueue_mode_en_flg = 0;
4417
4418         rx_data->cache_line_alignment_log_size =
4419                 params->cache_line_log;
4420         rx_data->enable_dynamic_hc =
4421                 test_bit(BNX2X_Q_FLG_DHC, flags);
4422         rx_data->max_sges_for_packet = params->max_sges_pkt;
4423         rx_data->client_qzone_id = params->cl_qzone_id;
4424         rx_data->max_agg_size = cpu_to_le16(params->tpa_agg_sz);
4425
4426         /* Always start in DROP_ALL mode */
4427         rx_data->state = cpu_to_le16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
4428                                      CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);
4429
4430         /* We don't set drop flags */
4431         rx_data->drop_ip_cs_err_flg = 0;
4432         rx_data->drop_tcp_cs_err_flg = 0;
4433         rx_data->drop_ttl0_flg = 0;
4434         rx_data->drop_udp_cs_err_flg = 0;
4435         rx_data->inner_vlan_removal_enable_flg =
4436                 test_bit(BNX2X_Q_FLG_VLAN, flags);
4437         rx_data->outer_vlan_removal_enable_flg =
4438                 test_bit(BNX2X_Q_FLG_OV, flags);
4439         rx_data->status_block_id = params->fw_sb_id;
4440         rx_data->rx_sb_index_number = params->sb_cq_index;
4441         rx_data->max_tpa_queues = params->max_tpa_queues;
4442         rx_data->max_bytes_on_bd = cpu_to_le16(params->buf_sz);
4443         rx_data->sge_buff_size = cpu_to_le16(params->sge_buf_sz);
4444         rx_data->bd_page_base.lo =
4445                 cpu_to_le32(U64_LO(params->dscr_map));
4446         rx_data->bd_page_base.hi =
4447                 cpu_to_le32(U64_HI(params->dscr_map));
4448         rx_data->sge_page_base.lo =
4449                 cpu_to_le32(U64_LO(params->sge_map));
4450         rx_data->sge_page_base.hi =
4451                 cpu_to_le32(U64_HI(params->sge_map));
4452         rx_data->cqe_page_base.lo =
4453                 cpu_to_le32(U64_LO(params->rcq_map));
4454         rx_data->cqe_page_base.hi =
4455                 cpu_to_le32(U64_HI(params->rcq_map));
4456         rx_data->is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS, flags);
4457
4458         if (test_bit(BNX2X_Q_FLG_MCAST, flags)) {
4459                 rx_data->approx_mcast_engine_id = params->mcast_engine_id;
4460                 rx_data->is_approx_mcast = 1;
4461         }
4462
4463         rx_data->rss_engine_id = params->rss_engine_id;
4464
4465         /* silent vlan removal */
4466         rx_data->silent_vlan_removal_flg =
4467                 test_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, flags);
4468         rx_data->silent_vlan_value =
4469                 cpu_to_le16(params->silent_removal_value);
4470         rx_data->silent_vlan_mask =
4471                 cpu_to_le16(params->silent_removal_mask);
4472 }
4473
4474 /* initialize the general, tx and rx parts of a queue object */
4475 static void bnx2x_q_fill_setup_data_cmn(struct bnx2x *bp,
4476                                 struct bnx2x_queue_state_params *cmd_params,
4477                                 struct client_init_ramrod_data *data)
4478 {
4479         bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4480                                        &cmd_params->params.setup.gen_params,
4481                                        &data->general,
4482                                        &cmd_params->params.setup.flags);
4483
4484         bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4485                                   &cmd_params->params.setup.txq_params,
4486                                   &data->tx,
4487                                   &cmd_params->params.setup.flags);
4488
4489         bnx2x_q_fill_init_rx_data(cmd_params->q_obj,
4490                                   &cmd_params->params.setup.rxq_params,
4491                                   &data->rx,
4492                                   &cmd_params->params.setup.flags);
4493
4494         bnx2x_q_fill_init_pause_data(cmd_params->q_obj,
4495                                      &cmd_params->params.setup.pause_params,
4496                                      &data->rx);
4497 }
4498
4499 /* initialize the general and tx parts of a tx-only queue object */
4500 static void bnx2x_q_fill_setup_tx_only(struct bnx2x *bp,
4501                                 struct bnx2x_queue_state_params *cmd_params,
4502                                 struct tx_queue_init_ramrod_data *data)
4503 {
4504         bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4505                                        &cmd_params->params.tx_only.gen_params,
4506                                        &data->general,
4507                                        &cmd_params->params.tx_only.flags);
4508
4509         bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4510                                   &cmd_params->params.tx_only.txq_params,
4511                                   &data->tx,
4512                                   &cmd_params->params.tx_only.flags);
4513
4514         DP(BNX2X_MSG_SP, "cid %d, tx bd page lo %x hi %x",
4515                          cmd_params->q_obj->cids[0],
4516                          data->tx.tx_bd_page_base.lo,
4517                          data->tx.tx_bd_page_base.hi);
4518 }
4519
4520 /**
4521  * bnx2x_q_init - init HW/FW queue
4522  *
4523  * @bp:         device handle
4524  * @params:
4525  *
4526  * HW/FW initial Queue configuration:
4527  *      - HC: Rx and Tx
4528  *      - CDU context validation
4529  *
4530  */
4531 static inline int bnx2x_q_init(struct bnx2x *bp,
4532                                struct bnx2x_queue_state_params *params)
4533 {
4534         struct bnx2x_queue_sp_obj *o = params->q_obj;
4535         struct bnx2x_queue_init_params *init = &params->params.init;
4536         u16 hc_usec;
4537         u8 cos;
4538
4539         /* Tx HC configuration */
4540         if (test_bit(BNX2X_Q_TYPE_HAS_TX, &o->type) &&
4541             test_bit(BNX2X_Q_FLG_HC, &init->tx.flags)) {
4542                 hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;
4543
4544                 bnx2x_update_coalesce_sb_index(bp, init->tx.fw_sb_id,
4545                         init->tx.sb_cq_index,
4546                         !test_bit(BNX2X_Q_FLG_HC_EN, &init->tx.flags),
4547                         hc_usec);
4548         }
4549
4550         /* Rx HC configuration */
4551         if (test_bit(BNX2X_Q_TYPE_HAS_RX, &o->type) &&
4552             test_bit(BNX2X_Q_FLG_HC, &init->rx.flags)) {
4553                 hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;
4554
4555                 bnx2x_update_coalesce_sb_index(bp, init->rx.fw_sb_id,
4556                         init->rx.sb_cq_index,
4557                         !test_bit(BNX2X_Q_FLG_HC_EN, &init->rx.flags),
4558                         hc_usec);
4559         }
4560
4561         /* Set CDU context validation values */
4562         for (cos = 0; cos < o->max_cos; cos++) {
4563                 DP(BNX2X_MSG_SP, "setting context validation. cid %d, cos %d\n",
4564                                  o->cids[cos], cos);
4565                 DP(BNX2X_MSG_SP, "context pointer %p\n", init->cxts[cos]);
4566                 bnx2x_set_ctx_validation(bp, init->cxts[cos], o->cids[cos]);
4567         }
4568
4569         /* As no ramrod is sent, complete the command immediately  */
4570         o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT);
4571
4572         mmiowb();
4573         smp_mb();
4574
4575         return 0;
4576 }
4577
4578 static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
4579                                         struct bnx2x_queue_state_params *params)
4580 {
4581         struct bnx2x_queue_sp_obj *o = params->q_obj;
4582         struct client_init_ramrod_data *rdata =
4583                 (struct client_init_ramrod_data *)o->rdata;
4584         dma_addr_t data_mapping = o->rdata_mapping;
4585         int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4586
4587         /* Clear the ramrod data */
4588         memset(rdata, 0, sizeof(*rdata));
4589
4590         /* Fill the ramrod data */
4591         bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4592
4593         /* No need for an explicit memory barrier here as long we would
4594          * need to ensure the ordering of writing to the SPQ element
4595          * and updating of the SPQ producer which involves a memory
4596          * read and we will have to put a full memory barrier there
4597          * (inside bnx2x_sp_post()).
4598          */
4599
4600         return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4601                              U64_HI(data_mapping),
4602                              U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4603 }
4604
4605 static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
4606                                         struct bnx2x_queue_state_params *params)
4607 {
4608         struct bnx2x_queue_sp_obj *o = params->q_obj;
4609         struct client_init_ramrod_data *rdata =
4610                 (struct client_init_ramrod_data *)o->rdata;
4611         dma_addr_t data_mapping = o->rdata_mapping;
4612         int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4613
4614         /* Clear the ramrod data */
4615         memset(rdata, 0, sizeof(*rdata));
4616
4617         /* Fill the ramrod data */
4618         bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4619         bnx2x_q_fill_setup_data_e2(bp, params, rdata);
4620
4621         /* No need for an explicit memory barrier here as long we would
4622          * need to ensure the ordering of writing to the SPQ element
4623          * and updating of the SPQ producer which involves a memory
4624          * read and we will have to put a full memory barrier there
4625          * (inside bnx2x_sp_post()).
4626          */
4627
4628         return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4629                              U64_HI(data_mapping),
4630                              U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4631 }
4632
4633 static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
4634                                   struct bnx2x_queue_state_params *params)
4635 {
4636         struct bnx2x_queue_sp_obj *o = params->q_obj;
4637         struct tx_queue_init_ramrod_data *rdata =
4638                 (struct tx_queue_init_ramrod_data *)o->rdata;
4639         dma_addr_t data_mapping = o->rdata_mapping;
4640         int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
4641         struct bnx2x_queue_setup_tx_only_params *tx_only_params =
4642                 &params->params.tx_only;
4643         u8 cid_index = tx_only_params->cid_index;
4644
4645         if (cid_index >= o->max_cos) {
4646                 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4647                           o->cl_id, cid_index);
4648                 return -EINVAL;
4649         }
4650
4651         DP(BNX2X_MSG_SP, "parameters received: cos: %d sp-id: %d\n",
4652                          tx_only_params->gen_params.cos,
4653                          tx_only_params->gen_params.spcl_id);
4654
4655         /* Clear the ramrod data */
4656         memset(rdata, 0, sizeof(*rdata));
4657
4658         /* Fill the ramrod data */
4659         bnx2x_q_fill_setup_tx_only(bp, params, rdata);
4660
4661         DP(BNX2X_MSG_SP, "sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d\n",
4662                          o->cids[cid_index], rdata->general.client_id,
4663                          rdata->general.sp_client_id, rdata->general.cos);
4664
4665         /* No need for an explicit memory barrier here as long we would
4666          * need to ensure the ordering of writing to the SPQ element
4667          * and updating of the SPQ producer which involves a memory
4668          * read and we will have to put a full memory barrier there
4669          * (inside bnx2x_sp_post()).
4670          */
4671
4672         return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
4673                              U64_HI(data_mapping),
4674                              U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4675 }
4676
4677 static void bnx2x_q_fill_update_data(struct bnx2x *bp,
4678                                      struct bnx2x_queue_sp_obj *obj,
4679                                      struct bnx2x_queue_update_params *params,
4680                                      struct client_update_ramrod_data *data)
4681 {
4682         /* Client ID of the client to update */
4683         data->client_id = obj->cl_id;
4684
4685         /* Function ID of the client to update */
4686         data->func_id = obj->func_id;
4687
4688         /* Default VLAN value */
4689         data->default_vlan = cpu_to_le16(params->def_vlan);
4690
4691         /* Inner VLAN stripping */
4692         data->inner_vlan_removal_enable_flg =
4693                 test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM, &params->update_flags);
4694         data->inner_vlan_removal_change_flg =
4695                 test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
4696                          &params->update_flags);
4697
4698         /* Outer VLAN stripping */
4699         data->outer_vlan_removal_enable_flg =
4700                 test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, &params->update_flags);
4701         data->outer_vlan_removal_change_flg =
4702                 test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG,
4703                          &params->update_flags);
4704
4705         /* Drop packets that have source MAC that doesn't belong to this
4706          * Queue.
4707          */
4708         data->anti_spoofing_enable_flg =
4709                 test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, &params->update_flags);
4710         data->anti_spoofing_change_flg =
4711                 test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, &params->update_flags);
4712
4713         /* Activate/Deactivate */
4714         data->activate_flg =
4715                 test_bit(BNX2X_Q_UPDATE_ACTIVATE, &params->update_flags);
4716         data->activate_change_flg =
4717                 test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &params->update_flags);
4718
4719         /* Enable default VLAN */
4720         data->default_vlan_enable_flg =
4721                 test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, &params->update_flags);
4722         data->default_vlan_change_flg =
4723                 test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
4724                          &params->update_flags);
4725
4726         /* silent vlan removal */
4727         data->silent_vlan_change_flg =
4728                 test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
4729                          &params->update_flags);
4730         data->silent_vlan_removal_flg =
4731                 test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, &params->update_flags);
4732         data->silent_vlan_value = cpu_to_le16(params->silent_removal_value);
4733         data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask);
4734 }
4735
4736 static inline int bnx2x_q_send_update(struct bnx2x *bp,
4737                                       struct bnx2x_queue_state_params *params)
4738 {
4739         struct bnx2x_queue_sp_obj *o = params->q_obj;
4740         struct client_update_ramrod_data *rdata =
4741                 (struct client_update_ramrod_data *)o->rdata;
4742         dma_addr_t data_mapping = o->rdata_mapping;
4743         struct bnx2x_queue_update_params *update_params =
4744                 &params->params.update;
4745         u8 cid_index = update_params->cid_index;
4746
4747         if (cid_index >= o->max_cos) {
4748                 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4749                           o->cl_id, cid_index);
4750                 return -EINVAL;
4751         }
4752
4753         /* Clear the ramrod data */
4754         memset(rdata, 0, sizeof(*rdata));
4755
4756         /* Fill the ramrod data */
4757         bnx2x_q_fill_update_data(bp, o, update_params, rdata);
4758
4759         /* No need for an explicit memory barrier here as long we would
4760          * need to ensure the ordering of writing to the SPQ element
4761          * and updating of the SPQ producer which involves a memory
4762          * read and we will have to put a full memory barrier there
4763          * (inside bnx2x_sp_post()).
4764          */
4765
4766         return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
4767                              o->cids[cid_index], U64_HI(data_mapping),
4768                              U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4769 }
4770
4771 /**
4772  * bnx2x_q_send_deactivate - send DEACTIVATE command
4773  *
4774  * @bp:         device handle
4775  * @params:
4776  *
4777  * implemented using the UPDATE command.
4778  */
4779 static inline int bnx2x_q_send_deactivate(struct bnx2x *bp,
4780                                         struct bnx2x_queue_state_params *params)
4781 {
4782         struct bnx2x_queue_update_params *update = &params->params.update;
4783
4784         memset(update, 0, sizeof(*update));
4785
4786         __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4787
4788         return bnx2x_q_send_update(bp, params);
4789 }
4790
4791 /**
4792  * bnx2x_q_send_activate - send ACTIVATE command
4793  *
4794  * @bp:         device handle
4795  * @params:
4796  *
4797  * implemented using the UPDATE command.
4798  */
4799 static inline int bnx2x_q_send_activate(struct bnx2x *bp,
4800                                         struct bnx2x_queue_state_params *params)
4801 {
4802         struct bnx2x_queue_update_params *update = &params->params.update;
4803
4804         memset(update, 0, sizeof(*update));
4805
4806         __set_bit(BNX2X_Q_UPDATE_ACTIVATE, &update->update_flags);
4807         __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4808
4809         return bnx2x_q_send_update(bp, params);
4810 }
4811
4812 static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
4813                                         struct bnx2x_queue_state_params *params)
4814 {
4815         /* TODO: Not implemented yet. */
4816         return -1;
4817 }
4818
4819 static inline int bnx2x_q_send_halt(struct bnx2x *bp,
4820                                     struct bnx2x_queue_state_params *params)
4821 {
4822         struct bnx2x_queue_sp_obj *o = params->q_obj;
4823
4824         return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT,
4825                              o->cids[BNX2X_PRIMARY_CID_INDEX], 0, o->cl_id,
4826                              ETH_CONNECTION_TYPE);
4827 }
4828
4829 static inline int bnx2x_q_send_cfc_del(struct bnx2x *bp,
4830                                        struct bnx2x_queue_state_params *params)
4831 {
4832         struct bnx2x_queue_sp_obj *o = params->q_obj;
4833         u8 cid_idx = params->params.cfc_del.cid_index;
4834
4835         if (cid_idx >= o->max_cos) {
4836                 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4837                           o->cl_id, cid_idx);
4838                 return -EINVAL;
4839         }
4840
4841         return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL,
4842                              o->cids[cid_idx], 0, 0, NONE_CONNECTION_TYPE);
4843 }
4844
4845 static inline int bnx2x_q_send_terminate(struct bnx2x *bp,
4846                                         struct bnx2x_queue_state_params *params)
4847 {
4848         struct bnx2x_queue_sp_obj *o = params->q_obj;
4849         u8 cid_index = params->params.terminate.cid_index;
4850
4851         if (cid_index >= o->max_cos) {
4852                 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4853                           o->cl_id, cid_index);
4854                 return -EINVAL;
4855         }
4856
4857         return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE,
4858                              o->cids[cid_index], 0, 0, ETH_CONNECTION_TYPE);
4859 }
4860
4861 static inline int bnx2x_q_send_empty(struct bnx2x *bp,
4862                                      struct bnx2x_queue_state_params *params)
4863 {
4864         struct bnx2x_queue_sp_obj *o = params->q_obj;
4865
4866         return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_EMPTY,
4867                              o->cids[BNX2X_PRIMARY_CID_INDEX], 0, 0,
4868                              ETH_CONNECTION_TYPE);
4869 }
4870
/* Dispatch a queue command to its sender; this is the set of commands
 * common to all chip families (the chip-specific wrappers below handle
 * only SETUP themselves and route everything else here).
 * Returns the sender's result, or -EINVAL for an unknown command.
 */
static inline int bnx2x_queue_send_cmd_cmn(struct bnx2x *bp,
					struct bnx2x_queue_state_params *params)
{
	switch (params->cmd) {
	case BNX2X_Q_CMD_INIT:
		return bnx2x_q_init(bp, params);
	case BNX2X_Q_CMD_SETUP_TX_ONLY:
		return bnx2x_q_send_setup_tx_only(bp, params);
	case BNX2X_Q_CMD_DEACTIVATE:
		return bnx2x_q_send_deactivate(bp, params);
	case BNX2X_Q_CMD_ACTIVATE:
		return bnx2x_q_send_activate(bp, params);
	case BNX2X_Q_CMD_UPDATE:
		return bnx2x_q_send_update(bp, params);
	case BNX2X_Q_CMD_UPDATE_TPA:
		return bnx2x_q_send_update_tpa(bp, params);
	case BNX2X_Q_CMD_HALT:
		return bnx2x_q_send_halt(bp, params);
	case BNX2X_Q_CMD_CFC_DEL:
		return bnx2x_q_send_cfc_del(bp, params);
	case BNX2X_Q_CMD_TERMINATE:
		return bnx2x_q_send_terminate(bp, params);
	case BNX2X_Q_CMD_EMPTY:
		return bnx2x_q_send_empty(bp, params);
	default:
		BNX2X_ERR("Unknown command: %d\n", params->cmd);
		return -EINVAL;
	}
}
4900
/* E1x-specific command dispatcher: SETUP uses the E1x ramrod format;
 * all other commands are chip-agnostic and go through the common path.
 */
static int bnx2x_queue_send_cmd_e1x(struct bnx2x *bp,
				    struct bnx2x_queue_state_params *params)
{
	switch (params->cmd) {
	case BNX2X_Q_CMD_SETUP:
		return bnx2x_q_send_setup_e1x(bp, params);
	case BNX2X_Q_CMD_INIT:
	case BNX2X_Q_CMD_SETUP_TX_ONLY:
	case BNX2X_Q_CMD_DEACTIVATE:
	case BNX2X_Q_CMD_ACTIVATE:
	case BNX2X_Q_CMD_UPDATE:
	case BNX2X_Q_CMD_UPDATE_TPA:
	case BNX2X_Q_CMD_HALT:
	case BNX2X_Q_CMD_CFC_DEL:
	case BNX2X_Q_CMD_TERMINATE:
	case BNX2X_Q_CMD_EMPTY:
		return bnx2x_queue_send_cmd_cmn(bp, params);
	default:
		BNX2X_ERR("Unknown command: %d\n", params->cmd);
		return -EINVAL;
	}
}
4923
/* E2-specific command dispatcher: SETUP uses the E2 ramrod format;
 * all other commands are chip-agnostic and go through the common path.
 */
static int bnx2x_queue_send_cmd_e2(struct bnx2x *bp,
				   struct bnx2x_queue_state_params *params)
{
	switch (params->cmd) {
	case BNX2X_Q_CMD_SETUP:
		return bnx2x_q_send_setup_e2(bp, params);
	case BNX2X_Q_CMD_INIT:
	case BNX2X_Q_CMD_SETUP_TX_ONLY:
	case BNX2X_Q_CMD_DEACTIVATE:
	case BNX2X_Q_CMD_ACTIVATE:
	case BNX2X_Q_CMD_UPDATE:
	case BNX2X_Q_CMD_UPDATE_TPA:
	case BNX2X_Q_CMD_HALT:
	case BNX2X_Q_CMD_CFC_DEL:
	case BNX2X_Q_CMD_TERMINATE:
	case BNX2X_Q_CMD_EMPTY:
		return bnx2x_queue_send_cmd_cmn(bp, params);
	default:
		BNX2X_ERR("Unknown command: %d\n", params->cmd);
		return -EINVAL;
	}
}
4946
4947 /**
4948  * bnx2x_queue_chk_transition - check state machine of a regular Queue
4949  *
4950  * @bp:         device handle
4951  * @o:
4952  * @params:
4953  *
4954  * (not Forwarding)
4955  * It both checks if the requested command is legal in a current
4956  * state and, if it's legal, sets a `next_state' in the object
4957  * that will be used in the completion flow to set the `state'
4958  * of the object.
4959  *
4960  * returns 0 if a requested command is a legal transition,
4961  *         -EINVAL otherwise.
4962  */
4963 static int bnx2x_queue_chk_transition(struct bnx2x *bp,
4964                                       struct bnx2x_queue_sp_obj *o,
4965                                       struct bnx2x_queue_state_params *params)
4966 {
4967         enum bnx2x_q_state state = o->state, next_state = BNX2X_Q_STATE_MAX;
4968         enum bnx2x_queue_cmd cmd = params->cmd;
4969         struct bnx2x_queue_update_params *update_params =
4970                  &params->params.update;
4971         u8 next_tx_only = o->num_tx_only;
4972
4973         /* Forget all pending for completion commands if a driver only state
4974          * transition has been requested.
4975          */
4976         if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
4977                 o->pending = 0;
4978                 o->next_state = BNX2X_Q_STATE_MAX;
4979         }
4980
4981         /* Don't allow a next state transition if we are in the middle of
4982          * the previous one.
4983          */
4984         if (o->pending) {
4985                 BNX2X_ERR("Blocking transition since pending was %lx\n",
4986                           o->pending);
4987                 return -EBUSY;
4988         }
4989
4990         switch (state) {
4991         case BNX2X_Q_STATE_RESET:
4992                 if (cmd == BNX2X_Q_CMD_INIT)
4993                         next_state = BNX2X_Q_STATE_INITIALIZED;
4994
4995                 break;
4996         case BNX2X_Q_STATE_INITIALIZED:
4997                 if (cmd == BNX2X_Q_CMD_SETUP) {
4998                         if (test_bit(BNX2X_Q_FLG_ACTIVE,
4999                                      &params->params.setup.flags))
5000                                 next_state = BNX2X_Q_STATE_ACTIVE;
5001                         else
5002                                 next_state = BNX2X_Q_STATE_INACTIVE;
5003                 }
5004
5005                 break;
5006         case BNX2X_Q_STATE_ACTIVE:
5007                 if (cmd == BNX2X_Q_CMD_DEACTIVATE)
5008                         next_state = BNX2X_Q_STATE_INACTIVE;
5009
5010                 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5011                          (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5012                         next_state = BNX2X_Q_STATE_ACTIVE;
5013
5014                 else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
5015                         next_state = BNX2X_Q_STATE_MULTI_COS;
5016                         next_tx_only = 1;
5017                 }
5018
5019                 else if (cmd == BNX2X_Q_CMD_HALT)
5020                         next_state = BNX2X_Q_STATE_STOPPED;
5021
5022                 else if (cmd == BNX2X_Q_CMD_UPDATE) {
5023                         /* If "active" state change is requested, update the
5024                          *  state accordingly.
5025                          */
5026                         if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5027                                      &update_params->update_flags) &&
5028                             !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5029                                       &update_params->update_flags))
5030                                 next_state = BNX2X_Q_STATE_INACTIVE;
5031                         else
5032                                 next_state = BNX2X_Q_STATE_ACTIVE;
5033                 }
5034
5035                 break;
5036         case BNX2X_Q_STATE_MULTI_COS:
5037                 if (cmd == BNX2X_Q_CMD_TERMINATE)
5038                         next_state = BNX2X_Q_STATE_MCOS_TERMINATED;
5039
5040                 else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
5041                         next_state = BNX2X_Q_STATE_MULTI_COS;
5042                         next_tx_only = o->num_tx_only + 1;
5043                 }
5044
5045                 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5046                          (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5047                         next_state = BNX2X_Q_STATE_MULTI_COS;
5048
5049                 else if (cmd == BNX2X_Q_CMD_UPDATE) {
5050                         /* If "active" state change is requested, update the
5051                          *  state accordingly.
5052                          */
5053                         if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5054                                      &update_params->update_flags) &&
5055                             !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5056                                       &update_params->update_flags))
5057                                 next_state = BNX2X_Q_STATE_INACTIVE;
5058                         else
5059                                 next_state = BNX2X_Q_STATE_MULTI_COS;
5060                 }
5061
5062                 break;
5063         case BNX2X_Q_STATE_MCOS_TERMINATED:
5064                 if (cmd == BNX2X_Q_CMD_CFC_DEL) {
5065                         next_tx_only = o->num_tx_only - 1;
5066                         if (next_tx_only == 0)
5067                                 next_state = BNX2X_Q_STATE_ACTIVE;
5068                         else
5069                                 next_state = BNX2X_Q_STATE_MULTI_COS;
5070                 }
5071
5072                 break;
5073         case BNX2X_Q_STATE_INACTIVE:
5074                 if (cmd == BNX2X_Q_CMD_ACTIVATE)
5075                         next_state = BNX2X_Q_STATE_ACTIVE;
5076
5077                 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5078                          (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5079                         next_state = BNX2X_Q_STATE_INACTIVE;
5080
5081                 else if (cmd == BNX2X_Q_CMD_HALT)
5082                         next_state = BNX2X_Q_STATE_STOPPED;
5083
5084                 else if (cmd == BNX2X_Q_CMD_UPDATE) {
5085                         /* If "active" state change is requested, update the
5086                          * state accordingly.
5087                          */
5088                         if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5089                                      &update_params->update_flags) &&
5090                             test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5091                                      &update_params->update_flags)){
5092                                 if (o->num_tx_only == 0)
5093                                         next_state = BNX2X_Q_STATE_ACTIVE;
5094                                 else /* tx only queues exist for this queue */
5095                                         next_state = BNX2X_Q_STATE_MULTI_COS;
5096                         } else
5097                                 next_state = BNX2X_Q_STATE_INACTIVE;
5098                 }
5099
5100                 break;
5101         case BNX2X_Q_STATE_STOPPED:
5102                 if (cmd == BNX2X_Q_CMD_TERMINATE)
5103                         next_state = BNX2X_Q_STATE_TERMINATED;
5104
5105                 break;
5106         case BNX2X_Q_STATE_TERMINATED:
5107                 if (cmd == BNX2X_Q_CMD_CFC_DEL)
5108                         next_state = BNX2X_Q_STATE_RESET;
5109
5110                 break;
5111         default:
5112                 BNX2X_ERR("Illegal state: %d\n", state);
5113         }
5114
5115         /* Transition is assured */
5116         if (next_state != BNX2X_Q_STATE_MAX) {
5117                 DP(BNX2X_MSG_SP, "Good state transition: %d(%d)->%d\n",
5118                                  state, cmd, next_state);
5119                 o->next_state = next_state;
5120                 o->next_tx_only = next_tx_only;
5121                 return 0;
5122         }
5123
5124         DP(BNX2X_MSG_SP, "Bad state transition request: %d %d\n", state, cmd);
5125
5126         return -EINVAL;
5127 }
5128
5129 void bnx2x_init_queue_obj(struct bnx2x *bp,
5130                           struct bnx2x_queue_sp_obj *obj,
5131                           u8 cl_id, u32 *cids, u8 cid_cnt, u8 func_id,
5132                           void *rdata,
5133                           dma_addr_t rdata_mapping, unsigned long type)
5134 {
5135         memset(obj, 0, sizeof(*obj));
5136
5137         /* We support only BNX2X_MULTI_TX_COS Tx CoS at the moment */
5138         BUG_ON(BNX2X_MULTI_TX_COS < cid_cnt);
5139
5140         memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
5141         obj->max_cos = cid_cnt;
5142         obj->cl_id = cl_id;
5143         obj->func_id = func_id;
5144         obj->rdata = rdata;
5145         obj->rdata_mapping = rdata_mapping;
5146         obj->type = type;
5147         obj->next_state = BNX2X_Q_STATE_MAX;
5148
5149         if (CHIP_IS_E1x(bp))
5150                 obj->send_cmd = bnx2x_queue_send_cmd_e1x;
5151         else
5152                 obj->send_cmd = bnx2x_queue_send_cmd_e2;
5153
5154         obj->check_transition = bnx2x_queue_chk_transition;
5155
5156         obj->complete_cmd = bnx2x_queue_comp_cmd;
5157         obj->wait_comp = bnx2x_queue_wait_comp;
5158         obj->set_pending = bnx2x_queue_set_pending;
5159 }
5160
/* Return a queue object's logical state: collapse the detailed queue
 * state machine into "active" vs. "stopped", or -EINVAL for a state
 * outside the known set.
 */
int bnx2x_get_q_logical_state(struct bnx2x *bp,
			       struct bnx2x_queue_sp_obj *obj)
{
	switch (obj->state) {
	/* Only these two states carry traffic */
	case BNX2X_Q_STATE_ACTIVE:
	case BNX2X_Q_STATE_MULTI_COS:
		return BNX2X_Q_LOGICAL_STATE_ACTIVE;
	case BNX2X_Q_STATE_RESET:
	case BNX2X_Q_STATE_INITIALIZED:
	case BNX2X_Q_STATE_MCOS_TERMINATED:
	case BNX2X_Q_STATE_INACTIVE:
	case BNX2X_Q_STATE_STOPPED:
	case BNX2X_Q_STATE_TERMINATED:
	case BNX2X_Q_STATE_FLRED:
		return BNX2X_Q_LOGICAL_STATE_STOPPED;
	default:
		return -EINVAL;
	}
}
5181
/********************** Function state object *********************************/
/* Return the function object's current state, or BNX2X_F_STATE_MAX when
 * a command is still pending (state is transient and thus invalid).
 */
enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
					   struct bnx2x_func_sp_obj *o)
{
	/* in the middle of transaction - return INVALID state */
	if (o->pending)
		return BNX2X_F_STATE_MAX;

	/* ensure the order of reading of o->pending and o->state
	 * o->pending should be read first
	 */
	rmb();

	return o->state;
}
5197
/* Wait until the given command's pending bit is cleared by the
 * completion flow (thin wrapper around the generic state waiter).
 */
static int bnx2x_func_wait_comp(struct bnx2x *bp,
				struct bnx2x_func_sp_obj *o,
				enum bnx2x_func_cmd cmd)
{
	return bnx2x_state_wait(bp, cmd, &o->pending);
}
5204
5205 /**
5206  * bnx2x_func_state_change_comp - complete the state machine transition
5207  *
5208  * @bp:         device handle
5209  * @o:
5210  * @cmd:
5211  *
5212  * Called on state change transition. Completes the state
5213  * machine transition only - no HW interaction.
5214  */
5215 static inline int bnx2x_func_state_change_comp(struct bnx2x *bp,
5216                                                struct bnx2x_func_sp_obj *o,
5217                                                enum bnx2x_func_cmd cmd)
5218 {
5219         unsigned long cur_pending = o->pending;
5220
5221         if (!test_and_clear_bit(cmd, &cur_pending)) {
5222                 BNX2X_ERR("Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d\n",
5223                           cmd, BP_FUNC(bp), o->state,
5224                           cur_pending, o->next_state);
5225                 return -EINVAL;
5226         }
5227
5228         DP(BNX2X_MSG_SP,
5229            "Completing command %d for func %d, setting state to %d\n",
5230            cmd, BP_FUNC(bp), o->next_state);
5231
5232         o->state = o->next_state;
5233         o->next_state = BNX2X_F_STATE_MAX;
5234
5235         /* It's important that o->state and o->next_state are
5236          * updated before o->pending.
5237          */
5238         wmb();
5239
5240         clear_bit(cmd, &o->pending);
5241         smp_mb__after_clear_bit();
5242
5243         return 0;
5244 }
5245
5246 /**
5247  * bnx2x_func_comp_cmd - complete the state change command
5248  *
5249  * @bp:         device handle
5250  * @o:
5251  * @cmd:
5252  *
5253  * Checks that the arrived completion is expected.
5254  */
5255 static int bnx2x_func_comp_cmd(struct bnx2x *bp,
5256                                struct bnx2x_func_sp_obj *o,
5257                                enum bnx2x_func_cmd cmd)
5258 {
5259         /* Complete the state machine part first, check if it's a
5260          * legal completion.
5261          */
5262         int rc = bnx2x_func_state_change_comp(bp, o, cmd);
5263         return rc;
5264 }
5265
5266 /**
5267  * bnx2x_func_chk_transition - perform function state machine transition
5268  *
5269  * @bp:         device handle
5270  * @o:
5271  * @params:
5272  *
5273  * It both checks if the requested command is legal in a current
5274  * state and, if it's legal, sets a `next_state' in the object
5275  * that will be used in the completion flow to set the `state'
5276  * of the object.
5277  *
5278  * returns 0 if a requested command is a legal transition,
5279  *         -EINVAL otherwise.
5280  */
5281 static int bnx2x_func_chk_transition(struct bnx2x *bp,
5282                                      struct bnx2x_func_sp_obj *o,
5283                                      struct bnx2x_func_state_params *params)
5284 {
5285         enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX;
5286         enum bnx2x_func_cmd cmd = params->cmd;
5287
5288         /* Forget all pending for completion commands if a driver only state
5289          * transition has been requested.
5290          */
5291         if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5292                 o->pending = 0;
5293                 o->next_state = BNX2X_F_STATE_MAX;
5294         }
5295
5296         /* Don't allow a next state transition if we are in the middle of
5297          * the previous one.
5298          */
5299         if (o->pending)
5300                 return -EBUSY;
5301
5302         switch (state) {
5303         case BNX2X_F_STATE_RESET:
5304                 if (cmd == BNX2X_F_CMD_HW_INIT)
5305                         next_state = BNX2X_F_STATE_INITIALIZED;
5306
5307                 break;
5308         case BNX2X_F_STATE_INITIALIZED:
5309                 if (cmd == BNX2X_F_CMD_START)
5310                         next_state = BNX2X_F_STATE_STARTED;
5311
5312                 else if (cmd == BNX2X_F_CMD_HW_RESET)
5313                         next_state = BNX2X_F_STATE_RESET;
5314
5315                 break;
5316         case BNX2X_F_STATE_STARTED:
5317                 if (cmd == BNX2X_F_CMD_STOP)
5318                         next_state = BNX2X_F_STATE_INITIALIZED;
5319                 /* afex ramrods can be sent only in started mode, and only
5320                  * if not pending for function_stop ramrod completion
5321                  * for these events - next state remained STARTED.
5322                  */
5323                 else if ((cmd == BNX2X_F_CMD_AFEX_UPDATE) &&
5324                          (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5325                         next_state = BNX2X_F_STATE_STARTED;
5326
5327                 else if ((cmd == BNX2X_F_CMD_AFEX_VIFLISTS) &&
5328                          (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5329                         next_state = BNX2X_F_STATE_STARTED;
5330
5331                 /* Switch_update ramrod can be sent in either started or
5332                  * tx_stopped state, and it doesn't change the state.
5333                  */
5334                 else if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) &&
5335                          (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5336                         next_state = BNX2X_F_STATE_STARTED;
5337
5338                 else if (cmd == BNX2X_F_CMD_TX_STOP)
5339                         next_state = BNX2X_F_STATE_TX_STOPPED;
5340
5341                 break;
5342         case BNX2X_F_STATE_TX_STOPPED:
5343                 if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) &&
5344                     (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5345                         next_state = BNX2X_F_STATE_TX_STOPPED;
5346
5347                 else if (cmd == BNX2X_F_CMD_TX_START)
5348                         next_state = BNX2X_F_STATE_STARTED;
5349
5350                 break;
5351         default:
5352                 BNX2X_ERR("Unknown state: %d\n", state);
5353         }
5354
5355         /* Transition is assured */
5356         if (next_state != BNX2X_F_STATE_MAX) {
5357                 DP(BNX2X_MSG_SP, "Good function state transition: %d(%d)->%d\n",
5358                                  state, cmd, next_state);
5359                 o->next_state = next_state;
5360                 return 0;
5361         }
5362
5363         DP(BNX2X_MSG_SP, "Bad function state transition request: %d %d\n",
5364                          state, cmd);
5365
5366         return -EINVAL;
5367 }
5368
5369 /**
5370  * bnx2x_func_init_func - performs HW init at function stage
5371  *
5372  * @bp:         device handle
5373  * @drv:
5374  *
5375  * Init HW when the current phase is
5376  * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only
5377  * HW blocks.
5378  */
5379 static inline int bnx2x_func_init_func(struct bnx2x *bp,
5380                                        const struct bnx2x_func_sp_drv_ops *drv)
5381 {
5382         return drv->init_hw_func(bp);
5383 }
5384
5385 /**
5386  * bnx2x_func_init_port - performs HW init at port stage
5387  *
5388  * @bp:         device handle
5389  * @drv:
5390  *
5391  * Init HW when the current phase is
5392  * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
5393  * FUNCTION-only HW blocks.
5394  *
5395  */
5396 static inline int bnx2x_func_init_port(struct bnx2x *bp,
5397                                        const struct bnx2x_func_sp_drv_ops *drv)
5398 {
5399         int rc = drv->init_hw_port(bp);
5400         if (rc)
5401                 return rc;
5402
5403         return bnx2x_func_init_func(bp, drv);
5404 }
5405
5406 /**
5407  * bnx2x_func_init_cmn_chip - performs HW init at chip-common stage
5408  *
5409  * @bp:         device handle
5410  * @drv:
5411  *
5412  * Init HW when the current phase is
5413  * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
5414  * PORT-only and FUNCTION-only HW blocks.
5415  */
5416 static inline int bnx2x_func_init_cmn_chip(struct bnx2x *bp,
5417                                         const struct bnx2x_func_sp_drv_ops *drv)
5418 {
5419         int rc = drv->init_hw_cmn_chip(bp);
5420         if (rc)
5421                 return rc;
5422
5423         return bnx2x_func_init_port(bp, drv);
5424 }
5425
5426 /**
5427  * bnx2x_func_init_cmn - performs HW init at common stage
5428  *
5429  * @bp:         device handle
5430  * @drv:
5431  *
5432  * Init HW when the current phase is
5433  * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON,
5434  * PORT-only and FUNCTION-only HW blocks.
5435  */
5436 static inline int bnx2x_func_init_cmn(struct bnx2x *bp,
5437                                       const struct bnx2x_func_sp_drv_ops *drv)
5438 {
5439         int rc = drv->init_hw_cmn(bp);
5440         if (rc)
5441                 return rc;
5442
5443         return bnx2x_func_init_port(bp, drv);
5444 }
5445
/* Perform the HW init stages appropriate for the MCP-assigned load phase.
 * Each COMMON_XXX entry point cascades down through PORT and FUNCTION
 * init. On success the HW_INIT command is completed immediately since
 * no ramrods are involved.
 */
static int bnx2x_func_hw_init(struct bnx2x *bp,
			      struct bnx2x_func_state_params *params)
{
	u32 load_code = params->params.hw_init.load_phase;
	struct bnx2x_func_sp_obj *o = params->f_obj;
	const struct bnx2x_func_sp_drv_ops *drv = o->drv;
	int rc = 0;

	DP(BNX2X_MSG_SP, "function %d  load_code %x\n",
			 BP_ABS_FUNC(bp), load_code);

	/* Prepare buffers for unzipping the FW */
	rc = drv->gunzip_init(bp);
	if (rc)
		return rc;

	/* Prepare FW */
	rc = drv->init_fw(bp);
	if (rc) {
		BNX2X_ERR("Error loading firmware\n");
		goto init_err;
	}

	/* Handle the beginning of COMMON_XXX phases separately... */
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
		rc = bnx2x_func_init_cmn_chip(bp, drv);
		if (rc)
			goto init_err;

		break;
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_func_init_cmn(bp, drv);
		if (rc)
			goto init_err;

		break;
	case FW_MSG_CODE_DRV_LOAD_PORT:
		rc = bnx2x_func_init_port(bp, drv);
		if (rc)
			goto init_err;

		break;
	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		rc = bnx2x_func_init_func(bp, drv);
		if (rc)
			goto init_err;

		break;
	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		rc = -EINVAL;
	}

init_err:
	/* Release gunzip buffers on both success and failure paths */
	drv->gunzip_end(bp);

	/* In case of success, complete the command immediately: no ramrods
	 * have been sent.
	 */
	if (!rc)
		o->complete_cmd(bp, o, BNX2X_F_CMD_HW_INIT);

	return rc;
}
5511
5512 /**
5513  * bnx2x_func_reset_func - reset HW at function stage
5514  *
5515  * @bp:         device handle
5516  * @drv:
5517  *
5518  * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
5519  * FUNCTION-only HW blocks.
5520  */
5521 static inline void bnx2x_func_reset_func(struct bnx2x *bp,
5522                                         const struct bnx2x_func_sp_drv_ops *drv)
5523 {
5524         drv->reset_hw_func(bp);
5525 }
5526
/**
 * bnx2x_func_reset_port - reset HW at port stage
 *
 * @bp:         device handle
 * @drv:        driver-specific HW reset callbacks
 *
 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
 * FUNCTION-only and PORT-only HW blocks.
 *
 *                 !!!IMPORTANT!!!
 *
 * It's important to call reset_port before reset_func() as the last thing
 * reset_func does is pf_disable() thus disabling PGLUE_B, which
 * makes impossible any DMAE transactions.
 */
static inline void bnx2x_func_reset_port(struct bnx2x *bp,
					const struct bnx2x_func_sp_drv_ops *drv)
{
	drv->reset_hw_port(bp);
	bnx2x_func_reset_func(bp, drv);
}
5548
/**
 * bnx2x_func_reset_cmn - reset HW at common stage
 *
 * @bp:         device handle
 * @drv:        driver-specific HW reset callbacks
 *
 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
 * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
 * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
 */
static inline void bnx2x_func_reset_cmn(struct bnx2x *bp,
					const struct bnx2x_func_sp_drv_ops *drv)
{
	/* PORT/FUNCTION blocks first (keeps DMAE usable), then COMMON */
	bnx2x_func_reset_port(bp, drv);
	drv->reset_hw_cmn(bp);
}
5565
5566 static inline int bnx2x_func_hw_reset(struct bnx2x *bp,
5567                                       struct bnx2x_func_state_params *params)
5568 {
5569         u32 reset_phase = params->params.hw_reset.reset_phase;
5570         struct bnx2x_func_sp_obj *o = params->f_obj;
5571         const struct bnx2x_func_sp_drv_ops *drv = o->drv;
5572
5573         DP(BNX2X_MSG_SP, "function %d  reset_phase %x\n", BP_ABS_FUNC(bp),
5574                          reset_phase);
5575
5576         switch (reset_phase) {
5577         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5578                 bnx2x_func_reset_cmn(bp, drv);
5579                 break;
5580         case FW_MSG_CODE_DRV_UNLOAD_PORT:
5581                 bnx2x_func_reset_port(bp, drv);
5582                 break;
5583         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5584                 bnx2x_func_reset_func(bp, drv);
5585                 break;
5586         default:
5587                 BNX2X_ERR("Unknown reset_phase (0x%x) from MCP\n",
5588                            reset_phase);
5589                 break;
5590         }
5591
5592         /* Complete the command immediately: no ramrods have been sent. */
5593         o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET);
5594
5595         return 0;
5596 }
5597
5598 static inline int bnx2x_func_send_start(struct bnx2x *bp,
5599                                         struct bnx2x_func_state_params *params)
5600 {
5601         struct bnx2x_func_sp_obj *o = params->f_obj;
5602         struct function_start_data *rdata =
5603                 (struct function_start_data *)o->rdata;
5604         dma_addr_t data_mapping = o->rdata_mapping;
5605         struct bnx2x_func_start_params *start_params = &params->params.start;
5606
5607         memset(rdata, 0, sizeof(*rdata));
5608
5609         /* Fill the ramrod data with provided parameters */
5610         rdata->function_mode    = (u8)start_params->mf_mode;
5611         rdata->sd_vlan_tag      = cpu_to_le16(start_params->sd_vlan_tag);
5612         rdata->path_id          = BP_PATH(bp);
5613         rdata->network_cos_mode = start_params->network_cos_mode;
5614         rdata->gre_tunnel_mode  = start_params->gre_tunnel_mode;
5615         rdata->gre_tunnel_rss   = start_params->gre_tunnel_rss;
5616
5617         /* No need for an explicit memory barrier here as long we would
5618          * need to ensure the ordering of writing to the SPQ element
5619          * and updating of the SPQ producer which involves a memory
5620          * read and we will have to put a full memory barrier there
5621          * (inside bnx2x_sp_post()).
5622          */
5623
5624         return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
5625                              U64_HI(data_mapping),
5626                              U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5627 }
5628
5629 static inline int bnx2x_func_send_switch_update(struct bnx2x *bp,
5630                                         struct bnx2x_func_state_params *params)
5631 {
5632         struct bnx2x_func_sp_obj *o = params->f_obj;
5633         struct function_update_data *rdata =
5634                 (struct function_update_data *)o->rdata;
5635         dma_addr_t data_mapping = o->rdata_mapping;
5636         struct bnx2x_func_switch_update_params *switch_update_params =
5637                 &params->params.switch_update;
5638
5639         memset(rdata, 0, sizeof(*rdata));
5640
5641         /* Fill the ramrod data with provided parameters */
5642         rdata->tx_switch_suspend_change_flg = 1;
5643         rdata->tx_switch_suspend = switch_update_params->suspend;
5644         rdata->echo = SWITCH_UPDATE;
5645
5646         return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
5647                              U64_HI(data_mapping),
5648                              U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5649 }
5650
5651 static inline int bnx2x_func_send_afex_update(struct bnx2x *bp,
5652                                          struct bnx2x_func_state_params *params)
5653 {
5654         struct bnx2x_func_sp_obj *o = params->f_obj;
5655         struct function_update_data *rdata =
5656                 (struct function_update_data *)o->afex_rdata;
5657         dma_addr_t data_mapping = o->afex_rdata_mapping;
5658         struct bnx2x_func_afex_update_params *afex_update_params =
5659                 &params->params.afex_update;
5660
5661         memset(rdata, 0, sizeof(*rdata));
5662
5663         /* Fill the ramrod data with provided parameters */
5664         rdata->vif_id_change_flg = 1;
5665         rdata->vif_id = cpu_to_le16(afex_update_params->vif_id);
5666         rdata->afex_default_vlan_change_flg = 1;
5667         rdata->afex_default_vlan =
5668                 cpu_to_le16(afex_update_params->afex_default_vlan);
5669         rdata->allowed_priorities_change_flg = 1;
5670         rdata->allowed_priorities = afex_update_params->allowed_priorities;
5671         rdata->echo = AFEX_UPDATE;
5672
5673         /*  No need for an explicit memory barrier here as long we would
5674          *  need to ensure the ordering of writing to the SPQ element
5675          *  and updating of the SPQ producer which involves a memory
5676          *  read and we will have to put a full memory barrier there
5677          *  (inside bnx2x_sp_post()).
5678          */
5679         DP(BNX2X_MSG_SP,
5680            "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n",
5681            rdata->vif_id,
5682            rdata->afex_default_vlan, rdata->allowed_priorities);
5683
5684         return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
5685                              U64_HI(data_mapping),
5686                              U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5687 }
5688
5689 static
5690 inline int bnx2x_func_send_afex_viflists(struct bnx2x *bp,
5691                                          struct bnx2x_func_state_params *params)
5692 {
5693         struct bnx2x_func_sp_obj *o = params->f_obj;
5694         struct afex_vif_list_ramrod_data *rdata =
5695                 (struct afex_vif_list_ramrod_data *)o->afex_rdata;
5696         struct bnx2x_func_afex_viflists_params *afex_vif_params =
5697                 &params->params.afex_viflists;
5698         u64 *p_rdata = (u64 *)rdata;
5699
5700         memset(rdata, 0, sizeof(*rdata));
5701
5702         /* Fill the ramrod data with provided parameters */
5703         rdata->vif_list_index = cpu_to_le16(afex_vif_params->vif_list_index);
5704         rdata->func_bit_map          = afex_vif_params->func_bit_map;
5705         rdata->afex_vif_list_command = afex_vif_params->afex_vif_list_command;
5706         rdata->func_to_clear         = afex_vif_params->func_to_clear;
5707
5708         /* send in echo type of sub command */
5709         rdata->echo = afex_vif_params->afex_vif_list_command;
5710
5711         /*  No need for an explicit memory barrier here as long we would
5712          *  need to ensure the ordering of writing to the SPQ element
5713          *  and updating of the SPQ producer which involves a memory
5714          *  read and we will have to put a full memory barrier there
5715          *  (inside bnx2x_sp_post()).
5716          */
5717
5718         DP(BNX2X_MSG_SP, "afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x\n",
5719            rdata->afex_vif_list_command, rdata->vif_list_index,
5720            rdata->func_bit_map, rdata->func_to_clear);
5721
5722         /* this ramrod sends data directly and not through DMA mapping */
5723         return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0,
5724                              U64_HI(*p_rdata), U64_LO(*p_rdata),
5725                              NONE_CONNECTION_TYPE);
5726 }
5727
static inline int bnx2x_func_send_stop(struct bnx2x *bp,
				       struct bnx2x_func_state_params *params)
{
	/* FUNCTION_STOP carries no parameters: post the ramrod with an
	 * empty (zero) data field instead of a DMA mapping.
	 */
	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0,
			     NONE_CONNECTION_TYPE);
}
5734
static inline int bnx2x_func_send_tx_stop(struct bnx2x *bp,
				       struct bnx2x_func_state_params *params)
{
	/* STOP_TRAFFIC carries no parameters: post the ramrod with an
	 * empty (zero) data field instead of a DMA mapping.
	 */
	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0, 0,
			     NONE_CONNECTION_TYPE);
}
5741 static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
5742                                        struct bnx2x_func_state_params *params)
5743 {
5744         struct bnx2x_func_sp_obj *o = params->f_obj;
5745         struct flow_control_configuration *rdata =
5746                 (struct flow_control_configuration *)o->rdata;
5747         dma_addr_t data_mapping = o->rdata_mapping;
5748         struct bnx2x_func_tx_start_params *tx_start_params =
5749                 &params->params.tx_start;
5750         int i;
5751
5752         memset(rdata, 0, sizeof(*rdata));
5753
5754         rdata->dcb_enabled = tx_start_params->dcb_enabled;
5755         rdata->dcb_version = tx_start_params->dcb_version;
5756         rdata->dont_add_pri_0_en = tx_start_params->dont_add_pri_0_en;
5757
5758         for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
5759                 rdata->traffic_type_to_priority_cos[i] =
5760                         tx_start_params->traffic_type_to_priority_cos[i];
5761
5762         return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
5763                              U64_HI(data_mapping),
5764                              U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5765 }
5766
5767 static int bnx2x_func_send_cmd(struct bnx2x *bp,
5768                                struct bnx2x_func_state_params *params)
5769 {
5770         switch (params->cmd) {
5771         case BNX2X_F_CMD_HW_INIT:
5772                 return bnx2x_func_hw_init(bp, params);
5773         case BNX2X_F_CMD_START:
5774                 return bnx2x_func_send_start(bp, params);
5775         case BNX2X_F_CMD_STOP:
5776                 return bnx2x_func_send_stop(bp, params);
5777         case BNX2X_F_CMD_HW_RESET:
5778                 return bnx2x_func_hw_reset(bp, params);
5779         case BNX2X_F_CMD_AFEX_UPDATE:
5780                 return bnx2x_func_send_afex_update(bp, params);
5781         case BNX2X_F_CMD_AFEX_VIFLISTS:
5782                 return bnx2x_func_send_afex_viflists(bp, params);
5783         case BNX2X_F_CMD_TX_STOP:
5784                 return bnx2x_func_send_tx_stop(bp, params);
5785         case BNX2X_F_CMD_TX_START:
5786                 return bnx2x_func_send_tx_start(bp, params);
5787         case BNX2X_F_CMD_SWITCH_UPDATE:
5788                 return bnx2x_func_send_switch_update(bp, params);
5789         default:
5790                 BNX2X_ERR("Unknown command: %d\n", params->cmd);
5791                 return -EINVAL;
5792         }
5793 }
5794
5795 void bnx2x_init_func_obj(struct bnx2x *bp,
5796                          struct bnx2x_func_sp_obj *obj,
5797                          void *rdata, dma_addr_t rdata_mapping,
5798                          void *afex_rdata, dma_addr_t afex_rdata_mapping,
5799                          struct bnx2x_func_sp_drv_ops *drv_iface)
5800 {
5801         memset(obj, 0, sizeof(*obj));
5802
5803         mutex_init(&obj->one_pending_mutex);
5804
5805         obj->rdata = rdata;
5806         obj->rdata_mapping = rdata_mapping;
5807         obj->afex_rdata = afex_rdata;
5808         obj->afex_rdata_mapping = afex_rdata_mapping;
5809         obj->send_cmd = bnx2x_func_send_cmd;
5810         obj->check_transition = bnx2x_func_chk_transition;
5811         obj->complete_cmd = bnx2x_func_comp_cmd;
5812         obj->wait_comp = bnx2x_func_wait_comp;
5813
5814         obj->drv = drv_iface;
5815 }
5816
/**
 * bnx2x_func_state_change - perform Function state change transition
 *
 * @bp:         device handle
 * @params:     parameters to perform the transaction
 *
 * returns 0 in case of successfully completed transition,
 *         negative error code in case of failure, positive
 *         (EBUSY) value if there is a completion that is
 *         still pending (possible only if RAMROD_COMP_WAIT is
 *         not set in params->ramrod_flags for asynchronous
 *         commands).
 */
int bnx2x_func_state_change(struct bnx2x *bp,
			    struct bnx2x_func_state_params *params)
{
	struct bnx2x_func_sp_obj *o = params->f_obj;
	int rc, cnt = 300;
	enum bnx2x_func_cmd cmd = params->cmd;
	unsigned long *pending = &o->pending;

	mutex_lock(&o->one_pending_mutex);

	/* Check that the requested transition is legal */
	rc = o->check_transition(bp, o, params);
	if ((rc == -EBUSY) &&
	    (test_bit(RAMROD_RETRY, &params->ramrod_flags))) {
		/* -EBUSY means a previous command is still pending; with
		 * RAMROD_RETRY set, poll for up to 300 * 10ms for it to
		 * complete, dropping the mutex while sleeping so that the
		 * completion path can make progress.
		 */
		while ((rc == -EBUSY) && (--cnt > 0)) {
			mutex_unlock(&o->one_pending_mutex);
			msleep(10);
			mutex_lock(&o->one_pending_mutex);
			rc = o->check_transition(bp, o, params);
		}
		if (rc == -EBUSY) {
			mutex_unlock(&o->one_pending_mutex);
			BNX2X_ERR("timeout waiting for previous ramrod completion\n");
			return rc;
		}
	} else if (rc) {
		mutex_unlock(&o->one_pending_mutex);
		return rc;
	}

	/* Set "pending" bit */
	set_bit(cmd, pending);

	/* Don't send a command if only driver cleanup was requested */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
		bnx2x_func_state_change_comp(bp, o, cmd);
		mutex_unlock(&o->one_pending_mutex);
	} else {
		/* Send a ramrod */
		rc = o->send_cmd(bp, params);

		mutex_unlock(&o->one_pending_mutex);

		if (rc) {
			/* Ramrod posting failed: roll back the pending bit
			 * so a later transition is not blocked forever.
			 */
			o->next_state = BNX2X_F_STATE_MAX;
			clear_bit(cmd, pending);
			smp_mb__after_clear_bit();
			return rc;
		}

		if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
			rc = o->wait_comp(bp, o, cmd);
			if (rc)
				return rc;

			return 0;
		}
	}

	/* For asynchronous commands: report whether a completion is still
	 * pending (1) or the command has already completed (0).
	 */
	return !!test_bit(cmd, pending);
}