be2net: move un-exported routines from be.h to respective src files
[linux-2.6-block.git] / drivers / net / ethernet / emulex / benet / be_cmds.c
1 /*
2  * Copyright (C) 2005 - 2014 Emulex
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Contact Information:
11  * linux-drivers@emulex.com
12  *
13  * Emulex
14  * 3333 Susan Street
15  * Costa Mesa, CA 92626
16  */
17
18 #include <linux/module.h>
19 #include "be.h"
20 #include "be_cmds.h"
21
22 static struct be_cmd_priv_map cmd_priv_map[] = {
23         {
24                 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
25                 CMD_SUBSYSTEM_ETH,
26                 BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
27                 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
28         },
29         {
30                 OPCODE_COMMON_GET_FLOW_CONTROL,
31                 CMD_SUBSYSTEM_COMMON,
32                 BE_PRIV_LNKQUERY | BE_PRIV_VHADM |
33                 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
34         },
35         {
36                 OPCODE_COMMON_SET_FLOW_CONTROL,
37                 CMD_SUBSYSTEM_COMMON,
38                 BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
39                 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
40         },
41         {
42                 OPCODE_ETH_GET_PPORT_STATS,
43                 CMD_SUBSYSTEM_ETH,
44                 BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
45                 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
46         },
47         {
48                 OPCODE_COMMON_GET_PHY_DETAILS,
49                 CMD_SUBSYSTEM_COMMON,
50                 BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
51                 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
52         }
53 };
54
55 static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem)
56 {
57         int i;
58         int num_entries = sizeof(cmd_priv_map)/sizeof(struct be_cmd_priv_map);
59         u32 cmd_privileges = adapter->cmd_privileges;
60
61         for (i = 0; i < num_entries; i++)
62                 if (opcode == cmd_priv_map[i].opcode &&
63                     subsystem == cmd_priv_map[i].subsystem)
64                         if (!(cmd_privileges & cmd_priv_map[i].priv_mask))
65                                 return false;
66
67         return true;
68 }
69
/* Returns the payload area of an embedded (non-SGL) MCC WRB */
static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
	return wrb->payload.embedded_payload;
}
74
75 static void be_mcc_notify(struct be_adapter *adapter)
76 {
77         struct be_queue_info *mccq = &adapter->mcc_obj.q;
78         u32 val = 0;
79
80         if (be_error(adapter))
81                 return;
82
83         val |= mccq->id & DB_MCCQ_RING_ID_MASK;
84         val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
85
86         wmb();
87         iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
88 }
89
90 /* To check if valid bit is set, check the entire word as we don't know
91  * the endianness of the data (old entry is host endian while a new entry is
92  * little endian) */
93 static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
94 {
95         u32 flags;
96
97         if (compl->flags != 0) {
98                 flags = le32_to_cpu(compl->flags);
99                 if (flags & CQE_FLAGS_VALID_MASK) {
100                         compl->flags = flags;
101                         return true;
102                 }
103         }
104         return false;
105 }
106
/* Need to reset the entire word that houses the valid bit */
/* Marks the CQ entry as consumed so be_mcc_compl_is_new() won't see it again */
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
	compl->flags = 0;
}
112
113 static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1)
114 {
115         unsigned long addr;
116
117         addr = tag1;
118         addr = ((addr << 16) << 16) | tag0;
119         return (void *)addr;
120 }
121
122 static bool be_skip_err_log(u8 opcode, u16 base_status, u16 addl_status)
123 {
124         if (base_status == MCC_STATUS_NOT_SUPPORTED ||
125             base_status == MCC_STATUS_ILLEGAL_REQUEST ||
126             addl_status == MCC_ADDL_STATUS_TOO_MANY_INTERFACES ||
127             (opcode == OPCODE_COMMON_WRITE_FLASHROM &&
128             (base_status == MCC_STATUS_ILLEGAL_FIELD ||
129              addl_status == MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH)))
130                 return true;
131         else
132                 return false;
133 }
134
/* Place holder for all the async MCC cmds wherein the caller is not in a busy
 * loop (has not issued be_mcc_notify_wait())
 */
static void be_async_cmd_process(struct be_adapter *adapter,
				 struct be_mcc_compl *compl,
				 struct be_cmd_resp_hdr *resp_hdr)
{
	/* NOTE: the local named base_status shadows the base_status()
	 * macro/helper for the rest of this function
	 */
	enum mcc_base_status base_status = base_status(compl->status);
	u8 opcode = 0, subsystem = 0;

	if (resp_hdr) {
		opcode = resp_hdr->opcode;
		subsystem = resp_hdr->subsystem;
	}

	/* loopback-test cmd: just wake up the ethtool waiter */
	if (opcode == OPCODE_LOWLEVEL_LOOPBACK_TEST &&
	    subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
		complete(&adapter->et_cmd_compl);
		return;
	}

	/* flash cmds: record the status for the waiter before waking it */
	if ((opcode == OPCODE_COMMON_WRITE_FLASHROM ||
	     opcode == OPCODE_COMMON_WRITE_OBJECT) &&
	    subsystem == CMD_SUBSYSTEM_COMMON) {
		adapter->flash_status = compl->status;
		complete(&adapter->et_cmd_compl);
		return;
	}

	/* stats cmds: parse the DMA-ed stats and clear the in-flight flag */
	if ((opcode == OPCODE_ETH_GET_STATISTICS ||
	     opcode == OPCODE_ETH_GET_PPORT_STATS) &&
	    subsystem == CMD_SUBSYSTEM_ETH &&
	    base_status == MCC_STATUS_SUCCESS) {
		be_parse_stats(adapter);
		adapter->stats_cmd_sent = false;
		return;
	}

	/* temperature query: cache the reading, or stop polling on failure */
	if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES &&
	    subsystem == CMD_SUBSYSTEM_COMMON) {
		if (base_status == MCC_STATUS_SUCCESS) {
			struct be_cmd_resp_get_cntl_addnl_attribs *resp =
							(void *)resp_hdr;
			adapter->drv_stats.be_on_die_temperature =
						resp->on_die_temperature;
		} else {
			adapter->be_get_temp_freq = 0;
		}
		return;
	}
}
186
/* Processes one (synchronous-cmd) MCC completion: swaps it to host endian,
 * recovers the cmd's resp hdr from the tags, lets the async-cmd handler
 * look at it, and logs unexpected failures.
 * Returns the raw completion status word (0 on success).
 */
static int be_mcc_compl_process(struct be_adapter *adapter,
				struct be_mcc_compl *compl)
{
	enum mcc_base_status base_status;
	enum mcc_addl_status addl_status;
	struct be_cmd_resp_hdr *resp_hdr;
	u8 opcode = 0, subsystem = 0;

	/* Just swap the status to host endian; mcc tag is opaquely copied
	 * from mcc_wrb */
	be_dws_le_to_cpu(compl, 4);

	base_status = base_status(compl->status);
	addl_status = addl_status(compl->status);

	/* tag0/tag1 hold the virtual address of the cmd's resp hdr */
	resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);
	if (resp_hdr) {
		opcode = resp_hdr->opcode;
		subsystem = resp_hdr->subsystem;
	}

	be_async_cmd_process(adapter, compl, resp_hdr);

	if (base_status != MCC_STATUS_SUCCESS &&
	    !be_skip_err_log(opcode, base_status, addl_status)) {
		if (base_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
			/* privilege failures are expected on VFs: warn only */
			dev_warn(&adapter->pdev->dev,
				 "VF is not privileged to issue opcode %d-%d\n",
				 opcode, subsystem);
		} else {
			dev_err(&adapter->pdev->dev,
				"opcode %d-%d failed:status %d-%d\n",
				opcode, subsystem, base_status, addl_status);
		}
	}
	return compl->status;
}
224
/* Link state evt is a string of bytes; no need for endian swapping */
/* Handles an async link-state event: invalidates the cached link speed and
 * propagates the new status to the net stack when appropriate.
 */
static void be_async_link_state_process(struct be_adapter *adapter,
					struct be_mcc_compl *compl)
{
	struct be_async_event_link_state *evt =
			(struct be_async_event_link_state *)compl;

	/* When link status changes, link speed must be re-queried from FW */
	adapter->phy.link_speed = -1;

	/* On BEx the FW does not send a separate link status
	 * notification for physical and logical link.
	 * On other chips just process the logical link
	 * status notification
	 */
	if (!BEx_chip(adapter) &&
	    !(evt->port_link_status & LOGICAL_LINK_STATUS_MASK))
		return;

	/* For the initial link status do not rely on the ASYNC event as
	 * it may not be received in some cases.
	 */
	if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT)
		be_link_status_update(adapter,
				      evt->port_link_status & LINK_STATUS_MASK);
}
251
252 /* Grp5 CoS Priority evt */
253 static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
254                                                struct be_mcc_compl *compl)
255 {
256         struct be_async_event_grp5_cos_priority *evt =
257                         (struct be_async_event_grp5_cos_priority *)compl;
258
259         if (evt->valid) {
260                 adapter->vlan_prio_bmap = evt->available_priority_bmap;
261                 adapter->recommended_prio &= ~VLAN_PRIO_MASK;
262                 adapter->recommended_prio =
263                         evt->reco_default_priority << VLAN_PRIO_SHIFT;
264         }
265 }
266
267 /* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */
268 static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
269                                             struct be_mcc_compl *compl)
270 {
271         struct be_async_event_grp5_qos_link_speed *evt =
272                         (struct be_async_event_grp5_qos_link_speed *)compl;
273
274         if (adapter->phy.link_speed >= 0 &&
275             evt->physical_port == adapter->port_num)
276                 adapter->phy.link_speed = le16_to_cpu(evt->qos_link_speed) * 10;
277 }
278
279 /*Grp5 PVID evt*/
280 static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
281                                              struct be_mcc_compl *compl)
282 {
283         struct be_async_event_grp5_pvid_state *evt =
284                         (struct be_async_event_grp5_pvid_state *)compl;
285
286         if (evt->enabled) {
287                 adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
288                 dev_info(&adapter->pdev->dev, "LPVID: %d\n", adapter->pvid);
289         } else {
290                 adapter->pvid = 0;
291         }
292 }
293
294 static void be_async_grp5_evt_process(struct be_adapter *adapter,
295                                       struct be_mcc_compl *compl)
296 {
297         u8 event_type = (compl->flags >> ASYNC_EVENT_TYPE_SHIFT) &
298                                 ASYNC_EVENT_TYPE_MASK;
299
300         switch (event_type) {
301         case ASYNC_EVENT_COS_PRIORITY:
302                 be_async_grp5_cos_priority_process(adapter, compl);
303                 break;
304         case ASYNC_EVENT_QOS_SPEED:
305                 be_async_grp5_qos_speed_process(adapter, compl);
306                 break;
307         case ASYNC_EVENT_PVID_STATE:
308                 be_async_grp5_pvid_state_process(adapter, compl);
309                 break;
310         default:
311                 break;
312         }
313 }
314
315 static void be_async_dbg_evt_process(struct be_adapter *adapter,
316                                      struct be_mcc_compl *cmp)
317 {
318         u8 event_type = 0;
319         struct be_async_event_qnq *evt = (struct be_async_event_qnq *)cmp;
320
321         event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) &
322                         ASYNC_EVENT_TYPE_MASK;
323
324         switch (event_type) {
325         case ASYNC_DEBUG_EVENT_TYPE_QNQ:
326                 if (evt->valid)
327                         adapter->qnq_vid = le16_to_cpu(evt->vlan_tag);
328                 adapter->flags |= BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
329         break;
330         default:
331                 dev_warn(&adapter->pdev->dev, "Unknown debug event 0x%x!\n",
332                          event_type);
333         break;
334         }
335 }
336
337 static inline bool is_link_state_evt(u32 flags)
338 {
339         return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
340                         ASYNC_EVENT_CODE_LINK_STATE;
341 }
342
343 static inline bool is_grp5_evt(u32 flags)
344 {
345         return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
346                         ASYNC_EVENT_CODE_GRP_5;
347 }
348
349 static inline bool is_dbg_evt(u32 flags)
350 {
351         return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
352                         ASYNC_EVENT_CODE_QNQ;
353 }
354
355 static void be_mcc_event_process(struct be_adapter *adapter,
356                                  struct be_mcc_compl *compl)
357 {
358         if (is_link_state_evt(compl->flags))
359                 be_async_link_state_process(adapter, compl);
360         else if (is_grp5_evt(compl->flags))
361                 be_async_grp5_evt_process(adapter, compl);
362         else if (is_dbg_evt(compl->flags))
363                 be_async_dbg_evt_process(adapter, compl);
364 }
365
366 static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
367 {
368         struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
369         struct be_mcc_compl *compl = queue_tail_node(mcc_cq);
370
371         if (be_mcc_compl_is_new(compl)) {
372                 queue_tail_inc(mcc_cq);
373                 return compl;
374         }
375         return NULL;
376 }
377
/* Arms the MCC CQ and flags it for re-arming on every notify, so FW can
 * raise interrupts for async events. Serialized against be_process_mcc()
 * by mcc_cq_lock.
 */
void be_async_mcc_enable(struct be_adapter *adapter)
{
	spin_lock_bh(&adapter->mcc_cq_lock);

	be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
	adapter->mcc_obj.rearm_cq = true;

	spin_unlock_bh(&adapter->mcc_cq_lock);
}
387
/* Stops re-arming of the MCC CQ (clears rearm_cq before the final notify)
 * so no further async interrupts are raised. Counterpart of
 * be_async_mcc_enable().
 */
void be_async_mcc_disable(struct be_adapter *adapter)
{
	spin_lock_bh(&adapter->mcc_cq_lock);

	adapter->mcc_obj.rearm_cq = false;
	be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);

	spin_unlock_bh(&adapter->mcc_cq_lock);
}
397
/* Drains the MCC CQ: async events are dispatched to their handlers and cmd
 * completions are processed (freeing their WRB slots). Finally notifies FW
 * of the number of entries consumed, re-arming the CQ if rearm_cq is set.
 * Returns the status of the last cmd completion processed (0 if none).
 * Takes mcc_cq_lock without disabling BHs; callers that may race with BH
 * context disable BHs themselves (see be_mcc_wait_compl()).
 */
int be_process_mcc(struct be_adapter *adapter)
{
	struct be_mcc_compl *compl;
	int num = 0, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	spin_lock(&adapter->mcc_cq_lock);

	while ((compl = be_mcc_compl_get(adapter))) {
		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
			/* async event: no WRB slot was consumed for it */
			be_mcc_event_process(adapter, compl);
		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
			status = be_mcc_compl_process(adapter, compl);
			atomic_dec(&mcc_obj->q.used);
		}
		be_mcc_compl_use(compl);	/* clear valid bit for reuse */
		num++;
	}

	if (num)
		be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);

	spin_unlock(&adapter->mcc_cq_lock);
	return status;
}
423
/* Wait till no more pending mcc requests are present */
/* Busy-polls be_process_mcc() (up to ~12s) until the MCCQ is empty.
 * Returns -EIO on adapter error or FW timeout, else the status returned by
 * the last processed completion.
 */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
#define mcc_timeout		120000 /* 12s timeout */
	int i, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	for (i = 0; i < mcc_timeout; i++) {
		if (be_error(adapter))
			return -EIO;

		/* be_process_mcc() takes mcc_cq_lock without disabling BHs;
		 * disable them here to avoid racing with BH-context users
		 */
		local_bh_disable();
		status = be_process_mcc(adapter);
		local_bh_enable();

		if (atomic_read(&mcc_obj->q.used) == 0)
			break;
		udelay(100);
	}
	if (i == mcc_timeout) {
		dev_err(&adapter->pdev->dev, "FW not responding\n");
		adapter->fw_timeout = true;
		return -EIO;
	}
	return status;
}
450
/* Notify MCC requests and wait for completion */
/* Rings the MCCQ doorbell for the most recently posted WRB and waits for it
 * to complete. Returns -EIO on error/timeout, else the combined
 * base+additional status taken from the cmd's resp hdr.
 */
static int be_mcc_notify_wait(struct be_adapter *adapter)
{
	int status;
	struct be_mcc_wrb *wrb;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
	u16 index = mcc_obj->q.head;
	struct be_cmd_resp_hdr *resp;

	/* q.head was already advanced when the WRB was posted; step back to
	 * reach the WRB being issued now
	 */
	index_dec(&index, mcc_obj->q.len);
	wrb = queue_index_node(&mcc_obj->q, index);

	/* recover the resp hdr address from the WRB tags before notifying */
	resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1);

	be_mcc_notify(adapter);

	status = be_mcc_wait_compl(adapter);
	if (status == -EIO)
		goto out;

	/* fold base and additional status into a single return value */
	status = (resp->base_status |
		  ((resp->addl_status & CQE_ADDL_STATUS_MASK) <<
		   CQE_ADDL_STATUS_SHIFT));
out:
	return status;
}
477
/* Polls the mbox doorbell till FW sets the ready bit.
 * Returns 0 when ready, -EIO on adapter error, -1 on surprise removal
 * (all-ones PCI read) or timeout. The ~4s timeout is approximate since
 * msleep(1) may sleep longer than 1 ms.
 */
static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
{
	int msecs = 0;
	u32 ready;

	do {
		if (be_error(adapter))
			return -EIO;

		ready = ioread32(db);
		if (ready == 0xffffffff)
			return -1;	/* all-ones read: device is gone */

		ready &= MPU_MAILBOX_DB_RDY_MASK;
		if (ready)
			break;

		if (msecs > 4000) {
			dev_err(&adapter->pdev->dev, "FW not responding\n");
			adapter->fw_timeout = true;
			be_detect_error(adapter);
			return -1;
		}

		msleep(1);
		msecs++;
	} while (true);

	return 0;
}
508
/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 */
static int be_mbox_notify_wait(struct be_adapter *adapter)
{
	int status;
	u32 val = 0;
	void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* step 1: write the high half of the mbox dma address */
	val |= MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* step 2: write the low half; this kicks off cmd execution */
	val = 0;
	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32)(mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* A cq entry has been made now */
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(adapter, &mbox->compl);
		be_mcc_compl_use(compl);
		if (status)
			return status;
	} else {
		dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
		return -1;
	}
	return 0;
}
558
559 static u16 be_POST_stage_get(struct be_adapter *adapter)
560 {
561         u32 sem;
562
563         if (BEx_chip(adapter))
564                 sem  = ioread32(adapter->csr + SLIPORT_SEMAPHORE_OFFSET_BEx);
565         else
566                 pci_read_config_dword(adapter->pdev,
567                                       SLIPORT_SEMAPHORE_OFFSET_SH, &sem);
568
569         return sem & POST_STAGE_MASK;
570 }
571
572 static int lancer_wait_ready(struct be_adapter *adapter)
573 {
574 #define SLIPORT_READY_TIMEOUT 30
575         u32 sliport_status;
576         int i;
577
578         for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
579                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
580                 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
581                         break;
582
583                 msleep(1000);
584         }
585
586         if (i == SLIPORT_READY_TIMEOUT)
587                 return sliport_status ? : -1;
588
589         return 0;
590 }
591
592 static bool lancer_provisioning_error(struct be_adapter *adapter)
593 {
594         u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
595
596         sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
597         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
598                 sliport_err1 = ioread32(adapter->db + SLIPORT_ERROR1_OFFSET);
599                 sliport_err2 = ioread32(adapter->db + SLIPORT_ERROR2_OFFSET);
600
601                 if (sliport_err1 == SLIPORT_ERROR_NO_RESOURCE1 &&
602                     sliport_err2 == SLIPORT_ERROR_NO_RESOURCE2)
603                         return true;
604         }
605         return false;
606 }
607
/* Checks Lancer SLIPORT readiness and, when FW flags an error that needs a
 * reset, triggers the port reset and re-waits for readiness.
 * Returns 0 when ready, -EAGAIN on a (temporary) resource-provisioning
 * error, -1 on unrecoverable error or timeout.
 */
int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;
	bool resource_error;

	resource_error = lancer_provisioning_error(adapter);
	if (resource_error)
		return -EAGAIN;

	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			/* trigger the port reset requested by FW */
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check if adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			/* error without reset-needed (or vice versa) cannot
			 * be recovered here
			 */
			status = -1;
		}
	}
	/* Stop error recovery if error is not recoverable.
	 * No resource error is temporary errors and will go away
	 * when PF provisions resources.
	 */
	resource_error = lancer_provisioning_error(adapter);
	if (resource_error)
		status = -EAGAIN;

	return status;
}
649
/* Waits for FW to finish POST (up to ~60s, 2s polling) or, on Lancer, for
 * the SLIPORT to become ready.
 * Returns 0 when ready, -EINTR if interrupted, -1 on timeout.
 */
int be_fw_wait_ready(struct be_adapter *adapter)
{
	u16 stage;
	int status, timeout = 0;
	struct device *dev = &adapter->pdev->dev;

	if (lancer_chip(adapter)) {
		status = lancer_wait_ready(adapter);
		if (status) {
			/* NOTE(review): status can be negative here; the
			 * assignment to the u16 "stage" truncates it, so the
			 * value logged below may look odd — confirm intended.
			 */
			stage = status;
			goto err;
		}
		return 0;
	}

	do {
		stage = be_POST_stage_get(adapter);
		if (stage == POST_STAGE_ARMFW_RDY)
			return 0;

		dev_info(dev, "Waiting for POST, %ds elapsed\n", timeout);
		if (msleep_interruptible(2000)) {
			dev_err(dev, "Waiting for POST aborted\n");
			return -EINTR;
		}
		timeout += 2;
	} while (timeout < 60);

err:
	dev_err(dev, "POST timeout; stage=%#x\n", stage);
	return -1;
}
682
/* Returns the first SGE of a non-embedded WRB's scatter/gather list */
static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
	return &wrb->payload.sgl[0];
}
687
688 static inline void fill_wrb_tags(struct be_mcc_wrb *wrb, unsigned long addr)
689 {
690         wrb->tag0 = addr & 0xFFFFFFFF;
691         wrb->tag1 = upper_32_bits(addr);
692 }
693
694 /* Don't touch the hdr after it's prepared */
695 /* mem will be NULL for embedded commands */
696 static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
697                                    u8 subsystem, u8 opcode, int cmd_len,
698                                    struct be_mcc_wrb *wrb,
699                                    struct be_dma_mem *mem)
700 {
701         struct be_sge *sge;
702
703         req_hdr->opcode = opcode;
704         req_hdr->subsystem = subsystem;
705         req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
706         req_hdr->version = 0;
707         fill_wrb_tags(wrb, (ulong) req_hdr);
708         wrb->payload_length = cmd_len;
709         if (mem) {
710                 wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
711                         MCC_WRB_SGE_CNT_SHIFT;
712                 sge = nonembedded_sgl(wrb);
713                 sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
714                 sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
715                 sge->len = cpu_to_le32(mem->size);
716         } else
717                 wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
718         be_dws_cpu_to_le(wrb, 8);
719 }
720
721 static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
722                                       struct be_dma_mem *mem)
723 {
724         int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
725         u64 dma = (u64)mem->dma;
726
727         for (i = 0; i < buf_pages; i++) {
728                 pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
729                 pages[i].hi = cpu_to_le32(upper_32_bits(dma));
730                 dma += PAGE_SIZE_4K;
731         }
732 }
733
734 static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
735 {
736         struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
737         struct be_mcc_wrb *wrb
738                 = &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
739         memset(wrb, 0, sizeof(*wrb));
740         return wrb;
741 }
742
743 static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
744 {
745         struct be_queue_info *mccq = &adapter->mcc_obj.q;
746         struct be_mcc_wrb *wrb;
747
748         if (!mccq->created)
749                 return NULL;
750
751         if (atomic_read(&mccq->used) >= mccq->len)
752                 return NULL;
753
754         wrb = queue_head_node(mccq);
755         queue_head_inc(mccq);
756         atomic_inc(&mccq->used);
757         memset(wrb, 0, sizeof(*wrb));
758         return wrb;
759 }
760
/* True once the MCCQ has been created; cmds then go via the MCCQ instead
 * of the (slower, mutex-protected) mbox.
 */
static bool use_mcc(struct be_adapter *adapter)
{
	return adapter->mcc_obj.q.created;
}
765
766 /* Must be used only in process context */
767 static int be_cmd_lock(struct be_adapter *adapter)
768 {
769         if (use_mcc(adapter)) {
770                 spin_lock_bh(&adapter->mcc_lock);
771                 return 0;
772         } else {
773                 return mutex_lock_interruptible(&adapter->mbox_lock);
774         }
775 }
776
777 /* Must be used only in process context */
778 static void be_cmd_unlock(struct be_adapter *adapter)
779 {
780         if (use_mcc(adapter))
781                 spin_unlock_bh(&adapter->mcc_lock);
782         else
783                 return mutex_unlock(&adapter->mbox_lock);
784 }
785
/* Copies the caller's WRB into a WRB slot of the MCCQ (or the mbox when the
 * MCCQ is not yet up). For embedded cmds the tags of the copy are re-pointed
 * at the caller's embedded payload. Returns NULL if no MCCQ slot is free.
 */
static struct be_mcc_wrb *be_cmd_copy(struct be_adapter *adapter,
				      struct be_mcc_wrb *wrb)
{
	struct be_mcc_wrb *dest_wrb;

	if (use_mcc(adapter)) {
		dest_wrb = wrb_from_mccq(adapter);
		if (!dest_wrb)
			return NULL;
	} else {
		dest_wrb = wrb_from_mbox(adapter);
	}

	memcpy(dest_wrb, wrb, sizeof(*wrb));
	/* NOTE: the tags are pointed at the SOURCE wrb's payload, not the
	 * copy's; be_cmd_notify_wait() memcpy-s the completed WRB back into
	 * the caller's wrb afterwards.
	 */
	if (wrb->embedded & cpu_to_le32(MCC_WRB_EMBEDDED_MASK))
		fill_wrb_tags(dest_wrb, (ulong) embedded_payload(wrb));

	return dest_wrb;
}
805
806 /* Must be used only in process context */
807 static int be_cmd_notify_wait(struct be_adapter *adapter,
808                               struct be_mcc_wrb *wrb)
809 {
810         struct be_mcc_wrb *dest_wrb;
811         int status;
812
813         status = be_cmd_lock(adapter);
814         if (status)
815                 return status;
816
817         dest_wrb = be_cmd_copy(adapter, wrb);
818         if (!dest_wrb)
819                 return -EBUSY;
820
821         if (use_mcc(adapter))
822                 status = be_mcc_notify_wait(adapter);
823         else
824                 status = be_mbox_notify_wait(adapter);
825
826         if (!status)
827                 memcpy(wrb, dest_wrb, sizeof(*wrb));
828
829         be_cmd_unlock(adapter);
830         return status;
831 }
832
833 /* Tell fw we're about to start firing cmds by writing a
834  * special pattern across the wrb hdr; uses mbox
835  */
836 int be_cmd_fw_init(struct be_adapter *adapter)
837 {
838         u8 *wrb;
839         int status;
840
841         if (lancer_chip(adapter))
842                 return 0;
843
844         if (mutex_lock_interruptible(&adapter->mbox_lock))
845                 return -1;
846
847         wrb = (u8 *)wrb_from_mbox(adapter);
848         *wrb++ = 0xFF;
849         *wrb++ = 0x12;
850         *wrb++ = 0x34;
851         *wrb++ = 0xFF;
852         *wrb++ = 0xFF;
853         *wrb++ = 0x56;
854         *wrb++ = 0x78;
855         *wrb = 0xFF;
856
857         status = be_mbox_notify_wait(adapter);
858
859         mutex_unlock(&adapter->mbox_lock);
860         return status;
861 }
862
863 /* Tell fw we're done with firing cmds by writing a
864  * special pattern across the wrb hdr; uses mbox
865  */
866 int be_cmd_fw_clean(struct be_adapter *adapter)
867 {
868         u8 *wrb;
869         int status;
870
871         if (lancer_chip(adapter))
872                 return 0;
873
874         if (mutex_lock_interruptible(&adapter->mbox_lock))
875                 return -1;
876
877         wrb = (u8 *)wrb_from_mbox(adapter);
878         *wrb++ = 0xFF;
879         *wrb++ = 0xAA;
880         *wrb++ = 0xBB;
881         *wrb++ = 0xFF;
882         *wrb++ = 0xFF;
883         *wrb++ = 0xCC;
884         *wrb++ = 0xDD;
885         *wrb = 0xFF;
886
887         status = be_mbox_notify_wait(adapter);
888
889         mutex_unlock(&adapter->mbox_lock);
890         return status;
891 }
892
/* Creates an EQ via the mbox. On success fills in eqo->q.id and, for
 * EQ_CREATEv2 (SH-R onwards), the FW-assigned msix index.
 * Returns 0 on success, -1 if interrupted, else the cmd status.
 */
int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eq_create *req;
	struct be_dma_mem *q_mem = &eqo->q.dma_mem;
	int status, ver = 0;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb,
			       NULL);

	/* Support for EQ_CREATEv2 available only SH-R onwards */
	if (!(BEx_chip(adapter) || lancer_chip(adapter)))
		ver = 2;

	req->hdr.version = ver;
	req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	/* 4byte eqe*/
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
		      __ilog2_u32(eqo->q.len / 256));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);

		eqo->q.id = le16_to_cpu(resp->eq_id);
		/* v2 resp carries the FW-chosen msix idx; else use eqo->idx */
		eqo->msix_idx =
			(ver == 2) ? le16_to_cpu(resp->msix_idx) : eqo->idx;
		eqo->q.created = true;
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
939
940 /* Use MCC */
941 int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
942                           bool permanent, u32 if_handle, u32 pmac_id)
943 {
944         struct be_mcc_wrb *wrb;
945         struct be_cmd_req_mac_query *req;
946         int status;
947
948         spin_lock_bh(&adapter->mcc_lock);
949
950         wrb = wrb_from_mccq(adapter);
951         if (!wrb) {
952                 status = -EBUSY;
953                 goto err;
954         }
955         req = embedded_payload(wrb);
956
957         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
958                                OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb,
959                                NULL);
960         req->type = MAC_ADDRESS_TYPE_NETWORK;
961         if (permanent) {
962                 req->permanent = 1;
963         } else {
964                 req->if_id = cpu_to_le16((u16)if_handle);
965                 req->pmac_id = cpu_to_le32(pmac_id);
966                 req->permanent = 0;
967         }
968
969         status = be_mcc_notify_wait(adapter);
970         if (!status) {
971                 struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
972
973                 memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
974         }
975
976 err:
977         spin_unlock_bh(&adapter->mcc_lock);
978         return status;
979 }
980
981 /* Uses synchronous MCCQ */
982 int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
983                     u32 if_id, u32 *pmac_id, u32 domain)
984 {
985         struct be_mcc_wrb *wrb;
986         struct be_cmd_req_pmac_add *req;
987         int status;
988
989         spin_lock_bh(&adapter->mcc_lock);
990
991         wrb = wrb_from_mccq(adapter);
992         if (!wrb) {
993                 status = -EBUSY;
994                 goto err;
995         }
996         req = embedded_payload(wrb);
997
998         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
999                                OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb,
1000                                NULL);
1001
1002         req->hdr.domain = domain;
1003         req->if_id = cpu_to_le32(if_id);
1004         memcpy(req->mac_address, mac_addr, ETH_ALEN);
1005
1006         status = be_mcc_notify_wait(adapter);
1007         if (!status) {
1008                 struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
1009
1010                 *pmac_id = le32_to_cpu(resp->pmac_id);
1011         }
1012
1013 err:
1014         spin_unlock_bh(&adapter->mcc_lock);
1015
1016          if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
1017                 status = -EPERM;
1018
1019         return status;
1020 }
1021
1022 /* Uses synchronous MCCQ */
1023 int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
1024 {
1025         struct be_mcc_wrb *wrb;
1026         struct be_cmd_req_pmac_del *req;
1027         int status;
1028
1029         if (pmac_id == -1)
1030                 return 0;
1031
1032         spin_lock_bh(&adapter->mcc_lock);
1033
1034         wrb = wrb_from_mccq(adapter);
1035         if (!wrb) {
1036                 status = -EBUSY;
1037                 goto err;
1038         }
1039         req = embedded_payload(wrb);
1040
1041         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1042                                OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req),
1043                                wrb, NULL);
1044
1045         req->hdr.domain = dom;
1046         req->if_id = cpu_to_le32(if_id);
1047         req->pmac_id = cpu_to_le32(pmac_id);
1048
1049         status = be_mcc_notify_wait(adapter);
1050
1051 err:
1052         spin_unlock_bh(&adapter->mcc_lock);
1053         return status;
1054 }
1055
/* Creates the CQ @cq, bound to EQ @eq, with the given coalescing settings.
 * Uses Mbox
 */
int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
                     struct be_queue_info *eq, bool no_delay, int coalesce_wm)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_cq_create *req;
        struct be_dma_mem *q_mem = &cq->dma_mem;
        void *ctxt;
        int status;

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);
        ctxt = &req->context;

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                               OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb,
                               NULL);

        req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

        /* BE2/BE3 use the v0 context layout; all other chips use v2 */
        if (BEx_chip(adapter)) {
                AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
                              coalesce_wm);
                AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
                              ctxt, no_delay);
                AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
                              __ilog2_u32(cq->len / 256));
                AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
                AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
                AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
        } else {
                req->hdr.version = 2;
                req->page_size = 1; /* 1 for 4K */

                /* coalesce-wm field in this cmd is not relevant to Lancer.
                 * Lancer uses COMMON_MODIFY_CQ to set this field
                 */
                if (!lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
                                      ctxt, coalesce_wm);
                AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt,
                              no_delay);
                AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
                              __ilog2_u32(cq->len / 256));
                AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
                AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
                AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
        }

        be_dws_cpu_to_le(ctxt, sizeof(req->context));

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);

                cq->id = le16_to_cpu(resp->cq_id);
                cq->created = true;
        }

        mutex_unlock(&adapter->mbox_lock);

        return status;
}
1124
1125 static u32 be_encoded_q_len(int q_len)
1126 {
1127         u32 len_encoded = fls(q_len); /* log2(len) + 1 */
1128
1129         if (len_encoded == 16)
1130                 len_encoded = 0;
1131         return len_encoded;
1132 }
1133
/* Creates the MCC queue with the extended (MCC_CREATE_EXT) variant of the
 * cmd, which also carries the async-event subscription bitmap. Uses mbox.
 */
static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
                                  struct be_queue_info *mccq,
                                  struct be_queue_info *cq)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_mcc_ext_create *req;
        struct be_dma_mem *q_mem = &mccq->dma_mem;
        void *ctxt;
        int status;

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);
        ctxt = &req->context;

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                               OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb,
                               NULL);

        req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
        /* BE2/BE3 use the v0 context layout; other chips use the v1 layout */
        if (BEx_chip(adapter)) {
                AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
                AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
                              be_encoded_q_len(mccq->len));
                AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
        } else {
                req->hdr.version = 1;
                req->cq_id = cpu_to_le16(cq->id);

                AMAP_SET_BITS(struct amap_mcc_context_v1, ring_size, ctxt,
                              be_encoded_q_len(mccq->len));
                AMAP_SET_BITS(struct amap_mcc_context_v1, valid, ctxt, 1);
                AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_id,
                              ctxt, cq->id);
                AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_valid,
                              ctxt, 1);
        }

        /* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */
        req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
        /* also subscribe to QnQ async events */
        req->async_event_bitmap[0] |= cpu_to_le32(1 << ASYNC_EVENT_CODE_QNQ);
        be_dws_cpu_to_le(ctxt, sizeof(req->context));

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);

                mccq->id = le16_to_cpu(resp->id);
                mccq->created = true;
        }
        mutex_unlock(&adapter->mbox_lock);

        return status;
}
1192
/* Creates the MCC queue with the original (non-EXT) MCC_CREATE cmd; used
 * as a fallback when the EXT variant fails (see be_cmd_mccq_create()).
 * Uses mbox.
 */
static int be_cmd_mccq_org_create(struct be_adapter *adapter,
                                  struct be_queue_info *mccq,
                                  struct be_queue_info *cq)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_mcc_create *req;
        struct be_dma_mem *q_mem = &mccq->dma_mem;
        void *ctxt;
        int status;

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);
        ctxt = &req->context;

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                               OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb,
                               NULL);

        req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

        /* this cmd always uses the v0 (BE) context layout */
        AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
        AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
                      be_encoded_q_len(mccq->len));
        AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);

        be_dws_cpu_to_le(ctxt, sizeof(req->context));

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);

                mccq->id = le16_to_cpu(resp->id);
                mccq->created = true;
        }

        mutex_unlock(&adapter->mbox_lock);
        return status;
}
1236
1237 int be_cmd_mccq_create(struct be_adapter *adapter,
1238                        struct be_queue_info *mccq, struct be_queue_info *cq)
1239 {
1240         int status;
1241
1242         status = be_cmd_mccq_ext_create(adapter, mccq, cq);
1243         if (status && BEx_chip(adapter)) {
1244                 dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
1245                         "or newer to avoid conflicting priorities between NIC "
1246                         "and FCoE traffic");
1247                 status = be_cmd_mccq_org_create(adapter, mccq, cq);
1248         }
1249         return status;
1250 }
1251
/* Creates the TX queue of @txo; issued via be_cmd_notify_wait(), which
 * uses the MCCQ if available, else the mbox.
 */
int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
{
        struct be_mcc_wrb wrb = {0};
        struct be_cmd_req_eth_tx_create *req;
        struct be_queue_info *txq = &txo->q;
        struct be_queue_info *cq = &txo->cq;
        struct be_dma_mem *q_mem = &txq->dma_mem;
        int status, ver = 0;

        req = embedded_payload(&wrb);
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
                               OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL);

        /* pick the cmd version supported by the chip */
        if (lancer_chip(adapter)) {
                req->hdr.version = 1;
        } else if (BEx_chip(adapter)) {
                if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC)
                        req->hdr.version = 2;
        } else { /* For SH */
                req->hdr.version = 2;
        }

        /* v1/v2 of the cmd also carry the interface handle */
        if (req->hdr.version > 0)
                req->if_id = cpu_to_le16(adapter->if_handle);
        req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
        req->ulp_num = BE_ULP1_NUM;
        req->type = BE_ETH_TX_RING_TYPE_STANDARD;
        req->cq_id = cpu_to_le16(cq->id);
        req->queue_size = be_encoded_q_len(txq->len);
        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
        /* save the version now: the response overlays the request in wrb */
        ver = req->hdr.version;

        status = be_cmd_notify_wait(adapter, &wrb);
        if (!status) {
                struct be_cmd_resp_eth_tx_create *resp = embedded_payload(&wrb);

                txq->id = le16_to_cpu(resp->cid);
                /* only the v2 response reports the doorbell offset */
                if (ver == 2)
                        txo->db_offset = le32_to_cpu(resp->db_offset);
                else
                        txo->db_offset = DB_TXULP1_OFFSET;
                txq->created = true;
        }

        return status;
}
1298
/* Creates an RX queue on interface @if_id, completing to CQ @cq_id.
 * The FW-assigned RSS id is returned in *@rss_id. Uses MCC.
 */
int be_cmd_rxq_create(struct be_adapter *adapter,
                      struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
                      u32 if_id, u32 rss, u8 *rss_id)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_eth_rx_create *req;
        struct be_dma_mem *q_mem = &rxq->dma_mem;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
                               OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);

        req->cq_id = cpu_to_le16(cq_id);
        /* frag_size is sent as fls()-1, i.e. log2 (assumes a power-of-2
         * frag_size — TODO confirm against callers)
         */
        req->frag_size = fls(frag_size) - 1;
        req->num_pages = 2;
        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
        req->interface_id = cpu_to_le32(if_id);
        req->max_frame_size = cpu_to_le16(BE_MAX_JUMBO_FRAME_SIZE);
        req->rss_queue = cpu_to_le32(rss);

        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);

                rxq->id = le16_to_cpu(resp->id);
                rxq->created = true;
                *rss_id = resp->rss_id;
        }

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
1342
/* Generic destroyer function for all types of queues
 * Uses Mbox
 */
int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
                     int queue_type)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_q_destroy *req;
        u8 subsys = 0, opcode = 0;
        int status;

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);

        /* pick the subsystem/opcode pair for the queue type being destroyed */
        switch (queue_type) {
        case QTYPE_EQ:
                subsys = CMD_SUBSYSTEM_COMMON;
                opcode = OPCODE_COMMON_EQ_DESTROY;
                break;
        case QTYPE_CQ:
                subsys = CMD_SUBSYSTEM_COMMON;
                opcode = OPCODE_COMMON_CQ_DESTROY;
                break;
        case QTYPE_TXQ:
                subsys = CMD_SUBSYSTEM_ETH;
                opcode = OPCODE_ETH_TX_DESTROY;
                break;
        case QTYPE_RXQ:
                subsys = CMD_SUBSYSTEM_ETH;
                opcode = OPCODE_ETH_RX_DESTROY;
                break;
        case QTYPE_MCCQ:
                subsys = CMD_SUBSYSTEM_COMMON;
                opcode = OPCODE_COMMON_MCC_DESTROY;
                break;
        default:
                BUG();  /* an unknown queue type is a driver bug */
        }

        be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb,
                               NULL);
        req->id = cpu_to_le16(q->id);

        status = be_mbox_notify_wait(adapter);
        /* the queue is marked not-created even if the cmd failed */
        q->created = false;

        mutex_unlock(&adapter->mbox_lock);
        return status;
}
1395
1396 /* Uses MCC */
1397 int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
1398 {
1399         struct be_mcc_wrb *wrb;
1400         struct be_cmd_req_q_destroy *req;
1401         int status;
1402
1403         spin_lock_bh(&adapter->mcc_lock);
1404
1405         wrb = wrb_from_mccq(adapter);
1406         if (!wrb) {
1407                 status = -EBUSY;
1408                 goto err;
1409         }
1410         req = embedded_payload(wrb);
1411
1412         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1413                                OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
1414         req->id = cpu_to_le16(q->id);
1415
1416         status = be_mcc_notify_wait(adapter);
1417         q->created = false;
1418
1419 err:
1420         spin_unlock_bh(&adapter->mcc_lock);
1421         return status;
1422 }
1423
/* Create an rx filtering policy configuration on an i/f
 * Will use MBOX only if MCCQ has not been created.
 * The new interface handle is returned in *@if_handle.
 */
int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
                     u32 *if_handle, u32 domain)
{
        struct be_mcc_wrb wrb = {0};
        struct be_cmd_req_if_create *req;
        int status;

        req = embedded_payload(&wrb);
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                               OPCODE_COMMON_NTWK_INTERFACE_CREATE,
                               sizeof(*req), &wrb, NULL);
        req->hdr.domain = domain;
        req->capability_flags = cpu_to_le32(cap_flags);
        req->enable_flags = cpu_to_le32(en_flags);
        req->pmac_invalid = true;

        status = be_cmd_notify_wait(adapter, &wrb);
        if (!status) {
                struct be_cmd_resp_if_create *resp = embedded_payload(&wrb);

                *if_handle = le32_to_cpu(resp->interface_id);

                /* Hack to retrieve VF's pmac-id on BE3 */
                if (BE3_chip(adapter) && !be_physfn(adapter))
                        adapter->pmac_id[0] = le32_to_cpu(resp->pmac_id);
        }
        return status;
}
1455
1456 /* Uses MCCQ */
1457 int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
1458 {
1459         struct be_mcc_wrb *wrb;
1460         struct be_cmd_req_if_destroy *req;
1461         int status;
1462
1463         if (interface_id == -1)
1464                 return 0;
1465
1466         spin_lock_bh(&adapter->mcc_lock);
1467
1468         wrb = wrb_from_mccq(adapter);
1469         if (!wrb) {
1470                 status = -EBUSY;
1471                 goto err;
1472         }
1473         req = embedded_payload(wrb);
1474
1475         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1476                                OPCODE_COMMON_NTWK_INTERFACE_DESTROY,
1477                                sizeof(*req), wrb, NULL);
1478         req->hdr.domain = domain;
1479         req->interface_id = cpu_to_le32(interface_id);
1480
1481         status = be_mcc_notify_wait(adapter);
1482 err:
1483         spin_unlock_bh(&adapter->mcc_lock);
1484         return status;
1485 }
1486
1487 /* Get stats is a non embedded command: the request is not embedded inside
1488  * WRB but is a separate dma memory block
1489  * Uses asynchronous MCC
1490  */
1491 int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
1492 {
1493         struct be_mcc_wrb *wrb;
1494         struct be_cmd_req_hdr *hdr;
1495         int status = 0;
1496
1497         spin_lock_bh(&adapter->mcc_lock);
1498
1499         wrb = wrb_from_mccq(adapter);
1500         if (!wrb) {
1501                 status = -EBUSY;
1502                 goto err;
1503         }
1504         hdr = nonemb_cmd->va;
1505
1506         be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
1507                                OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb,
1508                                nonemb_cmd);
1509
1510         /* version 1 of the cmd is not supported only by BE2 */
1511         if (BE2_chip(adapter))
1512                 hdr->version = 0;
1513         if (BE3_chip(adapter) || lancer_chip(adapter))
1514                 hdr->version = 1;
1515         else
1516                 hdr->version = 2;
1517
1518         be_mcc_notify(adapter);
1519         adapter->stats_cmd_sent = true;
1520
1521 err:
1522         spin_unlock_bh(&adapter->mcc_lock);
1523         return status;
1524 }
1525
/* Lancer Stats: fetch per-port stats into the non-embedded DMA block.
 * Asynchronous — be_mcc_notify() is used; the cmd does not wait.
 */
int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
                               struct be_dma_mem *nonemb_cmd)
{
        struct be_mcc_wrb *wrb;
        struct lancer_cmd_req_pport_stats *req;
        int status = 0;

        /* this opcode requires one of the privileges in cmd_priv_map */
        if (!be_cmd_allowed(adapter, OPCODE_ETH_GET_PPORT_STATS,
                            CMD_SUBSYSTEM_ETH))
                return -EPERM;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        /* non-embedded cmd: the request lives in its own DMA block */
        req = nonemb_cmd->va;

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
                               OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size,
                               wrb, nonemb_cmd);

        req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
        req->cmd_params.params.reset_stats = 0;

        be_mcc_notify(adapter);
        adapter->stats_cmd_sent = true;

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
1561
1562 static int be_mac_to_link_speed(int mac_speed)
1563 {
1564         switch (mac_speed) {
1565         case PHY_LINK_SPEED_ZERO:
1566                 return 0;
1567         case PHY_LINK_SPEED_10MBPS:
1568                 return 10;
1569         case PHY_LINK_SPEED_100MBPS:
1570                 return 100;
1571         case PHY_LINK_SPEED_1GBPS:
1572                 return 1000;
1573         case PHY_LINK_SPEED_10GBPS:
1574                 return 10000;
1575         case PHY_LINK_SPEED_20GBPS:
1576                 return 20000;
1577         case PHY_LINK_SPEED_25GBPS:
1578                 return 25000;
1579         case PHY_LINK_SPEED_40GBPS:
1580                 return 40000;
1581         }
1582         return 0;
1583 }
1584
/* Uses synchronous mcc
 * Returns link_speed in Mbps and the logical link status in *@link_status
 */
int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
                             u8 *link_status, u32 dom)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_link_status *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        /* report link-down unless the query says otherwise */
        if (link_status)
                *link_status = LINK_DOWN;

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                               OPCODE_COMMON_NTWK_LINK_STATUS_QUERY,
                               sizeof(*req), wrb, NULL);

        /* version 1 of the cmd is supported by all chips except BE2 */
        if (!BE2_chip(adapter))
                req->hdr.version = 1;

        req->hdr.domain = dom;

        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_link_status *resp = embedded_payload(wrb);

                if (link_speed) {
                        /* resp->link_speed is in 10 Mbps units; when 0, fall
                         * back to decoding the mac_speed enum instead
                         */
                        *link_speed = resp->link_speed ?
                                      le16_to_cpu(resp->link_speed) * 10 :
                                      be_mac_to_link_speed(resp->mac_speed);

                        if (!resp->logical_link_status)
                                *link_speed = 0;
                }
                if (link_status)
                        *link_status = resp->logical_link_status;
        }

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
1637
/* Issues GET_CNTL_ADDITIONAL_ATTRIBUTES via MCC. Note: this is
 * fire-and-forget — be_mcc_notify() is used, so the function returns
 * without waiting for (or parsing) the response here.
 */
int be_cmd_get_die_temperature(struct be_adapter *adapter)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_get_cntl_addnl_attribs *req;
        int status = 0;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                               OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES,
                               sizeof(*req), wrb, NULL);

        be_mcc_notify(adapter);

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
1664
1665 /* Uses synchronous mcc */
1666 int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
1667 {
1668         struct be_mcc_wrb *wrb;
1669         struct be_cmd_req_get_fat *req;
1670         int status;
1671
1672         spin_lock_bh(&adapter->mcc_lock);
1673
1674         wrb = wrb_from_mccq(adapter);
1675         if (!wrb) {
1676                 status = -EBUSY;
1677                 goto err;
1678         }
1679         req = embedded_payload(wrb);
1680
1681         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1682                                OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb,
1683                                NULL);
1684         req->fat_operation = cpu_to_le32(QUERY_FAT);
1685         status = be_mcc_notify_wait(adapter);
1686         if (!status) {
1687                 struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);
1688
1689                 if (log_size && resp->log_size)
1690                         *log_size = le32_to_cpu(resp->log_size) -
1691                                         sizeof(u32);
1692         }
1693 err:
1694         spin_unlock_bh(&adapter->mcc_lock);
1695         return status;
1696 }
1697
1698 int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
1699 {
1700         struct be_dma_mem get_fat_cmd;
1701         struct be_mcc_wrb *wrb;
1702         struct be_cmd_req_get_fat *req;
1703         u32 offset = 0, total_size, buf_size,
1704                                 log_offset = sizeof(u32), payload_len;
1705         int status = 0;
1706
1707         if (buf_len == 0)
1708                 return -EIO;
1709
1710         total_size = buf_len;
1711
1712         get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
1713         get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
1714                                               get_fat_cmd.size,
1715                                               &get_fat_cmd.dma);
1716         if (!get_fat_cmd.va) {
1717                 dev_err(&adapter->pdev->dev,
1718                         "Memory allocation failure while reading FAT data\n");
1719                 return -ENOMEM;
1720         }
1721
1722         spin_lock_bh(&adapter->mcc_lock);
1723
1724         while (total_size) {
1725                 buf_size = min(total_size, (u32)60*1024);
1726                 total_size -= buf_size;
1727
1728                 wrb = wrb_from_mccq(adapter);
1729                 if (!wrb) {
1730                         status = -EBUSY;
1731                         goto err;
1732                 }
1733                 req = get_fat_cmd.va;
1734
1735                 payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
1736                 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1737                                        OPCODE_COMMON_MANAGE_FAT, payload_len,
1738                                        wrb, &get_fat_cmd);
1739
1740                 req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
1741                 req->read_log_offset = cpu_to_le32(log_offset);
1742                 req->read_log_length = cpu_to_le32(buf_size);
1743                 req->data_buffer_size = cpu_to_le32(buf_size);
1744
1745                 status = be_mcc_notify_wait(adapter);
1746                 if (!status) {
1747                         struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
1748
1749                         memcpy(buf + offset,
1750                                resp->data_buffer,
1751                                le32_to_cpu(resp->read_log_length));
1752                 } else {
1753                         dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
1754                         goto err;
1755                 }
1756                 offset += buf_size;
1757                 log_offset += buf_size;
1758         }
1759 err:
1760         pci_free_consistent(adapter->pdev, get_fat_cmd.size,
1761                             get_fat_cmd.va, get_fat_cmd.dma);
1762         spin_unlock_bh(&adapter->mcc_lock);
1763         return status;
1764 }
1765
1766 /* Uses synchronous mcc */
1767 int be_cmd_get_fw_ver(struct be_adapter *adapter)
1768 {
1769         struct be_mcc_wrb *wrb;
1770         struct be_cmd_req_get_fw_version *req;
1771         int status;
1772
1773         spin_lock_bh(&adapter->mcc_lock);
1774
1775         wrb = wrb_from_mccq(adapter);
1776         if (!wrb) {
1777                 status = -EBUSY;
1778                 goto err;
1779         }
1780
1781         req = embedded_payload(wrb);
1782
1783         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1784                                OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb,
1785                                NULL);
1786         status = be_mcc_notify_wait(adapter);
1787         if (!status) {
1788                 struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
1789
1790                 strlcpy(adapter->fw_ver, resp->firmware_version_string,
1791                         sizeof(adapter->fw_ver));
1792                 strlcpy(adapter->fw_on_flash, resp->fw_on_flash_version_string,
1793                         sizeof(adapter->fw_on_flash));
1794         }
1795 err:
1796         spin_unlock_bh(&adapter->mcc_lock);
1797         return status;
1798 }
1799
1800 /* set the EQ delay interval of an EQ to specified value
1801  * Uses async mcc
1802  */
1803 static int __be_cmd_modify_eqd(struct be_adapter *adapter,
1804                                struct be_set_eqd *set_eqd, int num)
1805 {
1806         struct be_mcc_wrb *wrb;
1807         struct be_cmd_req_modify_eq_delay *req;
1808         int status = 0, i;
1809
1810         spin_lock_bh(&adapter->mcc_lock);
1811
1812         wrb = wrb_from_mccq(adapter);
1813         if (!wrb) {
1814                 status = -EBUSY;
1815                 goto err;
1816         }
1817         req = embedded_payload(wrb);
1818
1819         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1820                                OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb,
1821                                NULL);
1822
1823         req->num_eq = cpu_to_le32(num);
1824         for (i = 0; i < num; i++) {
1825                 req->set_eqd[i].eq_id = cpu_to_le32(set_eqd[i].eq_id);
1826                 req->set_eqd[i].phase = 0;
1827                 req->set_eqd[i].delay_multiplier =
1828                                 cpu_to_le32(set_eqd[i].delay_multiplier);
1829         }
1830
1831         be_mcc_notify(adapter);
1832 err:
1833         spin_unlock_bh(&adapter->mcc_lock);
1834         return status;
1835 }
1836
1837 int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
1838                       int num)
1839 {
1840         int num_eqs, i = 0;
1841
1842         if (lancer_chip(adapter) && num > 8) {
1843                 while (num) {
1844                         num_eqs = min(num, 8);
1845                         __be_cmd_modify_eqd(adapter, &set_eqd[i], num_eqs);
1846                         i += num_eqs;
1847                         num -= num_eqs;
1848                 }
1849         } else {
1850                 __be_cmd_modify_eqd(adapter, set_eqd, num);
1851         }
1852
1853         return 0;
1854 }
1855
/* Uses synchronous mcc */
1857 int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
1858                        u32 num)
1859 {
1860         struct be_mcc_wrb *wrb;
1861         struct be_cmd_req_vlan_config *req;
1862         int status;
1863
1864         spin_lock_bh(&adapter->mcc_lock);
1865
1866         wrb = wrb_from_mccq(adapter);
1867         if (!wrb) {
1868                 status = -EBUSY;
1869                 goto err;
1870         }
1871         req = embedded_payload(wrb);
1872
1873         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1874                                OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req),
1875                                wrb, NULL);
1876
1877         req->interface_id = if_id;
1878         req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0;
1879         req->num_vlan = num;
1880         memcpy(req->normal_vlan, vtag_array,
1881                req->num_vlan * sizeof(vtag_array[0]));
1882
1883         status = be_mcc_notify_wait(adapter);
1884 err:
1885         spin_unlock_bh(&adapter->mcc_lock);
1886         return status;
1887 }
1888
/* Programs the interface RX filter (promisc / allmulti / vlan-promisc /
 * multicast list) via a non-embedded synchronous MCC command that reuses
 * the pre-allocated adapter->rx_filter DMA buffer.
 */
int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
{
	struct be_mcc_wrb *wrb;
	struct be_dma_mem *mem = &adapter->rx_filter;
	struct be_cmd_req_rx_filter *req = mem->va;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	memset(req, 0, sizeof(*req));
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
			       wrb, mem);

	req->if_id = cpu_to_le32(adapter->if_handle);
	/* if_flags_mask selects which flags to update; if_flags carries
	 * their new values (a masked flag left 0 gets turned off)
	 */
	if (flags & IFF_PROMISC) {
		req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
						 BE_IF_FLAGS_VLAN_PROMISCUOUS |
						 BE_IF_FLAGS_MCAST_PROMISCUOUS);
		if (value == ON)
			req->if_flags =
				cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
					    BE_IF_FLAGS_VLAN_PROMISCUOUS |
					    BE_IF_FLAGS_MCAST_PROMISCUOUS);
	} else if (flags & IFF_ALLMULTI) {
		req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
		req->if_flags = cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
	} else if (flags & BE_FLAGS_VLAN_PROMISC) {
		req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_VLAN_PROMISCUOUS);

		if (value == ON)
			req->if_flags =
				cpu_to_le32(BE_IF_FLAGS_VLAN_PROMISCUOUS);
	} else {
		struct netdev_hw_addr *ha;
		int i = 0;

		req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_MULTICAST);
		req->if_flags = cpu_to_le32(BE_IF_FLAGS_MULTICAST);

		/* Reset mcast promisc mode if already set by setting mask
		 * and not setting flags field
		 */
		req->if_flags_mask |=
			cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS &
				    be_if_cap_flags(adapter));
		req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
		netdev_for_each_mc_addr(ha, adapter->netdev)
			memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
	}

	/* warn about - and then strip - requested flags the interface
	 * does not actually support
	 */
	if ((req->if_flags_mask & cpu_to_le32(be_if_cap_flags(adapter))) !=
	    req->if_flags_mask) {
		dev_warn(&adapter->pdev->dev,
			 "Cannot set rx filter flags 0x%x\n",
			 req->if_flags_mask);
		dev_warn(&adapter->pdev->dev,
			 "Interface is capable of 0x%x flags only\n",
			 be_if_cap_flags(adapter));
	}
	req->if_flags_mask &= cpu_to_le32(be_if_cap_flags(adapter));

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1962
/* Uses synchronous mcc */
1964 int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
1965 {
1966         struct be_mcc_wrb *wrb;
1967         struct be_cmd_req_set_flow_control *req;
1968         int status;
1969
1970         if (!be_cmd_allowed(adapter, OPCODE_COMMON_SET_FLOW_CONTROL,
1971                             CMD_SUBSYSTEM_COMMON))
1972                 return -EPERM;
1973
1974         spin_lock_bh(&adapter->mcc_lock);
1975
1976         wrb = wrb_from_mccq(adapter);
1977         if (!wrb) {
1978                 status = -EBUSY;
1979                 goto err;
1980         }
1981         req = embedded_payload(wrb);
1982
1983         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1984                                OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req),
1985                                wrb, NULL);
1986
1987         req->hdr.version = 1;
1988         req->tx_flow_control = cpu_to_le16((u16)tx_fc);
1989         req->rx_flow_control = cpu_to_le16((u16)rx_fc);
1990
1991         status = be_mcc_notify_wait(adapter);
1992
1993 err:
1994         spin_unlock_bh(&adapter->mcc_lock);
1995
1996         if (base_status(status) == MCC_STATUS_FEATURE_NOT_SUPPORTED)
1997                 return  -EOPNOTSUPP;
1998
1999         return status;
2000 }
2001
/* Uses sync mcc */
2003 int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
2004 {
2005         struct be_mcc_wrb *wrb;
2006         struct be_cmd_req_get_flow_control *req;
2007         int status;
2008
2009         if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_FLOW_CONTROL,
2010                             CMD_SUBSYSTEM_COMMON))
2011                 return -EPERM;
2012
2013         spin_lock_bh(&adapter->mcc_lock);
2014
2015         wrb = wrb_from_mccq(adapter);
2016         if (!wrb) {
2017                 status = -EBUSY;
2018                 goto err;
2019         }
2020         req = embedded_payload(wrb);
2021
2022         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2023                                OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req),
2024                                wrb, NULL);
2025
2026         status = be_mcc_notify_wait(adapter);
2027         if (!status) {
2028                 struct be_cmd_resp_get_flow_control *resp =
2029                                                 embedded_payload(wrb);
2030
2031                 *tx_fc = le16_to_cpu(resp->tx_flow_control);
2032                 *rx_fc = le16_to_cpu(resp->rx_flow_control);
2033         }
2034
2035 err:
2036         spin_unlock_bh(&adapter->mcc_lock);
2037         return status;
2038 }
2039
/* Uses mbox */
/* Queries the FW configuration over the mailbox and caches the physical
 * port number, function mode/capabilities and ASIC revision in @adapter.
 */
int be_cmd_query_fw_cfg(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_query_fw_cfg *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
			       sizeof(*req), wrb, NULL);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);

		adapter->port_num = le32_to_cpu(resp->phys_port);
		adapter->function_mode = le32_to_cpu(resp->function_mode);
		adapter->function_caps = le32_to_cpu(resp->function_caps);
		/* only the low byte of asic_revision is kept */
		adapter->asic_rev = le32_to_cpu(resp->asic_revision) & 0xFF;
		dev_info(&adapter->pdev->dev,
			 "FW config: function_mode=0x%x, function_caps=0x%x\n",
			 adapter->function_mode, adapter->function_caps);
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
2073
/* Uses mbox */
/* Resets the PCI function.  Lancer chips are reset through the SLIPORT
 * control register rather than a FW command; BEx/Skyhawk issue a
 * FUNCTION_RESET command over the mailbox.
 */
int be_cmd_reset_function(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_hdr *req;
	int status;

	if (lancer_chip(adapter)) {
		/* wait for FW readiness, trigger the reset via SLIPORT
		 * control, then wait for the ready state to come back
		 */
		status = lancer_wait_ready(adapter);
		if (!status) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);
			status = lancer_test_and_set_rdy_state(adapter);
		}
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter in non recoverable error\n");
		}
		return status;
	}

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb,
			       NULL);

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
2110
2111 int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
2112                       u32 rss_hash_opts, u16 table_size, const u8 *rss_hkey)
2113 {
2114         struct be_mcc_wrb *wrb;
2115         struct be_cmd_req_rss_config *req;
2116         int status;
2117
2118         if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
2119                 return 0;
2120
2121         spin_lock_bh(&adapter->mcc_lock);
2122
2123         wrb = wrb_from_mccq(adapter);
2124         if (!wrb) {
2125                 status = -EBUSY;
2126                 goto err;
2127         }
2128         req = embedded_payload(wrb);
2129
2130         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
2131                                OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);
2132
2133         req->if_id = cpu_to_le32(adapter->if_handle);
2134         req->enable_rss = cpu_to_le16(rss_hash_opts);
2135         req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
2136
2137         if (!BEx_chip(adapter))
2138                 req->hdr.version = 1;
2139
2140         memcpy(req->cpu_table, rsstable, table_size);
2141         memcpy(req->hash, rss_hkey, RSS_HASH_KEY_LEN);
2142         be_dws_cpu_to_le(req->hash, sizeof(req->hash));
2143
2144         status = be_mcc_notify_wait(adapter);
2145 err:
2146         spin_unlock_bh(&adapter->mcc_lock);
2147         return status;
2148 }
2149
2150 /* Uses sync mcc */
2151 int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
2152                             u8 bcn, u8 sts, u8 state)
2153 {
2154         struct be_mcc_wrb *wrb;
2155         struct be_cmd_req_enable_disable_beacon *req;
2156         int status;
2157
2158         spin_lock_bh(&adapter->mcc_lock);
2159
2160         wrb = wrb_from_mccq(adapter);
2161         if (!wrb) {
2162                 status = -EBUSY;
2163                 goto err;
2164         }
2165         req = embedded_payload(wrb);
2166
2167         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2168                                OPCODE_COMMON_ENABLE_DISABLE_BEACON,
2169                                sizeof(*req), wrb, NULL);
2170
2171         req->port_num = port_num;
2172         req->beacon_state = state;
2173         req->beacon_duration = bcn;
2174         req->status_duration = sts;
2175
2176         status = be_mcc_notify_wait(adapter);
2177
2178 err:
2179         spin_unlock_bh(&adapter->mcc_lock);
2180         return status;
2181 }
2182
2183 /* Uses sync mcc */
2184 int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
2185 {
2186         struct be_mcc_wrb *wrb;
2187         struct be_cmd_req_get_beacon_state *req;
2188         int status;
2189
2190         spin_lock_bh(&adapter->mcc_lock);
2191
2192         wrb = wrb_from_mccq(adapter);
2193         if (!wrb) {
2194                 status = -EBUSY;
2195                 goto err;
2196         }
2197         req = embedded_payload(wrb);
2198
2199         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2200                                OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req),
2201                                wrb, NULL);
2202
2203         req->port_num = port_num;
2204
2205         status = be_mcc_notify_wait(adapter);
2206         if (!status) {
2207                 struct be_cmd_resp_get_beacon_state *resp =
2208                                                 embedded_payload(wrb);
2209
2210                 *state = resp->beacon_state;
2211         }
2212
2213 err:
2214         spin_unlock_bh(&adapter->mcc_lock);
2215         return status;
2216 }
2217
/* Uses sync mcc */
/* Reads one EEPROM page (A0 or A2) of the port transceiver module into
 * @data (PAGE_DATA_LEN bytes) via a non-embedded synchronous MCC command.
 */
int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
				      u8 page_num, u8 *data)
{
	struct be_dma_mem cmd;
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_port_type *req;
	int status;

	if (page_num > TR_PAGE_A2)
		return -EINVAL;

	/* response does not fit in an embedded WRB; use a DMA buffer */
	cmd.size = sizeof(struct be_cmd_resp_port_type);
	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
	if (!cmd.va) {
		dev_err(&adapter->pdev->dev, "Memory allocation failed\n");
		return -ENOMEM;
	}
	memset(cmd.va, 0, cmd.size);

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = cmd.va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_READ_TRANSRECV_DATA,
			       cmd.size, wrb, &cmd);

	req->port = cpu_to_le32(adapter->hba_port_num);
	req->page_num = cpu_to_le32(page_num);
	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_port_type *resp = cmd.va;

		memcpy(data, resp->page_data, PAGE_DATA_LEN);
	}
err:
	spin_unlock_bh(&adapter->mcc_lock);
	/* DMA buffer is freed on both success and failure paths */
	pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
	return status;
}
2264
/* Writes a chunk of data to a flash object on Lancer chips.
 * The WRITE_OBJECT command is posted asynchronously; completion is
 * signalled through adapter->et_cmd_compl and the result is read from
 * adapter->flash_status (both set by the MCC completion handler).
 */
int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
			    u32 data_size, u32 data_offset,
			    const char *obj_name, u32 *data_written,
			    u8 *change_status, u8 *addn_status)
{
	struct be_mcc_wrb *wrb;
	struct lancer_cmd_req_write_object *req;
	struct lancer_cmd_resp_write_object *resp;
	void *ctxt = NULL;
	int status;

	spin_lock_bh(&adapter->mcc_lock);
	adapter->flash_status = 0;

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err_unlock;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_WRITE_OBJECT,
			       sizeof(struct lancer_cmd_req_write_object), wrb,
			       NULL);

	ctxt = &req->context;
	AMAP_SET_BITS(struct amap_lancer_write_obj_context,
		      write_length, ctxt, data_size);

	/* a zero-length write marks end-of-file for the object */
	if (data_size == 0)
		AMAP_SET_BITS(struct amap_lancer_write_obj_context,
			      eof, ctxt, 1);
	else
		AMAP_SET_BITS(struct amap_lancer_write_obj_context,
			      eof, ctxt, 0);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));
	req->write_offset = cpu_to_le32(data_offset);
	strlcpy(req->object_name, obj_name, sizeof(req->object_name));
	req->descriptor_count = cpu_to_le32(1);
	req->buf_len = cpu_to_le32(data_size);
	/* data payload follows the request header in the DMA buffer */
	req->addr_low = cpu_to_le32((cmd->dma +
				     sizeof(struct lancer_cmd_req_write_object))
				    & 0xFFFFFFFF);
	req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
				sizeof(struct lancer_cmd_req_write_object)));

	be_mcc_notify(adapter);
	spin_unlock_bh(&adapter->mcc_lock);

	/* 60s budget: flash writes can be slow */
	if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
					 msecs_to_jiffies(60000)))
		status = -ETIMEDOUT;
	else
		status = adapter->flash_status;

	/* NOTE(review): resp is read outside mcc_lock; on -ETIMEDOUT the
	 * WRB may not have completed yet - confirm additional_status is
	 * safe to read in that case
	 */
	resp = embedded_payload(wrb);
	if (!status) {
		*data_written = le32_to_cpu(resp->actual_write_len);
		*change_status = resp->change_status;
	} else {
		*addn_status = resp->additional_status;
	}

	return status;

err_unlock:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
2337
2338 int be_cmd_query_cable_type(struct be_adapter *adapter)
2339 {
2340         u8 page_data[PAGE_DATA_LEN];
2341         int status;
2342
2343         status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
2344                                                    page_data);
2345         if (!status) {
2346                 switch (adapter->phy.interface_type) {
2347                 case PHY_TYPE_QSFP:
2348                         adapter->phy.cable_type =
2349                                 page_data[QSFP_PLUS_CABLE_TYPE_OFFSET];
2350                         break;
2351                 case PHY_TYPE_SFP_PLUS_10GB:
2352                         adapter->phy.cable_type =
2353                                 page_data[SFP_PLUS_CABLE_TYPE_OFFSET];
2354                         break;
2355                 default:
2356                         adapter->phy.cable_type = 0;
2357                         break;
2358                 }
2359         }
2360         return status;
2361 }
2362
2363 int lancer_cmd_delete_object(struct be_adapter *adapter, const char *obj_name)
2364 {
2365         struct lancer_cmd_req_delete_object *req;
2366         struct be_mcc_wrb *wrb;
2367         int status;
2368
2369         spin_lock_bh(&adapter->mcc_lock);
2370
2371         wrb = wrb_from_mccq(adapter);
2372         if (!wrb) {
2373                 status = -EBUSY;
2374                 goto err;
2375         }
2376
2377         req = embedded_payload(wrb);
2378
2379         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2380                                OPCODE_COMMON_DELETE_OBJECT,
2381                                sizeof(*req), wrb, NULL);
2382
2383         strlcpy(req->object_name, obj_name, sizeof(req->object_name));
2384
2385         status = be_mcc_notify_wait(adapter);
2386 err:
2387         spin_unlock_bh(&adapter->mcc_lock);
2388         return status;
2389 }
2390
2391 int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
2392                            u32 data_size, u32 data_offset, const char *obj_name,
2393                            u32 *data_read, u32 *eof, u8 *addn_status)
2394 {
2395         struct be_mcc_wrb *wrb;
2396         struct lancer_cmd_req_read_object *req;
2397         struct lancer_cmd_resp_read_object *resp;
2398         int status;
2399
2400         spin_lock_bh(&adapter->mcc_lock);
2401
2402         wrb = wrb_from_mccq(adapter);
2403         if (!wrb) {
2404                 status = -EBUSY;
2405                 goto err_unlock;
2406         }
2407
2408         req = embedded_payload(wrb);
2409
2410         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2411                                OPCODE_COMMON_READ_OBJECT,
2412                                sizeof(struct lancer_cmd_req_read_object), wrb,
2413                                NULL);
2414
2415         req->desired_read_len = cpu_to_le32(data_size);
2416         req->read_offset = cpu_to_le32(data_offset);
2417         strcpy(req->object_name, obj_name);
2418         req->descriptor_count = cpu_to_le32(1);
2419         req->buf_len = cpu_to_le32(data_size);
2420         req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF));
2421         req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma));
2422
2423         status = be_mcc_notify_wait(adapter);
2424
2425         resp = embedded_payload(wrb);
2426         if (!status) {
2427                 *data_read = le32_to_cpu(resp->actual_read_len);
2428                 *eof = le32_to_cpu(resp->eof);
2429         } else {
2430                 *addn_status = resp->additional_status;
2431         }
2432
2433 err_unlock:
2434         spin_unlock_bh(&adapter->mcc_lock);
2435         return status;
2436 }
2437
/* Writes a chunk of the FW image to flash (BEx/Skyhawk).  The command is
 * posted asynchronously; completion is signalled via adapter->et_cmd_compl
 * and the result read from adapter->flash_status (both set by the MCC
 * completion handler).
 */
int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
			  u32 flash_type, u32 flash_opcode, u32 buf_size)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_write_flashrom *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);
	adapter->flash_status = 0;

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err_unlock;
	}
	req = cmd->va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb,
			       cmd);

	req->params.op_type = cpu_to_le32(flash_type);
	req->params.op_code = cpu_to_le32(flash_opcode);
	req->params.data_buf_size = cpu_to_le32(buf_size);

	be_mcc_notify(adapter);
	spin_unlock_bh(&adapter->mcc_lock);

	/* 40s budget: flash erase/program operations can be slow */
	if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
					 msecs_to_jiffies(40000)))
		status = -ETIMEDOUT;
	else
		status = adapter->flash_status;

	return status;

err_unlock:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
2478
2479 int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
2480                          u16 optype, int offset)
2481 {
2482         struct be_mcc_wrb *wrb;
2483         struct be_cmd_read_flash_crc *req;
2484         int status;
2485
2486         spin_lock_bh(&adapter->mcc_lock);
2487
2488         wrb = wrb_from_mccq(adapter);
2489         if (!wrb) {
2490                 status = -EBUSY;
2491                 goto err;
2492         }
2493         req = embedded_payload(wrb);
2494
2495         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2496                                OPCODE_COMMON_READ_FLASHROM, sizeof(*req),
2497                                wrb, NULL);
2498
2499         req->params.op_type = cpu_to_le32(optype);
2500         req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
2501         req->params.offset = cpu_to_le32(offset);
2502         req->params.data_buf_size = cpu_to_le32(0x4);
2503
2504         status = be_mcc_notify_wait(adapter);
2505         if (!status)
2506                 memcpy(flashed_crc, req->crc, 4);
2507
2508 err:
2509         spin_unlock_bh(&adapter->mcc_lock);
2510         return status;
2511 }
2512
2513 int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
2514                             struct be_dma_mem *nonemb_cmd)
2515 {
2516         struct be_mcc_wrb *wrb;
2517         struct be_cmd_req_acpi_wol_magic_config *req;
2518         int status;
2519
2520         spin_lock_bh(&adapter->mcc_lock);
2521
2522         wrb = wrb_from_mccq(adapter);
2523         if (!wrb) {
2524                 status = -EBUSY;
2525                 goto err;
2526         }
2527         req = nonemb_cmd->va;
2528
2529         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
2530                                OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req),
2531                                wrb, nonemb_cmd);
2532         memcpy(req->magic_mac, mac, ETH_ALEN);
2533
2534         status = be_mcc_notify_wait(adapter);
2535
2536 err:
2537         spin_unlock_bh(&adapter->mcc_lock);
2538         return status;
2539 }
2540
2541 int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
2542                         u8 loopback_type, u8 enable)
2543 {
2544         struct be_mcc_wrb *wrb;
2545         struct be_cmd_req_set_lmode *req;
2546         int status;
2547
2548         spin_lock_bh(&adapter->mcc_lock);
2549
2550         wrb = wrb_from_mccq(adapter);
2551         if (!wrb) {
2552                 status = -EBUSY;
2553                 goto err;
2554         }
2555
2556         req = embedded_payload(wrb);
2557
2558         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2559                                OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req),
2560                                wrb, NULL);
2561
2562         req->src_port = port_num;
2563         req->dest_port = port_num;
2564         req->loopback_type = loopback_type;
2565         req->loopback_state = enable;
2566
2567         status = be_mcc_notify_wait(adapter);
2568 err:
2569         spin_unlock_bh(&adapter->mcc_lock);
2570         return status;
2571 }
2572
/* Runs a FW-driven loopback packet test on a port.  The command is posted
 * asynchronously and the caller sleeps on adapter->et_cmd_compl until the
 * MCC completion handler signals it; the test verdict is then read from
 * the response in the embedded WRB.
 */
int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
			 u32 loopback_type, u32 pkt_size, u32 num_pkts,
			 u64 pattern)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_loopback_test *req;
	struct be_cmd_resp_loopback_test *resp;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			       OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb,
			       NULL);

	/* FW-side command timeout - presumably seconds; TODO confirm */
	req->hdr.timeout = cpu_to_le32(15);
	req->pattern = cpu_to_le64(pattern);
	req->src_port = cpu_to_le32(port_num);
	req->dest_port = cpu_to_le32(port_num);
	req->pkt_size = cpu_to_le32(pkt_size);
	req->num_pkts = cpu_to_le32(num_pkts);
	req->loopback_type = cpu_to_le32(loopback_type);

	be_mcc_notify(adapter);

	spin_unlock_bh(&adapter->mcc_lock);

	/* NOTE(review): no timeout here - a lost completion would block
	 * this thread forever; verify against the completion handler
	 */
	wait_for_completion(&adapter->et_cmd_compl);
	resp = embedded_payload(wrb);
	status = le32_to_cpu(resp->status);

	return status;
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
2617
2618 int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
2619                         u32 byte_cnt, struct be_dma_mem *cmd)
2620 {
2621         struct be_mcc_wrb *wrb;
2622         struct be_cmd_req_ddrdma_test *req;
2623         int status;
2624         int i, j = 0;
2625
2626         spin_lock_bh(&adapter->mcc_lock);
2627
2628         wrb = wrb_from_mccq(adapter);
2629         if (!wrb) {
2630                 status = -EBUSY;
2631                 goto err;
2632         }
2633         req = cmd->va;
2634         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2635                                OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb,
2636                                cmd);
2637
2638         req->pattern = cpu_to_le64(pattern);
2639         req->byte_count = cpu_to_le32(byte_cnt);
2640         for (i = 0; i < byte_cnt; i++) {
2641                 req->snd_buff[i] = (u8)(pattern >> (j*8));
2642                 j++;
2643                 if (j > 7)
2644                         j = 0;
2645         }
2646
2647         status = be_mcc_notify_wait(adapter);
2648
2649         if (!status) {
2650                 struct be_cmd_resp_ddrdma_test *resp;
2651
2652                 resp = cmd->va;
2653                 if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
2654                     resp->snd_err) {
2655                         status = -1;
2656                 }
2657         }
2658
2659 err:
2660         spin_unlock_bh(&adapter->mcc_lock);
2661         return status;
2662 }
2663
2664 int be_cmd_get_seeprom_data(struct be_adapter *adapter,
2665                             struct be_dma_mem *nonemb_cmd)
2666 {
2667         struct be_mcc_wrb *wrb;
2668         struct be_cmd_req_seeprom_read *req;
2669         int status;
2670
2671         spin_lock_bh(&adapter->mcc_lock);
2672
2673         wrb = wrb_from_mccq(adapter);
2674         if (!wrb) {
2675                 status = -EBUSY;
2676                 goto err;
2677         }
2678         req = nonemb_cmd->va;
2679
2680         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2681                                OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
2682                                nonemb_cmd);
2683
2684         status = be_mcc_notify_wait(adapter);
2685
2686 err:
2687         spin_unlock_bh(&adapter->mcc_lock);
2688         return status;
2689 }
2690
2691 int be_cmd_get_phy_info(struct be_adapter *adapter)
2692 {
2693         struct be_mcc_wrb *wrb;
2694         struct be_cmd_req_get_phy_info *req;
2695         struct be_dma_mem cmd;
2696         int status;
2697
2698         if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_PHY_DETAILS,
2699                             CMD_SUBSYSTEM_COMMON))
2700                 return -EPERM;
2701
2702         spin_lock_bh(&adapter->mcc_lock);
2703
2704         wrb = wrb_from_mccq(adapter);
2705         if (!wrb) {
2706                 status = -EBUSY;
2707                 goto err;
2708         }
2709         cmd.size = sizeof(struct be_cmd_req_get_phy_info);
2710         cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
2711         if (!cmd.va) {
2712                 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
2713                 status = -ENOMEM;
2714                 goto err;
2715         }
2716
2717         req = cmd.va;
2718
2719         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2720                                OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
2721                                wrb, &cmd);
2722
2723         status = be_mcc_notify_wait(adapter);
2724         if (!status) {
2725                 struct be_phy_info *resp_phy_info =
2726                                 cmd.va + sizeof(struct be_cmd_req_hdr);
2727
2728                 adapter->phy.phy_type = le16_to_cpu(resp_phy_info->phy_type);
2729                 adapter->phy.interface_type =
2730                         le16_to_cpu(resp_phy_info->interface_type);
2731                 adapter->phy.auto_speeds_supported =
2732                         le16_to_cpu(resp_phy_info->auto_speeds_supported);
2733                 adapter->phy.fixed_speeds_supported =
2734                         le16_to_cpu(resp_phy_info->fixed_speeds_supported);
2735                 adapter->phy.misc_params =
2736                         le32_to_cpu(resp_phy_info->misc_params);
2737
2738                 if (BE2_chip(adapter)) {
2739                         adapter->phy.fixed_speeds_supported =
2740                                 BE_SUPPORTED_SPEED_10GBPS |
2741                                 BE_SUPPORTED_SPEED_1GBPS;
2742                 }
2743         }
2744         pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
2745 err:
2746         spin_unlock_bh(&adapter->mcc_lock);
2747         return status;
2748 }
2749
2750 static int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
2751 {
2752         struct be_mcc_wrb *wrb;
2753         struct be_cmd_req_set_qos *req;
2754         int status;
2755
2756         spin_lock_bh(&adapter->mcc_lock);
2757
2758         wrb = wrb_from_mccq(adapter);
2759         if (!wrb) {
2760                 status = -EBUSY;
2761                 goto err;
2762         }
2763
2764         req = embedded_payload(wrb);
2765
2766         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2767                                OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);
2768
2769         req->hdr.domain = domain;
2770         req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
2771         req->max_bps_nic = cpu_to_le32(bps);
2772
2773         status = be_mcc_notify_wait(adapter);
2774
2775 err:
2776         spin_unlock_bh(&adapter->mcc_lock);
2777         return status;
2778 }
2779
/* Reads the controller attributes via the mbox and caches the physical
 * port number in adapter->hba_port_num.
 * Returns 0 on success, -1 if the mbox lock wait was interrupted,
 * -ENOMEM/-EBUSY on resource failure, or the FW completion status.
 */
int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_cntl_attribs *req;
	struct be_cmd_resp_cntl_attribs *resp;
	int status;
	int payload_len = max(sizeof(*req), sizeof(*resp));
	struct mgmt_controller_attrib *attribs;
	struct be_dma_mem attribs_cmd;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	/* attribs_cmd is zeroed so the err path below can tell from va
	 * whether the DMA buffer was actually allocated before freeing it.
	 */
	memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
	attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
	attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
					      &attribs_cmd.dma);
	if (!attribs_cmd.va) {
		dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
		status = -ENOMEM;
		goto err;
	}

	wrb = wrb_from_mbox(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = attribs_cmd.va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len,
			       wrb, &attribs_cmd);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		/* The attributes follow the response header in the buffer */
		attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
		adapter->hba_port_num = attribs->hba_attribs.phy_port;
	}

err:
	mutex_unlock(&adapter->mbox_lock);
	if (attribs_cmd.va)
		pci_free_consistent(adapter->pdev, attribs_cmd.size,
				    attribs_cmd.va, attribs_cmd.dma);
	return status;
}
2827
2828 /* Uses mbox */
2829 int be_cmd_req_native_mode(struct be_adapter *adapter)
2830 {
2831         struct be_mcc_wrb *wrb;
2832         struct be_cmd_req_set_func_cap *req;
2833         int status;
2834
2835         if (mutex_lock_interruptible(&adapter->mbox_lock))
2836                 return -1;
2837
2838         wrb = wrb_from_mbox(adapter);
2839         if (!wrb) {
2840                 status = -EBUSY;
2841                 goto err;
2842         }
2843
2844         req = embedded_payload(wrb);
2845
2846         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2847                                OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP,
2848                                sizeof(*req), wrb, NULL);
2849
2850         req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
2851                                 CAPABILITY_BE3_NATIVE_ERX_API);
2852         req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);
2853
2854         status = be_mbox_notify_wait(adapter);
2855         if (!status) {
2856                 struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
2857
2858                 adapter->be3_native = le32_to_cpu(resp->cap_flags) &
2859                                         CAPABILITY_BE3_NATIVE_ERX_API;
2860                 if (!adapter->be3_native)
2861                         dev_warn(&adapter->pdev->dev,
2862                                  "adapter not in advanced mode\n");
2863         }
2864 err:
2865         mutex_unlock(&adapter->mbox_lock);
2866         return status;
2867 }
2868
2869 /* Get privilege(s) for a function */
2870 int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
2871                              u32 domain)
2872 {
2873         struct be_mcc_wrb *wrb;
2874         struct be_cmd_req_get_fn_privileges *req;
2875         int status;
2876
2877         spin_lock_bh(&adapter->mcc_lock);
2878
2879         wrb = wrb_from_mccq(adapter);
2880         if (!wrb) {
2881                 status = -EBUSY;
2882                 goto err;
2883         }
2884
2885         req = embedded_payload(wrb);
2886
2887         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2888                                OPCODE_COMMON_GET_FN_PRIVILEGES, sizeof(*req),
2889                                wrb, NULL);
2890
2891         req->hdr.domain = domain;
2892
2893         status = be_mcc_notify_wait(adapter);
2894         if (!status) {
2895                 struct be_cmd_resp_get_fn_privileges *resp =
2896                                                 embedded_payload(wrb);
2897
2898                 *privilege = le32_to_cpu(resp->privilege_mask);
2899
2900                 /* In UMC mode FW does not return right privileges.
2901                  * Override with correct privilege equivalent to PF.
2902                  */
2903                 if (BEx_chip(adapter) && be_is_mc(adapter) &&
2904                     be_physfn(adapter))
2905                         *privilege = MAX_PRIVILEGES;
2906         }
2907
2908 err:
2909         spin_unlock_bh(&adapter->mcc_lock);
2910         return status;
2911 }
2912
2913 /* Set privilege(s) for a function */
2914 int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
2915                              u32 domain)
2916 {
2917         struct be_mcc_wrb *wrb;
2918         struct be_cmd_req_set_fn_privileges *req;
2919         int status;
2920
2921         spin_lock_bh(&adapter->mcc_lock);
2922
2923         wrb = wrb_from_mccq(adapter);
2924         if (!wrb) {
2925                 status = -EBUSY;
2926                 goto err;
2927         }
2928
2929         req = embedded_payload(wrb);
2930         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2931                                OPCODE_COMMON_SET_FN_PRIVILEGES, sizeof(*req),
2932                                wrb, NULL);
2933         req->hdr.domain = domain;
2934         if (lancer_chip(adapter))
2935                 req->privileges_lancer = cpu_to_le32(privileges);
2936         else
2937                 req->privileges = cpu_to_le32(privileges);
2938
2939         status = be_mcc_notify_wait(adapter);
2940 err:
2941         spin_unlock_bh(&adapter->mcc_lock);
2942         return status;
2943 }
2944
/* pmac_id_valid: true => pmac_id is supplied and MAC address is requested.
 * pmac_id_valid: false => pmac_id or MAC address is requested.
 *                If pmac_id is returned, pmac_id_valid is returned as true
 */
int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
			     bool *pmac_id_valid, u32 *pmac_id, u32 if_handle,
			     u8 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_mac_list *req;
	int status;
	int mac_count;
	struct be_dma_mem get_mac_list_cmd;
	int i;

	/* The GET_MAC_LIST response needs a non-embedded DMA buffer; it is
	 * allocated before taking mcc_lock and freed after dropping it.
	 */
	memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
	get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
	get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev,
						   get_mac_list_cmd.size,
						   &get_mac_list_cmd.dma);

	if (!get_mac_list_cmd.va) {
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure during GET_MAC_LIST\n");
		return -ENOMEM;
	}

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto out;
	}

	req = get_mac_list_cmd.va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_MAC_LIST,
			       get_mac_list_cmd.size, wrb, &get_mac_list_cmd);
	req->hdr.domain = domain;
	req->mac_type = MAC_ADDRESS_TYPE_NETWORK;
	if (*pmac_id_valid) {
		/* Caller supplied a pmac_id: ask the FW for that specific
		 * entry's MAC address rather than the permanent list.
		 */
		req->mac_id = cpu_to_le32(*pmac_id);
		req->iface_id = cpu_to_le16(if_handle);
		req->perm_override = 0;
	} else {
		req->perm_override = 1;
	}

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_mac_list *resp =
						get_mac_list_cmd.va;

		/* pmac_id-based lookup: copy the single returned address */
		if (*pmac_id_valid) {
			memcpy(mac, resp->macid_macaddr.mac_addr_id.macaddr,
			       ETH_ALEN);
			goto out;
		}

		mac_count = resp->true_mac_count + resp->pseudo_mac_count;
		/* Mac list returned could contain one or more active mac_ids
		 * or one or more true or pseudo permanant mac addresses.
		 * If an active mac_id is present, return first active mac_id
		 * found.
		 */
		for (i = 0; i < mac_count; i++) {
			struct get_list_macaddr *mac_entry;
			u16 mac_addr_size;
			u32 mac_id;

			mac_entry = &resp->macaddr_list[i];
			mac_addr_size = le16_to_cpu(mac_entry->mac_addr_size);
			/* mac_id is a 32 bit value and mac_addr size
			 * is 6 bytes
			 */
			if (mac_addr_size == sizeof(u32)) {
				*pmac_id_valid = true;
				mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id;
				*pmac_id = le32_to_cpu(mac_id);
				goto out;
			}
		}
		/* If no active mac_id found, return first mac addr */
		*pmac_id_valid = false;
		memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
		       ETH_ALEN);
	}

out:
	spin_unlock_bh(&adapter->mcc_lock);
	pci_free_consistent(adapter->pdev, get_mac_list_cmd.size,
			    get_mac_list_cmd.va, get_mac_list_cmd.dma);
	return status;
}
3041
3042 int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id,
3043                           u8 *mac, u32 if_handle, bool active, u32 domain)
3044 {
3045         if (!active)
3046                 be_cmd_get_mac_from_list(adapter, mac, &active, &curr_pmac_id,
3047                                          if_handle, domain);
3048         if (BEx_chip(adapter))
3049                 return be_cmd_mac_addr_query(adapter, mac, false,
3050                                              if_handle, curr_pmac_id);
3051         else
3052                 /* Fetch the MAC address using pmac_id */
3053                 return be_cmd_get_mac_from_list(adapter, mac, &active,
3054                                                 &curr_pmac_id,
3055                                                 if_handle, domain);
3056 }
3057
3058 int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac)
3059 {
3060         int status;
3061         bool pmac_valid = false;
3062
3063         memset(mac, 0, ETH_ALEN);
3064
3065         if (BEx_chip(adapter)) {
3066                 if (be_physfn(adapter))
3067                         status = be_cmd_mac_addr_query(adapter, mac, true, 0,
3068                                                        0);
3069                 else
3070                         status = be_cmd_mac_addr_query(adapter, mac, false,
3071                                                        adapter->if_handle, 0);
3072         } else {
3073                 status = be_cmd_get_mac_from_list(adapter, mac, &pmac_valid,
3074                                                   NULL, adapter->if_handle, 0);
3075         }
3076
3077         return status;
3078 }
3079
3080 /* Uses synchronous MCCQ */
3081 int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
3082                         u8 mac_count, u32 domain)
3083 {
3084         struct be_mcc_wrb *wrb;
3085         struct be_cmd_req_set_mac_list *req;
3086         int status;
3087         struct be_dma_mem cmd;
3088
3089         memset(&cmd, 0, sizeof(struct be_dma_mem));
3090         cmd.size = sizeof(struct be_cmd_req_set_mac_list);
3091         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
3092                                     &cmd.dma, GFP_KERNEL);
3093         if (!cmd.va)
3094                 return -ENOMEM;
3095
3096         spin_lock_bh(&adapter->mcc_lock);
3097
3098         wrb = wrb_from_mccq(adapter);
3099         if (!wrb) {
3100                 status = -EBUSY;
3101                 goto err;
3102         }
3103
3104         req = cmd.va;
3105         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3106                                OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
3107                                wrb, &cmd);
3108
3109         req->hdr.domain = domain;
3110         req->mac_count = mac_count;
3111         if (mac_count)
3112                 memcpy(req->mac, mac_array, ETH_ALEN*mac_count);
3113
3114         status = be_mcc_notify_wait(adapter);
3115
3116 err:
3117         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
3118         spin_unlock_bh(&adapter->mcc_lock);
3119         return status;
3120 }
3121
3122 /* Wrapper to delete any active MACs and provision the new mac.
3123  * Changes to MAC_LIST are allowed iff none of the MAC addresses in the
3124  * current list are active.
3125  */
3126 int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom)
3127 {
3128         bool active_mac = false;
3129         u8 old_mac[ETH_ALEN];
3130         u32 pmac_id;
3131         int status;
3132
3133         status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
3134                                           &pmac_id, if_id, dom);
3135
3136         if (!status && active_mac)
3137                 be_cmd_pmac_del(adapter, if_id, pmac_id, dom);
3138
3139         return be_cmd_set_mac_list(adapter, mac, mac ? 1 : 0, dom);
3140 }
3141
/* Programs hyper-switch settings for @intf_id in @domain: the port VLAN id
 * (@pvid, when non-zero) and, on non-BEx chips, the per-physical-port
 * forwarding mode (@hsw_mode). Uses the MCCQ.
 */
int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
			  u32 domain, u16 intf_id, u16 hsw_mode)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_hsw_config *req;
	void *ctxt;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb,
			       NULL);

	req->hdr.domain = domain;
	AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id);
	/* pvid == 0 leaves the PVID unchanged (pvid_valid stays clear) */
	if (pvid) {
		AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid);
	}
	if (!BEx_chip(adapter) && hsw_mode) {
		/* Forwarding mode is set on the physical port, so the
		 * interface_id field is overwritten with the HBA port number
		 * and the pport flag is set.
		 */
		AMAP_SET_BITS(struct amap_set_hsw_context, interface_id,
			      ctxt, adapter->hba_port_num);
		AMAP_SET_BITS(struct amap_set_hsw_context, pport, ctxt, 1);
		AMAP_SET_BITS(struct amap_set_hsw_context, port_fwd_type,
			      ctxt, hsw_mode);
	}

	/* Convert the context to little-endian before posting to the FW */
	be_dws_cpu_to_le(req->context, sizeof(req->context));
	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
3186
/* Get Hyper switch config */
/* Reads the hyper-switch config of @intf_id in @domain: the port VLAN id
 * into *pvid (if non-NULL) and, on non-BEx chips, the port forwarding mode
 * into *mode (if non-NULL). Uses the MCCQ.
 */
int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
			  u32 domain, u16 intf_id, u8 *mode)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_hsw_config *req;
	void *ctxt;
	int status;
	u16 vid;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb,
			       NULL);

	req->hdr.domain = domain;
	AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
		      ctxt, intf_id);
	AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1);

	if (!BEx_chip(adapter) && mode) {
		/* The fwd mode is a physical-port attribute: query by HBA
		 * port number with the pport flag set.
		 */
		AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
			      ctxt, adapter->hba_port_num);
		AMAP_SET_BITS(struct amap_get_hsw_req_context, pport, ctxt, 1);
	}
	/* Convert the request context to little-endian for the FW */
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_hsw_config *resp =
						embedded_payload(wrb);

		/* Convert the response context back to CPU endianness
		 * before extracting fields.
		 */
		be_dws_le_to_cpu(&resp->context, sizeof(resp->context));
		vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
				    pvid, &resp->context);
		if (pvid)
			*pvid = le16_to_cpu(vid);
		if (mode)
			*mode = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
					      port_fwd_type, &resp->context);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
3243
3244 static bool be_is_wol_excluded(struct be_adapter *adapter)
3245 {
3246         struct pci_dev *pdev = adapter->pdev;
3247
3248         if (!be_physfn(adapter))
3249                 return true;
3250
3251         switch (pdev->subsystem_device) {
3252         case OC_SUBSYS_DEVICE_ID1:
3253         case OC_SUBSYS_DEVICE_ID2:
3254         case OC_SUBSYS_DEVICE_ID3:
3255         case OC_SUBSYS_DEVICE_ID4:
3256                 return true;
3257         default:
3258                 return false;
3259         }
3260 }
3261
/* Queries the ACPI Wake-on-LAN capability from the FW and caches it in
 * adapter->wol_cap / adapter->wol_en. Uses the mbox.
 * Returns 0 if WoL is excluded on this function or on success; -EPERM if
 * the function lacks the privilege; -1/-ENOMEM/-EBUSY or the FW status
 * otherwise.
 */
int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_acpi_wol_magic_config_v1 *req;
	int status = 0;
	struct be_dma_mem cmd;

	if (!be_cmd_allowed(adapter, OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
			    CMD_SUBSYSTEM_ETH))
		return -EPERM;

	/* Nothing to query on functions where WoL is excluded */
	if (be_is_wol_excluded(adapter))
		return status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	/* cmd is zeroed so the err path can check va before freeing */
	memset(&cmd, 0, sizeof(struct be_dma_mem));
	cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
	if (!cmd.va) {
		dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
		status = -ENOMEM;
		goto err;
	}

	wrb = wrb_from_mbox(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = cmd.va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
			       sizeof(*req), wrb, &cmd);

	/* v1 of the cmd supports querying (rather than setting) WoL */
	req->hdr.version = 1;
	req->query_options = BE_GET_WOL_CAP;

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_acpi_wol_magic_config_v1 *resp;

		resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *)cmd.va;

		adapter->wol_cap = resp->wol_settings;
		if (adapter->wol_cap & BE_WOL_CAP)
			adapter->wol_en = true;
	}
err:
	mutex_unlock(&adapter->mbox_lock);
	if (cmd.va)
		pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
	return status;

}
3320
3321 int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level)
3322 {
3323         struct be_dma_mem extfat_cmd;
3324         struct be_fat_conf_params *cfgs;
3325         int status;
3326         int i, j;
3327
3328         memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3329         extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3330         extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
3331                                              &extfat_cmd.dma);
3332         if (!extfat_cmd.va)
3333                 return -ENOMEM;
3334
3335         status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3336         if (status)
3337                 goto err;
3338
3339         cfgs = (struct be_fat_conf_params *)
3340                         (extfat_cmd.va + sizeof(struct be_cmd_resp_hdr));
3341         for (i = 0; i < le32_to_cpu(cfgs->num_modules); i++) {
3342                 u32 num_modes = le32_to_cpu(cfgs->module[i].num_modes);
3343
3344                 for (j = 0; j < num_modes; j++) {
3345                         if (cfgs->module[i].trace_lvl[j].mode == MODE_UART)
3346                                 cfgs->module[i].trace_lvl[j].dbg_lvl =
3347                                                         cpu_to_le32(level);
3348                 }
3349         }
3350
3351         status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, cfgs);
3352 err:
3353         pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
3354                             extfat_cmd.dma);
3355         return status;
3356 }
3357
3358 int be_cmd_get_fw_log_level(struct be_adapter *adapter)
3359 {
3360         struct be_dma_mem extfat_cmd;
3361         struct be_fat_conf_params *cfgs;
3362         int status, j;
3363         int level = 0;
3364
3365         memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3366         extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3367         extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
3368                                              &extfat_cmd.dma);
3369
3370         if (!extfat_cmd.va) {
3371                 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
3372                         __func__);
3373                 goto err;
3374         }
3375
3376         status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3377         if (!status) {
3378                 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
3379                                                 sizeof(struct be_cmd_resp_hdr));
3380
3381                 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
3382                         if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
3383                                 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
3384                 }
3385         }
3386         pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
3387                             extfat_cmd.dma);
3388 err:
3389         return level;
3390 }
3391
3392 int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
3393                                    struct be_dma_mem *cmd)
3394 {
3395         struct be_mcc_wrb *wrb;
3396         struct be_cmd_req_get_ext_fat_caps *req;
3397         int status;
3398
3399         if (mutex_lock_interruptible(&adapter->mbox_lock))
3400                 return -1;
3401
3402         wrb = wrb_from_mbox(adapter);
3403         if (!wrb) {
3404                 status = -EBUSY;
3405                 goto err;
3406         }
3407
3408         req = cmd->va;
3409         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3410                                OPCODE_COMMON_GET_EXT_FAT_CAPABILITES,
3411                                cmd->size, wrb, cmd);
3412         req->parameter_type = cpu_to_le32(1);
3413
3414         status = be_mbox_notify_wait(adapter);
3415 err:
3416         mutex_unlock(&adapter->mbox_lock);
3417         return status;
3418 }
3419
/* Uses synchronous MCC.
 * Pushes the extended FAT configuration in @configs to FW; the request
 * is staged in the caller-provided DMA buffer @cmd (non-embedded).
 * Returns 0 on success, -EBUSY if no MCC WRB is available, or the FW
 * completion status.
 */
int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
				   struct be_dma_mem *cmd,
				   struct be_fat_conf_params *configs)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_ext_fat_caps *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	/* Copy the new config into the request before building the hdr */
	req = cmd->va;
	memcpy(&req->set_params, configs, sizeof(struct be_fat_conf_params));
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_EXT_FAT_CAPABILITES,
			       cmd->size, wrb, cmd);

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
3447
/* Returns the port name (a single ASCII character) in *port_name.
 * Only Lancer chips are queried via sync MCC (GET_PORT_NAME v1); on all
 * other chips — and on any MCC failure — the name falls back to the
 * ASCII digit of hba_port_num.
 */
int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_port_name *req;
	int status;

	if (!lancer_chip(adapter)) {
		*port_name = adapter->hba_port_num + '0';
		return 0;
	}

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_PORT_NAME, sizeof(*req), wrb,
			       NULL);
	req->hdr.version = 1;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		/* Response overlays the request in the embedded payload */
		struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb);

		*port_name = resp->port_name[adapter->hba_port_num];
	} else {
		/* Fall back to the numeric port name on failure */
		*port_name = adapter->hba_port_num + '0';
	}
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
3486
/* Descriptor type selector for be_get_nic_desc() */
enum {
	FUNC_DESC = 1,	/* the function's own NIC resource descriptor */
	VFT_DESC = 2	/* VF-template descriptor (VFT flag set in flags) */
};
3492
3493 static struct be_nic_res_desc *be_get_nic_desc(u8 *buf, u32 desc_count,
3494                                                int desc_type)
3495 {
3496         struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
3497         struct be_nic_res_desc *nic;
3498         int i;
3499
3500         for (i = 0; i < desc_count; i++) {
3501                 if (hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V0 ||
3502                     hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V1) {
3503                         nic = (struct be_nic_res_desc *)hdr;
3504                         if (desc_type == FUNC_DESC ||
3505                             (desc_type == VFT_DESC &&
3506                              nic->flags & (1 << VFT_SHIFT)))
3507                                 return nic;
3508                 }
3509
3510                 hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
3511                 hdr = (void *)hdr + hdr->desc_len;
3512         }
3513         return NULL;
3514 }
3515
/* Return the VF-template NIC descriptor from @buf, or NULL. */
static struct be_nic_res_desc *be_get_vft_desc(u8 *buf, u32 desc_count)
{
	return be_get_nic_desc(buf, desc_count, VFT_DESC);
}
3520
/* Return the function's own NIC descriptor from @buf, or NULL. */
static struct be_nic_res_desc *be_get_func_nic_desc(u8 *buf, u32 desc_count)
{
	return be_get_nic_desc(buf, desc_count, FUNC_DESC);
}
3525
3526 static struct be_pcie_res_desc *be_get_pcie_desc(u8 devfn, u8 *buf,
3527                                                  u32 desc_count)
3528 {
3529         struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
3530         struct be_pcie_res_desc *pcie;
3531         int i;
3532
3533         for (i = 0; i < desc_count; i++) {
3534                 if ((hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V0 ||
3535                      hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V1)) {
3536                         pcie = (struct be_pcie_res_desc *)hdr;
3537                         if (pcie->pf_num == devfn)
3538                                 return pcie;
3539                 }
3540
3541                 hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
3542                 hdr = (void *)hdr + hdr->desc_len;
3543         }
3544         return NULL;
3545 }
3546
3547 static struct be_port_res_desc *be_get_port_desc(u8 *buf, u32 desc_count)
3548 {
3549         struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
3550         int i;
3551
3552         for (i = 0; i < desc_count; i++) {
3553                 if (hdr->desc_type == PORT_RESOURCE_DESC_TYPE_V1)
3554                         return (struct be_port_res_desc *)hdr;
3555
3556                 hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
3557                 hdr = (void *)hdr + hdr->desc_len;
3558         }
3559         return NULL;
3560 }
3561
/* Copy the resource limits FW reported in a NIC descriptor into the
 * driver's be_resources view, converting from little-endian.
 */
static void be_copy_nic_desc(struct be_resources *res,
			     struct be_nic_res_desc *desc)
{
	res->max_uc_mac = le16_to_cpu(desc->unicast_mac_count);
	res->max_vlans = le16_to_cpu(desc->vlan_count);
	res->max_mcast_mac = le16_to_cpu(desc->mcast_mac_count);
	res->max_tx_qs = le16_to_cpu(desc->txq_count);
	res->max_rss_qs = le16_to_cpu(desc->rssq_count);
	res->max_rx_qs = le16_to_cpu(desc->rq_count);
	res->max_evt_qs = le16_to_cpu(desc->eq_count);
	/* Clear flags that driver is not interested in */
	res->if_cap_flags = le32_to_cpu(desc->cap_flags) &
				BE_IF_CAP_FLAGS_WANT;
	/* Need 1 RXQ as the default RXQ */
	if (res->max_rss_qs && res->max_rss_qs == res->max_rx_qs)
		res->max_rss_qs -= 1;
}
3579
/* Uses Mbox.
 * Issues GET_FUNC_CONFIG and parses this function's NIC resource
 * descriptor into @res; also caches adapter->pf_number from it.
 * Returns 0 on success, -1 if the mbox-lock wait was interrupted,
 * -ENOMEM/-EBUSY on setup failure, -EINVAL if the response carries no
 * NIC descriptor, or the FW completion status.
 */
int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_func_config *req;
	int status;
	struct be_dma_mem cmd;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	memset(&cmd, 0, sizeof(struct be_dma_mem));
	cmd.size = sizeof(struct be_cmd_resp_get_func_config);
	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
	if (!cmd.va) {
		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
		status = -ENOMEM;
		goto err;
	}

	wrb = wrb_from_mbox(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = cmd.va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_FUNC_CONFIG,
			       cmd.size, wrb, &cmd);

	/* Skyhawk understands the richer v1 request */
	if (skyhawk_chip(adapter))
		req->hdr.version = 1;

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_func_config *resp = cmd.va;
		u32 desc_count = le32_to_cpu(resp->desc_count);
		struct be_nic_res_desc *desc;

		desc = be_get_func_nic_desc(resp->func_param, desc_count);
		if (!desc) {
			status = -EINVAL;
			goto err;
		}

		adapter->pf_number = desc->pf_num;
		be_copy_nic_desc(res, desc);
	}
err:
	mutex_unlock(&adapter->mbox_lock);
	/* cmd.va is NULL if allocation itself failed */
	if (cmd.va)
		pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
	return status;
}
3636
/* Will use MBOX only if MCCQ has not been created.
 * Issues GET_PROFILE_CONFIG for @domain and parses the PCIe, port, NIC
 * and VF-template descriptors (whichever are present) out of the
 * response into @res and adapter state. Returns 0 on success, -ENOMEM
 * on allocation failure, or the command status.
 */
int be_cmd_get_profile_config(struct be_adapter *adapter,
			      struct be_resources *res, u8 domain)
{
	struct be_cmd_resp_get_profile_config *resp;
	struct be_cmd_req_get_profile_config *req;
	struct be_nic_res_desc *vf_res;
	struct be_pcie_res_desc *pcie;
	struct be_port_res_desc *port;
	struct be_nic_res_desc *nic;
	struct be_mcc_wrb wrb = {0};
	struct be_dma_mem cmd;
	u32 desc_count;
	int status;

	memset(&cmd, 0, sizeof(struct be_dma_mem));
	cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
	if (!cmd.va)
		return -ENOMEM;

	req = cmd.va;
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_PROFILE_CONFIG,
			       cmd.size, &wrb, &cmd);

	req->hdr.domain = domain;
	/* Lancer only supports the v0 request */
	if (!lancer_chip(adapter))
		req->hdr.version = 1;
	req->type = ACTIVE_PROFILE_TYPE;

	status = be_cmd_notify_wait(adapter, &wrb);
	if (status)
		goto err;

	resp = cmd.va;
	desc_count = le32_to_cpu(resp->desc_count);

	/* Each descriptor is optional; parse whatever FW returned */
	pcie = be_get_pcie_desc(adapter->pdev->devfn, resp->func_param,
				desc_count);
	if (pcie)
		res->max_vfs = le16_to_cpu(pcie->num_vfs);

	port = be_get_port_desc(resp->func_param, desc_count);
	if (port)
		adapter->mc_type = port->mc_type;

	nic = be_get_func_nic_desc(resp->func_param, desc_count);
	if (nic)
		be_copy_nic_desc(res, nic);

	vf_res = be_get_vft_desc(resp->func_param, desc_count);
	if (vf_res)
		res->vf_if_cap_flags = vf_res->cap_flags;
err:
	if (cmd.va)
		pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
	return status;
}
3696
/* Will use MBOX only if MCCQ has not been created.
 * Sends @count resource descriptors (@size bytes total, starting at
 * @desc) to FW via SET_PROFILE_CONFIG with the given header @version
 * and @domain. Caller must ensure @size fits the request's descriptor
 * area — the memcpy below is not bounds-checked here.
 * Returns 0 on success, -ENOMEM on allocation failure, or the command
 * status.
 */
static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
				     int size, int count, u8 version, u8 domain)
{
	struct be_cmd_req_set_profile_config *req;
	struct be_mcc_wrb wrb = {0};
	struct be_dma_mem cmd;
	int status;

	memset(&cmd, 0, sizeof(struct be_dma_mem));
	cmd.size = sizeof(struct be_cmd_req_set_profile_config);
	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
	if (!cmd.va)
		return -ENOMEM;

	req = cmd.va;
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_PROFILE_CONFIG, cmd.size,
			       &wrb, &cmd);
	req->hdr.version = version;
	req->hdr.domain = domain;
	req->desc_count = cpu_to_le32(count);
	memcpy(req->desc, desc, size);

	status = be_cmd_notify_wait(adapter, &wrb);

	if (cmd.va)
		pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
	return status;
}
3727
/* Mark all fields invalid (all-ones), so that FW treats every field as
 * "don't change"; callers then overwrite only the fields they intend to
 * program. All-ones patterns are endian-invariant, hence no cpu_to_le
 * conversion is needed for most fields here.
 */
static void be_reset_nic_desc(struct be_nic_res_desc *nic)
{
	memset(nic, 0, sizeof(*nic));
	nic->unicast_mac_count = 0xFFFF;
	nic->mcc_count = 0xFFFF;
	nic->vlan_count = 0xFFFF;
	nic->mcast_mac_count = 0xFFFF;
	nic->txq_count = 0xFFFF;
	nic->rq_count = 0xFFFF;
	nic->rssq_count = 0xFFFF;
	nic->lro_count = 0xFFFF;
	nic->cq_count = 0xFFFF;
	nic->toe_conn_count = 0xFFFF;
	nic->eq_count = 0xFFFF;
	nic->iface_count = 0xFFFF;
	nic->link_param = 0xFF;
	nic->channel_id_param = cpu_to_le16(0xF000);
	nic->acpi_params = 0xFF;
	nic->wol_param = 0x0F;
	nic->tunnel_iface_count = 0xFFFF;
	nic->direct_tenant_iface_count = 0xFFFF;
	nic->bw_min = 0xFFFFFFFF;
	nic->bw_max = 0xFFFFFFFF;
}
3753
3754 /* Mark all fields invalid */
3755 static void be_reset_pcie_desc(struct be_pcie_res_desc *pcie)
3756 {
3757         memset(pcie, 0, sizeof(*pcie));
3758         pcie->sriov_state = 0xFF;
3759         pcie->pf_state = 0xFF;
3760         pcie->pf_type = 0xFF;
3761         pcie->num_vfs = 0xFFFF;
3762 }
3763
/* Program a TX rate limit for the given @domain (VF).
 * BE3 uses the legacy SET_QOS command (rate in 10 Mbps units). Lancer
 * takes an absolute bw_max (also /10) via a v0 NIC descriptor; other
 * chips take bw_max as a percentage of @link_speed via a v1 descriptor —
 * so on those chips @link_speed must be non-zero when @max_rate is set
 * (division below).
 */
int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed,
		      u8 domain)
{
	struct be_nic_res_desc nic_desc;
	u32 bw_percent;
	u16 version = 0;

	if (BE3_chip(adapter))
		return be_cmd_set_qos(adapter, max_rate / 10, domain);

	/* Start with all fields "invalid" and set only the QoS ones */
	be_reset_nic_desc(&nic_desc);
	nic_desc.pf_num = adapter->pf_number;
	nic_desc.vf_num = domain;
	nic_desc.bw_min = 0;
	if (lancer_chip(adapter)) {
		nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0;
		nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0;
		nic_desc.flags = (1 << QUN_SHIFT) | (1 << IMM_SHIFT) |
					(1 << NOSV_SHIFT);
		nic_desc.bw_max = cpu_to_le32(max_rate / 10);
	} else {
		version = 1;
		nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1;
		nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
		nic_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
		/* max_rate == 0 means "no limit", i.e. 100% */
		bw_percent = max_rate ? (max_rate * 100) / link_speed : 100;
		nic_desc.bw_max = cpu_to_le32(bw_percent);
	}

	return be_cmd_set_profile_config(adapter, &nic_desc,
					 nic_desc.hdr.desc_len,
					 1, version, domain);
}
3797
/* Program the PF's SR-IOV PCIe descriptor and the VF-template NIC
 * descriptor in a single SET_PROFILE_CONFIG (v1) call.
 * No-op (returns 0) on BEx/Lancer. @num_vfs == 0 disables SR-IOV.
 */
int be_cmd_set_sriov_config(struct be_adapter *adapter,
			    struct be_resources res, u16 num_vfs)
{
	/* The two descriptors must be contiguous in the request */
	struct {
		struct be_pcie_res_desc pcie;
		struct be_nic_res_desc nic_vft;
	} __packed desc;
	u16 vf_q_count;

	if (BEx_chip(adapter) || lancer_chip(adapter))
		return 0;

	/* PF PCIE descriptor */
	be_reset_pcie_desc(&desc.pcie);
	desc.pcie.hdr.desc_type = PCIE_RESOURCE_DESC_TYPE_V1;
	desc.pcie.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
	desc.pcie.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
	desc.pcie.pf_num = adapter->pdev->devfn;
	desc.pcie.sriov_state = num_vfs ? 1 : 0;
	desc.pcie.num_vfs = cpu_to_le16(num_vfs);

	/* VF NIC Template descriptor */
	be_reset_nic_desc(&desc.nic_vft);
	desc.nic_vft.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1;
	desc.nic_vft.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
	desc.nic_vft.flags = (1 << VFT_SHIFT) | (1 << IMM_SHIFT) |
				(1 << NOSV_SHIFT);
	desc.nic_vft.pf_num = adapter->pdev->devfn;
	desc.nic_vft.vf_num = 0;

	if (num_vfs && res.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
		/* If number of VFs requested is 8 less than max supported,
		 * assign 8 queue pairs to the PF and divide the remaining
		 * resources evenly among the VFs
		 */
		if (num_vfs < (be_max_vfs(adapter) - 8))
			vf_q_count = (res.max_rss_qs - 8) / num_vfs;
		else
			vf_q_count = res.max_rss_qs / num_vfs;

		desc.nic_vft.rq_count = cpu_to_le16(vf_q_count);
		desc.nic_vft.txq_count = cpu_to_le16(vf_q_count);
		desc.nic_vft.rssq_count = cpu_to_le16(vf_q_count - 1);
		desc.nic_vft.cq_count = cpu_to_le16(3 * vf_q_count);
	} else {
		/* No RSS for VFs: a single queue pair each */
		desc.nic_vft.txq_count = cpu_to_le16(1);
		desc.nic_vft.rq_count = cpu_to_le16(1);
		desc.nic_vft.rssq_count = cpu_to_le16(0);
		/* One CQ for each TX, RX and MCCQ */
		desc.nic_vft.cq_count = cpu_to_le16(3);
	}

	return be_cmd_set_profile_config(adapter, &desc,
					 2 * RESOURCE_DESC_SIZE_V1, 2, 1, 0);
}
3853
/* Uses synchronous MCC.
 * Applies operation @op to the interface-filter configuration of
 * interface @iface. Rejects the invalid-handle sentinel 0xFFFFFFFF.
 * Returns 0 on success, -1 for an invalid handle, -EBUSY if no MCC WRB
 * is available, or the FW completion status.
 */
int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_manage_iface_filters *req;
	int status;

	if (iface == 0xFFFFFFFF)
		return -1;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_MANAGE_IFACE_FILTERS, sizeof(*req),
			       wrb, NULL);
	req->op = op;
	req->target_iface_id = cpu_to_le32(iface);

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
3883
/* Program (or, with @port == 0, disable) the VxLAN UDP port that FW
 * should recognize on this link, via a port resource descriptor sent
 * through SET_PROFILE_CONFIG.
 */
int be_cmd_set_vxlan_port(struct be_adapter *adapter, __be16 port)
{
	struct be_port_res_desc port_desc;

	memset(&port_desc, 0, sizeof(port_desc));
	port_desc.hdr.desc_type = PORT_RESOURCE_DESC_TYPE_V1;
	port_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
	port_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
	port_desc.link_num = adapter->hba_port_num;
	if (port) {
		port_desc.nv_flags = NV_TYPE_VXLAN | (1 << SOCVID_SHIFT) |
					(1 << RCVID_SHIFT);
		/* byte-swap the network-order port for the descriptor */
		port_desc.nv_port = swab16(port);
	} else {
		port_desc.nv_flags = NV_TYPE_DISABLED;
		port_desc.nv_port = 0;
	}

	return be_cmd_set_profile_config(adapter, &port_desc,
					 RESOURCE_DESC_SIZE_V1, 1, 1, 0);
}
3905
/* Uses synchronous MCC.
 * Queries the interface list of VF @vf_num (domain = vf_num + 1) and,
 * on success, stores its interface handle in @vf_cfg->if_handle.
 * Returns 0 on success, -EBUSY if no MCC WRB is available, or the FW
 * completion status.
 */
int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
		     int vf_num)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_iface_list *req;
	struct be_cmd_resp_get_iface_list *resp;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	/* payload length is sized for the (larger) response */
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_IFACE_LIST, sizeof(*resp),
			       wrb, NULL);
	req->hdr.domain = vf_num + 1;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		/* response overlays the request in the embedded payload */
		resp = (struct be_cmd_resp_get_iface_list *)req;
		vf_cfg->if_handle = le32_to_cpu(resp->if_desc.if_id);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
3938
3939 static int lancer_wait_idle(struct be_adapter *adapter)
3940 {
3941 #define SLIPORT_IDLE_TIMEOUT 30
3942         u32 reg_val;
3943         int status = 0, i;
3944
3945         for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3946                 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3947                 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3948                         break;
3949
3950                 ssleep(1);
3951         }
3952
3953         if (i == SLIPORT_IDLE_TIMEOUT)
3954                 status = -1;
3955
3956         return status;
3957 }
3958
3959 int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask)
3960 {
3961         int status = 0;
3962
3963         status = lancer_wait_idle(adapter);
3964         if (status)
3965                 return status;
3966
3967         iowrite32(mask, adapter->db + PHYSDEV_CONTROL_OFFSET);
3968
3969         return status;
3970 }
3971
3972 /* Routine to check whether dump image is present or not */
3973 bool dump_present(struct be_adapter *adapter)
3974 {
3975         u32 sliport_status = 0;
3976
3977         sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3978         return !!(sliport_status & SLIPORT_STATUS_DIP_MASK);
3979 }
3980
/* Trigger a Lancer FW diagnostic dump: issue a FW reset with the
 * diagnostic-dump flag, wait for the port to go idle, then verify a
 * dump image is now present. Refuses (-EEXIST) if a previous dump has
 * not been cleared; returns -EIO if no dump appeared, the physdev-ctrl
 * error on reset failure, or 0 on success.
 */
int lancer_initiate_dump(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	if (dump_present(adapter)) {
		dev_info(dev, "Previous dump not cleared, not forcing dump\n");
		return -EEXIST;
	}

	/* give firmware reset and diagnostic dump */
	status = lancer_physdev_ctrl(adapter, PHYSDEV_CONTROL_FW_RESET_MASK |
				     PHYSDEV_CONTROL_DD_MASK);
	if (status < 0) {
		dev_err(dev, "FW reset failed\n");
		return status;
	}

	status = lancer_wait_idle(adapter);
	if (status)
		return status;

	if (!dump_present(adapter)) {
		dev_err(dev, "FW dump not generated\n");
		return -EIO;
	}

	return 0;
}
4010
/* Delete the FW dump file from Lancer flash; the raw command status is
 * mapped to a standard errno by be_cmd_status().
 */
int lancer_delete_dump(struct be_adapter *adapter)
{
	int status;

	status = lancer_cmd_delete_object(adapter, LANCER_FW_DUMP_FILE);
	return be_cmd_status(status);
}
4018
/* Uses sync mcc.
 * Enables the VF identified by @domain via ENABLE_DISABLE_VF. No-op
 * (returns 0) on BEx chips. Returns -EBUSY if no MCC WRB is available,
 * or the FW completion status.
 */
int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_enable_disable_vf *req;
	int status;

	if (BEx_chip(adapter))
		return 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_ENABLE_DISABLE_VF, sizeof(*req),
			       wrb, NULL);

	req->hdr.domain = domain;
	req->enable = 1;
	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
4050
4051 int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable)
4052 {
4053         struct be_mcc_wrb *wrb;
4054         struct be_cmd_req_intr_set *req;
4055         int status;
4056
4057         if (mutex_lock_interruptible(&adapter->mbox_lock))
4058                 return -1;
4059
4060         wrb = wrb_from_mbox(adapter);
4061
4062         req = embedded_payload(wrb);
4063
4064         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4065                                OPCODE_COMMON_SET_INTERRUPT_ENABLE, sizeof(*req),
4066                                wrb, NULL);
4067
4068         req->intr_enabled = intr_enable;
4069
4070         status = be_mbox_notify_wait(adapter);
4071
4072         mutex_unlock(&adapter->mbox_lock);
4073         return status;
4074 }
4075
/* Uses MBOX.
 * Reads the currently active profile id from FW into *profile_id.
 * Returns 0 on success (only then is *profile_id written), -1 if the
 * mbox-lock wait was interrupted, -EBUSY if no mbox WRB is available,
 * or the FW completion status.
 */
int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile_id)
{
	struct be_cmd_req_get_active_profile *req;
	struct be_mcc_wrb *wrb;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_ACTIVE_PROFILE, sizeof(*req),
			       wrb, NULL);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		/* response overlays the request in the embedded payload */
		struct be_cmd_resp_get_active_profile *resp =
							embedded_payload(wrb);

		*profile_id = le16_to_cpu(resp->active_profile_id);
	}

err:
	mutex_unlock(&adapter->mbox_lock);
	return status;
}
4110
/* Uses synchronous MCC.
 * Sets the logical (admin) link state of the VF in @domain: ENABLE
 * forces the link up, AUTO makes it track the physical link; any other
 * @link_state leaves link_config at 0. Not supported (-EOPNOTSUPP) on
 * BEx/Lancer.
 */
int be_cmd_set_logical_link_config(struct be_adapter *adapter,
				   int link_state, u8 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_ll_link *req;
	int status;

	if (BEx_chip(adapter) || lancer_chip(adapter))
		return -EOPNOTSUPP;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_LOGICAL_LINK_CONFIG,
			       sizeof(*req), wrb, NULL);

	req->hdr.version = 1;
	req->hdr.domain = domain;

	if (link_state == IFLA_VF_LINK_STATE_ENABLE)
		req->link_config |= 1;

	if (link_state == IFLA_VF_LINK_STATE_AUTO)
		req->link_config |= 1 << PLINK_TRACK_SHIFT;

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
4149
/* Pass-through MCC command issued on behalf of the RoCE driver.
 * @wrb_payload holds a complete request (hdr + body) which is copied
 * into the embedded WRB payload, byte-swapped to LE, executed
 * synchronously, and the response (which overlays the request) is
 * copied back into @wrb_payload in CPU order. The low 16 bits of the
 * command status are returned via *cmd_status when non-NULL;
 * *ext_status is currently always reported as 0.
 * NOTE(review): the response copy-out runs even when notify fails, so
 * resp->response_length may then be stale — confirm callers only
 * consume the payload on success.
 */
int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
		    int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
{
	struct be_adapter *adapter = netdev_priv(netdev_handle);
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *)wrb_payload;
	struct be_cmd_req_hdr *req;
	struct be_cmd_resp_hdr *resp;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	/* req and resp alias the same embedded payload area */
	req = embedded_payload(wrb);
	resp = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(req, hdr->subsystem,
			       hdr->opcode, wrb_payload_size, wrb, NULL);
	memcpy(req, wrb_payload, wrb_payload_size);
	be_dws_cpu_to_le(req, wrb_payload_size);

	status = be_mcc_notify_wait(adapter);
	if (cmd_status)
		*cmd_status = (status & 0xffff);
	if (ext_status)
		*ext_status = 0;
	memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length);
	be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
EXPORT_SYMBOL(be_roce_mcc_cmd);