2 * Copyright (C) 2005 - 2014 Emulex
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
10 * Contact Information:
11 * linux-drivers@emulex.com
15 * Costa Mesa, CA 92626
18 #include <linux/module.h>
22 static struct be_cmd_priv_map cmd_priv_map[] = {
24 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
26 BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
27 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
30 OPCODE_COMMON_GET_FLOW_CONTROL,
32 BE_PRIV_LNKQUERY | BE_PRIV_VHADM |
33 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
36 OPCODE_COMMON_SET_FLOW_CONTROL,
38 BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
39 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
42 OPCODE_ETH_GET_PPORT_STATS,
44 BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
45 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
48 OPCODE_COMMON_GET_PHY_DETAILS,
50 BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
51 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
55 static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem)
58 int num_entries = sizeof(cmd_priv_map)/sizeof(struct be_cmd_priv_map);
59 u32 cmd_privileges = adapter->cmd_privileges;
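	/* A minimal usage sketch (assumption: the elided tail of this
	 * function returns false when the privilege mask check fails and
	 * true otherwise):
	 *
	 *	if (!be_cmd_allowed(adapter, OPCODE_COMMON_SET_FLOW_CONTROL,
	 *			    CMD_SUBSYSTEM_COMMON))
	 *		return -EPERM;
	 *
	 * Callers such as be_cmd_set_flow_control() below gate the
	 * privileged opcodes listed in cmd_priv_map this way; -EPERM here
	 * is an illustrative guess at the elided error code.
	 */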
61 for (i = 0; i < num_entries; i++)
62 if (opcode == cmd_priv_map[i].opcode &&
63 subsystem == cmd_priv_map[i].subsystem)
64 if (!(cmd_privileges & cmd_priv_map[i].priv_mask))
70 static inline void *embedded_payload(struct be_mcc_wrb *wrb)
72 return wrb->payload.embedded_payload;
75 static void be_mcc_notify(struct be_adapter *adapter)
77 struct be_queue_info *mccq = &adapter->mcc_obj.q;
80 if (be_error(adapter))
83 val |= mccq->id & DB_MCCQ_RING_ID_MASK;
84 val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
87 iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
90 /* To check if valid bit is set, check the entire word as we don't know
91 * the endianness of the data (old entry is host endian while a new entry is little endian) */
93 static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
97 if (compl->flags != 0) {
98 flags = le32_to_cpu(compl->flags);
99 if (flags & CQE_FLAGS_VALID_MASK) {
100 compl->flags = flags;
107 /* Need to reset the entire word that houses the valid bit */
108 static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
113 static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1)
118 addr = ((addr << 16) << 16) | tag0;
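	/* Example round-trip with fill_wrb_tags() below: a response header
	 * at address 0x1122334455667788 is stashed as tag0 = 0x55667788 and
	 * tag1 = 0x11223344, and recombined here as (tag1 << 32) | tag0.
	 * The double 16-bit shift presumably sidesteps shift-count warnings
	 * on 32-bit builds.
	 */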
122 static bool be_skip_err_log(u8 opcode, u16 base_status, u16 addl_status)
124 if (base_status == MCC_STATUS_NOT_SUPPORTED ||
125 base_status == MCC_STATUS_ILLEGAL_REQUEST ||
126 addl_status == MCC_ADDL_STATUS_TOO_MANY_INTERFACES ||
127 (opcode == OPCODE_COMMON_WRITE_FLASHROM &&
128 (base_status == MCC_STATUS_ILLEGAL_FIELD ||
129 addl_status == MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH)))
135 /* Placeholder for all the async MCC cmds wherein the caller is not in a busy
136 * loop (has not issued be_mcc_notify_wait()) */
138 static void be_async_cmd_process(struct be_adapter *adapter,
139 struct be_mcc_compl *compl,
140 struct be_cmd_resp_hdr *resp_hdr)
142 enum mcc_base_status base_status = base_status(compl->status);
143 u8 opcode = 0, subsystem = 0;
146 opcode = resp_hdr->opcode;
147 subsystem = resp_hdr->subsystem;
150 if (opcode == OPCODE_LOWLEVEL_LOOPBACK_TEST &&
151 subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
152 complete(&adapter->et_cmd_compl);
156 if ((opcode == OPCODE_COMMON_WRITE_FLASHROM ||
157 opcode == OPCODE_COMMON_WRITE_OBJECT) &&
158 subsystem == CMD_SUBSYSTEM_COMMON) {
159 adapter->flash_status = compl->status;
160 complete(&adapter->et_cmd_compl);
164 if ((opcode == OPCODE_ETH_GET_STATISTICS ||
165 opcode == OPCODE_ETH_GET_PPORT_STATS) &&
166 subsystem == CMD_SUBSYSTEM_ETH &&
167 base_status == MCC_STATUS_SUCCESS) {
168 be_parse_stats(adapter);
169 adapter->stats_cmd_sent = false;
173 if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES &&
174 subsystem == CMD_SUBSYSTEM_COMMON) {
175 if (base_status == MCC_STATUS_SUCCESS) {
176 struct be_cmd_resp_get_cntl_addnl_attribs *resp =
178 adapter->drv_stats.be_on_die_temperature =
179 resp->on_die_temperature;
181 adapter->be_get_temp_freq = 0;
187 static int be_mcc_compl_process(struct be_adapter *adapter,
188 struct be_mcc_compl *compl)
190 enum mcc_base_status base_status;
191 enum mcc_addl_status addl_status;
192 struct be_cmd_resp_hdr *resp_hdr;
193 u8 opcode = 0, subsystem = 0;
195 /* Just swap the status to host endian; mcc tag is opaquely copied from FW */
197 be_dws_le_to_cpu(compl, 4);
199 base_status = base_status(compl->status);
200 addl_status = addl_status(compl->status);
202 resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);
204 opcode = resp_hdr->opcode;
205 subsystem = resp_hdr->subsystem;
208 be_async_cmd_process(adapter, compl, resp_hdr);
210 if (base_status != MCC_STATUS_SUCCESS &&
211 !be_skip_err_log(opcode, base_status, addl_status)) {
212 if (base_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
213 dev_warn(&adapter->pdev->dev,
214 "VF is not privileged to issue opcode %d-%d\n",
217 dev_err(&adapter->pdev->dev,
218 "opcode %d-%d failed:status %d-%d\n",
219 opcode, subsystem, base_status, addl_status);
222 return compl->status;
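/* A sketch of the status-word layout this file appears to assume (consistent
 * with the CQE_ADDL_STATUS_SHIFT composition in be_mcc_notify_wait() below):
 *
 *	base_status(s) = (s) & 0xFFFF		bits  0..15
 *	addl_status(s) = ((s) >> 16) & 0xFFFF	bits 16..31
 */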
225 /* Link state evt is a string of bytes; no need for endian swapping */
226 static void be_async_link_state_process(struct be_adapter *adapter,
227 struct be_mcc_compl *compl)
229 struct be_async_event_link_state *evt =
230 (struct be_async_event_link_state *)compl;
232 /* When link status changes, link speed must be re-queried from FW */
233 adapter->phy.link_speed = -1;
235 /* On BEx the FW does not send a separate link status
236 * notification for physical and logical link.
237 * On other chips just process the logical link
238 * status notification
240 if (!BEx_chip(adapter) &&
241 !(evt->port_link_status & LOGICAL_LINK_STATUS_MASK))
244 /* For the initial link status do not rely on the ASYNC event as
245 * it may not be received in some cases.
247 if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT)
248 be_link_status_update(adapter,
249 evt->port_link_status & LINK_STATUS_MASK);
252 /* Grp5 CoS Priority evt */
253 static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
254 struct be_mcc_compl *compl)
256 struct be_async_event_grp5_cos_priority *evt =
257 (struct be_async_event_grp5_cos_priority *)compl;
260 adapter->vlan_prio_bmap = evt->available_priority_bmap;
261 adapter->recommended_prio &= ~VLAN_PRIO_MASK;
262 adapter->recommended_prio |=
263 evt->reco_default_priority << VLAN_PRIO_SHIFT;
267 /* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */
268 static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
269 struct be_mcc_compl *compl)
271 struct be_async_event_grp5_qos_link_speed *evt =
272 (struct be_async_event_grp5_qos_link_speed *)compl;
274 if (adapter->phy.link_speed >= 0 &&
275 evt->physical_port == adapter->port_num)
276 adapter->phy.link_speed = le16_to_cpu(evt->qos_link_speed) * 10;
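	/* e.g. a reported qos_link_speed of 100 translates to 1000 Mbps */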
280 static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
281 struct be_mcc_compl *compl)
283 struct be_async_event_grp5_pvid_state *evt =
284 (struct be_async_event_grp5_pvid_state *)compl;
287 adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
288 dev_info(&adapter->pdev->dev, "LPVID: %d\n", adapter->pvid);
294 static void be_async_grp5_evt_process(struct be_adapter *adapter,
295 struct be_mcc_compl *compl)
297 u8 event_type = (compl->flags >> ASYNC_EVENT_TYPE_SHIFT) &
298 ASYNC_EVENT_TYPE_MASK;
300 switch (event_type) {
301 case ASYNC_EVENT_COS_PRIORITY:
302 be_async_grp5_cos_priority_process(adapter, compl);
304 case ASYNC_EVENT_QOS_SPEED:
305 be_async_grp5_qos_speed_process(adapter, compl);
307 case ASYNC_EVENT_PVID_STATE:
308 be_async_grp5_pvid_state_process(adapter, compl);
315 static void be_async_dbg_evt_process(struct be_adapter *adapter,
316 struct be_mcc_compl *cmp)
319 struct be_async_event_qnq *evt = (struct be_async_event_qnq *)cmp;
321 event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) &
322 ASYNC_EVENT_TYPE_MASK;
324 switch (event_type) {
325 case ASYNC_DEBUG_EVENT_TYPE_QNQ:
327 adapter->qnq_vid = le16_to_cpu(evt->vlan_tag);
328 adapter->flags |= BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
331 dev_warn(&adapter->pdev->dev, "Unknown debug event 0x%x!\n",
337 static inline bool is_link_state_evt(u32 flags)
339 return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
340 ASYNC_EVENT_CODE_LINK_STATE;
343 static inline bool is_grp5_evt(u32 flags)
345 return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
346 ASYNC_EVENT_CODE_GRP_5;
349 static inline bool is_dbg_evt(u32 flags)
351 return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
352 ASYNC_EVENT_CODE_QNQ;
355 static void be_mcc_event_process(struct be_adapter *adapter,
356 struct be_mcc_compl *compl)
358 if (is_link_state_evt(compl->flags))
359 be_async_link_state_process(adapter, compl);
360 else if (is_grp5_evt(compl->flags))
361 be_async_grp5_evt_process(adapter, compl);
362 else if (is_dbg_evt(compl->flags))
363 be_async_dbg_evt_process(adapter, compl);
366 static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
368 struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
369 struct be_mcc_compl *compl = queue_tail_node(mcc_cq);
371 if (be_mcc_compl_is_new(compl)) {
372 queue_tail_inc(mcc_cq);
378 void be_async_mcc_enable(struct be_adapter *adapter)
380 spin_lock_bh(&adapter->mcc_cq_lock);
382 be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
383 adapter->mcc_obj.rearm_cq = true;
385 spin_unlock_bh(&adapter->mcc_cq_lock);
388 void be_async_mcc_disable(struct be_adapter *adapter)
390 spin_lock_bh(&adapter->mcc_cq_lock);
392 adapter->mcc_obj.rearm_cq = false;
393 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
395 spin_unlock_bh(&adapter->mcc_cq_lock);
398 int be_process_mcc(struct be_adapter *adapter)
400 struct be_mcc_compl *compl;
401 int num = 0, status = 0;
402 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
404 spin_lock(&adapter->mcc_cq_lock);
406 while ((compl = be_mcc_compl_get(adapter))) {
407 if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
408 be_mcc_event_process(adapter, compl);
409 } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
410 status = be_mcc_compl_process(adapter, compl);
411 atomic_dec(&mcc_obj->q.used);
413 be_mcc_compl_use(compl);
418 be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);
420 spin_unlock(&adapter->mcc_cq_lock);
424 /* Wait till no pending mcc requests remain */
425 static int be_mcc_wait_compl(struct be_adapter *adapter)
427 #define mcc_timeout 120000 /* 12s timeout */
429 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
431 for (i = 0; i < mcc_timeout; i++) {
432 if (be_error(adapter))
436 status = be_process_mcc(adapter);
439 if (atomic_read(&mcc_obj->q.used) == 0)
443 if (i == mcc_timeout) {
444 dev_err(&adapter->pdev->dev, "FW not responding\n");
445 adapter->fw_timeout = true;
451 /* Notify MCC requests and wait for completion */
452 static int be_mcc_notify_wait(struct be_adapter *adapter)
455 struct be_mcc_wrb *wrb;
456 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
457 u16 index = mcc_obj->q.head;
458 struct be_cmd_resp_hdr *resp;
460 index_dec(&index, mcc_obj->q.len);
461 wrb = queue_index_node(&mcc_obj->q, index);
463 resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1);
465 be_mcc_notify(adapter);
467 status = be_mcc_wait_compl(adapter);
471 status = (resp->base_status |
472 ((resp->addl_status & CQE_ADDL_STATUS_MASK) <<
473 CQE_ADDL_STATUS_SHIFT));
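	/* For instance, base_status 0x2 with addl_status 0x4d composes to
	 * 0x004d0002, which base_status()/addl_status() split apart again.
	 */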
478 static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
484 if (be_error(adapter))
487 ready = ioread32(db);
488 if (ready == 0xffffffff)
491 ready &= MPU_MAILBOX_DB_RDY_MASK;
496 dev_err(&adapter->pdev->dev, "FW not responding\n");
497 adapter->fw_timeout = true;
498 be_detect_error(adapter);
510 * Inserts the mailbox address into the doorbell in two steps
511 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
513 static int be_mbox_notify_wait(struct be_adapter *adapter)
517 void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
518 struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
519 struct be_mcc_mailbox *mbox = mbox_mem->va;
520 struct be_mcc_compl *compl = &mbox->compl;
522 /* wait for ready to be set */
523 status = be_mbox_db_ready_wait(adapter, db);
527 val |= MPU_MAILBOX_DB_HI_MASK;
528 /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
529 val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
532 /* wait for ready to be set */
533 status = be_mbox_db_ready_wait(adapter, db);
538 /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
539 val |= (u32)(mbox_mem->dma >> 4) << 2;
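	/* Worked example, assuming a 16-byte-aligned mailbox at dma address
	 * 0x4_0000_0100: the hi-word write above carries bits 34..63
	 * (0x1 << 2 = 0x4, plus the hi mask), and this lo-word write carries
	 * bits 4..33 (0x10 << 2 = 0x40).
	 */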
542 status = be_mbox_db_ready_wait(adapter, db);
546 /* A cq entry has been made now */
547 if (be_mcc_compl_is_new(compl)) {
548 status = be_mcc_compl_process(adapter, &mbox->compl);
549 be_mcc_compl_use(compl);
553 dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
559 static u16 be_POST_stage_get(struct be_adapter *adapter)
563 if (BEx_chip(adapter))
564 sem = ioread32(adapter->csr + SLIPORT_SEMAPHORE_OFFSET_BEx);
566 pci_read_config_dword(adapter->pdev,
567 SLIPORT_SEMAPHORE_OFFSET_SH, &sem);
569 return sem & POST_STAGE_MASK;
572 static int lancer_wait_ready(struct be_adapter *adapter)
574 #define SLIPORT_READY_TIMEOUT 30
578 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
579 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
580 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
586 if (i == SLIPORT_READY_TIMEOUT)
587 return sliport_status ? : -1;
592 static bool lancer_provisioning_error(struct be_adapter *adapter)
594 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
596 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
597 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
598 sliport_err1 = ioread32(adapter->db + SLIPORT_ERROR1_OFFSET);
599 sliport_err2 = ioread32(adapter->db + SLIPORT_ERROR2_OFFSET);
601 if (sliport_err1 == SLIPORT_ERROR_NO_RESOURCE1 &&
602 sliport_err2 == SLIPORT_ERROR_NO_RESOURCE2)
608 int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
611 u32 sliport_status, err, reset_needed;
614 resource_error = lancer_provisioning_error(adapter);
618 status = lancer_wait_ready(adapter);
620 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
621 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
622 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
623 if (err && reset_needed) {
624 iowrite32(SLI_PORT_CONTROL_IP_MASK,
625 adapter->db + SLIPORT_CONTROL_OFFSET);
627 /* check if adapter has corrected the error */
628 status = lancer_wait_ready(adapter);
629 sliport_status = ioread32(adapter->db +
630 SLIPORT_STATUS_OFFSET);
631 sliport_status &= (SLIPORT_STATUS_ERR_MASK |
632 SLIPORT_STATUS_RN_MASK);
633 if (status || sliport_status)
635 } else if (err || reset_needed) {
639 /* Stop error recovery if the error is not recoverable.
640 * A no-resource error is a temporary error that will go away
641 * when the PF provisions resources.
643 resource_error = lancer_provisioning_error(adapter);
650 int be_fw_wait_ready(struct be_adapter *adapter)
653 int status, timeout = 0;
654 struct device *dev = &adapter->pdev->dev;
656 if (lancer_chip(adapter)) {
657 status = lancer_wait_ready(adapter);
666 stage = be_POST_stage_get(adapter);
667 if (stage == POST_STAGE_ARMFW_RDY)
670 dev_info(dev, "Waiting for POST, %ds elapsed\n", timeout);
671 if (msleep_interruptible(2000)) {
672 dev_err(dev, "Waiting for POST aborted\n");
676 } while (timeout < 60);
679 dev_err(dev, "POST timeout; stage=%#x\n", stage);
683 static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
685 return &wrb->payload.sgl[0];
688 static inline void fill_wrb_tags(struct be_mcc_wrb *wrb, unsigned long addr)
690 wrb->tag0 = addr & 0xFFFFFFFF;
691 wrb->tag1 = upper_32_bits(addr);
694 /* Don't touch the hdr after it's prepared */
695 /* mem will be NULL for embedded commands */
696 static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
697 u8 subsystem, u8 opcode, int cmd_len,
698 struct be_mcc_wrb *wrb,
699 struct be_dma_mem *mem)
703 req_hdr->opcode = opcode;
704 req_hdr->subsystem = subsystem;
705 req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
706 req_hdr->version = 0;
707 fill_wrb_tags(wrb, (ulong) req_hdr);
708 wrb->payload_length = cmd_len;
710 wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
711 MCC_WRB_SGE_CNT_SHIFT;
712 sge = nonembedded_sgl(wrb);
713 sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
714 sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
715 sge->len = cpu_to_le32(mem->size);
717 wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
718 be_dws_cpu_to_le(wrb, 8);
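/* A sketch of the two WRB shapes this helper produces, assuming the layout
 * used throughout this file:
 *
 *	embedded:	wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
 *			the payload lives inside the wrb itself
 *	non-embedded:	sge_cnt = 1 in wrb->embedded; a single SGE points at
 *			the external DMA buffer (mem->dma / mem->size)
 *
 * In both cases tag0/tag1 hold the req_hdr address, recovered later by
 * be_decode_resp_hdr().
 */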
721 static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
722 struct be_dma_mem *mem)
724 int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
725 u64 dma = (u64)mem->dma;
727 for (i = 0; i < buf_pages; i++) {
728 pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
729 pages[i].hi = cpu_to_le32(upper_32_bits(dma));
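		/* e.g. an 8K ring starting on a page boundary yields two
		 * entries: pages[0] = dma, pages[1] = dma + 4K - assuming
		 * the elided loop tail advances dma by PAGE_SIZE_4K.
		 */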
734 static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
736 struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
737 struct be_mcc_wrb *wrb
738 = &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
739 memset(wrb, 0, sizeof(*wrb));
743 static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
745 struct be_queue_info *mccq = &adapter->mcc_obj.q;
746 struct be_mcc_wrb *wrb;
751 if (atomic_read(&mccq->used) >= mccq->len)
754 wrb = queue_head_node(mccq);
755 queue_head_inc(mccq);
756 atomic_inc(&mccq->used);
757 memset(wrb, 0, sizeof(*wrb));
761 static bool use_mcc(struct be_adapter *adapter)
763 return adapter->mcc_obj.q.created;
766 /* Must be used only in process context */
767 static int be_cmd_lock(struct be_adapter *adapter)
769 if (use_mcc(adapter)) {
770 spin_lock_bh(&adapter->mcc_lock);
773 return mutex_lock_interruptible(&adapter->mbox_lock);
777 /* Must be used only in process context */
778 static void be_cmd_unlock(struct be_adapter *adapter)
780 if (use_mcc(adapter))
781 spin_unlock_bh(&adapter->mcc_lock);
783 return mutex_unlock(&adapter->mbox_lock);
786 static struct be_mcc_wrb *be_cmd_copy(struct be_adapter *adapter,
787 struct be_mcc_wrb *wrb)
789 struct be_mcc_wrb *dest_wrb;
791 if (use_mcc(adapter)) {
792 dest_wrb = wrb_from_mccq(adapter);
796 dest_wrb = wrb_from_mbox(adapter);
799 memcpy(dest_wrb, wrb, sizeof(*wrb));
800 if (wrb->embedded & cpu_to_le32(MCC_WRB_EMBEDDED_MASK))
801 fill_wrb_tags(dest_wrb, (ulong) embedded_payload(wrb));
806 /* Must be used only in process context */
807 static int be_cmd_notify_wait(struct be_adapter *adapter,
808 struct be_mcc_wrb *wrb)
810 struct be_mcc_wrb *dest_wrb;
813 status = be_cmd_lock(adapter);
817 dest_wrb = be_cmd_copy(adapter, wrb);
821 if (use_mcc(adapter))
822 status = be_mcc_notify_wait(adapter);
824 status = be_mbox_notify_wait(adapter);
827 memcpy(wrb, dest_wrb, sizeof(*wrb));
829 be_cmd_unlock(adapter);
833 /* Tell fw we're about to start firing cmds by writing a
834 * special pattern across the wrb hdr; uses mbox
836 int be_cmd_fw_init(struct be_adapter *adapter)
841 if (lancer_chip(adapter))
844 if (mutex_lock_interruptible(&adapter->mbox_lock))
847 wrb = (u8 *)wrb_from_mbox(adapter);
857 status = be_mbox_notify_wait(adapter);
859 mutex_unlock(&adapter->mbox_lock);
863 /* Tell fw we're done with firing cmds by writing a
864 * special pattern across the wrb hdr; uses mbox
866 int be_cmd_fw_clean(struct be_adapter *adapter)
871 if (lancer_chip(adapter))
874 if (mutex_lock_interruptible(&adapter->mbox_lock))
877 wrb = (u8 *)wrb_from_mbox(adapter);
887 status = be_mbox_notify_wait(adapter);
889 mutex_unlock(&adapter->mbox_lock);
893 int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo)
895 struct be_mcc_wrb *wrb;
896 struct be_cmd_req_eq_create *req;
897 struct be_dma_mem *q_mem = &eqo->q.dma_mem;
900 if (mutex_lock_interruptible(&adapter->mbox_lock))
903 wrb = wrb_from_mbox(adapter);
904 req = embedded_payload(wrb);
906 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
907 OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb,
910 /* Support for EQ_CREATEv2 is available only from SH-R onwards */
911 if (!(BEx_chip(adapter) || lancer_chip(adapter)))
914 req->hdr.version = ver;
915 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
917 AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
919 AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
920 AMAP_SET_BITS(struct amap_eq_context, count, req->context,
921 __ilog2_u32(eqo->q.len / 256));
922 be_dws_cpu_to_le(req->context, sizeof(req->context));
924 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
926 status = be_mbox_notify_wait(adapter);
928 struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
930 eqo->q.id = le16_to_cpu(resp->eq_id);
932 (ver == 2) ? le16_to_cpu(resp->msix_idx) : eqo->idx;
933 eqo->q.created = true;
936 mutex_unlock(&adapter->mbox_lock);
941 int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
942 bool permanent, u32 if_handle, u32 pmac_id)
944 struct be_mcc_wrb *wrb;
945 struct be_cmd_req_mac_query *req;
948 spin_lock_bh(&adapter->mcc_lock);
950 wrb = wrb_from_mccq(adapter);
955 req = embedded_payload(wrb);
957 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
958 OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb,
960 req->type = MAC_ADDRESS_TYPE_NETWORK;
964 req->if_id = cpu_to_le16((u16)if_handle);
965 req->pmac_id = cpu_to_le32(pmac_id);
969 status = be_mcc_notify_wait(adapter);
971 struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
973 memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
977 spin_unlock_bh(&adapter->mcc_lock);
981 /* Uses synchronous MCCQ */
982 int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
983 u32 if_id, u32 *pmac_id, u32 domain)
985 struct be_mcc_wrb *wrb;
986 struct be_cmd_req_pmac_add *req;
989 spin_lock_bh(&adapter->mcc_lock);
991 wrb = wrb_from_mccq(adapter);
996 req = embedded_payload(wrb);
998 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
999 OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb,
1002 req->hdr.domain = domain;
1003 req->if_id = cpu_to_le32(if_id);
1004 memcpy(req->mac_address, mac_addr, ETH_ALEN);
1006 status = be_mcc_notify_wait(adapter);
1008 struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
1010 *pmac_id = le32_to_cpu(resp->pmac_id);
1014 spin_unlock_bh(&adapter->mcc_lock);
1016 if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
1022 /* Uses synchronous MCCQ */
1023 int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
1025 struct be_mcc_wrb *wrb;
1026 struct be_cmd_req_pmac_del *req;
1032 spin_lock_bh(&adapter->mcc_lock);
1034 wrb = wrb_from_mccq(adapter);
1039 req = embedded_payload(wrb);
1041 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1042 OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req),
1045 req->hdr.domain = dom;
1046 req->if_id = cpu_to_le32(if_id);
1047 req->pmac_id = cpu_to_le32(pmac_id);
1049 status = be_mcc_notify_wait(adapter);
1052 spin_unlock_bh(&adapter->mcc_lock);
1057 int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
1058 struct be_queue_info *eq, bool no_delay, int coalesce_wm)
1060 struct be_mcc_wrb *wrb;
1061 struct be_cmd_req_cq_create *req;
1062 struct be_dma_mem *q_mem = &cq->dma_mem;
1066 if (mutex_lock_interruptible(&adapter->mbox_lock))
1069 wrb = wrb_from_mbox(adapter);
1070 req = embedded_payload(wrb);
1071 ctxt = &req->context;
1073 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1074 OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb,
1077 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
1079 if (BEx_chip(adapter)) {
1080 AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
1082 AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
1084 AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
1085 __ilog2_u32(cq->len / 256));
1086 AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
1087 AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
1088 AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
1090 req->hdr.version = 2;
1091 req->page_size = 1; /* 1 for 4K */
1093 /* coalesce-wm field in this cmd is not relevant to Lancer.
1094 * Lancer uses COMMON_MODIFY_CQ to set this field
1096 if (!lancer_chip(adapter))
1097 AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
1099 AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt,
1101 AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
1102 __ilog2_u32(cq->len / 256));
1103 AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
1104 AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
1105 AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
1108 be_dws_cpu_to_le(ctxt, sizeof(req->context));
1110 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1112 status = be_mbox_notify_wait(adapter);
1114 struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
1116 cq->id = le16_to_cpu(resp->cq_id);
1120 mutex_unlock(&adapter->mbox_lock);
1125 static u32 be_encoded_q_len(int q_len)
1127 u32 len_encoded = fls(q_len); /* log2(len) + 1 */
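	/* e.g. q_len 256 encodes as 9 and 2048 as 12; an encoding of 16
	 * (q_len 32768) wraps to 0 below, presumably the hardware's code
	 * for the maximum ring size.
	 */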
1129 if (len_encoded == 16)
1134 static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
1135 struct be_queue_info *mccq,
1136 struct be_queue_info *cq)
1138 struct be_mcc_wrb *wrb;
1139 struct be_cmd_req_mcc_ext_create *req;
1140 struct be_dma_mem *q_mem = &mccq->dma_mem;
1144 if (mutex_lock_interruptible(&adapter->mbox_lock))
1147 wrb = wrb_from_mbox(adapter);
1148 req = embedded_payload(wrb);
1149 ctxt = &req->context;
1151 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1152 OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb,
1155 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
1156 if (BEx_chip(adapter)) {
1157 AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
1158 AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
1159 be_encoded_q_len(mccq->len));
1160 AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
1162 req->hdr.version = 1;
1163 req->cq_id = cpu_to_le16(cq->id);
1165 AMAP_SET_BITS(struct amap_mcc_context_v1, ring_size, ctxt,
1166 be_encoded_q_len(mccq->len));
1167 AMAP_SET_BITS(struct amap_mcc_context_v1, valid, ctxt, 1);
1168 AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_id,
1170 AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_valid,
1174 /* Subscribe to Link State and Group 5 Events (bits 1 and 5 set) */
1175 req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
1176 req->async_event_bitmap[0] |= cpu_to_le32(1 << ASYNC_EVENT_CODE_QNQ);
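	/* The bit positions match the async event codes decoded later in
	 * this file: bit 1 for ASYNC_EVENT_CODE_LINK_STATE, bit 5 for
	 * ASYNC_EVENT_CODE_GRP_5, plus the QNQ bit OR'ed in above.
	 */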
1177 be_dws_cpu_to_le(ctxt, sizeof(req->context));
1179 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1181 status = be_mbox_notify_wait(adapter);
1183 struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
1185 mccq->id = le16_to_cpu(resp->id);
1186 mccq->created = true;
1188 mutex_unlock(&adapter->mbox_lock);
1193 static int be_cmd_mccq_org_create(struct be_adapter *adapter,
1194 struct be_queue_info *mccq,
1195 struct be_queue_info *cq)
1197 struct be_mcc_wrb *wrb;
1198 struct be_cmd_req_mcc_create *req;
1199 struct be_dma_mem *q_mem = &mccq->dma_mem;
1203 if (mutex_lock_interruptible(&adapter->mbox_lock))
1206 wrb = wrb_from_mbox(adapter);
1207 req = embedded_payload(wrb);
1208 ctxt = &req->context;
1210 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1211 OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb,
1214 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
1216 AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
1217 AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
1218 be_encoded_q_len(mccq->len));
1219 AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
1221 be_dws_cpu_to_le(ctxt, sizeof(req->context));
1223 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1225 status = be_mbox_notify_wait(adapter);
1227 struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
1229 mccq->id = le16_to_cpu(resp->id);
1230 mccq->created = true;
1233 mutex_unlock(&adapter->mbox_lock);
1237 int be_cmd_mccq_create(struct be_adapter *adapter,
1238 struct be_queue_info *mccq, struct be_queue_info *cq)
1242 status = be_cmd_mccq_ext_create(adapter, mccq, cq);
1243 if (status && BEx_chip(adapter)) {
1244 dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
1245 "or newer to avoid conflicting priorities between NIC "
1246 "and FCoE traffic");
1247 status = be_cmd_mccq_org_create(adapter, mccq, cq);
1252 int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
1254 struct be_mcc_wrb wrb = {0};
1255 struct be_cmd_req_eth_tx_create *req;
1256 struct be_queue_info *txq = &txo->q;
1257 struct be_queue_info *cq = &txo->cq;
1258 struct be_dma_mem *q_mem = &txq->dma_mem;
1259 int status, ver = 0;
1261 req = embedded_payload(&wrb);
1262 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1263 OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL);
1265 if (lancer_chip(adapter)) {
1266 req->hdr.version = 1;
1267 } else if (BEx_chip(adapter)) {
1268 if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC)
1269 req->hdr.version = 2;
1270 } else { /* For SH */
1271 req->hdr.version = 2;
1274 if (req->hdr.version > 0)
1275 req->if_id = cpu_to_le16(adapter->if_handle);
1276 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
1277 req->ulp_num = BE_ULP1_NUM;
1278 req->type = BE_ETH_TX_RING_TYPE_STANDARD;
1279 req->cq_id = cpu_to_le16(cq->id);
1280 req->queue_size = be_encoded_q_len(txq->len);
1281 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1282 ver = req->hdr.version;
1284 status = be_cmd_notify_wait(adapter, &wrb);
1286 struct be_cmd_resp_eth_tx_create *resp = embedded_payload(&wrb);
1288 txq->id = le16_to_cpu(resp->cid);
1290 txo->db_offset = le32_to_cpu(resp->db_offset);
1292 txo->db_offset = DB_TXULP1_OFFSET;
1293 txq->created = true;
1300 int be_cmd_rxq_create(struct be_adapter *adapter,
1301 struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
1302 u32 if_id, u32 rss, u8 *rss_id)
1304 struct be_mcc_wrb *wrb;
1305 struct be_cmd_req_eth_rx_create *req;
1306 struct be_dma_mem *q_mem = &rxq->dma_mem;
1309 spin_lock_bh(&adapter->mcc_lock);
1311 wrb = wrb_from_mccq(adapter);
1316 req = embedded_payload(wrb);
1318 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1319 OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);
1321 req->cq_id = cpu_to_le16(cq_id);
1322 req->frag_size = fls(frag_size) - 1;
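	/* frag_size is programmed as a log2; e.g. 2048-byte fragments are
	 * encoded as fls(2048) - 1 = 11
	 */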
1324 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1325 req->interface_id = cpu_to_le32(if_id);
1326 req->max_frame_size = cpu_to_le16(BE_MAX_JUMBO_FRAME_SIZE);
1327 req->rss_queue = cpu_to_le32(rss);
1329 status = be_mcc_notify_wait(adapter);
1331 struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
1333 rxq->id = le16_to_cpu(resp->id);
1334 rxq->created = true;
1335 *rss_id = resp->rss_id;
1339 spin_unlock_bh(&adapter->mcc_lock);
1343 /* Generic destroyer function for all types of queues
1346 int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
1349 struct be_mcc_wrb *wrb;
1350 struct be_cmd_req_q_destroy *req;
1351 u8 subsys = 0, opcode = 0;
1354 if (mutex_lock_interruptible(&adapter->mbox_lock))
1357 wrb = wrb_from_mbox(adapter);
1358 req = embedded_payload(wrb);
1360 switch (queue_type) {
1362 subsys = CMD_SUBSYSTEM_COMMON;
1363 opcode = OPCODE_COMMON_EQ_DESTROY;
1366 subsys = CMD_SUBSYSTEM_COMMON;
1367 opcode = OPCODE_COMMON_CQ_DESTROY;
1370 subsys = CMD_SUBSYSTEM_ETH;
1371 opcode = OPCODE_ETH_TX_DESTROY;
1374 subsys = CMD_SUBSYSTEM_ETH;
1375 opcode = OPCODE_ETH_RX_DESTROY;
1378 subsys = CMD_SUBSYSTEM_COMMON;
1379 opcode = OPCODE_COMMON_MCC_DESTROY;
1385 be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb,
1387 req->id = cpu_to_le16(q->id);
1389 status = be_mbox_notify_wait(adapter);
1392 mutex_unlock(&adapter->mbox_lock);
1397 int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
1399 struct be_mcc_wrb *wrb;
1400 struct be_cmd_req_q_destroy *req;
1403 spin_lock_bh(&adapter->mcc_lock);
1405 wrb = wrb_from_mccq(adapter);
1410 req = embedded_payload(wrb);
1412 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1413 OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
1414 req->id = cpu_to_le16(q->id);
1416 status = be_mcc_notify_wait(adapter);
1420 spin_unlock_bh(&adapter->mcc_lock);
1424 /* Create an rx filtering policy configuration on an i/f
1425 * Will use MBOX only if MCCQ has not been created.
1427 int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
1428 u32 *if_handle, u32 domain)
1430 struct be_mcc_wrb wrb = {0};
1431 struct be_cmd_req_if_create *req;
1434 req = embedded_payload(&wrb);
1435 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1436 OPCODE_COMMON_NTWK_INTERFACE_CREATE,
1437 sizeof(*req), &wrb, NULL);
1438 req->hdr.domain = domain;
1439 req->capability_flags = cpu_to_le32(cap_flags);
1440 req->enable_flags = cpu_to_le32(en_flags);
1441 req->pmac_invalid = true;
1443 status = be_cmd_notify_wait(adapter, &wrb);
1445 struct be_cmd_resp_if_create *resp = embedded_payload(&wrb);
1447 *if_handle = le32_to_cpu(resp->interface_id);
1449 /* Hack to retrieve VF's pmac-id on BE3 */
1450 if (BE3_chip(adapter) && !be_physfn(adapter))
1451 adapter->pmac_id[0] = le32_to_cpu(resp->pmac_id);
1457 int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
1459 struct be_mcc_wrb *wrb;
1460 struct be_cmd_req_if_destroy *req;
1463 if (interface_id == -1)
1466 spin_lock_bh(&adapter->mcc_lock);
1468 wrb = wrb_from_mccq(adapter);
1473 req = embedded_payload(wrb);
1475 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1476 OPCODE_COMMON_NTWK_INTERFACE_DESTROY,
1477 sizeof(*req), wrb, NULL);
1478 req->hdr.domain = domain;
1479 req->interface_id = cpu_to_le32(interface_id);
1481 status = be_mcc_notify_wait(adapter);
1483 spin_unlock_bh(&adapter->mcc_lock);
1487 /* Get stats is a non-embedded command: the request is not embedded inside
1488 * the WRB but is a separate dma memory block
1489 * Uses asynchronous MCC
1491 int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
1493 struct be_mcc_wrb *wrb;
1494 struct be_cmd_req_hdr *hdr;
1497 spin_lock_bh(&adapter->mcc_lock);
1499 wrb = wrb_from_mccq(adapter);
1504 hdr = nonemb_cmd->va;
1506 be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
1507 OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb,
1510 /* BE2 is the only chip that does not support version 1 of the cmd */
1511 if (BE2_chip(adapter))
1513 if (BE3_chip(adapter) || lancer_chip(adapter))
1518 be_mcc_notify(adapter);
1519 adapter->stats_cmd_sent = true;
1522 spin_unlock_bh(&adapter->mcc_lock);
1527 int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
1528 struct be_dma_mem *nonemb_cmd)
1530 struct be_mcc_wrb *wrb;
1531 struct lancer_cmd_req_pport_stats *req;
1534 if (!be_cmd_allowed(adapter, OPCODE_ETH_GET_PPORT_STATS,
1538 spin_lock_bh(&adapter->mcc_lock);
1540 wrb = wrb_from_mccq(adapter);
1545 req = nonemb_cmd->va;
1547 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1548 OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size,
1551 req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
1552 req->cmd_params.params.reset_stats = 0;
1554 be_mcc_notify(adapter);
1555 adapter->stats_cmd_sent = true;
1558 spin_unlock_bh(&adapter->mcc_lock);
1562 static int be_mac_to_link_speed(int mac_speed)
1564 switch (mac_speed) {
1565 case PHY_LINK_SPEED_ZERO:
1567 case PHY_LINK_SPEED_10MBPS:
1569 case PHY_LINK_SPEED_100MBPS:
1571 case PHY_LINK_SPEED_1GBPS:
1573 case PHY_LINK_SPEED_10GBPS:
1575 case PHY_LINK_SPEED_20GBPS:
1577 case PHY_LINK_SPEED_25GBPS:
1579 case PHY_LINK_SPEED_40GBPS:
1585 /* Uses synchronous mcc
1586 * Returns link_speed in Mbps
1588 int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
1589 u8 *link_status, u32 dom)
1591 struct be_mcc_wrb *wrb;
1592 struct be_cmd_req_link_status *req;
1595 spin_lock_bh(&adapter->mcc_lock);
1598 *link_status = LINK_DOWN;
1600 wrb = wrb_from_mccq(adapter);
1605 req = embedded_payload(wrb);
1607 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1608 OPCODE_COMMON_NTWK_LINK_STATUS_QUERY,
1609 sizeof(*req), wrb, NULL);
1611 /* BE2 is the only chip that does not support version 1 of the cmd */
1612 if (!BE2_chip(adapter))
1613 req->hdr.version = 1;
1615 req->hdr.domain = dom;
1617 status = be_mcc_notify_wait(adapter);
1619 struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
1622 *link_speed = resp->link_speed ?
1623 le16_to_cpu(resp->link_speed) * 10 :
1624 be_mac_to_link_speed(resp->mac_speed);
1626 if (!resp->logical_link_status)
1630 *link_status = resp->logical_link_status;
1634 spin_unlock_bh(&adapter->mcc_lock);
1638 /* Uses synchronous mcc */
1639 int be_cmd_get_die_temperature(struct be_adapter *adapter)
1641 struct be_mcc_wrb *wrb;
1642 struct be_cmd_req_get_cntl_addnl_attribs *req;
1645 spin_lock_bh(&adapter->mcc_lock);
1647 wrb = wrb_from_mccq(adapter);
1652 req = embedded_payload(wrb);
1654 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1655 OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES,
1656 sizeof(*req), wrb, NULL);
1658 be_mcc_notify(adapter);
1661 spin_unlock_bh(&adapter->mcc_lock);
1665 /* Uses synchronous mcc */
1666 int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
1668 struct be_mcc_wrb *wrb;
1669 struct be_cmd_req_get_fat *req;
1672 spin_lock_bh(&adapter->mcc_lock);
1674 wrb = wrb_from_mccq(adapter);
1679 req = embedded_payload(wrb);
1681 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1682 OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb,
1684 req->fat_operation = cpu_to_le32(QUERY_FAT);
1685 status = be_mcc_notify_wait(adapter);
1687 struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);
1689 if (log_size && resp->log_size)
1690 *log_size = le32_to_cpu(resp->log_size) -
1694 spin_unlock_bh(&adapter->mcc_lock);
1698 int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
1700 struct be_dma_mem get_fat_cmd;
1701 struct be_mcc_wrb *wrb;
1702 struct be_cmd_req_get_fat *req;
1703 u32 offset = 0, total_size, buf_size,
1704 log_offset = sizeof(u32), payload_len;
1710 total_size = buf_len;
1712 get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
1713 get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
1716 if (!get_fat_cmd.va) {
1717 dev_err(&adapter->pdev->dev,
1718 "Memory allocation failure while reading FAT data\n");
1722 spin_lock_bh(&adapter->mcc_lock);
1724 while (total_size) {
1725 buf_size = min(total_size, (u32)60*1024);
1726 total_size -= buf_size;
1728 wrb = wrb_from_mccq(adapter);
1733 req = get_fat_cmd.va;
1735 payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
1736 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1737 OPCODE_COMMON_MANAGE_FAT, payload_len,
1740 req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
1741 req->read_log_offset = cpu_to_le32(log_offset);
1742 req->read_log_length = cpu_to_le32(buf_size);
1743 req->data_buffer_size = cpu_to_le32(buf_size);
1745 status = be_mcc_notify_wait(adapter);
1747 struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
1749 memcpy(buf + offset,
1751 le32_to_cpu(resp->read_log_length));
1753 dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
1757 log_offset += buf_size;
1760 pci_free_consistent(adapter->pdev, get_fat_cmd.size,
1761 get_fat_cmd.va, get_fat_cmd.dma);
1762 spin_unlock_bh(&adapter->mcc_lock);
1766 /* Uses synchronous mcc */
1767 int be_cmd_get_fw_ver(struct be_adapter *adapter)
1769 struct be_mcc_wrb *wrb;
1770 struct be_cmd_req_get_fw_version *req;
1773 spin_lock_bh(&adapter->mcc_lock);
1775 wrb = wrb_from_mccq(adapter);
1781 req = embedded_payload(wrb);
1783 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1784 OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb,
1786 status = be_mcc_notify_wait(adapter);
1788 struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
1790 strlcpy(adapter->fw_ver, resp->firmware_version_string,
1791 sizeof(adapter->fw_ver));
1792 strlcpy(adapter->fw_on_flash, resp->fw_on_flash_version_string,
1793 sizeof(adapter->fw_on_flash));
1796 spin_unlock_bh(&adapter->mcc_lock);
1800 /* set the EQ delay interval of an EQ to the specified value
1803 static int __be_cmd_modify_eqd(struct be_adapter *adapter,
1804 struct be_set_eqd *set_eqd, int num)
1806 struct be_mcc_wrb *wrb;
1807 struct be_cmd_req_modify_eq_delay *req;
1810 spin_lock_bh(&adapter->mcc_lock);
1812 wrb = wrb_from_mccq(adapter);
1817 req = embedded_payload(wrb);
1819 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1820 OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb,
1823 req->num_eq = cpu_to_le32(num);
1824 for (i = 0; i < num; i++) {
1825 req->set_eqd[i].eq_id = cpu_to_le32(set_eqd[i].eq_id);
1826 req->set_eqd[i].phase = 0;
1827 req->set_eqd[i].delay_multiplier =
1828 cpu_to_le32(set_eqd[i].delay_multiplier);
1831 be_mcc_notify(adapter);
1833 spin_unlock_bh(&adapter->mcc_lock);
1837 int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
1842 if (lancer_chip(adapter) && num > 8) {
1844 num_eqs = min(num, 8);
1845 __be_cmd_modify_eqd(adapter, &set_eqd[i], num_eqs);
1850 __be_cmd_modify_eqd(adapter, set_eqd, num);
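	/* e.g. on Lancer a request for 20 EQs goes out as chunks of 8, 8
	 * and 4, assuming the elided loop bookkeeping advances i and
	 * decrements num by num_eqs per pass; other chips take this
	 * single-call path.
	 */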
1856 /* Uses synchronous mcc */
1857 int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
1860 struct be_mcc_wrb *wrb;
1861 struct be_cmd_req_vlan_config *req;
1864 spin_lock_bh(&adapter->mcc_lock);
1866 wrb = wrb_from_mccq(adapter);
1871 req = embedded_payload(wrb);
1873 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1874 OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req),
1877 req->interface_id = if_id;
1878 req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0;
1879 req->num_vlan = num;
1880 memcpy(req->normal_vlan, vtag_array,
1881 req->num_vlan * sizeof(vtag_array[0]));
1883 status = be_mcc_notify_wait(adapter);
1885 spin_unlock_bh(&adapter->mcc_lock);
1889 int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
1891 struct be_mcc_wrb *wrb;
1892 struct be_dma_mem *mem = &adapter->rx_filter;
1893 struct be_cmd_req_rx_filter *req = mem->va;
1896 spin_lock_bh(&adapter->mcc_lock);
1898 wrb = wrb_from_mccq(adapter);
1903 memset(req, 0, sizeof(*req));
1904 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1905 OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
1908 req->if_id = cpu_to_le32(adapter->if_handle);
1909 if (flags & IFF_PROMISC) {
1910 req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
1911 BE_IF_FLAGS_VLAN_PROMISCUOUS |
1912 BE_IF_FLAGS_MCAST_PROMISCUOUS);
1915 cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
1916 BE_IF_FLAGS_VLAN_PROMISCUOUS |
1917 BE_IF_FLAGS_MCAST_PROMISCUOUS);
1918 } else if (flags & IFF_ALLMULTI) {
1919 req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
1920 req->if_flags = cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
1921 } else if (flags & BE_FLAGS_VLAN_PROMISC) {
1922 req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_VLAN_PROMISCUOUS);
1926 cpu_to_le32(BE_IF_FLAGS_VLAN_PROMISCUOUS);
1928 struct netdev_hw_addr *ha;
1931 req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_MULTICAST);
1932 req->if_flags = cpu_to_le32(BE_IF_FLAGS_MULTICAST);
1934 /* Reset mcast promisc mode if it was already set, by setting the
1935 * mask bit while leaving the corresponding flags bit clear
1937 req->if_flags_mask |=
1938 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS &
1939 be_if_cap_flags(adapter));
1940 req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
1941 netdev_for_each_mc_addr(ha, adapter->netdev)
1942 memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
1945 if ((req->if_flags_mask & cpu_to_le32(be_if_cap_flags(adapter))) !=
1946 req->if_flags_mask) {
1947 dev_warn(&adapter->pdev->dev,
1948 "Cannot set rx filter flags 0x%x\n",
1949 req->if_flags_mask);
1950 dev_warn(&adapter->pdev->dev,
1951 "Interface is capable of 0x%x flags only\n",
1952 be_if_cap_flags(adapter));
1954 req->if_flags_mask &= cpu_to_le32(be_if_cap_flags(adapter));
1956 status = be_mcc_notify_wait(adapter);
1959 spin_unlock_bh(&adapter->mcc_lock);
1963 /* Uses synchronous mcc */
1964 int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
1966 struct be_mcc_wrb *wrb;
1967 struct be_cmd_req_set_flow_control *req;
1970 if (!be_cmd_allowed(adapter, OPCODE_COMMON_SET_FLOW_CONTROL,
1971 CMD_SUBSYSTEM_COMMON))
1974 spin_lock_bh(&adapter->mcc_lock);
1976 wrb = wrb_from_mccq(adapter);
1981 req = embedded_payload(wrb);
1983 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1984 OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req),
1987 req->hdr.version = 1;
1988 req->tx_flow_control = cpu_to_le16((u16)tx_fc);
1989 req->rx_flow_control = cpu_to_le16((u16)rx_fc);
1991 status = be_mcc_notify_wait(adapter);
1994 spin_unlock_bh(&adapter->mcc_lock);
1996 if (base_status(status) == MCC_STATUS_FEATURE_NOT_SUPPORTED)
2003 int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
2005 struct be_mcc_wrb *wrb;
2006 struct be_cmd_req_get_flow_control *req;
2009 if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_FLOW_CONTROL,
2010 CMD_SUBSYSTEM_COMMON))
2013 spin_lock_bh(&adapter->mcc_lock);
2015 wrb = wrb_from_mccq(adapter);
2020 req = embedded_payload(wrb);
2022 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2023 OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req),
2026 status = be_mcc_notify_wait(adapter);
2028 struct be_cmd_resp_get_flow_control *resp =
2029 embedded_payload(wrb);
2031 *tx_fc = le16_to_cpu(resp->tx_flow_control);
2032 *rx_fc = le16_to_cpu(resp->rx_flow_control);
2036 spin_unlock_bh(&adapter->mcc_lock);
2041 int be_cmd_query_fw_cfg(struct be_adapter *adapter)
2043 struct be_mcc_wrb *wrb;
2044 struct be_cmd_req_query_fw_cfg *req;
2047 if (mutex_lock_interruptible(&adapter->mbox_lock))
2050 wrb = wrb_from_mbox(adapter);
2051 req = embedded_payload(wrb);
2053 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2054 OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
2055 sizeof(*req), wrb, NULL);
2057 status = be_mbox_notify_wait(adapter);
2059 struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
2061 adapter->port_num = le32_to_cpu(resp->phys_port);
2062 adapter->function_mode = le32_to_cpu(resp->function_mode);
2063 adapter->function_caps = le32_to_cpu(resp->function_caps);
2064 adapter->asic_rev = le32_to_cpu(resp->asic_revision) & 0xFF;
2065 dev_info(&adapter->pdev->dev,
2066 "FW config: function_mode=0x%x, function_caps=0x%x\n",
2067 adapter->function_mode, adapter->function_caps);
2070 mutex_unlock(&adapter->mbox_lock);
2075 int be_cmd_reset_function(struct be_adapter *adapter)
2077 struct be_mcc_wrb *wrb;
2078 struct be_cmd_req_hdr *req;
2081 if (lancer_chip(adapter)) {
2082 status = lancer_wait_ready(adapter);
2084 iowrite32(SLI_PORT_CONTROL_IP_MASK,
2085 adapter->db + SLIPORT_CONTROL_OFFSET);
2086 status = lancer_test_and_set_rdy_state(adapter);
2089 dev_err(&adapter->pdev->dev,
2090 "Adapter in non recoverable error\n");
2095 if (mutex_lock_interruptible(&adapter->mbox_lock))
2098 wrb = wrb_from_mbox(adapter);
2099 req = embedded_payload(wrb);
2101 be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
2102 OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb,
2105 status = be_mbox_notify_wait(adapter);
2107 mutex_unlock(&adapter->mbox_lock);
2111 int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
2112 u32 rss_hash_opts, u16 table_size, const u8 *rss_hkey)
2114 struct be_mcc_wrb *wrb;
2115 struct be_cmd_req_rss_config *req;
2118 if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
2121 spin_lock_bh(&adapter->mcc_lock);
2123 wrb = wrb_from_mccq(adapter);
2128 req = embedded_payload(wrb);
2130 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
2131 OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);
2133 req->if_id = cpu_to_le32(adapter->if_handle);
2134 req->enable_rss = cpu_to_le16(rss_hash_opts);
2135 req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
2137 if (!BEx_chip(adapter))
2138 req->hdr.version = 1;
2140 memcpy(req->cpu_table, rsstable, table_size);
2141 memcpy(req->hash, rss_hkey, RSS_HASH_KEY_LEN);
2142 be_dws_cpu_to_le(req->hash, sizeof(req->hash));
2144 status = be_mcc_notify_wait(adapter);
2146 spin_unlock_bh(&adapter->mcc_lock);
2151 int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
2152 u8 bcn, u8 sts, u8 state)
2154 struct be_mcc_wrb *wrb;
2155 struct be_cmd_req_enable_disable_beacon *req;
2158 spin_lock_bh(&adapter->mcc_lock);
2160 wrb = wrb_from_mccq(adapter);
2165 req = embedded_payload(wrb);
2167 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2168 OPCODE_COMMON_ENABLE_DISABLE_BEACON,
2169 sizeof(*req), wrb, NULL);
2171 req->port_num = port_num;
2172 req->beacon_state = state;
2173 req->beacon_duration = bcn;
2174 req->status_duration = sts;
2176 status = be_mcc_notify_wait(adapter);
2179 spin_unlock_bh(&adapter->mcc_lock);
2184 int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
2186 struct be_mcc_wrb *wrb;
2187 struct be_cmd_req_get_beacon_state *req;
2190 spin_lock_bh(&adapter->mcc_lock);
2192 wrb = wrb_from_mccq(adapter);
2197 req = embedded_payload(wrb);
2199 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2200 OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req),
2203 req->port_num = port_num;
2205 status = be_mcc_notify_wait(adapter);
2207 struct be_cmd_resp_get_beacon_state *resp =
2208 embedded_payload(wrb);
2210 *state = resp->beacon_state;
2214 spin_unlock_bh(&adapter->mcc_lock);
2219 int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
2220 u8 page_num, u8 *data)
2222 struct be_dma_mem cmd;
2223 struct be_mcc_wrb *wrb;
2224 struct be_cmd_req_port_type *req;
2227 if (page_num > TR_PAGE_A2)
2230 cmd.size = sizeof(struct be_cmd_resp_port_type);
2231 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
2233 dev_err(&adapter->pdev->dev, "Memory allocation failed\n");
2236 memset(cmd.va, 0, cmd.size);
2238 spin_lock_bh(&adapter->mcc_lock);
2240 wrb = wrb_from_mccq(adapter);
2247 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2248 OPCODE_COMMON_READ_TRANSRECV_DATA,
2249 cmd.size, wrb, &cmd);
2251 req->port = cpu_to_le32(adapter->hba_port_num);
2252 req->page_num = cpu_to_le32(page_num);
2253 status = be_mcc_notify_wait(adapter);
2255 struct be_cmd_resp_port_type *resp = cmd.va;
2257 memcpy(data, resp->page_data, PAGE_DATA_LEN);
2260 spin_unlock_bh(&adapter->mcc_lock);
2261 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
2265 int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
2266 u32 data_size, u32 data_offset,
2267 const char *obj_name, u32 *data_written,
2268 u8 *change_status, u8 *addn_status)
2270 struct be_mcc_wrb *wrb;
2271 struct lancer_cmd_req_write_object *req;
2272 struct lancer_cmd_resp_write_object *resp;
2276 spin_lock_bh(&adapter->mcc_lock);
2277 adapter->flash_status = 0;
2279 wrb = wrb_from_mccq(adapter);
2285 req = embedded_payload(wrb);
2287 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2288 OPCODE_COMMON_WRITE_OBJECT,
2289 sizeof(struct lancer_cmd_req_write_object), wrb,
2292 ctxt = &req->context;
2293 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
2294 write_length, ctxt, data_size);
2297 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
2300 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
2303 be_dws_cpu_to_le(ctxt, sizeof(req->context));
2304 req->write_offset = cpu_to_le32(data_offset);
2305 strlcpy(req->object_name, obj_name, sizeof(req->object_name));
2306 req->descriptor_count = cpu_to_le32(1);
2307 req->buf_len = cpu_to_le32(data_size);
2308 req->addr_low = cpu_to_le32((cmd->dma +
2309 sizeof(struct lancer_cmd_req_write_object))
2311 req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
2312 sizeof(struct lancer_cmd_req_write_object)));
2314 be_mcc_notify(adapter);
2315 spin_unlock_bh(&adapter->mcc_lock);
2317 if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
2318 msecs_to_jiffies(60000)))
2319 status = -ETIMEDOUT;
2321 status = adapter->flash_status;
2323 resp = embedded_payload(wrb);
2325 *data_written = le32_to_cpu(resp->actual_write_len);
2326 *change_status = resp->change_status;
2328 *addn_status = resp->additional_status;
2334 spin_unlock_bh(&adapter->mcc_lock);
2338 int be_cmd_query_cable_type(struct be_adapter *adapter)
2340 u8 page_data[PAGE_DATA_LEN];
2343 status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
2346 switch (adapter->phy.interface_type) {
2348 adapter->phy.cable_type =
2349 page_data[QSFP_PLUS_CABLE_TYPE_OFFSET];
2351 case PHY_TYPE_SFP_PLUS_10GB:
2352 adapter->phy.cable_type =
2353 page_data[SFP_PLUS_CABLE_TYPE_OFFSET];
2356 adapter->phy.cable_type = 0;
2363 int lancer_cmd_delete_object(struct be_adapter *adapter, const char *obj_name)
2365 struct lancer_cmd_req_delete_object *req;
2366 struct be_mcc_wrb *wrb;
2369 spin_lock_bh(&adapter->mcc_lock);
2371 wrb = wrb_from_mccq(adapter);
2377 req = embedded_payload(wrb);
2379 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2380 OPCODE_COMMON_DELETE_OBJECT,
2381 sizeof(*req), wrb, NULL);
2383 strlcpy(req->object_name, obj_name, sizeof(req->object_name));
2385 status = be_mcc_notify_wait(adapter);
2387 spin_unlock_bh(&adapter->mcc_lock);
2391 int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
2392 u32 data_size, u32 data_offset, const char *obj_name,
2393 u32 *data_read, u32 *eof, u8 *addn_status)
2395 struct be_mcc_wrb *wrb;
2396 struct lancer_cmd_req_read_object *req;
2397 struct lancer_cmd_resp_read_object *resp;
2400 spin_lock_bh(&adapter->mcc_lock);
2402 wrb = wrb_from_mccq(adapter);
2408 req = embedded_payload(wrb);
2410 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2411 OPCODE_COMMON_READ_OBJECT,
2412 sizeof(struct lancer_cmd_req_read_object), wrb,
2415 req->desired_read_len = cpu_to_le32(data_size);
2416 req->read_offset = cpu_to_le32(data_offset);
2417 strlcpy(req->object_name, obj_name, sizeof(req->object_name));
2418 req->descriptor_count = cpu_to_le32(1);
2419 req->buf_len = cpu_to_le32(data_size);
2420 req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF));
2421 req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma));
2423 status = be_mcc_notify_wait(adapter);
2425 resp = embedded_payload(wrb);
2427 *data_read = le32_to_cpu(resp->actual_read_len);
2428 *eof = le32_to_cpu(resp->eof);
2430 *addn_status = resp->additional_status;
2434 spin_unlock_bh(&adapter->mcc_lock);
2438 int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
2439 u32 flash_type, u32 flash_opcode, u32 buf_size)
2441 struct be_mcc_wrb *wrb;
2442 struct be_cmd_write_flashrom *req;
2445 spin_lock_bh(&adapter->mcc_lock);
2446 adapter->flash_status = 0;
2448 wrb = wrb_from_mccq(adapter);
2455 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2456 OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb,
2459 req->params.op_type = cpu_to_le32(flash_type);
2460 req->params.op_code = cpu_to_le32(flash_opcode);
2461 req->params.data_buf_size = cpu_to_le32(buf_size);
2463 be_mcc_notify(adapter);
2464 spin_unlock_bh(&adapter->mcc_lock);
2466 if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
2467 msecs_to_jiffies(40000)))
2468 status = -ETIMEDOUT;
2470 status = adapter->flash_status;
2475 spin_unlock_bh(&adapter->mcc_lock);
2479 int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
2480 u16 optype, int offset)
2482 struct be_mcc_wrb *wrb;
2483 struct be_cmd_read_flash_crc *req;
2486 spin_lock_bh(&adapter->mcc_lock);
2488 wrb = wrb_from_mccq(adapter);
2493 req = embedded_payload(wrb);
2495 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2496 OPCODE_COMMON_READ_FLASHROM, sizeof(*req),
2499 req->params.op_type = cpu_to_le32(optype);
2500 req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
2501 req->params.offset = cpu_to_le32(offset);
2502 req->params.data_buf_size = cpu_to_le32(0x4);
2504 status = be_mcc_notify_wait(adapter);
2506 memcpy(flashed_crc, req->crc, 4);
2509 spin_unlock_bh(&adapter->mcc_lock);
2513 int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
2514 struct be_dma_mem *nonemb_cmd)
2516 struct be_mcc_wrb *wrb;
2517 struct be_cmd_req_acpi_wol_magic_config *req;
2520 spin_lock_bh(&adapter->mcc_lock);
2522 wrb = wrb_from_mccq(adapter);
2527 req = nonemb_cmd->va;
2529 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
2530 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req),
2532 memcpy(req->magic_mac, mac, ETH_ALEN);
2534 status = be_mcc_notify_wait(adapter);
2537 spin_unlock_bh(&adapter->mcc_lock);
2541 int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
2542 u8 loopback_type, u8 enable)
2544 struct be_mcc_wrb *wrb;
2545 struct be_cmd_req_set_lmode *req;
2548 spin_lock_bh(&adapter->mcc_lock);
2550 wrb = wrb_from_mccq(adapter);
2556 req = embedded_payload(wrb);
2558 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2559 OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req),
2562 req->src_port = port_num;
2563 req->dest_port = port_num;
2564 req->loopback_type = loopback_type;
2565 req->loopback_state = enable;
2567 status = be_mcc_notify_wait(adapter);
2569 spin_unlock_bh(&adapter->mcc_lock);
2573 int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
2574 u32 loopback_type, u32 pkt_size, u32 num_pkts,
2577 struct be_mcc_wrb *wrb;
2578 struct be_cmd_req_loopback_test *req;
2579 struct be_cmd_resp_loopback_test *resp;
2582 spin_lock_bh(&adapter->mcc_lock);
2584 wrb = wrb_from_mccq(adapter);
2590 req = embedded_payload(wrb);
2592 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2593 OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb,
2596 req->hdr.timeout = cpu_to_le32(15);
2597 req->pattern = cpu_to_le64(pattern);
2598 req->src_port = cpu_to_le32(port_num);
2599 req->dest_port = cpu_to_le32(port_num);
2600 req->pkt_size = cpu_to_le32(pkt_size);
2601 req->num_pkts = cpu_to_le32(num_pkts);
2602 req->loopback_type = cpu_to_le32(loopback_type);
2604 be_mcc_notify(adapter);
2606 spin_unlock_bh(&adapter->mcc_lock);
2608 wait_for_completion(&adapter->et_cmd_compl);
2609 resp = embedded_payload(wrb);
2610 status = le32_to_cpu(resp->status);
2614 spin_unlock_bh(&adapter->mcc_lock);
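/* Hedged sketch of the enable -> test -> disable sequence an ethtool
 * self-test might drive through the two commands above. The loopback type
 * value (1, taken here to mean MAC loopback) and the packet parameters are
 * illustrative; the trailing u64 argument is assumed from the req->pattern
 * field set above.
 */
static int example_mac_loopback_selftest(struct be_adapter *adapter, u8 port)
{
	int status;

	status = be_cmd_set_loopback(adapter, port, 1, 1);
	if (status)
		return status;

	status = be_cmd_loopback_test(adapter, port, 1, 1500, 2,
				      0xA5A5A5A5A5A5A5A5ULL);

	/* always try to restore the port, even if the test failed */
	be_cmd_set_loopback(adapter, port, 1, 0);
	return status;
}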
2618 int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
2619 u32 byte_cnt, struct be_dma_mem *cmd)
2621 struct be_mcc_wrb *wrb;
2622 struct be_cmd_req_ddrdma_test *req;
2626 spin_lock_bh(&adapter->mcc_lock);
2628 wrb = wrb_from_mccq(adapter);
2634 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2635 OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb,
2638 req->pattern = cpu_to_le64(pattern);
2639 req->byte_count = cpu_to_le32(byte_cnt);
2640 for (i = 0; i < byte_cnt; i++) {
2641 req->snd_buff[i] = (u8)(pattern >> ((i % 8) * 8));
2647 status = be_mcc_notify_wait(adapter);
2650 struct be_cmd_resp_ddrdma_test *resp;
2652 resp = cmd->va;
2653 if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
2654 resp->snd_err)
2655 status = -1;
2660 spin_unlock_bh(&adapter->mcc_lock);
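/* Illustration of the DDR-DMA buffer fill above: the 64-bit pattern is laid
 * down one byte per iteration, least-significant byte first, repeating every
 * eight bytes, so the firmware can echo it back for comparison.
 */
static void example_fill_dma_pattern(u8 *buf, u32 len, u64 pattern)
{
	u32 i;

	for (i = 0; i < len; i++)
		buf[i] = (u8)(pattern >> ((i % 8) * 8));
	/* e.g. 0x1122334455667788 becomes 88 77 66 55 44 33 22 11 88 ... */
}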
2664 int be_cmd_get_seeprom_data(struct be_adapter *adapter,
2665 struct be_dma_mem *nonemb_cmd)
2667 struct be_mcc_wrb *wrb;
2668 struct be_cmd_req_seeprom_read *req;
2671 spin_lock_bh(&adapter->mcc_lock);
2673 wrb = wrb_from_mccq(adapter);
2678 req = nonemb_cmd->va;
2680 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2681 OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
2684 status = be_mcc_notify_wait(adapter);
2687 spin_unlock_bh(&adapter->mcc_lock);
2691 int be_cmd_get_phy_info(struct be_adapter *adapter)
2693 struct be_mcc_wrb *wrb;
2694 struct be_cmd_req_get_phy_info *req;
2695 struct be_dma_mem cmd;
2698 if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_PHY_DETAILS,
2699 CMD_SUBSYSTEM_COMMON))
2702 spin_lock_bh(&adapter->mcc_lock);
2704 wrb = wrb_from_mccq(adapter);
2709 cmd.size = sizeof(struct be_cmd_req_get_phy_info);
2710 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
2712 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
2719 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2720 OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
2723 status = be_mcc_notify_wait(adapter);
2725 struct be_phy_info *resp_phy_info =
2726 cmd.va + sizeof(struct be_cmd_req_hdr);
2728 adapter->phy.phy_type = le16_to_cpu(resp_phy_info->phy_type);
2729 adapter->phy.interface_type =
2730 le16_to_cpu(resp_phy_info->interface_type);
2731 adapter->phy.auto_speeds_supported =
2732 le16_to_cpu(resp_phy_info->auto_speeds_supported);
2733 adapter->phy.fixed_speeds_supported =
2734 le16_to_cpu(resp_phy_info->fixed_speeds_supported);
2735 adapter->phy.misc_params =
2736 le32_to_cpu(resp_phy_info->misc_params);
2738 if (BE2_chip(adapter)) {
2739 adapter->phy.fixed_speeds_supported =
2740 BE_SUPPORTED_SPEED_10GBPS |
2741 BE_SUPPORTED_SPEED_1GBPS;
2744 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
2746 spin_unlock_bh(&adapter->mcc_lock);
2750 static int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
2752 struct be_mcc_wrb *wrb;
2753 struct be_cmd_req_set_qos *req;
2756 spin_lock_bh(&adapter->mcc_lock);
2758 wrb = wrb_from_mccq(adapter);
2764 req = embedded_payload(wrb);
2766 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2767 OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);
2769 req->hdr.domain = domain;
2770 req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
2771 req->max_bps_nic = cpu_to_le32(bps);
2773 status = be_mcc_notify_wait(adapter);
2776 spin_unlock_bh(&adapter->mcc_lock);
2780 int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
2782 struct be_mcc_wrb *wrb;
2783 struct be_cmd_req_cntl_attribs *req;
2784 struct be_cmd_resp_cntl_attribs *resp;
2786 int payload_len = max(sizeof(*req), sizeof(*resp));
2787 struct mgmt_controller_attrib *attribs;
2788 struct be_dma_mem attribs_cmd;
2790 if (mutex_lock_interruptible(&adapter->mbox_lock))
2793 memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
2794 attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
2795 attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
2797 if (!attribs_cmd.va) {
2798 dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
2803 wrb = wrb_from_mbox(adapter);
2808 req = attribs_cmd.va;
2810 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2811 OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len,
2814 status = be_mbox_notify_wait(adapter);
2816 attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
2817 adapter->hba_port_num = attribs->hba_attribs.phy_port;
2821 mutex_unlock(&adapter->mbox_lock);
2823 pci_free_consistent(adapter->pdev, attribs_cmd.size,
2824 attribs_cmd.va, attribs_cmd.dma);
2829 int be_cmd_req_native_mode(struct be_adapter *adapter)
2831 struct be_mcc_wrb *wrb;
2832 struct be_cmd_req_set_func_cap *req;
2835 if (mutex_lock_interruptible(&adapter->mbox_lock))
2838 wrb = wrb_from_mbox(adapter);
2844 req = embedded_payload(wrb);
2846 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2847 OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP,
2848 sizeof(*req), wrb, NULL);
2850 req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
2851 CAPABILITY_BE3_NATIVE_ERX_API);
2852 req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);
2854 status = be_mbox_notify_wait(adapter);
2856 struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
2858 adapter->be3_native = le32_to_cpu(resp->cap_flags) &
2859 CAPABILITY_BE3_NATIVE_ERX_API;
2860 if (!adapter->be3_native)
2861 dev_warn(&adapter->pdev->dev,
2862 "adapter not in advanced mode\n");
2865 mutex_unlock(&adapter->mbox_lock);
2869 /* Get privilege(s) for a function */
2870 int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
2873 struct be_mcc_wrb *wrb;
2874 struct be_cmd_req_get_fn_privileges *req;
2877 spin_lock_bh(&adapter->mcc_lock);
2879 wrb = wrb_from_mccq(adapter);
2885 req = embedded_payload(wrb);
2887 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2888 OPCODE_COMMON_GET_FN_PRIVILEGES, sizeof(*req),
2891 req->hdr.domain = domain;
2893 status = be_mcc_notify_wait(adapter);
2895 struct be_cmd_resp_get_fn_privileges *resp =
2896 embedded_payload(wrb);
2898 *privilege = le32_to_cpu(resp->privilege_mask);
2900 /* In UMC mode the FW does not return the right privileges.
2901 * Override them with privileges equivalent to a PF. */
2903 if (BEx_chip(adapter) && be_is_mc(adapter) &&
2904 be_physfn(adapter))
2905 *privilege = MAX_PRIVILEGES;
2909 spin_unlock_bh(&adapter->mcc_lock);
2913 /* Set privilege(s) for a function */
2914 int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
2917 struct be_mcc_wrb *wrb;
2918 struct be_cmd_req_set_fn_privileges *req;
2921 spin_lock_bh(&adapter->mcc_lock);
2923 wrb = wrb_from_mccq(adapter);
2929 req = embedded_payload(wrb);
2930 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2931 OPCODE_COMMON_SET_FN_PRIVILEGES, sizeof(*req),
2933 req->hdr.domain = domain;
2934 if (lancer_chip(adapter))
2935 req->privileges_lancer = cpu_to_le32(privileges);
2937 req->privileges = cpu_to_le32(privileges);
2939 status = be_mcc_notify_wait(adapter);
2941 spin_unlock_bh(&adapter->mcc_lock);
2945 /* pmac_id_valid: true => pmac_id is supplied and its MAC address is requested.
2946 * pmac_id_valid: false => an active pmac_id or, failing that, a permanent MAC
2947 * address is requested; if an active pmac_id is found, *pmac_id_valid is set
2948 * to true on return. */
2949 int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
2950 bool *pmac_id_valid, u32 *pmac_id, u32 if_handle,
2953 struct be_mcc_wrb *wrb;
2954 struct be_cmd_req_get_mac_list *req;
2957 struct be_dma_mem get_mac_list_cmd;
2960 memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
2961 get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
2962 get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev,
2963 get_mac_list_cmd.size,
2964 &get_mac_list_cmd.dma);
2966 if (!get_mac_list_cmd.va) {
2967 dev_err(&adapter->pdev->dev,
2968 "Memory allocation failure during GET_MAC_LIST\n");
2972 spin_lock_bh(&adapter->mcc_lock);
2974 wrb = wrb_from_mccq(adapter);
2980 req = get_mac_list_cmd.va;
2982 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2983 OPCODE_COMMON_GET_MAC_LIST,
2984 get_mac_list_cmd.size, wrb, &get_mac_list_cmd);
2985 req->hdr.domain = domain;
2986 req->mac_type = MAC_ADDRESS_TYPE_NETWORK;
2987 if (*pmac_id_valid) {
2988 req->mac_id = cpu_to_le32(*pmac_id);
2989 req->iface_id = cpu_to_le16(if_handle);
2990 req->perm_override = 0;
2991 } else {
2992 req->perm_override = 1;
2995 status = be_mcc_notify_wait(adapter);
2997 struct be_cmd_resp_get_mac_list *resp =
2998 get_mac_list_cmd.va;
3000 if (*pmac_id_valid) {
3001 memcpy(mac, resp->macid_macaddr.mac_addr_id.macaddr,
3002 ETH_ALEN);
3003 goto out;
3006 mac_count = resp->true_mac_count + resp->pseudo_mac_count;
3007 /* The MAC list returned could contain one or more active mac_ids
3008 * or one or more true or pseudo permanent MAC addresses.
3009 * If an active mac_id is present, return the first active mac_id found. */
3012 for (i = 0; i < mac_count; i++) {
3013 struct get_list_macaddr *mac_entry;
3017 mac_entry = &resp->macaddr_list[i];
3018 mac_addr_size = le16_to_cpu(mac_entry->mac_addr_size);
3019 /* mac_id is a 32-bit value while a MAC address is 6 bytes in size */
3022 if (mac_addr_size == sizeof(u32)) {
3023 *pmac_id_valid = true;
3024 mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id;
3025 *pmac_id = le32_to_cpu(mac_id);
3029 /* If no active mac_id found, return first mac addr */
3030 *pmac_id_valid = false;
3031 memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
3032 ETH_ALEN);
3036 spin_unlock_bh(&adapter->mcc_lock);
3037 pci_free_consistent(adapter->pdev, get_mac_list_cmd.size,
3038 get_mac_list_cmd.va, get_mac_list_cmd.dma);
3042 int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id,
3043 u8 *mac, u32 if_handle, bool active, u32 domain)
3046 be_cmd_get_mac_from_list(adapter, mac, &active, &curr_pmac_id,
3047 if_handle, domain);
3048 if (BEx_chip(adapter))
3049 return be_cmd_mac_addr_query(adapter, mac, false,
3050 if_handle, curr_pmac_id);
3052 /* Fetch the MAC address using pmac_id */
3053 return be_cmd_get_mac_from_list(adapter, mac, &active,
3054 &curr_pmac_id, if_handle, domain);
3058 int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac)
3061 bool pmac_valid = false;
3063 memset(mac, 0, ETH_ALEN);
3065 if (BEx_chip(adapter)) {
3066 if (be_physfn(adapter))
3067 status = be_cmd_mac_addr_query(adapter, mac, true, 0,
3068 0);
3070 status = be_cmd_mac_addr_query(adapter, mac, false,
3071 adapter->if_handle, 0);
3073 status = be_cmd_get_mac_from_list(adapter, mac, &pmac_valid,
3074 NULL, adapter->if_handle, 0);
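/* Hedged sketch: how probe-time code might use the query above to seed the
 * netdev address, falling back to a random MAC on failure. The fallback
 * policy is illustrative, not taken from this driver; eth_random_addr()
 * comes from <linux/etherdevice.h>.
 */
static void example_init_netdev_mac(struct be_adapter *adapter)
{
	u8 mac[ETH_ALEN];

	if (be_cmd_get_perm_mac(adapter, mac))
		eth_random_addr(mac);	/* assumed fallback, not driver policy */
	memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
}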
3080 /* Uses synchronous MCCQ */
3081 int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
3082 u8 mac_count, u32 domain)
3084 struct be_mcc_wrb *wrb;
3085 struct be_cmd_req_set_mac_list *req;
3087 struct be_dma_mem cmd;
3089 memset(&cmd, 0, sizeof(struct be_dma_mem));
3090 cmd.size = sizeof(struct be_cmd_req_set_mac_list);
3091 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
3092 &cmd.dma, GFP_KERNEL);
3096 spin_lock_bh(&adapter->mcc_lock);
3098 wrb = wrb_from_mccq(adapter);
3105 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3106 OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
3109 req->hdr.domain = domain;
3110 req->mac_count = mac_count;
3112 memcpy(req->mac, mac_array, ETH_ALEN*mac_count);
3114 status = be_mcc_notify_wait(adapter);
3117 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
3118 spin_unlock_bh(&adapter->mcc_lock);
3122 /* Wrapper to delete any active MACs and provision the new MAC.
3123 * Changes to the MAC_LIST are allowed iff none of the MAC addresses in the
3124 * current list are active. */
3126 int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom)
3128 bool active_mac = false;
3129 u8 old_mac[ETH_ALEN];
3133 status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
3134 &pmac_id, if_id, dom);
3136 if (!status && active_mac)
3137 be_cmd_pmac_del(adapter, if_id, pmac_id, dom);
3139 return be_cmd_set_mac_list(adapter, mac, mac ? 1 : 0, dom);
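/* Hedged sketch: an ndo_set_vf_mac()-style caller for the wrapper above. The
 * vf_cfg array and its if_handle field match usage elsewhere in this file,
 * and treating domain as vf + 1 follows the convention of the other VF
 * commands here.
 */
static int example_set_vf_mac(struct be_adapter *adapter, int vf, u8 *mac)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	return be_cmd_set_mac(adapter, mac, vf_cfg->if_handle, vf + 1);
}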
3142 int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
3143 u32 domain, u16 intf_id, u16 hsw_mode)
3145 struct be_mcc_wrb *wrb;
3146 struct be_cmd_req_set_hsw_config *req;
3150 spin_lock_bh(&adapter->mcc_lock);
3152 wrb = wrb_from_mccq(adapter);
3158 req = embedded_payload(wrb);
3159 ctxt = &req->context;
3161 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3162 OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb,
3165 req->hdr.domain = domain;
3166 AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id);
3168 AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1);
3169 AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid);
3171 if (!BEx_chip(adapter) && hsw_mode) {
3172 AMAP_SET_BITS(struct amap_set_hsw_context, interface_id,
3173 ctxt, adapter->hba_port_num);
3174 AMAP_SET_BITS(struct amap_set_hsw_context, pport, ctxt, 1);
3175 AMAP_SET_BITS(struct amap_set_hsw_context, port_fwd_type,
3176 ctxt, hsw_mode);
3179 be_dws_cpu_to_le(req->context, sizeof(req->context));
3180 status = be_mcc_notify_wait(adapter);
3183 spin_unlock_bh(&adapter->mcc_lock);
3187 /* Get Hyper switch config */
3188 int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
3189 u32 domain, u16 intf_id, u8 *mode)
3191 struct be_mcc_wrb *wrb;
3192 struct be_cmd_req_get_hsw_config *req;
3197 spin_lock_bh(&adapter->mcc_lock);
3199 wrb = wrb_from_mccq(adapter);
3205 req = embedded_payload(wrb);
3206 ctxt = &req->context;
3208 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3209 OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb,
3212 req->hdr.domain = domain;
3213 AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
3214 ctxt, intf_id);
3215 AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1);
3217 if (!BEx_chip(adapter) && mode) {
3218 AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
3219 ctxt, adapter->hba_port_num);
3220 AMAP_SET_BITS(struct amap_get_hsw_req_context, pport, ctxt, 1);
3222 be_dws_cpu_to_le(req->context, sizeof(req->context));
3224 status = be_mcc_notify_wait(adapter);
3226 struct be_cmd_resp_get_hsw_config *resp =
3227 embedded_payload(wrb);
3229 be_dws_le_to_cpu(&resp->context, sizeof(resp->context));
3230 vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
3231 pvid, &resp->context);
3233 *pvid = le16_to_cpu(vid);
3235 *mode = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
3236 port_fwd_type, &resp->context);
3240 spin_unlock_bh(&adapter->mcc_lock);
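/* Hedged sketch: querying a VF's transparent-tagging PVID with the command
 * above. Passing NULL for mode assumes the command NULL-guards its optional
 * out-parameters, as the pvid path suggests; domain vf + 1 follows the other
 * VF commands in this file.
 */
static int example_get_vf_pvid(struct be_adapter *adapter, int vf, u16 *pvid)
{
	return be_cmd_get_hsw_config(adapter, pvid, vf + 1,
				     adapter->vf_cfg[vf].if_handle, NULL);
}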
3244 static bool be_is_wol_excluded(struct be_adapter *adapter)
3246 struct pci_dev *pdev = adapter->pdev;
3248 if (!be_physfn(adapter))
3249 return true;
3251 switch (pdev->subsystem_device) {
3252 case OC_SUBSYS_DEVICE_ID1:
3253 case OC_SUBSYS_DEVICE_ID2:
3254 case OC_SUBSYS_DEVICE_ID3:
3255 case OC_SUBSYS_DEVICE_ID4:
3256 return true;
3257 default:
3258 return false;
3262 int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
3264 struct be_mcc_wrb *wrb;
3265 struct be_cmd_req_acpi_wol_magic_config_v1 *req;
3267 struct be_dma_mem cmd;
3269 if (!be_cmd_allowed(adapter, OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
3273 if (be_is_wol_excluded(adapter))
3276 if (mutex_lock_interruptible(&adapter->mbox_lock))
3279 memset(&cmd, 0, sizeof(struct be_dma_mem));
3280 cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
3281 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
3283 dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
3288 wrb = wrb_from_mbox(adapter);
3296 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
3297 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
3298 sizeof(*req), wrb, &cmd);
3300 req->hdr.version = 1;
3301 req->query_options = BE_GET_WOL_CAP;
3303 status = be_mbox_notify_wait(adapter);
3305 struct be_cmd_resp_acpi_wol_magic_config_v1 *resp;
3307 resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *)cmd.va;
3309 adapter->wol_cap = resp->wol_settings;
3310 if (adapter->wol_cap & BE_WOL_CAP)
3311 adapter->wol_en = true;
3314 mutex_unlock(&adapter->mbox_lock);
3316 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
3321 int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level)
3323 struct be_dma_mem extfat_cmd;
3324 struct be_fat_conf_params *cfgs;
3328 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3329 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3330 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
3335 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3339 cfgs = (struct be_fat_conf_params *)
3340 (extfat_cmd.va + sizeof(struct be_cmd_resp_hdr));
3341 for (i = 0; i < le32_to_cpu(cfgs->num_modules); i++) {
3342 u32 num_modes = le32_to_cpu(cfgs->module[i].num_modes);
3344 for (j = 0; j < num_modes; j++) {
3345 if (cfgs->module[i].trace_lvl[j].mode == MODE_UART)
3346 cfgs->module[i].trace_lvl[j].dbg_lvl =
3351 status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, cfgs);
3353 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
3358 int be_cmd_get_fw_log_level(struct be_adapter *adapter)
3360 struct be_dma_mem extfat_cmd;
3361 struct be_fat_conf_params *cfgs;
3365 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3366 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3367 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
3370 if (!extfat_cmd.va) {
3371 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
3376 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3378 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
3379 sizeof(struct be_cmd_resp_hdr));
3381 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
3382 if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
3383 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
3386 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
3392 int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
3393 struct be_dma_mem *cmd)
3395 struct be_mcc_wrb *wrb;
3396 struct be_cmd_req_get_ext_fat_caps *req;
3399 if (mutex_lock_interruptible(&adapter->mbox_lock))
3402 wrb = wrb_from_mbox(adapter);
3409 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3410 OPCODE_COMMON_GET_EXT_FAT_CAPABILITES,
3411 cmd->size, wrb, cmd);
3412 req->parameter_type = cpu_to_le32(1);
3414 status = be_mbox_notify_wait(adapter);
3416 mutex_unlock(&adapter->mbox_lock);
3420 int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
3421 struct be_dma_mem *cmd,
3422 struct be_fat_conf_params *configs)
3424 struct be_mcc_wrb *wrb;
3425 struct be_cmd_req_set_ext_fat_caps *req;
3428 spin_lock_bh(&adapter->mcc_lock);
3430 wrb = wrb_from_mccq(adapter);
3437 memcpy(&req->set_params, configs, sizeof(struct be_fat_conf_params));
3438 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3439 OPCODE_COMMON_SET_EXT_FAT_CAPABILITES,
3440 cmd->size, wrb, cmd);
3442 status = be_mcc_notify_wait(adapter);
3444 spin_unlock_bh(&adapter->mcc_lock);
3448 int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name)
3450 struct be_mcc_wrb *wrb;
3451 struct be_cmd_req_get_port_name *req;
3454 if (!lancer_chip(adapter)) {
3455 *port_name = adapter->hba_port_num + '0';
3459 spin_lock_bh(&adapter->mcc_lock);
3461 wrb = wrb_from_mccq(adapter);
3467 req = embedded_payload(wrb);
3469 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3470 OPCODE_COMMON_GET_PORT_NAME, sizeof(*req), wrb,
3472 req->hdr.version = 1;
3474 status = be_mcc_notify_wait(adapter);
3476 struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb);
3478 *port_name = resp->port_name[adapter->hba_port_num];
3479 } else {
3480 *port_name = adapter->hba_port_num + '0';
3483 spin_unlock_bh(&adapter->mcc_lock);
3487 /* Descriptor type */
3488 enum {
3489 FUNC_DESC = 1,
3490 VFT_DESC = 2
3491 };
3493 static struct be_nic_res_desc *be_get_nic_desc(u8 *buf, u32 desc_count,
3496 struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
3497 struct be_nic_res_desc *nic;
3500 for (i = 0; i < desc_count; i++) {
3501 if (hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V0 ||
3502 hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V1) {
3503 nic = (struct be_nic_res_desc *)hdr;
3504 if (desc_type == FUNC_DESC ||
3505 (desc_type == VFT_DESC &&
3506 nic->flags & (1 << VFT_SHIFT)))
3510 hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
3511 hdr = (void *)hdr + hdr->desc_len;
3516 static struct be_nic_res_desc *be_get_vft_desc(u8 *buf, u32 desc_count)
3518 return be_get_nic_desc(buf, desc_count, VFT_DESC);
3521 static struct be_nic_res_desc *be_get_func_nic_desc(u8 *buf, u32 desc_count)
3523 return be_get_nic_desc(buf, desc_count, FUNC_DESC);
3526 static struct be_pcie_res_desc *be_get_pcie_desc(u8 devfn, u8 *buf,
3529 struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
3530 struct be_pcie_res_desc *pcie;
3533 for (i = 0; i < desc_count; i++) {
3534 if ((hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V0 ||
3535 hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V1)) {
3536 pcie = (struct be_pcie_res_desc *)hdr;
3537 if (pcie->pf_num == devfn)
3541 hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
3542 hdr = (void *)hdr + hdr->desc_len;
3547 static struct be_port_res_desc *be_get_port_desc(u8 *buf, u32 desc_count)
3549 struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
3552 for (i = 0; i < desc_count; i++) {
3553 if (hdr->desc_type == PORT_RESOURCE_DESC_TYPE_V1)
3554 return (struct be_port_res_desc *)hdr;
3556 hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
3557 hdr = (void *)hdr + hdr->desc_len;
3562 static void be_copy_nic_desc(struct be_resources *res,
3563 struct be_nic_res_desc *desc)
3565 res->max_uc_mac = le16_to_cpu(desc->unicast_mac_count);
3566 res->max_vlans = le16_to_cpu(desc->vlan_count);
3567 res->max_mcast_mac = le16_to_cpu(desc->mcast_mac_count);
3568 res->max_tx_qs = le16_to_cpu(desc->txq_count);
3569 res->max_rss_qs = le16_to_cpu(desc->rssq_count);
3570 res->max_rx_qs = le16_to_cpu(desc->rq_count);
3571 res->max_evt_qs = le16_to_cpu(desc->eq_count);
3572 /* Clear flags that driver is not interested in */
3573 res->if_cap_flags = le32_to_cpu(desc->cap_flags) &
3574 BE_IF_CAP_FLAGS_WANT;
3575 /* Need 1 RXQ as the default RXQ */
3576 if (res->max_rss_qs && res->max_rss_qs == res->max_rx_qs)
3577 res->max_rss_qs -= 1;
3581 int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
3583 struct be_mcc_wrb *wrb;
3584 struct be_cmd_req_get_func_config *req;
3586 struct be_dma_mem cmd;
3588 if (mutex_lock_interruptible(&adapter->mbox_lock))
3591 memset(&cmd, 0, sizeof(struct be_dma_mem));
3592 cmd.size = sizeof(struct be_cmd_resp_get_func_config);
3593 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
3595 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
3600 wrb = wrb_from_mbox(adapter);
3608 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3609 OPCODE_COMMON_GET_FUNC_CONFIG,
3610 cmd.size, wrb, &cmd);
3612 if (skyhawk_chip(adapter))
3613 req->hdr.version = 1;
3615 status = be_mbox_notify_wait(adapter);
3617 struct be_cmd_resp_get_func_config *resp = cmd.va;
3618 u32 desc_count = le32_to_cpu(resp->desc_count);
3619 struct be_nic_res_desc *desc;
3621 desc = be_get_func_nic_desc(resp->func_param, desc_count);
3627 adapter->pf_number = desc->pf_num;
3628 be_copy_nic_desc(res, desc);
3631 mutex_unlock(&adapter->mbox_lock);
3633 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
3637 /* Will use MBOX only if MCCQ has not been created */
3638 int be_cmd_get_profile_config(struct be_adapter *adapter,
3639 struct be_resources *res, u8 domain)
3641 struct be_cmd_resp_get_profile_config *resp;
3642 struct be_cmd_req_get_profile_config *req;
3643 struct be_nic_res_desc *vf_res;
3644 struct be_pcie_res_desc *pcie;
3645 struct be_port_res_desc *port;
3646 struct be_nic_res_desc *nic;
3647 struct be_mcc_wrb wrb = {0};
3648 struct be_dma_mem cmd;
3652 memset(&cmd, 0, sizeof(struct be_dma_mem));
3653 cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
3654 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
3659 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3660 OPCODE_COMMON_GET_PROFILE_CONFIG,
3661 cmd.size, &wrb, &cmd);
3663 req->hdr.domain = domain;
3664 if (!lancer_chip(adapter))
3665 req->hdr.version = 1;
3666 req->type = ACTIVE_PROFILE_TYPE;
3668 status = be_cmd_notify_wait(adapter, &wrb);
3673 desc_count = le32_to_cpu(resp->desc_count);
3675 pcie = be_get_pcie_desc(adapter->pdev->devfn, resp->func_param,
3678 res->max_vfs = le16_to_cpu(pcie->num_vfs);
3680 port = be_get_port_desc(resp->func_param, desc_count);
3682 adapter->mc_type = port->mc_type;
3684 nic = be_get_func_nic_desc(resp->func_param, desc_count);
3686 be_copy_nic_desc(res, nic);
3688 vf_res = be_get_vft_desc(resp->func_param, desc_count);
3690 res->vf_if_cap_flags = vf_res->cap_flags;
3693 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
3697 /* Will use MBOX only if MCCQ has not been created */
3698 static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
3699 int size, int count, u8 version, u8 domain)
3701 struct be_cmd_req_set_profile_config *req;
3702 struct be_mcc_wrb wrb = {0};
3703 struct be_dma_mem cmd;
3706 memset(&cmd, 0, sizeof(struct be_dma_mem));
3707 cmd.size = sizeof(struct be_cmd_req_set_profile_config);
3708 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
3713 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3714 OPCODE_COMMON_SET_PROFILE_CONFIG, cmd.size,
3716 req->hdr.version = version;
3717 req->hdr.domain = domain;
3718 req->desc_count = cpu_to_le32(count);
3719 memcpy(req->desc, desc, size);
3721 status = be_cmd_notify_wait(adapter, &wrb);
3724 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
3728 /* Mark all fields invalid */
3729 static void be_reset_nic_desc(struct be_nic_res_desc *nic)
3731 memset(nic, 0, sizeof(*nic));
3732 nic->unicast_mac_count = 0xFFFF;
3733 nic->mcc_count = 0xFFFF;
3734 nic->vlan_count = 0xFFFF;
3735 nic->mcast_mac_count = 0xFFFF;
3736 nic->txq_count = 0xFFFF;
3737 nic->rq_count = 0xFFFF;
3738 nic->rssq_count = 0xFFFF;
3739 nic->lro_count = 0xFFFF;
3740 nic->cq_count = 0xFFFF;
3741 nic->toe_conn_count = 0xFFFF;
3742 nic->eq_count = 0xFFFF;
3743 nic->iface_count = 0xFFFF;
3744 nic->link_param = 0xFF;
3745 nic->channel_id_param = cpu_to_le16(0xF000);
3746 nic->acpi_params = 0xFF;
3747 nic->wol_param = 0x0F;
3748 nic->tunnel_iface_count = 0xFFFF;
3749 nic->direct_tenant_iface_count = 0xFFFF;
3750 nic->bw_min = 0xFFFFFFFF;
3751 nic->bw_max = 0xFFFFFFFF;
3754 /* Mark all fields invalid */
3755 static void be_reset_pcie_desc(struct be_pcie_res_desc *pcie)
3757 memset(pcie, 0, sizeof(*pcie));
3758 pcie->sriov_state = 0xFF;
3759 pcie->pf_state = 0xFF;
3760 pcie->pf_type = 0xFF;
3761 pcie->num_vfs = 0xFFFF;
3764 int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed,
3767 struct be_nic_res_desc nic_desc;
3771 if (BE3_chip(adapter))
3772 return be_cmd_set_qos(adapter, max_rate / 10, domain);
3774 be_reset_nic_desc(&nic_desc);
3775 nic_desc.pf_num = adapter->pf_number;
3776 nic_desc.vf_num = domain;
3777 nic_desc.bw_min = 0;
3778 if (lancer_chip(adapter)) {
3779 nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0;
3780 nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0;
3781 nic_desc.flags = (1 << QUN_SHIFT) | (1 << IMM_SHIFT) |
3782 (1 << NOSV_SHIFT);
3783 nic_desc.bw_max = cpu_to_le32(max_rate / 10);
3786 nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1;
3787 nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
3788 nic_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
3789 bw_percent = max_rate ? (max_rate * 100) / link_speed : 100;
3790 nic_desc.bw_max = cpu_to_le32(bw_percent);
3793 return be_cmd_set_profile_config(adapter, &nic_desc,
3794 nic_desc.hdr.desc_len,
3795 1, version, domain);
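/* Hedged worked example of the non-Lancer branch above: with a hypothetical
 * link_speed of 10000 Mb/s and max_rate of 2500 Mb/s, bw_percent becomes
 * (2500 * 100) / 10000 = 25, i.e. the function is capped at a quarter of the
 * link; max_rate == 0 yields 100 (no cap).
 */
static u32 example_bw_percent(u32 max_rate, u16 link_speed)
{
	return max_rate ? (max_rate * 100) / link_speed : 100;
}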
3798 int be_cmd_set_sriov_config(struct be_adapter *adapter,
3799 struct be_resources res, u16 num_vfs)
3802 struct be_pcie_res_desc pcie;
3803 struct be_nic_res_desc nic_vft;
3807 if (BEx_chip(adapter) || lancer_chip(adapter))
3810 /* PF PCIE descriptor */
3811 be_reset_pcie_desc(&desc.pcie);
3812 desc.pcie.hdr.desc_type = PCIE_RESOURCE_DESC_TYPE_V1;
3813 desc.pcie.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
3814 desc.pcie.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
3815 desc.pcie.pf_num = adapter->pdev->devfn;
3816 desc.pcie.sriov_state = num_vfs ? 1 : 0;
3817 desc.pcie.num_vfs = cpu_to_le16(num_vfs);
3819 /* VF NIC Template descriptor */
3820 be_reset_nic_desc(&desc.nic_vft);
3821 desc.nic_vft.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1;
3822 desc.nic_vft.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
3823 desc.nic_vft.flags = (1 << VFT_SHIFT) | (1 << IMM_SHIFT) |
3824 (1 << NOSV_SHIFT);
3825 desc.nic_vft.pf_num = adapter->pdev->devfn;
3826 desc.nic_vft.vf_num = 0;
3828 if (num_vfs && res.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
3829 /* If the number of VFs requested is more than 8 below the max supported,
3830 * assign 8 queue pairs to the PF and divide the remaining
3831 * resources evenly among the VFs. */
3833 if (num_vfs < (be_max_vfs(adapter) - 8))
3834 vf_q_count = (res.max_rss_qs - 8) / num_vfs;
3836 vf_q_count = res.max_rss_qs / num_vfs;
3838 desc.nic_vft.rq_count = cpu_to_le16(vf_q_count);
3839 desc.nic_vft.txq_count = cpu_to_le16(vf_q_count);
3840 desc.nic_vft.rssq_count = cpu_to_le16(vf_q_count - 1);
3841 desc.nic_vft.cq_count = cpu_to_le16(3 * vf_q_count);
3843 desc.nic_vft.txq_count = cpu_to_le16(1);
3844 desc.nic_vft.rq_count = cpu_to_le16(1);
3845 desc.nic_vft.rssq_count = cpu_to_le16(0);
3846 /* One CQ for each TX, RX and MCCQ */
3847 desc.nic_vft.cq_count = cpu_to_le16(3);
3850 return be_cmd_set_profile_config(adapter, &desc,
3851 2 * RESOURCE_DESC_SIZE_V1, 2, 1, 0);
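/* Hedged worked example of the queue split above, with hypothetical numbers:
 * if be_max_vfs() is 32, res.max_rss_qs is 48 and 10 VFs are requested, then
 * 10 < (32 - 8), so 8 queue pairs stay with the PF and each VF is templated
 * with (48 - 8) / 10 = 4 RQs/TXQs, 4 - 1 = 3 RSS queues and 3 * 4 = 12 CQs.
 */
static u16 example_vf_q_count(u16 max_rss_qs, u16 max_vfs, u16 num_vfs)
{
	/* mirror of the policy above: reserve 8 qpairs for the PF when room */
	if (num_vfs < max_vfs - 8)
		return (max_rss_qs - 8) / num_vfs;
	return max_rss_qs / num_vfs;
}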
3854 int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op)
3856 struct be_mcc_wrb *wrb;
3857 struct be_cmd_req_manage_iface_filters *req;
3860 if (iface == 0xFFFFFFFF)
3861 return -1;
3863 spin_lock_bh(&adapter->mcc_lock);
3865 wrb = wrb_from_mccq(adapter);
3870 req = embedded_payload(wrb);
3872 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3873 OPCODE_COMMON_MANAGE_IFACE_FILTERS, sizeof(*req),
3876 req->target_iface_id = cpu_to_le32(iface);
3878 status = be_mcc_notify_wait(adapter);
3880 spin_unlock_bh(&adapter->mcc_lock);
3884 int be_cmd_set_vxlan_port(struct be_adapter *adapter, __be16 port)
3886 struct be_port_res_desc port_desc;
3888 memset(&port_desc, 0, sizeof(port_desc));
3889 port_desc.hdr.desc_type = PORT_RESOURCE_DESC_TYPE_V1;
3890 port_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
3891 port_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
3892 port_desc.link_num = adapter->hba_port_num;
3893 if (port) {
3894 port_desc.nv_flags = NV_TYPE_VXLAN | (1 << SOCVID_SHIFT) |
3895 (1 << RCVID_SHIFT);
3896 port_desc.nv_port = swab16(port);
3897 } else {
3898 port_desc.nv_flags = NV_TYPE_DISABLED;
3899 port_desc.nv_port = 0;
3902 return be_cmd_set_profile_config(adapter, &port_desc,
3903 RESOURCE_DESC_SIZE_V1, 1, 1, 0);
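/* Hedged sketch: enabling the VxLAN offload on the IANA-assigned UDP port
 * 4789. An ndo_add_vxlan_port()-style caller would pass the port it was
 * given instead; passing 0 takes the NV_TYPE_DISABLED branch above.
 */
static int example_enable_vxlan_offload(struct be_adapter *adapter)
{
	return be_cmd_set_vxlan_port(adapter, cpu_to_be16(4789));
}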
3906 int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
3909 struct be_mcc_wrb *wrb;
3910 struct be_cmd_req_get_iface_list *req;
3911 struct be_cmd_resp_get_iface_list *resp;
3914 spin_lock_bh(&adapter->mcc_lock);
3916 wrb = wrb_from_mccq(adapter);
3921 req = embedded_payload(wrb);
3923 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3924 OPCODE_COMMON_GET_IFACE_LIST, sizeof(*resp),
3926 req->hdr.domain = vf_num + 1;
3928 status = be_mcc_notify_wait(adapter);
3930 resp = (struct be_cmd_resp_get_iface_list *)req;
3931 vf_cfg->if_handle = le32_to_cpu(resp->if_desc.if_id);
3935 spin_unlock_bh(&adapter->mcc_lock);
3939 static int lancer_wait_idle(struct be_adapter *adapter)
3941 #define SLIPORT_IDLE_TIMEOUT 30
3945 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3946 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3947 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3953 if (i == SLIPORT_IDLE_TIMEOUT)
3954 status = -1;
3956 return status;
3959 int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask)
3963 status = lancer_wait_idle(adapter);
3967 iowrite32(mask, adapter->db + PHYSDEV_CONTROL_OFFSET);
3972 /* Check whether a diagnostic dump image is present */
3973 bool dump_present(struct be_adapter *adapter)
3975 u32 sliport_status = 0;
3977 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3978 return !!(sliport_status & SLIPORT_STATUS_DIP_MASK);
3981 int lancer_initiate_dump(struct be_adapter *adapter)
3983 struct device *dev = &adapter->pdev->dev;
3986 if (dump_present(adapter)) {
3987 dev_info(dev, "Previous dump not cleared, not forcing dump\n");
3991 /* ask the firmware for a reset and a diagnostic dump */
3992 status = lancer_physdev_ctrl(adapter, PHYSDEV_CONTROL_FW_RESET_MASK |
3993 PHYSDEV_CONTROL_DD_MASK);
3995 dev_err(dev, "FW reset failed\n");
3999 status = lancer_wait_idle(adapter);
4003 if (!dump_present(adapter)) {
4004 dev_err(dev, "FW dump not generated\n");
4011 int lancer_delete_dump(struct be_adapter *adapter)
4015 status = lancer_cmd_delete_object(adapter, LANCER_FW_DUMP_FILE);
4016 return be_cmd_status(status);
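/* Hedged sketch of the dump lifecycle the three routines above imply: clear
 * any stale image first (lancer_initiate_dump() refuses to overwrite one),
 * then trigger a fresh reset plus dump.
 */
static int example_refresh_dump(struct be_adapter *adapter)
{
	int status;

	if (dump_present(adapter)) {
		status = lancer_delete_dump(adapter);
		if (status)
			return status;
	}

	return lancer_initiate_dump(adapter);
}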
4020 int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
4022 struct be_mcc_wrb *wrb;
4023 struct be_cmd_enable_disable_vf *req;
4026 if (BEx_chip(adapter))
4027 return 0;
4029 spin_lock_bh(&adapter->mcc_lock);
4031 wrb = wrb_from_mccq(adapter);
4037 req = embedded_payload(wrb);
4039 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4040 OPCODE_COMMON_ENABLE_DISABLE_VF, sizeof(*req),
4043 req->hdr.domain = domain;
4045 status = be_mcc_notify_wait(adapter);
4047 spin_unlock_bh(&adapter->mcc_lock);
4051 int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable)
4053 struct be_mcc_wrb *wrb;
4054 struct be_cmd_req_intr_set *req;
4057 if (mutex_lock_interruptible(&adapter->mbox_lock))
4060 wrb = wrb_from_mbox(adapter);
4062 req = embedded_payload(wrb);
4064 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4065 OPCODE_COMMON_SET_INTERRUPT_ENABLE, sizeof(*req),
4068 req->intr_enabled = intr_enable;
4070 status = be_mbox_notify_wait(adapter);
4072 mutex_unlock(&adapter->mbox_lock);
4077 int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile_id)
4079 struct be_cmd_req_get_active_profile *req;
4080 struct be_mcc_wrb *wrb;
4083 if (mutex_lock_interruptible(&adapter->mbox_lock))
4086 wrb = wrb_from_mbox(adapter);
4092 req = embedded_payload(wrb);
4094 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4095 OPCODE_COMMON_GET_ACTIVE_PROFILE, sizeof(*req),
4098 status = be_mbox_notify_wait(adapter);
4100 struct be_cmd_resp_get_active_profile *resp =
4101 embedded_payload(wrb);
4103 *profile_id = le16_to_cpu(resp->active_profile_id);
4107 mutex_unlock(&adapter->mbox_lock);
4111 int be_cmd_set_logical_link_config(struct be_adapter *adapter,
4112 int link_state, u8 domain)
4114 struct be_mcc_wrb *wrb;
4115 struct be_cmd_req_set_ll_link *req;
4118 if (BEx_chip(adapter) || lancer_chip(adapter))
4119 return -EOPNOTSUPP;
4121 spin_lock_bh(&adapter->mcc_lock);
4123 wrb = wrb_from_mccq(adapter);
4129 req = embedded_payload(wrb);
4131 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4132 OPCODE_COMMON_SET_LOGICAL_LINK_CONFIG,
4133 sizeof(*req), wrb, NULL);
4135 req->hdr.version = 1;
4136 req->hdr.domain = domain;
4138 if (link_state == IFLA_VF_LINK_STATE_ENABLE)
4139 req->link_config |= 1;
4141 if (link_state == IFLA_VF_LINK_STATE_AUTO)
4142 req->link_config |= 1 << PLINK_TRACK_SHIFT;
4144 status = be_mcc_notify_wait(adapter);
4146 spin_unlock_bh(&adapter->mcc_lock);
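/* Hedged sketch: plumbing an ndo_set_vf_link_state() handler through the
 * command above. The IFLA_VF_LINK_STATE_* values come from
 * <uapi/linux/if_link.h>, and domain vf + 1 follows the convention used by
 * the other VF commands in this file.
 */
static int example_set_vf_link_state(struct be_adapter *adapter, int vf,
				     int link_state)
{
	return be_cmd_set_logical_link_config(adapter, link_state, vf + 1);
}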
4150 int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
4151 int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
4153 struct be_adapter *adapter = netdev_priv(netdev_handle);
4154 struct be_mcc_wrb *wrb;
4155 struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *)wrb_payload;
4156 struct be_cmd_req_hdr *req;
4157 struct be_cmd_resp_hdr *resp;
4160 spin_lock_bh(&adapter->mcc_lock);
4162 wrb = wrb_from_mccq(adapter);
4167 req = embedded_payload(wrb);
4168 resp = embedded_payload(wrb);
4170 be_wrb_cmd_hdr_prepare(req, hdr->subsystem,
4171 hdr->opcode, wrb_payload_size, wrb, NULL);
4172 memcpy(req, wrb_payload, wrb_payload_size);
4173 be_dws_cpu_to_le(req, wrb_payload_size);
4175 status = be_mcc_notify_wait(adapter);
4177 *cmd_status = (status & 0xffff);
4180 memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length);
4181 be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length);
4183 spin_unlock_bh(&adapter->mcc_lock);
4186 EXPORT_SYMBOL(be_roce_mcc_cmd);