2 * Copyright (C) 2005 - 2014 Emulex
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
10 * Contact Information:
11 * linux-drivers@emulex.com
15 * Costa Mesa, CA 92626
18 #include <linux/module.h>
22 static char *be_port_misconfig_evt_desc[] = {
23 "A valid SFP module detected",
24 "Optics faulted/ incorrectly installed/ not installed.",
25 "Optics of two types installed.",
26 "Incompatible optics.",
27 "Unknown port SFP status"
30 static char *be_port_misconfig_remedy_desc[] = {
32 "Reseat optics. If issue not resolved, replace",
33 "Remove one optic or install matching pair of optics",
34 "Replace with compatible optics for card to function",
38 static struct be_cmd_priv_map cmd_priv_map[] = {
40 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
42 BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
43 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
46 OPCODE_COMMON_GET_FLOW_CONTROL,
48 BE_PRIV_LNKQUERY | BE_PRIV_VHADM |
49 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
52 OPCODE_COMMON_SET_FLOW_CONTROL,
54 BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
55 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
58 OPCODE_ETH_GET_PPORT_STATS,
60 BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
61 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
64 OPCODE_COMMON_GET_PHY_DETAILS,
66 BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
67 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
71 static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem)
74 int num_entries = sizeof(cmd_priv_map)/sizeof(struct be_cmd_priv_map);
75 u32 cmd_privileges = adapter->cmd_privileges;
77 for (i = 0; i < num_entries; i++)
78 if (opcode == cmd_priv_map[i].opcode &&
79 subsystem == cmd_priv_map[i].subsystem)
80 if (!(cmd_privileges & cmd_priv_map[i].priv_mask))
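/* Editorial note: a command is refused only when its opcode/subsystem pair
 * appears in cmd_priv_map[] above and the function holds none of the
 * privileges in that entry's priv_mask. For example, a function with only
 * BE_PRIV_LNKQUERY may issue OPCODE_COMMON_GET_FLOW_CONTROL but not
 * OPCODE_COMMON_SET_FLOW_CONTROL, which requires BE_PRIV_LNKMGMT or one of
 * the admin/devcfg privileges listed above.
 */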
86 static inline void *embedded_payload(struct be_mcc_wrb *wrb)
88 return wrb->payload.embedded_payload;
91 static void be_mcc_notify(struct be_adapter *adapter)
93 struct be_queue_info *mccq = &adapter->mcc_obj.q;
96 if (be_error(adapter))
99 val |= mccq->id & DB_MCCQ_RING_ID_MASK;
100 val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
103 iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
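/* Editorial note: the single doorbell write above both selects the ring
 * (queue id in the DB_MCCQ_RING_ID_MASK bits) and reports how many new WRBs
 * were posted (always 1 here, at DB_MCCQ_NUM_POSTED_SHIFT). Posting one
 * entry to MCC queue id 5, for instance, writes
 * (5 | (1 << DB_MCCQ_NUM_POSTED_SHIFT)).
 */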
106 /* To check if the valid bit is set, check the entire word as we don't know
107 * the endianness of the data (an old entry is host endian while a new entry is
108 * little endian) */
109 static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
113 if (compl->flags != 0) {
114 flags = le32_to_cpu(compl->flags);
115 if (flags & CQE_FLAGS_VALID_MASK) {
116 compl->flags = flags;
123 /* Need to reset the entire word that houses the valid bit */
124 static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
129 static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1)
134 addr = ((addr << 16) << 16) | tag0;
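/* Editorial note: tag1:tag0 carry the 64-bit address of the request header
 * as stored by fill_wrb_tags() (tag0 = low 32 bits, tag1 = high 32 bits).
 * The double 16-bit shift rebuilds the address without a single shift by 32,
 * which would be undefined on builds where unsigned long is 32 bits wide.
 */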
138 static bool be_skip_err_log(u8 opcode, u16 base_status, u16 addl_status)
140 if (base_status == MCC_STATUS_NOT_SUPPORTED ||
141 base_status == MCC_STATUS_ILLEGAL_REQUEST ||
142 addl_status == MCC_ADDL_STATUS_TOO_MANY_INTERFACES ||
143 (opcode == OPCODE_COMMON_WRITE_FLASHROM &&
144 (base_status == MCC_STATUS_ILLEGAL_FIELD ||
145 addl_status == MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH)))
151 /* Placeholder for all the async MCC cmds wherein the caller is not in a busy
152 * loop (i.e. has not issued be_mcc_notify_wait())
154 static void be_async_cmd_process(struct be_adapter *adapter,
155 struct be_mcc_compl *compl,
156 struct be_cmd_resp_hdr *resp_hdr)
158 enum mcc_base_status base_status = base_status(compl->status);
159 u8 opcode = 0, subsystem = 0;
162 opcode = resp_hdr->opcode;
163 subsystem = resp_hdr->subsystem;
166 if (opcode == OPCODE_LOWLEVEL_LOOPBACK_TEST &&
167 subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
168 complete(&adapter->et_cmd_compl);
172 if ((opcode == OPCODE_COMMON_WRITE_FLASHROM ||
173 opcode == OPCODE_COMMON_WRITE_OBJECT) &&
174 subsystem == CMD_SUBSYSTEM_COMMON) {
175 adapter->flash_status = compl->status;
176 complete(&adapter->et_cmd_compl);
180 if ((opcode == OPCODE_ETH_GET_STATISTICS ||
181 opcode == OPCODE_ETH_GET_PPORT_STATS) &&
182 subsystem == CMD_SUBSYSTEM_ETH &&
183 base_status == MCC_STATUS_SUCCESS) {
184 be_parse_stats(adapter);
185 adapter->stats_cmd_sent = false;
189 if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES &&
190 subsystem == CMD_SUBSYSTEM_COMMON) {
191 if (base_status == MCC_STATUS_SUCCESS) {
192 struct be_cmd_resp_get_cntl_addnl_attribs *resp =
194 adapter->drv_stats.be_on_die_temperature =
195 resp->on_die_temperature;
197 adapter->be_get_temp_freq = 0;
203 static int be_mcc_compl_process(struct be_adapter *adapter,
204 struct be_mcc_compl *compl)
206 enum mcc_base_status base_status;
207 enum mcc_addl_status addl_status;
208 struct be_cmd_resp_hdr *resp_hdr;
209 u8 opcode = 0, subsystem = 0;
211 /* Just swap the status to host endian; the mcc tag is opaquely copied
212 * from the mcc_wrb */
213 be_dws_le_to_cpu(compl, 4);
215 base_status = base_status(compl->status);
216 addl_status = addl_status(compl->status);
218 resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);
220 opcode = resp_hdr->opcode;
221 subsystem = resp_hdr->subsystem;
224 be_async_cmd_process(adapter, compl, resp_hdr);
226 if (base_status != MCC_STATUS_SUCCESS &&
227 !be_skip_err_log(opcode, base_status, addl_status)) {
228 if (base_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
229 dev_warn(&adapter->pdev->dev,
230 "VF is not privileged to issue opcode %d-%d\n",
233 dev_err(&adapter->pdev->dev,
234 "opcode %d-%d failed:status %d-%d\n",
235 opcode, subsystem, base_status, addl_status);
238 return compl->status;
241 /* Link state evt is a string of bytes; no need for endian swapping */
242 static void be_async_link_state_process(struct be_adapter *adapter,
243 struct be_mcc_compl *compl)
245 struct be_async_event_link_state *evt =
246 (struct be_async_event_link_state *)compl;
248 /* When link status changes, link speed must be re-queried from FW */
249 adapter->phy.link_speed = -1;
251 /* On BEx the FW does not send a separate link status
252 * notification for physical and logical link.
253 * On other chips just process the logical link
254 * status notification
256 if (!BEx_chip(adapter) &&
257 !(evt->port_link_status & LOGICAL_LINK_STATUS_MASK))
260 /* For the initial link status do not rely on the ASYNC event as
261 * it may not be received in some cases.
263 if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT)
264 be_link_status_update(adapter,
265 evt->port_link_status & LINK_STATUS_MASK);
268 static void be_async_port_misconfig_event_process(struct be_adapter *adapter,
269 struct be_mcc_compl *compl)
271 struct be_async_event_misconfig_port *evt =
272 (struct be_async_event_misconfig_port *)compl;
273 u32 sfp_mismatch_evt = le32_to_cpu(evt->event_data_word1);
274 struct device *dev = &adapter->pdev->dev;
275 u8 port_misconfig_evt;
278 ((sfp_mismatch_evt >> (adapter->hba_port_num * 8)) & 0xff);
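/* Editorial note: event_data_word1 packs one SFP-status byte per port, so
 * the shift by (hba_port_num * 8) selects this adapter's byte; port 2's
 * status, for example, sits in bits 23:16. The extracted value indexes the
 * description/remedy string tables at the top of this file.
 */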
280 /* Log a message that allows a user to determine
281 * whether the SFPs have an issue
283 dev_info(dev, "Port %c: %s %s", adapter->port_name,
284 be_port_misconfig_evt_desc[port_misconfig_evt],
285 be_port_misconfig_remedy_desc[port_misconfig_evt]);
287 if (port_misconfig_evt == INCOMPATIBLE_SFP)
288 adapter->flags |= BE_FLAGS_EVT_INCOMPATIBLE_SFP;
291 /* Grp5 CoS Priority evt */
292 static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
293 struct be_mcc_compl *compl)
295 struct be_async_event_grp5_cos_priority *evt =
296 (struct be_async_event_grp5_cos_priority *)compl;
299 adapter->vlan_prio_bmap = evt->available_priority_bmap;
300 adapter->recommended_prio &= ~VLAN_PRIO_MASK;
301 adapter->recommended_prio =
302 evt->reco_default_priority << VLAN_PRIO_SHIFT;
306 /* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */
307 static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
308 struct be_mcc_compl *compl)
310 struct be_async_event_grp5_qos_link_speed *evt =
311 (struct be_async_event_grp5_qos_link_speed *)compl;
313 if (adapter->phy.link_speed >= 0 &&
314 evt->physical_port == adapter->port_num)
315 adapter->phy.link_speed = le16_to_cpu(evt->qos_link_speed) * 10;
319 static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
320 struct be_mcc_compl *compl)
322 struct be_async_event_grp5_pvid_state *evt =
323 (struct be_async_event_grp5_pvid_state *)compl;
326 adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
327 dev_info(&adapter->pdev->dev, "LPVID: %d\n", adapter->pvid);
333 static void be_async_grp5_evt_process(struct be_adapter *adapter,
334 struct be_mcc_compl *compl)
336 u8 event_type = (compl->flags >> ASYNC_EVENT_TYPE_SHIFT) &
337 ASYNC_EVENT_TYPE_MASK;
339 switch (event_type) {
340 case ASYNC_EVENT_COS_PRIORITY:
341 be_async_grp5_cos_priority_process(adapter, compl);
343 case ASYNC_EVENT_QOS_SPEED:
344 be_async_grp5_qos_speed_process(adapter, compl);
346 case ASYNC_EVENT_PVID_STATE:
347 be_async_grp5_pvid_state_process(adapter, compl);
354 static void be_async_dbg_evt_process(struct be_adapter *adapter,
355 struct be_mcc_compl *cmp)
358 struct be_async_event_qnq *evt = (struct be_async_event_qnq *)cmp;
360 event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) &
361 ASYNC_EVENT_TYPE_MASK;
363 switch (event_type) {
364 case ASYNC_DEBUG_EVENT_TYPE_QNQ:
366 adapter->qnq_vid = le16_to_cpu(evt->vlan_tag);
367 adapter->flags |= BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
370 dev_warn(&adapter->pdev->dev, "Unknown debug event 0x%x!\n",
376 static void be_async_sliport_evt_process(struct be_adapter *adapter,
377 struct be_mcc_compl *cmp)
379 u8 event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) &
380 ASYNC_EVENT_TYPE_MASK;
382 if (event_type == ASYNC_EVENT_PORT_MISCONFIG)
383 be_async_port_misconfig_event_process(adapter, cmp);
386 static inline bool is_link_state_evt(u32 flags)
388 return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
389 ASYNC_EVENT_CODE_LINK_STATE;
392 static inline bool is_grp5_evt(u32 flags)
394 return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
395 ASYNC_EVENT_CODE_GRP_5;
398 static inline bool is_dbg_evt(u32 flags)
400 return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
401 ASYNC_EVENT_CODE_QNQ;
404 static inline bool is_sliport_evt(u32 flags)
406 return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
407 ASYNC_EVENT_CODE_SLIPORT;
410 static void be_mcc_event_process(struct be_adapter *adapter,
411 struct be_mcc_compl *compl)
413 if (is_link_state_evt(compl->flags))
414 be_async_link_state_process(adapter, compl);
415 else if (is_grp5_evt(compl->flags))
416 be_async_grp5_evt_process(adapter, compl);
417 else if (is_dbg_evt(compl->flags))
418 be_async_dbg_evt_process(adapter, compl);
419 else if (is_sliport_evt(compl->flags))
420 be_async_sliport_evt_process(adapter, compl);
423 static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
425 struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
426 struct be_mcc_compl *compl = queue_tail_node(mcc_cq);
428 if (be_mcc_compl_is_new(compl)) {
429 queue_tail_inc(mcc_cq);
435 void be_async_mcc_enable(struct be_adapter *adapter)
437 spin_lock_bh(&adapter->mcc_cq_lock);
439 be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
440 adapter->mcc_obj.rearm_cq = true;
442 spin_unlock_bh(&adapter->mcc_cq_lock);
445 void be_async_mcc_disable(struct be_adapter *adapter)
447 spin_lock_bh(&adapter->mcc_cq_lock);
449 adapter->mcc_obj.rearm_cq = false;
450 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
452 spin_unlock_bh(&adapter->mcc_cq_lock);
455 int be_process_mcc(struct be_adapter *adapter)
457 struct be_mcc_compl *compl;
458 int num = 0, status = 0;
459 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
461 spin_lock(&adapter->mcc_cq_lock);
463 while ((compl = be_mcc_compl_get(adapter))) {
464 if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
465 be_mcc_event_process(adapter, compl);
466 } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
467 status = be_mcc_compl_process(adapter, compl);
468 atomic_dec(&mcc_obj->q.used);
470 be_mcc_compl_use(compl);
475 be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);
477 spin_unlock(&adapter->mcc_cq_lock);
481 /* Wait till no more pending mcc requests are present */
482 static int be_mcc_wait_compl(struct be_adapter *adapter)
484 #define mcc_timeout 120000 /* 12s timeout */
486 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
488 for (i = 0; i < mcc_timeout; i++) {
489 if (be_error(adapter))
493 status = be_process_mcc(adapter);
496 if (atomic_read(&mcc_obj->q.used) == 0)
500 if (i == mcc_timeout) {
501 dev_err(&adapter->pdev->dev, "FW not responding\n");
502 adapter->fw_timeout = true;
508 /* Notify MCC requests and wait for completion */
509 static int be_mcc_notify_wait(struct be_adapter *adapter)
512 struct be_mcc_wrb *wrb;
513 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
514 u16 index = mcc_obj->q.head;
515 struct be_cmd_resp_hdr *resp;
517 index_dec(&index, mcc_obj->q.len);
518 wrb = queue_index_node(&mcc_obj->q, index);
520 resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1);
522 be_mcc_notify(adapter);
524 status = be_mcc_wait_compl(adapter);
528 status = (resp->base_status |
529 ((resp->addl_status & CQE_ADDL_STATUS_MASK) <<
530 CQE_ADDL_STATUS_SHIFT));
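/* Editorial note: the value returned to callers packs the base status in the
 * low bits and the additional status above CQE_ADDL_STATUS_SHIFT, so either
 * half can be recovered later with the base_status()/addl_status() helpers
 * used elsewhere in this file.
 */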
535 static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
541 if (be_error(adapter))
544 ready = ioread32(db);
545 if (ready == 0xffffffff)
548 ready &= MPU_MAILBOX_DB_RDY_MASK;
553 dev_err(&adapter->pdev->dev, "FW not responding\n");
554 adapter->fw_timeout = true;
555 be_detect_error(adapter);
567 * Insert the mailbox address into the doorbell in two steps
568 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
570 static int be_mbox_notify_wait(struct be_adapter *adapter)
574 void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
575 struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
576 struct be_mcc_mailbox *mbox = mbox_mem->va;
577 struct be_mcc_compl *compl = &mbox->compl;
579 /* wait for ready to be set */
580 status = be_mbox_db_ready_wait(adapter, db);
584 val |= MPU_MAILBOX_DB_HI_MASK;
585 /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
586 val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
589 /* wait for ready to be set */
590 status = be_mbox_db_ready_wait(adapter, db);
595 /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
596 val |= (u32)(mbox_mem->dma >> 4) << 2;
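/* Editorial note: only dma address bits 63:4 are ever written to the
 * doorbell (bits 34-63 in the first step above, bits 4-33 in the second), so
 * the mailbox memory is assumed to be at least 16-byte aligned and the low
 * nibble of mbox_mem->dma is never transferred.
 */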
599 status = be_mbox_db_ready_wait(adapter, db);
603 /* A cq entry has been made now */
604 if (be_mcc_compl_is_new(compl)) {
605 status = be_mcc_compl_process(adapter, &mbox->compl);
606 be_mcc_compl_use(compl);
610 dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
616 static u16 be_POST_stage_get(struct be_adapter *adapter)
620 if (BEx_chip(adapter))
621 sem = ioread32(adapter->csr + SLIPORT_SEMAPHORE_OFFSET_BEx);
623 pci_read_config_dword(adapter->pdev,
624 SLIPORT_SEMAPHORE_OFFSET_SH, &sem);
626 return sem & POST_STAGE_MASK;
629 static int lancer_wait_ready(struct be_adapter *adapter)
631 #define SLIPORT_READY_TIMEOUT 30
635 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
636 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
637 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
640 if (sliport_status & SLIPORT_STATUS_ERR_MASK &&
641 !(sliport_status & SLIPORT_STATUS_RN_MASK))
647 return sliport_status ? : -1;
650 int be_fw_wait_ready(struct be_adapter *adapter)
653 int status, timeout = 0;
654 struct device *dev = &adapter->pdev->dev;
656 if (lancer_chip(adapter)) {
657 status = lancer_wait_ready(adapter);
666 /* There's no means to poll POST state on BE2/3 VFs */
667 if (BEx_chip(adapter) && be_virtfn(adapter))
670 stage = be_POST_stage_get(adapter);
671 if (stage == POST_STAGE_ARMFW_RDY)
674 dev_info(dev, "Waiting for POST, %ds elapsed\n", timeout);
675 if (msleep_interruptible(2000)) {
676 dev_err(dev, "Waiting for POST aborted\n");
680 } while (timeout < 60);
683 dev_err(dev, "POST timeout; stage=%#x\n", stage);
687 static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
689 return &wrb->payload.sgl[0];
692 static inline void fill_wrb_tags(struct be_mcc_wrb *wrb, unsigned long addr)
694 wrb->tag0 = addr & 0xFFFFFFFF;
695 wrb->tag1 = upper_32_bits(addr);
698 /* Don't touch the hdr after it's prepared */
699 /* mem will be NULL for embedded commands */
700 static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
701 u8 subsystem, u8 opcode, int cmd_len,
702 struct be_mcc_wrb *wrb,
703 struct be_dma_mem *mem)
707 req_hdr->opcode = opcode;
708 req_hdr->subsystem = subsystem;
709 req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
710 req_hdr->version = 0;
711 fill_wrb_tags(wrb, (ulong) req_hdr);
712 wrb->payload_length = cmd_len;
714 wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
715 MCC_WRB_SGE_CNT_SHIFT;
716 sge = nonembedded_sgl(wrb);
717 sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
718 sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
719 sge->len = cpu_to_le32(mem->size);
721 wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
722 be_dws_cpu_to_le(wrb, 8);
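/* Editorial note: for embedded commands (mem == NULL) the request sits
 * directly in wrb->payload and MCC_WRB_EMBEDDED_MASK is set; for
 * non-embedded commands a single scatter-gather entry is filled with
 * mem->dma/mem->size and an SGE count of 1 is recorded instead. Either way
 * the wrb words are converted to little endian last, which is why the hdr
 * must not be touched after this call (see the comment above).
 */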
725 static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
726 struct be_dma_mem *mem)
728 int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
729 u64 dma = (u64)mem->dma;
731 for (i = 0; i < buf_pages; i++) {
732 pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
733 pages[i].hi = cpu_to_le32(upper_32_bits(dma));
738 static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
740 struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
741 struct be_mcc_wrb *wrb
742 = &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
743 memset(wrb, 0, sizeof(*wrb));
747 static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
749 struct be_queue_info *mccq = &adapter->mcc_obj.q;
750 struct be_mcc_wrb *wrb;
755 if (atomic_read(&mccq->used) >= mccq->len)
758 wrb = queue_head_node(mccq);
759 queue_head_inc(mccq);
760 atomic_inc(&mccq->used);
761 memset(wrb, 0, sizeof(*wrb));
765 static bool use_mcc(struct be_adapter *adapter)
767 return adapter->mcc_obj.q.created;
770 /* Must be used only in process context */
771 static int be_cmd_lock(struct be_adapter *adapter)
773 if (use_mcc(adapter)) {
774 spin_lock_bh(&adapter->mcc_lock);
777 return mutex_lock_interruptible(&adapter->mbox_lock);
781 /* Must be used only in process context */
782 static void be_cmd_unlock(struct be_adapter *adapter)
784 if (use_mcc(adapter))
785 spin_unlock_bh(&adapter->mcc_lock);
787 return mutex_unlock(&adapter->mbox_lock);
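/* Editorial note: before the MCC queue is created (early initialisation),
 * commands fall back to the mailbox and are serialised by the mbox_lock
 * mutex, which may sleep; once mcc_obj.q.created is set they go through the
 * MCC queue under the BH-safe mcc_lock spinlock. This is why
 * be_cmd_lock()/be_cmd_unlock() must only be used in process context.
 */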
790 static struct be_mcc_wrb *be_cmd_copy(struct be_adapter *adapter,
791 struct be_mcc_wrb *wrb)
793 struct be_mcc_wrb *dest_wrb;
795 if (use_mcc(adapter)) {
796 dest_wrb = wrb_from_mccq(adapter);
800 dest_wrb = wrb_from_mbox(adapter);
803 memcpy(dest_wrb, wrb, sizeof(*wrb));
804 if (wrb->embedded & cpu_to_le32(MCC_WRB_EMBEDDED_MASK))
805 fill_wrb_tags(dest_wrb, (ulong) embedded_payload(wrb));
810 /* Must be used only in process context */
811 static int be_cmd_notify_wait(struct be_adapter *adapter,
812 struct be_mcc_wrb *wrb)
814 struct be_mcc_wrb *dest_wrb;
817 status = be_cmd_lock(adapter);
821 dest_wrb = be_cmd_copy(adapter, wrb);
825 if (use_mcc(adapter))
826 status = be_mcc_notify_wait(adapter);
828 status = be_mbox_notify_wait(adapter);
831 memcpy(wrb, dest_wrb, sizeof(*wrb));
833 be_cmd_unlock(adapter);
837 /* Tell fw we're about to start firing cmds by writing a
838 * special pattern across the wrb hdr; uses mbox
840 int be_cmd_fw_init(struct be_adapter *adapter)
845 if (lancer_chip(adapter))
848 if (mutex_lock_interruptible(&adapter->mbox_lock))
851 wrb = (u8 *)wrb_from_mbox(adapter);
861 status = be_mbox_notify_wait(adapter);
863 mutex_unlock(&adapter->mbox_lock);
867 /* Tell fw we're done with firing cmds by writing a
868 * special pattern across the wrb hdr; uses mbox
870 int be_cmd_fw_clean(struct be_adapter *adapter)
875 if (lancer_chip(adapter))
878 if (mutex_lock_interruptible(&adapter->mbox_lock))
881 wrb = (u8 *)wrb_from_mbox(adapter);
891 status = be_mbox_notify_wait(adapter);
893 mutex_unlock(&adapter->mbox_lock);
897 int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo)
899 struct be_mcc_wrb *wrb;
900 struct be_cmd_req_eq_create *req;
901 struct be_dma_mem *q_mem = &eqo->q.dma_mem;
904 if (mutex_lock_interruptible(&adapter->mbox_lock))
907 wrb = wrb_from_mbox(adapter);
908 req = embedded_payload(wrb);
910 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
911 OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb,
914 /* Support for EQ_CREATEv2 is available only from SH-R onwards */
915 if (!(BEx_chip(adapter) || lancer_chip(adapter)))
918 req->hdr.version = ver;
919 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
921 AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
923 AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
924 AMAP_SET_BITS(struct amap_eq_context, count, req->context,
925 __ilog2_u32(eqo->q.len / 256));
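/* Editorial note: the EQ size is encoded as __ilog2_u32(len / 256); a
 * 1024-entry EQ, for example, encodes as __ilog2_u32(1024 / 256) = 2.
 */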
926 be_dws_cpu_to_le(req->context, sizeof(req->context));
928 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
930 status = be_mbox_notify_wait(adapter);
932 struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
934 eqo->q.id = le16_to_cpu(resp->eq_id);
936 (ver == 2) ? le16_to_cpu(resp->msix_idx) : eqo->idx;
937 eqo->q.created = true;
940 mutex_unlock(&adapter->mbox_lock);
945 int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
946 bool permanent, u32 if_handle, u32 pmac_id)
948 struct be_mcc_wrb *wrb;
949 struct be_cmd_req_mac_query *req;
952 spin_lock_bh(&adapter->mcc_lock);
954 wrb = wrb_from_mccq(adapter);
959 req = embedded_payload(wrb);
961 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
962 OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb,
964 req->type = MAC_ADDRESS_TYPE_NETWORK;
968 req->if_id = cpu_to_le16((u16)if_handle);
969 req->pmac_id = cpu_to_le32(pmac_id);
973 status = be_mcc_notify_wait(adapter);
975 struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
977 memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
981 spin_unlock_bh(&adapter->mcc_lock);
985 /* Uses synchronous MCCQ */
986 int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
987 u32 if_id, u32 *pmac_id, u32 domain)
989 struct be_mcc_wrb *wrb;
990 struct be_cmd_req_pmac_add *req;
993 spin_lock_bh(&adapter->mcc_lock);
995 wrb = wrb_from_mccq(adapter);
1000 req = embedded_payload(wrb);
1002 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1003 OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb,
1006 req->hdr.domain = domain;
1007 req->if_id = cpu_to_le32(if_id);
1008 memcpy(req->mac_address, mac_addr, ETH_ALEN);
1010 status = be_mcc_notify_wait(adapter);
1012 struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
1014 *pmac_id = le32_to_cpu(resp->pmac_id);
1018 spin_unlock_bh(&adapter->mcc_lock);
1020 if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
1026 /* Uses synchronous MCCQ */
1027 int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
1029 struct be_mcc_wrb *wrb;
1030 struct be_cmd_req_pmac_del *req;
1036 spin_lock_bh(&adapter->mcc_lock);
1038 wrb = wrb_from_mccq(adapter);
1043 req = embedded_payload(wrb);
1045 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1046 OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req),
1049 req->hdr.domain = dom;
1050 req->if_id = cpu_to_le32(if_id);
1051 req->pmac_id = cpu_to_le32(pmac_id);
1053 status = be_mcc_notify_wait(adapter);
1056 spin_unlock_bh(&adapter->mcc_lock);
1061 int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
1062 struct be_queue_info *eq, bool no_delay, int coalesce_wm)
1064 struct be_mcc_wrb *wrb;
1065 struct be_cmd_req_cq_create *req;
1066 struct be_dma_mem *q_mem = &cq->dma_mem;
1070 if (mutex_lock_interruptible(&adapter->mbox_lock))
1073 wrb = wrb_from_mbox(adapter);
1074 req = embedded_payload(wrb);
1075 ctxt = &req->context;
1077 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1078 OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb,
1081 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
1083 if (BEx_chip(adapter)) {
1084 AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
1086 AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
1088 AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
1089 __ilog2_u32(cq->len / 256));
1090 AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
1091 AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
1092 AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
1094 req->hdr.version = 2;
1095 req->page_size = 1; /* 1 for 4K */
1097 /* coalesce-wm field in this cmd is not relevant to Lancer.
1098 * Lancer uses COMMON_MODIFY_CQ to set this field
1100 if (!lancer_chip(adapter))
1101 AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
1103 AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt,
1105 AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
1106 __ilog2_u32(cq->len / 256));
1107 AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
1108 AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
1109 AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
1112 be_dws_cpu_to_le(ctxt, sizeof(req->context));
1114 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1116 status = be_mbox_notify_wait(adapter);
1118 struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
1120 cq->id = le16_to_cpu(resp->cq_id);
1124 mutex_unlock(&adapter->mbox_lock);
1129 static u32 be_encoded_q_len(int q_len)
1131 u32 len_encoded = fls(q_len); /* log2(len) + 1 */
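/* Editorial note: a 2048-entry ring, for example, gives fls(2048) = 12; the
 * largest encodable ring size (fls() == 16) is handled specially by the
 * check below.
 */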
1133 if (len_encoded == 16)
1138 static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
1139 struct be_queue_info *mccq,
1140 struct be_queue_info *cq)
1142 struct be_mcc_wrb *wrb;
1143 struct be_cmd_req_mcc_ext_create *req;
1144 struct be_dma_mem *q_mem = &mccq->dma_mem;
1148 if (mutex_lock_interruptible(&adapter->mbox_lock))
1151 wrb = wrb_from_mbox(adapter);
1152 req = embedded_payload(wrb);
1153 ctxt = &req->context;
1155 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1156 OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb,
1159 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
1160 if (BEx_chip(adapter)) {
1161 AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
1162 AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
1163 be_encoded_q_len(mccq->len));
1164 AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
1166 req->hdr.version = 1;
1167 req->cq_id = cpu_to_le16(cq->id);
1169 AMAP_SET_BITS(struct amap_mcc_context_v1, ring_size, ctxt,
1170 be_encoded_q_len(mccq->len));
1171 AMAP_SET_BITS(struct amap_mcc_context_v1, valid, ctxt, 1);
1172 AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_id,
1174 AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_valid,
1178 /* Subscribe to Link State, Group 5, QnQ and SLI port async events
1179 * (one event-code bit set for each)
1181 req->async_event_bitmap[0] =
1182 cpu_to_le32(BIT(ASYNC_EVENT_CODE_LINK_STATE) |
1183 BIT(ASYNC_EVENT_CODE_GRP_5) |
1184 BIT(ASYNC_EVENT_CODE_QNQ) |
1185 BIT(ASYNC_EVENT_CODE_SLIPORT));
1187 be_dws_cpu_to_le(ctxt, sizeof(req->context));
1189 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1191 status = be_mbox_notify_wait(adapter);
1193 struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
1195 mccq->id = le16_to_cpu(resp->id);
1196 mccq->created = true;
1198 mutex_unlock(&adapter->mbox_lock);
1203 static int be_cmd_mccq_org_create(struct be_adapter *adapter,
1204 struct be_queue_info *mccq,
1205 struct be_queue_info *cq)
1207 struct be_mcc_wrb *wrb;
1208 struct be_cmd_req_mcc_create *req;
1209 struct be_dma_mem *q_mem = &mccq->dma_mem;
1213 if (mutex_lock_interruptible(&adapter->mbox_lock))
1216 wrb = wrb_from_mbox(adapter);
1217 req = embedded_payload(wrb);
1218 ctxt = &req->context;
1220 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1221 OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb,
1224 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
1226 AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
1227 AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
1228 be_encoded_q_len(mccq->len));
1229 AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
1231 be_dws_cpu_to_le(ctxt, sizeof(req->context));
1233 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1235 status = be_mbox_notify_wait(adapter);
1237 struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
1239 mccq->id = le16_to_cpu(resp->id);
1240 mccq->created = true;
1243 mutex_unlock(&adapter->mbox_lock);
1247 int be_cmd_mccq_create(struct be_adapter *adapter,
1248 struct be_queue_info *mccq, struct be_queue_info *cq)
1252 status = be_cmd_mccq_ext_create(adapter, mccq, cq);
1253 if (status && BEx_chip(adapter)) {
1254 dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
1255 "or newer to avoid conflicting priorities between NIC "
1256 "and FCoE traffic");
1257 status = be_cmd_mccq_org_create(adapter, mccq, cq);
1262 int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
1264 struct be_mcc_wrb wrb = {0};
1265 struct be_cmd_req_eth_tx_create *req;
1266 struct be_queue_info *txq = &txo->q;
1267 struct be_queue_info *cq = &txo->cq;
1268 struct be_dma_mem *q_mem = &txq->dma_mem;
1269 int status, ver = 0;
1271 req = embedded_payload(&wrb);
1272 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1273 OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL);
1275 if (lancer_chip(adapter)) {
1276 req->hdr.version = 1;
1277 } else if (BEx_chip(adapter)) {
1278 if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC)
1279 req->hdr.version = 2;
1280 } else { /* For SH */
1281 req->hdr.version = 2;
1284 if (req->hdr.version > 0)
1285 req->if_id = cpu_to_le16(adapter->if_handle);
1286 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
1287 req->ulp_num = BE_ULP1_NUM;
1288 req->type = BE_ETH_TX_RING_TYPE_STANDARD;
1289 req->cq_id = cpu_to_le16(cq->id);
1290 req->queue_size = be_encoded_q_len(txq->len);
1291 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1292 ver = req->hdr.version;
1294 status = be_cmd_notify_wait(adapter, &wrb);
1296 struct be_cmd_resp_eth_tx_create *resp = embedded_payload(&wrb);
1298 txq->id = le16_to_cpu(resp->cid);
1300 txo->db_offset = le32_to_cpu(resp->db_offset);
1302 txo->db_offset = DB_TXULP1_OFFSET;
1303 txq->created = true;
1310 int be_cmd_rxq_create(struct be_adapter *adapter,
1311 struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
1312 u32 if_id, u32 rss, u8 *rss_id)
1314 struct be_mcc_wrb *wrb;
1315 struct be_cmd_req_eth_rx_create *req;
1316 struct be_dma_mem *q_mem = &rxq->dma_mem;
1319 spin_lock_bh(&adapter->mcc_lock);
1321 wrb = wrb_from_mccq(adapter);
1326 req = embedded_payload(wrb);
1328 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1329 OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);
1331 req->cq_id = cpu_to_le16(cq_id);
1332 req->frag_size = fls(frag_size) - 1;
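/* Editorial note: frag_size is expected to be a power of two and is encoded
 * as its log2; 2048-byte RX fragments, for example, give fls(2048) - 1 = 11.
 */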
1334 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1335 req->interface_id = cpu_to_le32(if_id);
1336 req->max_frame_size = cpu_to_le16(BE_MAX_JUMBO_FRAME_SIZE);
1337 req->rss_queue = cpu_to_le32(rss);
1339 status = be_mcc_notify_wait(adapter);
1341 struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
1343 rxq->id = le16_to_cpu(resp->id);
1344 rxq->created = true;
1345 *rss_id = resp->rss_id;
1349 spin_unlock_bh(&adapter->mcc_lock);
1353 /* Generic destroyer function for all types of queues
1354 * Uses Mbox
1356 int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
1359 struct be_mcc_wrb *wrb;
1360 struct be_cmd_req_q_destroy *req;
1361 u8 subsys = 0, opcode = 0;
1364 if (mutex_lock_interruptible(&adapter->mbox_lock))
1367 wrb = wrb_from_mbox(adapter);
1368 req = embedded_payload(wrb);
1370 switch (queue_type) {
1372 subsys = CMD_SUBSYSTEM_COMMON;
1373 opcode = OPCODE_COMMON_EQ_DESTROY;
1376 subsys = CMD_SUBSYSTEM_COMMON;
1377 opcode = OPCODE_COMMON_CQ_DESTROY;
1380 subsys = CMD_SUBSYSTEM_ETH;
1381 opcode = OPCODE_ETH_TX_DESTROY;
1384 subsys = CMD_SUBSYSTEM_ETH;
1385 opcode = OPCODE_ETH_RX_DESTROY;
1388 subsys = CMD_SUBSYSTEM_COMMON;
1389 opcode = OPCODE_COMMON_MCC_DESTROY;
1395 be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb,
1397 req->id = cpu_to_le16(q->id);
1399 status = be_mbox_notify_wait(adapter);
1402 mutex_unlock(&adapter->mbox_lock);
1407 int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
1409 struct be_mcc_wrb *wrb;
1410 struct be_cmd_req_q_destroy *req;
1413 spin_lock_bh(&adapter->mcc_lock);
1415 wrb = wrb_from_mccq(adapter);
1420 req = embedded_payload(wrb);
1422 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1423 OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
1424 req->id = cpu_to_le16(q->id);
1426 status = be_mcc_notify_wait(adapter);
1430 spin_unlock_bh(&adapter->mcc_lock);
1434 /* Create an rx filtering policy configuration on an i/f
1435 * Will use MBOX only if MCCQ has not been created.
1437 int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
1438 u32 *if_handle, u32 domain)
1440 struct be_mcc_wrb wrb = {0};
1441 struct be_cmd_req_if_create *req;
1444 req = embedded_payload(&wrb);
1445 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1446 OPCODE_COMMON_NTWK_INTERFACE_CREATE,
1447 sizeof(*req), &wrb, NULL);
1448 req->hdr.domain = domain;
1449 req->capability_flags = cpu_to_le32(cap_flags);
1450 req->enable_flags = cpu_to_le32(en_flags);
1451 req->pmac_invalid = true;
1453 status = be_cmd_notify_wait(adapter, &wrb);
1455 struct be_cmd_resp_if_create *resp = embedded_payload(&wrb);
1457 *if_handle = le32_to_cpu(resp->interface_id);
1459 /* Hack to retrieve VF's pmac-id on BE3 */
1460 if (BE3_chip(adapter) && !be_physfn(adapter))
1461 adapter->pmac_id[0] = le32_to_cpu(resp->pmac_id);
1467 int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
1469 struct be_mcc_wrb *wrb;
1470 struct be_cmd_req_if_destroy *req;
1473 if (interface_id == -1)
1476 spin_lock_bh(&adapter->mcc_lock);
1478 wrb = wrb_from_mccq(adapter);
1483 req = embedded_payload(wrb);
1485 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1486 OPCODE_COMMON_NTWK_INTERFACE_DESTROY,
1487 sizeof(*req), wrb, NULL);
1488 req->hdr.domain = domain;
1489 req->interface_id = cpu_to_le32(interface_id);
1491 status = be_mcc_notify_wait(adapter);
1493 spin_unlock_bh(&adapter->mcc_lock);
1497 /* Get stats is a non-embedded command: the request is not embedded inside the
1498 * WRB but is a separate DMA memory block.
1499 * Uses asynchronous MCC.
1501 int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
1503 struct be_mcc_wrb *wrb;
1504 struct be_cmd_req_hdr *hdr;
1507 spin_lock_bh(&adapter->mcc_lock);
1509 wrb = wrb_from_mccq(adapter);
1514 hdr = nonemb_cmd->va;
1516 be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
1517 OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb,
1520 /* BE2 is the only chip that does not support version 1 of this cmd */
1521 if (BE2_chip(adapter))
1523 if (BE3_chip(adapter) || lancer_chip(adapter))
1528 be_mcc_notify(adapter);
1529 adapter->stats_cmd_sent = true;
1532 spin_unlock_bh(&adapter->mcc_lock);
1537 int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
1538 struct be_dma_mem *nonemb_cmd)
1540 struct be_mcc_wrb *wrb;
1541 struct lancer_cmd_req_pport_stats *req;
1544 if (!be_cmd_allowed(adapter, OPCODE_ETH_GET_PPORT_STATS,
1548 spin_lock_bh(&adapter->mcc_lock);
1550 wrb = wrb_from_mccq(adapter);
1555 req = nonemb_cmd->va;
1557 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1558 OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size,
1561 req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
1562 req->cmd_params.params.reset_stats = 0;
1564 be_mcc_notify(adapter);
1565 adapter->stats_cmd_sent = true;
1568 spin_unlock_bh(&adapter->mcc_lock);
1572 static int be_mac_to_link_speed(int mac_speed)
1574 switch (mac_speed) {
1575 case PHY_LINK_SPEED_ZERO:
1577 case PHY_LINK_SPEED_10MBPS:
1579 case PHY_LINK_SPEED_100MBPS:
1581 case PHY_LINK_SPEED_1GBPS:
1583 case PHY_LINK_SPEED_10GBPS:
1585 case PHY_LINK_SPEED_20GBPS:
1587 case PHY_LINK_SPEED_25GBPS:
1589 case PHY_LINK_SPEED_40GBPS:
1595 /* Uses synchronous mcc
1596 * Returns link_speed in Mbps
1598 int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
1599 u8 *link_status, u32 dom)
1601 struct be_mcc_wrb *wrb;
1602 struct be_cmd_req_link_status *req;
1605 spin_lock_bh(&adapter->mcc_lock);
1608 *link_status = LINK_DOWN;
1610 wrb = wrb_from_mccq(adapter);
1615 req = embedded_payload(wrb);
1617 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1618 OPCODE_COMMON_NTWK_LINK_STATUS_QUERY,
1619 sizeof(*req), wrb, NULL);
1621 /* BE2 is the only chip that does not support version 1 of this cmd */
1622 if (!BE2_chip(adapter))
1623 req->hdr.version = 1;
1625 req->hdr.domain = dom;
1627 status = be_mcc_notify_wait(adapter);
1629 struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
1632 *link_speed = resp->link_speed ?
1633 le16_to_cpu(resp->link_speed) * 10 :
1634 be_mac_to_link_speed(resp->mac_speed);
1636 if (!resp->logical_link_status)
1640 *link_status = resp->logical_link_status;
1644 spin_unlock_bh(&adapter->mcc_lock);
1648 /* Uses synchronous mcc */
1649 int be_cmd_get_die_temperature(struct be_adapter *adapter)
1651 struct be_mcc_wrb *wrb;
1652 struct be_cmd_req_get_cntl_addnl_attribs *req;
1655 spin_lock_bh(&adapter->mcc_lock);
1657 wrb = wrb_from_mccq(adapter);
1662 req = embedded_payload(wrb);
1664 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1665 OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES,
1666 sizeof(*req), wrb, NULL);
1668 be_mcc_notify(adapter);
1671 spin_unlock_bh(&adapter->mcc_lock);
1675 /* Uses synchronous mcc */
1676 int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
1678 struct be_mcc_wrb *wrb;
1679 struct be_cmd_req_get_fat *req;
1682 spin_lock_bh(&adapter->mcc_lock);
1684 wrb = wrb_from_mccq(adapter);
1689 req = embedded_payload(wrb);
1691 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1692 OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb,
1694 req->fat_operation = cpu_to_le32(QUERY_FAT);
1695 status = be_mcc_notify_wait(adapter);
1697 struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);
1699 if (log_size && resp->log_size)
1700 *log_size = le32_to_cpu(resp->log_size) -
1704 spin_unlock_bh(&adapter->mcc_lock);
1708 int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
1710 struct be_dma_mem get_fat_cmd;
1711 struct be_mcc_wrb *wrb;
1712 struct be_cmd_req_get_fat *req;
1713 u32 offset = 0, total_size, buf_size,
1714 log_offset = sizeof(u32), payload_len;
1720 total_size = buf_len;
1722 get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
1723 get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
1726 if (!get_fat_cmd.va) {
1727 dev_err(&adapter->pdev->dev,
1728 "Memory allocation failure while reading FAT data\n");
1732 spin_lock_bh(&adapter->mcc_lock);
1734 while (total_size) {
1735 buf_size = min(total_size, (u32)60*1024);
1736 total_size -= buf_size;
1738 wrb = wrb_from_mccq(adapter);
1743 req = get_fat_cmd.va;
1745 payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
1746 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1747 OPCODE_COMMON_MANAGE_FAT, payload_len,
1750 req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
1751 req->read_log_offset = cpu_to_le32(log_offset);
1752 req->read_log_length = cpu_to_le32(buf_size);
1753 req->data_buffer_size = cpu_to_le32(buf_size);
1755 status = be_mcc_notify_wait(adapter);
1757 struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
1759 memcpy(buf + offset,
1761 le32_to_cpu(resp->read_log_length));
1763 dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
1767 log_offset += buf_size;
1770 pci_free_consistent(adapter->pdev, get_fat_cmd.size,
1771 get_fat_cmd.va, get_fat_cmd.dma);
1772 spin_unlock_bh(&adapter->mcc_lock);
1776 /* Uses synchronous mcc */
1777 int be_cmd_get_fw_ver(struct be_adapter *adapter)
1779 struct be_mcc_wrb *wrb;
1780 struct be_cmd_req_get_fw_version *req;
1783 spin_lock_bh(&adapter->mcc_lock);
1785 wrb = wrb_from_mccq(adapter);
1791 req = embedded_payload(wrb);
1793 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1794 OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb,
1796 status = be_mcc_notify_wait(adapter);
1798 struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
1800 strlcpy(adapter->fw_ver, resp->firmware_version_string,
1801 sizeof(adapter->fw_ver));
1802 strlcpy(adapter->fw_on_flash, resp->fw_on_flash_version_string,
1803 sizeof(adapter->fw_on_flash));
1806 spin_unlock_bh(&adapter->mcc_lock);
1810 /* Set the EQ delay interval of an EQ to the specified value.
1811 * Uses async mcc
1813 static int __be_cmd_modify_eqd(struct be_adapter *adapter,
1814 struct be_set_eqd *set_eqd, int num)
1816 struct be_mcc_wrb *wrb;
1817 struct be_cmd_req_modify_eq_delay *req;
1820 spin_lock_bh(&adapter->mcc_lock);
1822 wrb = wrb_from_mccq(adapter);
1827 req = embedded_payload(wrb);
1829 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1830 OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb,
1833 req->num_eq = cpu_to_le32(num);
1834 for (i = 0; i < num; i++) {
1835 req->set_eqd[i].eq_id = cpu_to_le32(set_eqd[i].eq_id);
1836 req->set_eqd[i].phase = 0;
1837 req->set_eqd[i].delay_multiplier =
1838 cpu_to_le32(set_eqd[i].delay_multiplier);
1841 be_mcc_notify(adapter);
1843 spin_unlock_bh(&adapter->mcc_lock);
1847 int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
1853 num_eqs = min(num, 8);
1854 __be_cmd_modify_eqd(adapter, &set_eqd[i], num_eqs);
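/* Editorial note: each MODIFY_EQ_DELAY request carries at most 8 EQ entries
 * (num_eqs = min(num, 8)), so a larger set_eqd array is submitted in chunks
 * of up to 8 EQs per call.
 */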
1862 /* Uses synchronous mcc */
1863 int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
1864 u32 num, u32 domain)
1866 struct be_mcc_wrb *wrb;
1867 struct be_cmd_req_vlan_config *req;
1870 spin_lock_bh(&adapter->mcc_lock);
1872 wrb = wrb_from_mccq(adapter);
1877 req = embedded_payload(wrb);
1879 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1880 OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req),
1882 req->hdr.domain = domain;
1884 req->interface_id = if_id;
1885 req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0;
1886 req->num_vlan = num;
1887 memcpy(req->normal_vlan, vtag_array,
1888 req->num_vlan * sizeof(vtag_array[0]));
1890 status = be_mcc_notify_wait(adapter);
1892 spin_unlock_bh(&adapter->mcc_lock);
1896 static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
1898 struct be_mcc_wrb *wrb;
1899 struct be_dma_mem *mem = &adapter->rx_filter;
1900 struct be_cmd_req_rx_filter *req = mem->va;
1903 spin_lock_bh(&adapter->mcc_lock);
1905 wrb = wrb_from_mccq(adapter);
1910 memset(req, 0, sizeof(*req));
1911 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1912 OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
1915 req->if_id = cpu_to_le32(adapter->if_handle);
1916 req->if_flags_mask = cpu_to_le32(flags);
1917 req->if_flags = (value == ON) ? req->if_flags_mask : 0;
1919 if (flags & BE_IF_FLAGS_MULTICAST) {
1920 struct netdev_hw_addr *ha;
1923 /* Reset mcast promisc mode if it is already set, by setting the mask
1924 * but not setting the flags field
1926 req->if_flags_mask |=
1927 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS &
1928 be_if_cap_flags(adapter));
1929 req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
1930 netdev_for_each_mc_addr(ha, adapter->netdev)
1931 memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
1934 status = be_mcc_notify_wait(adapter);
1936 spin_unlock_bh(&adapter->mcc_lock);
1940 int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
1942 struct device *dev = &adapter->pdev->dev;
1944 if ((flags & be_if_cap_flags(adapter)) != flags) {
1945 dev_warn(dev, "Cannot set rx filter flags 0x%x\n", flags);
1946 dev_warn(dev, "Interface is capable of 0x%x flags only\n",
1947 be_if_cap_flags(adapter));
1949 flags &= be_if_cap_flags(adapter);
1951 return __be_cmd_rx_filter(adapter, flags, value);
1954 /* Uses synchronous mcc */
1955 int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
1957 struct be_mcc_wrb *wrb;
1958 struct be_cmd_req_set_flow_control *req;
1961 if (!be_cmd_allowed(adapter, OPCODE_COMMON_SET_FLOW_CONTROL,
1962 CMD_SUBSYSTEM_COMMON))
1965 spin_lock_bh(&adapter->mcc_lock);
1967 wrb = wrb_from_mccq(adapter);
1972 req = embedded_payload(wrb);
1974 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1975 OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req),
1978 req->hdr.version = 1;
1979 req->tx_flow_control = cpu_to_le16((u16)tx_fc);
1980 req->rx_flow_control = cpu_to_le16((u16)rx_fc);
1982 status = be_mcc_notify_wait(adapter);
1985 spin_unlock_bh(&adapter->mcc_lock);
1987 if (base_status(status) == MCC_STATUS_FEATURE_NOT_SUPPORTED)
1994 int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
1996 struct be_mcc_wrb *wrb;
1997 struct be_cmd_req_get_flow_control *req;
2000 if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_FLOW_CONTROL,
2001 CMD_SUBSYSTEM_COMMON))
2004 spin_lock_bh(&adapter->mcc_lock);
2006 wrb = wrb_from_mccq(adapter);
2011 req = embedded_payload(wrb);
2013 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2014 OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req),
2017 status = be_mcc_notify_wait(adapter);
2019 struct be_cmd_resp_get_flow_control *resp =
2020 embedded_payload(wrb);
2022 *tx_fc = le16_to_cpu(resp->tx_flow_control);
2023 *rx_fc = le16_to_cpu(resp->rx_flow_control);
2027 spin_unlock_bh(&adapter->mcc_lock);
2032 int be_cmd_query_fw_cfg(struct be_adapter *adapter)
2034 struct be_mcc_wrb *wrb;
2035 struct be_cmd_req_query_fw_cfg *req;
2038 if (mutex_lock_interruptible(&adapter->mbox_lock))
2041 wrb = wrb_from_mbox(adapter);
2042 req = embedded_payload(wrb);
2044 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2045 OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
2046 sizeof(*req), wrb, NULL);
2048 status = be_mbox_notify_wait(adapter);
2050 struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
2052 adapter->port_num = le32_to_cpu(resp->phys_port);
2053 adapter->function_mode = le32_to_cpu(resp->function_mode);
2054 adapter->function_caps = le32_to_cpu(resp->function_caps);
2055 adapter->asic_rev = le32_to_cpu(resp->asic_revision) & 0xFF;
2056 dev_info(&adapter->pdev->dev,
2057 "FW config: function_mode=0x%x, function_caps=0x%x\n",
2058 adapter->function_mode, adapter->function_caps);
2061 mutex_unlock(&adapter->mbox_lock);
2066 int be_cmd_reset_function(struct be_adapter *adapter)
2068 struct be_mcc_wrb *wrb;
2069 struct be_cmd_req_hdr *req;
2072 if (lancer_chip(adapter)) {
2073 iowrite32(SLI_PORT_CONTROL_IP_MASK,
2074 adapter->db + SLIPORT_CONTROL_OFFSET);
2075 status = lancer_wait_ready(adapter);
2077 dev_err(&adapter->pdev->dev,
2078 "Adapter in non recoverable error\n");
2082 if (mutex_lock_interruptible(&adapter->mbox_lock))
2085 wrb = wrb_from_mbox(adapter);
2086 req = embedded_payload(wrb);
2088 be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
2089 OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb,
2092 status = be_mbox_notify_wait(adapter);
2094 mutex_unlock(&adapter->mbox_lock);
2098 int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
2099 u32 rss_hash_opts, u16 table_size, const u8 *rss_hkey)
2101 struct be_mcc_wrb *wrb;
2102 struct be_cmd_req_rss_config *req;
2105 if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
2108 spin_lock_bh(&adapter->mcc_lock);
2110 wrb = wrb_from_mccq(adapter);
2115 req = embedded_payload(wrb);
2117 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
2118 OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);
2120 req->if_id = cpu_to_le32(adapter->if_handle);
2121 req->enable_rss = cpu_to_le16(rss_hash_opts);
2122 req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
2124 if (!BEx_chip(adapter))
2125 req->hdr.version = 1;
2127 memcpy(req->cpu_table, rsstable, table_size);
2128 memcpy(req->hash, rss_hkey, RSS_HASH_KEY_LEN);
2129 be_dws_cpu_to_le(req->hash, sizeof(req->hash));
2131 status = be_mcc_notify_wait(adapter);
2133 spin_unlock_bh(&adapter->mcc_lock);
2138 int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
2139 u8 bcn, u8 sts, u8 state)
2141 struct be_mcc_wrb *wrb;
2142 struct be_cmd_req_enable_disable_beacon *req;
2145 spin_lock_bh(&adapter->mcc_lock);
2147 wrb = wrb_from_mccq(adapter);
2152 req = embedded_payload(wrb);
2154 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2155 OPCODE_COMMON_ENABLE_DISABLE_BEACON,
2156 sizeof(*req), wrb, NULL);
2158 req->port_num = port_num;
2159 req->beacon_state = state;
2160 req->beacon_duration = bcn;
2161 req->status_duration = sts;
2163 status = be_mcc_notify_wait(adapter);
2166 spin_unlock_bh(&adapter->mcc_lock);
2171 int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
2173 struct be_mcc_wrb *wrb;
2174 struct be_cmd_req_get_beacon_state *req;
2177 spin_lock_bh(&adapter->mcc_lock);
2179 wrb = wrb_from_mccq(adapter);
2184 req = embedded_payload(wrb);
2186 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2187 OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req),
2190 req->port_num = port_num;
2192 status = be_mcc_notify_wait(adapter);
2194 struct be_cmd_resp_get_beacon_state *resp =
2195 embedded_payload(wrb);
2197 *state = resp->beacon_state;
2201 spin_unlock_bh(&adapter->mcc_lock);
2206 int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
2207 u8 page_num, u8 *data)
2209 struct be_dma_mem cmd;
2210 struct be_mcc_wrb *wrb;
2211 struct be_cmd_req_port_type *req;
2214 if (page_num > TR_PAGE_A2)
2217 cmd.size = sizeof(struct be_cmd_resp_port_type);
2218 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
2220 dev_err(&adapter->pdev->dev, "Memory allocation failed\n");
2223 memset(cmd.va, 0, cmd.size);
2225 spin_lock_bh(&adapter->mcc_lock);
2227 wrb = wrb_from_mccq(adapter);
2234 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2235 OPCODE_COMMON_READ_TRANSRECV_DATA,
2236 cmd.size, wrb, &cmd);
2238 req->port = cpu_to_le32(adapter->hba_port_num);
2239 req->page_num = cpu_to_le32(page_num);
2240 status = be_mcc_notify_wait(adapter);
2242 struct be_cmd_resp_port_type *resp = cmd.va;
2244 memcpy(data, resp->page_data, PAGE_DATA_LEN);
2247 spin_unlock_bh(&adapter->mcc_lock);
2248 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
2252 int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
2253 u32 data_size, u32 data_offset,
2254 const char *obj_name, u32 *data_written,
2255 u8 *change_status, u8 *addn_status)
2257 struct be_mcc_wrb *wrb;
2258 struct lancer_cmd_req_write_object *req;
2259 struct lancer_cmd_resp_write_object *resp;
2263 spin_lock_bh(&adapter->mcc_lock);
2264 adapter->flash_status = 0;
2266 wrb = wrb_from_mccq(adapter);
2272 req = embedded_payload(wrb);
2274 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2275 OPCODE_COMMON_WRITE_OBJECT,
2276 sizeof(struct lancer_cmd_req_write_object), wrb,
2279 ctxt = &req->context;
2280 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
2281 write_length, ctxt, data_size);
2284 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
2287 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
2290 be_dws_cpu_to_le(ctxt, sizeof(req->context));
2291 req->write_offset = cpu_to_le32(data_offset);
2292 strlcpy(req->object_name, obj_name, sizeof(req->object_name));
2293 req->descriptor_count = cpu_to_le32(1);
2294 req->buf_len = cpu_to_le32(data_size);
2295 req->addr_low = cpu_to_le32((cmd->dma +
2296 sizeof(struct lancer_cmd_req_write_object))
2298 req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
2299 sizeof(struct lancer_cmd_req_write_object)));
2301 be_mcc_notify(adapter);
2302 spin_unlock_bh(&adapter->mcc_lock);
2304 if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
2305 msecs_to_jiffies(60000)))
2306 status = -ETIMEDOUT;
2308 status = adapter->flash_status;
2310 resp = embedded_payload(wrb);
2312 *data_written = le32_to_cpu(resp->actual_write_len);
2313 *change_status = resp->change_status;
2315 *addn_status = resp->additional_status;
2321 spin_unlock_bh(&adapter->mcc_lock);
2325 int be_cmd_query_cable_type(struct be_adapter *adapter)
2327 u8 page_data[PAGE_DATA_LEN];
2330 status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
2333 switch (adapter->phy.interface_type) {
2335 adapter->phy.cable_type =
2336 page_data[QSFP_PLUS_CABLE_TYPE_OFFSET];
2338 case PHY_TYPE_SFP_PLUS_10GB:
2339 adapter->phy.cable_type =
2340 page_data[SFP_PLUS_CABLE_TYPE_OFFSET];
2343 adapter->phy.cable_type = 0;
2350 int be_cmd_query_sfp_info(struct be_adapter *adapter)
2352 u8 page_data[PAGE_DATA_LEN];
2355 status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
2358 strlcpy(adapter->phy.vendor_name, page_data +
2359 SFP_VENDOR_NAME_OFFSET, SFP_VENDOR_NAME_LEN - 1);
2360 strlcpy(adapter->phy.vendor_pn,
2361 page_data + SFP_VENDOR_PN_OFFSET,
2362 SFP_VENDOR_NAME_LEN - 1);
2368 int lancer_cmd_delete_object(struct be_adapter *adapter, const char *obj_name)
2370 struct lancer_cmd_req_delete_object *req;
2371 struct be_mcc_wrb *wrb;
2374 spin_lock_bh(&adapter->mcc_lock);
2376 wrb = wrb_from_mccq(adapter);
2382 req = embedded_payload(wrb);
2384 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2385 OPCODE_COMMON_DELETE_OBJECT,
2386 sizeof(*req), wrb, NULL);
2388 strlcpy(req->object_name, obj_name, sizeof(req->object_name));
2390 status = be_mcc_notify_wait(adapter);
2392 spin_unlock_bh(&adapter->mcc_lock);
2396 int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
2397 u32 data_size, u32 data_offset, const char *obj_name,
2398 u32 *data_read, u32 *eof, u8 *addn_status)
2400 struct be_mcc_wrb *wrb;
2401 struct lancer_cmd_req_read_object *req;
2402 struct lancer_cmd_resp_read_object *resp;
2405 spin_lock_bh(&adapter->mcc_lock);
2407 wrb = wrb_from_mccq(adapter);
2413 req = embedded_payload(wrb);
2415 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2416 OPCODE_COMMON_READ_OBJECT,
2417 sizeof(struct lancer_cmd_req_read_object), wrb,
2420 req->desired_read_len = cpu_to_le32(data_size);
2421 req->read_offset = cpu_to_le32(data_offset);
2422 strcpy(req->object_name, obj_name);
2423 req->descriptor_count = cpu_to_le32(1);
2424 req->buf_len = cpu_to_le32(data_size);
2425 req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF));
2426 req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma));
2428 status = be_mcc_notify_wait(adapter);
2430 resp = embedded_payload(wrb);
2432 *data_read = le32_to_cpu(resp->actual_read_len);
2433 *eof = le32_to_cpu(resp->eof);
2435 *addn_status = resp->additional_status;
2439 spin_unlock_bh(&adapter->mcc_lock);
2443 int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
2444 u32 flash_type, u32 flash_opcode, u32 img_offset,
2447 struct be_mcc_wrb *wrb;
2448 struct be_cmd_write_flashrom *req;
2451 spin_lock_bh(&adapter->mcc_lock);
2452 adapter->flash_status = 0;
2454 wrb = wrb_from_mccq(adapter);
2461 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2462 OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb,
2465 req->params.op_type = cpu_to_le32(flash_type);
2466 if (flash_type == OPTYPE_OFFSET_SPECIFIED)
2467 req->params.offset = cpu_to_le32(img_offset);
2469 req->params.op_code = cpu_to_le32(flash_opcode);
2470 req->params.data_buf_size = cpu_to_le32(buf_size);
2472 be_mcc_notify(adapter);
2473 spin_unlock_bh(&adapter->mcc_lock);
2475 if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
2476 msecs_to_jiffies(40000)))
2477 status = -ETIMEDOUT;
2479 status = adapter->flash_status;
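/* Editorial note: WRITE_FLASHROM completions are consumed asynchronously by
 * be_async_cmd_process(), which stores the FW status in
 * adapter->flash_status and signals et_cmd_compl; that is the completion
 * waited on above, with -ETIMEDOUT returned if the FW never responds within
 * the 40s window.
 */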
2484 spin_unlock_bh(&adapter->mcc_lock);
2488 int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
2489 u16 img_optype, u32 img_offset, u32 crc_offset)
2491 struct be_cmd_read_flash_crc *req;
2492 struct be_mcc_wrb *wrb;
2495 spin_lock_bh(&adapter->mcc_lock);
2497 wrb = wrb_from_mccq(adapter);
2502 req = embedded_payload(wrb);
2504 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2505 OPCODE_COMMON_READ_FLASHROM, sizeof(*req),
2508 req->params.op_type = cpu_to_le32(img_optype);
2509 if (img_optype == OPTYPE_OFFSET_SPECIFIED)
2510 req->params.offset = cpu_to_le32(img_offset + crc_offset);
2512 req->params.offset = cpu_to_le32(crc_offset);
2514 req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
2515 req->params.data_buf_size = cpu_to_le32(0x4);
2517 status = be_mcc_notify_wait(adapter);
2519 memcpy(flashed_crc, req->crc, 4);
2522 spin_unlock_bh(&adapter->mcc_lock);
2526 int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
2527 struct be_dma_mem *nonemb_cmd)
2529 struct be_mcc_wrb *wrb;
2530 struct be_cmd_req_acpi_wol_magic_config *req;
2533 spin_lock_bh(&adapter->mcc_lock);
2535 wrb = wrb_from_mccq(adapter);
2540 req = nonemb_cmd->va;
2542 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
2543 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req),
2545 memcpy(req->magic_mac, mac, ETH_ALEN);
2547 status = be_mcc_notify_wait(adapter);
2550 spin_unlock_bh(&adapter->mcc_lock);
2554 int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
2555 u8 loopback_type, u8 enable)
2557 struct be_mcc_wrb *wrb;
2558 struct be_cmd_req_set_lmode *req;
2561 spin_lock_bh(&adapter->mcc_lock);
2563 wrb = wrb_from_mccq(adapter);
2569 req = embedded_payload(wrb);
2571 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2572 OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req),
2575 req->src_port = port_num;
2576 req->dest_port = port_num;
2577 req->loopback_type = loopback_type;
2578 req->loopback_state = enable;
2580 status = be_mcc_notify_wait(adapter);
2582 spin_unlock_bh(&adapter->mcc_lock);
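/* Runs a firmware loopback test on the given port: 'num_pkts' packets of
 * 'pkt_size' bytes filled with 'pattern' are sent and verified. Completion
 * is signalled asynchronously via adapter->et_cmd_compl.
 */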
2586 int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
2587 u32 loopback_type, u32 pkt_size, u32 num_pkts,
2590 struct be_mcc_wrb *wrb;
2591 struct be_cmd_req_loopback_test *req;
2592 struct be_cmd_resp_loopback_test *resp;
2595 spin_lock_bh(&adapter->mcc_lock);
2597 wrb = wrb_from_mccq(adapter);
2603 req = embedded_payload(wrb);
2605 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2606 OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb,
2609 req->hdr.timeout = cpu_to_le32(15);
2610 req->pattern = cpu_to_le64(pattern);
2611 req->src_port = cpu_to_le32(port_num);
2612 req->dest_port = cpu_to_le32(port_num);
2613 req->pkt_size = cpu_to_le32(pkt_size);
2614 req->num_pkts = cpu_to_le32(num_pkts);
2615 req->loopback_type = cpu_to_le32(loopback_type);
2617 be_mcc_notify(adapter);
2619 spin_unlock_bh(&adapter->mcc_lock);
2621 wait_for_completion(&adapter->et_cmd_compl);
2622 resp = embedded_payload(wrb);
2623 status = le32_to_cpu(resp->status);
2627 spin_unlock_bh(&adapter->mcc_lock);
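/* DDR DMA self-test: fills the send buffer with a repeating 8-byte pattern,
 * has the firmware DMA it back and compares the received buffer against the
 * transmitted one.
 */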
2631 int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
2632 u32 byte_cnt, struct be_dma_mem *cmd)
2634 struct be_mcc_wrb *wrb;
2635 struct be_cmd_req_ddrdma_test *req;
2636 int status;
2637 int i, j = 0;
2639 spin_lock_bh(&adapter->mcc_lock);
2641 wrb = wrb_from_mccq(adapter);
2647 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2648 OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb,
2651 req->pattern = cpu_to_le64(pattern);
2652 req->byte_count = cpu_to_le32(byte_cnt);
2653 for (i = 0; i < byte_cnt; i++) {
2654 req->snd_buff[i] = (u8)(pattern >> (j*8));
2655 j++;
2656 if (j > 7)
2657 j = 0;
2658 }
2660 status = be_mcc_notify_wait(adapter);
2663 struct be_cmd_resp_ddrdma_test *resp;
2666 if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
2673 spin_unlock_bh(&adapter->mcc_lock);
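/* Reads the adapter's SEEPROM contents into the caller-supplied DMA buffer. */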
2677 int be_cmd_get_seeprom_data(struct be_adapter *adapter,
2678 struct be_dma_mem *nonemb_cmd)
2680 struct be_mcc_wrb *wrb;
2681 struct be_cmd_req_seeprom_read *req;
2684 spin_lock_bh(&adapter->mcc_lock);
2686 wrb = wrb_from_mccq(adapter);
2691 req = nonemb_cmd->va;
2693 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2694 OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
2697 status = be_mcc_notify_wait(adapter);
2700 spin_unlock_bh(&adapter->mcc_lock);
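/* Queries the PHY details (type, interface type, supported speeds) and
 * caches them in adapter->phy; BE2 chips always report 1G/10G fixed speeds.
 */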
2704 int be_cmd_get_phy_info(struct be_adapter *adapter)
2706 struct be_mcc_wrb *wrb;
2707 struct be_cmd_req_get_phy_info *req;
2708 struct be_dma_mem cmd;
2711 if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_PHY_DETAILS,
2712 CMD_SUBSYSTEM_COMMON))
2715 spin_lock_bh(&adapter->mcc_lock);
2717 wrb = wrb_from_mccq(adapter);
2722 cmd.size = sizeof(struct be_cmd_req_get_phy_info);
2723 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
2725 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
2732 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2733 OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
2736 status = be_mcc_notify_wait(adapter);
2738 struct be_phy_info *resp_phy_info =
2739 cmd.va + sizeof(struct be_cmd_req_hdr);
2741 adapter->phy.phy_type = le16_to_cpu(resp_phy_info->phy_type);
2742 adapter->phy.interface_type =
2743 le16_to_cpu(resp_phy_info->interface_type);
2744 adapter->phy.auto_speeds_supported =
2745 le16_to_cpu(resp_phy_info->auto_speeds_supported);
2746 adapter->phy.fixed_speeds_supported =
2747 le16_to_cpu(resp_phy_info->fixed_speeds_supported);
2748 adapter->phy.misc_params =
2749 le32_to_cpu(resp_phy_info->misc_params);
2751 if (BE2_chip(adapter)) {
2752 adapter->phy.fixed_speeds_supported =
2753 BE_SUPPORTED_SPEED_10GBPS |
2754 BE_SUPPORTED_SPEED_1GBPS;
2757 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
2759 spin_unlock_bh(&adapter->mcc_lock);
2763 static int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
2765 struct be_mcc_wrb *wrb;
2766 struct be_cmd_req_set_qos *req;
2769 spin_lock_bh(&adapter->mcc_lock);
2771 wrb = wrb_from_mccq(adapter);
2777 req = embedded_payload(wrb);
2779 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2780 OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);
2782 req->hdr.domain = domain;
2783 req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
2784 req->max_bps_nic = cpu_to_le32(bps);
2786 status = be_mcc_notify_wait(adapter);
2789 spin_unlock_bh(&adapter->mcc_lock);
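/* Uses the MBOX to fetch the controller attributes; the physical port
 * number is cached in adapter->hba_port_num.
 */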
2793 int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
2795 struct be_mcc_wrb *wrb;
2796 struct be_cmd_req_cntl_attribs *req;
2797 struct be_cmd_resp_cntl_attribs *resp;
2799 int payload_len = max(sizeof(*req), sizeof(*resp));
2800 struct mgmt_controller_attrib *attribs;
2801 struct be_dma_mem attribs_cmd;
2803 if (mutex_lock_interruptible(&adapter->mbox_lock))
2806 memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
2807 attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
2808 attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
2810 if (!attribs_cmd.va) {
2811 dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
2816 wrb = wrb_from_mbox(adapter);
2821 req = attribs_cmd.va;
2823 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2824 OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len,
2827 status = be_mbox_notify_wait(adapter);
2829 attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
2830 adapter->hba_port_num = attribs->hba_attribs.phy_port;
2834 mutex_unlock(&adapter->mbox_lock);
2836 pci_free_consistent(adapter->pdev, attribs_cmd.size,
2837 attribs_cmd.va, attribs_cmd.dma);
2842 int be_cmd_req_native_mode(struct be_adapter *adapter)
2844 struct be_mcc_wrb *wrb;
2845 struct be_cmd_req_set_func_cap *req;
2848 if (mutex_lock_interruptible(&adapter->mbox_lock))
2851 wrb = wrb_from_mbox(adapter);
2857 req = embedded_payload(wrb);
2859 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2860 OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP,
2861 sizeof(*req), wrb, NULL);
2863 req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
2864 CAPABILITY_BE3_NATIVE_ERX_API);
2865 req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);
2867 status = be_mbox_notify_wait(adapter);
2869 struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
2871 adapter->be3_native = le32_to_cpu(resp->cap_flags) &
2872 CAPABILITY_BE3_NATIVE_ERX_API;
2873 if (!adapter->be3_native)
2874 dev_warn(&adapter->pdev->dev,
2875 "adapter not in advanced mode\n");
2878 mutex_unlock(&adapter->mbox_lock);
2882 /* Get privilege(s) for a function */
2883 int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
2886 struct be_mcc_wrb *wrb;
2887 struct be_cmd_req_get_fn_privileges *req;
2890 spin_lock_bh(&adapter->mcc_lock);
2892 wrb = wrb_from_mccq(adapter);
2898 req = embedded_payload(wrb);
2900 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2901 OPCODE_COMMON_GET_FN_PRIVILEGES, sizeof(*req),
2904 req->hdr.domain = domain;
2906 status = be_mcc_notify_wait(adapter);
2908 struct be_cmd_resp_get_fn_privileges *resp =
2909 embedded_payload(wrb);
2911 *privilege = le32_to_cpu(resp->privilege_mask);
2913 /* In UMC mode the FW does not return the right privileges.
2914  * Override with privileges equivalent to those of a PF.
2915  */
2916 if (BEx_chip(adapter) && be_is_mc(adapter) &&
2918 *privilege = MAX_PRIVILEGES;
2922 spin_unlock_bh(&adapter->mcc_lock);
2926 /* Set privilege(s) for a function */
2927 int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
2930 struct be_mcc_wrb *wrb;
2931 struct be_cmd_req_set_fn_privileges *req;
2934 spin_lock_bh(&adapter->mcc_lock);
2936 wrb = wrb_from_mccq(adapter);
2942 req = embedded_payload(wrb);
2943 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2944 OPCODE_COMMON_SET_FN_PRIVILEGES, sizeof(*req),
2946 req->hdr.domain = domain;
2947 if (lancer_chip(adapter))
2948 req->privileges_lancer = cpu_to_le32(privileges);
2950 req->privileges = cpu_to_le32(privileges);
2952 status = be_mcc_notify_wait(adapter);
2954 spin_unlock_bh(&adapter->mcc_lock);
2958 /* pmac_id_valid: true => pmac_id is supplied and its MAC address is requested.
2959  * pmac_id_valid: false => pmac_id is not supplied; the first active pmac_id
2960  * or a permanent MAC address is requested instead.
2961  * If a pmac_id is returned, *pmac_id_valid is set to true.  */
2962 int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
2963 bool *pmac_id_valid, u32 *pmac_id, u32 if_handle,
2966 struct be_mcc_wrb *wrb;
2967 struct be_cmd_req_get_mac_list *req;
2970 struct be_dma_mem get_mac_list_cmd;
2973 memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
2974 get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
2975 get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev,
2976 get_mac_list_cmd.size,
2977 &get_mac_list_cmd.dma);
2979 if (!get_mac_list_cmd.va) {
2980 dev_err(&adapter->pdev->dev,
2981 "Memory allocation failure during GET_MAC_LIST\n");
2985 spin_lock_bh(&adapter->mcc_lock);
2987 wrb = wrb_from_mccq(adapter);
2993 req = get_mac_list_cmd.va;
2995 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2996 OPCODE_COMMON_GET_MAC_LIST,
2997 get_mac_list_cmd.size, wrb, &get_mac_list_cmd);
2998 req->hdr.domain = domain;
2999 req->mac_type = MAC_ADDRESS_TYPE_NETWORK;
3000 if (*pmac_id_valid) {
3001 req->mac_id = cpu_to_le32(*pmac_id);
3002 req->iface_id = cpu_to_le16(if_handle);
3003 req->perm_override = 0;
3004 } else {
3005 req->perm_override = 1;
3006 }
3008 status = be_mcc_notify_wait(adapter);
3010 struct be_cmd_resp_get_mac_list *resp =
3011 get_mac_list_cmd.va;
3013 if (*pmac_id_valid) {
3014 memcpy(mac, resp->macid_macaddr.mac_addr_id.macaddr,
3015 ETH_ALEN);
3016 goto out;
3017 }
3019 mac_count = resp->true_mac_count + resp->pseudo_mac_count;
3020 /* MAC list returned could contain one or more active mac_ids
3021  * or one or more true or pseudo permanent MAC addresses.
3022  * If an active mac_id is present, return the first active mac_id
3023  * found.
3024  */
3025 for (i = 0; i < mac_count; i++) {
3026 struct get_list_macaddr *mac_entry;
3030 mac_entry = &resp->macaddr_list[i];
3031 mac_addr_size = le16_to_cpu(mac_entry->mac_addr_size);
3032 /* mac_id is a 32 bit value and mac_addr size
3033  * is 6 bytes
3034  */
3035 if (mac_addr_size == sizeof(u32)) {
3036 *pmac_id_valid = true;
3037 mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id;
3038 *pmac_id = le32_to_cpu(mac_id);
3039 goto out;
3040 }
3041 }
3042 /* If no active mac_id found, return first mac addr */
3043 *pmac_id_valid = false;
3044 memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
3045 mac_addr_size);
3046 }
3048 out:
3049 spin_unlock_bh(&adapter->mcc_lock);
3050 pci_free_consistent(adapter->pdev, get_mac_list_cmd.size,
3051 get_mac_list_cmd.va, get_mac_list_cmd.dma);
3055 int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id,
3056 u8 *mac, u32 if_handle, bool active, u32 domain)
3059 be_cmd_get_mac_from_list(adapter, mac, &active, &curr_pmac_id,
3061 if (BEx_chip(adapter))
3062 return be_cmd_mac_addr_query(adapter, mac, false,
3063 if_handle, curr_pmac_id);
3065 /* Fetch the MAC address using pmac_id */
3066 return be_cmd_get_mac_from_list(adapter, mac, &active,
3071 int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac)
3074 bool pmac_valid = false;
3078 if (BEx_chip(adapter)) {
3079 if (be_physfn(adapter))
3080 status = be_cmd_mac_addr_query(adapter, mac, true, 0,
3083 status = be_cmd_mac_addr_query(adapter, mac, false,
3084 adapter->if_handle, 0);
3086 status = be_cmd_get_mac_from_list(adapter, mac, &pmac_valid,
3087 NULL, adapter->if_handle, 0);
3093 /* Uses synchronous MCCQ */
3094 int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
3095 u8 mac_count, u32 domain)
3097 struct be_mcc_wrb *wrb;
3098 struct be_cmd_req_set_mac_list *req;
3100 struct be_dma_mem cmd;
3102 memset(&cmd, 0, sizeof(struct be_dma_mem));
3103 cmd.size = sizeof(struct be_cmd_req_set_mac_list);
3104 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
3105 &cmd.dma, GFP_KERNEL);
3109 spin_lock_bh(&adapter->mcc_lock);
3111 wrb = wrb_from_mccq(adapter);
3118 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3119 OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
3122 req->hdr.domain = domain;
3123 req->mac_count = mac_count;
3125 memcpy(req->mac, mac_array, ETH_ALEN*mac_count);
3127 status = be_mcc_notify_wait(adapter);
3130 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
3131 spin_unlock_bh(&adapter->mcc_lock);
3135 /* Wrapper to delete any active MACs and provision the new mac.
3136 * Changes to MAC_LIST are allowed iff none of the MAC addresses in the
3137 * current list are active.
3139 int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom)
3141 bool active_mac = false;
3142 u8 old_mac[ETH_ALEN];
3143 u32 pmac_id;
3144 int status;
3146 status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
3147 &pmac_id, if_id, dom);
3149 if (!status && active_mac)
3150 be_cmd_pmac_del(adapter, if_id, pmac_id, dom);
3152 return be_cmd_set_mac_list(adapter, mac, mac ? 1 : 0, dom);
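/* Programs hypervisor-switch settings for an interface: the port VLAN id
 * (pvid) and, on non-BEx chips, the port forwarding mode (hsw_mode).
 */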
3155 int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
3156 u32 domain, u16 intf_id, u16 hsw_mode)
3158 struct be_mcc_wrb *wrb;
3159 struct be_cmd_req_set_hsw_config *req;
3163 spin_lock_bh(&adapter->mcc_lock);
3165 wrb = wrb_from_mccq(adapter);
3171 req = embedded_payload(wrb);
3172 ctxt = &req->context;
3174 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3175 OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb,
3178 req->hdr.domain = domain;
3179 AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id);
3181 AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1);
3182 AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid);
3184 if (!BEx_chip(adapter) && hsw_mode) {
3185 AMAP_SET_BITS(struct amap_set_hsw_context, interface_id,
3186 ctxt, adapter->hba_port_num);
3187 AMAP_SET_BITS(struct amap_set_hsw_context, pport, ctxt, 1);
3188 AMAP_SET_BITS(struct amap_set_hsw_context, port_fwd_type,
3189 ctxt, hsw_mode);
3190 }
3192 be_dws_cpu_to_le(req->context, sizeof(req->context));
3193 status = be_mcc_notify_wait(adapter);
3196 spin_unlock_bh(&adapter->mcc_lock);
3200 /* Get Hyper switch config */
3201 int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
3202 u32 domain, u16 intf_id, u8 *mode)
3204 struct be_mcc_wrb *wrb;
3205 struct be_cmd_req_get_hsw_config *req;
3210 spin_lock_bh(&adapter->mcc_lock);
3212 wrb = wrb_from_mccq(adapter);
3218 req = embedded_payload(wrb);
3219 ctxt = &req->context;
3221 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3222 OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb,
3225 req->hdr.domain = domain;
3226 AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
3227 ctxt, intf_id);
3228 AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1);
3230 if (!BEx_chip(adapter) && mode) {
3231 AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
3232 ctxt, adapter->hba_port_num);
3233 AMAP_SET_BITS(struct amap_get_hsw_req_context, pport, ctxt, 1);
3235 be_dws_cpu_to_le(req->context, sizeof(req->context));
3237 status = be_mcc_notify_wait(adapter);
3239 struct be_cmd_resp_get_hsw_config *resp =
3240 embedded_payload(wrb);
3242 be_dws_le_to_cpu(&resp->context, sizeof(resp->context));
3243 vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
3244 pvid, &resp->context);
3246 *pvid = le16_to_cpu(vid);
3248 *mode = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
3249 port_fwd_type, &resp->context);
3253 spin_unlock_bh(&adapter->mcc_lock);
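/* Identifies functions on which wake-on-LAN is not supported: all VFs and
 * PFs with certain OEM subsystem device ids.
 */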
3257 static bool be_is_wol_excluded(struct be_adapter *adapter)
3259 struct pci_dev *pdev = adapter->pdev;
3261 if (!be_physfn(adapter))
3264 switch (pdev->subsystem_device) {
3265 case OC_SUBSYS_DEVICE_ID1:
3266 case OC_SUBSYS_DEVICE_ID2:
3267 case OC_SUBSYS_DEVICE_ID3:
3268 case OC_SUBSYS_DEVICE_ID4:
3275 int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
3277 struct be_mcc_wrb *wrb;
3278 struct be_cmd_req_acpi_wol_magic_config_v1 *req;
3280 struct be_dma_mem cmd;
3282 if (!be_cmd_allowed(adapter, OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
3286 if (be_is_wol_excluded(adapter))
3289 if (mutex_lock_interruptible(&adapter->mbox_lock))
3292 memset(&cmd, 0, sizeof(struct be_dma_mem));
3293 cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
3294 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
3296 dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
3301 wrb = wrb_from_mbox(adapter);
3309 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
3310 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
3311 sizeof(*req), wrb, &cmd);
3313 req->hdr.version = 1;
3314 req->query_options = BE_GET_WOL_CAP;
3316 status = be_mbox_notify_wait(adapter);
3318 struct be_cmd_resp_acpi_wol_magic_config_v1 *resp;
3320 resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *)cmd.va;
3322 adapter->wol_cap = resp->wol_settings;
3323 if (adapter->wol_cap & BE_WOL_CAP)
3324 adapter->wol_en = true;
3327 mutex_unlock(&adapter->mbox_lock);
3329 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
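/* Sets the UART trace level in the extended FAT configuration for every
 * firmware module.
 */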
3334 int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level)
3336 struct be_dma_mem extfat_cmd;
3337 struct be_fat_conf_params *cfgs;
3341 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3342 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3343 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
3348 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3352 cfgs = (struct be_fat_conf_params *)
3353 (extfat_cmd.va + sizeof(struct be_cmd_resp_hdr));
3354 for (i = 0; i < le32_to_cpu(cfgs->num_modules); i++) {
3355 u32 num_modes = le32_to_cpu(cfgs->module[i].num_modes);
3357 for (j = 0; j < num_modes; j++) {
3358 if (cfgs->module[i].trace_lvl[j].mode == MODE_UART)
3359 cfgs->module[i].trace_lvl[j].dbg_lvl =
3360 cpu_to_le32(level);
3361 }
3362 }
3364 status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, cfgs);
3366 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
3371 int be_cmd_get_fw_log_level(struct be_adapter *adapter)
3373 struct be_dma_mem extfat_cmd;
3374 struct be_fat_conf_params *cfgs;
3378 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3379 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3380 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
3383 if (!extfat_cmd.va) {
3384 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
3389 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3391 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
3392 sizeof(struct be_cmd_resp_hdr));
3394 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
3395 if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
3396 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
3399 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
3405 int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
3406 struct be_dma_mem *cmd)
3408 struct be_mcc_wrb *wrb;
3409 struct be_cmd_req_get_ext_fat_caps *req;
3412 if (mutex_lock_interruptible(&adapter->mbox_lock))
3415 wrb = wrb_from_mbox(adapter);
3422 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3423 OPCODE_COMMON_GET_EXT_FAT_CAPABILITES,
3424 cmd->size, wrb, cmd);
3425 req->parameter_type = cpu_to_le32(1);
3427 status = be_mbox_notify_wait(adapter);
3429 mutex_unlock(&adapter->mbox_lock);
3433 int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
3434 struct be_dma_mem *cmd,
3435 struct be_fat_conf_params *configs)
3437 struct be_mcc_wrb *wrb;
3438 struct be_cmd_req_set_ext_fat_caps *req;
3441 spin_lock_bh(&adapter->mcc_lock);
3443 wrb = wrb_from_mccq(adapter);
3450 memcpy(&req->set_params, configs, sizeof(struct be_fat_conf_params));
3451 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3452 OPCODE_COMMON_SET_EXT_FAT_CAPABILITES,
3453 cmd->size, wrb, cmd);
3455 status = be_mcc_notify_wait(adapter);
3457 spin_unlock_bh(&adapter->mcc_lock);
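/* Uses the MBOX to fetch this function's physical port name; on failure the
 * name is derived from the port number itself.
 */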
3461 int be_cmd_query_port_name(struct be_adapter *adapter)
3463 struct be_cmd_req_get_port_name *req;
3464 struct be_mcc_wrb *wrb;
3467 if (mutex_lock_interruptible(&adapter->mbox_lock))
3470 wrb = wrb_from_mbox(adapter);
3471 req = embedded_payload(wrb);
3473 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3474 OPCODE_COMMON_GET_PORT_NAME, sizeof(*req), wrb,
3476 if (!BEx_chip(adapter))
3477 req->hdr.version = 1;
3479 status = be_mbox_notify_wait(adapter);
3481 struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb);
3483 adapter->port_name = resp->port_name[adapter->hba_port_num];
3485 adapter->port_name = adapter->hba_port_num + '0';
3488 mutex_unlock(&adapter->mbox_lock);
3492 /* Descriptor type */
3498 static struct be_nic_res_desc *be_get_nic_desc(u8 *buf, u32 desc_count,
3501 struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
3502 struct be_nic_res_desc *nic;
3505 for (i = 0; i < desc_count; i++) {
3506 if (hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V0 ||
3507 hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V1) {
3508 nic = (struct be_nic_res_desc *)hdr;
3509 if (desc_type == FUNC_DESC ||
3510 (desc_type == VFT_DESC &&
3511 nic->flags & (1 << VFT_SHIFT)))
3515 hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
3516 hdr = (void *)hdr + hdr->desc_len;
3521 static struct be_nic_res_desc *be_get_vft_desc(u8 *buf, u32 desc_count)
3523 return be_get_nic_desc(buf, desc_count, VFT_DESC);
3526 static struct be_nic_res_desc *be_get_func_nic_desc(u8 *buf, u32 desc_count)
3528 return be_get_nic_desc(buf, desc_count, FUNC_DESC);
3531 static struct be_pcie_res_desc *be_get_pcie_desc(u8 devfn, u8 *buf,
3534 struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
3535 struct be_pcie_res_desc *pcie;
3538 for (i = 0; i < desc_count; i++) {
3539 if ((hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V0 ||
3540 hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V1)) {
3541 pcie = (struct be_pcie_res_desc *)hdr;
3542 if (pcie->pf_num == devfn)
3546 hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
3547 hdr = (void *)hdr + hdr->desc_len;
3552 static struct be_port_res_desc *be_get_port_desc(u8 *buf, u32 desc_count)
3554 struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
3557 for (i = 0; i < desc_count; i++) {
3558 if (hdr->desc_type == PORT_RESOURCE_DESC_TYPE_V1)
3559 return (struct be_port_res_desc *)hdr;
3561 hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
3562 hdr = (void *)hdr + hdr->desc_len;
3567 static void be_copy_nic_desc(struct be_resources *res,
3568 struct be_nic_res_desc *desc)
3570 res->max_uc_mac = le16_to_cpu(desc->unicast_mac_count);
3571 res->max_vlans = le16_to_cpu(desc->vlan_count);
3572 res->max_mcast_mac = le16_to_cpu(desc->mcast_mac_count);
3573 res->max_tx_qs = le16_to_cpu(desc->txq_count);
3574 res->max_rss_qs = le16_to_cpu(desc->rssq_count);
3575 res->max_rx_qs = le16_to_cpu(desc->rq_count);
3576 res->max_evt_qs = le16_to_cpu(desc->eq_count);
3577 res->max_cq_count = le16_to_cpu(desc->cq_count);
3578 res->max_iface_count = le16_to_cpu(desc->iface_count);
3579 res->max_mcc_count = le16_to_cpu(desc->mcc_count);
3580 /* Clear the flags that the driver is not interested in */
3581 res->if_cap_flags = le32_to_cpu(desc->cap_flags) &
3582 BE_IF_CAP_FLAGS_WANT;
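/* Uses the MBOX to query the resources provisioned for this function
 * (GET_FUNC_CONFIG) and copies the NIC descriptor limits into 'res'.
 */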
3586 int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
3588 struct be_mcc_wrb *wrb;
3589 struct be_cmd_req_get_func_config *req;
3591 struct be_dma_mem cmd;
3593 if (mutex_lock_interruptible(&adapter->mbox_lock))
3596 memset(&cmd, 0, sizeof(struct be_dma_mem));
3597 cmd.size = sizeof(struct be_cmd_resp_get_func_config);
3598 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
3600 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
3605 wrb = wrb_from_mbox(adapter);
3613 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3614 OPCODE_COMMON_GET_FUNC_CONFIG,
3615 cmd.size, wrb, &cmd);
3617 if (skyhawk_chip(adapter))
3618 req->hdr.version = 1;
3620 status = be_mbox_notify_wait(adapter);
3622 struct be_cmd_resp_get_func_config *resp = cmd.va;
3623 u32 desc_count = le32_to_cpu(resp->desc_count);
3624 struct be_nic_res_desc *desc;
3626 desc = be_get_func_nic_desc(resp->func_param, desc_count);
3632 adapter->pf_number = desc->pf_num;
3633 be_copy_nic_desc(res, desc);
3636 mutex_unlock(&adapter->mbox_lock);
3638 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
3642 /* Will use MBOX only if MCCQ has not been created */
3643 int be_cmd_get_profile_config(struct be_adapter *adapter,
3644 struct be_resources *res, u8 query, u8 domain)
3646 struct be_cmd_resp_get_profile_config *resp;
3647 struct be_cmd_req_get_profile_config *req;
3648 struct be_nic_res_desc *vf_res;
3649 struct be_pcie_res_desc *pcie;
3650 struct be_port_res_desc *port;
3651 struct be_nic_res_desc *nic;
3652 struct be_mcc_wrb wrb = {0};
3653 struct be_dma_mem cmd;
3657 memset(&cmd, 0, sizeof(struct be_dma_mem));
3658 cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
3659 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
3664 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3665 OPCODE_COMMON_GET_PROFILE_CONFIG,
3666 cmd.size, &wrb, &cmd);
3668 req->hdr.domain = domain;
3669 if (!lancer_chip(adapter))
3670 req->hdr.version = 1;
3671 req->type = ACTIVE_PROFILE_TYPE;
3673 /* When QUERY_MODIFIABLE_FIELDS_TYPE bit is set, cmd returns the
3674 * descriptors with all bits set to "1" for the fields which can be
3675 * modified using SET_PROFILE_CONFIG cmd.
3677 if (query == RESOURCE_MODIFIABLE)
3678 req->type |= QUERY_MODIFIABLE_FIELDS_TYPE;
3680 status = be_cmd_notify_wait(adapter, &wrb);
3685 desc_count = le16_to_cpu(resp->desc_count);
3687 pcie = be_get_pcie_desc(adapter->pdev->devfn, resp->func_param,
3690 res->max_vfs = le16_to_cpu(pcie->num_vfs);
3692 port = be_get_port_desc(resp->func_param, desc_count);
3694 adapter->mc_type = port->mc_type;
3696 nic = be_get_func_nic_desc(resp->func_param, desc_count);
3698 be_copy_nic_desc(res, nic);
3700 vf_res = be_get_vft_desc(resp->func_param, desc_count);
3702 res->vf_if_cap_flags = vf_res->cap_flags;
3705 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
3709 /* Will use MBOX only if MCCQ has not been created */
3710 static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
3711 int size, int count, u8 version, u8 domain)
3713 struct be_cmd_req_set_profile_config *req;
3714 struct be_mcc_wrb wrb = {0};
3715 struct be_dma_mem cmd;
3718 memset(&cmd, 0, sizeof(struct be_dma_mem));
3719 cmd.size = sizeof(struct be_cmd_req_set_profile_config);
3720 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
3725 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3726 OPCODE_COMMON_SET_PROFILE_CONFIG, cmd.size,
3728 req->hdr.version = version;
3729 req->hdr.domain = domain;
3730 req->desc_count = cpu_to_le32(count);
3731 memcpy(req->desc, desc, size);
3733 status = be_cmd_notify_wait(adapter, &wrb);
3736 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
3740 /* Mark all fields invalid */
3741 static void be_reset_nic_desc(struct be_nic_res_desc *nic)
3743 memset(nic, 0, sizeof(*nic));
3744 nic->unicast_mac_count = 0xFFFF;
3745 nic->mcc_count = 0xFFFF;
3746 nic->vlan_count = 0xFFFF;
3747 nic->mcast_mac_count = 0xFFFF;
3748 nic->txq_count = 0xFFFF;
3749 nic->rq_count = 0xFFFF;
3750 nic->rssq_count = 0xFFFF;
3751 nic->lro_count = 0xFFFF;
3752 nic->cq_count = 0xFFFF;
3753 nic->toe_conn_count = 0xFFFF;
3754 nic->eq_count = 0xFFFF;
3755 nic->iface_count = 0xFFFF;
3756 nic->link_param = 0xFF;
3757 nic->channel_id_param = cpu_to_le16(0xF000);
3758 nic->acpi_params = 0xFF;
3759 nic->wol_param = 0x0F;
3760 nic->tunnel_iface_count = 0xFFFF;
3761 nic->direct_tenant_iface_count = 0xFFFF;
3762 nic->bw_min = 0xFFFFFFFF;
3763 nic->bw_max = 0xFFFFFFFF;
3766 /* Mark all fields invalid */
3767 static void be_reset_pcie_desc(struct be_pcie_res_desc *pcie)
3769 memset(pcie, 0, sizeof(*pcie));
3770 pcie->sriov_state = 0xFF;
3771 pcie->pf_state = 0xFF;
3772 pcie->pf_type = 0xFF;
3773 pcie->num_vfs = 0xFFFF;
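/* Caps the TX rate of a VF domain: BE3 uses the legacy SET_QOS command,
 * Lancer programs an absolute rate and other chips a percentage of the link
 * speed, either way via a NIC resource descriptor in SET_PROFILE_CONFIG.
 */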
3776 int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed,
3779 struct be_nic_res_desc nic_desc;
3783 if (BE3_chip(adapter))
3784 return be_cmd_set_qos(adapter, max_rate / 10, domain);
3786 be_reset_nic_desc(&nic_desc);
3787 nic_desc.pf_num = adapter->pf_number;
3788 nic_desc.vf_num = domain;
3789 nic_desc.bw_min = 0;
3790 if (lancer_chip(adapter)) {
3791 nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0;
3792 nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0;
3793 nic_desc.flags = (1 << QUN_SHIFT) | (1 << IMM_SHIFT) |
3794 (1 << NOSV_SHIFT);
3795 nic_desc.bw_max = cpu_to_le32(max_rate / 10);
3798 nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1;
3799 nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
3800 nic_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
3801 bw_percent = max_rate ? (max_rate * 100) / link_speed : 100;
3802 nic_desc.bw_max = cpu_to_le32(bw_percent);
3805 return be_cmd_set_profile_config(adapter, &nic_desc,
3806 nic_desc.hdr.desc_len,
3807 1, version, domain);
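/* Builds the NIC resource template applied to every VF: queue counts scale
 * with num_vf_qs and the remaining modifiable resources are split evenly
 * between the PF and its VFs.
 */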
3810 static void be_fill_vf_res_template(struct be_adapter *adapter,
3811 struct be_resources pool_res,
3812 u16 num_vfs, u16 num_vf_qs,
3813 struct be_nic_res_desc *nic_vft)
3815 u32 vf_if_cap_flags = pool_res.vf_if_cap_flags;
3816 struct be_resources res_mod = {0};
3818 /* Resource with fields set to all '1's by GET_PROFILE_CONFIG cmd,
3819 * which are modifiable using SET_PROFILE_CONFIG cmd.
3821 be_cmd_get_profile_config(adapter, &res_mod, RESOURCE_MODIFIABLE, 0);
3823 /* If RSS IFACE capability flags are modifiable for a VF, set the
3824 * capability flag as valid and set RSS and DEFQ_RSS IFACE flags if
3825 * more than 1 RSSQ is available for a VF.
3826 * Otherwise, provision only 1 queue pair for VF.
3828 if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
3829 nic_vft->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
3830 if (num_vf_qs > 1) {
3831 vf_if_cap_flags |= BE_IF_FLAGS_RSS;
3832 if (pool_res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS)
3833 vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS;
3835 vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS |
3836 BE_IF_FLAGS_DEFQ_RSS);
3839 nic_vft->cap_flags = cpu_to_le32(vf_if_cap_flags);
3844 nic_vft->rq_count = cpu_to_le16(num_vf_qs);
3845 nic_vft->txq_count = cpu_to_le16(num_vf_qs);
3846 nic_vft->rssq_count = cpu_to_le16(num_vf_qs);
3847 nic_vft->cq_count = cpu_to_le16(pool_res.max_cq_count /
3848 (num_vfs + 1));
3850 /* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally
3851  * among the PF and its VFs, if the fields are changeable
3852  */
3853 if (res_mod.max_uc_mac == FIELD_MODIFIABLE)
3854 nic_vft->unicast_mac_count = cpu_to_le16(pool_res.max_uc_mac /
3855 (num_vfs + 1));
3857 if (res_mod.max_vlans == FIELD_MODIFIABLE)
3858 nic_vft->vlan_count = cpu_to_le16(pool_res.max_vlans /
3859 (num_vfs + 1));
3861 if (res_mod.max_iface_count == FIELD_MODIFIABLE)
3862 nic_vft->iface_count = cpu_to_le16(pool_res.max_iface_count /
3863 (num_vfs + 1));
3865 if (res_mod.max_mcc_count == FIELD_MODIFIABLE)
3866 nic_vft->mcc_count = cpu_to_le16(pool_res.max_mcc_count /
3867 (num_vfs + 1));
3868 }
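/* Provisions SR-IOV: sends a PF PCIe descriptor enabling 'num_vfs' VFs and a
 * VF NIC template descriptor in a single SET_PROFILE_CONFIG command.
 */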
3870 int be_cmd_set_sriov_config(struct be_adapter *adapter,
3871 struct be_resources pool_res, u16 num_vfs,
3872 u16 num_vf_qs)
3873 {
3874 struct {
3875 struct be_pcie_res_desc pcie;
3876 struct be_nic_res_desc nic_vft;
3877 } __packed desc;
3879 /* PF PCIE descriptor */
3880 be_reset_pcie_desc(&desc.pcie);
3881 desc.pcie.hdr.desc_type = PCIE_RESOURCE_DESC_TYPE_V1;
3882 desc.pcie.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
3883 desc.pcie.flags = BIT(IMM_SHIFT) | BIT(NOSV_SHIFT);
3884 desc.pcie.pf_num = adapter->pdev->devfn;
3885 desc.pcie.sriov_state = num_vfs ? 1 : 0;
3886 desc.pcie.num_vfs = cpu_to_le16(num_vfs);
3888 /* VF NIC Template descriptor */
3889 be_reset_nic_desc(&desc.nic_vft);
3890 desc.nic_vft.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1;
3891 desc.nic_vft.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
3892 desc.nic_vft.flags = BIT(VFT_SHIFT) | BIT(IMM_SHIFT) | BIT(NOSV_SHIFT);
3893 desc.nic_vft.pf_num = adapter->pdev->devfn;
3894 desc.nic_vft.vf_num = 0;
3896 be_fill_vf_res_template(adapter, pool_res, num_vfs, num_vf_qs,
3899 return be_cmd_set_profile_config(adapter, &desc,
3900 2 * RESOURCE_DESC_SIZE_V1, 2, 1, 0);
3903 int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op)
3905 struct be_mcc_wrb *wrb;
3906 struct be_cmd_req_manage_iface_filters *req;
3909 if (iface == 0xFFFFFFFF)
3912 spin_lock_bh(&adapter->mcc_lock);
3914 wrb = wrb_from_mccq(adapter);
3919 req = embedded_payload(wrb);
3921 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3922 OPCODE_COMMON_MANAGE_IFACE_FILTERS, sizeof(*req),
3925 req->target_iface_id = cpu_to_le32(iface);
3927 status = be_mcc_notify_wait(adapter);
3929 spin_unlock_bh(&adapter->mcc_lock);
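/* Advertises (or, when port is 0, clears) the UDP port used for VxLAN
 * offload by writing a port resource descriptor via SET_PROFILE_CONFIG.
 */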
3933 int be_cmd_set_vxlan_port(struct be_adapter *adapter, __be16 port)
3935 struct be_port_res_desc port_desc;
3937 memset(&port_desc, 0, sizeof(port_desc));
3938 port_desc.hdr.desc_type = PORT_RESOURCE_DESC_TYPE_V1;
3939 port_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
3940 port_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
3941 port_desc.link_num = adapter->hba_port_num;
3942 if (port) {
3943 port_desc.nv_flags = NV_TYPE_VXLAN | (1 << SOCVID_SHIFT) |
3944 (1 << RCVID_SHIFT);
3945 port_desc.nv_port = swab16(port);
3946 } else {
3947 port_desc.nv_flags = NV_TYPE_DISABLED;
3948 port_desc.nv_port = 0;
3949 }
3951 return be_cmd_set_profile_config(adapter, &port_desc,
3952 RESOURCE_DESC_SIZE_V1, 1, 1, 0);
3955 int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
3958 struct be_mcc_wrb *wrb;
3959 struct be_cmd_req_get_iface_list *req;
3960 struct be_cmd_resp_get_iface_list *resp;
3963 spin_lock_bh(&adapter->mcc_lock);
3965 wrb = wrb_from_mccq(adapter);
3970 req = embedded_payload(wrb);
3972 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3973 OPCODE_COMMON_GET_IFACE_LIST, sizeof(*resp),
3975 req->hdr.domain = vf_num + 1;
3977 status = be_mcc_notify_wait(adapter);
3979 resp = (struct be_cmd_resp_get_iface_list *)req;
3980 vf_cfg->if_handle = le32_to_cpu(resp->if_desc.if_id);
3984 spin_unlock_bh(&adapter->mcc_lock);
3988 static int lancer_wait_idle(struct be_adapter *adapter)
3990 #define SLIPORT_IDLE_TIMEOUT 30
3994 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3995 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3996 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
4002 if (i == SLIPORT_IDLE_TIMEOUT)
4008 int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask)
4012 status = lancer_wait_idle(adapter);
4016 iowrite32(mask, adapter->db + PHYSDEV_CONTROL_OFFSET);
4021 /* Routine to check whether dump image is present or not */
4022 bool dump_present(struct be_adapter *adapter)
4024 u32 sliport_status = 0;
4026 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
4027 return !!(sliport_status & SLIPORT_STATUS_DIP_MASK);
4030 int lancer_initiate_dump(struct be_adapter *adapter)
4032 struct device *dev = &adapter->pdev->dev;
4035 if (dump_present(adapter)) {
4036 dev_info(dev, "Previous dump not cleared, not forcing dump\n");
4040 /* Trigger a firmware reset and a diagnostic dump */
4041 status = lancer_physdev_ctrl(adapter, PHYSDEV_CONTROL_FW_RESET_MASK |
4042 PHYSDEV_CONTROL_DD_MASK);
4044 dev_err(dev, "FW reset failed\n");
4048 status = lancer_wait_idle(adapter);
4052 if (!dump_present(adapter)) {
4053 dev_err(dev, "FW dump not generated\n");
4060 int lancer_delete_dump(struct be_adapter *adapter)
4064 status = lancer_cmd_delete_object(adapter, LANCER_FW_DUMP_FILE);
4065 return be_cmd_status(status);
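/* Asks the firmware to enable the given VF domain; not needed (and not
 * issued) on BEx chips.
 */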
4069 int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
4071 struct be_mcc_wrb *wrb;
4072 struct be_cmd_enable_disable_vf *req;
4075 if (BEx_chip(adapter))
4078 spin_lock_bh(&adapter->mcc_lock);
4080 wrb = wrb_from_mccq(adapter);
4086 req = embedded_payload(wrb);
4088 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4089 OPCODE_COMMON_ENABLE_DISABLE_VF, sizeof(*req),
4092 req->hdr.domain = domain;
4094 status = be_mcc_notify_wait(adapter);
4096 spin_unlock_bh(&adapter->mcc_lock);
4100 int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable)
4102 struct be_mcc_wrb *wrb;
4103 struct be_cmd_req_intr_set *req;
4106 if (mutex_lock_interruptible(&adapter->mbox_lock))
4109 wrb = wrb_from_mbox(adapter);
4111 req = embedded_payload(wrb);
4113 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4114 OPCODE_COMMON_SET_INTERRUPT_ENABLE, sizeof(*req),
4117 req->intr_enabled = intr_enable;
4119 status = be_mbox_notify_wait(adapter);
4121 mutex_unlock(&adapter->mbox_lock);
4126 int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile_id)
4128 struct be_cmd_req_get_active_profile *req;
4129 struct be_mcc_wrb *wrb;
4132 if (mutex_lock_interruptible(&adapter->mbox_lock))
4135 wrb = wrb_from_mbox(adapter);
4141 req = embedded_payload(wrb);
4143 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4144 OPCODE_COMMON_GET_ACTIVE_PROFILE, sizeof(*req),
4147 status = be_mbox_notify_wait(adapter);
4149 struct be_cmd_resp_get_active_profile *resp =
4150 embedded_payload(wrb);
4152 *profile_id = le16_to_cpu(resp->active_profile_id);
4156 mutex_unlock(&adapter->mbox_lock);
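/* Sets a VF's logical link state: enabled, disabled or auto (tracking the
 * physical link). Not supported on BEx or Lancer chips.
 */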
4160 int be_cmd_set_logical_link_config(struct be_adapter *adapter,
4161 int link_state, u8 domain)
4163 struct be_mcc_wrb *wrb;
4164 struct be_cmd_req_set_ll_link *req;
4167 if (BEx_chip(adapter) || lancer_chip(adapter))
4170 spin_lock_bh(&adapter->mcc_lock);
4172 wrb = wrb_from_mccq(adapter);
4178 req = embedded_payload(wrb);
4180 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4181 OPCODE_COMMON_SET_LOGICAL_LINK_CONFIG,
4182 sizeof(*req), wrb, NULL);
4184 req->hdr.version = 1;
4185 req->hdr.domain = domain;
4187 if (link_state == IFLA_VF_LINK_STATE_ENABLE)
4188 req->link_config |= 1;
4190 if (link_state == IFLA_VF_LINK_STATE_AUTO)
4191 req->link_config |= 1 << PLINK_TRACK_SHIFT;
4193 status = be_mcc_notify_wait(adapter);
4195 spin_unlock_bh(&adapter->mcc_lock);
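/* Passthrough used by the RoCE driver: wraps the caller-supplied command
 * payload in an MCC WRB, waits for completion and copies the response back
 * into the payload buffer.
 */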
4199 int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
4200 int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
4202 struct be_adapter *adapter = netdev_priv(netdev_handle);
4203 struct be_mcc_wrb *wrb;
4204 struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *)wrb_payload;
4205 struct be_cmd_req_hdr *req;
4206 struct be_cmd_resp_hdr *resp;
4209 spin_lock_bh(&adapter->mcc_lock);
4211 wrb = wrb_from_mccq(adapter);
4216 req = embedded_payload(wrb);
4217 resp = embedded_payload(wrb);
4219 be_wrb_cmd_hdr_prepare(req, hdr->subsystem,
4220 hdr->opcode, wrb_payload_size, wrb, NULL);
4221 memcpy(req, wrb_payload, wrb_payload_size);
4222 be_dws_cpu_to_le(req, wrb_payload_size);
4224 status = be_mcc_notify_wait(adapter);
4226 *cmd_status = (status & 0xffff);
4229 memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length);
4230 be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length);
4232 spin_unlock_bh(&adapter->mcc_lock);
4235 EXPORT_SYMBOL(be_roce_mcc_cmd);