2 * QLogic qlcnic NIC Driver
3 * Copyright (c) 2009-2010 QLogic Corporation
5 * See LICENSE.qlcnic for copyright and licensing details.
/*
 * Per-command mailbox descriptor table for 82xx firmware commands.
 * Field order, as consumed by qlcnic_82xx_alloc_mbx_args():
 *   { command code, number of request args, number of response args }
 * The arg counts size the req.arg/rsp.arg arrays allocated per command.
 * NOTE(review): the table terminator ("};") is not visible in this chunk.
 */
10 static const struct qlcnic_mailbox_metadata qlcnic_mbx_tbl[] = {
11 {QLCNIC_CMD_CREATE_RX_CTX, 4, 1},
12 {QLCNIC_CMD_DESTROY_RX_CTX, 2, 1},
13 {QLCNIC_CMD_CREATE_TX_CTX, 4, 1},
14 {QLCNIC_CMD_DESTROY_TX_CTX, 2, 1},
15 {QLCNIC_CMD_INTRPT_TEST, 4, 1},
16 {QLCNIC_CMD_SET_MTU, 4, 1},
17 {QLCNIC_CMD_READ_PHY, 4, 2},
18 {QLCNIC_CMD_WRITE_PHY, 5, 1},
19 {QLCNIC_CMD_READ_HW_REG, 4, 1},
20 {QLCNIC_CMD_GET_FLOW_CTL, 4, 2},
21 {QLCNIC_CMD_SET_FLOW_CTL, 4, 1},
22 {QLCNIC_CMD_READ_MAX_MTU, 4, 2},
23 {QLCNIC_CMD_READ_MAX_LRO, 4, 2},
24 {QLCNIC_CMD_MAC_ADDRESS, 4, 3},
25 {QLCNIC_CMD_GET_PCI_INFO, 4, 1},
26 {QLCNIC_CMD_GET_NIC_INFO, 4, 1},
27 {QLCNIC_CMD_SET_NIC_INFO, 4, 1},
28 {QLCNIC_CMD_GET_ESWITCH_CAPABILITY, 4, 3},
29 {QLCNIC_CMD_TOGGLE_ESWITCH, 4, 1},
30 {QLCNIC_CMD_GET_ESWITCH_STATUS, 4, 3},
31 {QLCNIC_CMD_SET_PORTMIRRORING, 4, 1},
32 {QLCNIC_CMD_CONFIGURE_ESWITCH, 4, 1},
33 {QLCNIC_CMD_GET_MAC_STATS, 4, 1},
34 {QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG, 4, 3},
35 {QLCNIC_CMD_GET_ESWITCH_STATS, 5, 1},
36 {QLCNIC_CMD_CONFIG_PORT, 4, 1},
37 {QLCNIC_CMD_TEMP_SIZE, 4, 4},
38 {QLCNIC_CMD_GET_TEMP_HDR, 4, 1},
39 {QLCNIC_CMD_SET_DRV_VER, 4, 1},
/*
 * Build the 32-bit command signature written to the CRB signature register
 * before a mailbox command: PCI function number in bits 7:0 and firmware
 * HAL version in bits 15:8.
 * NOTE(review): the expression continues past the visible lines (the
 * remaining high bits of the signature are not shown in this chunk).
 */
42 static inline u32 qlcnic_get_cmd_signature(struct qlcnic_hardware_context *ahw)
44 return (ahw->pci_func & 0xff) | ((ahw->fw_hal_version & 0xff) << 8) |
48 /* Allocate mailbox registers */
/*
 * Look up @type in qlcnic_mbx_tbl and allocate the request/response argument
 * arrays sized by the table's in_args/out_args counts.  arg[0] of the request
 * is pre-loaded with the command code.  GFP_ATOMIC is used because callers
 * may hold spinlocks (presumably — TODO confirm against call sites).
 */
49 int qlcnic_82xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
50 struct qlcnic_adapter *adapter, u32 type)
53 const struct qlcnic_mailbox_metadata *mbx_tbl;
55 mbx_tbl = qlcnic_mbx_tbl;
56 size = ARRAY_SIZE(qlcnic_mbx_tbl);
/* Linear scan of the command table for the matching command code. */
57 for (i = 0; i < size; i++) {
58 if (type == mbx_tbl[i].cmd) {
59 mbx->req.num = mbx_tbl[i].in_args;
60 mbx->rsp.num = mbx_tbl[i].out_args;
61 mbx->req.arg = kcalloc(mbx->req.num,
62 sizeof(u32), GFP_ATOMIC);
65 mbx->rsp.arg = kcalloc(mbx->rsp.num,
66 sizeof(u32), GFP_ATOMIC);
/*
 * NOTE(review): kcalloc() already returns zeroed memory, so the two
 * memsets below look redundant — confirm and consider removing.
 */
72 memset(mbx->req.arg, 0, sizeof(u32) * mbx->req.num);
73 memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num);
74 mbx->req.arg[0] = type;
81 /* Free up mailbox registers */
/*
 * Release the req.arg/rsp.arg arrays allocated by the alloc_mbx_args path
 * (presumably via kfree — body not visible in this chunk; confirm it also
 * NULLs the pointers to guard against double free).
 */
82 void qlcnic_free_mbx_args(struct qlcnic_cmd_args *cmd)
/*
 * Scan the adapter's NPAR table for @pci_func.  Only the first
 * ahw->act_pci_func entries are considered valid.
 * NOTE(review): return statements are not visible in this chunk; callers
 * (qlcnic_config_switch_port) appear to treat the result as an index.
 */
90 static int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func)
94 for (i = 0; i < adapter->ahw->act_pci_func; i++) {
95 if (adapter->npars[i].pci_func == pci_func)
/*
 * Busy-poll the CDRP CRB register until the firmware posts a response or
 * QLCNIC_OS_CRB_RETRY_COUNT iterations elapse (timeout returns
 * QLCNIC_CDRP_RSP_TIMEOUT).
 */
103 qlcnic_poll_rsp(struct qlcnic_adapter *adapter)
109 /* give at least 1ms for firmware to respond */
112 if (++timeout > QLCNIC_OS_CRB_RETRY_COUNT)
113 return QLCNIC_CDRP_RSP_TIMEOUT;
115 rsp = QLCRD32(adapter, QLCNIC_CDRP_CRB_OFFSET);
116 } while (!QLCNIC_CDRP_IS_RSP(rsp));
/*
 * Issue one mailbox command to 82xx firmware:
 *  1. write the signature, 2. write request args to the CDRP arg registers,
 *  3. kick the command, 4. poll for completion, 5. read back response args.
 * The whole sequence is serialized by the CRB API semaphore.
 * Returns rsp.arg[0]: QLCNIC_RCODE_SUCCESS, a firmware failure code, or
 * QLCNIC_RCODE_TIMEOUT.
 */
121 int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
122 struct qlcnic_cmd_args *cmd)
127 struct pci_dev *pdev = adapter->pdev;
128 struct qlcnic_hardware_context *ahw = adapter->ahw;
130 signature = qlcnic_get_cmd_signature(ahw);
132 /* Acquire semaphore before accessing CRB */
133 if (qlcnic_api_lock(adapter)) {
134 cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT;
135 return cmd->rsp.arg[0];
138 QLCWR32(adapter, QLCNIC_SIGN_CRB_OFFSET, signature);
/*
 * NOTE(review): this loop writes QLCNIC_CDRP_MAX_ARGS - 1 request args
 * regardless of cmd->req.num — appears to rely on req.arg being sized at
 * least QLCNIC_CDRP_MAX_ARGS for every command; verify against the table.
 */
139 for (i = 1; i < QLCNIC_CDRP_MAX_ARGS; i++)
140 QLCWR32(adapter, QLCNIC_CDRP_ARG(i), cmd->req.arg[i]);
/* Writing the command code to the CRB offset kicks the firmware. */
141 QLCWR32(adapter, QLCNIC_CDRP_CRB_OFFSET,
142 QLCNIC_CDRP_FORM_CMD(cmd->req.arg[0]));
143 rsp = qlcnic_poll_rsp(adapter);
145 if (rsp == QLCNIC_CDRP_RSP_TIMEOUT) {
146 dev_err(&pdev->dev, "card response timeout.\n");
147 cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT;
148 } else if (rsp == QLCNIC_CDRP_RSP_FAIL) {
/* On failure, ARG(1) carries the firmware error code. */
149 cmd->rsp.arg[0] = QLCRD32(adapter, QLCNIC_CDRP_ARG(1));
150 dev_err(&pdev->dev, "failed card response code:0x%x\n",
152 } else if (rsp == QLCNIC_CDRP_RSP_OK)
153 cmd->rsp.arg[0] = QLCNIC_RCODE_SUCCESS;
/* Read back the remaining response args even on failure paths. */
155 for (i = 1; i < cmd->rsp.num; i++)
156 cmd->rsp.arg[i] = QLCRD32(adapter, QLCNIC_CDRP_ARG(i));
158 /* Release semaphore */
159 qlcnic_api_unlock(adapter);
160 return cmd->rsp.arg[0];
/*
 * Fetch the firmware minidump template header into a 4 KiB DMA-coherent
 * buffer via QLCNIC_CMD_GET_TEMP_HDR.  The firmware is given the buffer's
 * DMA address (LSD/MSD split) and size in the request args.
 * NOTE(review): the code that consumes the fetched header between the
 * command issue and the free is not visible in this chunk.
 */
163 int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
167 struct qlcnic_cmd_args cmd;
168 dma_addr_t tmp_addr_t = 0;
170 tmp_addr = dma_alloc_coherent(&adapter->pdev->dev, 0x1000,
171 &tmp_addr_t, GFP_KERNEL);
173 dev_err(&adapter->pdev->dev,
174 "Can't get memory for FW dump template\n");
178 if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_TEMP_HDR)) {
183 cmd.req.arg[1] = LSD(tmp_addr_t);
184 cmd.req.arg[2] = MSD(tmp_addr_t);
185 cmd.req.arg[3] = 0x1000;
186 err = qlcnic_issue_cmd(adapter, &cmd);
189 qlcnic_free_mbx_args(&cmd);
192 dma_free_coherent(&adapter->pdev->dev, 0x1000, tmp_addr, tmp_addr_t);
/*
 * Program the firmware MTU for the active receive context
 * (QLCNIC_CMD_SET_MTU with the context id and new MTU).  Only valid while
 * the RX context is in the ACTIVE state.
 */
198 qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu)
201 struct qlcnic_cmd_args cmd;
202 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
204 if (recv_ctx->state != QLCNIC_HOST_CTX_STATE_ACTIVE)
206 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_MTU);
207 cmd.req.arg[1] = recv_ctx->context_id;
208 cmd.req.arg[2] = mtu;
210 err = qlcnic_issue_cmd(adapter, &cmd);
212 dev_err(&adapter->pdev->dev, "Failed to set mtu\n");
215 qlcnic_free_mbx_args(&cmd);
/*
 * Create the firmware receive context.  Builds a host request
 * (qlcnic_hostrq_rx_ctx) describing every RDS (receive descriptor) and SDS
 * (status descriptor) ring in DMA-coherent memory, issues
 * QLCNIC_CMD_CREATE_RX_CTX with the request's DMA address, then parses the
 * card response to map each ring's producer/consumer/interrupt CRB
 * registers into pci_base0-relative pointers.
 * All request fields are converted to little-endian for the firmware.
 */
219 int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
222 struct qlcnic_hostrq_rx_ctx *prq;
223 struct qlcnic_cardrsp_rx_ctx *prsp;
224 struct qlcnic_hostrq_rds_ring *prq_rds;
225 struct qlcnic_hostrq_sds_ring *prq_sds;
226 struct qlcnic_cardrsp_rds_ring *prsp_rds;
227 struct qlcnic_cardrsp_sds_ring *prsp_sds;
228 struct qlcnic_host_rds_ring *rds_ring;
229 struct qlcnic_host_sds_ring *sds_ring;
230 struct qlcnic_cmd_args cmd;
232 dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
235 u8 i, nrds_rings, nsds_rings;
237 size_t rq_size, rsp_size;
238 u32 cap, reg, val, reg2;
241 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
243 nrds_rings = adapter->max_rds_rings;
244 nsds_rings = adapter->max_sds_rings;
/* Request/response sizes scale with the number of rings described. */
247 SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx, nrds_rings,
250 SIZEOF_CARDRSP_RX(struct qlcnic_cardrsp_rx_ctx, nrds_rings,
253 addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
254 &hostrq_phys_addr, GFP_KERNEL);
259 addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
260 &cardrsp_phys_addr, GFP_KERNEL);
/* Tell firmware where to write its response. */
267 prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr);
269 cap = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN
270 | QLCNIC_CAP0_VALIDOFF);
271 cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS);
/* VALIDOFF: firmware only trusts fields up to this structure offset. */
273 temp_u16 = offsetof(struct qlcnic_hostrq_rx_ctx, msix_handler);
274 prq->valid_field_offset = cpu_to_le16(temp_u16);
275 prq->txrx_sds_binding = nsds_rings - 1;
277 prq->capabilities[0] = cpu_to_le32(cap);
278 prq->host_int_crb_mode =
279 cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);
280 prq->host_rds_crb_mode =
281 cpu_to_le32(QLCNIC_HOST_RDS_CRB_MODE_UNIQUE);
283 prq->num_rds_rings = cpu_to_le16(nrds_rings);
284 prq->num_sds_rings = cpu_to_le16(nsds_rings);
/* RDS ring descriptors start at data[0]; SDS descriptors follow them. */
285 prq->rds_ring_offset = 0;
287 val = le32_to_cpu(prq->rds_ring_offset) +
288 (sizeof(struct qlcnic_hostrq_rds_ring) * nrds_rings);
289 prq->sds_ring_offset = cpu_to_le32(val);
291 prq_rds = (struct qlcnic_hostrq_rds_ring *)(prq->data +
292 le32_to_cpu(prq->rds_ring_offset));
/* Describe each host RDS ring: DMA base, length, kind, buffer size. */
294 for (i = 0; i < nrds_rings; i++) {
296 rds_ring = &recv_ctx->rds_rings[i];
297 rds_ring->producer = 0;
299 prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr);
300 prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc);
301 prq_rds[i].ring_kind = cpu_to_le32(i);
302 prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size);
305 prq_sds = (struct qlcnic_hostrq_sds_ring *)(prq->data +
306 le32_to_cpu(prq->sds_ring_offset));
/* Describe each host SDS ring; consumer reset and ring memory cleared. */
308 for (i = 0; i < nsds_rings; i++) {
310 sds_ring = &recv_ctx->sds_rings[i];
311 sds_ring->consumer = 0;
312 memset(sds_ring->desc_head, 0, STATUS_DESC_RINGSIZE(sds_ring));
314 prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr);
315 prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc);
316 prq_sds[i].msi_index = cpu_to_le16(i);
319 phys_addr = hostrq_phys_addr;
320 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_RX_CTX);
321 cmd.req.arg[1] = MSD(phys_addr);
322 cmd.req.arg[2] = LSD(phys_addr);
323 cmd.req.arg[3] = rq_size;
324 err = qlcnic_issue_cmd(adapter, &cmd);
326 dev_err(&adapter->pdev->dev,
327 "Failed to create rx ctx in firmware%d\n", err);
/* Parse response: resolve each RDS ring's producer CRB register. */
331 prsp_rds = ((struct qlcnic_cardrsp_rds_ring *)
332 &prsp->data[le32_to_cpu(prsp->rds_ring_offset)]);
334 for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) {
335 rds_ring = &recv_ctx->rds_rings[i];
337 reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
338 rds_ring->crb_rcv_producer = adapter->ahw->pci_base0 + reg;
/* Resolve each SDS ring's consumer and interrupt-mask CRB registers. */
341 prsp_sds = ((struct qlcnic_cardrsp_sds_ring *)
342 &prsp->data[le32_to_cpu(prsp->sds_ring_offset)]);
344 for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) {
345 sds_ring = &recv_ctx->sds_rings[i];
347 reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
348 reg2 = le32_to_cpu(prsp_sds[i].interrupt_crb);
350 sds_ring->crb_sts_consumer = adapter->ahw->pci_base0 + reg;
351 sds_ring->crb_intr_mask = adapter->ahw->pci_base0 + reg2;
354 recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
355 recv_ctx->context_id = le16_to_cpu(prsp->context_id);
356 recv_ctx->virt_port = prsp->virt_port;
/* Both DMA buffers are transient: freed whether the command succeeded. */
359 dma_free_coherent(&adapter->pdev->dev, rsp_size, prsp,
361 qlcnic_free_mbx_args(&cmd);
363 dma_free_coherent(&adapter->pdev->dev, rq_size, prq, hostrq_phys_addr);
/*
 * Tear down the firmware receive context (QLCNIC_CMD_DESTROY_RX_CTX) and
 * mark the host-side context state FREED.  Failure is logged but the host
 * state is updated regardless (presumably — intervening lines not visible).
 */
368 qlcnic_fw_cmd_destroy_rx_ctx(struct qlcnic_adapter *adapter)
371 struct qlcnic_cmd_args cmd;
372 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
374 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_RX_CTX);
375 cmd.req.arg[1] = recv_ctx->context_id;
376 err = qlcnic_issue_cmd(adapter, &cmd);
378 dev_err(&adapter->pdev->dev,
379 "Failed to destroy rx ctx in firmware\n");
381 recv_ctx->state = QLCNIC_HOST_CTX_STATE_FREED;
382 qlcnic_free_mbx_args(&cmd);
/*
 * Create one firmware transmit context for @tx_ring.  Resets the host-side
 * ring indices, builds the request (CDS ring DMA address/size, consumer
 * write-back address) in DMA-coherent memory, issues
 * QLCNIC_CMD_CREATE_TX_CTX, and on success resolves the ring's producer
 * CRB register and firmware context id from the response.
 */
385 int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
386 struct qlcnic_host_tx_ring *tx_ring,
389 struct qlcnic_hostrq_tx_ctx *prq;
390 struct qlcnic_hostrq_cds_ring *prq_cds;
391 struct qlcnic_cardrsp_tx_ctx *prsp;
392 void *rq_addr, *rsp_addr;
393 size_t rq_size, rsp_size;
395 struct qlcnic_cmd_args cmd;
398 dma_addr_t rq_phys_addr, rsp_phys_addr;
400 /* reset host resources */
401 tx_ring->producer = 0;
402 tx_ring->sw_consumer = 0;
403 *(tx_ring->hw_consumer) = 0;
405 rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx);
406 rq_addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
407 &rq_phys_addr, GFP_KERNEL);
411 rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx);
412 rsp_addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
413 &rsp_phys_addr, GFP_KERNEL);
419 memset(rq_addr, 0, rq_size);
422 memset(rsp_addr, 0, rsp_size);
425 prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);
427 temp = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN |
429 prq->capabilities[0] = cpu_to_le32(temp);
431 prq->host_int_crb_mode =
432 cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);
435 prq->interrupt_ctl = 0;
/* Firmware writes the hardware consumer index back to this address. */
436 prq->cmd_cons_dma_addr = cpu_to_le64(tx_ring->hw_cons_phys_addr);
438 prq_cds = &prq->cds_ring;
440 prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr);
441 prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc);
443 phys_addr = rq_phys_addr;
445 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_TX_CTX);
446 cmd.req.arg[1] = MSD(phys_addr);
447 cmd.req.arg[2] = LSD(phys_addr);
448 cmd.req.arg[3] = rq_size;
449 err = qlcnic_issue_cmd(adapter, &cmd);
451 if (err == QLCNIC_RCODE_SUCCESS) {
/* Map the firmware-assigned producer CRB into pci_base0 space. */
452 temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
453 tx_ring->crb_cmd_producer = adapter->ahw->pci_base0 + temp;
454 tx_ring->ctx_id = le16_to_cpu(prsp->context_id);
456 dev_err(&adapter->pdev->dev,
457 "Failed to create tx ctx in firmware%d\n", err);
/* Request/response DMA buffers are transient; freed on all paths. */
461 dma_free_coherent(&adapter->pdev->dev, rsp_size, rsp_addr,
465 dma_free_coherent(&adapter->pdev->dev, rq_size, rq_addr, rq_phys_addr);
466 qlcnic_free_mbx_args(&cmd);
/*
 * Tear down one firmware transmit context (QLCNIC_CMD_DESTROY_TX_CTX,
 * keyed by the ring's firmware-assigned ctx_id).  Failure is only logged.
 */
472 qlcnic_fw_cmd_destroy_tx_ctx(struct qlcnic_adapter *adapter,
473 struct qlcnic_host_tx_ring *tx_ring)
475 struct qlcnic_cmd_args cmd;
477 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_TX_CTX);
478 cmd.req.arg[1] = tx_ring->ctx_id;
479 if (qlcnic_issue_cmd(adapter, &cmd))
480 dev_err(&adapter->pdev->dev,
481 "Failed to destroy tx ctx in firmware\n");
482 qlcnic_free_mbx_args(&cmd);
/*
 * Push a raw port configuration word to firmware via
 * QLCNIC_CMD_CONFIG_PORT.  Returns the mailbox status code.
 */
486 qlcnic_fw_cmd_set_port(struct qlcnic_adapter *adapter, u32 config)
489 struct qlcnic_cmd_args cmd;
491 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_PORT);
492 cmd.req.arg[1] = config;
493 err = qlcnic_issue_cmd(adapter, &cmd);
494 qlcnic_free_mbx_args(&cmd);
/*
 * Allocate all DMA-coherent ring memory for the adapter: the TX hardware
 * consumer write-back word, the TX descriptor ring, and every RDS/SDS
 * descriptor ring.  On any allocation failure, everything already
 * allocated is released via qlcnic_free_hw_resources() (error label not
 * visible in this chunk).
 */
498 int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
503 struct qlcnic_recv_context *recv_ctx;
504 struct qlcnic_host_rds_ring *rds_ring;
505 struct qlcnic_host_sds_ring *sds_ring;
506 struct qlcnic_host_tx_ring *tx_ring;
508 struct pci_dev *pdev = adapter->pdev;
510 recv_ctx = adapter->recv_ctx;
511 tx_ring = adapter->tx_ring;
/* One u32 the firmware DMAs the TX consumer index into. */
513 tx_ring->hw_consumer = (__le32 *) dma_alloc_coherent(&pdev->dev,
514 sizeof(u32), &tx_ring->hw_cons_phys_addr, GFP_KERNEL);
515 if (tx_ring->hw_consumer == NULL) {
516 dev_err(&pdev->dev, "failed to allocate tx consumer\n");
521 addr = dma_alloc_coherent(&pdev->dev, TX_DESC_RINGSIZE(tx_ring),
522 &tx_ring->phys_addr, GFP_KERNEL);
525 dev_err(&pdev->dev, "failed to allocate tx desc ring\n");
530 tx_ring->desc_head = addr;
532 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
533 rds_ring = &recv_ctx->rds_rings[ring];
534 addr = dma_alloc_coherent(&adapter->pdev->dev,
535 RCV_DESC_RINGSIZE(rds_ring),
536 &rds_ring->phys_addr, GFP_KERNEL);
539 "failed to allocate rds ring [%d]\n", ring);
543 rds_ring->desc_head = addr;
547 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
548 sds_ring = &recv_ctx->sds_rings[ring];
550 addr = dma_alloc_coherent(&adapter->pdev->dev,
551 STATUS_DESC_RINGSIZE(sds_ring),
552 &sds_ring->phys_addr, GFP_KERNEL);
555 "failed to allocate sds ring [%d]\n", ring);
559 sds_ring->desc_head = addr;
/* Unwind: free every ring allocated so far. */
565 qlcnic_free_hw_resources(adapter);
/*
 * Create all firmware contexts: optional function-level reset (FLR) if
 * flagged, then the RX context, then one TX context per driver TX ring.
 * On TX-context failure, the RX context and every TX context created so
 * far are destroyed before returning.  Sets __QLCNIC_FW_ATTACHED on
 * success.
 */
569 int qlcnic_fw_create_ctx(struct qlcnic_adapter *dev)
573 if (dev->flags & QLCNIC_NEED_FLR) {
574 pci_reset_function(dev->pdev);
575 dev->flags &= ~QLCNIC_NEED_FLR;
578 err = qlcnic_fw_cmd_create_rx_ctx(dev);
582 for (ring = 0; ring < dev->max_drv_tx_rings; ring++) {
583 err = qlcnic_fw_cmd_create_tx_ctx(dev,
/* Error unwind: tear down RX ctx and the TX ctxs already created. */
587 qlcnic_fw_cmd_destroy_rx_ctx(dev);
591 for (i = 0; i < ring; i++)
592 qlcnic_fw_cmd_destroy_tx_ctx(dev,
599 set_bit(__QLCNIC_FW_ATTACHED, &dev->state);
/*
 * Destroy all firmware contexts if (and only if) __QLCNIC_FW_ATTACHED is
 * set; test_and_clear_bit makes the teardown idempotent.
 */
603 void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter)
607 if (test_and_clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) {
608 qlcnic_fw_cmd_destroy_rx_ctx(adapter);
609 for (ring = 0; ring < adapter->max_drv_tx_rings; ring++)
610 qlcnic_fw_cmd_destroy_tx_ctx(adapter,
611 &adapter->tx_ring[ring]);
612 /* Allow dma queues to drain after context reset */
/*
 * Free every DMA-coherent ring buffer allocated by
 * qlcnic_alloc_hw_resources().  Each pointer is NULL-checked and NULLed
 * after freeing, so this is safe to call on a partially-allocated adapter
 * (it doubles as the allocation-failure unwind path) and safe to call
 * twice.
 */
617 void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
619 struct qlcnic_recv_context *recv_ctx;
620 struct qlcnic_host_rds_ring *rds_ring;
621 struct qlcnic_host_sds_ring *sds_ring;
622 struct qlcnic_host_tx_ring *tx_ring;
625 recv_ctx = adapter->recv_ctx;
627 tx_ring = adapter->tx_ring;
628 if (tx_ring->hw_consumer != NULL) {
629 dma_free_coherent(&adapter->pdev->dev,
631 tx_ring->hw_consumer,
632 tx_ring->hw_cons_phys_addr);
633 tx_ring->hw_consumer = NULL;
636 if (tx_ring->desc_head != NULL) {
637 dma_free_coherent(&adapter->pdev->dev,
638 TX_DESC_RINGSIZE(tx_ring),
639 tx_ring->desc_head, tx_ring->phys_addr);
640 tx_ring->desc_head = NULL;
643 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
644 rds_ring = &recv_ctx->rds_rings[ring];
646 if (rds_ring->desc_head != NULL) {
647 dma_free_coherent(&adapter->pdev->dev,
648 RCV_DESC_RINGSIZE(rds_ring),
650 rds_ring->phys_addr);
651 rds_ring->desc_head = NULL;
655 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
656 sds_ring = &recv_ctx->sds_rings[ring];
658 if (sds_ring->desc_head != NULL) {
659 dma_free_coherent(&adapter->pdev->dev,
660 STATUS_DESC_RINGSIZE(sds_ring),
662 sds_ring->phys_addr);
663 sds_ring->desc_head = NULL;
/*
 * Query the permanent MAC address for this PCI function
 * (QLCNIC_CMD_MAC_ADDRESS; BIT_8 in arg[1] appears to select the
 * "get" variant — confirm against the firmware spec).  The firmware
 * returns the address split across two response words: rsp.arg[2] holds
 * the top two bytes, rsp.arg[1] the low four, which the loops below
 * unpack big-endian-style into mac[0..5].
 */
669 int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
672 struct qlcnic_cmd_args cmd;
673 u32 mac_low, mac_high;
675 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MAC_ADDRESS);
676 cmd.req.arg[1] = adapter->ahw->pci_func | BIT_8;
677 err = qlcnic_issue_cmd(adapter, &cmd);
679 if (err == QLCNIC_RCODE_SUCCESS) {
680 mac_low = cmd.rsp.arg[1];
681 mac_high = cmd.rsp.arg[2];
683 for (i = 0; i < 2; i++)
684 mac[i] = (u8) (mac_high >> ((1 - i) * 8));
685 for (i = 2; i < 6; i++)
686 mac[i] = (u8) (mac_low >> ((5 - i) * 8));
688 dev_err(&adapter->pdev->dev,
689 "Failed to get mac address%d\n", err);
692 qlcnic_free_mbx_args(&cmd);
696 /* Get info of a NIC partition */
/*
 * Fetch partition info for @func_id into @npar_info.  The firmware DMAs a
 * little-endian qlcnic_info_le into a coherent buffer (address in
 * arg[1]/arg[2], function id and size packed into arg[3]); the fields are
 * then byte-swapped into the host-order qlcnic_info structure.
 */
697 int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *adapter,
698 struct qlcnic_info *npar_info, u8 func_id)
701 dma_addr_t nic_dma_t;
702 const struct qlcnic_info_le *nic_info;
704 struct qlcnic_cmd_args cmd;
705 size_t nic_size = sizeof(struct qlcnic_info_le);
707 nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
708 &nic_dma_t, GFP_KERNEL);
711 memset(nic_info_addr, 0, nic_size);
713 nic_info = nic_info_addr;
715 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO);
716 cmd.req.arg[1] = MSD(nic_dma_t);
717 cmd.req.arg[2] = LSD(nic_dma_t);
718 cmd.req.arg[3] = (func_id << 16 | nic_size);
719 err = qlcnic_issue_cmd(adapter, &cmd);
720 if (err != QLCNIC_RCODE_SUCCESS) {
721 dev_err(&adapter->pdev->dev,
722 "Failed to get nic info%d\n", err);
/* Convert firmware's little-endian reply into host byte order. */
725 npar_info->pci_func = le16_to_cpu(nic_info->pci_func);
726 npar_info->op_mode = le16_to_cpu(nic_info->op_mode);
727 npar_info->min_tx_bw = le16_to_cpu(nic_info->min_tx_bw);
728 npar_info->max_tx_bw = le16_to_cpu(nic_info->max_tx_bw);
729 npar_info->phys_port = le16_to_cpu(nic_info->phys_port);
730 npar_info->switch_mode = le16_to_cpu(nic_info->switch_mode);
731 npar_info->max_tx_ques = le16_to_cpu(nic_info->max_tx_ques);
732 npar_info->max_rx_ques = le16_to_cpu(nic_info->max_rx_ques);
733 npar_info->capabilities = le32_to_cpu(nic_info->capabilities);
734 npar_info->max_mtu = le16_to_cpu(nic_info->max_mtu);
737 dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr,
739 qlcnic_free_mbx_args(&cmd);
744 /* Configure a NIC partition */
/*
 * Write partition configuration from @nic to firmware.  Mirror image of
 * qlcnic_82xx_get_nic_info(): host-order fields are converted to a
 * little-endian qlcnic_info_le in a DMA buffer, then
 * QLCNIC_CMD_SET_NIC_INFO is issued with the buffer address and
 * (pci_func << 16 | size) in arg[3].  Management-function only.
 */
745 int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *adapter,
746 struct qlcnic_info *nic)
749 dma_addr_t nic_dma_t;
751 struct qlcnic_cmd_args cmd;
752 struct qlcnic_info_le *nic_info;
753 size_t nic_size = sizeof(struct qlcnic_info_le);
/* Only the management function may reconfigure partitions. */
755 if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
758 nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
759 &nic_dma_t, GFP_KERNEL);
763 memset(nic_info_addr, 0, nic_size);
764 nic_info = nic_info_addr;
766 nic_info->pci_func = cpu_to_le16(nic->pci_func);
767 nic_info->op_mode = cpu_to_le16(nic->op_mode);
768 nic_info->phys_port = cpu_to_le16(nic->phys_port);
769 nic_info->switch_mode = cpu_to_le16(nic->switch_mode);
770 nic_info->capabilities = cpu_to_le32(nic->capabilities);
/* max_mac_filters is a single byte — no endian conversion needed. */
771 nic_info->max_mac_filters = nic->max_mac_filters;
772 nic_info->max_tx_ques = cpu_to_le16(nic->max_tx_ques);
773 nic_info->max_rx_ques = cpu_to_le16(nic->max_rx_ques);
774 nic_info->min_tx_bw = cpu_to_le16(nic->min_tx_bw);
775 nic_info->max_tx_bw = cpu_to_le16(nic->max_tx_bw);
777 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO);
778 cmd.req.arg[1] = MSD(nic_dma_t);
779 cmd.req.arg[2] = LSD(nic_dma_t);
780 cmd.req.arg[3] = ((nic->pci_func << 16) | nic_size);
781 err = qlcnic_issue_cmd(adapter, &cmd);
783 if (err != QLCNIC_RCODE_SUCCESS) {
784 dev_err(&adapter->pdev->dev,
785 "Failed to set nic info%d\n", err);
789 dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr,
791 qlcnic_free_mbx_args(&cmd);
796 /* Get PCI Info of a partition */
/*
 * Fetch per-function PCI info for all QLCNIC_MAX_PCI_FUNC functions in one
 * command.  The firmware fills an array of qlcnic_pci_info_le in a DMA
 * buffer; each entry is byte-swapped into the caller's pci_info array.
 * Side effect: recounts ahw->act_pci_func (number of NIC-type functions).
 * @pci_info must have room for QLCNIC_MAX_PCI_FUNC entries.
 */
797 int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter,
798 struct qlcnic_pci_info *pci_info)
801 struct qlcnic_cmd_args cmd;
802 dma_addr_t pci_info_dma_t;
803 struct qlcnic_pci_info_le *npar;
805 size_t npar_size = sizeof(struct qlcnic_pci_info_le);
806 size_t pci_size = npar_size * QLCNIC_MAX_PCI_FUNC;
808 pci_info_addr = dma_alloc_coherent(&adapter->pdev->dev, pci_size,
809 &pci_info_dma_t, GFP_KERNEL);
812 memset(pci_info_addr, 0, pci_size);
814 npar = pci_info_addr;
815 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PCI_INFO);
816 cmd.req.arg[1] = MSD(pci_info_dma_t);
817 cmd.req.arg[2] = LSD(pci_info_dma_t);
818 cmd.req.arg[3] = pci_size;
819 err = qlcnic_issue_cmd(adapter, &cmd);
/* Recount active NIC functions while walking the reply. */
821 adapter->ahw->act_pci_func = 0;
822 if (err == QLCNIC_RCODE_SUCCESS) {
823 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++, npar++, pci_info++) {
824 pci_info->id = le16_to_cpu(npar->id);
825 pci_info->active = le16_to_cpu(npar->active);
826 pci_info->type = le16_to_cpu(npar->type);
827 if (pci_info->type == QLCNIC_TYPE_NIC)
828 adapter->ahw->act_pci_func++;
829 pci_info->default_port =
830 le16_to_cpu(npar->default_port);
831 pci_info->tx_min_bw =
832 le16_to_cpu(npar->tx_min_bw);
833 pci_info->tx_max_bw =
834 le16_to_cpu(npar->tx_max_bw);
835 memcpy(pci_info->mac, npar->mac, ETH_ALEN);
838 dev_err(&adapter->pdev->dev,
839 "Failed to get PCI Info%d\n", err);
843 dma_free_coherent(&adapter->pdev->dev, pci_size, pci_info_addr,
845 qlcnic_free_mbx_args(&cmd);
850 /* Configure eSwitch for port mirroring */
/*
 * Enable/disable mirroring of @pci_func's traffic on eSwitch @id
 * (QLCNIC_CMD_SET_PORTMIRRORING).  arg[1] layout: eSwitch id in the low
 * bits, BIT_4 = enable, destination pci_func in bits 15:8.
 * Management-function only, and the target eSwitch must be enabled.
 */
851 int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id,
852 u8 enable_mirroring, u8 pci_func)
856 struct qlcnic_cmd_args cmd;
858 if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC ||
859 !(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE))
862 arg1 = id | (enable_mirroring ? BIT_4 : 0);
863 arg1 |= pci_func << 8;
865 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_PORTMIRRORING);
866 cmd.req.arg[1] = arg1;
867 err = qlcnic_issue_cmd(adapter, &cmd);
869 if (err != QLCNIC_RCODE_SUCCESS)
870 dev_err(&adapter->pdev->dev,
871 "Failed to configure port mirroring%d on eswitch:%d\n",
874 dev_info(&adapter->pdev->dev,
875 "Configured eSwitch %d for port mirroring:%d\n",
877 qlcnic_free_mbx_args(&cmd);
/*
 * Retrieve per-port eSwitch statistics for @func into @esw_stats.
 * A non-management function may only query its own stats.  The firmware
 * DMAs a little-endian qlcnic_esw_stats_le into a coherent buffer; arg[1]
 * packs func | version<<8 | PORT-query<<12 | rx_tx<<15 | size<<16.
 * @rx_tx selects the RX or TX counter set.
 */
882 int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
883 const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats) {
885 size_t stats_size = sizeof(struct qlcnic_esw_stats_le);
886 struct qlcnic_esw_stats_le *stats;
887 dma_addr_t stats_dma_t;
890 struct qlcnic_cmd_args cmd;
893 if (esw_stats == NULL)
/* Privilege check: non-mgmt functions may only read their own port. */
896 if ((adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) &&
897 (func != adapter->ahw->pci_func)) {
898 dev_err(&adapter->pdev->dev,
899 "Not privilege to query stats for func=%d", func);
903 stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
904 &stats_dma_t, GFP_KERNEL);
906 dev_err(&adapter->pdev->dev, "Unable to allocate memory\n");
909 memset(stats_addr, 0, stats_size);
911 arg1 = func | QLCNIC_STATS_VERSION << 8 | QLCNIC_STATS_PORT << 12;
912 arg1 |= rx_tx << 15 | stats_size << 16;
914 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_ESWITCH_STATS);
915 cmd.req.arg[1] = arg1;
916 cmd.req.arg[2] = MSD(stats_dma_t);
917 cmd.req.arg[3] = LSD(stats_dma_t);
918 err = qlcnic_issue_cmd(adapter, &cmd);
/* Unpack the little-endian reply into host byte order. */
922 esw_stats->context_id = le16_to_cpu(stats->context_id);
923 esw_stats->version = le16_to_cpu(stats->version);
924 esw_stats->size = le16_to_cpu(stats->size);
925 esw_stats->multicast_frames =
926 le64_to_cpu(stats->multicast_frames);
927 esw_stats->broadcast_frames =
928 le64_to_cpu(stats->broadcast_frames);
929 esw_stats->unicast_frames = le64_to_cpu(stats->unicast_frames);
930 esw_stats->dropped_frames = le64_to_cpu(stats->dropped_frames);
931 esw_stats->local_frames = le64_to_cpu(stats->local_frames);
932 esw_stats->errors = le64_to_cpu(stats->errors);
933 esw_stats->numbytes = le64_to_cpu(stats->numbytes);
936 dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr,
938 qlcnic_free_mbx_args(&cmd);
943 /* This routine will retrieve the MAC statistics from firmware */
/*
 * QLCNIC_CMD_GET_MAC_STATS: firmware DMAs a little-endian
 * qlcnic_mac_statistics_le into a coherent buffer (size in arg[1] bits
 * 31:16, buffer address in arg[2]/arg[3]), which is then byte-swapped
 * field-by-field into @mac_stats.
 */
944 int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter,
945 struct qlcnic_mac_statistics *mac_stats)
947 struct qlcnic_mac_statistics_le *stats;
948 struct qlcnic_cmd_args cmd;
949 size_t stats_size = sizeof(struct qlcnic_mac_statistics_le);
950 dma_addr_t stats_dma_t;
954 if (mac_stats == NULL)
957 stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
958 &stats_dma_t, GFP_KERNEL);
960 dev_err(&adapter->pdev->dev,
961 "%s: Unable to allocate memory.\n", __func__);
964 memset(stats_addr, 0, stats_size);
965 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_MAC_STATS);
966 cmd.req.arg[1] = stats_size << 16;
967 cmd.req.arg[2] = MSD(stats_dma_t);
968 cmd.req.arg[3] = LSD(stats_dma_t);
969 err = qlcnic_issue_cmd(adapter, &cmd);
/* Success path: convert each counter from little-endian. */
972 mac_stats->mac_tx_frames = le64_to_cpu(stats->mac_tx_frames);
973 mac_stats->mac_tx_bytes = le64_to_cpu(stats->mac_tx_bytes);
974 mac_stats->mac_tx_mcast_pkts =
975 le64_to_cpu(stats->mac_tx_mcast_pkts);
976 mac_stats->mac_tx_bcast_pkts =
977 le64_to_cpu(stats->mac_tx_bcast_pkts);
978 mac_stats->mac_rx_frames = le64_to_cpu(stats->mac_rx_frames);
979 mac_stats->mac_rx_bytes = le64_to_cpu(stats->mac_rx_bytes);
980 mac_stats->mac_rx_mcast_pkts =
981 le64_to_cpu(stats->mac_rx_mcast_pkts);
982 mac_stats->mac_rx_length_error =
983 le64_to_cpu(stats->mac_rx_length_error);
984 mac_stats->mac_rx_length_small =
985 le64_to_cpu(stats->mac_rx_length_small);
986 mac_stats->mac_rx_length_large =
987 le64_to_cpu(stats->mac_rx_length_large);
988 mac_stats->mac_rx_jabber = le64_to_cpu(stats->mac_rx_jabber);
989 mac_stats->mac_rx_dropped = le64_to_cpu(stats->mac_rx_dropped);
990 mac_stats->mac_rx_crc_error = le64_to_cpu(stats->mac_rx_crc_error);
992 dev_err(&adapter->pdev->dev,
993 "%s: Get mac stats failed, err=%d.\n", __func__, err);
996 dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr,
999 qlcnic_free_mbx_args(&cmd);
/*
 * Aggregate eSwitch statistics across every active NPAR attached to
 * physical port @eswitch: fields start at QLCNIC_STATS_NOT_AVAIL and each
 * per-port sample is folded in via QLCNIC_ADD_ESW_STATS.
 * Management-function only.
 */
1004 int qlcnic_get_eswitch_stats(struct qlcnic_adapter *adapter, const u8 eswitch,
1005 const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats) {
1007 struct __qlcnic_esw_statistics port_stats;
1011 if (esw_stats == NULL)
1013 if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
1015 if (adapter->npars == NULL)
/*
 * NOTE(review): sizeof(u64) zeroes only the first 8 bytes of
 * *esw_stats, not the whole struct — looks suspicious, though every
 * field is overwritten explicitly below; confirm intent.
 */
1018 memset(esw_stats, 0, sizeof(u64));
1019 esw_stats->unicast_frames = QLCNIC_STATS_NOT_AVAIL;
1020 esw_stats->multicast_frames = QLCNIC_STATS_NOT_AVAIL;
1021 esw_stats->broadcast_frames = QLCNIC_STATS_NOT_AVAIL;
1022 esw_stats->dropped_frames = QLCNIC_STATS_NOT_AVAIL;
1023 esw_stats->errors = QLCNIC_STATS_NOT_AVAIL;
1024 esw_stats->local_frames = QLCNIC_STATS_NOT_AVAIL;
1025 esw_stats->numbytes = QLCNIC_STATS_NOT_AVAIL;
1026 esw_stats->context_id = eswitch;
1028 for (i = 0; i < adapter->ahw->act_pci_func; i++) {
/* Only NPARs attached to the requested physical port contribute. */
1029 if (adapter->npars[i].phy_port != eswitch)
1032 memset(&port_stats, 0, sizeof(struct __qlcnic_esw_statistics));
1033 if (qlcnic_get_port_stats(adapter, adapter->npars[i].pci_func,
1034 rx_tx, &port_stats))
1037 esw_stats->size = port_stats.size;
1038 esw_stats->version = port_stats.version;
1039 QLCNIC_ADD_ESW_STATS(esw_stats->unicast_frames,
1040 port_stats.unicast_frames);
1041 QLCNIC_ADD_ESW_STATS(esw_stats->multicast_frames,
1042 port_stats.multicast_frames);
1043 QLCNIC_ADD_ESW_STATS(esw_stats->broadcast_frames,
1044 port_stats.broadcast_frames);
1045 QLCNIC_ADD_ESW_STATS(esw_stats->dropped_frames,
1046 port_stats.dropped_frames);
1047 QLCNIC_ADD_ESW_STATS(esw_stats->errors,
1049 QLCNIC_ADD_ESW_STATS(esw_stats->local_frames,
1050 port_stats.local_frames);
1051 QLCNIC_ADD_ESW_STATS(esw_stats->numbytes,
1052 port_stats.numbytes);
/*
 * Clear eSwitch statistics counters for a port or an eSwitch
 * (QLCNIC_CMD_GET_ESWITCH_STATS with BIT_14, which appears to be the
 * "clear" flag — confirm against the firmware spec).  Validates the
 * port index against the limit for the selected query type and rx_tx
 * against QLCNIC_QUERY_TX_COUNTER.  Management-function only.
 */
1058 int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, const u8 func_esw,
1059 const u8 port, const u8 rx_tx)
1063 struct qlcnic_cmd_args cmd;
1065 if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
/* Range-check @port: PCI-function space vs. XG-port space. */
1068 if (func_esw == QLCNIC_STATS_PORT) {
1069 if (port >= QLCNIC_MAX_PCI_FUNC)
1071 } else if (func_esw == QLCNIC_STATS_ESWITCH) {
1072 if (port >= QLCNIC_NIU_MAX_XG_PORTS)
1078 if (rx_tx > QLCNIC_QUERY_TX_COUNTER)
1081 arg1 = port | QLCNIC_STATS_VERSION << 8 | func_esw << 12;
1082 arg1 |= BIT_14 | rx_tx << 15;
1084 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_ESWITCH_STATS);
1085 cmd.req.arg[1] = arg1;
1086 err = qlcnic_issue_cmd(adapter, &cmd);
1087 qlcnic_free_mbx_args(&cmd);
/* Validation-failure path (label not visible in this chunk). */
1091 dev_err(&adapter->pdev->dev,
1092 "Invalid args func_esw %d port %d rx_ctx %d\n",
1093 func_esw, port, rx_tx);
/*
 * Low-level eSwitch port-config query.  @arg1 carries the request on
 * entry (pci_func in bits 15:8) and is overwritten with response word 1;
 * @arg2 receives response word 2.  Both callers below decode the bit
 * fields from these words.
 */
1098 __qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter,
1099 u32 *arg1, u32 *arg2)
1102 struct qlcnic_cmd_args cmd;
1104 pci_func = (*arg1 >> 8);
1106 qlcnic_alloc_mbx_args(&cmd, adapter,
1107 QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG);
1108 cmd.req.arg[1] = *arg1;
1109 err = qlcnic_issue_cmd(adapter, &cmd);
1110 *arg1 = cmd.rsp.arg[1];
1111 *arg2 = cmd.rsp.arg[2];
1112 qlcnic_free_mbx_args(&cmd);
1114 if (err == QLCNIC_RCODE_SUCCESS)
1115 dev_info(&adapter->pdev->dev,
1116 "eSwitch port config for pci func %d\n", pci_func);
1118 dev_err(&adapter->pdev->dev,
1119 "Failed to get eswitch port config for pci func %d\n",
1123 /* Configure eSwitch port
1124 op_mode = 0 for setting default port behavior
1125 op_mode = 1 for setting vlan id
1126 op_mode = 2 for deleting vlan id
1127 op_type = 0 for vlan_id
1128 op_type = 1 for port vlan_id
/*
 * Apply @esw_cfg to the firmware eSwitch: reads the current port config,
 * edits the arg1/arg2 bit fields according to op_mode (defaults / add
 * VLAN / delete VLAN), and writes it back with
 * QLCNIC_CMD_CONFIGURE_ESWITCH.  Bit meanings mirror the decode in
 * qlcnic_get_eswitch_port_config() (BIT_4 discard-tagged, BIT_6 promisc,
 * BIT_7 mac-override, VLAN id in bits 31:16, offloads in arg2).
 * Management-function only; pci_func must be a valid NPAR.
 */
1130 int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
1131 struct qlcnic_esw_func_cfg *esw_cfg)
1133 int err = -EIO, index;
1135 struct qlcnic_cmd_args cmd;
1138 if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
1140 pci_func = esw_cfg->pci_func;
1141 index = qlcnic_is_valid_nic_func(adapter, pci_func);
1144 arg1 = (adapter->npars[index].phy_port & BIT_0);
1145 arg1 |= (pci_func << 8);
/* Fetch current config, then rewrite the function field and mode bits. */
1147 if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2))
1149 arg1 &= ~(0x0ff << 8);
1150 arg1 |= (pci_func << 8);
1151 arg1 &= ~(BIT_2 | BIT_3);
1152 switch (esw_cfg->op_mode) {
1153 case QLCNIC_PORT_DEFAULTS:
1154 arg1 |= (BIT_4 | BIT_6 | BIT_7);
1155 arg2 |= (BIT_0 | BIT_1);
1156 if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO)
1157 arg2 |= (BIT_2 | BIT_3);
/* Clear each feature bit the caller did not request. */
1158 if (!(esw_cfg->discard_tagged))
1160 if (!(esw_cfg->promisc_mode))
1162 if (!(esw_cfg->mac_override))
1164 if (!(esw_cfg->mac_anti_spoof))
1166 if (!(esw_cfg->offload_flags & BIT_0))
1167 arg2 &= ~(BIT_1 | BIT_2 | BIT_3);
1168 if (!(esw_cfg->offload_flags & BIT_1))
1170 if (!(esw_cfg->offload_flags & BIT_2))
1173 case QLCNIC_ADD_VLAN:
1174 arg1 |= (BIT_2 | BIT_5);
1175 arg1 |= (esw_cfg->vlan_id << 16);
1177 case QLCNIC_DEL_VLAN:
1178 arg1 |= (BIT_3 | BIT_5);
1179 arg1 &= ~(0x0ffff << 16);
1185 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_ESWITCH);
1186 cmd.req.arg[1] = arg1;
1187 cmd.req.arg[2] = arg2;
1188 err = qlcnic_issue_cmd(adapter, &cmd);
1189 qlcnic_free_mbx_args(&cmd);
1191 if (err != QLCNIC_RCODE_SUCCESS)
1192 dev_err(&adapter->pdev->dev,
1193 "Failed to configure eswitch pci func %d\n", pci_func);
1195 dev_info(&adapter->pdev->dev,
1196 "Configured eSwitch for pci func %d\n", pci_func);
1202 qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter,
1203 struct qlcnic_esw_func_cfg *esw_cfg)
1209 if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC) {
1210 index = qlcnic_is_valid_nic_func(adapter, esw_cfg->pci_func);
1213 phy_port = adapter->npars[index].phy_port;
1215 phy_port = adapter->ahw->physical_port;
1218 arg1 |= (esw_cfg->pci_func << 8);
1219 if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2))
1222 esw_cfg->discard_tagged = !!(arg1 & BIT_4);
1223 esw_cfg->host_vlan_tag = !!(arg1 & BIT_5);
1224 esw_cfg->promisc_mode = !!(arg1 & BIT_6);
1225 esw_cfg->mac_override = !!(arg1 & BIT_7);
1226 esw_cfg->vlan_id = LSW(arg1 >> 16);
1227 esw_cfg->mac_anti_spoof = (arg2 & 0x1);
1228 esw_cfg->offload_flags = ((arg2 >> 1) & 0x7);