1 /*
2 * QLogic qlge NIC HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation
4 * See LICENSE.qlge for copyright and licensing details.
5 * Author: Linux qlge network device driver by
6 * Ron Mercer <ron.mercer@qlogic.com>
7 */
8 #include <linux/kernel.h>
9 #include <linux/bitops.h>
10 #include <linux/types.h>
11 #include <linux/module.h>
12 #include <linux/list.h>
13 #include <linux/pci.h>
14 #include <linux/dma-mapping.h>
15 #include <linux/pagemap.h>
16 #include <linux/sched.h>
17 #include <linux/slab.h>
18 #include <linux/dmapool.h>
19 #include <linux/mempool.h>
20 #include <linux/spinlock.h>
21 #include <linux/kthread.h>
22 #include <linux/interrupt.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
27 #include <linux/ipv6.h>
29 #include <linux/tcp.h>
30 #include <linux/udp.h>
31 #include <linux/if_arp.h>
32 #include <linux/if_ether.h>
33 #include <linux/netdevice.h>
34 #include <linux/etherdevice.h>
35 #include <linux/ethtool.h>
36 #include <linux/if_vlan.h>
37 #include <linux/skbuff.h>
38 #include <linux/delay.h>
40 #include <linux/vmalloc.h>
41 #include <linux/prefetch.h>
42 #include <net/ip6_checksum.h>
46 char qlge_driver_name[] = DRV_NAME;
47 const char qlge_driver_version[] = DRV_VERSION;
49 MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
50 MODULE_DESCRIPTION(DRV_STRING " ");
51 MODULE_LICENSE("GPL");
52 MODULE_VERSION(DRV_VERSION);
54 static const u32 default_msg =
55 NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
56 /* NETIF_MSG_TIMER | */
61 /* NETIF_MSG_TX_QUEUED | */
62 /* NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
63 /* NETIF_MSG_PKTDATA | */
64 NETIF_MSG_HW | NETIF_MSG_WOL | 0;
66 static int debug = -1; /* defaults above */
67 module_param(debug, int, 0664);
68 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
73 static int qlge_irq_type = MSIX_IRQ;
74 module_param(qlge_irq_type, int, 0664);
75 MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
77 static int qlge_mpi_coredump;
78 module_param(qlge_mpi_coredump, int, 0);
79 MODULE_PARM_DESC(qlge_mpi_coredump,
80 "Option to enable MPI firmware dump. Default is OFF - Do Not allocate memory. ");
82 static int qlge_force_coredump;
83 module_param(qlge_force_coredump, int, 0);
84 MODULE_PARM_DESC(qlge_force_coredump,
85 "Option to allow force of firmware core dump. Default is OFF - Do not allow.");
87 static const struct pci_device_id qlge_pci_tbl[] = {
88 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
89 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
90 /* required last entry */
94 MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
96 static int ql_wol(struct ql_adapter *);
97 static void qlge_set_multicast_list(struct net_device *);
98 static int ql_adapter_down(struct ql_adapter *);
99 static int ql_adapter_up(struct ql_adapter *);
101 /* This hardware semaphore provides exclusive access to
102 * resources shared between the NIC driver, MPI firmware,
103 * FCOE firmware and the FC driver.
105 static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
110 case SEM_XGMAC0_MASK:
111 sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
113 case SEM_XGMAC1_MASK:
114 sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
117 sem_bits = SEM_SET << SEM_ICB_SHIFT;
119 case SEM_MAC_ADDR_MASK:
120 sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
123 sem_bits = SEM_SET << SEM_FLASH_SHIFT;
126 sem_bits = SEM_SET << SEM_PROBE_SHIFT;
128 case SEM_RT_IDX_MASK:
129 sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
131 case SEM_PROC_REG_MASK:
132 sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
135 netif_alert(qdev, probe, qdev->ndev, "bad semaphore mask!\n");
139 ql_write32(qdev, SEM, sem_bits | sem_mask);
140 return !(ql_read32(qdev, SEM) & sem_bits);
143 int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
145 unsigned int wait_count = 30;
147 if (!ql_sem_trylock(qdev, sem_mask))
150 } while (--wait_count);
154 void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
156 ql_write32(qdev, SEM, sem_mask);
157 ql_read32(qdev, SEM); /* flush */
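/* Typical acquire/use/release pattern for these hardware semaphores, as the
 * flash and XGMAC access paths below do. Illustrative sketch only; the
 * -ETIMEDOUT shown here is an assumption, real callers simply propagate the
 * non-zero status from ql_sem_spinlock():
 *
 *	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
 *		return -ETIMEDOUT;
 *	... access the flash registers ...
 *	ql_sem_unlock(qdev, SEM_FLASH_MASK);
 */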
160 /* This function waits for a specific bit to come ready
161 * in a given register. It is used mostly during the initialization
162 * process, but is also used from netdev callbacks such as
163 * netdev->set_multi, netdev->set_mac_address and netdev->vlan_rx_add_vid.
165 int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
170 for (count = 0; count < UDELAY_COUNT; count++) {
171 temp = ql_read32(qdev, reg);
173 /* check for errors */
174 if (temp & err_bit) {
175 netif_alert(qdev, probe, qdev->ndev,
176 "register 0x%.08x access error, value = 0x%.08x!.\n",
179 } else if (temp & bit) {
182 udelay(UDELAY_DELAY);
184 netif_alert(qdev, probe, qdev->ndev,
185 "Timed out waiting for reg %x to come ready.\n", reg);
189 /* The CFG register is used to download TX and RX control blocks
190 * to the chip. This function waits for an operation to complete.
192 static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
197 for (count = 0; count < UDELAY_COUNT; count++) {
198 temp = ql_read32(qdev, CFG);
203 udelay(UDELAY_DELAY);
208 /* Used to issue init control blocks to hw. Maps control block,
209 * sets address, triggers download, waits for completion.
211 int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
221 (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
224 map = pci_map_single(qdev->pdev, ptr, size, direction);
225 if (pci_dma_mapping_error(qdev->pdev, map)) {
226 netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
230 status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
234 status = ql_wait_cfg(qdev, bit);
236 netif_err(qdev, ifup, qdev->ndev,
237 "Timed out waiting for CFG to come ready.\n");
241 ql_write32(qdev, ICB_L, (u32) map);
242 ql_write32(qdev, ICB_H, (u32) (map >> 32));
244 mask = CFG_Q_MASK | (bit << 16);
245 value = bit | (q_id << CFG_Q_SHIFT);
246 ql_write32(qdev, CFG, (mask | value));
249 * Wait for the bit to clear after signaling hw.
251 status = ql_wait_cfg(qdev, bit);
253 ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */
254 pci_unmap_single(qdev->pdev, map, size, direction);
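/* Illustrative sketch of a control block download through this helper,
 * assuming a completion queue ICB; the cqicb pointer and its type are not
 * part of this excerpt and follow the queue start code elsewhere in the
 * driver:
 *
 *	err = ql_write_cfg(qdev, cqicb, sizeof(*cqicb), CFG_LCQ,
 *			   rx_ring->cq_id);
 */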
258 /* Get a specific MAC address from the CAM. Used for debug and reg dump. */
259 int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
266 case MAC_ADDR_TYPE_MULTI_MAC:
267 case MAC_ADDR_TYPE_CAM_MAC:
270 ql_wait_reg_rdy(qdev,
271 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
274 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
275 (index << MAC_ADDR_IDX_SHIFT) | /* index */
276 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
278 ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0);
281 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
283 ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
286 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
287 (index << MAC_ADDR_IDX_SHIFT) | /* index */
288 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
290 ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0);
293 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
294 if (type == MAC_ADDR_TYPE_CAM_MAC) {
296 ql_wait_reg_rdy(qdev,
297 MAC_ADDR_IDX, MAC_ADDR_MW,
301 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
302 (index << MAC_ADDR_IDX_SHIFT) | /* index */
303 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
305 ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
309 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
313 case MAC_ADDR_TYPE_VLAN:
314 case MAC_ADDR_TYPE_MULTI_FLTR:
316 netif_crit(qdev, ifup, qdev->ndev,
317 "Address type %d not yet supported.\n", type);
324 /* Set up a MAC, multicast or VLAN address for the
325 * inbound frame matching.
327 static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
334 case MAC_ADDR_TYPE_MULTI_MAC:
336 u32 upper = (addr[0] << 8) | addr[1];
337 u32 lower = (addr[2] << 24) | (addr[3] << 16) |
338 (addr[4] << 8) | (addr[5]);
341 ql_wait_reg_rdy(qdev,
342 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
345 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
346 (index << MAC_ADDR_IDX_SHIFT) |
348 ql_write32(qdev, MAC_ADDR_DATA, lower);
350 ql_wait_reg_rdy(qdev,
351 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
354 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
355 (index << MAC_ADDR_IDX_SHIFT) |
358 ql_write32(qdev, MAC_ADDR_DATA, upper);
360 ql_wait_reg_rdy(qdev,
361 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
366 case MAC_ADDR_TYPE_CAM_MAC:
369 u32 upper = (addr[0] << 8) | addr[1];
371 (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
374 ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
377 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
378 (index << MAC_ADDR_IDX_SHIFT) | /* index */
380 ql_write32(qdev, MAC_ADDR_DATA, lower);
382 ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
385 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
386 (index << MAC_ADDR_IDX_SHIFT) | /* index */
388 ql_write32(qdev, MAC_ADDR_DATA, upper);
390 ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
393 ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
394 (index << MAC_ADDR_IDX_SHIFT) | /* index */
396 /* This field should also include the queue id
397 * and possibly the function id. Right now we hardcode
398 * the route field to NIC core.
400 cam_output = (CAM_OUT_ROUTE_NIC |
402 func << CAM_OUT_FUNC_SHIFT) |
403 (0 << CAM_OUT_CQ_ID_SHIFT));
404 if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
405 cam_output |= CAM_OUT_RV;
406 /* route to NIC core */
407 ql_write32(qdev, MAC_ADDR_DATA, cam_output);
410 case MAC_ADDR_TYPE_VLAN:
412 u32 enable_bit = *((u32 *) &addr[0]);
413 /* For VLAN, the addr actually holds a bit that
414 * either enables or disables the vlan id we are
415 * addressing. It's either MAC_ADDR_E on or off.
416 * That's bit-27 we're talking about.
419 ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
422 ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
423 (index << MAC_ADDR_IDX_SHIFT) | /* index */
425 enable_bit); /* enable/disable */
428 case MAC_ADDR_TYPE_MULTI_FLTR:
430 netif_crit(qdev, ifup, qdev->ndev,
431 "Address type %d not yet supported.\n", type);
438 /* Set or clear MAC address in hardware. We sometimes
439 * have to clear it to prevent wrong frame routing
440 * especially in a bonding environment.
442 static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
445 char zero_mac_addr[ETH_ALEN];
449 addr = &qdev->current_mac_addr[0];
450 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
451 "Set Mac addr %pM\n", addr);
453 eth_zero_addr(zero_mac_addr);
454 addr = &zero_mac_addr[0];
455 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
456 "Clearing MAC address\n");
458 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
461 status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
462 MAC_ADDR_TYPE_CAM_MAC,
463 qdev->func * MAX_CQ);
464 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
466 netif_err(qdev, ifup, qdev->ndev,
467 "Failed to init mac address.\n");
471 void ql_link_on(struct ql_adapter *qdev)
473 netif_err(qdev, link, qdev->ndev, "Link is up.\n");
474 netif_carrier_on(qdev->ndev);
475 ql_set_mac_addr(qdev, 1);
478 void ql_link_off(struct ql_adapter *qdev)
480 netif_err(qdev, link, qdev->ndev, "Link is down.\n");
481 netif_carrier_off(qdev->ndev);
482 ql_set_mac_addr(qdev, 0);
485 /* Get a specific frame routing value from the CAM.
486 * Used for debug and reg dump.
488 int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
492 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
496 ql_write32(qdev, RT_IDX,
497 RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
498 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
501 *value = ql_read32(qdev, RT_DATA);
506 /* The NIC function for this chip has 16 routing indexes. Each one can be used
507 * to route different frame types to various inbound queues. We send broadcast/
508 * multicast/error frames to the default queue for slow handling,
509 * and CAM hit/RSS frames to the fast handling queues.
511 static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
514 int status = -EINVAL; /* Return error if no mask match. */
520 value = RT_IDX_DST_CAM_Q | /* dest */
521 RT_IDX_TYPE_NICQ | /* type */
522 (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
525 case RT_IDX_VALID: /* Promiscuous Mode frames. */
527 value = RT_IDX_DST_DFLT_Q | /* dest */
528 RT_IDX_TYPE_NICQ | /* type */
529 (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
532 case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */
534 value = RT_IDX_DST_DFLT_Q | /* dest */
535 RT_IDX_TYPE_NICQ | /* type */
536 (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
539 case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
541 value = RT_IDX_DST_DFLT_Q | /* dest */
542 RT_IDX_TYPE_NICQ | /* type */
543 (RT_IDX_IP_CSUM_ERR_SLOT <<
544 RT_IDX_IDX_SHIFT); /* index */
547 case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
549 value = RT_IDX_DST_DFLT_Q | /* dest */
550 RT_IDX_TYPE_NICQ | /* type */
551 (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
552 RT_IDX_IDX_SHIFT); /* index */
555 case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */
557 value = RT_IDX_DST_DFLT_Q | /* dest */
558 RT_IDX_TYPE_NICQ | /* type */
559 (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
562 case RT_IDX_MCAST: /* Pass up All Multicast frames. */
564 value = RT_IDX_DST_DFLT_Q | /* dest */
565 RT_IDX_TYPE_NICQ | /* type */
566 (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
569 case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
571 value = RT_IDX_DST_DFLT_Q | /* dest */
572 RT_IDX_TYPE_NICQ | /* type */
573 (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
576 case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */
578 value = RT_IDX_DST_RSS | /* dest */
579 RT_IDX_TYPE_NICQ | /* type */
580 (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
583 case 0: /* Clear the E-bit on an entry. */
585 value = RT_IDX_DST_DFLT_Q | /* dest */
586 RT_IDX_TYPE_NICQ | /* type */
587 (index << RT_IDX_IDX_SHIFT);/* index */
591 netif_err(qdev, ifup, qdev->ndev,
592 "Mask type %d not yet supported.\n", mask);
598 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
601 value |= (enable ? RT_IDX_E : 0);
602 ql_write32(qdev, RT_IDX, value);
603 ql_write32(qdev, RT_DATA, enable ? mask : 0);
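/* Routing entries are programmed in bulk when the interface comes up or the
 * rx mode changes, typically with the RT_IDX hardware semaphore held.
 * Illustrative sketch of one such call, with the slot/mask pairing assumed
 * from the table above (route all broadcast frames to the default queue):
 *
 *	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
 */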
609 static void ql_enable_interrupts(struct ql_adapter *qdev)
611 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
614 static void ql_disable_interrupts(struct ql_adapter *qdev)
616 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
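/* Note on the (bit << 16) | bit idiom used above and throughout this file:
 * for registers such as INTR_EN, CFG and STS the upper 16 bits act as a
 * write-enable mask for the corresponding lower 16 bits, so single bits can
 * be set or cleared without a read-modify-write cycle. Sketch:
 *
 *	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);	(set)
 *	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));			(clear)
 */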
619 static void ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
621 struct intr_context *ctx = &qdev->intr_context[intr];
623 ql_write32(qdev, INTR_EN, ctx->intr_en_mask);
626 static void ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
628 struct intr_context *ctx = &qdev->intr_context[intr];
630 ql_write32(qdev, INTR_EN, ctx->intr_dis_mask);
633 static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
637 for (i = 0; i < qdev->intr_count; i++)
638 ql_enable_completion_interrupt(qdev, i);
641 static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
645 __le16 *flash = (__le16 *)&qdev->flash;
647 status = strncmp((char *)&qdev->flash, str, 4);
649 netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
653 for (i = 0; i < size; i++)
654 csum += le16_to_cpu(*flash++);
657 netif_err(qdev, ifup, qdev->ndev,
658 "Invalid flash checksum, csum = 0x%.04x.\n", csum);
663 static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
666 /* wait for reg to come ready */
667 status = ql_wait_reg_rdy(qdev,
668 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
671 /* set up for reg read */
672 ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
673 /* wait for reg to come ready */
674 status = ql_wait_reg_rdy(qdev,
675 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
678 /* This data is stored on flash as an array of
679 * __le32. Since ql_read32() returns cpu endian
680 * we need to swap it back.
682 *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
687 static int ql_get_8000_flash_params(struct ql_adapter *qdev)
691 __le32 *p = (__le32 *)&qdev->flash;
695 /* Get flash offset for function and adjust
699 offset = FUNC0_FLASH_OFFSET / sizeof(u32);
701 offset = FUNC1_FLASH_OFFSET / sizeof(u32);
703 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
706 size = sizeof(struct flash_params_8000) / sizeof(u32);
707 for (i = 0; i < size; i++, p++) {
708 status = ql_read_flash_word(qdev, i+offset, p);
710 netif_err(qdev, ifup, qdev->ndev,
711 "Error reading flash.\n");
716 status = ql_validate_flash(qdev,
717 sizeof(struct flash_params_8000) /
721 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
726 /* Extract either manufacturer or BOFM modified
729 if (qdev->flash.flash_params_8000.data_type1 == 2)
731 qdev->flash.flash_params_8000.mac_addr1,
732 qdev->ndev->addr_len);
735 qdev->flash.flash_params_8000.mac_addr,
736 qdev->ndev->addr_len);
738 if (!is_valid_ether_addr(mac_addr)) {
739 netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
744 memcpy(qdev->ndev->dev_addr,
746 qdev->ndev->addr_len);
749 ql_sem_unlock(qdev, SEM_FLASH_MASK);
753 static int ql_get_8012_flash_params(struct ql_adapter *qdev)
757 __le32 *p = (__le32 *)&qdev->flash;
759 u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
761 /* Second function's parameters follow the first
767 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
770 for (i = 0; i < size; i++, p++) {
771 status = ql_read_flash_word(qdev, i+offset, p);
773 netif_err(qdev, ifup, qdev->ndev,
774 "Error reading flash.\n");
780 status = ql_validate_flash(qdev,
781 sizeof(struct flash_params_8012) /
785 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
790 if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
795 memcpy(qdev->ndev->dev_addr,
796 qdev->flash.flash_params_8012.mac_addr,
797 qdev->ndev->addr_len);
800 ql_sem_unlock(qdev, SEM_FLASH_MASK);
804 /* The XGMAC registers are located behind the xgmac_addr and xgmac_data
805 * register pair. Each read/write requires us to wait for the ready
806 * bit before reading/writing the data.
808 static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
811 /* wait for reg to come ready */
812 status = ql_wait_reg_rdy(qdev,
813 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
816 /* write the data to the data reg */
817 ql_write32(qdev, XGMAC_DATA, data);
818 /* trigger the write */
819 ql_write32(qdev, XGMAC_ADDR, reg);
823 /* The XGMAC registers are located behind the xgmac_addr and xgmac_data
824 * register pair. Each read/write requires us to wait for the ready
825 * bit before reading/writing the data.
827 int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
830 /* wait for reg to come ready */
831 status = ql_wait_reg_rdy(qdev,
832 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
835 /* set up for reg read */
836 ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
837 /* wait for reg to come ready */
838 status = ql_wait_reg_rdy(qdev,
839 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
843 *data = ql_read32(qdev, XGMAC_DATA);
848 /* This is used for reading the 64-bit statistics regs. */
849 int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
855 status = ql_read_xgmac_reg(qdev, reg, &lo);
859 status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
863 *data = (u64) lo | ((u64) hi << 32);
869 static int ql_8000_port_initialize(struct ql_adapter *qdev)
873 * Get MPI firmware version for driver banner
876 status = ql_mb_about_fw(qdev);
879 status = ql_mb_get_fw_state(qdev);
882 /* Wake up a worker to get/set the TX/RX frame sizes. */
883 queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
888 /* Take the MAC Core out of reset.
889 * Enable statistics counting.
890 * Take the transmitter/receiver out of reset.
891 * This functionality may be done in the MPI firmware at a
894 static int ql_8012_port_initialize(struct ql_adapter *qdev)
899 if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
900 /* Another function has the semaphore, so
901 * wait for the port init bit to come ready.
903 netif_info(qdev, link, qdev->ndev,
904 "Another function has the semaphore, so wait for the port init bit to come ready.\n");
905 status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
907 netif_crit(qdev, link, qdev->ndev,
908 "Port initialize timed out.\n");
913 netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!\n");
914 /* Set the core reset. */
915 status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
918 data |= GLOBAL_CFG_RESET;
919 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
923 /* Clear the core reset and turn on jumbo for receiver. */
924 data &= ~GLOBAL_CFG_RESET; /* Clear core reset. */
925 data |= GLOBAL_CFG_JUMBO; /* Turn on jumbo. */
926 data |= GLOBAL_CFG_TX_STAT_EN;
927 data |= GLOBAL_CFG_RX_STAT_EN;
928 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
932 /* Enable transmitter, and clear its reset. */
933 status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
936 data &= ~TX_CFG_RESET; /* Clear the TX MAC reset. */
937 data |= TX_CFG_EN; /* Enable the transmitter. */
938 status = ql_write_xgmac_reg(qdev, TX_CFG, data);
942 /* Enable receiver and clear its reset. */
943 status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
946 data &= ~RX_CFG_RESET; /* Clear the RX MAC reset. */
947 data |= RX_CFG_EN; /* Enable the receiver. */
948 status = ql_write_xgmac_reg(qdev, RX_CFG, data);
954 ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
958 ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
962 /* Signal to the world that the port is enabled. */
963 ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
965 ql_sem_unlock(qdev, qdev->xg_sem_mask);
969 static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
971 return PAGE_SIZE << qdev->lbq_buf_order;
974 static struct qlge_bq_desc *qlge_get_curr_buf(struct qlge_bq *bq)
976 struct qlge_bq_desc *bq_desc;
978 bq_desc = &bq->queue[bq->next_to_clean];
979 bq->next_to_clean = QLGE_BQ_WRAP(bq->next_to_clean + 1);
984 static struct qlge_bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
985 struct rx_ring *rx_ring)
987 struct qlge_bq_desc *lbq_desc = qlge_get_curr_buf(&rx_ring->lbq);
989 pci_dma_sync_single_for_cpu(qdev->pdev, lbq_desc->dma_addr,
990 qdev->lbq_buf_size, PCI_DMA_FROMDEVICE);
992 if ((lbq_desc->p.pg_chunk.offset + qdev->lbq_buf_size) ==
993 ql_lbq_block_size(qdev)) {
994 /* last chunk of the master page */
995 pci_unmap_page(qdev->pdev, lbq_desc->dma_addr,
996 ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE);
1002 /* Update an rx ring index. */
1003 static void ql_update_cq(struct rx_ring *rx_ring)
1005 rx_ring->cnsmr_idx++;
1006 rx_ring->curr_entry++;
1007 if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
1008 rx_ring->cnsmr_idx = 0;
1009 rx_ring->curr_entry = rx_ring->cq_base;
1013 static void ql_write_cq_idx(struct rx_ring *rx_ring)
1015 ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
1018 static const char * const bq_type_name[] = {
1023 /* return 0 or negative error */
1024 static int qlge_refill_sb(struct rx_ring *rx_ring,
1025 struct qlge_bq_desc *sbq_desc, gfp_t gfp)
1027 struct ql_adapter *qdev = rx_ring->qdev;
1028 struct sk_buff *skb;
1030 if (sbq_desc->p.skb)
1033 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1034 "ring %u sbq: getting new skb for index %d.\n",
1035 rx_ring->cq_id, sbq_desc->index);
1037 skb = __netdev_alloc_skb(qdev->ndev, SMALL_BUFFER_SIZE, gfp);
1040 skb_reserve(skb, QLGE_SB_PAD);
1042 sbq_desc->dma_addr = pci_map_single(qdev->pdev, skb->data,
1044 PCI_DMA_FROMDEVICE);
1045 if (pci_dma_mapping_error(qdev->pdev, sbq_desc->dma_addr)) {
1046 netif_err(qdev, ifup, qdev->ndev, "PCI mapping failed.\n");
1047 dev_kfree_skb_any(skb);
1050 *sbq_desc->buf_ptr = cpu_to_le64(sbq_desc->dma_addr);
1052 sbq_desc->p.skb = skb;
1056 /* return 0 or negative error */
1057 static int qlge_refill_lb(struct rx_ring *rx_ring,
1058 struct qlge_bq_desc *lbq_desc, gfp_t gfp)
1060 struct ql_adapter *qdev = rx_ring->qdev;
1061 struct qlge_page_chunk *master_chunk = &rx_ring->master_chunk;
1063 if (!master_chunk->page) {
1065 dma_addr_t dma_addr;
1067 page = alloc_pages(gfp | __GFP_COMP, qdev->lbq_buf_order);
1068 if (unlikely(!page))
1070 dma_addr = pci_map_page(qdev->pdev, page, 0,
1071 ql_lbq_block_size(qdev),
1072 PCI_DMA_FROMDEVICE);
1073 if (pci_dma_mapping_error(qdev->pdev, dma_addr)) {
1074 __free_pages(page, qdev->lbq_buf_order);
1075 netif_err(qdev, drv, qdev->ndev,
1076 "PCI mapping failed.\n");
1079 master_chunk->page = page;
1080 master_chunk->va = page_address(page);
1081 master_chunk->offset = 0;
1082 rx_ring->chunk_dma_addr = dma_addr;
1085 lbq_desc->p.pg_chunk = *master_chunk;
1086 lbq_desc->dma_addr = rx_ring->chunk_dma_addr;
1087 *lbq_desc->buf_ptr = cpu_to_le64(lbq_desc->dma_addr +
1088 lbq_desc->p.pg_chunk.offset);
1090 /* Adjust the master page chunk for next
1093 master_chunk->offset += qdev->lbq_buf_size;
1094 if (master_chunk->offset == ql_lbq_block_size(qdev)) {
1095 master_chunk->page = NULL;
1097 master_chunk->va += qdev->lbq_buf_size;
1098 get_page(master_chunk->page);
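/* Worked example of the master page carve-up above, with illustrative
 * values of 4 KiB pages, lbq_buf_order = 1 and lbq_buf_size = 2048:
 * ql_lbq_block_size() is 8192, so one allocation yields four 2 KiB chunks.
 * Every chunk handed to the hardware holds a reference on the page (the
 * initial alloc_pages() plus one get_page() per later chunk), and the DMA
 * mapping is torn down in ql_get_curr_lchunk() when the chunk that ends the
 * block (offset + lbq_buf_size == block size) is consumed.
 */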
1104 /* return 0 or negative error */
1105 static int qlge_refill_bq(struct qlge_bq *bq, gfp_t gfp)
1107 struct rx_ring *rx_ring = QLGE_BQ_CONTAINER(bq);
1108 struct ql_adapter *qdev = rx_ring->qdev;
1109 struct qlge_bq_desc *bq_desc;
1114 refill_count = QLGE_BQ_WRAP(QLGE_BQ_ALIGN(bq->next_to_clean - 1) -
1119 i = bq->next_to_use;
1120 bq_desc = &bq->queue[i];
1123 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1124 "ring %u %s: try cleaning idx %d\n",
1125 rx_ring->cq_id, bq_type_name[bq->type], i);
1127 if (bq->type == QLGE_SB)
1128 retval = qlge_refill_sb(rx_ring, bq_desc, gfp);
1130 retval = qlge_refill_lb(rx_ring, bq_desc, gfp);
1132 netif_err(qdev, ifup, qdev->ndev,
1133 "ring %u %s: Could not get a page chunk, idx %d\n",
1134 rx_ring->cq_id, bq_type_name[bq->type], i);
1141 bq_desc = &bq->queue[0];
1145 } while (refill_count);
1148 if (bq->next_to_use != i) {
1149 if (QLGE_BQ_ALIGN(bq->next_to_use) != QLGE_BQ_ALIGN(i)) {
1150 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1151 "ring %u %s: updating prod idx = %d.\n",
1152 rx_ring->cq_id, bq_type_name[bq->type],
1154 ql_write_db_reg(i, bq->prod_idx_db_reg);
1156 bq->next_to_use = i;
1162 static void ql_update_buffer_queues(struct rx_ring *rx_ring, gfp_t gfp,
1163 unsigned long delay)
1165 bool sbq_fail, lbq_fail;
1167 sbq_fail = !!qlge_refill_bq(&rx_ring->sbq, gfp);
1168 lbq_fail = !!qlge_refill_bq(&rx_ring->lbq, gfp);
1170 /* Minimum number of buffers needed to be able to receive at least one
1171 * frame of any format:
1172 * sbq: 1 for header + 1 for data
1173 * lbq: mtu 9000 / lb size
1174 * Below this, the queue might stall.
1176 if ((sbq_fail && QLGE_BQ_HW_OWNED(&rx_ring->sbq) < 2) ||
1177 (lbq_fail && QLGE_BQ_HW_OWNED(&rx_ring->lbq) <
1178 DIV_ROUND_UP(9000, LARGE_BUFFER_MAX_SIZE)))
1179 /* Allocations can take a long time in certain cases (ex.
1180 * reclaim). Therefore, use a workqueue for long-running
1183 queue_delayed_work_on(smp_processor_id(), system_long_wq,
1184 &rx_ring->refill_work, delay);
1187 static void qlge_slow_refill(struct work_struct *work)
1189 struct rx_ring *rx_ring = container_of(work, struct rx_ring,
1191 struct napi_struct *napi = &rx_ring->napi;
1194 ql_update_buffer_queues(rx_ring, GFP_KERNEL, HZ / 2);
1198 /* napi_disable() might have prevented incomplete napi work from being
1201 napi_schedule(napi);
1202 /* trigger softirq processing */
1206 /* Unmaps tx buffers. Can be called from send() if a pci mapping
1207 * fails at some stage, or from the interrupt when a tx completes.
1209 static void ql_unmap_send(struct ql_adapter *qdev,
1210 struct tx_ring_desc *tx_ring_desc, int mapped)
1213 for (i = 0; i < mapped; i++) {
1214 if (i == 0 || (i == 7 && mapped > 7)) {
1216 * Unmap the skb->data area, or the
1217 * external sglist (AKA the Outbound
1218 * Address List (OAL)).
1219 * If it's the zeroth element, then it's
1220 * the skb->data area. If it's the 7th
1221 * element and there are more than 6 frags,
1225 netif_printk(qdev, tx_done, KERN_DEBUG,
1227 "unmapping OAL area.\n");
1229 pci_unmap_single(qdev->pdev,
1230 dma_unmap_addr(&tx_ring_desc->map[i],
1232 dma_unmap_len(&tx_ring_desc->map[i],
1236 netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1237 "unmapping frag %d.\n", i);
1238 pci_unmap_page(qdev->pdev,
1239 dma_unmap_addr(&tx_ring_desc->map[i],
1241 dma_unmap_len(&tx_ring_desc->map[i],
1242 maplen), PCI_DMA_TODEVICE);
1248 /* Map the buffers for this transmit. This will return
1249 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1251 static int ql_map_send(struct ql_adapter *qdev,
1252 struct ob_mac_iocb_req *mac_iocb_ptr,
1253 struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1255 int len = skb_headlen(skb);
1257 int frag_idx, err, map_idx = 0;
1258 struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1259 int frag_cnt = skb_shinfo(skb)->nr_frags;
1262 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1263 "frag_cnt = %d.\n", frag_cnt);
1266 * Map the skb buffer first.
1268 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1270 err = pci_dma_mapping_error(qdev->pdev, map);
1272 netif_err(qdev, tx_queued, qdev->ndev,
1273 "PCI mapping failed with error: %d\n", err);
1275 return NETDEV_TX_BUSY;
1278 tbd->len = cpu_to_le32(len);
1279 tbd->addr = cpu_to_le64(map);
1280 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1281 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1285 * This loop fills the remainder of the 8 address descriptors
1286 * in the IOCB. If there are more than 7 fragments, then the
1287 * eighth address desc will point to an external list (OAL).
1288 * When this happens, the remainder of the frags will be stored
1291 for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1292 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1294 if (frag_idx == 6 && frag_cnt > 7) {
1295 /* Let's tack on an sglist.
1296 * Our control block will now
1298 * iocb->seg[0] = skb->data
1299 * iocb->seg[1] = frag[0]
1300 * iocb->seg[2] = frag[1]
1301 * iocb->seg[3] = frag[2]
1302 * iocb->seg[4] = frag[3]
1303 * iocb->seg[5] = frag[4]
1304 * iocb->seg[6] = frag[5]
1305 * iocb->seg[7] = ptr to OAL (external sglist)
1306 * oal->seg[0] = frag[6]
1307 * oal->seg[1] = frag[7]
1308 * oal->seg[2] = frag[8]
1309 * oal->seg[3] = frag[9]
1310 * oal->seg[4] = frag[10]
1313 /* Tack on the OAL in the eighth segment of IOCB. */
1314 map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1317 err = pci_dma_mapping_error(qdev->pdev, map);
1319 netif_err(qdev, tx_queued, qdev->ndev,
1320 "PCI mapping outbound address list with error: %d\n",
1325 tbd->addr = cpu_to_le64(map);
1327 * The length is the number of fragments
1328 * that remain to be mapped times the length
1329 * of our sglist (OAL).
1332 cpu_to_le32((sizeof(struct tx_buf_desc) *
1333 (frag_cnt - frag_idx)) | TX_DESC_C);
1334 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1336 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1337 sizeof(struct oal));
1338 tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1342 map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
1345 err = dma_mapping_error(&qdev->pdev->dev, map);
1347 netif_err(qdev, tx_queued, qdev->ndev,
1348 "PCI mapping frags failed with error: %d.\n",
1353 tbd->addr = cpu_to_le64(map);
1354 tbd->len = cpu_to_le32(skb_frag_size(frag));
1355 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1356 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1357 skb_frag_size(frag));
1360 /* Save the number of segments we've mapped. */
1361 tx_ring_desc->map_cnt = map_idx;
1362 /* Terminate the last segment. */
1363 tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1364 return NETDEV_TX_OK;
1368 * If the first frag mapping failed, then i will be zero.
1369 * This causes the unmap of the skb->data area. Otherwise
1370 * we pass in the number of frags that mapped successfully
1371 * so they can be unmapped.
1373 ql_unmap_send(qdev, tx_ring_desc, map_idx);
1374 return NETDEV_TX_BUSY;
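/* Worked example of the OAL layout described above, for an skb with a
 * linear area plus 10 page fragments (11 DMA segments in total): the IOCB
 * carries skb->data in seg[0] and frags 0-5 in seg[1..6], while seg[7]
 * points at the per-descriptor OAL whose entries carry frags 6-9.
 * tx_ring_desc->map[] then records 12 mappings (head, six frags, the OAL
 * itself, and the four remaining frags), which is exactly what
 * ql_unmap_send() walks on completion.
 */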
1377 /* Categorizing receive firmware frame errors */
1378 static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
1379 struct rx_ring *rx_ring)
1381 struct nic_stats *stats = &qdev->nic_stats;
1383 stats->rx_err_count++;
1384 rx_ring->rx_errors++;
1386 switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
1387 case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
1388 stats->rx_code_err++;
1390 case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
1391 stats->rx_oversize_err++;
1393 case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
1394 stats->rx_undersize_err++;
1396 case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
1397 stats->rx_preamble_err++;
1399 case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
1400 stats->rx_frame_len_err++;
1402 case IB_MAC_IOCB_RSP_ERR_CRC:
1403 stats->rx_crc_err++;
1410 * ql_update_mac_hdr_len - helper routine to update the mac header length
1411 * based on vlan tags if present
1413 static void ql_update_mac_hdr_len(struct ql_adapter *qdev,
1414 struct ib_mac_iocb_rsp *ib_mac_rsp,
1415 void *page, size_t *len)
1419 if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
1421 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) {
1423 /* Look for stacked vlan tags in ethertype field */
1424 if (tags[6] == ETH_P_8021Q &&
1425 tags[8] == ETH_P_8021Q)
1426 *len += 2 * VLAN_HLEN;
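/* Worked example: with hardware VLAN stripping disabled, a frame carrying a
 * stacked (QinQ) pair of tags has a MAC header of
 * ETH_HLEN + 2 * VLAN_HLEN = 14 + 8 = 22 bytes; a single tag (handled in
 * the branch not shown in this excerpt) adds just one VLAN_HLEN. The
 * callers use the adjusted length to decide how many header bytes to copy
 * out of the large-buffer page.
 */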
1432 /* Process an inbound completion from an rx ring. */
1433 static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1434 struct rx_ring *rx_ring,
1435 struct ib_mac_iocb_rsp *ib_mac_rsp,
1436 u32 length, u16 vlan_id)
1438 struct sk_buff *skb;
1439 struct qlge_bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1440 struct napi_struct *napi = &rx_ring->napi;
1442 /* Frame error, so drop the packet. */
1443 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1444 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1445 put_page(lbq_desc->p.pg_chunk.page);
1448 napi->dev = qdev->ndev;
1450 skb = napi_get_frags(napi);
1452 netif_err(qdev, drv, qdev->ndev,
1453 "Couldn't get an skb, exiting.\n");
1454 rx_ring->rx_dropped++;
1455 put_page(lbq_desc->p.pg_chunk.page);
1458 prefetch(lbq_desc->p.pg_chunk.va);
1459 __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1460 lbq_desc->p.pg_chunk.page,
1461 lbq_desc->p.pg_chunk.offset,
1465 skb->data_len += length;
1466 skb->truesize += length;
1467 skb_shinfo(skb)->nr_frags++;
1469 rx_ring->rx_packets++;
1470 rx_ring->rx_bytes += length;
1471 skb->ip_summed = CHECKSUM_UNNECESSARY;
1472 skb_record_rx_queue(skb, rx_ring->cq_id);
1473 if (vlan_id != 0xffff)
1474 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1475 napi_gro_frags(napi);
1478 /* Process an inbound completion from an rx ring. */
1479 static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1480 struct rx_ring *rx_ring,
1481 struct ib_mac_iocb_rsp *ib_mac_rsp,
1482 u32 length, u16 vlan_id)
1484 struct net_device *ndev = qdev->ndev;
1485 struct sk_buff *skb = NULL;
1487 struct qlge_bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1488 struct napi_struct *napi = &rx_ring->napi;
1489 size_t hlen = ETH_HLEN;
1491 skb = netdev_alloc_skb(ndev, length);
1493 rx_ring->rx_dropped++;
1494 put_page(lbq_desc->p.pg_chunk.page);
1498 addr = lbq_desc->p.pg_chunk.va;
1501 /* Frame error, so drop the packet. */
1502 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1503 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1507 /* Update the MAC header length*/
1508 ql_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen);
1510 /* The max framesize filter on this chip is set higher than
1511 * MTU since FCoE uses 2k frames.
1513 if (skb->len > ndev->mtu + hlen) {
1514 netif_err(qdev, drv, qdev->ndev,
1515 "Segment too small, dropping.\n");
1516 rx_ring->rx_dropped++;
1519 skb_put_data(skb, addr, hlen);
1520 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1521 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1523 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1524 lbq_desc->p.pg_chunk.offset + hlen, length - hlen);
1525 skb->len += length - hlen;
1526 skb->data_len += length - hlen;
1527 skb->truesize += length - hlen;
1529 rx_ring->rx_packets++;
1530 rx_ring->rx_bytes += skb->len;
1531 skb->protocol = eth_type_trans(skb, ndev);
1532 skb_checksum_none_assert(skb);
1534 if ((ndev->features & NETIF_F_RXCSUM) &&
1535 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1537 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1538 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1539 "TCP checksum done!\n");
1540 skb->ip_summed = CHECKSUM_UNNECESSARY;
1541 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1542 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1543 /* Unfragmented ipv4 UDP frame. */
1545 (struct iphdr *)((u8 *)addr + hlen);
1546 if (!(iph->frag_off &
1547 htons(IP_MF|IP_OFFSET))) {
1548 skb->ip_summed = CHECKSUM_UNNECESSARY;
1549 netif_printk(qdev, rx_status, KERN_DEBUG,
1551 "UDP checksum done!\n");
1556 skb_record_rx_queue(skb, rx_ring->cq_id);
1557 if (vlan_id != 0xffff)
1558 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1559 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1560 napi_gro_receive(napi, skb);
1562 netif_receive_skb(skb);
1565 dev_kfree_skb_any(skb);
1566 put_page(lbq_desc->p.pg_chunk.page);
1569 /* Process an inbound completion from an rx ring. */
1570 static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1571 struct rx_ring *rx_ring,
1572 struct ib_mac_iocb_rsp *ib_mac_rsp,
1573 u32 length, u16 vlan_id)
1575 struct qlge_bq_desc *sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
1576 struct net_device *ndev = qdev->ndev;
1577 struct sk_buff *skb, *new_skb;
1579 skb = sbq_desc->p.skb;
1580 /* Allocate new_skb and copy */
1581 new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1583 rx_ring->rx_dropped++;
1586 skb_reserve(new_skb, NET_IP_ALIGN);
1588 pci_dma_sync_single_for_cpu(qdev->pdev, sbq_desc->dma_addr,
1589 SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE);
1591 skb_put_data(new_skb, skb->data, length);
1595 /* Frame error, so drop the packet. */
1596 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1597 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1598 dev_kfree_skb_any(skb);
1602 /* loopback self test for ethtool */
1603 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1604 ql_check_lb_frame(qdev, skb);
1605 dev_kfree_skb_any(skb);
1609 /* The max framesize filter on this chip is set higher than
1610 * MTU since FCoE uses 2k frames.
1612 if (skb->len > ndev->mtu + ETH_HLEN) {
1613 dev_kfree_skb_any(skb);
1614 rx_ring->rx_dropped++;
1618 prefetch(skb->data);
1619 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1620 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1622 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1623 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1624 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1625 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1626 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1627 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1629 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1630 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1631 "Promiscuous Packet.\n");
1633 rx_ring->rx_packets++;
1634 rx_ring->rx_bytes += skb->len;
1635 skb->protocol = eth_type_trans(skb, ndev);
1636 skb_checksum_none_assert(skb);
1638 /* If rx checksum is on, and there are no
1639 * csum or frame errors.
1641 if ((ndev->features & NETIF_F_RXCSUM) &&
1642 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1644 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1645 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1646 "TCP checksum done!\n");
1647 skb->ip_summed = CHECKSUM_UNNECESSARY;
1648 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1649 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1650 /* Unfragmented ipv4 UDP frame. */
1651 struct iphdr *iph = (struct iphdr *) skb->data;
1652 if (!(iph->frag_off &
1653 htons(IP_MF|IP_OFFSET))) {
1654 skb->ip_summed = CHECKSUM_UNNECESSARY;
1655 netif_printk(qdev, rx_status, KERN_DEBUG,
1657 "UDP checksum done!\n");
1662 skb_record_rx_queue(skb, rx_ring->cq_id);
1663 if (vlan_id != 0xffff)
1664 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1665 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1666 napi_gro_receive(&rx_ring->napi, skb);
1668 netif_receive_skb(skb);
1671 static void ql_realign_skb(struct sk_buff *skb, int len)
1673 void *temp_addr = skb->data;
1675 /* Undo the skb_reserve(skb,32) we did before
1676 * giving to hardware, and realign data on
1677 * a 2-byte boundary.
1679 skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1680 skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1681 memmove(skb->data, temp_addr, len);
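/* Worked example of the realignment above, assuming QLGE_SB_PAD is the
 * 32 bytes reserved in qlge_refill_sb() and NET_IP_ALIGN is the usual 2:
 * skb->data is pulled back by 30 bytes, leaving the Ethernet header at
 * offset 2 so the IP header that follows its 14 bytes lands on a 4-byte
 * boundary.
 */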
1685 * This function builds an skb for the given inbound
1686 * completion. It will be rewritten for readability in the near
1687 * future, but for now it works well.
1689 static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1690 struct rx_ring *rx_ring,
1691 struct ib_mac_iocb_rsp *ib_mac_rsp)
1693 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1694 u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1695 struct qlge_bq_desc *lbq_desc, *sbq_desc;
1696 struct sk_buff *skb = NULL;
1697 size_t hlen = ETH_HLEN;
1700 * Handle the header buffer if present.
1702 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1703 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1704 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1705 "Header of %d bytes in small buffer.\n", hdr_len);
1707 * Headers fit nicely into a small buffer.
1709 sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
1710 pci_unmap_single(qdev->pdev, sbq_desc->dma_addr,
1711 SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE);
1712 skb = sbq_desc->p.skb;
1713 ql_realign_skb(skb, hdr_len);
1714 skb_put(skb, hdr_len);
1715 sbq_desc->p.skb = NULL;
1719 * Handle the data buffer(s).
1721 if (unlikely(!length)) { /* Is there data too? */
1722 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1723 "No Data buffer in this packet.\n");
1727 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1728 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1729 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1730 "Headers in small, data of %d bytes in small, combine them.\n",
1733 * Data is less than small buffer size so it's
1734 * stuffed in a small buffer.
1735 * For this case we append the data
1736 * from the "data" small buffer to the "header" small
1739 sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
1740 pci_dma_sync_single_for_cpu(qdev->pdev,
1743 PCI_DMA_FROMDEVICE);
1744 skb_put_data(skb, sbq_desc->p.skb->data, length);
1746 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1747 "%d bytes in a single small buffer.\n",
1749 sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
1750 skb = sbq_desc->p.skb;
1751 ql_realign_skb(skb, length);
1752 skb_put(skb, length);
1753 pci_unmap_single(qdev->pdev, sbq_desc->dma_addr,
1755 PCI_DMA_FROMDEVICE);
1756 sbq_desc->p.skb = NULL;
1758 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1759 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1760 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1761 "Header in small, %d bytes in large. Chain large to small!\n",
1764 * The data is in a single large buffer. We
1765 * chain it to the header buffer's skb and let
1768 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1769 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1770 "Chaining page at offset = %d, for %d bytes to skb.\n",
1771 lbq_desc->p.pg_chunk.offset, length);
1772 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1773 lbq_desc->p.pg_chunk.offset, length);
1775 skb->data_len += length;
1776 skb->truesize += length;
1779 * The headers and data are in a single large buffer. We
1780 * copy it to a new skb and let it go. This can happen with
1781 * jumbo mtu on a non-TCP/UDP frame.
1783 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1784 skb = netdev_alloc_skb(qdev->ndev, length);
1786 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1787 "No skb available, drop the packet.\n");
1790 pci_unmap_page(qdev->pdev, lbq_desc->dma_addr,
1792 PCI_DMA_FROMDEVICE);
1793 skb_reserve(skb, NET_IP_ALIGN);
1794 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1795 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1797 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1798 lbq_desc->p.pg_chunk.offset,
1801 skb->data_len += length;
1802 skb->truesize += length;
1803 ql_update_mac_hdr_len(qdev, ib_mac_rsp,
1804 lbq_desc->p.pg_chunk.va,
1806 __pskb_pull_tail(skb, hlen);
1810 * The data is in a chain of large buffers
1811 * pointed to by a small buffer. We loop
1812 * through and chain them to our small header
1814 * frags: There are 18 max frags and our small
1815 * buffer will hold 32 of them. The thing is,
1816 * we'll use 3 max for our 9000 byte jumbo
1817 * frames. If the MTU goes up we could
1818 * eventually be in trouble.
1821 sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
1822 pci_unmap_single(qdev->pdev, sbq_desc->dma_addr,
1823 SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE);
1824 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1826 * This is a non-TCP/UDP IP frame, so
1827 * the headers aren't split into a small
1828 * buffer. We have to use the small buffer
1829 * that contains our sg list as our skb to
1830 * send upstairs. Copy the sg list here to
1831 * a local buffer and use it to find the
1834 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1835 "%d bytes of headers & data in chain of large.\n",
1837 skb = sbq_desc->p.skb;
1838 sbq_desc->p.skb = NULL;
1839 skb_reserve(skb, NET_IP_ALIGN);
1842 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1843 size = min(length, qdev->lbq_buf_size);
1845 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1846 "Adding page %d to skb for %d bytes.\n",
1848 skb_fill_page_desc(skb, i,
1849 lbq_desc->p.pg_chunk.page,
1850 lbq_desc->p.pg_chunk.offset, size);
1852 skb->data_len += size;
1853 skb->truesize += size;
1856 } while (length > 0);
1857 ql_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va,
1859 __pskb_pull_tail(skb, hlen);
1864 /* Process an inbound completion from an rx ring. */
1865 static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1866 struct rx_ring *rx_ring,
1867 struct ib_mac_iocb_rsp *ib_mac_rsp,
1870 struct net_device *ndev = qdev->ndev;
1871 struct sk_buff *skb = NULL;
1873 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1875 skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1876 if (unlikely(!skb)) {
1877 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1878 "No skb available, drop packet.\n");
1879 rx_ring->rx_dropped++;
1883 /* Frame error, so drop the packet. */
1884 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1885 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1886 dev_kfree_skb_any(skb);
1890 /* The max framesize filter on this chip is set higher than
1891 * MTU since FCoE uses 2k frames.
1893 if (skb->len > ndev->mtu + ETH_HLEN) {
1894 dev_kfree_skb_any(skb);
1895 rx_ring->rx_dropped++;
1899 /* loopback self test for ethtool */
1900 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1901 ql_check_lb_frame(qdev, skb);
1902 dev_kfree_skb_any(skb);
1906 prefetch(skb->data);
1907 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1908 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1909 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1910 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1911 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1912 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1913 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1914 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1915 rx_ring->rx_multicast++;
1917 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1918 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1919 "Promiscuous Packet.\n");
1922 skb->protocol = eth_type_trans(skb, ndev);
1923 skb_checksum_none_assert(skb);
1925 /* If rx checksum is on, and there are no
1926 * csum or frame errors.
1928 if ((ndev->features & NETIF_F_RXCSUM) &&
1929 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1931 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1932 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1933 "TCP checksum done!\n");
1934 skb->ip_summed = CHECKSUM_UNNECESSARY;
1935 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1936 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1937 /* Unfragmented ipv4 UDP frame. */
1938 struct iphdr *iph = (struct iphdr *) skb->data;
1939 if (!(iph->frag_off &
1940 htons(IP_MF|IP_OFFSET))) {
1941 skb->ip_summed = CHECKSUM_UNNECESSARY;
1942 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1943 "TCP checksum done!\n");
1948 rx_ring->rx_packets++;
1949 rx_ring->rx_bytes += skb->len;
1950 skb_record_rx_queue(skb, rx_ring->cq_id);
1951 if (vlan_id != 0xffff)
1952 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1953 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1954 napi_gro_receive(&rx_ring->napi, skb);
1956 netif_receive_skb(skb);
1959 /* Process an inbound completion from an rx ring. */
1960 static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
1961 struct rx_ring *rx_ring,
1962 struct ib_mac_iocb_rsp *ib_mac_rsp)
1964 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1965 u16 vlan_id = ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
1966 (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ?
1967 ((le16_to_cpu(ib_mac_rsp->vlan_id) &
1968 IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
1970 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1972 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
1973 /* The data and headers are split into
1976 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
1978 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1979 /* The data fits in a single small buffer.
1980 * Allocate a new skb, copy the data and
1981 * return the buffer to the free pool.
1983 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp, length,
1985 } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
1986 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
1987 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
1988 /* TCP packet in a page chunk that's been checksummed.
1989 * Tack it on to our GRO skb and let it go.
1991 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp, length,
1993 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1994 /* Non-TCP packet in a page chunk. Allocate an
1995 * skb, tack it on frags, and send it up.
1997 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp, length,
2000 /* Non-TCP/UDP large frames that span multiple buffers
2001 * can be processed correctly by the split frame logic.
2003 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2007 return (unsigned long)length;
2010 /* Process an outbound completion from an rx ring. */
2011 static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2012 struct ob_mac_iocb_rsp *mac_rsp)
2014 struct tx_ring *tx_ring;
2015 struct tx_ring_desc *tx_ring_desc;
2017 QL_DUMP_OB_MAC_RSP(mac_rsp);
2018 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2019 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2020 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
2021 tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2022 tx_ring->tx_packets++;
2023 dev_kfree_skb(tx_ring_desc->skb);
2024 tx_ring_desc->skb = NULL;
2026 if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2029 OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2030 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2031 netif_warn(qdev, tx_done, qdev->ndev,
2032 "Total descriptor length did not match transfer length.\n");
2034 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2035 netif_warn(qdev, tx_done, qdev->ndev,
2036 "Frame too short to be valid, not sent.\n");
2038 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2039 netif_warn(qdev, tx_done, qdev->ndev,
2040 "Frame too long, but sent anyway.\n");
2042 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
2043 netif_warn(qdev, tx_done, qdev->ndev,
2044 "PCI backplane error. Frame not sent.\n");
2047 atomic_inc(&tx_ring->tx_count);
2050 /* Fire up a handler to reset the MPI processor. */
2051 void ql_queue_fw_error(struct ql_adapter *qdev)
2054 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2057 void ql_queue_asic_error(struct ql_adapter *qdev)
2060 ql_disable_interrupts(qdev);
2061 /* Clear adapter up bit to signal the recovery
2062 * process that it shouldn't kill the reset worker
2065 clear_bit(QL_ADAPTER_UP, &qdev->flags);
2066 /* Set the asic recovery bit to indicate to the reset process that we
2067 * are in fatal error recovery rather than a normal close
2069 set_bit(QL_ASIC_RECOVERY, &qdev->flags);
2070 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2073 static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2074 struct ib_ae_iocb_rsp *ib_ae_rsp)
2076 switch (ib_ae_rsp->event) {
2077 case MGMT_ERR_EVENT:
2078 netif_err(qdev, rx_err, qdev->ndev,
2079 "Management Processor Fatal Error.\n");
2080 ql_queue_fw_error(qdev);
2083 case CAM_LOOKUP_ERR_EVENT:
2084 netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
2085 netdev_err(qdev->ndev, "This event shouldn't occur.\n");
2086 ql_queue_asic_error(qdev);
2089 case SOFT_ECC_ERROR_EVENT:
2090 netdev_err(qdev->ndev, "Soft ECC error detected.\n");
2091 ql_queue_asic_error(qdev);
2094 case PCI_ERR_ANON_BUF_RD:
2095 netdev_err(qdev->ndev, "PCI error occurred when reading "
2096 "anonymous buffers from rx_ring %d.\n",
2098 ql_queue_asic_error(qdev);
2102 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2104 ql_queue_asic_error(qdev);
2109 static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2111 struct ql_adapter *qdev = rx_ring->qdev;
2112 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2113 struct ob_mac_iocb_rsp *net_rsp = NULL;
2116 struct tx_ring *tx_ring;
2117 /* While there are entries in the completion queue. */
2118 while (prod != rx_ring->cnsmr_idx) {
2120 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2121 "cq_id = %d, prod = %d, cnsmr = %d\n",
2122 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2124 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2126 switch (net_rsp->opcode) {
2128 case OPCODE_OB_MAC_TSO_IOCB:
2129 case OPCODE_OB_MAC_IOCB:
2130 ql_process_mac_tx_intr(qdev, net_rsp);
2133 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2134 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2138 ql_update_cq(rx_ring);
2139 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2143 ql_write_cq_idx(rx_ring);
2144 tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
2145 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
2146 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2148 * The queue got stopped because the tx_ring was full.
2149 * Wake it up, because it's now at least 25% empty.
2151 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
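/* Worked example of the wake threshold above: tx_count holds the number of
 * free tx descriptors, so with an illustrative 256-entry ring the stopped
 * subqueue is woken once more than 256 / 4 = 64 entries are free, i.e. the
 * ring is at least 25% empty. This hysteresis avoids ping-ponging the queue
 * state on every completion.
 */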
2157 static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2159 struct ql_adapter *qdev = rx_ring->qdev;
2160 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2161 struct ql_net_rsp_iocb *net_rsp;
2164 /* While there are entries in the completion queue. */
2165 while (prod != rx_ring->cnsmr_idx) {
2167 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2168 "cq_id = %d, prod = %d, cnsmr = %d\n",
2169 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2171 net_rsp = rx_ring->curr_entry;
2173 switch (net_rsp->opcode) {
2174 case OPCODE_IB_MAC_IOCB:
2175 ql_process_mac_rx_intr(qdev, rx_ring,
2176 (struct ib_mac_iocb_rsp *)
2180 case OPCODE_IB_AE_IOCB:
2181 ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2185 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2186 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2191 ql_update_cq(rx_ring);
2192 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2193 if (count == budget)
2196 ql_update_buffer_queues(rx_ring, GFP_ATOMIC, 0);
2197 ql_write_cq_idx(rx_ring);
2201 static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2203 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2204 struct ql_adapter *qdev = rx_ring->qdev;
2205 struct rx_ring *trx_ring;
2206 int i, work_done = 0;
2207 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
2209 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2210 "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
2212 /* Service the TX rings first. They start
2213 * right after the RSS rings.
2215 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2216 trx_ring = &qdev->rx_ring[i];
2217 /* If this TX completion ring belongs to this vector and
2218 * it's not empty then service it.
2220 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2221 (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2222 trx_ring->cnsmr_idx)) {
2223 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2224 "%s: Servicing TX completion ring %d.\n",
2225 __func__, trx_ring->cq_id);
2226 ql_clean_outbound_rx_ring(trx_ring);
2231 * Now service the RSS ring if it's active.
2233 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2234 rx_ring->cnsmr_idx) {
2235 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2236 "%s: Servicing RX completion ring %d.\n",
2237 __func__, rx_ring->cq_id);
2238 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2241 if (work_done < budget) {
2242 napi_complete_done(napi, work_done);
2243 ql_enable_completion_interrupt(qdev, rx_ring->irq);
2248 static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
2250 struct ql_adapter *qdev = netdev_priv(ndev);
2252 if (features & NETIF_F_HW_VLAN_CTAG_RX) {
2253 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2254 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2256 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2261 * qlge_update_hw_vlan_features - helper routine to reinitialize the adapter
2262 * based on the features to enable/disable hardware vlan accel
2264 static int qlge_update_hw_vlan_features(struct net_device *ndev,
2265 netdev_features_t features)
2267 struct ql_adapter *qdev = netdev_priv(ndev);
2269 bool need_restart = netif_running(ndev);
2272 status = ql_adapter_down(qdev);
2274 netif_err(qdev, link, qdev->ndev,
2275 "Failed to bring down the adapter\n");
2280 /* update the features with the recent change */
2281 ndev->features = features;
2284 status = ql_adapter_up(qdev);
2286 netif_err(qdev, link, qdev->ndev,
2287 "Failed to bring up the adapter\n");
2295 static int qlge_set_features(struct net_device *ndev,
2296 netdev_features_t features)
2298 netdev_features_t changed = ndev->features ^ features;
2301 if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
2302 /* Update the behavior of vlan accel in the adapter */
2303 err = qlge_update_hw_vlan_features(ndev, features);
2307 qlge_vlan_mode(ndev, features);
2313 static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
2315 u32 enable_bit = MAC_ADDR_E;
2318 err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2319 MAC_ADDR_TYPE_VLAN, vid);
2321 netif_err(qdev, ifup, qdev->ndev,
2322 "Failed to init vlan address.\n");
2326 static int qlge_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
2328 struct ql_adapter *qdev = netdev_priv(ndev);
2332 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2336 err = __qlge_vlan_rx_add_vid(qdev, vid);
2337 set_bit(vid, qdev->active_vlans);
2339 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2344 static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
2349 err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2350 MAC_ADDR_TYPE_VLAN, vid);
2352 netif_err(qdev, ifup, qdev->ndev,
2353 "Failed to clear vlan address.\n");
2357 static int qlge_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
2359 struct ql_adapter *qdev = netdev_priv(ndev);
2363 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2367 err = __qlge_vlan_rx_kill_vid(qdev, vid);
2368 clear_bit(vid, qdev->active_vlans);
2370 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2375 static void qlge_restore_vlan(struct ql_adapter *qdev)
2380 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2384 for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
2385 __qlge_vlan_rx_add_vid(qdev, vid);
2387 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2390 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2391 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2393 struct rx_ring *rx_ring = dev_id;
2394 napi_schedule(&rx_ring->napi);
2398 /* This handles a fatal error, MPI activity, and the default
2399 * rx_ring in an MSI-X multiple vector environment.
2400 * In an MSI/Legacy environment it also processes the rest of the rx_rings.
2403 static irqreturn_t qlge_isr(int irq, void *dev_id)
2405 struct rx_ring *rx_ring = dev_id;
2406 struct ql_adapter *qdev = rx_ring->qdev;
2407 struct intr_context *intr_context = &qdev->intr_context[0];
2411 /* Experience shows that when using INTx interrupts, interrupts must
2412 * be masked manually.
2413 * When using MSI mode, INTR_EN_EN must be explicitly disabled
2414 * (even though it is auto-masked), otherwise a later command to
2415 * enable it is not effective.
2417 if (!test_bit(QL_MSIX_ENABLED, &qdev->flags))
2418 ql_disable_completion_interrupt(qdev, 0);
2420 var = ql_read32(qdev, STS);
2423 * Check for fatal error.
2426 ql_disable_completion_interrupt(qdev, 0);
2427 ql_queue_asic_error(qdev);
2428 netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
2429 var = ql_read32(qdev, ERR_STS);
2430 netdev_err(qdev->ndev, "Resetting chip. "
2431 "Error Status Register = 0x%x\n", var);
2436 * Check MPI processor activity.
2438 if ((var & STS_PI) &&
2439 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2441 * We've got an async event or mailbox completion.
2442 * Handle it and clear the source of the interrupt.
2444 netif_err(qdev, intr, qdev->ndev,
2445 "Got MPI processor interrupt.\n");
2446 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2447 queue_delayed_work_on(smp_processor_id(),
2448 qdev->workqueue, &qdev->mpi_work, 0);
2453 * Get the bit-mask that shows the active queues for this
2454 * pass. Compare it to the queues that this irq services
2455 * and call napi if there's a match.
2457 var = ql_read32(qdev, ISR1);
2458 if (var & intr_context->irq_mask) {
2459 netif_info(qdev, intr, qdev->ndev,
2460 "Waking handler for rx_ring[0].\n");
2461 napi_schedule(&rx_ring->napi);
2464 /* Experience shows that the device sometimes signals an
2465 * interrupt but no work is scheduled from this function.
2466 * Nevertheless, the interrupt is auto-masked. Therefore, we
2467 * systematically re-enable the interrupt if we didn't schedule NAPI.
2470 ql_enable_completion_interrupt(qdev, 0);
2473 return work_done ? IRQ_HANDLED : IRQ_NONE;
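/* Set up a TSO (LSO) request if the skb is GSO: record the frame length,
 * header lengths, L3/L4 offsets and MSS in the IOCB, and seed the TCP
 * checksum field with the pseudo-header checksum (length 0) so the
 * hardware can complete the checksum for each segment it emits.
 */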
2476 static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2479 if (skb_is_gso(skb)) {
2481 __be16 l3_proto = vlan_get_protocol(skb);
2483 err = skb_cow_head(skb, 0);
2487 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2488 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2489 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2490 mac_iocb_ptr->total_hdrs_len =
2491 cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2492 mac_iocb_ptr->net_trans_offset =
2493 cpu_to_le16(skb_network_offset(skb) |
2494 skb_transport_offset(skb)
2495 << OB_MAC_TRANSPORT_HDR_SHIFT);
2496 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2497 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2498 if (likely(l3_proto == htons(ETH_P_IP))) {
2499 struct iphdr *iph = ip_hdr(skb);
2501 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2502 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2506 } else if (l3_proto == htons(ETH_P_IPV6)) {
2507 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2508 tcp_hdr(skb)->check =
2509 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2510 &ipv6_hdr(skb)->daddr,
2518 static void ql_hw_csum_setup(struct sk_buff *skb,
2519 struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2522 struct iphdr *iph = ip_hdr(skb);
2524 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2525 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2526 mac_iocb_ptr->net_trans_offset =
2527 cpu_to_le16(skb_network_offset(skb) |
2528 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2530 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2531 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2532 if (likely(iph->protocol == IPPROTO_TCP)) {
2533 check = &(tcp_hdr(skb)->check);
2534 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2535 mac_iocb_ptr->total_hdrs_len =
2536 cpu_to_le16(skb_transport_offset(skb) +
2537 (tcp_hdr(skb)->doff << 2));
2539 check = &(udp_hdr(skb)->check);
2540 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2541 mac_iocb_ptr->total_hdrs_len =
2542 cpu_to_le16(skb_transport_offset(skb) +
2543 sizeof(struct udphdr));
2545 *check = ~csum_tcpudp_magic(iph->saddr,
2546 iph->daddr, len, iph->protocol, 0);
2549 static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2551 struct tx_ring_desc *tx_ring_desc;
2552 struct ob_mac_iocb_req *mac_iocb_ptr;
2553 struct ql_adapter *qdev = netdev_priv(ndev);
2555 struct tx_ring *tx_ring;
2556 u32 tx_ring_idx = (u32) skb->queue_mapping;
2558 tx_ring = &qdev->tx_ring[tx_ring_idx];
2560 if (skb_padto(skb, ETH_ZLEN))
2561 return NETDEV_TX_OK;
2563 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2564 netif_info(qdev, tx_queued, qdev->ndev,
2565 "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
2566 __func__, tx_ring_idx);
2567 netif_stop_subqueue(ndev, tx_ring->wq_id);
2568 tx_ring->tx_errors++;
2569 return NETDEV_TX_BUSY;
2571 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2572 mac_iocb_ptr = tx_ring_desc->queue_entry;
2573 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
2575 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2576 mac_iocb_ptr->tid = tx_ring_desc->index;
2577 /* We use the upper 32-bits to store the tx queue for this IO.
2578 * When we get the completion we can use it to establish the context.
2580 mac_iocb_ptr->txq_idx = tx_ring_idx;
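/* The completion handler (ql_clean_outbound_rx_ring) reads txq_idx
 * back from the response to locate the tx_ring that owns this send.
 */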
2581 tx_ring_desc->skb = skb;
2583 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2585 if (skb_vlan_tag_present(skb)) {
2586 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2587 "Adding a vlan tag %d.\n", skb_vlan_tag_get(skb));
2588 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2589 mac_iocb_ptr->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
2591 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2593 dev_kfree_skb_any(skb);
2594 return NETDEV_TX_OK;
2595 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2596 ql_hw_csum_setup(skb,
2597 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2599 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2601 netif_err(qdev, tx_queued, qdev->ndev,
2602 "Could not map the segments.\n");
2603 tx_ring->tx_errors++;
2604 return NETDEV_TX_BUSY;
2606 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2607 tx_ring->prod_idx++;
2608 if (tx_ring->prod_idx == tx_ring->wq_len)
2609 tx_ring->prod_idx = 0;
2612 ql_write_db_reg_relaxed(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2613 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2614 "tx queued, slot %d, len %d\n",
2615 tx_ring->prod_idx, skb->len);
2617 atomic_dec(&tx_ring->tx_count);
2619 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2620 netif_stop_subqueue(ndev, tx_ring->wq_id);
2621 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2623 * The queue got stopped because the tx_ring was full.
2624 * Wake it up, because it's now at least 25% empty.
2626 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2628 return NETDEV_TX_OK;
2631 static void ql_free_shadow_space(struct ql_adapter *qdev)
2633 if (qdev->rx_ring_shadow_reg_area) {
2634 pci_free_consistent(qdev->pdev,
2636 qdev->rx_ring_shadow_reg_area,
2637 qdev->rx_ring_shadow_reg_dma);
2638 qdev->rx_ring_shadow_reg_area = NULL;
2640 if (qdev->tx_ring_shadow_reg_area) {
2641 pci_free_consistent(qdev->pdev,
2643 qdev->tx_ring_shadow_reg_area,
2644 qdev->tx_ring_shadow_reg_dma);
2645 qdev->tx_ring_shadow_reg_area = NULL;
2649 static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2651 qdev->rx_ring_shadow_reg_area =
2652 pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
2653 &qdev->rx_ring_shadow_reg_dma);
2654 if (!qdev->rx_ring_shadow_reg_area) {
2655 netif_err(qdev, ifup, qdev->ndev,
2656 "Allocation of RX shadow space failed.\n");
2660 qdev->tx_ring_shadow_reg_area =
2661 pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
2662 &qdev->tx_ring_shadow_reg_dma);
2663 if (!qdev->tx_ring_shadow_reg_area) {
2664 netif_err(qdev, ifup, qdev->ndev,
2665 "Allocation of TX shadow space failed.\n");
2666 goto err_wqp_sh_area;
2671 pci_free_consistent(qdev->pdev,
2673 qdev->rx_ring_shadow_reg_area,
2674 qdev->rx_ring_shadow_reg_dma);
2678 static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2680 struct tx_ring_desc *tx_ring_desc;
2682 struct ob_mac_iocb_req *mac_iocb_ptr;
2684 mac_iocb_ptr = tx_ring->wq_base;
2685 tx_ring_desc = tx_ring->q;
2686 for (i = 0; i < tx_ring->wq_len; i++) {
2687 tx_ring_desc->index = i;
2688 tx_ring_desc->skb = NULL;
2689 tx_ring_desc->queue_entry = mac_iocb_ptr;
2693 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2696 static void ql_free_tx_resources(struct ql_adapter *qdev,
2697 struct tx_ring *tx_ring)
2699 if (tx_ring->wq_base) {
2700 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2701 tx_ring->wq_base, tx_ring->wq_base_dma);
2702 tx_ring->wq_base = NULL;
2708 static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2709 struct tx_ring *tx_ring)
2712 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2713 &tx_ring->wq_base_dma);
2715 if (!tx_ring->wq_base ||
2716 tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
2720 kmalloc_array(tx_ring->wq_len, sizeof(struct tx_ring_desc),
2727 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2728 tx_ring->wq_base, tx_ring->wq_base_dma);
2729 tx_ring->wq_base = NULL;
2731 netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
2735 static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2737 struct qlge_bq *lbq = &rx_ring->lbq;
2738 unsigned int last_offset;
2740 last_offset = ql_lbq_block_size(qdev) - qdev->lbq_buf_size;
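/* Several lbq descriptors share one higher-order "master chunk" page;
 * the DMA mapping spans the whole block, so unmap it only when freeing
 * the descriptor that holds the last chunk (offset == last_offset).
 */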
2741 while (lbq->next_to_clean != lbq->next_to_use) {
2742 struct qlge_bq_desc *lbq_desc =
2743 &lbq->queue[lbq->next_to_clean];
2745 if (lbq_desc->p.pg_chunk.offset == last_offset)
2746 pci_unmap_page(qdev->pdev, lbq_desc->dma_addr,
2747 ql_lbq_block_size(qdev),
2748 PCI_DMA_FROMDEVICE);
2749 put_page(lbq_desc->p.pg_chunk.page);
2751 lbq->next_to_clean = QLGE_BQ_WRAP(lbq->next_to_clean + 1);
2754 if (rx_ring->master_chunk.page) {
2755 pci_unmap_page(qdev->pdev, rx_ring->chunk_dma_addr,
2756 ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE);
2757 put_page(rx_ring->master_chunk.page);
2758 rx_ring->master_chunk.page = NULL;
2762 static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2766 for (i = 0; i < QLGE_BQ_LEN; i++) {
2767 struct qlge_bq_desc *sbq_desc = &rx_ring->sbq.queue[i];
2770 netif_err(qdev, ifup, qdev->ndev,
2771 "sbq_desc %d is NULL.\n", i);
2774 if (sbq_desc->p.skb) {
2775 pci_unmap_single(qdev->pdev, sbq_desc->dma_addr,
2777 PCI_DMA_FROMDEVICE);
2778 dev_kfree_skb(sbq_desc->p.skb);
2779 sbq_desc->p.skb = NULL;
2784 /* Free all large and small rx buffers associated
2785 * with the completion queues for this device.
2787 static void ql_free_rx_buffers(struct ql_adapter *qdev)
2791 for (i = 0; i < qdev->rx_ring_count; i++) {
2792 struct rx_ring *rx_ring = &qdev->rx_ring[i];
2794 if (rx_ring->lbq.queue)
2795 ql_free_lbq_buffers(qdev, rx_ring);
2796 if (rx_ring->sbq.queue)
2797 ql_free_sbq_buffers(qdev, rx_ring);
2801 static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2805 for (i = 0; i < qdev->rss_ring_count; i++)
2806 ql_update_buffer_queues(&qdev->rx_ring[i], GFP_KERNEL,
2810 static int qlge_init_bq(struct qlge_bq *bq)
2812 struct rx_ring *rx_ring = QLGE_BQ_CONTAINER(bq);
2813 struct ql_adapter *qdev = rx_ring->qdev;
2814 struct qlge_bq_desc *bq_desc;
2818 bq->base = pci_alloc_consistent(qdev->pdev, QLGE_BQ_SIZE,
2821 netif_err(qdev, ifup, qdev->ndev,
2822 "ring %u %s allocation failed.\n", rx_ring->cq_id,
2823 bq_type_name[bq->type]);
2827 bq->queue = kmalloc_array(QLGE_BQ_LEN, sizeof(struct qlge_bq_desc),
2833 bq_desc = &bq->queue[0];
2834 for (i = 0; i < QLGE_BQ_LEN; i++, buf_ptr++, bq_desc++) {
2835 bq_desc->p.skb = NULL;
2837 bq_desc->buf_ptr = buf_ptr;
2843 static void ql_free_rx_resources(struct ql_adapter *qdev,
2844 struct rx_ring *rx_ring)
2846 /* Free the small buffer queue. */
2847 if (rx_ring->sbq.base) {
2848 pci_free_consistent(qdev->pdev, QLGE_BQ_SIZE,
2849 rx_ring->sbq.base, rx_ring->sbq.base_dma);
2850 rx_ring->sbq.base = NULL;
2853 /* Free the small buffer queue control blocks. */
2854 kfree(rx_ring->sbq.queue);
2855 rx_ring->sbq.queue = NULL;
2857 /* Free the large buffer queue. */
2858 if (rx_ring->lbq.base) {
2859 pci_free_consistent(qdev->pdev, QLGE_BQ_SIZE,
2860 rx_ring->lbq.base, rx_ring->lbq.base_dma);
2861 rx_ring->lbq.base = NULL;
2864 /* Free the large buffer queue control blocks. */
2865 kfree(rx_ring->lbq.queue);
2866 rx_ring->lbq.queue = NULL;
2868 /* Free the rx queue. */
2869 if (rx_ring->cq_base) {
2870 pci_free_consistent(qdev->pdev,
2872 rx_ring->cq_base, rx_ring->cq_base_dma);
2873 rx_ring->cq_base = NULL;
2877 /* Allocate queues and buffers for this completion queue based
2878 * on the values in the parameter structure.
2880 static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2881 struct rx_ring *rx_ring)
2885 * Allocate the completion queue for this rx_ring.
2888 pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2889 &rx_ring->cq_base_dma);
2891 if (!rx_ring->cq_base) {
2892 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
2896 if (rx_ring->cq_id < qdev->rss_ring_count &&
2897 (qlge_init_bq(&rx_ring->sbq) || qlge_init_bq(&rx_ring->lbq))) {
2898 ql_free_rx_resources(qdev, rx_ring);
2905 static void ql_tx_ring_clean(struct ql_adapter *qdev)
2907 struct tx_ring *tx_ring;
2908 struct tx_ring_desc *tx_ring_desc;
2912 * Loop through all queues and free any outstanding resources.
2915 for (j = 0; j < qdev->tx_ring_count; j++) {
2916 tx_ring = &qdev->tx_ring[j];
2917 for (i = 0; i < tx_ring->wq_len; i++) {
2918 tx_ring_desc = &tx_ring->q[i];
2919 if (tx_ring_desc && tx_ring_desc->skb) {
2920 netif_err(qdev, ifdown, qdev->ndev,
2921 "Freeing lost SKB %p, from queue %d, index %d.\n",
2922 tx_ring_desc->skb, j,
2923 tx_ring_desc->index);
2924 ql_unmap_send(qdev, tx_ring_desc,
2925 tx_ring_desc->map_cnt);
2926 dev_kfree_skb(tx_ring_desc->skb);
2927 tx_ring_desc->skb = NULL;
2933 static void ql_free_mem_resources(struct ql_adapter *qdev)
2937 for (i = 0; i < qdev->tx_ring_count; i++)
2938 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
2939 for (i = 0; i < qdev->rx_ring_count; i++)
2940 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
2941 ql_free_shadow_space(qdev);
2944 static int ql_alloc_mem_resources(struct ql_adapter *qdev)
2948 /* Allocate space for our shadow registers and such. */
2949 if (ql_alloc_shadow_space(qdev))
2952 for (i = 0; i < qdev->rx_ring_count; i++) {
2953 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
2954 netif_err(qdev, ifup, qdev->ndev,
2955 "RX resource allocation failed.\n");
2959 /* Allocate tx queue resources */
2960 for (i = 0; i < qdev->tx_ring_count; i++) {
2961 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
2962 netif_err(qdev, ifup, qdev->ndev,
2963 "TX resource allocation failed.\n");
2970 ql_free_mem_resources(qdev);
2974 /* Set up the rx ring control block and pass it to the chip.
2975 * The control block is defined as
2976 * "Completion Queue Initialization Control Block", or cqicb.
2978 static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2980 struct cqicb *cqicb = &rx_ring->cqicb;
2981 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
2982 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
2983 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
2984 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
2985 void __iomem *doorbell_area =
2986 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
2989 __le64 *base_indirect_ptr;
2992 /* Set up the shadow registers for this ring. */
2993 rx_ring->prod_idx_sh_reg = shadow_reg;
2994 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
2995 *rx_ring->prod_idx_sh_reg = 0;
2996 shadow_reg += sizeof(u64);
2997 shadow_reg_dma += sizeof(u64);
2998 rx_ring->lbq.base_indirect = shadow_reg;
2999 rx_ring->lbq.base_indirect_dma = shadow_reg_dma;
3000 shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
3001 shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
3002 rx_ring->sbq.base_indirect = shadow_reg;
3003 rx_ring->sbq.base_indirect_dma = shadow_reg_dma;
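/* Per-ring shadow layout within the PAGE_SIZE area allocated by
 * ql_alloc_shadow_space(): one u64 for the producer index, then
 * MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN) u64 entries for the lbq indirection
 * table, then the same number again for the sbq indirection table.
 */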
3005 /* PCI doorbell mem area + 0x00 for consumer index register */
3006 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
3007 rx_ring->cnsmr_idx = 0;
3008 rx_ring->curr_entry = rx_ring->cq_base;
3010 /* PCI doorbell mem area + 0x04 for valid register */
3011 rx_ring->valid_db_reg = doorbell_area + 0x04;
3013 /* PCI doorbell mem area + 0x18 for large buffer consumer */
3014 rx_ring->lbq.prod_idx_db_reg = (u32 __iomem *)(doorbell_area + 0x18);
3016 /* PCI doorbell mem area + 0x1c */
3017 rx_ring->sbq.prod_idx_db_reg = (u32 __iomem *)(doorbell_area + 0x1c);
3019 memset((void *)cqicb, 0, sizeof(struct cqicb));
3020 cqicb->msix_vect = rx_ring->irq;
3022 cqicb->len = cpu_to_le16(QLGE_FIT16(rx_ring->cq_len) | LEN_V |
3025 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
3027 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
3030 * Set up the control block load flags.
3032 cqicb->flags = FLAGS_LC | /* Load queue base address */
3033 FLAGS_LV | /* Load MSI-X vector */
3034 FLAGS_LI; /* Load irq delay values */
3035 if (rx_ring->cq_id < qdev->rss_ring_count) {
3036 cqicb->flags |= FLAGS_LL; /* Load lbq values */
3037 tmp = (u64)rx_ring->lbq.base_dma;
3038 base_indirect_ptr = rx_ring->lbq.base_indirect;
3041 *base_indirect_ptr = cpu_to_le64(tmp);
3042 tmp += DB_PAGE_SIZE;
3043 base_indirect_ptr++;
3045 } while (page_entries < MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
3046 cqicb->lbq_addr = cpu_to_le64(rx_ring->lbq.base_indirect_dma);
3047 cqicb->lbq_buf_size =
3048 cpu_to_le16(QLGE_FIT16(qdev->lbq_buf_size));
3049 cqicb->lbq_len = cpu_to_le16(QLGE_FIT16(QLGE_BQ_LEN));
3050 rx_ring->lbq.next_to_use = 0;
3051 rx_ring->lbq.next_to_clean = 0;
3053 cqicb->flags |= FLAGS_LS; /* Load sbq values */
3054 tmp = (u64)rx_ring->sbq.base_dma;
3055 base_indirect_ptr = rx_ring->sbq.base_indirect;
3058 *base_indirect_ptr = cpu_to_le64(tmp);
3059 tmp += DB_PAGE_SIZE;
3060 base_indirect_ptr++;
3062 } while (page_entries < MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
3064 cpu_to_le64(rx_ring->sbq.base_indirect_dma);
3065 cqicb->sbq_buf_size = cpu_to_le16(SMALL_BUFFER_SIZE);
3066 cqicb->sbq_len = cpu_to_le16(QLGE_FIT16(QLGE_BQ_LEN));
3067 rx_ring->sbq.next_to_use = 0;
3068 rx_ring->sbq.next_to_clean = 0;
3070 if (rx_ring->cq_id < qdev->rss_ring_count) {
3071 /* Inbound completion handling rx_rings run in
3072 * separate NAPI contexts.
3074 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3076 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3077 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3079 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3080 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3082 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3083 CFG_LCQ, rx_ring->cq_id);
3085 netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
3091 static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3093 struct wqicb *wqicb = (struct wqicb *)tx_ring;
3094 void __iomem *doorbell_area =
3095 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3096 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3097 (tx_ring->wq_id * sizeof(u64));
3098 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3099 (tx_ring->wq_id * sizeof(u64));
3103 * Assign doorbell registers for this tx_ring.
3105 /* TX PCI doorbell mem area for tx producer index */
3106 tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
3107 tx_ring->prod_idx = 0;
3108 /* TX PCI doorbell mem area + 0x04 */
3109 tx_ring->valid_db_reg = doorbell_area + 0x04;
3112 * Assign shadow registers for this tx_ring.
3114 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3115 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3117 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3118 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3119 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3120 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3122 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
3124 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
3126 ql_init_tx_ring(qdev, tx_ring);
3128 err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
3129 (u16) tx_ring->wq_id);
3131 netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
3137 static void ql_disable_msix(struct ql_adapter *qdev)
3139 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3140 pci_disable_msix(qdev->pdev);
3141 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3142 kfree(qdev->msi_x_entry);
3143 qdev->msi_x_entry = NULL;
3144 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3145 pci_disable_msi(qdev->pdev);
3146 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3150 /* We start by trying to get the number of vectors
3151 * stored in qdev->intr_count. If we don't get that
3152 * many then we reduce the count and try again.
3154 static void ql_enable_msix(struct ql_adapter *qdev)
3158 /* Get the MSIX vectors. */
3159 if (qlge_irq_type == MSIX_IRQ) {
3160 /* Try to alloc space for the msix struct,
3161 * if it fails then go to MSI/legacy.
3163 qdev->msi_x_entry = kcalloc(qdev->intr_count,
3164 sizeof(struct msix_entry),
3166 if (!qdev->msi_x_entry) {
3167 qlge_irq_type = MSI_IRQ;
3171 for (i = 0; i < qdev->intr_count; i++)
3172 qdev->msi_x_entry[i].entry = i;
3174 err = pci_enable_msix_range(qdev->pdev, qdev->msi_x_entry,
3175 1, qdev->intr_count);
3177 kfree(qdev->msi_x_entry);
3178 qdev->msi_x_entry = NULL;
3179 netif_warn(qdev, ifup, qdev->ndev,
3180 "MSI-X Enable failed, trying MSI.\n");
3181 qlge_irq_type = MSI_IRQ;
3183 qdev->intr_count = err;
3184 set_bit(QL_MSIX_ENABLED, &qdev->flags);
3185 netif_info(qdev, ifup, qdev->ndev,
3186 "MSI-X Enabled, got %d vectors.\n",
3192 qdev->intr_count = 1;
3193 if (qlge_irq_type == MSI_IRQ) {
3194 if (!pci_enable_msi(qdev->pdev)) {
3195 set_bit(QL_MSI_ENABLED, &qdev->flags);
3196 netif_info(qdev, ifup, qdev->ndev,
3197 "Running with MSI interrupts.\n");
3201 qlge_irq_type = LEG_IRQ;
3202 set_bit(QL_LEGACY_ENABLED, &qdev->flags);
3203 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3204 "Running with legacy interrupts.\n");
3207 /* Each vector services 1 RSS ring and 1 or more
3208 * TX completion rings. This function loops through
3209 * the TX completion rings and assigns the vector that
3210 * will service it. An example would be if there are
3211 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3212 * This would mean that vector 0 would service RSS ring 0
3213 * and TX completion rings 0,1,2 and 3. Vector 1 would
3214 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3216 static void ql_set_tx_vect(struct ql_adapter *qdev)
3219 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3221 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3222 /* Assign irq vectors to TX rx_rings.*/
3223 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3224 i < qdev->rx_ring_count; i++) {
3225 if (j == tx_rings_per_vector) {
3229 qdev->rx_ring[i].irq = vect;
3233 /* For single vector all rings have an irq vector of zero. */
3236 for (i = 0; i < qdev->rx_ring_count; i++)
3237 qdev->rx_ring[i].irq = 0;
3241 /* Set the interrupt mask for this vector. Each vector
3242 * will service 1 RSS ring and 1 or more TX completion
3243 * rings. This function sets up a bit mask per vector
3244 * that indicates which rings it services.
3246 static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3248 int j, vect = ctx->intr;
3249 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3251 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3252 /* Add the RSS ring serviced by this vector
3255 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3256 /* Add the TX ring(s) serviced by this vector
3258 for (j = 0; j < tx_rings_per_vector; j++) {
3260 (1 << qdev->rx_ring[qdev->rss_ring_count +
3261 (vect * tx_rings_per_vector) + j].cq_id);
3264 /* For single vector we just shift each queue's
3267 for (j = 0; j < qdev->rx_ring_count; j++)
3268 ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
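/* Following the example above (2 vectors, 8 TX completion rings,
 * tx_rings_per_vector = 4): vector 1 serves RSS ring 1 plus the TX
 * completion rings with cq_id 6..9, so its irq_mask ends up as
 * (1 << 1) | (1 << 6) | (1 << 7) | (1 << 8) | (1 << 9).
 */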
3273 * Here we build the intr_context structures based on
3274 * our rx_ring count and intr vector count.
3275 * The intr_context structure is used to hook each vector
3276 * to possibly different handlers.
3278 static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3281 struct intr_context *intr_context = &qdev->intr_context[0];
3283 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3284 /* Each rx_ring has its
3285 * own intr_context since we have separate
3286 * vectors for each queue.
3288 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3289 qdev->rx_ring[i].irq = i;
3290 intr_context->intr = i;
3291 intr_context->qdev = qdev;
3292 /* Set up this vector's bit-mask that indicates
3293 * which queues it services.
3295 ql_set_irq_mask(qdev, intr_context);
3297 * We set up each vector's enable/disable/read bits so
3298 * there's no bit/mask calculations in the critical path.
3300 intr_context->intr_en_mask =
3301 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3302 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3304 intr_context->intr_dis_mask =
3305 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3306 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3308 intr_context->intr_read_mask =
3309 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3310 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3313 /* The first vector/queue handles
3314 * broadcast/multicast, fatal errors,
3315 * and firmware events. This in addition
3316 * to normal inbound NAPI processing.
3318 intr_context->handler = qlge_isr;
3319 sprintf(intr_context->name, "%s-rx-%d",
3320 qdev->ndev->name, i);
3323 * Inbound queues handle unicast frames only.
3325 intr_context->handler = qlge_msix_rx_isr;
3326 sprintf(intr_context->name, "%s-rx-%d",
3327 qdev->ndev->name, i);
3332 * All rx_rings use the same intr_context since
3333 * there is only one vector.
3335 intr_context->intr = 0;
3336 intr_context->qdev = qdev;
3338 * We set up each vector's enable/disable/read bits so
3339 * there's no bit/mask calculations in the critical path.
3341 intr_context->intr_en_mask =
3342 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3343 intr_context->intr_dis_mask =
3344 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3345 INTR_EN_TYPE_DISABLE;
3346 if (test_bit(QL_LEGACY_ENABLED, &qdev->flags)) {
3347 /* Experience shows that when using INTx interrupts,
3348 * the device does not always auto-mask INTR_EN_EN.
3349 * Moreover, masking INTR_EN_EN manually does not
3350 * immediately prevent interrupt generation.
3352 intr_context->intr_en_mask |= INTR_EN_EI << 16 |
3354 intr_context->intr_dis_mask |= INTR_EN_EI << 16;
3356 intr_context->intr_read_mask =
3357 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3359 * Single interrupt means one handler for all rings.
3361 intr_context->handler = qlge_isr;
3362 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
3363 /* Set up this vector's bit-mask that indicates
3364 * which queues it services. In this case there is
3365 * a single vector so it will service all RSS and
3366 * TX completion rings.
3368 ql_set_irq_mask(qdev, intr_context);
3370 /* Tell the TX completion rings which MSIx vector
3371 * they will be using.
3373 ql_set_tx_vect(qdev);
3376 static void ql_free_irq(struct ql_adapter *qdev)
3379 struct intr_context *intr_context = &qdev->intr_context[0];
3381 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3382 if (intr_context->hooked) {
3383 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3384 free_irq(qdev->msi_x_entry[i].vector,
3387 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
3391 ql_disable_msix(qdev);
3394 static int ql_request_irq(struct ql_adapter *qdev)
3398 struct pci_dev *pdev = qdev->pdev;
3399 struct intr_context *intr_context = &qdev->intr_context[0];
3401 ql_resolve_queues_to_irqs(qdev);
3403 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3404 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3405 status = request_irq(qdev->msi_x_entry[i].vector,
3406 intr_context->handler,
3411 netif_err(qdev, ifup, qdev->ndev,
3412 "Failed request for MSIX interrupt %d.\n",
3417 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3418 "trying msi or legacy interrupts.\n");
3419 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3420 "%s: irq = %d.\n", __func__, pdev->irq);
3421 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3422 "%s: context->name = %s.\n", __func__,
3423 intr_context->name);
3424 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3425 "%s: dev_id = 0x%p.\n", __func__,
3428 request_irq(pdev->irq, qlge_isr,
3429 test_bit(QL_MSI_ENABLED,
3431 flags) ? 0 : IRQF_SHARED,
3432 intr_context->name, &qdev->rx_ring[0]);
3436 netif_err(qdev, ifup, qdev->ndev,
3437 "Hooked intr 0, queue type RX_Q, with name %s.\n",
3438 intr_context->name);
3440 intr_context->hooked = 1;
3444 netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
3449 static int ql_start_rss(struct ql_adapter *qdev)
3451 static const u8 init_hash_seed[] = {
3452 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3453 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
3454 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
3455 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
3456 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
3458 struct ricb *ricb = &qdev->ricb;
3461 u8 *hash_id = (u8 *) ricb->hash_cq_id;
3463 memset((void *)ricb, 0, sizeof(*ricb));
3465 ricb->base_cq = RSS_L4K;
3467 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3468 ricb->mask = cpu_to_le16((u16)(0x3ff));
3471 * Fill out the Indirection Table.
3473 for (i = 0; i < 1024; i++)
3474 hash_id[i] = (i & (qdev->rss_ring_count - 1));
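/* The 0x3ff mask limits the RSS hash to 10 bits, so the hash indexes
 * this 1024-entry table to pick the inbound completion queue.
 */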
3476 memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3477 memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
3479 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3481 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
3487 static int ql_clear_routing_entries(struct ql_adapter *qdev)
3491 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3494 /* Clear all the entries in the routing table. */
3495 for (i = 0; i < 16; i++) {
3496 status = ql_set_routing_reg(qdev, i, 0, 0);
3498 netif_err(qdev, ifup, qdev->ndev,
3499 "Failed to init routing register for CAM packets.\n");
3503 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3507 /* Initialize the frame-to-queue routing. */
3508 static int ql_route_initialize(struct ql_adapter *qdev)
3512 /* Clear all the entries in the routing table. */
3513 status = ql_clear_routing_entries(qdev);
3517 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3521 status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3522 RT_IDX_IP_CSUM_ERR, 1);
3524 netif_err(qdev, ifup, qdev->ndev,
3525 "Failed to init routing register for IP CSUM error packets.\n");
3528 status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3529 RT_IDX_TU_CSUM_ERR, 1);
3531 netif_err(qdev, ifup, qdev->ndev,
3532 "Failed to init routing register for TCP/UDP CSUM error packets.\n");
3535 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3537 netif_err(qdev, ifup, qdev->ndev,
3538 "Failed to init routing register for broadcast packets.\n");
3541 /* If we have more than one inbound queue, then turn on RSS in the routing block.
3544 if (qdev->rss_ring_count > 1) {
3545 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3546 RT_IDX_RSS_MATCH, 1);
3548 netif_err(qdev, ifup, qdev->ndev,
3549 "Failed to init routing register for MATCH RSS packets.\n");
3554 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3557 netif_err(qdev, ifup, qdev->ndev,
3558 "Failed to init routing register for CAM packets.\n");
3560 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3564 int ql_cam_route_initialize(struct ql_adapter *qdev)
3568 /* Check if the link is up and use that to
3569 * determine whether we are setting or clearing
3570 * the MAC address in the CAM.
3572 set = ql_read32(qdev, STS);
3573 set &= qdev->port_link_up;
3574 status = ql_set_mac_addr(qdev, set);
3576 netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
3580 status = ql_route_initialize(qdev);
3582 netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
3587 static int ql_adapter_initialize(struct ql_adapter *qdev)
3594 * Set up the System register to halt on errors.
3596 value = SYS_EFE | SYS_FAE;
3598 ql_write32(qdev, SYS, mask | value);
3600 /* Set the default queue, and VLAN behavior. */
3601 value = NIC_RCV_CFG_DFQ;
3602 mask = NIC_RCV_CFG_DFQ_MASK;
3603 if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
3604 value |= NIC_RCV_CFG_RV;
3605 mask |= (NIC_RCV_CFG_RV << 16);
3607 ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3609 /* Set the MPI interrupt to enabled. */
3610 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3612 /* Enable the function, set pagesize, enable error checking. */
3613 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3614 FSC_EC | FSC_VM_PAGE_4K;
3615 value |= SPLT_SETTING;
3617 /* Set/clear header splitting. */
3618 mask = FSC_VM_PAGESIZE_MASK |
3619 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3620 ql_write32(qdev, FSC, mask | value);
3622 ql_write32(qdev, SPLT_HDR, SPLT_LEN);
3624 /* Set RX packet routing to use the port/pci function on which the
3625 * packet arrived, in addition to the usual frame routing.
3626 * This is helpful on bonding where both interfaces can have
3627 * the same MAC address.
3629 ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3630 /* Reroute all packets to our Interface.
3631 * They may have been routed to MPI firmware due to WOL.
3634 value = ql_read32(qdev, MGMT_RCV_CFG);
3635 value &= ~MGMT_RCV_CFG_RM;
3638 /* Sticky reg needs clearing due to WOL. */
3639 ql_write32(qdev, MGMT_RCV_CFG, mask);
3640 ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3642 /* Default WOL is enabled on Mezz cards */
3643 if (qdev->pdev->subsystem_device == 0x0068 ||
3644 qdev->pdev->subsystem_device == 0x0180)
3645 qdev->wol = WAKE_MAGIC;
3647 /* Start up the rx queues. */
3648 for (i = 0; i < qdev->rx_ring_count; i++) {
3649 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3651 netif_err(qdev, ifup, qdev->ndev,
3652 "Failed to start rx ring[%d].\n", i);
3657 /* If there is more than one inbound completion queue
3658 * then download a RICB to configure RSS.
3660 if (qdev->rss_ring_count > 1) {
3661 status = ql_start_rss(qdev);
3663 netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
3668 /* Start up the tx queues. */
3669 for (i = 0; i < qdev->tx_ring_count; i++) {
3670 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3672 netif_err(qdev, ifup, qdev->ndev,
3673 "Failed to start tx ring[%d].\n", i);
3678 /* Initialize the port and set the max framesize. */
3679 status = qdev->nic_ops->port_initialize(qdev);
3681 netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
3683 /* Set up the MAC address and frame routing filter. */
3684 status = ql_cam_route_initialize(qdev);
3686 netif_err(qdev, ifup, qdev->ndev,
3687 "Failed to init CAM/Routing tables.\n");
3691 /* Start NAPI for the RSS queues. */
3692 for (i = 0; i < qdev->rss_ring_count; i++)
3693 napi_enable(&qdev->rx_ring[i].napi);
3698 /* Issue soft reset to chip. */
3699 static int ql_adapter_reset(struct ql_adapter *qdev)
3703 unsigned long end_jiffies;
3705 /* Clear all the entries in the routing table. */
3706 status = ql_clear_routing_entries(qdev);
3708 netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
3712 /* If the recovery bit is set, skip the mailbox command and
3713 * clear the bit; otherwise we are in the normal reset process.
3715 if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
3716 /* Stop management traffic. */
3717 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3719 /* Wait for the NIC and MGMNT FIFOs to empty. */
3720 ql_wait_fifo_empty(qdev);
3722 clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
3725 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3727 end_jiffies = jiffies + usecs_to_jiffies(30);
3729 value = ql_read32(qdev, RST_FO);
3730 if ((value & RST_FO_FR) == 0)
3733 } while (time_before(jiffies, end_jiffies));
3735 if (value & RST_FO_FR) {
3736 netif_err(qdev, ifdown, qdev->ndev,
3737 "ETIMEDOUT!!! errored out of resetting the chip!\n");
3738 status = -ETIMEDOUT;
3741 /* Resume management traffic. */
3742 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
3746 static void ql_display_dev_info(struct net_device *ndev)
3748 struct ql_adapter *qdev = netdev_priv(ndev);
3750 netif_info(qdev, probe, qdev->ndev,
3751 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3752 "XG Roll = %d, XG Rev = %d.\n",
3755 qdev->chip_rev_id & 0x0000000f,
3756 qdev->chip_rev_id >> 4 & 0x0000000f,
3757 qdev->chip_rev_id >> 8 & 0x0000000f,
3758 qdev->chip_rev_id >> 12 & 0x0000000f);
3759 netif_info(qdev, probe, qdev->ndev,
3760 "MAC address %pM\n", ndev->dev_addr);
3763 static int ql_wol(struct ql_adapter *qdev)
3766 u32 wol = MB_WOL_DISABLE;
3768 /* The CAM is still intact after a reset, but if we
3769 * are doing WOL, then we may need to program the
3770 * routing regs. We would also need to issue the mailbox
3771 * commands to instruct the MPI what to do per the ethtool settings.
3775 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3776 WAKE_MCAST | WAKE_BCAST)) {
3777 netif_err(qdev, ifdown, qdev->ndev,
3778 "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
3783 if (qdev->wol & WAKE_MAGIC) {
3784 status = ql_mb_wol_set_magic(qdev, 1);
3786 netif_err(qdev, ifdown, qdev->ndev,
3787 "Failed to set magic packet on %s.\n",
3791 netif_info(qdev, drv, qdev->ndev,
3792 "Enabled magic packet successfully on %s.\n",
3795 wol |= MB_WOL_MAGIC_PKT;
3799 wol |= MB_WOL_MODE_ON;
3800 status = ql_mb_wol_mode(qdev, wol);
3801 netif_err(qdev, drv, qdev->ndev,
3802 "WOL %s (wol code 0x%x) on %s\n",
3803 (status == 0) ? "Successfully set" : "Failed",
3804 wol, qdev->ndev->name);
3810 static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
3813 /* Don't kill the reset worker thread if we
3814 * are in the process of recovery.
3816 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3817 cancel_delayed_work_sync(&qdev->asic_reset_work);
3818 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3819 cancel_delayed_work_sync(&qdev->mpi_work);
3820 cancel_delayed_work_sync(&qdev->mpi_idc_work);
3821 cancel_delayed_work_sync(&qdev->mpi_core_to_log);
3822 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3825 static int ql_adapter_down(struct ql_adapter *qdev)
3831 ql_cancel_all_work_sync(qdev);
3833 for (i = 0; i < qdev->rss_ring_count; i++)
3834 napi_disable(&qdev->rx_ring[i].napi);
3836 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3838 ql_disable_interrupts(qdev);
3840 ql_tx_ring_clean(qdev);
3842 /* Call netif_napi_del() from common point.
3844 for (i = 0; i < qdev->rss_ring_count; i++)
3845 netif_napi_del(&qdev->rx_ring[i].napi);
3847 status = ql_adapter_reset(qdev);
3849 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
3851 ql_free_rx_buffers(qdev);
3856 static int ql_adapter_up(struct ql_adapter *qdev)
3860 err = ql_adapter_initialize(qdev);
3862 netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
3865 set_bit(QL_ADAPTER_UP, &qdev->flags);
3866 ql_alloc_rx_buffers(qdev);
3867 /* If the port is initialized and the
3868 * link is up then turn on the carrier.
3870 if ((ql_read32(qdev, STS) & qdev->port_init) &&
3871 (ql_read32(qdev, STS) & qdev->port_link_up))
3873 /* Restore rx mode. */
3874 clear_bit(QL_ALLMULTI, &qdev->flags);
3875 clear_bit(QL_PROMISCUOUS, &qdev->flags);
3876 qlge_set_multicast_list(qdev->ndev);
3878 /* Restore vlan setting. */
3879 qlge_restore_vlan(qdev);
3881 ql_enable_interrupts(qdev);
3882 ql_enable_all_completion_interrupts(qdev);
3883 netif_tx_start_all_queues(qdev->ndev);
3887 ql_adapter_reset(qdev);
3891 static void ql_release_adapter_resources(struct ql_adapter *qdev)
3893 ql_free_mem_resources(qdev);
3897 static int ql_get_adapter_resources(struct ql_adapter *qdev)
3901 if (ql_alloc_mem_resources(qdev)) {
3902 netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
3905 status = ql_request_irq(qdev);
3909 static int qlge_close(struct net_device *ndev)
3911 struct ql_adapter *qdev = netdev_priv(ndev);
3914 /* If we hit the pci_channel_io_perm_failure
3915 * condition, then we have already
3916 * brought the adapter down.
3918 if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
3919 netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
3920 clear_bit(QL_EEH_FATAL, &qdev->flags);
3925 * Wait for device to recover from a reset.
3926 * (Rarely happens, but possible.)
3928 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
3931 /* Make sure refill_work doesn't re-enable napi */
3932 for (i = 0; i < qdev->rss_ring_count; i++)
3933 cancel_delayed_work_sync(&qdev->rx_ring[i].refill_work);
3935 ql_adapter_down(qdev);
3936 ql_release_adapter_resources(qdev);
3940 static void qlge_set_lb_size(struct ql_adapter *qdev)
3942 if (qdev->ndev->mtu <= 1500)
3943 qdev->lbq_buf_size = LARGE_BUFFER_MIN_SIZE;
3945 qdev->lbq_buf_size = LARGE_BUFFER_MAX_SIZE;
3946 qdev->lbq_buf_order = get_order(qdev->lbq_buf_size);
3949 static int ql_configure_rings(struct ql_adapter *qdev)
3952 struct rx_ring *rx_ring;
3953 struct tx_ring *tx_ring;
3954 int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
3956 /* In a perfect world we have one RSS ring for each CPU
3957 * and each has its own vector. To do that we ask for
3958 * cpu_cnt vectors. ql_enable_msix() will adjust the
3959 * vector count to what we actually get. We then
3960 * allocate an RSS ring for each.
3961 * Essentially, we are doing min(cpu_count, msix_vector_count).
3963 qdev->intr_count = cpu_cnt;
3964 ql_enable_msix(qdev);
3965 /* Adjust the RSS ring count to the actual vector count. */
3966 qdev->rss_ring_count = qdev->intr_count;
3967 qdev->tx_ring_count = cpu_cnt;
3968 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
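/* For example, a 4-CPU system that gets all 4 MSI-X vectors ends up
 * with rss_ring_count = 4, tx_ring_count = 4 and rx_ring_count = 8:
 * four inbound (RSS) completion rings followed by four outbound
 * completion rings, one per tx_ring.
 */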
3970 for (i = 0; i < qdev->tx_ring_count; i++) {
3971 tx_ring = &qdev->tx_ring[i];
3972 memset((void *)tx_ring, 0, sizeof(*tx_ring));
3973 tx_ring->qdev = qdev;
3975 tx_ring->wq_len = qdev->tx_ring_size;
3977 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
3980 * The completion queue IDs for the tx rings start
3981 * immediately after the rss rings.
3983 tx_ring->cq_id = qdev->rss_ring_count + i;
3986 for (i = 0; i < qdev->rx_ring_count; i++) {
3987 rx_ring = &qdev->rx_ring[i];
3988 memset((void *)rx_ring, 0, sizeof(*rx_ring));
3989 rx_ring->qdev = qdev;
3991 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
3992 if (i < qdev->rss_ring_count) {
3994 * Inbound (RSS) queues.
3996 rx_ring->cq_len = qdev->rx_ring_size;
3998 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3999 rx_ring->lbq.type = QLGE_LB;
4000 rx_ring->sbq.type = QLGE_SB;
4001 INIT_DELAYED_WORK(&rx_ring->refill_work,
4005 * Outbound queue handles outbound completions only.
4007 /* outbound cq is same size as tx_ring it services. */
4008 rx_ring->cq_len = qdev->tx_ring_size;
4010 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4016 static int qlge_open(struct net_device *ndev)
4019 struct ql_adapter *qdev = netdev_priv(ndev);
4021 err = ql_adapter_reset(qdev);
4025 qlge_set_lb_size(qdev);
4026 err = ql_configure_rings(qdev);
4030 err = ql_get_adapter_resources(qdev);
4034 err = ql_adapter_up(qdev);
4041 ql_release_adapter_resources(qdev);
4045 static int ql_change_rx_buffers(struct ql_adapter *qdev)
4049 /* Wait for an outstanding reset to complete. */
4050 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4053 while (--i && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4054 netif_err(qdev, ifup, qdev->ndev,
4055 "Waiting for adapter UP...\n");
4060 netif_err(qdev, ifup, qdev->ndev,
4061 "Timed out waiting for adapter UP\n");
4066 status = ql_adapter_down(qdev);
4070 qlge_set_lb_size(qdev);
4072 status = ql_adapter_up(qdev);
4078 netif_alert(qdev, ifup, qdev->ndev,
4079 "Driver up/down cycle failed, closing device.\n");
4080 set_bit(QL_ADAPTER_UP, &qdev->flags);
4081 dev_close(qdev->ndev);
4085 static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4087 struct ql_adapter *qdev = netdev_priv(ndev);
4090 if (ndev->mtu == 1500 && new_mtu == 9000)
4091 netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
4092 else if (ndev->mtu == 9000 && new_mtu == 1500)
4093 netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
4097 queue_delayed_work(qdev->workqueue,
4098 &qdev->mpi_port_cfg_work, 3 * HZ);
4100 ndev->mtu = new_mtu;
4102 if (!netif_running(qdev->ndev))
4105 status = ql_change_rx_buffers(qdev);
4107 netif_err(qdev, ifup, qdev->ndev,
4108 "Changing MTU failed.\n");
4114 static struct net_device_stats *qlge_get_stats(struct net_device *ndev)
4117 struct ql_adapter *qdev = netdev_priv(ndev);
4118 struct rx_ring *rx_ring = &qdev->rx_ring[0];
4119 struct tx_ring *tx_ring = &qdev->tx_ring[0];
4120 unsigned long pkts, mcast, dropped, errors, bytes;
4124 pkts = mcast = dropped = errors = bytes = 0;
4125 for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4126 pkts += rx_ring->rx_packets;
4127 bytes += rx_ring->rx_bytes;
4128 dropped += rx_ring->rx_dropped;
4129 errors += rx_ring->rx_errors;
4130 mcast += rx_ring->rx_multicast;
4132 ndev->stats.rx_packets = pkts;
4133 ndev->stats.rx_bytes = bytes;
4134 ndev->stats.rx_dropped = dropped;
4135 ndev->stats.rx_errors = errors;
4136 ndev->stats.multicast = mcast;
4139 pkts = errors = bytes = 0;
4140 for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4141 pkts += tx_ring->tx_packets;
4142 bytes += tx_ring->tx_bytes;
4143 errors += tx_ring->tx_errors;
4145 ndev->stats.tx_packets = pkts;
4146 ndev->stats.tx_bytes = bytes;
4147 ndev->stats.tx_errors = errors;
4148 return &ndev->stats;
4151 static void qlge_set_multicast_list(struct net_device *ndev)
4153 struct ql_adapter *qdev = netdev_priv(ndev);
4154 struct netdev_hw_addr *ha;
4157 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4161 * Set or clear promiscuous mode if a
4162 * transition is taking place.
4164 if (ndev->flags & IFF_PROMISC) {
4165 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4166 if (ql_set_routing_reg
4167 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
4168 netif_err(qdev, hw, qdev->ndev,
4169 "Failed to set promiscuous mode.\n");
4171 set_bit(QL_PROMISCUOUS, &qdev->flags);
4175 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4176 if (ql_set_routing_reg
4177 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
4178 netif_err(qdev, hw, qdev->ndev,
4179 "Failed to clear promiscuous mode.\n");
4181 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4187 * Set or clear all multicast mode if a
4188 * transition is taking place.
4190 if ((ndev->flags & IFF_ALLMULTI) ||
4191 (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
4192 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4193 if (ql_set_routing_reg
4194 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
4195 netif_err(qdev, hw, qdev->ndev,
4196 "Failed to set all-multi mode.\n");
4198 set_bit(QL_ALLMULTI, &qdev->flags);
4202 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4203 if (ql_set_routing_reg
4204 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
4205 netif_err(qdev, hw, qdev->ndev,
4206 "Failed to clear all-multi mode.\n");
4208 clear_bit(QL_ALLMULTI, &qdev->flags);
4213 if (!netdev_mc_empty(ndev)) {
4214 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4218 netdev_for_each_mc_addr(ha, ndev) {
4219 if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
4220 MAC_ADDR_TYPE_MULTI_MAC, i)) {
4221 netif_err(qdev, hw, qdev->ndev,
4222 "Failed to loadmulticast address.\n");
4223 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4228 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4229 if (ql_set_routing_reg
4230 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
4231 netif_err(qdev, hw, qdev->ndev,
4232 "Failed to set multicast match mode.\n");
4234 set_bit(QL_ALLMULTI, &qdev->flags);
4238 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
4241 static int qlge_set_mac_address(struct net_device *ndev, void *p)
4243 struct ql_adapter *qdev = netdev_priv(ndev);
4244 struct sockaddr *addr = p;
4247 if (!is_valid_ether_addr(addr->sa_data))
4248 return -EADDRNOTAVAIL;
4249 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
4250 /* Update local copy of current mac address. */
4251 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4253 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4256 status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
4257 MAC_ADDR_TYPE_CAM_MAC,
4258 qdev->func * MAX_CQ);
4260 netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
4261 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4265 static void qlge_tx_timeout(struct net_device *ndev, unsigned int txqueue)
4267 struct ql_adapter *qdev = netdev_priv(ndev);
4268 ql_queue_asic_error(qdev);
4271 static void ql_asic_reset_work(struct work_struct *work)
4273 struct ql_adapter *qdev =
4274 container_of(work, struct ql_adapter, asic_reset_work.work);
4277 status = ql_adapter_down(qdev);
4281 status = ql_adapter_up(qdev);
4285 /* Restore rx mode. */
4286 clear_bit(QL_ALLMULTI, &qdev->flags);
4287 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4288 qlge_set_multicast_list(qdev->ndev);
4293 netif_alert(qdev, ifup, qdev->ndev,
4294 "Driver up/down cycle failed, closing device\n");
4296 set_bit(QL_ADAPTER_UP, &qdev->flags);
4297 dev_close(qdev->ndev);
4301 static const struct nic_operations qla8012_nic_ops = {
4302 .get_flash = ql_get_8012_flash_params,
4303 .port_initialize = ql_8012_port_initialize,
4306 static const struct nic_operations qla8000_nic_ops = {
4307 .get_flash = ql_get_8000_flash_params,
4308 .port_initialize = ql_8000_port_initialize,
4311 /* Find the pcie function number for the other NIC
4312 * on this chip. Since both NIC functions share a
4313 * common firmware we have the lowest enabled function
4314 * do any common work. Examples would be resetting
4315 * after a fatal firmware error, or doing a firmware coredump.
4318 static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
4322 u32 nic_func1, nic_func2;
4324 status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4329 nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4330 MPI_TEST_NIC_FUNC_MASK);
4331 nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4332 MPI_TEST_NIC_FUNC_MASK);
4334 if (qdev->func == nic_func1)
4335 qdev->alt_func = nic_func2;
4336 else if (qdev->func == nic_func2)
4337 qdev->alt_func = nic_func1;
4344 static int ql_get_board_info(struct ql_adapter *qdev)
4348 (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
4352 status = ql_get_alt_pcie_func(qdev);
4356 qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
4358 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4359 qdev->port_link_up = STS_PL1;
4360 qdev->port_init = STS_PI1;
4361 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4362 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4364 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4365 qdev->port_link_up = STS_PL0;
4366 qdev->port_init = STS_PI0;
4367 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4368 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4370 qdev->chip_rev_id = ql_read32(qdev, REV_ID);
4371 qdev->device_id = qdev->pdev->device;
4372 if (qdev->device_id == QLGE_DEVICE_ID_8012)
4373 qdev->nic_ops = &qla8012_nic_ops;
4374 else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4375 qdev->nic_ops = &qla8000_nic_ops;
4379 static void ql_release_all(struct pci_dev *pdev)
4381 struct net_device *ndev = pci_get_drvdata(pdev);
4382 struct ql_adapter *qdev = netdev_priv(ndev);
4384 if (qdev->workqueue) {
4385 destroy_workqueue(qdev->workqueue);
4386 qdev->workqueue = NULL;
4390 iounmap(qdev->reg_base);
4391 if (qdev->doorbell_area)
4392 iounmap(qdev->doorbell_area);
4393 vfree(qdev->mpi_coredump);
4394 pci_release_regions(pdev);
4397 static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev, int cards_found)
4400 struct ql_adapter *qdev = netdev_priv(ndev);
4403 memset((void *)qdev, 0, sizeof(*qdev));
4404 err = pci_enable_device(pdev);
4406 dev_err(&pdev->dev, "PCI device enable failed.\n");
4412 pci_set_drvdata(pdev, ndev);
4414 /* Set PCIe read request size */
4415 err = pcie_set_readrq(pdev, 4096);
4417 dev_err(&pdev->dev, "Set readrq failed.\n");
4421 err = pci_request_regions(pdev, DRV_NAME);
4423 dev_err(&pdev->dev, "PCI region request failed.\n");
4427 pci_set_master(pdev);
4428 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4429 set_bit(QL_DMA64, &qdev->flags);
4430 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4432 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4434 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
4438 dev_err(&pdev->dev, "No usable DMA configuration.\n");
4442 /* Set PCIe reset type for EEH to fundamental. */
4443 pdev->needs_freset = 1;
4444 pci_save_state(pdev);
4446 ioremap(pci_resource_start(pdev, 1),
4447 pci_resource_len(pdev, 1));
4448 if (!qdev->reg_base) {
4449 dev_err(&pdev->dev, "Register mapping failed.\n");
4454 qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4455 qdev->doorbell_area =
4456 ioremap(pci_resource_start(pdev, 3),
4457 pci_resource_len(pdev, 3));
4458 if (!qdev->doorbell_area) {
4459 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4464 err = ql_get_board_info(qdev);
4466 dev_err(&pdev->dev, "Register access failed.\n");
4470 qdev->msg_enable = netif_msg_init(debug, default_msg);
4471 spin_lock_init(&qdev->stats_lock);
4473 if (qlge_mpi_coredump) {
4474 qdev->mpi_coredump =
4475 vmalloc(sizeof(struct ql_mpi_coredump));
4476 if (!qdev->mpi_coredump) {
4480 if (qlge_force_coredump)
4481 set_bit(QL_FRC_COREDUMP, &qdev->flags);
4483 /* make sure the EEPROM is good */
4484 err = qdev->nic_ops->get_flash(qdev);
4486 dev_err(&pdev->dev, "Invalid FLASH.\n");
4490 /* Keep local copy of current mac address. */
4491 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4493 /* Set up the default ring sizes. */
4494 qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4495 qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4497 /* Set up the coalescing parameters. */
4498 qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4499 qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4500 qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4501 qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4504 * Set up the operating parameters.
4506 qdev->workqueue = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
4508 if (!qdev->workqueue) {
4513 INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
4514 INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
4515 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
4516 INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
4517 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
4518 INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
4519 init_completion(&qdev->ide_completion);
4520 mutex_init(&qdev->mpi_mutex);
4523 dev_info(&pdev->dev, "%s\n", DRV_STRING);
4524 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4525 DRV_NAME, DRV_VERSION);
4529 ql_release_all(pdev);
4531 pci_disable_device(pdev);
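/* net_device_ops callbacks wired into ndev->netdev_ops in qlge_probe() below. */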
static const struct net_device_ops qlge_netdev_ops = {
	.ndo_open = qlge_open,
	.ndo_stop = qlge_close,
	.ndo_start_xmit = qlge_send,
	.ndo_change_mtu = qlge_change_mtu,
	.ndo_get_stats = qlge_get_stats,
	.ndo_set_rx_mode = qlge_set_multicast_list,
	.ndo_set_mac_address = qlge_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_tx_timeout = qlge_tx_timeout,
	.ndo_set_features = qlge_set_features,
	.ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
};

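/* Deferrable heartbeat timer.  Reading STS every ~5 seconds gives EEH a
 * chance to notice a dead PCI bus; if the channel is offline we log the
 * status and stop re-arming the timer.
 */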
static void ql_timer(struct timer_list *t)
{
	struct ql_adapter *qdev = from_timer(qdev, t, timer);
	u32 var = 0;

	var = ql_read32(qdev, STS);
	if (pci_channel_offline(qdev->pdev)) {
		netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
		return;
	}
	mod_timer(&qdev->timer, jiffies + (5*HZ));
}

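/* Probe flow: allocate a multi-queue etherdev, run the one-time PCI setup
 * in ql_init_device(), advertise offload features and the 1500/9000 MTU
 * range, register the netdev and start the EEH heartbeat timer.
 */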
static int qlge_probe(struct pci_dev *pdev,
		      const struct pci_device_id *pci_entry)
{
	struct net_device *ndev = NULL;
	struct ql_adapter *qdev = NULL;
	static int cards_found;
	int err = 0;

	ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
				 min(MAX_CPUS,
				     netif_get_num_default_rss_queues()));
	if (!ndev)
		return -ENOMEM;

	err = ql_init_device(pdev, ndev, cards_found);
	if (err < 0) {
		free_netdev(ndev);
		return err;
	}

	qdev = netdev_priv(ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->hw_features = NETIF_F_SG |
			    NETIF_F_IP_CSUM |
			    NETIF_F_TSO |
			    NETIF_F_TSO_ECN |
			    NETIF_F_HW_VLAN_CTAG_TX |
			    NETIF_F_HW_VLAN_CTAG_RX |
			    NETIF_F_HW_VLAN_CTAG_FILTER |
			    NETIF_F_RXCSUM;
	ndev->features = ndev->hw_features;
	ndev->vlan_features = ndev->hw_features;
	/* vlan gets same features (except vlan filter) */
	ndev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER |
				 NETIF_F_HW_VLAN_CTAG_TX |
				 NETIF_F_HW_VLAN_CTAG_RX);

	if (test_bit(QL_DMA64, &qdev->flags))
		ndev->features |= NETIF_F_HIGHDMA;

	/*
	 * Set up net_device structure.
	 */
	ndev->tx_queue_len = qdev->tx_ring_size;
	ndev->irq = pdev->irq;

	ndev->netdev_ops = &qlge_netdev_ops;
	ndev->ethtool_ops = &qlge_ethtool_ops;
	ndev->watchdog_timeo = 10 * HZ;

	/* MTU range: this driver only supports 1500 or 9000, so this only
	 * filters out values above or below, and we'll rely on
	 * qlge_change_mtu to make sure only 1500 or 9000 are allowed
	 */
	ndev->min_mtu = ETH_DATA_LEN;
	ndev->max_mtu = 9000;

	err = register_netdev(ndev);
	if (err) {
		dev_err(&pdev->dev, "net device registration failed.\n");
		ql_release_all(pdev);
		pci_disable_device(pdev);
		free_netdev(ndev);
		return err;
	}

	/* Start up the timer to trigger EEH if
	 * the bus goes dead
	 */
	timer_setup(&qdev->timer, ql_timer, TIMER_DEFERRABLE);
	mod_timer(&qdev->timer, jiffies + (5*HZ));
	ql_link_off(qdev);
	ql_display_dev_info(ndev);
	atomic_set(&qdev->lb_count, 0);
	cards_found++;
	return 0;
}

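/* Non-static wrappers around the normal send and RX-clean paths; these
 * appear intended for the loopback self-test (note the lb_count counter
 * initialized in qlge_probe()), though their callers live elsewhere.
 */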
netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
{
	return qlge_send(skb, ndev);
}

int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
{
	return ql_clean_inbound_rx_ring(rx_ring, budget);
}

static void qlge_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	del_timer_sync(&qdev->timer);
	ql_cancel_all_work_sync(qdev);
	unregister_netdev(ndev);
	ql_release_all(pdev);
	pci_disable_device(pdev);
	free_netdev(ndev);
}

/* Clean up resources without touching hardware. */
static void ql_eeh_close(struct net_device *ndev)
{
	int i;
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (netif_carrier_ok(ndev)) {
		netif_carrier_off(ndev);
		netif_stop_queue(ndev);
	}

	/* Disabling the timer */
	ql_cancel_all_work_sync(qdev);

	for (i = 0; i < qdev->rss_ring_count; i++)
		netif_napi_del(&qdev->rx_ring[i].napi);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	ql_tx_ring_clean(qdev);
	ql_free_rx_buffers(qdev);
	ql_release_adapter_resources(qdev);
}

/*
 * This callback is called by the PCI subsystem whenever
 * a PCI bus error is detected.
 */
static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
					       enum pci_channel_state state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		netif_device_detach(ndev);
		del_timer_sync(&qdev->timer);
		if (netif_running(ndev))
			ql_eeh_close(ndev);
		pci_disable_device(pdev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		dev_err(&pdev->dev,
			"%s: pci_channel_io_perm_failure.\n", __func__);
		del_timer_sync(&qdev->timer);
		ql_eeh_close(ndev);
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/*
 * This callback is called after the PCI bus has been reset.
 * Basically, this tries to restart the card from scratch.
 * This is a shortened version of the device probe/discovery code;
 * it resembles the first half of the qlge_probe() routine.
 */
static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	pdev->error_state = pci_channel_io_normal;

	pci_restore_state(pdev);
	if (pci_enable_device(pdev)) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	if (ql_adapter_reset(qdev)) {
		netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}

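/* Final stage of EEH recovery: re-open the interface if it was running
 * before the error, restart the heartbeat timer and re-attach the device.
 */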
static void qlge_io_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	if (netif_running(ndev)) {
		err = qlge_open(ndev);
		if (err) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Device initialization failed after reset.\n");
			return;
		}
	} else {
		netif_err(qdev, ifup, qdev->ndev,
			  "Device was not running prior to EEH.\n");
	}
	mod_timer(&qdev->timer, jiffies + (5*HZ));
	netif_device_attach(ndev);
}

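/* The PCI core drives recovery through these three callbacks in order:
 * error_detected -> slot_reset -> resume.
 */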
static const struct pci_error_handlers qlge_err_handler = {
	.error_detected = qlge_io_error_detected,
	.slot_reset = qlge_io_slot_reset,
	.resume = qlge_io_resume,
};

#ifdef CONFIG_PM
static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	netif_device_detach(ndev);
	del_timer_sync(&qdev->timer);

	if (netif_running(ndev)) {
		err = ql_adapter_down(qdev);
		if (err)
			return err;
	}

	ql_wol(qdev);
	err = pci_save_state(pdev);
	if (err)
		return err;

	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

static int qlge_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(ndev)) {
		err = ql_adapter_up(qdev);
		if (err)
			return err;
	}

	mod_timer(&qdev->timer, jiffies + (5*HZ));
	netif_device_attach(ndev);

	return 0;
}
#endif /* CONFIG_PM */

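/* Shutdown reuses the suspend path so the adapter is quiesced and placed
 * in a low power state before the system goes down.
 */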
static void qlge_shutdown(struct pci_dev *pdev)
{
	qlge_suspend(pdev, PMSG_SUSPEND);
}

static struct pci_driver qlge_driver = {
	.name = DRV_NAME,
	.id_table = qlge_pci_tbl,
	.probe = qlge_probe,
	.remove = qlge_remove,
#ifdef CONFIG_PM
	.suspend = qlge_suspend,
	.resume = qlge_resume,
#endif
	.shutdown = qlge_shutdown,
	.err_handler = &qlge_err_handler
};

module_pci_driver(qlge_driver);
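
/* module_pci_driver() replaces the usual init/exit boilerplate.  As a rough
 * sketch, the macro generates the equivalent of the following (names are
 * illustrative):
 *
 *	static int __init qlge_driver_init(void)
 *	{
 *		return pci_register_driver(&qlge_driver);
 *	}
 *	module_init(qlge_driver_init);
 *
 *	static void __exit qlge_driver_exit(void)
 *	{
 *		pci_unregister_driver(&qlge_driver);
 *	}
 *	module_exit(qlge_driver_exit);
 */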