/*******************************************************************************

  Intel PRO/1000 Linux driver
  Copyright(c) 1999 - 2006 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
#include "e1000.h"
#include <net/ip6_checksum.h>
#include <linux/io.h>
#include <linux/prefetch.h>
#include <linux/bitops.h>
#include <linux/if_vlan.h>
char e1000_driver_name[] = "e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
#define DRV_VERSION "7.3.21-k8-NAPI"
const char e1000_driver_version[] = DRV_VERSION;
static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
/* e1000_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * Macro expands to...
 *   {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
 */
static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
	INTEL_E1000_ETHERNET_DEVICE(0x1000),
	INTEL_E1000_ETHERNET_DEVICE(0x1001),
	INTEL_E1000_ETHERNET_DEVICE(0x1004),
	INTEL_E1000_ETHERNET_DEVICE(0x1008),
	INTEL_E1000_ETHERNET_DEVICE(0x1009),
	INTEL_E1000_ETHERNET_DEVICE(0x100C),
	INTEL_E1000_ETHERNET_DEVICE(0x100D),
	INTEL_E1000_ETHERNET_DEVICE(0x100E),
	INTEL_E1000_ETHERNET_DEVICE(0x100F),
	INTEL_E1000_ETHERNET_DEVICE(0x1010),
	INTEL_E1000_ETHERNET_DEVICE(0x1011),
	INTEL_E1000_ETHERNET_DEVICE(0x1012),
	INTEL_E1000_ETHERNET_DEVICE(0x1013),
	INTEL_E1000_ETHERNET_DEVICE(0x1014),
	INTEL_E1000_ETHERNET_DEVICE(0x1015),
	INTEL_E1000_ETHERNET_DEVICE(0x1016),
	INTEL_E1000_ETHERNET_DEVICE(0x1017),
	INTEL_E1000_ETHERNET_DEVICE(0x1018),
	INTEL_E1000_ETHERNET_DEVICE(0x1019),
	INTEL_E1000_ETHERNET_DEVICE(0x101A),
	INTEL_E1000_ETHERNET_DEVICE(0x101D),
	INTEL_E1000_ETHERNET_DEVICE(0x101E),
	INTEL_E1000_ETHERNET_DEVICE(0x1026),
	INTEL_E1000_ETHERNET_DEVICE(0x1027),
	INTEL_E1000_ETHERNET_DEVICE(0x1028),
	INTEL_E1000_ETHERNET_DEVICE(0x1075),
	INTEL_E1000_ETHERNET_DEVICE(0x1076),
	INTEL_E1000_ETHERNET_DEVICE(0x1077),
	INTEL_E1000_ETHERNET_DEVICE(0x1078),
	INTEL_E1000_ETHERNET_DEVICE(0x1079),
	INTEL_E1000_ETHERNET_DEVICE(0x107A),
	INTEL_E1000_ETHERNET_DEVICE(0x107B),
	INTEL_E1000_ETHERNET_DEVICE(0x107C),
	INTEL_E1000_ETHERNET_DEVICE(0x108A),
	INTEL_E1000_ETHERNET_DEVICE(0x1099),
	INTEL_E1000_ETHERNET_DEVICE(0x10B5),
	INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
int e1000_up(struct e1000_adapter *adapter);
void e1000_down(struct e1000_adapter *adapter);
void e1000_reinit_locked(struct e1000_adapter *adapter);
void e1000_reset(struct e1000_adapter *adapter);
int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *txdr);
static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rxdr);
static void e1000_free_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *tx_ring);
static void e1000_free_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rx_ring);
void e1000_update_stats(struct e1000_adapter *adapter);

static int e1000_init_module(void);
static void e1000_exit_module(void);
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void __devexit e1000_remove(struct pci_dev *pdev);
static int e1000_alloc_queues(struct e1000_adapter *adapter);
static int e1000_sw_init(struct e1000_adapter *adapter);
static int e1000_open(struct net_device *netdev);
static int e1000_close(struct net_device *netdev);
static void e1000_configure_tx(struct e1000_adapter *adapter);
static void e1000_configure_rx(struct e1000_adapter *adapter);
static void e1000_setup_rctl(struct e1000_adapter *adapter);
static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
				struct e1000_tx_ring *tx_ring);
static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
				struct e1000_rx_ring *rx_ring);
static void e1000_set_rx_mode(struct net_device *netdev);
static void e1000_update_phy_info_task(struct work_struct *work);
static void e1000_watchdog(struct work_struct *work);
static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev);
static struct net_device_stats *e1000_get_stats(struct net_device *netdev);
static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
static int e1000_set_mac(struct net_device *netdev, void *p);
static irqreturn_t e1000_intr(int irq, void *data);
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
			       struct e1000_tx_ring *tx_ring);
static int e1000_clean(struct napi_struct *napi, int budget);
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       struct e1000_rx_ring *rx_ring,
			       int *work_done, int work_to_do);
static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
				     struct e1000_rx_ring *rx_ring,
				     int *work_done, int work_to_do);
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   struct e1000_rx_ring *rx_ring,
				   int cleaned_count);
static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
					 struct e1000_rx_ring *rx_ring,
					 int cleaned_count);
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd);
static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
static void e1000_tx_timeout(struct net_device *dev);
static void e1000_reset_task(struct work_struct *work);
static void e1000_smartspeed(struct e1000_adapter *adapter);
static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
				       struct sk_buff *skb);

static bool e1000_vlan_used(struct e1000_adapter *adapter);
static void e1000_vlan_mode(struct net_device *netdev,
			    netdev_features_t features);
static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
				     bool filter_on);
static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
static void e1000_restore_vlan(struct e1000_adapter *adapter);

#ifdef CONFIG_PM
static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
static int e1000_resume(struct pci_dev *pdev);
#endif
static void e1000_shutdown(struct pci_dev *pdev);
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void e1000_netpoll(struct net_device *netdev);
#endif
#define COPYBREAK_DEFAULT 256
static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state);
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
static void e1000_io_resume(struct pci_dev *pdev);
static struct pci_error_handlers e1000_err_handler = {
	.error_detected = e1000_io_error_detected,
	.slot_reset = e1000_io_slot_reset,
	.resume = e1000_io_resume,
};
static struct pci_driver e1000_driver = {
	.name     = e1000_driver_name,
	.id_table = e1000_pci_tbl,
	.probe    = e1000_probe,
	.remove   = __devexit_p(e1000_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend  = e1000_suspend,
	.resume   = e1000_resume,
#endif
	.shutdown = e1000_shutdown,
	.err_handler = &e1000_err_handler
};
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
/**
 * e1000_get_hw_dev - return device
 * used by hardware layer to print debugging information
 **/
struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;
	return adapter->netdev;
}
/**
 * e1000_init_module - Driver Registration Routine
 *
 * e1000_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init e1000_init_module(void)
{
	int ret;
	pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);

	pr_info("%s\n", e1000_copyright);

	ret = pci_register_driver(&e1000_driver);
	if (copybreak != COPYBREAK_DEFAULT) {
		if (copybreak == 0)
			pr_info("copybreak disabled\n");
		else
			pr_info("copybreak enabled for "
				"packets <= %u bytes\n", copybreak);
	}
	return ret;
}

module_init(e1000_init_module);
/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit e1000_exit_module(void)
{
	pci_unregister_driver(&e1000_driver);
}

module_exit(e1000_exit_module);
static int e1000_request_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	irq_handler_t handler = e1000_intr;
	int irq_flags = IRQF_SHARED;
	int err;

	err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
			  netdev);
	if (err)
		e_err(probe, "Unable to allocate interrupt Error: %d\n", err);

	return err;
}
static void e1000_free_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	free_irq(adapter->pdev->irq, netdev);
}
/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void e1000_irq_disable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(IMC, ~0);
	E1000_WRITE_FLUSH();
	synchronize_irq(adapter->pdev->irq);
}
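/* The sequence above matters: writing ~0 to IMC masks every interrupt
 * cause, the register read in E1000_WRITE_FLUSH() forces the posted write
 * out to the device, and only then does synchronize_irq() wait for any
 * handler that is already in flight to finish.
 */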
/**
 * e1000_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void e1000_irq_enable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(IMS, IMS_ENABLE_MASK);
	E1000_WRITE_FLUSH();
}
static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u16 vid = hw->mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (!e1000_vlan_used(adapter))
		return;

	if (!test_bit(vid, adapter->active_vlans)) {
		if (hw->mng_cookie.status &
		    E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
			e1000_vlan_rx_add_vid(netdev, vid);
			adapter->mng_vlan_id = vid;
		} else {
			adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
		}
		if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
		    (vid != old_vid) &&
		    !test_bit(old_vid, adapter->active_vlans))
			e1000_vlan_rx_kill_vid(netdev, old_vid);
	} else {
		adapter->mng_vlan_id = vid;
	}
}
static void e1000_init_manageability(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->en_mng_pt) {
		u32 manc = er32(MANC);

		/* disable hardware interception of ARP */
		manc &= ~(E1000_MANC_ARP_EN);

		ew32(MANC, manc);
	}
}
static void e1000_release_manageability(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->en_mng_pt) {
		u32 manc = er32(MANC);

		/* re-enable hardware interception of ARP */
		manc |= E1000_MANC_ARP_EN;

		ew32(MANC, manc);
	}
}
/**
 * e1000_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void e1000_configure(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	e1000_set_rx_mode(netdev);

	e1000_restore_vlan(adapter);
	e1000_init_manageability(adapter);

	e1000_configure_tx(adapter);
	e1000_setup_rctl(adapter);
	e1000_configure_rx(adapter);
	/* call E1000_DESC_UNUSED which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct e1000_rx_ring *ring = &adapter->rx_ring[i];
		adapter->alloc_rx_buf(adapter, ring,
				      E1000_DESC_UNUSED(ring));
	}
}
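/* For reference, E1000_DESC_UNUSED (defined in e1000.h) computes the free
 * ring slots as next_to_clean - next_to_use - 1, adding the ring count when
 * next_to_clean <= next_to_use to handle wraparound; holding one slot back
 * is what guarantees next_to_use != next_to_clean on a full ring.
 */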
int e1000_up(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* hardware has been reset, we need to reload some things */
	e1000_configure(adapter);

	clear_bit(__E1000_DOWN, &adapter->flags);

	napi_enable(&adapter->napi);

	e1000_irq_enable(adapter);

	netif_wake_queue(adapter->netdev);

	/* fire a link change interrupt to start the watchdog */
	ew32(ICS, E1000_ICS_LSC);
	return 0;
}
/**
 * e1000_power_up_phy - restore link in case the phy was powered down
 * @adapter: address of board private structure
 *
 * The phy may be powered down to save power and turn off link when the
 * driver is unloaded and wake on lan is not enabled (among others)
 * *** this routine MUST be followed by a call to e1000_reset ***
 **/
void e1000_power_up_phy(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 mii_reg = 0;

	/* Just clear the power down bit to wake the phy back up */
	if (hw->media_type == e1000_media_type_copper) {
		/* according to the manual, the phy will retain its
		 * settings across a power-down/up cycle */
		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
		mii_reg &= ~MII_CR_POWER_DOWN;
		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
	}
}
static void e1000_power_down_phy(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* Power down the PHY so no link is implied when interface is down *
	 * The PHY cannot be powered down if any of the following is true *
	 * (a) WoL is enabled
	 * (b) AMT is active
	 * (c) SoL/IDER session is active */
	if (!adapter->wol && hw->mac_type >= e1000_82540 &&
	    hw->media_type == e1000_media_type_copper) {
		u16 mii_reg = 0;

		switch (hw->mac_type) {
		case e1000_82540:
		case e1000_82545:
		case e1000_82545_rev_3:
		case e1000_82546:
		case e1000_ce4100:
		case e1000_82546_rev_3:
		case e1000_82541:
		case e1000_82541_rev_2:
		case e1000_82547:
		case e1000_82547_rev_2:
			if (er32(MANC) & E1000_MANC_SMBUS_EN)
				goto out;
			break;
		default:
			goto out;
		}
		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
		mii_reg |= MII_CR_POWER_DOWN;
		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
		msleep(1);
	}
out:
	return;
}
static void e1000_down_and_stop(struct e1000_adapter *adapter)
{
	set_bit(__E1000_DOWN, &adapter->flags);

	/* Only kill reset task if adapter is not resetting */
	if (!test_bit(__E1000_RESETTING, &adapter->flags))
		cancel_work_sync(&adapter->reset_task);

	cancel_delayed_work_sync(&adapter->watchdog_task);
	cancel_delayed_work_sync(&adapter->phy_info_task);
	cancel_delayed_work_sync(&adapter->fifo_stall_task);
}
void e1000_down(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 rctl, tctl;

	/* disable receives in the hardware */
	rctl = er32(RCTL);
	ew32(RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_disable(netdev);

	/* disable transmits in the hardware */
	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_EN;
	ew32(TCTL, tctl);
	/* flush both disables and wait for them to finish */
	E1000_WRITE_FLUSH();
	msleep(10);

	napi_disable(&adapter->napi);

	e1000_irq_disable(adapter);

	/* Setting DOWN must be after irq_disable to prevent
	 * a screaming interrupt. Setting DOWN also prevents
	 * tasks from rescheduling.
	 */
	e1000_down_and_stop(adapter);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;
	netif_carrier_off(netdev);

	e1000_reset(adapter);
	e1000_clean_all_tx_rings(adapter);
	e1000_clean_all_rx_rings(adapter);
}
static void e1000_reinit_safe(struct e1000_adapter *adapter)
{
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);
	mutex_lock(&adapter->mutex);
	e1000_down(adapter);
	e1000_up(adapter);
	mutex_unlock(&adapter->mutex);
	clear_bit(__E1000_RESETTING, &adapter->flags);
}
void e1000_reinit_locked(struct e1000_adapter *adapter)
{
	/* if rtnl_lock is not held the call path is bogus */
	ASSERT_RTNL();
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);
	e1000_down(adapter);
	e1000_up(adapter);
	clear_bit(__E1000_RESETTING, &adapter->flags);
}
void e1000_reset(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
	bool legacy_pba_adjust = false;
	u16 hwm;

	/* Repartition Pba for greater than 9k mtu
	 * To take effect CTRL.RST is required.
	 */

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82543:
	case e1000_82544:
	case e1000_82540:
	case e1000_82541:
	case e1000_82541_rev_2:
		legacy_pba_adjust = true;
		pba = E1000_PBA_48K;
		break;
	case e1000_82545:
	case e1000_82545_rev_3:
	case e1000_82546:
	case e1000_ce4100:
	case e1000_82546_rev_3:
		pba = E1000_PBA_48K;
		break;
	case e1000_82547:
	case e1000_82547_rev_2:
		legacy_pba_adjust = true;
		pba = E1000_PBA_30K;
		break;
	case e1000_undefined:
	case e1000_num_macs:
		break;
	}
	if (legacy_pba_adjust) {
		if (hw->max_frame_size > E1000_RXBUFFER_8192)
			pba -= 8; /* allocate more FIFO for Tx */

		if (hw->mac_type == e1000_82547) {
			adapter->tx_fifo_head = 0;
			adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
			adapter->tx_fifo_size =
				(E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
			atomic_set(&adapter->tx_fifo_stall, 0);
		}
	} else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
		/* adjust PBA for jumbo frames */
		ew32(PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB. Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB. */
		pba = er32(PBA);
		/* upper 16 bits has Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits has Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/* the tx fifo also stores 16 bytes of information about the tx
		 * but don't include ethernet FCS because hardware appends it
		 */
		min_tx_space = (hw->max_frame_size +
				sizeof(struct e1000_tx_desc) -
				ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		/* software strips receive CRC, so leave room for it */
		min_rx_space = hw->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;
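		/* Worked example with the standard VLAN-tagged maximum
		 * (max_frame_size = 1522): min_tx_space = (1522 + 16 - 4) * 2
		 * = 3068 bytes, aligned up to 3072 and shifted down to 3 KB;
		 * min_rx_space = 1522 aligned up to 2048, i.e. 2 KB.
		 */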
		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* PCI/PCIx hardware has PBA alignment constraints */
			switch (hw->mac_type) {
			case e1000_82545 ... e1000_82546_rev_3:
				pba &= ~(E1000_PBA_8K - 1);
				break;
			default:
				break;
			}

			/* if short on rx space, rx wins and must trump tx
			 * adjustment or use Early Receive if available */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}
	}

	ew32(PBA, pba);
	/* flow control settings:
	 * The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, and
	 * - the full Rx FIFO size minus the early receive size (for parts
	 *   with ERT support assuming ERT set to E1000_ERT_2048), or
	 * - the full Rx FIFO size minus one full frame
	 */
	hwm = min(((pba << 10) * 9 / 10),
		  ((pba << 10) - hw->max_frame_size));
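	/* For example, with a 48 KB Rx FIFO (pba = 48) and a 1522-byte max
	 * frame: min(49152 * 9 / 10, 49152 - 1522) = min(44236, 47630)
	 * = 44236, which the 8-byte mask below rounds down to 44232.
	 */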
	hw->fc_high_water = hwm & 0xFFF8;	/* 8-byte granularity */
	hw->fc_low_water = hw->fc_high_water - 8;
	hw->fc_pause_time = E1000_FC_PAUSE_TIME;
	hw->fc_send_xon = 1;
	hw->fc = hw->original_fc;

	/* Allow time for pending master requests to run */
	e1000_reset_hw(hw);
	if (hw->mac_type >= e1000_82544)
		ew32(WUC, 0);

	if (e1000_init_hw(hw))
		e_dev_err("Hardware Error\n");
	e1000_update_mng_vlan(adapter);

	/* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
	if (hw->mac_type >= e1000_82544 &&
	    hw->autoneg == 1 &&
	    hw->autoneg_advertised == ADVERTISE_1000_FULL) {
		u32 ctrl = er32(CTRL);
		/* clear phy power management bit if we are in gig only mode,
		 * which if enabled will attempt negotiation to 100Mb, which
		 * can cause a loss of link at power off or driver unload */
		ctrl &= ~E1000_CTRL_SWDPIN3;
		ew32(CTRL, ctrl);
	}

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	ew32(VET, ETHERNET_IEEE_VLAN_TYPE);

	e1000_reset_adaptive(hw);
	e1000_phy_get_info(hw, &adapter->phy_info);

	e1000_release_manageability(adapter);
}
/* Dump the eeprom for users having checksum issues */
static void e1000_dump_eeprom(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ethtool_eeprom eeprom;
	const struct ethtool_ops *ops = netdev->ethtool_ops;
	u8 *data;
	int i;
	u16 csum_old, csum_new = 0;

	eeprom.len = ops->get_eeprom_len(netdev);
	eeprom.offset = 0;

	data = kmalloc(eeprom.len, GFP_KERNEL);
	if (!data)
		return;

	ops->get_eeprom(netdev, &eeprom, data);

	csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
		   (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
	for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
		csum_new += data[i] + (data[i + 1] << 8);
	csum_new = EEPROM_SUM - csum_new;
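	/* The stored checksum word is chosen so that all EEPROM words from
	 * offset 0 up to and including EEPROM_CHECKSUM_REG sum to EEPROM_SUM
	 * (0xBABA on these parts); csum_new computed above is the value that
	 * would make the freshly calculated sum match.
	 */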
	pr_err("/*********************/\n");
	pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
	pr_err("Calculated              : 0x%04x\n", csum_new);

	pr_err("Offset    Values\n");
	pr_err("========  ======\n");
	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);

	pr_err("Include this output when contacting your support provider.\n");
	pr_err("This is not a software error! Something bad happened to\n");
	pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
	pr_err("result in further problems, possibly loss of data,\n");
	pr_err("corruption or system hangs!\n");
	pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
	pr_err("which is invalid and requires you to set the proper MAC\n");
	pr_err("address manually before continuing to enable this network\n");
	pr_err("device. Please inspect the EEPROM dump and report the\n");
	pr_err("issue to your hardware vendor or Intel Customer Support.\n");
	pr_err("/*********************/\n");

	kfree(data);
}
/**
 * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
 * @pdev: PCI device information struct
 *
 * Return true if an adapter needs ioport resources
 **/
static int e1000_is_need_ioport(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case E1000_DEV_ID_82540EM:
	case E1000_DEV_ID_82540EM_LOM:
	case E1000_DEV_ID_82540EP:
	case E1000_DEV_ID_82540EP_LOM:
	case E1000_DEV_ID_82540EP_LP:
	case E1000_DEV_ID_82541EI:
	case E1000_DEV_ID_82541EI_MOBILE:
	case E1000_DEV_ID_82541ER:
	case E1000_DEV_ID_82541ER_LOM:
	case E1000_DEV_ID_82541GI:
	case E1000_DEV_ID_82541GI_LF:
	case E1000_DEV_ID_82541GI_MOBILE:
	case E1000_DEV_ID_82544EI_COPPER:
	case E1000_DEV_ID_82544EI_FIBER:
	case E1000_DEV_ID_82544GC_COPPER:
	case E1000_DEV_ID_82544GC_LOM:
	case E1000_DEV_ID_82545EM_COPPER:
	case E1000_DEV_ID_82545EM_FIBER:
	case E1000_DEV_ID_82546EB_COPPER:
	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546EB_QUAD_COPPER:
		return true;
	default:
		return false;
	}
}
static netdev_features_t e1000_fix_features(struct net_device *netdev,
	netdev_features_t features)
{
	/* Since there is no support for separate rx/tx vlan accel
	 * enable/disable make sure tx flag is always in same state as rx.
	 */
	if (features & NETIF_F_HW_VLAN_RX)
		features |= NETIF_F_HW_VLAN_TX;
	else
		features &= ~NETIF_F_HW_VLAN_TX;

	return features;
}
static int e1000_set_features(struct net_device *netdev,
	netdev_features_t features)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	if (changed & NETIF_F_HW_VLAN_RX)
		e1000_vlan_mode(netdev, features);

	if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL)))
		return 0;

	netdev->features = features;
	adapter->rx_csum = !!(features & NETIF_F_RXCSUM);

	if (netif_running(netdev))
		e1000_reinit_locked(adapter);
	else
		e1000_reset(adapter);

	return 0;
}
static const struct net_device_ops e1000_netdev_ops = {
	.ndo_open		= e1000_open,
	.ndo_stop		= e1000_close,
	.ndo_start_xmit		= e1000_xmit_frame,
	.ndo_get_stats		= e1000_get_stats,
	.ndo_set_rx_mode	= e1000_set_rx_mode,
	.ndo_set_mac_address	= e1000_set_mac,
	.ndo_tx_timeout		= e1000_tx_timeout,
	.ndo_change_mtu		= e1000_change_mtu,
	.ndo_do_ioctl		= e1000_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= e1000_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= e1000_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= e1000_netpoll,
#endif
	.ndo_fix_features	= e1000_fix_features,
	.ndo_set_features	= e1000_set_features,
};
/**
 * e1000_init_hw_struct - initialize members of hw struct
 * @adapter: board private struct
 * @hw: structure used by e1000_hw.c
 *
 * Factors out initialization of the e1000_hw struct to its own function
 * that can be called very early at init (just after struct allocation).
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 * Returns negative error codes if MAC type setup fails.
 **/
static int e1000_init_hw_struct(struct e1000_adapter *adapter,
				struct e1000_hw *hw)
{
	struct pci_dev *pdev = adapter->pdev;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_id = pdev->subsystem_device;
	hw->revision_id = pdev->revision;

	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);

	hw->max_frame_size = adapter->netdev->mtu +
			     ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
	hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;

	/* identify the MAC */
	if (e1000_set_mac_type(hw)) {
		e_err(probe, "Unknown MAC Type\n");
		return -EIO;
	}

	switch (hw->mac_type) {
	default:
		break;
	case e1000_82541:
	case e1000_82547:
	case e1000_82541_rev_2:
	case e1000_82547_rev_2:
		hw->phy_init_script = 1;
		break;
	}

	e1000_set_media_type(hw);
	e1000_get_bus_info(hw);

	hw->wait_autoneg_complete = false;
	hw->tbi_compatibility_en = true;
	hw->adaptive_ifs = true;

	/* Copper options */

	if (hw->media_type == e1000_media_type_copper) {
		hw->mdix = AUTO_ALL_MODES;
		hw->disable_polarity_correction = false;
		hw->master_slave = E1000_MASTER_SLAVE;
	}

	return 0;
}
/**
 * e1000_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in e1000_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * e1000_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit e1000_probe(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct e1000_adapter *adapter;
	struct e1000_hw *hw;

	static int cards_found = 0;
	static int global_quad_port_a = 0; /* global ksp3 port a indication */
	int i, err, pci_using_dac;
	u16 eeprom_data = 0;
	u16 tmp = 0;
	u16 eeprom_apme_mask = E1000_EEPROM_APME;
	int bars, need_ioport;

	/* do not allocate ioport bars when not needed */
	need_ioport = e1000_is_need_ioport(pdev);
	if (need_ioport) {
		bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
		err = pci_enable_device(pdev);
	} else {
		bars = pci_select_bars(pdev, IORESOURCE_MEM);
		err = pci_enable_device_mem(pdev);
	}
	if (err)
		return err;

	err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
	if (err)
		goto err_pci_reg;

	pci_set_master(pdev);
	err = pci_save_state(pdev);
	if (err)
		goto err_alloc_etherdev;

	err = -ENOMEM;
	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
	adapter->bars = bars;
	adapter->need_ioport = need_ioport;

	hw = &adapter->hw;
	hw->back = adapter;
	err = -EIO;
	hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
	if (!hw->hw_addr)
		goto err_ioremap;

	if (adapter->need_ioport) {
		for (i = BAR_1; i <= BAR_5; i++) {
			if (pci_resource_len(pdev, i) == 0)
				continue;
			if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
				hw->io_base = pci_resource_start(pdev, i);
				break;
			}
		}
	}
	/* make ready for any if (hw->...) below */
	err = e1000_init_hw_struct(adapter, hw);
	if (err)
		goto err_sw_init;

	/* there is a workaround being applied below that limits
	 * 64-bit DMA addresses to 64-bit hardware. There are some
	 * 32-bit adapters that Tx hang when given 64-bit DMA addresses
	 */
	pci_using_dac = 0;
	if ((hw->bus_type == e1000_bus_type_pcix) &&
	    !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		/* according to DMA-API-HOWTO, coherent calls will always
		 * succeed if the set call did
		 */
		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		pci_using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			pr_err("No usable DMA config, aborting\n");
			goto err_dma;
		}
		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	}
	netdev->netdev_ops = &e1000_netdev_ops;
	e1000_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);

	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	adapter->bd_number = cards_found;

	/* setup the private structure */
	err = e1000_sw_init(adapter);
	if (err)
		goto err_sw_init;
	err = -EIO;
	if (hw->mac_type == e1000_ce4100) {
		hw->ce4100_gbe_mdio_base_virt =
			ioremap(pci_resource_start(pdev, BAR_1),
				pci_resource_len(pdev, BAR_1));

		if (!hw->ce4100_gbe_mdio_base_virt)
			goto err_mdio_ioremap;
	}

	if (hw->mac_type >= e1000_82543) {
		netdev->hw_features = NETIF_F_SG |
				      NETIF_F_HW_CSUM |
				      NETIF_F_HW_VLAN_RX;
		netdev->features = NETIF_F_HW_VLAN_TX |
				   NETIF_F_HW_VLAN_FILTER;
	}

	if ((hw->mac_type >= e1000_82544) &&
	    (hw->mac_type != e1000_82547))
		netdev->hw_features |= NETIF_F_TSO;

	netdev->priv_flags |= IFF_SUPP_NOFCS;

	netdev->features |= netdev->hw_features;
	netdev->hw_features |= NETIF_F_RXCSUM;
	netdev->hw_features |= NETIF_F_RXALL;
	netdev->hw_features |= NETIF_F_RXFCS;

	if (pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_HW_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	netdev->priv_flags |= IFF_UNICAST_FLT;
	adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);

	/* initialize eeprom parameters */
	if (e1000_init_eeprom_params(hw)) {
		e_err(probe, "EEPROM initialization failed\n");
		goto err_eeprom;
	}

	/* before reading the EEPROM, reset the controller to
	 * put the device in a known good starting state */
	e1000_reset_hw(hw);

	/* make sure the EEPROM is good */
	if (e1000_validate_eeprom_checksum(hw) < 0) {
		e_err(probe, "The EEPROM Checksum Is Not Valid\n");
		e1000_dump_eeprom(adapter);
		/* set MAC address to all zeroes to invalidate and temporary
		 * disable this device for the user. This blocks regular
		 * traffic while still permitting ethtool ioctls from reaching
		 * the hardware as well as allowing the user to run the
		 * interface after manually setting a hw addr using
		 * `ip link set address`
		 */
		memset(hw->mac_addr, 0, netdev->addr_len);
	} else {
		/* copy the MAC address out of the EEPROM */
		if (e1000_read_mac_addr(hw))
			e_err(probe, "EEPROM Read Error\n");
	}
	/* don't block initialization here due to bad MAC address */
	memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr))
		e_err(probe, "Invalid MAC Address\n");
	INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
	INIT_DELAYED_WORK(&adapter->fifo_stall_task,
			  e1000_82547_tx_fifo_stall_task);
	INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
	INIT_WORK(&adapter->reset_task, e1000_reset_task);

	e1000_check_options(adapter);
	/* Initial Wake on LAN setting
	 * If APM wake is enabled in the EEPROM,
	 * enable the ACPI Magic Packet filter
	 */
	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82543:
		break;
	case e1000_82544:
		e1000_read_eeprom(hw,
			EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
		eeprom_apme_mask = E1000_EEPROM_82544_APM;
		break;
	case e1000_82546:
	case e1000_82546_rev_3:
		if (er32(STATUS) & E1000_STATUS_FUNC_1) {
			e1000_read_eeprom(hw,
				EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
			break;
		}
		/* Fall Through */
	default:
		e1000_read_eeprom(hw,
			EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		break;
	}
	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;
	/* now that we have the eeprom settings, apply the special cases
	 * where the eeprom may be wrong or the board simply won't support
	 * wake on lan on a particular port */
	switch (pdev->device) {
	case E1000_DEV_ID_82546GB_PCIE:
		adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546GB_FIBER:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting */
		if (er32(STATUS) & E1000_STATUS_FUNC_1)
			adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
		/* if quad port adapter, disable WoL on all but port A */
		if (global_quad_port_a != 0)
			adapter->eeprom_wol = 0;
		else
			adapter->quad_port_a = true;
		/* Reset for multiple quad port adapters */
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
		break;
	}
	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* Auto detect PHY address */
	if (hw->mac_type == e1000_ce4100) {
		for (i = 0; i < 32; i++) {
			hw->phy_addr = i;
			e1000_read_phy_reg(hw, PHY_ID2, &tmp);
			if (tmp == 0 || tmp == 0xFF) {
				if (i == 31)
					goto err_eeprom;
				continue;
			} else
				break;
		}
	}
	/* reset the hardware with the new settings */
	e1000_reset(adapter);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	e1000_vlan_filter_on_off(adapter, false);

	/* print bus type/speed/width info */
	e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
	       ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
	       ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
		(hw->bus_speed == e1000_bus_speed_120) ? 120 :
		(hw->bus_speed == e1000_bus_speed_100) ? 100 :
		(hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
	       ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
	       netdev->dev_addr);

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	e_info(probe, "Intel(R) PRO/1000 Network Connection\n");

	cards_found++;
	return 0;
err_register:
err_eeprom:
	e1000_phy_hw_reset(hw);

	if (hw->flash_address)
		iounmap(hw->flash_address);
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
err_dma:
err_sw_init:
err_mdio_ioremap:
	iounmap(hw->ce4100_gbe_mdio_base_virt);
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev, bars);
err_pci_reg:
	pci_disable_device(pdev);
	return err;
}
/**
 * e1000_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * e1000_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit e1000_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	e1000_down_and_stop(adapter);
	e1000_release_manageability(adapter);

	unregister_netdev(netdev);

	e1000_phy_hw_reset(hw);

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	if (hw->mac_type == e1000_ce4100)
		iounmap(hw->ce4100_gbe_mdio_base_virt);
	iounmap(hw->hw_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_selected_regions(pdev, adapter->bars);

	free_netdev(netdev);

	pci_disable_device(pdev);
}
/**
 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
 * @adapter: board private structure to initialize
 *
 * e1000_sw_init initializes the Adapter private data structure.
 * e1000_init_hw_struct MUST be called before this function.
 **/
static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
{
	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;

	adapter->num_tx_queues = 1;
	adapter->num_rx_queues = 1;

	if (e1000_alloc_queues(adapter)) {
		e_err(probe, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	/* Explicitly disable IRQ since the NIC can be in any state. */
	e1000_irq_disable(adapter);

	spin_lock_init(&adapter->stats_lock);
	mutex_init(&adapter->mutex);

	set_bit(__E1000_DOWN, &adapter->flags);

	return 0;
}
/**
 * e1000_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
{
	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
				   sizeof(struct e1000_tx_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		return -ENOMEM;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
				   sizeof(struct e1000_rx_ring), GFP_KERNEL);
	if (!adapter->rx_ring) {
		kfree(adapter->tx_ring);
		return -ENOMEM;
	}

	return E1000_SUCCESS;
}
/**
 * e1000_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog task is started,
 * and the stack is notified that the interface is ready.
 **/
static int e1000_open(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* disallow open during test */
	if (test_bit(__E1000_TESTING, &adapter->flags))
		return -EBUSY;

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = e1000_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = e1000_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	e1000_power_up_phy(adapter);

	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
		e1000_update_mng_vlan(adapter);
	}

	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to setup our
	 * clean_rx handler before we do so. */
	e1000_configure(adapter);

	err = e1000_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* From here on the code is the same as e1000_up() */
	clear_bit(__E1000_DOWN, &adapter->flags);

	napi_enable(&adapter->napi);

	e1000_irq_enable(adapter);

	netif_start_queue(netdev);

	/* fire a link status change interrupt to start the watchdog */
	ew32(ICS, E1000_ICS_LSC);

	return E1000_SUCCESS;

err_req_irq:
	e1000_power_down_phy(adapter);
	e1000_free_all_rx_resources(adapter);
err_setup_rx:
	e1000_free_all_tx_resources(adapter);
err_setup_tx:
	e1000_reset(adapter);

	return err;
}
/**
 * e1000_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int e1000_close(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
	e1000_down(adapter);
	e1000_power_down_phy(adapter);
	e1000_free_irq(adapter);

	e1000_free_all_tx_resources(adapter);
	e1000_free_all_rx_resources(adapter);

	/* kill manageability vlan ID if supported, but not if a vlan with
	 * the same ID is registered on the host OS (let 8021q kill it) */
	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	    !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
		e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
	}

	return 0;
}
/**
 * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
 * @adapter: address of board private structure
 * @start: address of beginning of memory
 * @len: length of memory
 **/
static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
				  unsigned long len)
{
	struct e1000_hw *hw = &adapter->hw;
	unsigned long begin = (unsigned long)start;
	unsigned long end = begin + len;

	/* First rev 82545 and 82546 need to not allow any memory
	 * write location to cross 64k boundary due to errata 23 */
	if (hw->mac_type == e1000_82545 ||
	    hw->mac_type == e1000_ce4100 ||
	    hw->mac_type == e1000_82546)
		return ((begin ^ (end - 1)) >> 16) == 0;

	return true;
}
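/* The XOR trick above: begin and (end - 1) sit in the same 64 KB region
 * iff their upper address bits match, so any set bit at or above bit 16
 * in (begin ^ (end - 1)) means the range straddles a boundary. For
 * example, begin = 0x1FFF0 with len = 0x20 gives 0x1FFF0 ^ 0x2000F =
 * 0x3FFFF, whose shifted value is nonzero, so the check fails.
 */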
/**
 * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @txdr: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *txdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct e1000_buffer) * txdr->count;
	txdr->buffer_info = vzalloc(size);
	if (!txdr->buffer_info) {
		e_err(probe, "Unable to allocate memory for the Tx descriptor "
		      "ring\n");
		return -ENOMEM;
	}

	/* round up to nearest 4K */
	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
	txdr->size = ALIGN(txdr->size, 4096);

	txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
					GFP_KERNEL);
	if (!txdr->desc) {
setup_tx_desc_die:
		vfree(txdr->buffer_info);
		e_err(probe, "Unable to allocate memory for the Tx descriptor "
		      "ring\n");
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
	if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
		void *olddesc = txdr->desc;
		dma_addr_t olddma = txdr->dma;
		e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
		      txdr->size, txdr->desc);
		/* Try again, without freeing the previous */
		txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
						&txdr->dma, GFP_KERNEL);
		/* Failed allocation, critical failure */
		if (!txdr->desc) {
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
			goto setup_tx_desc_die;
		}

		if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
			/* give up */
			dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
					  txdr->dma);
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
			e_err(probe, "Unable to allocate aligned memory "
			      "for the transmit descriptor ring\n");
			vfree(txdr->buffer_info);
			return -ENOMEM;
		} else {
			/* Free old allocation, new allocation was successful */
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
		}
	}
	memset(txdr->desc, 0, txdr->size);

	txdr->next_to_use = 0;
	txdr->next_to_clean = 0;

	return 0;
}
/**
 * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
 * 				  (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
		if (err) {
			e_err(probe, "Allocation for Tx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				e1000_free_tx_resources(adapter,
							&adapter->tx_ring[i]);
			break;
		}
	}

	return err;
}
/**
 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void e1000_configure_tx(struct e1000_adapter *adapter)
{
	u64 tdba;
	struct e1000_hw *hw = &adapter->hw;
	u32 tdlen, tctl, tipg;
	u32 ipgr1, ipgr2;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	switch (adapter->num_tx_queues) {
	case 1:
	default:
		tdba = adapter->tx_ring[0].dma;
		tdlen = adapter->tx_ring[0].count *
			sizeof(struct e1000_tx_desc);
		ew32(TDLEN, tdlen);
		ew32(TDBAH, (tdba >> 32));
		ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
		ew32(TDT, 0);
		ew32(TDH, 0);
		adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ?
					   E1000_TDH : E1000_82542_TDH);
		adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ?
					   E1000_TDT : E1000_82542_TDT);
		break;
	}

	/* Set the default values for the Tx Inter Packet Gap timer */
	if ((hw->media_type == e1000_media_type_fiber ||
	     hw->media_type == e1000_media_type_internal_serdes))
		tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
	else
		tipg = DEFAULT_82543_TIPG_IPGT_COPPER;

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
		tipg = DEFAULT_82542_TIPG_IPGT;
		ipgr1 = DEFAULT_82542_TIPG_IPGR1;
		ipgr2 = DEFAULT_82542_TIPG_IPGR2;
		break;
	default:
		ipgr1 = DEFAULT_82543_TIPG_IPGR1;
		ipgr2 = DEFAULT_82543_TIPG_IPGR2;
		break;
	}
	tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
	tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
	ew32(TIPG, tipg);
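	/* TIPG packs three fields: the transmit inter-packet gap (IPGT) in
	 * the low bits, and the two receive-side IPG settings OR'd in at the
	 * IPGR1 and IPGR2 shift positions, so the single register write above
	 * programs all three gaps at once.
	 */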
	/* Set the Tx Interrupt Delay register */
	ew32(TIDV, adapter->tx_int_delay);
	if (hw->mac_type >= e1000_82540)
		ew32(TADV, adapter->tx_abs_int_delay);

	/* Program the Transmit Control Register */
	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

	e1000_config_collision_dist(hw);

	/* Setup Transmit Descriptor Settings for eop descriptor */
	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;

	/* only set IDE if we are delaying interrupts using the timers */
	if (adapter->tx_int_delay)
		adapter->txd_cmd |= E1000_TXD_CMD_IDE;

	if (hw->mac_type < e1000_82543)
		adapter->txd_cmd |= E1000_TXD_CMD_RPS;
	else
		adapter->txd_cmd |= E1000_TXD_CMD_RS;

	/* Cache if we're 82544 running in PCI-X because we'll
	 * need this to apply a workaround later in the send path. */
	if (hw->mac_type == e1000_82544 &&
	    hw->bus_type == e1000_bus_type_pcix)
		adapter->pcix_82544 = true;

	ew32(TCTL, tctl);
}
/**
 * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rxdr: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rxdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size, desc_len;

	size = sizeof(struct e1000_buffer) * rxdr->count;
	rxdr->buffer_info = vzalloc(size);
	if (!rxdr->buffer_info) {
		e_err(probe, "Unable to allocate memory for the Rx descriptor "
		      "ring\n");
		return -ENOMEM;
	}

	desc_len = sizeof(struct e1000_rx_desc);

	/* Round up to nearest 4K */
	rxdr->size = rxdr->count * desc_len;
	rxdr->size = ALIGN(rxdr->size, 4096);

	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
					GFP_KERNEL);
	if (!rxdr->desc) {
		e_err(probe, "Unable to allocate memory for the Rx descriptor "
		      "ring\n");
setup_rx_desc_die:
		vfree(rxdr->buffer_info);
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
	if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
		void *olddesc = rxdr->desc;
		dma_addr_t olddma = rxdr->dma;
		e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
		      rxdr->size, rxdr->desc);
		/* Try again, without freeing the previous */
		rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
						&rxdr->dma, GFP_KERNEL);
		/* Failed allocation, critical failure */
		if (!rxdr->desc) {
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
			e_err(probe, "Unable to allocate memory for the Rx "
			      "descriptor ring\n");
			goto setup_rx_desc_die;
		}

		if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
			/* give up */
			dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
					  rxdr->dma);
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
			e_err(probe, "Unable to allocate aligned memory for "
			      "the Rx descriptor ring\n");
			goto setup_rx_desc_die;
		} else {
			/* Free old allocation, new allocation was successful */
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
		}
	}
	memset(rxdr->desc, 0, rxdr->size);

	rxdr->next_to_clean = 0;
	rxdr->next_to_use = 0;
	rxdr->rx_skb_top = NULL;

	return 0;
}
/**
 * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
 * 				  (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
		if (err) {
			e_err(probe, "Allocation for Rx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				e1000_free_rx_resources(adapter,
							&adapter->rx_ring[i]);
			break;
		}
	}

	return err;
}
/**
 * e1000_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 **/
static void e1000_setup_rctl(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	rctl = er32(RCTL);

	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);

	rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
		E1000_RCTL_RDMTS_HALF |
		(hw->mc_filter_type << E1000_RCTL_MO_SHIFT);

	if (hw->tbi_compatibility_on == 1)
		rctl |= E1000_RCTL_SBP;
	else
		rctl &= ~E1000_RCTL_SBP;

	if (adapter->netdev->mtu <= ETH_DATA_LEN)
		rctl &= ~E1000_RCTL_LPE;
	else
		rctl |= E1000_RCTL_LPE;

	/* Setup buffer sizes */
	rctl &= ~E1000_RCTL_SZ_4096;
	rctl |= E1000_RCTL_BSEX;
	switch (adapter->rx_buffer_len) {
	case E1000_RXBUFFER_2048:
	default:
		rctl |= E1000_RCTL_SZ_2048;
		rctl &= ~E1000_RCTL_BSEX;
		break;
	case E1000_RXBUFFER_4096:
		rctl |= E1000_RCTL_SZ_4096;
		break;
	case E1000_RXBUFFER_8192:
		rctl |= E1000_RCTL_SZ_8192;
		break;
	case E1000_RXBUFFER_16384:
		rctl |= E1000_RCTL_SZ_16384;
		break;
	}
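	/* RCTL.BSEX scales the two-bit buffer-size field: with BSEX clear the
	 * encodings select the standard 256/512/1024/2048-byte buffers, while
	 * with BSEX set the same field selects the extended 4096-, 8192- and
	 * 16384-byte buffers, which is why the 2048 case clears the bit above.
	 */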
	/* This is useful for sniffing bad packets. */
	if (adapter->netdev->features & NETIF_F_RXALL) {
		/* UPE and MPE will be handled by normal PROMISC logic
		 * in e1000e_set_rx_mode */
		rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
			 E1000_RCTL_BAM | /* RX All Bcast Pkts */
			 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */

		rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
			  E1000_RCTL_DPF | /* Allow filtered pause */
			  E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
		/* Do not mess with E1000_CTRL_VME, it affects transmit as well,
		 * and that breaks VLANs.
		 */
	}

	ew32(RCTL, rctl);
}
/**
 * e1000_configure_rx - Configure 8254x Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void e1000_configure_rx(struct e1000_adapter *adapter)
{
	u64 rdba;
	struct e1000_hw *hw = &adapter->hw;
	u32 rdlen, rctl, rxcsum;

	if (adapter->netdev->mtu > ETH_DATA_LEN) {
		rdlen = adapter->rx_ring[0].count *
			sizeof(struct e1000_rx_desc);
		adapter->clean_rx = e1000_clean_jumbo_rx_irq;
		adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
	} else {
		rdlen = adapter->rx_ring[0].count *
			sizeof(struct e1000_rx_desc);
		adapter->clean_rx = e1000_clean_rx_irq;
		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
	}

	/* disable receives while setting up the descriptors */
	rctl = er32(RCTL);
	ew32(RCTL, rctl & ~E1000_RCTL_EN);

	/* set the Receive Delay Timer Register */
	ew32(RDTR, adapter->rx_int_delay);

	if (hw->mac_type >= e1000_82540) {
		ew32(RADV, adapter->rx_abs_int_delay);
		if (adapter->itr_setting != 0)
			ew32(ITR, 1000000000 / (adapter->itr * 256));
	}
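	/* The ITR register counts in 256 ns units, so the expression above
	 * converts a desired rate in interrupts/second into an interval:
	 * 1e9 ns divided by itr gives the gap in ns, and dividing by 256
	 * scales it to register units, e.g. itr = 8000 ints/s yields a
	 * programmed value of 488.
	 */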
	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	switch (adapter->num_rx_queues) {
	case 1:
	default:
		rdba = adapter->rx_ring[0].dma;
		ew32(RDLEN, rdlen);
		ew32(RDBAH, (rdba >> 32));
		ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
		ew32(RDT, 0);
		ew32(RDH, 0);
		adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ?
					   E1000_RDH : E1000_82542_RDH);
		adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ?
					   E1000_RDT : E1000_82542_RDT);
		break;
	}

	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
	if (hw->mac_type >= e1000_82543) {
		rxcsum = er32(RXCSUM);
		if (adapter->rx_csum)
			rxcsum |= E1000_RXCSUM_TUOFL;
		else
			/* don't need to clear IPPCSE as it defaults to 0 */
			rxcsum &= ~E1000_RXCSUM_TUOFL;
		ew32(RXCSUM, rxcsum);
	}

	/* Enable Receives */
	ew32(RCTL, rctl | E1000_RCTL_EN);
}
/**
 * e1000_free_tx_resources - Free Tx Resources per Queue
 * @adapter: board private structure
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
static void e1000_free_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	e1000_clean_tx_ring(adapter, tx_ring);

	vfree(tx_ring->buffer_info);
	tx_ring->buffer_info = NULL;

	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
			  tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * e1000_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
}
static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
					     struct e1000_buffer *buffer_info)
{
	if (buffer_info->dma) {
		if (buffer_info->mapped_as_page)
			dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
				       buffer_info->length, DMA_TO_DEVICE);
		else
			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
					 buffer_info->length,
					 DMA_TO_DEVICE);
		buffer_info->dma = 0;
	}
	if (buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
	buffer_info->time_stamp = 0;
	/* buffer_info must be completely set up in the transmit path */
}
/**
 * e1000_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 * @tx_ring: ring to be cleaned
 **/
static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
				struct e1000_tx_ring *tx_ring)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
	}

	size = sizeof(struct e1000_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->last_tx_tso = false;

	writel(0, hw->hw_addr + tx_ring->tdh);
	writel(0, hw->hw_addr + tx_ring->tdt);
}

/**
 * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
}
/**
 * e1000_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
static void e1000_free_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	e1000_clean_rx_ring(adapter, rx_ring);

	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;

	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
			  rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * e1000_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
}
/**
 * e1000_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 * @rx_ring: ring to free buffers from
 **/
static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
				struct e1000_rx_ring *rx_ring)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_buffer *buffer_info;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->dma &&
		    adapter->clean_rx == e1000_clean_rx_irq) {
			dma_unmap_single(&pdev->dev, buffer_info->dma,
					 buffer_info->length,
					 DMA_FROM_DEVICE);
		} else if (buffer_info->dma &&
			   adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
			dma_unmap_page(&pdev->dev, buffer_info->dma,
				       buffer_info->length,
				       DMA_FROM_DEVICE);
		}

		buffer_info->dma = 0;
		if (buffer_info->page) {
			put_page(buffer_info->page);
			buffer_info->page = NULL;
		}
		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}
	}

	/* there also may be some cached data from a chained receive */
	if (rx_ring->rx_skb_top) {
		dev_kfree_skb(rx_ring->rx_skb_top);
		rx_ring->rx_skb_top = NULL;
	}

	size = sizeof(struct e1000_buffer) * rx_ring->count;
	memset(rx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	writel(0, hw->hw_addr + rx_ring->rdh);
	writel(0, hw->hw_addr + rx_ring->rdt);
}
/**
 * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
}
/* The 82542 2.0 (revision 2) needs to have the receive unit in reset
 * and memory write and invalidate disabled for certain operations
 */
static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 rctl;

	e1000_pci_clear_mwi(hw);

	rctl = er32(RCTL);
	rctl |= E1000_RCTL_RST;
	ew32(RCTL, rctl);
	E1000_WRITE_FLUSH();
	mdelay(5);

	if (netif_running(netdev))
		e1000_clean_all_rx_rings(adapter);
}
static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 rctl;

	rctl = er32(RCTL);
	rctl &= ~E1000_RCTL_RST;
	ew32(RCTL, rctl);
	E1000_WRITE_FLUSH();
	mdelay(5);

	if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
		e1000_pci_set_mwi(hw);

	if (netif_running(netdev)) {
		/* No need to loop, because 82542 supports only 1 queue */
		struct e1000_rx_ring *ring = &adapter->rx_ring[0];
		e1000_configure_rx(adapter);
		adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
	}
}
/**
 * e1000_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int e1000_set_mac(struct net_device *netdev, void *p)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* 82542 2.0 needs to be in reset to write receive address registers */

	if (hw->mac_type == e1000_82542_rev2_0)
		e1000_enter_82542_rst(adapter);

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);

	e1000_rar_set(hw, hw->mac_addr, 0);

	if (hw->mac_type == e1000_82542_rev2_0)
		e1000_leave_82542_rst(adapter);

	return 0;
}
/**
 * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_rx_mode entry point is called whenever the unicast or multicast
 * address lists or the network interface flags are updated. This routine is
 * responsible for configuring the hardware for proper unicast, multicast,
 * promiscuous mode, and all-multi behavior.
 **/
static void e1000_set_rx_mode(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	bool use_uc = false;
	u32 rctl;
	u32 hash_value;
	int i, rar_entries = E1000_RAR_ENTRIES;
	int mta_reg_count = E1000_NUM_MTA_REGISTERS;
	u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);

	if (!mcarray) {
		e_err(probe, "memory allocation failed\n");
		return;
	}

	/* Check for Promiscuous and All Multicast modes */

	rctl = er32(RCTL);

	if (netdev->flags & IFF_PROMISC) {
		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		rctl &= ~E1000_RCTL_VFE;
	} else {
		if (netdev->flags & IFF_ALLMULTI)
			rctl |= E1000_RCTL_MPE;
		else
			rctl &= ~E1000_RCTL_MPE;
		/* Enable VLAN filter if there is a VLAN */
		if (e1000_vlan_used(adapter))
			rctl |= E1000_RCTL_VFE;
	}

	if (netdev_uc_count(netdev) > rar_entries - 1) {
		rctl |= E1000_RCTL_UPE;
	} else if (!(netdev->flags & IFF_PROMISC)) {
		rctl &= ~E1000_RCTL_UPE;
		use_uc = true;
	}

	ew32(RCTL, rctl);

	/* 82542 2.0 needs to be in reset to write receive address registers */

	if (hw->mac_type == e1000_82542_rev2_0)
		e1000_enter_82542_rst(adapter);

	/* load the first 14 addresses into the exact filters 1-14. Unicast
	 * addresses take precedence to avoid disabling unicast filtering
	 * when possible.
	 *
	 * RAR 0 is used for the station MAC address
	 * if there are not 14 addresses, go ahead and clear the filters
	 */
	i = 1;
	if (use_uc)
		netdev_for_each_uc_addr(ha, netdev) {
			if (i == rar_entries)
				break;
			e1000_rar_set(hw, ha->addr, i++);
		}

	netdev_for_each_mc_addr(ha, netdev) {
		if (i == rar_entries) {
			/* load any remaining addresses into the hash table */
			u32 hash_reg, hash_bit, mta;
			hash_value = e1000_hash_mc_addr(hw, ha->addr);
			hash_reg = (hash_value >> 5) & 0x7F;
			hash_bit = hash_value & 0x1F;
			mta = (1 << hash_bit);
			mcarray[hash_reg] |= mta;
		} else {
			e1000_rar_set(hw, ha->addr, i++);
		}
	}

	for (; i < rar_entries; i++) {
		E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
		E1000_WRITE_FLUSH();
		E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
		E1000_WRITE_FLUSH();
	}

	/* write the hash table completely, write from bottom to avoid
	 * both stupid write combining chipsets, and flushing each write
	 */
	for (i = mta_reg_count - 1; i >= 0 ; i--) {
		/* The 82544 has an errata where writing odd offsets
		 * overwrites the previous even offset, but writing
		 * backwards over the range solves the issue by always
		 * writing the odd offset first
		 */
		E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
	}
	E1000_WRITE_FLUSH();

	if (hw->mac_type == e1000_82542_rev2_0)
		e1000_leave_82542_rst(adapter);

	kfree(mcarray);
}
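
/* For illustration of the hash mapping above, suppose e1000_hash_mc_addr()
 * returns a 12-bit hash of 0x0ABC for some multicast address.  Then:
 *   hash_reg = (0x0ABC >> 5) & 0x7F = 0x55  (MTA register 85)
 *   hash_bit =  0x0ABC & 0x1F       = 0x1C  (bit 28)
 * so the filter sets bit 28 of MTA[85].  Any address hashing to the same
 * bucket is also accepted; the hash table is a superset filter, and the
 * stack discards the false positives.
 */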
/**
 * e1000_update_phy_info_task - get phy info
 * @work: work struct contained inside adapter struct
 *
 * Need to wait a few seconds after link up to get diagnostic information from
 * the phy
 */
static void e1000_update_phy_info_task(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
						     struct e1000_adapter,
						     phy_info_task.work);
	if (test_bit(__E1000_DOWN, &adapter->flags))
		return;
	mutex_lock(&adapter->mutex);
	e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
	mutex_unlock(&adapter->mutex);
}
/**
 * e1000_82547_tx_fifo_stall_task - task to complete work
 * @work: work struct contained inside adapter struct
 **/
static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
						     struct e1000_adapter,
						     fifo_stall_task.work);
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 tctl;

	if (test_bit(__E1000_DOWN, &adapter->flags))
		return;
	mutex_lock(&adapter->mutex);
	if (atomic_read(&adapter->tx_fifo_stall)) {
		if ((er32(TDT) == er32(TDH)) &&
		    (er32(TDFT) == er32(TDFH)) &&
		    (er32(TDFTS) == er32(TDFHS))) {
			tctl = er32(TCTL);
			ew32(TCTL, tctl & ~E1000_TCTL_EN);
			ew32(TDFT, adapter->tx_head_addr);
			ew32(TDFH, adapter->tx_head_addr);
			ew32(TDFTS, adapter->tx_head_addr);
			ew32(TDFHS, adapter->tx_head_addr);
			ew32(TCTL, tctl);
			E1000_WRITE_FLUSH();

			adapter->tx_fifo_head = 0;
			atomic_set(&adapter->tx_fifo_stall, 0);
			netif_wake_queue(netdev);
		} else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
			schedule_delayed_work(&adapter->fifo_stall_task, 1);
		}
	}
	mutex_unlock(&adapter->mutex);
}
bool e1000_has_link(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	bool link_active = false;

	/* get_link_status is set on LSC (link status) interrupt or rx
	 * sequence error interrupt (except on intel ce4100).
	 * get_link_status will stay false until the
	 * e1000_check_for_link establishes link for copper adapters
	 * ONLY
	 */
	switch (hw->media_type) {
	case e1000_media_type_copper:
		if (hw->mac_type == e1000_ce4100)
			hw->get_link_status = 1;
		if (hw->get_link_status) {
			e1000_check_for_link(hw);
			link_active = !hw->get_link_status;
		} else {
			link_active = true;
		}
		break;
	case e1000_media_type_fiber:
		e1000_check_for_link(hw);
		link_active = !!(er32(STATUS) & E1000_STATUS_LU);
		break;
	case e1000_media_type_internal_serdes:
		e1000_check_for_link(hw);
		link_active = hw->serdes_has_link;
		break;
	default:
		break;
	}

	return link_active;
}
/**
 * e1000_watchdog - work function
 * @work: work struct contained inside adapter struct
 **/
static void e1000_watchdog(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
						     struct e1000_adapter,
						     watchdog_task.work);
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct e1000_tx_ring *txdr = adapter->tx_ring;
	u32 link, tctl;

	if (test_bit(__E1000_DOWN, &adapter->flags))
		return;

	mutex_lock(&adapter->mutex);
	link = e1000_has_link(adapter);
	if ((netif_carrier_ok(netdev)) && link)
		goto link_up;

	if (link) {
		if (!netif_carrier_ok(netdev)) {
			u32 ctrl;
			/* update snapshot of PHY registers on LSC */
			e1000_get_speed_and_duplex(hw,
						   &adapter->link_speed,
						   &adapter->link_duplex);

			ctrl = er32(CTRL);
			pr_info("%s NIC Link is Up %d Mbps %s, "
				"Flow Control: %s\n",
				netdev->name,
				adapter->link_speed,
				adapter->link_duplex == FULL_DUPLEX ?
				"Full Duplex" : "Half Duplex",
				((ctrl & E1000_CTRL_TFCE) && (ctrl &
				E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
				E1000_CTRL_RFCE) ? "RX" : ((ctrl &
				E1000_CTRL_TFCE) ? "TX" : "None")));

			/* adjust timeout factor according to speed/duplex */
			adapter->tx_timeout_factor = 1;
			switch (adapter->link_speed) {
			case SPEED_10:
				adapter->tx_timeout_factor = 16;
				break;
			case SPEED_100:
				/* maybe add some timeout factor ? */
				break;
			}

			/* enable transmits in the hardware */
			tctl = er32(TCTL);
			tctl |= E1000_TCTL_EN;
			ew32(TCTL, tctl);

			netif_carrier_on(netdev);
			if (!test_bit(__E1000_DOWN, &adapter->flags))
				schedule_delayed_work(&adapter->phy_info_task,
						      2 * HZ);
			adapter->smartspeed = 0;
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;
			pr_info("%s NIC Link is Down\n",
				netdev->name);
			netif_carrier_off(netdev);

			if (!test_bit(__E1000_DOWN, &adapter->flags))
				schedule_delayed_work(&adapter->phy_info_task,
						      2 * HZ);
		}

		e1000_smartspeed(adapter);
	}

link_up:
	e1000_update_stats(adapter);

	hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
	adapter->tpt_old = adapter->stats.tpt;
	hw->collision_delta = adapter->stats.colc - adapter->colc_old;
	adapter->colc_old = adapter->stats.colc;

	adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
	adapter->gorcl_old = adapter->stats.gorcl;
	adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
	adapter->gotcl_old = adapter->stats.gotcl;

	e1000_update_adaptive(hw);

	if (!netif_carrier_ok(netdev)) {
		if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context).
			 */
			adapter->tx_timeout_count++;
			schedule_work(&adapter->reset_task);
			/* exit immediately since reset is imminent */
			goto unlock;
		}
	}

	/* Simple mode for Interrupt Throttle Rate (ITR) */
	if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
		/* Symmetric Tx/Rx gets a reduced ITR=2000;
		 * Total asymmetrical Tx or Rx gets ITR=8000;
		 * everyone else is between 2000-8000.
		 */
		u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
		u32 dif = (adapter->gotcl > adapter->gorcl ?
			   adapter->gotcl - adapter->gorcl :
			   adapter->gorcl - adapter->gotcl) / 10000;
		u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;

		ew32(ITR, 1000000000 / (itr * 256));
	}

	/* Cause software interrupt to ensure rx ring is cleaned */
	ew32(ICS, E1000_ICS_RXDMT0);

	/* Force detection of hung controller every watchdog period */
	adapter->detect_tx_hung = true;

	/* Reschedule the task */
	if (!test_bit(__E1000_DOWN, &adapter->flags))
		schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);

unlock:
	mutex_unlock(&adapter->mutex);
}
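
/* For illustration of the simple-mode ITR math above: a symmetric load of
 * 50 MB received and 50 MB sent in one watchdog interval gives
 *   goc = (50000000 + 50000000) / 10000 = 10000, dif = 0,
 *   itr = 0 * 6000 / 10000 + 2000 = 2000 ints/s,
 * so ew32(ITR, ...) writes 1000000000 / (2000 * 256) = 1953; the ITR
 * register counts in 256 ns units.  A fully asymmetric load (all Rx, no Tx)
 * has dif == goc, so itr = 6000 + 2000 = 8000 ints/s (register value 488).
 */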
enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};
/**
 * e1000_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @itr_setting: current adapter->itr
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 * This functionality is controlled by the InterruptThrottleRate module
 * parameter (see e1000_param.c)
 **/
static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
				     u16 itr_setting, int packets, int bytes)
{
	unsigned int retval = itr_setting;
	struct e1000_hw *hw = &adapter->hw;

	if (unlikely(hw->mac_type < e1000_82540))
		goto update_itr_done;

	if (packets == 0)
		goto update_itr_done;

	switch (itr_setting) {
	case lowest_latency:
		/* jumbo frames get bulk treatment */
		if (bytes/packets > 8000)
			retval = bulk_latency;
		else if ((packets < 5) && (bytes > 512))
			retval = low_latency;
		break;
	case low_latency:  /* 50 usec aka 20000 ints/s */
		if (bytes > 10000) {
			/* jumbo frames need bulk latency setting */
			if (bytes/packets > 8000)
				retval = bulk_latency;
			else if ((packets < 10) || ((bytes/packets) > 1200))
				retval = bulk_latency;
			else if ((packets > 35))
				retval = lowest_latency;
		} else if (bytes/packets > 2000)
			retval = bulk_latency;
		else if (packets <= 2 && bytes < 512)
			retval = lowest_latency;
		break;
	case bulk_latency: /* 250 usec aka 4000 ints/s */
		if (bytes > 25000) {
			if (packets > 35)
				retval = low_latency;
		} else if (bytes < 6000) {
			retval = low_latency;
		}
		break;
	}

update_itr_done:
	return retval;
}
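
/* For illustration of the classification above: in the low_latency state,
 * an interval with packets = 40 and bytes = 24000 has bytes/packets = 600,
 * so none of the jumbo/bulk conditions (> 8000, < 10 packets, > 1200
 * bytes/packet) apply, and packets > 35 promotes the ring to
 * lowest_latency.  Two 200-byte packets (packets <= 2 && bytes < 512)
 * would do the same, while 3000-byte average frames fall to bulk_latency.
 */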
static void e1000_set_itr(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 current_itr;
	u32 new_itr = adapter->itr;

	if (unlikely(hw->mac_type < e1000_82540))
		return;

	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
	if (unlikely(adapter->link_speed != SPEED_1000)) {
		current_itr = 0;
		new_itr = 4000;
		goto set_itr_now;
	}

	adapter->tx_itr = e1000_update_itr(adapter,
					   adapter->tx_itr,
					   adapter->total_tx_packets,
					   adapter->total_tx_bytes);
	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
		adapter->tx_itr = low_latency;

	adapter->rx_itr = e1000_update_itr(adapter,
					   adapter->rx_itr,
					   adapter->total_rx_packets,
					   adapter->total_rx_bytes);
	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
		adapter->rx_itr = low_latency;

	current_itr = max(adapter->rx_itr, adapter->tx_itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 70000;
		break;
	case low_latency:
		new_itr = 20000; /* aka hwitr = ~200 */
		break;
	case bulk_latency:
		new_itr = 4000;
		break;
	default:
		break;
	}

set_itr_now:
	if (new_itr != adapter->itr) {
		/* this attempts to bias the interrupt rate towards Bulk
		 * by adding intermediate steps when interrupt rate is
		 * increasing
		 */
		new_itr = new_itr > adapter->itr ?
			  min(adapter->itr + (new_itr >> 2), new_itr) :
			  new_itr;
		adapter->itr = new_itr;
		ew32(ITR, 1000000000 / (new_itr * 256));
	}
}
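
/* For illustration of the step-wise increase above: if adapter->itr is
 * 4000 and the computed target is 20000, each pass moves by at most
 * (20000 >> 2) = 5000, so the programmed rate walks
 * 4000 -> 9000 -> 14000 -> 19000 -> 20000 rather than jumping straight
 * to the target.  Decreases take effect immediately, which biases the
 * driver toward the bulk (lower interrupt rate) settings.
 */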
#define E1000_TX_FLAGS_CSUM		0x00000001
#define E1000_TX_FLAGS_VLAN		0x00000002
#define E1000_TX_FLAGS_TSO		0x00000004
#define E1000_TX_FLAGS_IPV4		0x00000008
#define E1000_TX_FLAGS_NO_FCS		0x00000010
#define E1000_TX_FLAGS_VLAN_MASK	0xffff0000
#define E1000_TX_FLAGS_VLAN_SHIFT	16
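
/* For illustration of the flag layout: the low bits are booleans and the
 * top 16 bits carry the 802.1Q tag.  For VLAN ID 100 (0x064), the xmit
 * path sets
 *   tx_flags = E1000_TX_FLAGS_VLAN | (0x064 << E1000_TX_FLAGS_VLAN_SHIFT)
 * and e1000_tx_queue() later recovers the tag with
 * (tx_flags & E1000_TX_FLAGS_VLAN_MASK) when building the descriptor's
 * upper dword.
 */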
static int e1000_tso(struct e1000_adapter *adapter,
		     struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
{
	struct e1000_context_desc *context_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i;
	u32 cmd_length = 0;
	u16 ipcse = 0, tucse, mss;
	u8 ipcss, ipcso, tucss, tucso, hdr_len;
	int err;

	if (skb_is_gso(skb)) {
		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		mss = skb_shinfo(skb)->gso_size;
		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = ip_hdr(skb);
			iph->tot_len = 0;
			iph->check = 0;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			cmd_length = E1000_TXD_CMD_IP;
			ipcse = skb_transport_offset(skb) - 1;
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr,
						 0, IPPROTO_TCP, 0);
			ipcse = 0;
		}
		ipcss = skb_network_offset(skb);
		ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
		tucss = skb_transport_offset(skb);
		tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
		tucse = 0;

		cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
			       E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));

		i = tx_ring->next_to_use;
		context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
		buffer_info = &tx_ring->buffer_info[i];

		context_desc->lower_setup.ip_fields.ipcss  = ipcss;
		context_desc->lower_setup.ip_fields.ipcso  = ipcso;
		context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
		context_desc->upper_setup.tcp_fields.tucss = tucss;
		context_desc->upper_setup.tcp_fields.tucso = tucso;
		context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
		context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
		context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
		context_desc->cmd_and_length = cpu_to_le32(cmd_length);

		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;

		if (++i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}
	return false;
}
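
/* For illustration: a linear IPv4 TCP skb with 14 + 20 + 20 = 54 bytes of
 * headers, 2896 bytes of payload and gso_size (mss) of 1448 produces one
 * context descriptor with hdr_len = 54, mss = 1448 and PAYLEN
 * (skb->len - hdr_len = 2896) in the low 20 bits of cmd_and_length.  The
 * hardware then cuts two 1448-byte segments and replicates/fixes up the
 * headers, which is why the TCP checksum above is seeded with only the
 * pseudo-header sum (length 0).
 */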
static bool e1000_tx_csum(struct e1000_adapter *adapter,
			  struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
{
	struct e1000_context_desc *context_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i;
	u8 css;
	u32 cmd_len = E1000_TXD_CMD_DEXT;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return false;

	switch (skb->protocol) {
	case cpu_to_be16(ETH_P_IP):
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			cmd_len |= E1000_TXD_CMD_TCP;
		break;
	case cpu_to_be16(ETH_P_IPV6):
		/* XXX not handling all IPV6 headers */
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			cmd_len |= E1000_TXD_CMD_TCP;
		break;
	default:
		if (unlikely(net_ratelimit()))
			e_warn(drv, "checksum_partial proto=%x!\n",
			       skb->protocol);
		break;
	}

	css = skb_checksum_start_offset(skb);

	i = tx_ring->next_to_use;
	buffer_info = &tx_ring->buffer_info[i];
	context_desc = E1000_CONTEXT_DESC(*tx_ring, i);

	context_desc->lower_setup.ip_config = 0;
	context_desc->upper_setup.tcp_fields.tucss = css;
	context_desc->upper_setup.tcp_fields.tucso =
		css + skb->csum_offset;
	context_desc->upper_setup.tcp_fields.tucse = 0;
	context_desc->tcp_seg_setup.data = 0;
	context_desc->cmd_and_length = cpu_to_le32(cmd_len);

	buffer_info->time_stamp = jiffies;
	buffer_info->next_to_watch = i;

	if (unlikely(++i == tx_ring->count))
		i = 0;
	tx_ring->next_to_use = i;

	return true;
}
#define E1000_MAX_TXD_PWR	12
#define E1000_MAX_DATA_PER_TXD	(1<<E1000_MAX_TXD_PWR)

static int e1000_tx_map(struct e1000_adapter *adapter,
			struct e1000_tx_ring *tx_ring,
			struct sk_buff *skb, unsigned int first,
			unsigned int max_per_txd, unsigned int nr_frags,
			unsigned int mss)
{
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_buffer *buffer_info;
	unsigned int len = skb_headlen(skb);
	unsigned int offset = 0, size, count = 0, i;
	unsigned int f, bytecount, segs;

	i = tx_ring->next_to_use;

	while (len) {
		buffer_info = &tx_ring->buffer_info[i];
		size = min(len, max_per_txd);
		/* Workaround for Controller erratum --
		 * descriptor for non-tso packet in a linear SKB that follows a
		 * tso gets written back prematurely before the data is fully
		 * DMA'd to the controller
		 */
		if (!skb->data_len && tx_ring->last_tx_tso &&
		    !skb_is_gso(skb)) {
			tx_ring->last_tx_tso = false;
			size -= 4;
		}

		/* Workaround for premature desc write-backs
		 * in TSO mode.  Append 4-byte sentinel desc
		 */
		if (unlikely(mss && !nr_frags && size == len && size > 8))
			size -= 4;
		/* work-around for errata 10 and it applies
		 * to all controllers in PCI-X mode
		 * The fix is to make sure that the first descriptor of a
		 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
		 */
		if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
			     (size > 2015) && count == 0))
			size = 2015;

		/* Workaround for potential 82544 hang in PCI-X.  Avoid
		 * terminating buffers within evenly-aligned dwords.
		 */
		if (unlikely(adapter->pcix_82544 &&
		   !((unsigned long)(skb->data + offset + size - 1) & 4) &&
		   size > 4))
			size -= 4;

		buffer_info->length = size;
		/* set time_stamp *before* dma to help avoid a possible race */
		buffer_info->time_stamp = jiffies;
		buffer_info->mapped_as_page = false;
		buffer_info->dma = dma_map_single(&pdev->dev,
						  skb->data + offset,
						  size, DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
			goto dma_error;
		buffer_info->next_to_watch = i;

		len -= size;
		offset += size;
		count++;
		if (len) {
			i++;
			if (unlikely(i == tx_ring->count))
				i = 0;
		}
	}

	for (f = 0; f < nr_frags; f++) {
		const struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = skb_frag_size(frag);
		offset = 0;

		while (len) {
			unsigned long bufend;
			i++;
			if (unlikely(i == tx_ring->count))
				i = 0;

			buffer_info = &tx_ring->buffer_info[i];
			size = min(len, max_per_txd);
			/* Workaround for premature desc write-backs
			 * in TSO mode.  Append 4-byte sentinel desc
			 */
			if (unlikely(mss && f == (nr_frags-1) &&
			    size == len && size > 8))
				size -= 4;
			/* Workaround for potential 82544 hang in PCI-X.
			 * Avoid terminating buffers within evenly-aligned
			 * dwords.
			 */
			bufend = (unsigned long)
				page_to_phys(skb_frag_page(frag));
			bufend += offset + size - 1;
			if (unlikely(adapter->pcix_82544 &&
				     !(bufend & 4) &&
				     size > 4))
				size -= 4;

			buffer_info->length = size;
			buffer_info->time_stamp = jiffies;
			buffer_info->mapped_as_page = true;
			buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
						offset, size, DMA_TO_DEVICE);
			if (dma_mapping_error(&pdev->dev, buffer_info->dma))
				goto dma_error;
			buffer_info->next_to_watch = i;

			len -= size;
			offset += size;
			count++;
		}
	}

	segs = skb_shinfo(skb)->gso_segs ?: 1;
	/* multiply data chunks by size of headers */
	bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;

	tx_ring->buffer_info[i].skb = skb;
	tx_ring->buffer_info[i].segs = segs;
	tx_ring->buffer_info[i].bytecount = bytecount;
	tx_ring->buffer_info[first].next_to_watch = i;

	return count;

dma_error:
	dev_err(&pdev->dev, "TX DMA map failed\n");
	buffer_info->dma = 0;
	if (count)
		count--;

	while (count--) {
		if (i == 0)
			i += tx_ring->count;
		i--;
		buffer_info = &tx_ring->buffer_info[i];
		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
	}

	return 0;
}
static void e1000_tx_queue(struct e1000_adapter *adapter,
			   struct e1000_tx_ring *tx_ring, int tx_flags,
			   int count)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_tx_desc *tx_desc = NULL;
	struct e1000_buffer *buffer_info;
	u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
	unsigned int i;

	if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
			     E1000_TXD_CMD_TSE;
		txd_upper |= E1000_TXD_POPTS_TXSM << 8;

		if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
			txd_upper |= E1000_TXD_POPTS_IXSM << 8;
	}

	if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
	}

	if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
		txd_lower |= E1000_TXD_CMD_VLE;
		txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
	}

	if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
		txd_lower &= ~(E1000_TXD_CMD_IFCS);

	i = tx_ring->next_to_use;

	while (count--) {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = E1000_TX_DESC(*tx_ring, i);
		tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->lower.data =
			cpu_to_le32(txd_lower | buffer_info->length);
		tx_desc->upper.data = cpu_to_le32(txd_upper);
		if (unlikely(++i == tx_ring->count))
			i = 0;
	}

	tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);

	/* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
	if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
		tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, hw->hw_addr + tx_ring->tdt);
	/* we need this if more than one processor can write to our tail
	 * at a time, it synchronizes IO on IA64/Altix systems
	 */
	mmiowb();
}
/* 82547 workaround to avoid controller hang in half-duplex environment.
 * The workaround is to avoid queuing a large packet that would span
 * the internal Tx FIFO ring boundary by notifying the stack to resend
 * the packet at a later time.  This gives the Tx FIFO an opportunity to
 * flush all packets.  When that occurs, we reset the Tx FIFO pointers
 * to the beginning of the Tx FIFO.
 */

#define E1000_FIFO_HDR			0x10
#define E1000_82547_PAD_LEN		0x3E0

static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
				       struct sk_buff *skb)
{
	u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
	u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;

	skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);

	if (adapter->link_duplex != HALF_DUPLEX)
		goto no_fifo_stall_required;

	if (atomic_read(&adapter->tx_fifo_stall))
		return 1;

	if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
		atomic_set(&adapter->tx_fifo_stall, 1);
		return 1;
	}

no_fifo_stall_required:
	adapter->tx_fifo_head += skb_fifo_len;
	if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
		adapter->tx_fifo_head -= adapter->tx_fifo_size;
	return 0;
}
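
/* For illustration: a 1500-byte frame occupies
 * ALIGN(1500 + E1000_FIFO_HDR, E1000_FIFO_HDR) = 1520 bytes of FIFO.
 * The stall triggers when 1520 >= 0x3E0 (992) + fifo_space, i.e. with
 * 528 or fewer bytes left before the FIFO boundary, since such a frame
 * could wrap the boundary; otherwise the head simply advances modulo
 * tx_fifo_size.
 */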
static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_tx_ring *tx_ring = adapter->tx_ring;

	netif_stop_queue(netdev);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it.
	 */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available.
	 */
	if (likely(E1000_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! */
	netif_start_queue(netdev);
	++adapter->restart_queue;
	return 0;
}

static int e1000_maybe_stop_tx(struct net_device *netdev,
			       struct e1000_tx_ring *tx_ring, int size)
{
	if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __e1000_maybe_stop_tx(netdev, size);
}
#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
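/* For illustration: TXD_USE_COUNT(9000, 12) = (9000 >> 12) + 1 = 3, i.e.
 * a 9000-byte buffer needs three descriptors of at most 4096 bytes each.
 * The "+ 1" makes this a conservative ceiling -- an exact multiple such
 * as 4096 still reserves two descriptors -- so it only ever over-reserves.
 */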
static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_tx_ring *tx_ring;
	unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
	unsigned int tx_flags = 0;
	unsigned int len = skb_headlen(skb);
	unsigned int nr_frags;
	unsigned int mss;
	int count = 0;
	int tso;
	unsigned int f;

	/* This goes back to the question of how to logically map a tx queue
	 * to a flow.  Right now, performance is impacted slightly negatively
	 * if using multiple tx queues.  If the stack breaks away from a
	 * single qdisc implementation, we can look at this again.
	 */
	tx_ring = adapter->tx_ring;

	if (unlikely(skb->len <= 0)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	mss = skb_shinfo(skb)->gso_size;
	/* The controller does a simple calculation to
	 * make sure there is enough room in the FIFO before
	 * initiating the DMA for each buffer.  The calc is:
	 * 4 = ceil(buffer len/mss).  To make sure we don't
	 * overrun the FIFO, adjust the max buffer len if mss
	 * drops.
	 */
	if (mss) {
		u8 hdr_len;
		max_per_txd = min(mss << 2, max_per_txd);
		max_txd_pwr = fls(max_per_txd) - 1;

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		if (skb->data_len && hdr_len == len) {
			switch (hw->mac_type) {
				unsigned int pull_size;
			case e1000_82544:
				/* Make sure we have room to chop off 4 bytes,
				 * and that the end alignment will work out to
				 * this hardware's requirements
				 * NOTE: this is a TSO only workaround
				 * if end byte alignment not correct move us
				 * into the next dword
				 */
				if ((unsigned long)(skb_tail_pointer(skb) - 1)
				    & 4)
					break;
				pull_size = min((unsigned int)4,
						skb->data_len);
				if (!__pskb_pull_tail(skb, pull_size)) {
					e_err(drv, "__pskb_pull_tail "
					      "failed.\n");
					dev_kfree_skb_any(skb);
					return NETDEV_TX_OK;
				}
				len = skb_headlen(skb);
				break;
			default:
				/* do nothing */
				break;
			}
		}
	}

	/* reserve a descriptor for the offload context */
	if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
		count++;
	count++;

	/* Controller Erratum workaround */
	if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
		count++;

	count += TXD_USE_COUNT(len, max_txd_pwr);

	if (adapter->pcix_82544)
		count++;

	/* work-around for errata 10 and it applies to all controllers
	 * in PCI-X mode, so add one more descriptor to the count
	 */
	if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
		     (len > 2015)))
		count++;

	nr_frags = skb_shinfo(skb)->nr_frags;
	for (f = 0; f < nr_frags; f++)
		count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
				       max_txd_pwr);
	if (adapter->pcix_82544)
		count += nr_frags;

	/* need: count + 2 desc gap to keep tail from touching
	 * head, otherwise try next time
	 */
	if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
		return NETDEV_TX_BUSY;

	if (unlikely((hw->mac_type == e1000_82547) &&
		     (e1000_82547_fifo_workaround(adapter, skb)))) {
		netif_stop_queue(netdev);
		if (!test_bit(__E1000_DOWN, &adapter->flags))
			schedule_delayed_work(&adapter->fifo_stall_task, 1);
		return NETDEV_TX_BUSY;
	}

	if (vlan_tx_tag_present(skb)) {
		tx_flags |= E1000_TX_FLAGS_VLAN;
		tx_flags |= (vlan_tx_tag_get(skb) <<
			     E1000_TX_FLAGS_VLAN_SHIFT);
	}

	first = tx_ring->next_to_use;

	tso = e1000_tso(adapter, tx_ring, skb);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (likely(tso)) {
		if (likely(hw->mac_type != e1000_82544))
			tx_ring->last_tx_tso = true;
		tx_flags |= E1000_TX_FLAGS_TSO;
	} else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
		tx_flags |= E1000_TX_FLAGS_CSUM;

	if (likely(skb->protocol == htons(ETH_P_IP)))
		tx_flags |= E1000_TX_FLAGS_IPV4;

	if (unlikely(skb->no_fcs))
		tx_flags |= E1000_TX_FLAGS_NO_FCS;

	count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
			     nr_frags, mss);

	if (count) {
		skb_tx_timestamp(skb);

		e1000_tx_queue(adapter, tx_ring, tx_flags, count);
		/* Make sure there is space in the ring for the next send. */
		e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
	} else {
		dev_kfree_skb_any(skb);
		tx_ring->buffer_info[first].time_stamp = 0;
		tx_ring->next_to_use = first;
	}

	return NETDEV_TX_OK;
}
#define NUM_REGS 38 /* 1 based count */
static void e1000_regdump(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 regs[NUM_REGS];
	u32 *regs_buff = regs;
	int i = 0;

	static const char * const reg_name[] = {
		"CTRL", "STATUS",
		"RCTL", "RDLEN", "RDH", "RDT", "RDTR",
		"TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT",
		"TIDV", "TXDCTL", "TADV", "TARC0",
		"TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1",
		"TXDCTL1", "TARC1",
		"CTRL_EXT", "ERT", "RDBAL", "RDBAH",
		"TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC",
		"RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC"
	};

	regs_buff[0]  = er32(CTRL);
	regs_buff[1]  = er32(STATUS);

	regs_buff[2]  = er32(RCTL);
	regs_buff[3]  = er32(RDLEN);
	regs_buff[4]  = er32(RDH);
	regs_buff[5]  = er32(RDT);
	regs_buff[6]  = er32(RDTR);

	regs_buff[7]  = er32(TCTL);
	regs_buff[8]  = er32(TDBAL);
	regs_buff[9]  = er32(TDBAH);
	regs_buff[10] = er32(TDLEN);
	regs_buff[11] = er32(TDH);
	regs_buff[12] = er32(TDT);
	regs_buff[13] = er32(TIDV);
	regs_buff[14] = er32(TXDCTL);
	regs_buff[15] = er32(TADV);
	regs_buff[16] = er32(TARC0);

	regs_buff[17] = er32(TDBAL1);
	regs_buff[18] = er32(TDBAH1);
	regs_buff[19] = er32(TDLEN1);
	regs_buff[20] = er32(TDH1);
	regs_buff[21] = er32(TDT1);
	regs_buff[22] = er32(TXDCTL1);
	regs_buff[23] = er32(TARC1);
	regs_buff[24] = er32(CTRL_EXT);
	regs_buff[25] = er32(ERT);
	regs_buff[26] = er32(RDBAL0);
	regs_buff[27] = er32(RDBAH0);
	regs_buff[28] = er32(TDFH);
	regs_buff[29] = er32(TDFT);
	regs_buff[30] = er32(TDFHS);
	regs_buff[31] = er32(TDFTS);
	regs_buff[32] = er32(TDFPC);
	regs_buff[33] = er32(RDFH);
	regs_buff[34] = er32(RDFT);
	regs_buff[35] = er32(RDFHS);
	regs_buff[36] = er32(RDFTS);
	regs_buff[37] = er32(RDFPC);

	pr_info("Register dump\n");
	for (i = 0; i < NUM_REGS; i++)
		pr_info("%-15s %08x\n", reg_name[i], regs_buff[i]);
}
/*
 * e1000_dump: Print registers, tx ring and rx ring
 */
static void e1000_dump(struct e1000_adapter *adapter)
{
	/* this code doesn't handle multiple rings */
	struct e1000_tx_ring *tx_ring = adapter->tx_ring;
	struct e1000_rx_ring *rx_ring = adapter->rx_ring;
	int i;

	if (!netif_msg_hw(adapter))
		return;

	/* Print Registers */
	e1000_regdump(adapter);

	/* transmit dump */
	pr_info("TX Desc ring0 dump\n");

	/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
	 *
	 * Legacy Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
	 *   +--------------------------------------------------------------+
	 * 8 | Special  |    CSS    | Status |  CMD   |  CSO   |   Length   |
	 *   +--------------------------------------------------------------+
	 *   63       48 47       36 35    32 31    24 23    16 15          0
	 *
	 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
	 *   63      48 47    40 39       32 31             16 15    8 7      0
	 *   +----------------------------------------------------------------+
	 * 0 |  TUCSE  | TUCS0  |   TUCSS  |     IPCSE       | IPCS0  | IPCSS  |
	 *   +----------------------------------------------------------------+
	 * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
	 *   +----------------------------------------------------------------+
	 *   63      48 47     40 39 36 35 32 31   24 23  20 19              0
	 *
	 * Extended Data Descriptor (DTYP=0x1)
	 *   +----------------------------------------------------------------+
	 * 0 |                     Buffer Address [63:0]                      |
	 *   +----------------------------------------------------------------+
	 * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
	 *   +----------------------------------------------------------------+
	 *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
	 */
	pr_info("Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
	pr_info("Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");

	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
		struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
		struct e1000_buffer *buffer_info = &tx_ring->buffer_info[i];
		struct my_u { __le64 a; __le64 b; };
		struct my_u *u = (struct my_u *)tx_desc;
		const char *type;

		if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
			type = "NTC/U";
		else if (i == tx_ring->next_to_use)
			type = "NTU";
		else if (i == tx_ring->next_to_clean)
			type = "NTC";
		else
			type = "";

		pr_info("T%c[0x%03X]    %016llX %016llX %016llX %04X  %3X %016llX %p %s\n",
			((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i,
			le64_to_cpu(u->a), le64_to_cpu(u->b),
			(u64)buffer_info->dma, buffer_info->length,
			buffer_info->next_to_watch,
			(u64)buffer_info->time_stamp, buffer_info->skb, type);
	}

rx_ring_summary:
	/* receive dump */
	pr_info("\nRX Desc ring dump\n");

	/* Legacy Receive Descriptor Format
	 *
	 * +-----------------------------------------------------+
	 * |                Buffer Address [63:0]                |
	 * +-----------------------------------------------------+
	 * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
	 * +-----------------------------------------------------+
	 * 63       48 47    40 39      32 31         16 15      0
	 */
	pr_info("R[desc]      [address 63:0  ] [vl er S cks ln] [bi->dma       ] [bi->skb]\n");

	if (!netif_msg_rx_status(adapter))
		goto exit;

	for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
		struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
		struct e1000_buffer *buffer_info = &rx_ring->buffer_info[i];
		struct my_u { __le64 a; __le64 b; };
		struct my_u *u = (struct my_u *)rx_desc;
		const char *type;

		if (i == rx_ring->next_to_use)
			type = "NTU";
		else if (i == rx_ring->next_to_clean)
			type = "NTC";
		else
			type = "";

		pr_info("R[0x%03X]     %016llX %016llX %016llX %p %s\n",
			i, le64_to_cpu(u->a), le64_to_cpu(u->b),
			(u64)buffer_info->dma, buffer_info->skb, type);
	}

	/* dump the descriptor caches */
	/* rx */
	pr_info("Rx descriptor cache in 64bit format\n");
	for (i = 0x6000; i <= 0x63FF ; i += 0x10) {
		pr_info("R%04X: %08X|%08X %08X|%08X\n",
			i,
			readl(adapter->hw.hw_addr + i+4),
			readl(adapter->hw.hw_addr + i),
			readl(adapter->hw.hw_addr + i+12),
			readl(adapter->hw.hw_addr + i+8));
	}
	/* tx */
	pr_info("Tx descriptor cache in 64bit format\n");
	for (i = 0x7000; i <= 0x73FF ; i += 0x10) {
		pr_info("T%04X: %08X|%08X %08X|%08X\n",
			i,
			readl(adapter->hw.hw_addr + i+4),
			readl(adapter->hw.hw_addr + i),
			readl(adapter->hw.hw_addr + i+12),
			readl(adapter->hw.hw_addr + i+8));
	}
exit:
	return;
}
/**
 * e1000_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void e1000_tx_timeout(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	adapter->tx_timeout_count++;
	schedule_work(&adapter->reset_task);
}

static void e1000_reset_task(struct work_struct *work)
{
	struct e1000_adapter *adapter =
		container_of(work, struct e1000_adapter, reset_task);

	if (test_bit(__E1000_DOWN, &adapter->flags))
		return;
	e_err(drv, "Reset adapter\n");
	e1000_reinit_safe(adapter);
}
/**
 * e1000_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the watchdog.
 **/
static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
{
	/* only return the current stats */
	return &netdev->stats;
}
/**
 * e1000_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;

	if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
	    (max_frame > MAX_JUMBO_FRAME_SIZE)) {
		e_err(probe, "Invalid MTU setting\n");
		return -EINVAL;
	}

	/* Adapter-specific max frame size limits. */
	switch (hw->mac_type) {
	case e1000_undefined ... e1000_82542_rev2_1:
		if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
			e_err(probe, "Jumbo Frames not supported.\n");
			return -EINVAL;
		}
		break;
	default:
		/* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
		break;
	}

	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);
	/* e1000_down has a dependency on max_frame_size */
	hw->max_frame_size = max_frame;
	if (netif_running(netdev))
		e1000_down(adapter);

	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
	 * means we reserve 2 more, this pushes us to allocate from the next
	 * larger slab size.
	 * i.e. RXBUFFER_2048 --> size-4096 slab
	 * however with the new *_jumbo_rx* routines, jumbo receives will use
	 * fragmented skbs
	 */

	if (max_frame <= E1000_RXBUFFER_2048)
		adapter->rx_buffer_len = E1000_RXBUFFER_2048;
	else
#if (PAGE_SIZE >= E1000_RXBUFFER_16384)
		adapter->rx_buffer_len = E1000_RXBUFFER_16384;
#elif (PAGE_SIZE >= E1000_RXBUFFER_4096)
		adapter->rx_buffer_len = PAGE_SIZE;
#endif

	/* adjust allocation if LPE protects us, and we aren't using SBP */
	if (!hw->tbi_compatibility_on &&
	    ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
	     (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
		adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;

	pr_info("%s changing MTU from %d to %d\n",
		netdev->name, netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		e1000_up(adapter);
	else
		e1000_reset(adapter);

	clear_bit(__E1000_RESETTING, &adapter->flags);

	return 0;
}
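
/* For illustration of the sizing above: with ENET_HEADER_SIZE (14) and
 * ETHERNET_FCS_SIZE (4), the default new_mtu = 1500 gives max_frame = 1518,
 * which fits the E1000_RXBUFFER_2048 allocation; a jumbo new_mtu = 9000
 * gives max_frame = 9018 and selects the page-sized/16 KB buffer path
 * serviced by the *_jumbo_rx* routines.
 */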
/**
 * e1000_update_stats - Update the board statistics counters
 * @adapter: board private structure
 **/
void e1000_update_stats(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long flags;
	u16 phy_tmp;

#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF

	/* Prevent stats update while adapter is being reset, or if the pci
	 * connection is down.
	 */
	if (adapter->link_speed == 0)
		return;
	if (pci_channel_offline(pdev))
		return;

	spin_lock_irqsave(&adapter->stats_lock, flags);

	/* these counters are modified from e1000_tbi_adjust_stats,
	 * called from the interrupt context, so they must only
	 * be written while holding adapter->stats_lock
	 */

	adapter->stats.crcerrs += er32(CRCERRS);
	adapter->stats.gprc += er32(GPRC);
	adapter->stats.gorcl += er32(GORCL);
	adapter->stats.gorch += er32(GORCH);
	adapter->stats.bprc += er32(BPRC);
	adapter->stats.mprc += er32(MPRC);
	adapter->stats.roc += er32(ROC);

	adapter->stats.prc64 += er32(PRC64);
	adapter->stats.prc127 += er32(PRC127);
	adapter->stats.prc255 += er32(PRC255);
	adapter->stats.prc511 += er32(PRC511);
	adapter->stats.prc1023 += er32(PRC1023);
	adapter->stats.prc1522 += er32(PRC1522);

	adapter->stats.symerrs += er32(SYMERRS);
	adapter->stats.mpc += er32(MPC);
	adapter->stats.scc += er32(SCC);
	adapter->stats.ecol += er32(ECOL);
	adapter->stats.mcc += er32(MCC);
	adapter->stats.latecol += er32(LATECOL);
	adapter->stats.dc += er32(DC);
	adapter->stats.sec += er32(SEC);
	adapter->stats.rlec += er32(RLEC);
	adapter->stats.xonrxc += er32(XONRXC);
	adapter->stats.xontxc += er32(XONTXC);
	adapter->stats.xoffrxc += er32(XOFFRXC);
	adapter->stats.xofftxc += er32(XOFFTXC);
	adapter->stats.fcruc += er32(FCRUC);
	adapter->stats.gptc += er32(GPTC);
	adapter->stats.gotcl += er32(GOTCL);
	adapter->stats.gotch += er32(GOTCH);
	adapter->stats.rnbc += er32(RNBC);
	adapter->stats.ruc += er32(RUC);
	adapter->stats.rfc += er32(RFC);
	adapter->stats.rjc += er32(RJC);
	adapter->stats.torl += er32(TORL);
	adapter->stats.torh += er32(TORH);
	adapter->stats.totl += er32(TOTL);
	adapter->stats.toth += er32(TOTH);
	adapter->stats.tpr += er32(TPR);

	adapter->stats.ptc64 += er32(PTC64);
	adapter->stats.ptc127 += er32(PTC127);
	adapter->stats.ptc255 += er32(PTC255);
	adapter->stats.ptc511 += er32(PTC511);
	adapter->stats.ptc1023 += er32(PTC1023);
	adapter->stats.ptc1522 += er32(PTC1522);

	adapter->stats.mptc += er32(MPTC);
	adapter->stats.bptc += er32(BPTC);

	/* used for adaptive IFS */

	hw->tx_packet_delta = er32(TPT);
	adapter->stats.tpt += hw->tx_packet_delta;
	hw->collision_delta = er32(COLC);
	adapter->stats.colc += hw->collision_delta;

	if (hw->mac_type >= e1000_82543) {
		adapter->stats.algnerrc += er32(ALGNERRC);
		adapter->stats.rxerrc += er32(RXERRC);
		adapter->stats.tncrs += er32(TNCRS);
		adapter->stats.cexterr += er32(CEXTERR);
		adapter->stats.tsctc += er32(TSCTC);
		adapter->stats.tsctfc += er32(TSCTFC);
	}

	/* Fill out the OS statistics structure */
	netdev->stats.multicast = adapter->stats.mprc;
	netdev->stats.collisions = adapter->stats.colc;

	/* Rx Errors */

	/* RLEC on some newer hardware can be incorrect so build
	 * our own version based on RUC and ROC
	 */
	netdev->stats.rx_errors = adapter->stats.rxerrc +
		adapter->stats.crcerrs + adapter->stats.algnerrc +
		adapter->stats.ruc + adapter->stats.roc +
		adapter->stats.cexterr;
	adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
	netdev->stats.rx_length_errors = adapter->stats.rlerrc;
	netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
	netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
	netdev->stats.rx_missed_errors = adapter->stats.mpc;

	/* Tx Errors */
	adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
	netdev->stats.tx_errors = adapter->stats.txerrc;
	netdev->stats.tx_aborted_errors = adapter->stats.ecol;
	netdev->stats.tx_window_errors = adapter->stats.latecol;
	netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
	if (hw->bad_tx_carr_stats_fd &&
	    adapter->link_duplex == FULL_DUPLEX) {
		netdev->stats.tx_carrier_errors = 0;
		adapter->stats.tncrs = 0;
	}

	/* Tx Dropped needs to be maintained elsewhere */

	/* Phy Stats */
	if (hw->media_type == e1000_media_type_copper) {
		if ((adapter->link_speed == SPEED_1000) &&
		    (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
			adapter->phy_stats.idle_errors += phy_tmp;
		}

		if ((hw->mac_type <= e1000_82546) &&
		    (hw->phy_type == e1000_phy_m88) &&
		    !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
			adapter->phy_stats.receive_errors += phy_tmp;
	}

	/* Management Stats */
	if (hw->has_smbus) {
		adapter->stats.mgptc += er32(MGTPTC);
		adapter->stats.mgprc += er32(MGTPRC);
		adapter->stats.mgpdc += er32(MGTPDC);
	}

	spin_unlock_irqrestore(&adapter->stats_lock, flags);
}
/**
 * e1000_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t e1000_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = er32(ICR);

	if (unlikely((!icr)))
		return IRQ_NONE;  /* Not our interrupt */

	/* we might have caused the interrupt, but the above
	 * read cleared it, and just in case the driver is
	 * down there is nothing to do so return handled
	 */
	if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
		return IRQ_HANDLED;

	if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
		hw->get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->flags))
			schedule_delayed_work(&adapter->watchdog_task, 1);
	}

	/* disable interrupts, without the synchronize_irq bit */
	ew32(IMC, ~0);
	E1000_WRITE_FLUSH();

	if (likely(napi_schedule_prep(&adapter->napi))) {
		adapter->total_tx_bytes = 0;
		adapter->total_tx_packets = 0;
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->napi);
	} else {
		/* this really should not happen! if it does it is basically a
		 * bug, but not a hard error, so enable ints and continue
		 */
		if (!test_bit(__E1000_DOWN, &adapter->flags))
			e1000_irq_enable(adapter);
	}

	return IRQ_HANDLED;
}
/**
 * e1000_clean - NAPI Rx polling callback
 * @adapter: board private structure
 **/
static int e1000_clean(struct napi_struct *napi, int budget)
{
	struct e1000_adapter *adapter = container_of(napi,
						     struct e1000_adapter,
						     napi);
	int tx_clean_complete = 0, work_done = 0;

	tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);

	adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);

	if (!tx_clean_complete)
		work_done = budget;

	/* If budget not fully consumed, exit the polling mode */
	if (work_done < budget) {
		if (likely(adapter->itr_setting & 3))
			e1000_set_itr(adapter);
		napi_complete(napi);
		if (!test_bit(__E1000_DOWN, &adapter->flags))
			e1000_irq_enable(adapter);
	}

	return work_done;
}
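
/* For illustration of the budget contract above: with the usual NAPI
 * budget of 64, a poll that fully reclaims Tx work and finds only 10 Rx
 * packets reports work_done = 10 < 64, so it re-arms interrupts via
 * napi_complete()/e1000_irq_enable().  If Tx cleaning was cut short,
 * work_done is forced to the full budget so the softirq keeps polling.
 */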
/**
 * e1000_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 **/
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
			       struct e1000_tx_ring *tx_ring)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct e1000_tx_desc *tx_desc, *eop_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i, eop;
	unsigned int count = 0;
	unsigned int total_tx_bytes = 0, total_tx_packets = 0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = E1000_TX_DESC(*tx_ring, eop);

	while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		bool cleaned = false;
		rmb(); /* read buffer_info after eop_desc */
		for ( ; !cleaned; count++) {
			tx_desc = E1000_TX_DESC(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);

			if (cleaned) {
				total_tx_packets += buffer_info->segs;
				total_tx_bytes += buffer_info->bytecount;
			}
			e1000_unmap_and_free_tx_resource(adapter, buffer_info);
			tx_desc->upper.data = 0;

			if (unlikely(++i == tx_ring->count))
				i = 0;
		}

		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = E1000_TX_DESC(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD 32
	if (unlikely(count && netif_carrier_ok(netdev) &&
		     E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();

		if (netif_queue_stopped(netdev) &&
		    !(test_bit(__E1000_DOWN, &adapter->flags))) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
	}

	if (adapter->detect_tx_hung) {
		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i
		 */
		adapter->detect_tx_hung = false;
		if (tx_ring->buffer_info[eop].time_stamp &&
		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
			       (adapter->tx_timeout_factor * HZ)) &&
		    !(er32(STATUS) & E1000_STATUS_TXOFF)) {

			/* detected Tx unit hang */
			e_err(drv, "Detected Tx Unit Hang\n"
			      "  Tx Queue             <%lu>\n"
			      "  TDH                  <%x>\n"
			      "  TDT                  <%x>\n"
			      "  next_to_use          <%x>\n"
			      "  next_to_clean        <%x>\n"
			      "buffer_info[next_to_clean]\n"
			      "  time_stamp           <%lx>\n"
			      "  next_to_watch        <%x>\n"
			      "  jiffies              <%lx>\n"
			      "  next_to_watch.status <%x>\n",
			      (unsigned long)((tx_ring - adapter->tx_ring) /
					      sizeof(struct e1000_tx_ring)),
			      readl(hw->hw_addr + tx_ring->tdh),
			      readl(hw->hw_addr + tx_ring->tdt),
			      tx_ring->next_to_use,
			      tx_ring->next_to_clean,
			      tx_ring->buffer_info[eop].time_stamp,
			      eop,
			      jiffies,
			      eop_desc->upper.fields.status);
			e1000_dump(adapter);
			netif_stop_queue(netdev);
		}
	}
	adapter->total_tx_bytes += total_tx_bytes;
	adapter->total_tx_packets += total_tx_packets;
	netdev->stats.tx_bytes += total_tx_bytes;
	netdev->stats.tx_packets += total_tx_packets;
	return count < tx_ring->count;
}
/**
 * e1000_rx_checksum - Receive Checksum Offload for 82543
 * @adapter: board private structure
 * @status_err: receive descriptor status and error fields
 * @csum: receive descriptor csum field
 * @skb: socket buffer with received data
 **/
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
			      u32 csum, struct sk_buff *skb)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 status = (u16)status_err;
	u8 errors = (u8)(status_err >> 24);

	skb_checksum_none_assert(skb);

	/* 82543 or newer only */
	if (unlikely(hw->mac_type < e1000_82543))
		return;
	/* Ignore Checksum bit is set */
	if (unlikely(status & E1000_RXD_STAT_IXSM))
		return;
	/* TCP/UDP checksum error bit is set */
	if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
		/* let the stack verify checksum errors */
		adapter->hw_csum_err++;
		return;
	}
	/* TCP/UDP Checksum has not been calculated */
	if (!(status & E1000_RXD_STAT_TCPCS))
		return;

	/* It must be a TCP or UDP packet with a valid checksum */
	if (likely(status & E1000_RXD_STAT_TCPCS)) {
		/* TCP checksum is good */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
	adapter->hw_csum_good++;
}
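
/* For illustration of the status_err packing used by the callers:
 *   status_err = (u32)rx_desc->status | ((u32)rx_desc->errors << 24)
 * so e.g. 0x20000003 unpacks to status = 0x0003 (DD | EOP) and
 * errors = 0x20 (TCP/UDP checksum error), which takes the hw_csum_err
 * path above and leaves verification to the stack.
 */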
/**
 * e1000_consume_page - helper function
 **/
static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
			       u16 length)
{
	bi->page = NULL;
	skb->len += length;
	skb->data_len += length;
	skb->truesize += PAGE_SIZE;
}
/**
 * e1000_receive_skb - helper function to handle rx indications
 * @adapter: board private structure
 * @status: descriptor status field as written by hardware
 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
 * @skb: pointer to sk_buff to be indicated to stack
 */
static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
			      __le16 vlan, struct sk_buff *skb)
{
	skb->protocol = eth_type_trans(skb, adapter->netdev);

	if (status & E1000_RXD_STAT_VP) {
		u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;

		__vlan_hwaccel_put_tag(skb, vid);
	}
	napi_gro_receive(&adapter->napi, skb);
}
4014 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
4015 * @adapter: board private structure
4016 * @rx_ring: ring to clean
4017 * @work_done: amount of napi work completed this call
4018 * @work_to_do: max amount of work allowed for this call to do
4020 * the return value indicates whether actual cleaning was done, there
4021 * is no guarantee that everything was cleaned
4023 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
4024 struct e1000_rx_ring *rx_ring,
4025 int *work_done, int work_to_do)
4027 struct e1000_hw *hw = &adapter->hw;
4028 struct net_device *netdev = adapter->netdev;
4029 struct pci_dev *pdev = adapter->pdev;
4030 struct e1000_rx_desc *rx_desc, *next_rxd;
4031 struct e1000_buffer *buffer_info, *next_buffer;
4032 unsigned long irq_flags;
4035 int cleaned_count = 0;
4036 bool cleaned = false;
4037 unsigned int total_rx_bytes=0, total_rx_packets=0;
4039 i = rx_ring->next_to_clean;
4040 rx_desc = E1000_RX_DESC(*rx_ring, i);
4041 buffer_info = &rx_ring->buffer_info[i];
4043 while (rx_desc->status & E1000_RXD_STAT_DD) {
4044 struct sk_buff *skb;
4047 if (*work_done >= work_to_do)
4050 rmb(); /* read descriptor and rx_buffer_info after status DD */
4052 status = rx_desc->status;
4053 skb = buffer_info->skb;
4054 buffer_info->skb = NULL;
4056 if (++i == rx_ring->count) i = 0;
4057 next_rxd = E1000_RX_DESC(*rx_ring, i);
4060 next_buffer = &rx_ring->buffer_info[i];
4064 dma_unmap_page(&pdev->dev, buffer_info->dma,
4065 buffer_info->length, DMA_FROM_DEVICE);
4066 buffer_info->dma = 0;
4068 length = le16_to_cpu(rx_desc->length);
4070 /* errors is only valid for DD + EOP descriptors */
4071 if (unlikely((status & E1000_RXD_STAT_EOP) &&
4072 (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
4076 mapped = page_address(buffer_info->page);
4077 last_byte = *(mapped + length - 1);
4078 if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
4080 spin_lock_irqsave(&adapter->stats_lock,
4082 e1000_tbi_adjust_stats(hw, &adapter->stats,
4084 spin_unlock_irqrestore(&adapter->stats_lock,
4088 if (netdev->features & NETIF_F_RXALL)
4090 /* recycle both page and skb */
4091 buffer_info->skb = skb;
4092 /* an error means any chain goes out the window
4094 if (rx_ring->rx_skb_top)
4095 dev_kfree_skb(rx_ring->rx_skb_top);
4096 rx_ring->rx_skb_top = NULL;
4101 #define rxtop rx_ring->rx_skb_top
process_skb:
		if (!(status & E1000_RXD_STAT_EOP)) {
			/* this descriptor is only the beginning (or middle) */
			if (!rxtop) {
				/* this is the beginning of a chain */
				rxtop = skb;
				skb_fill_page_desc(rxtop, 0, buffer_info->page,
						   0, length);
			} else {
				/* this is the middle of a chain */
				skb_fill_page_desc(rxtop,
						   skb_shinfo(rxtop)->nr_frags,
						   buffer_info->page, 0, length);
				/* re-use the skb, only consumed the page */
				buffer_info->skb = skb;
			}
			e1000_consume_page(buffer_info, rxtop, length);
			goto next_desc;
		} else {
			if (rxtop) {
				/* end of the chain */
				skb_fill_page_desc(rxtop,
						   skb_shinfo(rxtop)->nr_frags,
						   buffer_info->page, 0, length);
				/* re-use the current skb, we only consumed
				 * the page
				 */
				buffer_info->skb = skb;
				skb = rxtop;
				rxtop = NULL;
				e1000_consume_page(buffer_info, skb, length);
			} else {
				/* no chain, got EOP, this buf is the packet
				 * copybreak to save the put_page/alloc_page
				 */
				if (length <= copybreak &&
				    skb_tailroom(skb) >= length) {
					u8 *vaddr;

					vaddr = kmap_atomic(buffer_info->page);
					memcpy(skb_tail_pointer(skb), vaddr,
					       length);
					kunmap_atomic(vaddr);
					/* re-use the page, so don't erase
					 * buffer_info->page
					 */
					skb_put(skb, length);
				} else {
					skb_fill_page_desc(skb, 0,
							   buffer_info->page, 0,
							   length);
					e1000_consume_page(buffer_info, skb,
							   length);
				}
			}
		}
		/* Receive Checksum Offload XXX recompute due to CRC strip? */
		e1000_rx_checksum(adapter,
				  (u32)(status) |
				  ((u32)(rx_desc->errors) << 24),
				  le16_to_cpu(rx_desc->csum), skb);

		total_rx_bytes += (skb->len - 4); /* don't count FCS */
		if (likely(!(netdev->features & NETIF_F_RXFCS)))
			pskb_trim(skb, skb->len - 4);
		total_rx_packets++;

		/* eth type trans needs skb->data to point to something */
		if (!pskb_may_pull(skb, ETH_HLEN)) {
			e_err(drv, "pskb_may_pull failed.\n");
			dev_kfree_skb(skb);
			goto next_desc;
		}

		e1000_receive_skb(adapter, status, rx_desc->special, skb);

next_desc:
		rx_desc->status = 0;
		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	cleaned_count = E1000_DESC_UNUSED(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);

	adapter->total_rx_packets += total_rx_packets;
	adapter->total_rx_bytes += total_rx_bytes;
	netdev->stats.rx_bytes += total_rx_bytes;
	netdev->stats.rx_packets += total_rx_packets;
	return cleaned;
}
/* this should improve performance for small packets with large amounts
 * of reassembly being done in the stack
 */
static void e1000_check_copybreak(struct net_device *netdev,
				  struct e1000_buffer *buffer_info,
				  u32 length, struct sk_buff **skb)
{
	struct sk_buff *new_skb;

	if (length > copybreak)
		return;

	new_skb = netdev_alloc_skb_ip_align(netdev, length);
	if (!new_skb)
		return;

	skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
				       (*skb)->data - NET_IP_ALIGN,
				       length + NET_IP_ALIGN);
	/* save the skb in buffer_info as good */
	buffer_info->skb = *skb;
	*skb = new_skb;
}
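/* The copybreak cutoff trades one memcpy against recycling cost: for a
 * packet no longer than 'copybreak' bytes it is cheaper to copy the data
 * into a small fresh skb and hand the original, fully mapped buffer back
 * to the ring than to allocate and DMA-map a replacement buffer.
 */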
/**
 * e1000_clean_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 * @rx_ring: ring to clean
 * @work_done: amount of napi work completed this call
 * @work_to_do: max amount of work allowed for this call to do
 */
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       struct e1000_rx_ring *rx_ring,
			       int *work_done, int work_to_do)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	unsigned long flags;
	u32 length;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];
	while (rx_desc->status & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;
		u8 status;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		rmb(); /* read descriptor and rx_buffer_info after status DD */

		status = rx_desc->status;
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		prefetch(skb->data - NET_IP_ALIGN);

		if (++i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_single(&pdev->dev, buffer_info->dma,
				 buffer_info->length, DMA_FROM_DEVICE);
		buffer_info->dma = 0;
		length = le16_to_cpu(rx_desc->length);
		/* !EOP means multiple descriptors were used to store a single
		 * packet, if that's the case we need to toss it. In fact, we
		 * need to toss every packet with the EOP bit clear and the
		 * next frame that _does_ have the EOP bit set, as it is by
		 * definition only a frame fragment
		 */
		if (unlikely(!(status & E1000_RXD_STAT_EOP)))
			adapter->discarding = true;

		if (adapter->discarding) {
			/* All receives must fit into a single buffer */
			e_dbg("Receive packet consumed multiple buffers\n");
			/* recycle */
			buffer_info->skb = skb;
			if (status & E1000_RXD_STAT_EOP)
				adapter->discarding = false;
			goto next_desc;
		}
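		/* adapter->discarding is a one-bit state machine: it is set
		 * on the first fragment of an oversized frame and holds the
		 * loop in recycle-and-skip mode until the descriptor that
		 * carries EOP clears it, so every fragment of the bad frame
		 * is dropped, never just the first one.
		 */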
		if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
			u8 last_byte = *(skb->data + length - 1);

			if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
				       last_byte)) {
				spin_lock_irqsave(&adapter->stats_lock, flags);
				e1000_tbi_adjust_stats(hw, &adapter->stats,
						       length, skb->data);
				spin_unlock_irqrestore(&adapter->stats_lock,
						       flags);
				length--;
			} else {
				if (netdev->features & NETIF_F_RXALL)
					goto process_skb;
				/* recycle */
				buffer_info->skb = skb;
				goto next_desc;
			}
		}
process_skb:
		total_rx_bytes += (length - 4); /* don't count FCS */
		total_rx_packets++;

		if (likely(!(netdev->features & NETIF_F_RXFCS)))
			/* adjust length to remove Ethernet CRC, this must be
			 * done after the TBI_ACCEPT workaround above
			 */
			length -= 4;

		e1000_check_copybreak(netdev, buffer_info, length, &skb);

		skb_put(skb, length);

		/* Receive Checksum Offload */
		e1000_rx_checksum(adapter,
				  (u32)(status) |
				  ((u32)(rx_desc->errors) << 24),
				  le16_to_cpu(rx_desc->csum), skb);

		e1000_receive_skb(adapter, status, rx_desc->special, skb);

next_desc:
		rx_desc->status = 0;
		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	cleaned_count = E1000_DESC_UNUSED(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);

	adapter->total_rx_packets += total_rx_packets;
	adapter->total_rx_bytes += total_rx_bytes;
	netdev->stats.rx_bytes += total_rx_bytes;
	netdev->stats.rx_packets += total_rx_packets;
	return cleaned;
}
/**
 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
 * @adapter: address of board private structure
 * @rx_ring: pointer to receive ring structure
 * @cleaned_count: number of buffers to allocate this pass
 **/
static void
e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
			     struct e1000_rx_ring *rx_ring, int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = 256 - 16; /* for skb_reserve */

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];
	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto check_page;
		}

		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
		buffer_info->length = adapter->rx_buffer_len;
check_page:
		/* allocate a new page if necessary */
		if (!buffer_info->page) {
			buffer_info->page = alloc_page(GFP_ATOMIC);
			if (unlikely(!buffer_info->page)) {
				adapter->alloc_rx_buff_failed++;
				break;
			}
		}

		if (!buffer_info->dma) {
			buffer_info->dma = dma_map_page(&pdev->dev,
							buffer_info->page, 0,
							buffer_info->length,
							DMA_FROM_DEVICE);
			if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
				put_page(buffer_info->page);
				dev_kfree_skb(skb);
				buffer_info->page = NULL;
				buffer_info->skb = NULL;
				buffer_info->dma = 0;
				adapter->alloc_rx_buff_failed++;
				break; /* while !buffer_info->skb */
			}
		}
		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count))
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);
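		/* RDT must name the last descriptor software has filled,
		 * not next_to_use itself: with a 256-entry ring filled
		 * through index 10, next_to_use becomes 11 and the value
		 * written to RDT is 10 (wrapping to count - 1 from 0).
		 */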
		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->rdt);
	}
}
/**
 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
 * @adapter: address of board private structure
 * @rx_ring: pointer to receive ring structure
 * @cleaned_count: number of buffers to allocate this pass
 **/
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   struct e1000_rx_ring *rx_ring,
				   int cleaned_count)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = adapter->rx_buffer_len;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];
	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto map_skb;
		}

		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		/* Fix for errata 23, can't cross 64kB boundary */
		if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
			struct sk_buff *oldskb = skb;

			e_err(rx_err, "skb align check failed: %u bytes at %p\n",
			      bufsz, skb->data);
			/* Try again, without freeing the previous */
			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
			/* Failed allocation, critical failure */
			if (!skb) {
				dev_kfree_skb(oldskb);
				adapter->alloc_rx_buff_failed++;
				break;
			}

			if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
				/* give up */
				dev_kfree_skb(skb);
				dev_kfree_skb(oldskb);
				adapter->alloc_rx_buff_failed++;
				break; /* while !buffer_info->skb */
			}

			/* Use new allocation */
			dev_kfree_skb(oldskb);
		}
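		/* Errata 23 forbids a receive buffer that straddles a 64 kB
		 * boundary. The retry above allocates a second skb while
		 * still holding the misaligned one, so the allocator cannot
		 * simply hand the same bad region back; only once the
		 * replacement passes the check is the old skb released.
		 */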
		buffer_info->skb = skb;
		buffer_info->length = adapter->rx_buffer_len;
map_skb:
		buffer_info->dma = dma_map_single(&pdev->dev,
						  skb->data,
						  buffer_info->length,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
			dev_kfree_skb(skb);
			buffer_info->skb = NULL;
			buffer_info->dma = 0;
			adapter->alloc_rx_buff_failed++;
			break; /* while !buffer_info->skb */
		}

		/* XXX if it was allocated cleanly it will never map to a
		 * boundary crossing
		 */

		/* Fix for errata 23, can't cross 64kB boundary */
		if (!e1000_check_64k_bound(adapter,
					   (void *)(unsigned long)buffer_info->dma,
					   adapter->rx_buffer_len)) {
			e_err(rx_err, "dma align check failed: %u bytes at %p\n",
			      adapter->rx_buffer_len,
			      (void *)(unsigned long)buffer_info->dma);
			dev_kfree_skb(skb);
			buffer_info->skb = NULL;

			dma_unmap_single(&pdev->dev, buffer_info->dma,
					 adapter->rx_buffer_len,
					 DMA_FROM_DEVICE);
			buffer_info->dma = 0;

			adapter->alloc_rx_buff_failed++;
			break; /* while !buffer_info->skb */
		}
		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count))
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, hw->hw_addr + rx_ring->rdt);
	}
}
/**
 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
 * @adapter: address of board private structure
 **/
static void e1000_smartspeed(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 phy_status;
	u16 phy_ctrl;

	if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
	    !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
		return;

	if (adapter->smartspeed == 0) {
		/* If Master/Slave config fault is asserted twice,
		 * we assume back-to-back
		 */
		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
			return;
		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
			return;
		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
		if (phy_ctrl & CR_1000T_MS_ENABLE) {
			phy_ctrl &= ~CR_1000T_MS_ENABLE;
			e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
			adapter->smartspeed++;
			if (!e1000_phy_setup_autoneg(hw) &&
			    !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
				phy_ctrl |= (MII_CR_AUTO_NEG_EN |
					     MII_CR_RESTART_AUTO_NEG);
				e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
			}
		}
		return;
	} else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
		/* If still no link, perhaps using 2/3 pair cable */
		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
		phy_ctrl |= CR_1000T_MS_ENABLE;
		e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
		if (!e1000_phy_setup_autoneg(hw) &&
		    !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
			phy_ctrl |= (MII_CR_AUTO_NEG_EN |
				     MII_CR_RESTART_AUTO_NEG);
			e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
		}
	}
	/* Restart process after E1000_SMARTSPEED_MAX iterations */
	if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
		adapter->smartspeed = 0;
}
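/* The workaround runs as a counter-driven sequence from the watchdog: on
 * the first pass it clears the PHY's forced master/slave setting after two
 * consecutive config faults and restarts autonegotiation; if the counter
 * reaches E1000_SMARTSPEED_DOWNSHIFT without link it re-enables the forced
 * setting (for 2/3-pair cabling), and the counter wraps at
 * E1000_SMARTSPEED_MAX so the whole cycle can repeat.
 */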
/**
 * e1000_ioctl - handle a device ioctl
 * @netdev: pointer to our netdev
 * @ifr: interface request structure
 * @cmd: ioctl command to execute
 **/
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return e1000_mii_ioctl(netdev, ifr, cmd);
	default:
		return -EOPNOTSUPP;
	}
}
/**
 * e1000_mii_ioctl - handle MII ioctls
 * @netdev: pointer to our netdev
 * @ifr: interface request structure
 * @cmd: ioctl command to execute
 **/
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct mii_ioctl_data *data = if_mii(ifr);
	int retval;
	u16 mii_reg;
	unsigned long flags;

	if (hw->media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = hw->phy_addr;
		break;
	case SIOCGMIIREG:
		spin_lock_irqsave(&adapter->stats_lock, flags);
		if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
				       &data->val_out)) {
			spin_unlock_irqrestore(&adapter->stats_lock, flags);
			return -EIO;
		}
		spin_unlock_irqrestore(&adapter->stats_lock, flags);
		break;
	case SIOCSMIIREG:
		if (data->reg_num & ~(0x1F))
			return -EFAULT;
		mii_reg = data->val_in;
		spin_lock_irqsave(&adapter->stats_lock, flags);
		if (e1000_write_phy_reg(hw, data->reg_num, mii_reg)) {
			spin_unlock_irqrestore(&adapter->stats_lock, flags);
			return -EIO;
		}
		spin_unlock_irqrestore(&adapter->stats_lock, flags);
		if (hw->media_type == e1000_media_type_copper) {
			switch (data->reg_num) {
			case PHY_CTRL:
				if (mii_reg & MII_CR_POWER_DOWN)
					break;
				if (mii_reg & MII_CR_AUTO_NEG_EN) {
					hw->autoneg = 1;
					hw->autoneg_advertised = 0x2F;
				} else {
					u32 speed;
					if (mii_reg & 0x40)
						speed = SPEED_1000;
					else if (mii_reg & 0x2000)
						speed = SPEED_100;
					else
						speed = SPEED_10;
					retval = e1000_set_spd_dplx(
						adapter, speed,
						(mii_reg & 0x100) ?
						DUPLEX_FULL : DUPLEX_HALF);
					if (retval)
						return retval;
				}
				if (netif_running(adapter->netdev))
					e1000_reinit_locked(adapter);
				else
					e1000_reset(adapter);
				break;
			case M88E1000_PHY_SPEC_CTRL:
			case M88E1000_EXT_PHY_SPEC_CTRL:
				if (e1000_phy_reset(hw))
					return -EIO;
				break;
			}
		} else {
			switch (data->reg_num) {
			case PHY_CTRL:
				if (mii_reg & MII_CR_POWER_DOWN)
					break;
				if (netif_running(adapter->netdev))
					e1000_reinit_locked(adapter);
				else
					e1000_reset(adapter);
				break;
			}
		}
		break;
	default:
		return -EOPNOTSUPP;
	}
	return E1000_SUCCESS;
}
void e1000_pci_set_mwi(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;
	int ret_val = pci_set_mwi(adapter->pdev);

	if (ret_val)
		e_err(probe, "Error in setting MWI\n");
}

void e1000_pci_clear_mwi(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;

	pci_clear_mwi(adapter->pdev);
}

int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;

	return pcix_get_mmrbc(adapter->pdev);
}

void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
{
	struct e1000_adapter *adapter = hw->back;

	pcix_set_mmrbc(adapter->pdev, mmrbc);
}

void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
{
	outl(value, port);
}
static bool e1000_vlan_used(struct e1000_adapter *adapter)
{
	u16 vid;

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		return true;
	return false;
}

static void __e1000_vlan_mode(struct e1000_adapter *adapter,
			      netdev_features_t features)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl;

	ctrl = er32(CTRL);
	if (features & NETIF_F_HW_VLAN_RX) {
		/* enable VLAN tag insert/strip */
		ctrl |= E1000_CTRL_VME;
	} else {
		/* disable VLAN tag insert/strip */
		ctrl &= ~E1000_CTRL_VME;
	}
	ew32(CTRL, ctrl);
}
static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
				     bool filter_on)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_disable(adapter);

	__e1000_vlan_mode(adapter, adapter->netdev->features);
	if (filter_on) {
		/* enable VLAN receive filtering */
		rctl = er32(RCTL);
		rctl &= ~E1000_RCTL_CFIEN;
		if (!(adapter->netdev->flags & IFF_PROMISC))
			rctl |= E1000_RCTL_VFE;
		ew32(RCTL, rctl);
		e1000_update_mng_vlan(adapter);
	} else {
		/* disable VLAN receive filtering */
		rctl = er32(RCTL);
		rctl &= ~E1000_RCTL_VFE;
		ew32(RCTL, rctl);
	}

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_enable(adapter);
}
static void e1000_vlan_mode(struct net_device *netdev,
			    netdev_features_t features)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_disable(adapter);

	__e1000_vlan_mode(adapter, features);

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_enable(adapter);
}
static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	    (vid == adapter->mng_vlan_id))
		return 0;

	if (!e1000_vlan_used(adapter))
		e1000_vlan_filter_on_off(adapter, true);

	/* add VID to filter table */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
	vfta |= (1 << (vid & 0x1F));
	e1000_write_vfta(hw, index, vfta);
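	/* The VFTA is an array of 128 32-bit registers, one bit per VLAN ID:
	 * vid >> 5 selects the register and vid & 0x1F the bit within it.
	 * For vid 100, for example, the index is 3 and the mask is 1 << 4.
	 */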
	set_bit(vid, adapter->active_vlans);

	return 0;
}
static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_disable(adapter);
	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_enable(adapter);

	/* remove VID from filter table */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
	vfta &= ~(1 << (vid & 0x1F));
	e1000_write_vfta(hw, index, vfta);

	clear_bit(vid, adapter->active_vlans);

	if (!e1000_vlan_used(adapter))
		e1000_vlan_filter_on_off(adapter, false);

	return 0;
}
static void e1000_restore_vlan(struct e1000_adapter *adapter)
{
	u16 vid;

	if (!e1000_vlan_used(adapter))
		return;

	e1000_vlan_filter_on_off(adapter, true);
	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		e1000_vlan_rx_add_vid(adapter->netdev, vid);
}
int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
{
	struct e1000_hw *hw = &adapter->hw;

	hw->autoneg = 0;

	/* Make sure dplx is at most 1 bit and lsb of speed is not set
	 * for the switch() below to work
	 */
	if ((spd & 1) || (dplx & ~1))
		goto err_inval;
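	/* spd + dplx identifies a setting uniquely because SPEED_10,
	 * SPEED_100 and SPEED_1000 are the even values 10, 100 and 1000
	 * while DUPLEX_HALF and DUPLEX_FULL are 0 and 1, so the possible
	 * sums 10, 11, 100, 101, 1000 and 1001 never collide.
	 */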
	/* Fiber NICs only allow 1000 Mbps full duplex */
	if ((hw->media_type == e1000_media_type_fiber) &&
	    (spd != SPEED_1000 || dplx != DUPLEX_FULL))
		goto err_inval;

	switch (spd + dplx) {
	case SPEED_10 + DUPLEX_HALF:
		hw->forced_speed_duplex = e1000_10_half;
		break;
	case SPEED_10 + DUPLEX_FULL:
		hw->forced_speed_duplex = e1000_10_full;
		break;
	case SPEED_100 + DUPLEX_HALF:
		hw->forced_speed_duplex = e1000_100_half;
		break;
	case SPEED_100 + DUPLEX_FULL:
		hw->forced_speed_duplex = e1000_100_full;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		hw->autoneg = 1;
		hw->autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
		goto err_inval;
	}

	return 0;

err_inval:
	e_err(probe, "Unsupported Speed/Duplex configuration\n");
	return -EINVAL;
}
static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, ctrl_ext, rctl, status;
	u32 wufc = adapter->wol;
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
		e1000_down(adapter);
	}

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

	status = er32(STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		e1000_setup_rctl(adapter);
		e1000_set_rx_mode(netdev);

		rctl = er32(RCTL);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC)
			rctl |= E1000_RCTL_MPE;

		/* enable receives in the hardware */
		ew32(RCTL, rctl | E1000_RCTL_EN);

		if (hw->mac_type >= e1000_82540) {
			ctrl = er32(CTRL);
			/* advertise wake from D3Cold */
			#define E1000_CTRL_ADVD3WUC 0x00100000
			/* phy power management enable */
			#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
			ctrl |= E1000_CTRL_ADVD3WUC |
				E1000_CTRL_EN_PHY_PWR_MGMT;
			ew32(CTRL, ctrl);
		}

		if (hw->media_type == e1000_media_type_fiber ||
		    hw->media_type == e1000_media_type_internal_serdes) {
			/* keep the laser running in D3 */
			ctrl_ext = er32(CTRL_EXT);
			ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
			ew32(CTRL_EXT, ctrl_ext);
		}

		ew32(WUC, E1000_WUC_PME_EN);
		ew32(WUFC, wufc);
	} else {
		ew32(WUC, 0);
		ew32(WUFC, 0);
	}

	e1000_release_manageability(adapter);

	*enable_wake = !!wufc;

	/* make sure adapter isn't asleep if manageability is enabled */
	if (adapter->en_mng_pt)
		*enable_wake = true;

	if (netif_running(netdev))
		e1000_free_irq(adapter);

	pci_disable_device(pdev);

	return 0;
}
#ifdef CONFIG_PM
static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int retval;
	bool wake;

	retval = __e1000_shutdown(pdev, &wake);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}
static int e1000_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (adapter->need_ioport)
		err = pci_enable_device(pdev);
	else
		err = pci_enable_device_mem(pdev);
	if (err) {
		pr_err("Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(netdev)) {
		err = e1000_request_irq(adapter);
		if (err)
			return err;
	}

	e1000_power_up_phy(adapter);
	e1000_reset(adapter);
	ew32(WUS, ~0);

	e1000_init_manageability(adapter);

	if (netif_running(netdev))
		e1000_up(adapter);

	netif_device_attach(netdev);

	return 0;
}
#endif
static void e1000_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__e1000_shutdown(pdev, &wake);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void e1000_netpoll(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	disable_irq(adapter->pdev->irq);
	e1000_intr(adapter->pdev->irq, netdev);
	enable_irq(adapter->pdev->irq);
}
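/* Disabling the IRQ line around the direct e1000_intr() call gives netpoll
 * the same mutual exclusion the hardware normally provides, so the handler
 * can run safely even when the kernel cannot take interrupts.
 */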
#endif

/**
 * e1000_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		e1000_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * e1000_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the e1000_resume routine.
 */
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	if (adapter->need_ioport)
		err = pci_enable_device(pdev);
	else
		err = pci_enable_device_mem(pdev);
	if (err) {
		pr_err("Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	e1000_reset(adapter);
	ew32(WUS, ~0);

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * e1000_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the e1000_resume routine.
 */
static void e1000_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	e1000_init_manageability(adapter);

	if (netif_running(netdev)) {
		if (e1000_up(adapter)) {
			pr_info("can't bring device back up after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
}