/*******************************************************************************

  Intel PRO/1000 Linux driver
  Copyright(c) 1999 - 2006 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
#include "e1000.h"
#include <net/ip6_checksum.h>
#include <linux/io.h>
#include <linux/prefetch.h>
#include <linux/bitops.h>
#include <linux/if_vlan.h>
char e1000_driver_name[] = "e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
#define DRV_VERSION "7.3.21-k8-NAPI"
const char e1000_driver_version[] = DRV_VERSION;
static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
/* e1000_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * Macro expands to...
 *   {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
 */
static const struct pci_device_id e1000_pci_tbl[] = {
	INTEL_E1000_ETHERNET_DEVICE(0x1000),
	INTEL_E1000_ETHERNET_DEVICE(0x1001),
	INTEL_E1000_ETHERNET_DEVICE(0x1004),
	INTEL_E1000_ETHERNET_DEVICE(0x1008),
	INTEL_E1000_ETHERNET_DEVICE(0x1009),
	INTEL_E1000_ETHERNET_DEVICE(0x100C),
	INTEL_E1000_ETHERNET_DEVICE(0x100D),
	INTEL_E1000_ETHERNET_DEVICE(0x100E),
	INTEL_E1000_ETHERNET_DEVICE(0x100F),
	INTEL_E1000_ETHERNET_DEVICE(0x1010),
	INTEL_E1000_ETHERNET_DEVICE(0x1011),
	INTEL_E1000_ETHERNET_DEVICE(0x1012),
	INTEL_E1000_ETHERNET_DEVICE(0x1013),
	INTEL_E1000_ETHERNET_DEVICE(0x1014),
	INTEL_E1000_ETHERNET_DEVICE(0x1015),
	INTEL_E1000_ETHERNET_DEVICE(0x1016),
	INTEL_E1000_ETHERNET_DEVICE(0x1017),
	INTEL_E1000_ETHERNET_DEVICE(0x1018),
	INTEL_E1000_ETHERNET_DEVICE(0x1019),
	INTEL_E1000_ETHERNET_DEVICE(0x101A),
	INTEL_E1000_ETHERNET_DEVICE(0x101D),
	INTEL_E1000_ETHERNET_DEVICE(0x101E),
	INTEL_E1000_ETHERNET_DEVICE(0x1026),
	INTEL_E1000_ETHERNET_DEVICE(0x1027),
	INTEL_E1000_ETHERNET_DEVICE(0x1028),
	INTEL_E1000_ETHERNET_DEVICE(0x1075),
	INTEL_E1000_ETHERNET_DEVICE(0x1076),
	INTEL_E1000_ETHERNET_DEVICE(0x1077),
	INTEL_E1000_ETHERNET_DEVICE(0x1078),
	INTEL_E1000_ETHERNET_DEVICE(0x1079),
	INTEL_E1000_ETHERNET_DEVICE(0x107A),
	INTEL_E1000_ETHERNET_DEVICE(0x107B),
	INTEL_E1000_ETHERNET_DEVICE(0x107C),
	INTEL_E1000_ETHERNET_DEVICE(0x108A),
	INTEL_E1000_ETHERNET_DEVICE(0x1099),
	INTEL_E1000_ETHERNET_DEVICE(0x10B5),
	INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
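/* Exposing the table via MODULE_DEVICE_TABLE() lets userspace (depmod and
 * modprobe via udev) match a hot-plugged 8086:<device_id> PCI function
 * against these IDs and autoload the module; the PCI core also uses it to
 * bind e1000_probe() to matching devices.
 */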
int e1000_up(struct e1000_adapter *adapter);
void e1000_down(struct e1000_adapter *adapter);
void e1000_reinit_locked(struct e1000_adapter *adapter);
void e1000_reset(struct e1000_adapter *adapter);
int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *txdr);
static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rxdr);
static void e1000_free_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *tx_ring);
static void e1000_free_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rx_ring);
void e1000_update_stats(struct e1000_adapter *adapter);
static int e1000_init_module(void);
static void e1000_exit_module(void);
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void e1000_remove(struct pci_dev *pdev);
static int e1000_alloc_queues(struct e1000_adapter *adapter);
static int e1000_sw_init(struct e1000_adapter *adapter);
static int e1000_open(struct net_device *netdev);
static int e1000_close(struct net_device *netdev);
static void e1000_configure_tx(struct e1000_adapter *adapter);
static void e1000_configure_rx(struct e1000_adapter *adapter);
static void e1000_setup_rctl(struct e1000_adapter *adapter);
static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
				struct e1000_tx_ring *tx_ring);
static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
				struct e1000_rx_ring *rx_ring);
static void e1000_set_rx_mode(struct net_device *netdev);
static void e1000_update_phy_info_task(struct work_struct *work);
static void e1000_watchdog(struct work_struct *work);
static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev);
static struct net_device_stats *e1000_get_stats(struct net_device *netdev);
static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
static int e1000_set_mac(struct net_device *netdev, void *p);
static irqreturn_t e1000_intr(int irq, void *data);
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
			       struct e1000_tx_ring *tx_ring);
static int e1000_clean(struct napi_struct *napi, int budget);
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       struct e1000_rx_ring *rx_ring,
			       int *work_done, int work_to_do);
static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
				     struct e1000_rx_ring *rx_ring,
				     int *work_done, int work_to_do);
static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter,
					 struct e1000_rx_ring *rx_ring,
					 int cleaned_count);
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   struct e1000_rx_ring *rx_ring,
				   int cleaned_count);
static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
					 struct e1000_rx_ring *rx_ring,
					 int cleaned_count);
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd);
static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
static void e1000_tx_timeout(struct net_device *dev);
static void e1000_reset_task(struct work_struct *work);
static void e1000_smartspeed(struct e1000_adapter *adapter);
static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
				       struct sk_buff *skb);

static bool e1000_vlan_used(struct e1000_adapter *adapter);
static void e1000_vlan_mode(struct net_device *netdev,
			    netdev_features_t features);
static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
				     bool filter_on);
static int e1000_vlan_rx_add_vid(struct net_device *netdev,
				 __be16 proto, u16 vid);
static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
				  __be16 proto, u16 vid);
static void e1000_restore_vlan(struct e1000_adapter *adapter);
#ifdef CONFIG_PM
static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
static int e1000_resume(struct pci_dev *pdev);
#endif
static void e1000_shutdown(struct pci_dev *pdev);

#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void e1000_netpoll(struct net_device *netdev);
#endif
#define COPYBREAK_DEFAULT 256
static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state);
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
static void e1000_io_resume(struct pci_dev *pdev);

static const struct pci_error_handlers e1000_err_handler = {
	.error_detected = e1000_io_error_detected,
	.slot_reset = e1000_io_slot_reset,
	.resume = e1000_io_resume,
};
static struct pci_driver e1000_driver = {
	.name     = e1000_driver_name,
	.id_table = e1000_pci_tbl,
	.probe    = e1000_probe,
	.remove   = e1000_remove,
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend  = e1000_suspend,
	.resume   = e1000_resume,
#endif
	.shutdown = e1000_shutdown,
	.err_handler = &e1000_err_handler
};
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
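/* A note on the parameter semantics: netif_msg_init() (called from
 * e1000_probe) falls back to DEFAULT_MSG_ENABLE for negative values, so
 * debug=-1 means "driver default"; otherwise debug acts as a level where
 * level n turns on the first n NETIF_MSG_* classes (16 enables them all).
 */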
/**
 * e1000_get_hw_dev - return device
 * used by hardware layer to print debugging information
 *
 **/
struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;
	return adapter->netdev;
}
/**
 * e1000_init_module - Driver Registration Routine
 *
 * e1000_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init e1000_init_module(void)
{
	int ret;
	pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);

	pr_info("%s\n", e1000_copyright);

	ret = pci_register_driver(&e1000_driver);
	if (copybreak != COPYBREAK_DEFAULT) {
		if (copybreak == 0)
			pr_info("copybreak disabled\n");
		else
			pr_info("copybreak enabled for "
				"packets <= %u bytes\n", copybreak);
	}
	return ret;
}

module_init(e1000_init_module);
/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit e1000_exit_module(void)
{
	pci_unregister_driver(&e1000_driver);
}

module_exit(e1000_exit_module);
static int e1000_request_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	irq_handler_t handler = e1000_intr;
	int irq_flags = IRQF_SHARED;
	int err;

	err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
			  netdev);
	if (err) {
		e_err(probe, "Unable to allocate interrupt Error: %d\n", err);
	}

	return err;
}

static void e1000_free_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	free_irq(adapter->pdev->irq, netdev);
}
/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void e1000_irq_disable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(IMC, ~0);
	E1000_WRITE_FLUSH();
	synchronize_irq(adapter->pdev->irq);
}

/**
 * e1000_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void e1000_irq_enable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(IMS, IMS_ENABLE_MASK);
	E1000_WRITE_FLUSH();
}
static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u16 vid = hw->mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (!e1000_vlan_used(adapter))
		return;

	if (!test_bit(vid, adapter->active_vlans)) {
		if (hw->mng_cookie.status &
		    E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
			e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
			adapter->mng_vlan_id = vid;
		} else {
			adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
		}
		if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
		    (vid != old_vid) &&
		    !test_bit(old_vid, adapter->active_vlans))
			e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
					       old_vid);
	} else {
		adapter->mng_vlan_id = vid;
	}
}
static void e1000_init_manageability(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->en_mng_pt) {
		u32 manc = er32(MANC);

		/* disable hardware interception of ARP */
		manc &= ~(E1000_MANC_ARP_EN);

		ew32(MANC, manc);
	}
}

static void e1000_release_manageability(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->en_mng_pt) {
		u32 manc = er32(MANC);

		/* re-enable hardware interception of ARP */
		manc |= E1000_MANC_ARP_EN;

		ew32(MANC, manc);
	}
}
/**
 * e1000_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void e1000_configure(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	e1000_set_rx_mode(netdev);

	e1000_restore_vlan(adapter);
	e1000_init_manageability(adapter);

	e1000_configure_tx(adapter);
	e1000_setup_rctl(adapter);
	e1000_configure_rx(adapter);
	/* call E1000_DESC_UNUSED which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean
	 */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct e1000_rx_ring *ring = &adapter->rx_ring[i];
		adapter->alloc_rx_buf(adapter, ring,
				      E1000_DESC_UNUSED(ring));
	}
}
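/* E1000_DESC_UNUSED (see e1000.h) reports the free ring slots while always
 * holding one descriptor back: e.g. an empty 256-entry ring with
 * next_to_use == next_to_clean == 0 reports 255, guaranteeing that a fully
 * stocked ring never makes next_to_use catch up with next_to_clean.
 */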
int e1000_up(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* hardware has been reset, we need to reload some things */
	e1000_configure(adapter);

	clear_bit(__E1000_DOWN, &adapter->flags);

	napi_enable(&adapter->napi);

	e1000_irq_enable(adapter);

	netif_wake_queue(adapter->netdev);

	/* fire a link change interrupt to start the watchdog */
	ew32(ICS, E1000_ICS_LSC);
	return 0;
}
/**
 * e1000_power_up_phy - restore link in case the phy was powered down
 * @adapter: address of board private structure
 *
 * The phy may be powered down to save power and turn off link when the
 * driver is unloaded and wake on lan is not enabled (among others)
 * *** this routine MUST be followed by a call to e1000_reset ***
 **/
void e1000_power_up_phy(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 mii_reg = 0;

	/* Just clear the power down bit to wake the phy back up */
	if (hw->media_type == e1000_media_type_copper) {
		/* according to the manual, the phy will retain its
		 * settings across a power-down/up cycle
		 */
		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
		mii_reg &= ~MII_CR_POWER_DOWN;
		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
	}
}
static void e1000_power_down_phy(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* Power down the PHY so no link is implied when interface is down *
	 * The PHY cannot be powered down if any of the following is true *
	 * (a) WoL is enabled
	 * (b) AMT is active
	 * (c) SoL/IDER session is active
	 */
	if (!adapter->wol && hw->mac_type >= e1000_82540 &&
	    hw->media_type == e1000_media_type_copper) {
		u16 mii_reg = 0;

		switch (hw->mac_type) {
		case e1000_82540:
		case e1000_82545:
		case e1000_82545_rev_3:
		case e1000_82546:
		case e1000_ce4100:
		case e1000_82546_rev_3:
		case e1000_82541:
		case e1000_82541_rev_2:
		case e1000_82547:
		case e1000_82547_rev_2:
			if (er32(MANC) & E1000_MANC_SMBUS_EN)
				goto out;
			break;
		default:
			goto out;
		}
		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
		mii_reg |= MII_CR_POWER_DOWN;
		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
		msleep(1);
	}
out:
	return;
}
static void e1000_down_and_stop(struct e1000_adapter *adapter)
{
	set_bit(__E1000_DOWN, &adapter->flags);

	cancel_delayed_work_sync(&adapter->watchdog_task);

	/*
	 * Since the watchdog task can reschedule other tasks, we should cancel
	 * it first, otherwise we can run into the situation when a work is
	 * still running after the adapter has been turned down.
	 */

	cancel_delayed_work_sync(&adapter->phy_info_task);
	cancel_delayed_work_sync(&adapter->fifo_stall_task);

	/* Only kill reset task if adapter is not resetting */
	if (!test_bit(__E1000_RESETTING, &adapter->flags))
		cancel_work_sync(&adapter->reset_task);
}
void e1000_down(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 rctl, tctl;

	netif_carrier_off(netdev);

	/* disable receives in the hardware */
	rctl = er32(RCTL);
	ew32(RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_disable(netdev);

	/* disable transmits in the hardware */
	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_EN;
	ew32(TCTL, tctl);
	/* flush both disables and wait for them to finish */
	E1000_WRITE_FLUSH();
	msleep(10);

	napi_disable(&adapter->napi);

	e1000_irq_disable(adapter);

	/* Setting DOWN must be after irq_disable to prevent
	 * a screaming interrupt.  Setting DOWN also prevents
	 * tasks from rescheduling.
	 */
	e1000_down_and_stop(adapter);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	e1000_reset(adapter);
	e1000_clean_all_tx_rings(adapter);
	e1000_clean_all_rx_rings(adapter);
}
void e1000_reinit_locked(struct e1000_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);
	e1000_down(adapter);
	e1000_up(adapter);
	clear_bit(__E1000_RESETTING, &adapter->flags);
}
void e1000_reset(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
	bool legacy_pba_adjust = false;
	u16 hwm;

	/* Repartition Pba for greater than 9k mtu
	 * To take effect CTRL.RST is required.
	 */

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82543:
	case e1000_82544:
	case e1000_82540:
	case e1000_82541:
	case e1000_82541_rev_2:
		legacy_pba_adjust = true;
		pba = E1000_PBA_48K;
		break;
	case e1000_82545:
	case e1000_82545_rev_3:
	case e1000_82546:
	case e1000_ce4100:
	case e1000_82546_rev_3:
		pba = E1000_PBA_48K;
		break;
	case e1000_82547:
	case e1000_82547_rev_2:
		legacy_pba_adjust = true;
		pba = E1000_PBA_30K;
		break;
	case e1000_undefined:
	case e1000_num_macs:
		break;
	}

	if (legacy_pba_adjust) {
		if (hw->max_frame_size > E1000_RXBUFFER_8192)
			pba -= 8; /* allocate more FIFO for Tx */

		if (hw->mac_type == e1000_82547) {
			adapter->tx_fifo_head = 0;
			adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
			adapter->tx_fifo_size =
				(E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
			atomic_set(&adapter->tx_fifo_stall, 0);
		}
	} else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
		/* adjust PBA for jumbo frames */
		ew32(PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB.  Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB.
		 */
		pba = er32(PBA);
		/* upper 16 bits has Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits has Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/* the Tx fifo also stores 16 bytes of information about the Tx
		 * but don't include ethernet FCS because hardware appends it
		 */
		min_tx_space = (hw->max_frame_size +
				sizeof(struct e1000_tx_desc) -
				ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		/* software strips receive CRC, so leave room for it */
		min_rx_space = hw->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation
		 */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* PCI/PCIx hardware has PBA alignment constraints */
			switch (hw->mac_type) {
			case e1000_82545 ... e1000_82546_rev_3:
				pba &= ~(E1000_PBA_8K - 1);
				break;
			default:
				break;
			}

			/* if short on Rx space, Rx wins and must trump Tx
			 * adjustment or use Early Receive if available
			 */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}
	}

	ew32(PBA, pba);

	/* flow control settings:
	 * The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, and
	 * - the full Rx FIFO size minus the early receive size (for parts
	 *   with ERT support assuming ERT set to E1000_ERT_2048), or
	 * - the full Rx FIFO size minus one full frame
	 */
	hwm = min(((pba << 10) * 9 / 10),
		  ((pba << 10) - hw->max_frame_size));
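	/* Worked example, assuming the common 48KB packet buffer and a
	 * standard 1518-byte max frame: pba << 10 = 49152 bytes of Rx FIFO,
	 * 90% of which is 44236, while 49152 - 1518 = 47634; hwm takes the
	 * lower value, 44236, and the 8-byte masking below then yields
	 * fc_high_water = 44232 and fc_low_water = 44224.
	 */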
	hw->fc_high_water = hwm & 0xFFF8;	/* 8-byte granularity */
	hw->fc_low_water = hw->fc_high_water - 8;
	hw->fc_pause_time = E1000_FC_PAUSE_TIME;
	hw->fc_send_xon = 1;
	hw->fc = hw->original_fc;

	/* Allow time for pending master requests to run */
	e1000_reset_hw(hw);
	if (hw->mac_type >= e1000_82544)
		ew32(WUC, 0);

	if (e1000_init_hw(hw))
		e_dev_err("Hardware Error\n");
	e1000_update_mng_vlan(adapter);

	/* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
	if (hw->mac_type >= e1000_82544 &&
	    hw->autoneg == 1 &&
	    hw->autoneg_advertised == ADVERTISE_1000_FULL) {
		u32 ctrl = er32(CTRL);
		/* clear phy power management bit if we are in gig only mode,
		 * which if enabled will attempt negotiation to 100Mb, which
		 * can cause a loss of link at power off or driver unload
		 */
		ctrl &= ~E1000_CTRL_SWDPIN3;
		ew32(CTRL, ctrl);
	}

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	ew32(VET, ETHERNET_IEEE_VLAN_TYPE);

	e1000_reset_adaptive(hw);
	e1000_phy_get_info(hw, &adapter->phy_info);

	e1000_release_manageability(adapter);
}
/* Dump the eeprom for users having checksum issues */
static void e1000_dump_eeprom(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ethtool_eeprom eeprom;
	const struct ethtool_ops *ops = netdev->ethtool_ops;
	u8 *data;
	int i;
	u16 csum_old, csum_new = 0;

	eeprom.len = ops->get_eeprom_len(netdev);
	eeprom.offset = 0;

	data = kmalloc(eeprom.len, GFP_KERNEL);
	if (!data)
		return;

	ops->get_eeprom(netdev, &eeprom, data);

	csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
		   (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
	for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
		csum_new += data[i] + (data[i + 1] << 8);
	csum_new = EEPROM_SUM - csum_new;

	pr_err("/*********************/\n");
	pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
	pr_err("Calculated              : 0x%04x\n", csum_new);

	pr_err("Offset    Values\n");
	pr_err("========  ======\n");
	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);

	pr_err("Include this output when contacting your support provider.\n");
	pr_err("This is not a software error! Something bad happened to\n");
	pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
	pr_err("result in further problems, possibly loss of data,\n");
	pr_err("corruption or system hangs!\n");
	pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
	pr_err("which is invalid and requires you to set the proper MAC\n");
	pr_err("address manually before continuing to enable this network\n");
	pr_err("device. Please inspect the EEPROM dump and report the\n");
	pr_err("issue to your hardware vendor or Intel Customer Support.\n");
	pr_err("/*********************/\n");

	kfree(data);
}
/**
 * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
 * @pdev: PCI device information struct
 *
 * Return true if an adapter needs ioport resources
 **/
static int e1000_is_need_ioport(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case E1000_DEV_ID_82540EM:
	case E1000_DEV_ID_82540EM_LOM:
	case E1000_DEV_ID_82540EP:
	case E1000_DEV_ID_82540EP_LOM:
	case E1000_DEV_ID_82540EP_LP:
	case E1000_DEV_ID_82541EI:
	case E1000_DEV_ID_82541EI_MOBILE:
	case E1000_DEV_ID_82541ER:
	case E1000_DEV_ID_82541ER_LOM:
	case E1000_DEV_ID_82541GI:
	case E1000_DEV_ID_82541GI_LF:
	case E1000_DEV_ID_82541GI_MOBILE:
	case E1000_DEV_ID_82544EI_COPPER:
	case E1000_DEV_ID_82544EI_FIBER:
	case E1000_DEV_ID_82544GC_COPPER:
	case E1000_DEV_ID_82544GC_LOM:
	case E1000_DEV_ID_82545EM_COPPER:
	case E1000_DEV_ID_82545EM_FIBER:
	case E1000_DEV_ID_82546EB_COPPER:
	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546EB_QUAD_COPPER:
		return true;
	default:
		return false;
	}
}
static netdev_features_t e1000_fix_features(struct net_device *netdev,
	netdev_features_t features)
{
	/* Since there is no support for separate Rx/Tx vlan accel
	 * enable/disable make sure Tx flag is always in same state as Rx.
	 */
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		features |= NETIF_F_HW_VLAN_CTAG_TX;
	else
		features &= ~NETIF_F_HW_VLAN_CTAG_TX;

	return features;
}
static int e1000_set_features(struct net_device *netdev,
	netdev_features_t features)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
		e1000_vlan_mode(netdev, features);

	if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL)))
		return 0;

	netdev->features = features;
	adapter->rx_csum = !!(features & NETIF_F_RXCSUM);

	if (netif_running(netdev))
		e1000_reinit_locked(adapter);
	else
		e1000_reset(adapter);

	return 0;
}
static const struct net_device_ops e1000_netdev_ops = {
	.ndo_open		= e1000_open,
	.ndo_stop		= e1000_close,
	.ndo_start_xmit		= e1000_xmit_frame,
	.ndo_get_stats		= e1000_get_stats,
	.ndo_set_rx_mode	= e1000_set_rx_mode,
	.ndo_set_mac_address	= e1000_set_mac,
	.ndo_tx_timeout		= e1000_tx_timeout,
	.ndo_change_mtu		= e1000_change_mtu,
	.ndo_do_ioctl		= e1000_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= e1000_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= e1000_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= e1000_netpoll,
#endif
	.ndo_fix_features	= e1000_fix_features,
	.ndo_set_features	= e1000_set_features,
};
/**
 * e1000_init_hw_struct - initialize members of hw struct
 * @adapter: board private struct
 * @hw: structure used by e1000_hw.c
 *
 * Factors out initialization of the e1000_hw struct to its own function
 * that can be called very early at init (just after struct allocation).
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 * Returns negative error codes if MAC type setup fails.
 */
static int e1000_init_hw_struct(struct e1000_adapter *adapter,
				struct e1000_hw *hw)
{
	struct pci_dev *pdev = adapter->pdev;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_id = pdev->subsystem_device;
	hw->revision_id = pdev->revision;

	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);

	hw->max_frame_size = adapter->netdev->mtu +
			     ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
	hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
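	/* For example, with the default MTU of 1500 this works out to
	 * 1500 + 14 (Ethernet header) + 4 (FCS) = 1518 bytes, assuming the
	 * usual ENET_HEADER_SIZE and ETHERNET_FCS_SIZE values.
	 */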
	/* identify the MAC */
	if (e1000_set_mac_type(hw)) {
		e_err(probe, "Unknown MAC Type\n");
		return -EIO;
	}

	switch (hw->mac_type) {
	default:
		break;
	case e1000_82541:
	case e1000_82547:
	case e1000_82541_rev_2:
	case e1000_82547_rev_2:
		hw->phy_init_script = 1;
		break;
	}

	e1000_set_media_type(hw);
	e1000_get_bus_info(hw);

	hw->wait_autoneg_complete = false;
	hw->tbi_compatibility_en = true;
	hw->adaptive_ifs = true;

	/* Copper options */

	if (hw->media_type == e1000_media_type_copper) {
		hw->mdix = AUTO_ALL_MODES;
		hw->disable_polarity_correction = false;
		hw->master_slave = E1000_MASTER_SLAVE;
	}

	return 0;
}
/**
 * e1000_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in e1000_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * e1000_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct e1000_adapter *adapter;
	struct e1000_hw *hw;

	static int cards_found;
	static int global_quad_port_a; /* global ksp3 port a indication */
	int i, err, pci_using_dac;
	u16 eeprom_data = 0;
	u16 tmp = 0;
	u16 eeprom_apme_mask = E1000_EEPROM_APME;
	int bars, need_ioport;
	/* do not allocate ioport bars when not needed */
	need_ioport = e1000_is_need_ioport(pdev);
	if (need_ioport) {
		bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
		err = pci_enable_device(pdev);
	} else {
		bars = pci_select_bars(pdev, IORESOURCE_MEM);
		err = pci_enable_device_mem(pdev);
	}
	if (err)
		return err;

	err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
	if (err)
		goto err_pci_reg;

	pci_set_master(pdev);
	err = pci_save_state(pdev);
	if (err)
		goto err_alloc_etherdev;

	err = -ENOMEM;
	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
	adapter->bars = bars;
	adapter->need_ioport = need_ioport;
	hw = &adapter->hw;
	hw->back = adapter;

	err = -EIO;
	hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
	if (!hw->hw_addr)
		goto err_ioremap;

	if (adapter->need_ioport) {
		for (i = BAR_1; i <= BAR_5; i++) {
			if (pci_resource_len(pdev, i) == 0)
				continue;
			if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
				hw->io_base = pci_resource_start(pdev, i);
				break;
			}
		}
	}

	/* make ready for any if (hw->...) below */
	err = e1000_init_hw_struct(adapter, hw);
	if (err)
		goto err_sw_init;

	/* there is a workaround being applied below that limits
	 * 64-bit DMA addresses to 64-bit hardware.  There are some
	 * 32-bit adapters that Tx hang when given 64-bit DMA addresses
	 */
	pci_using_dac = 0;
	if ((hw->bus_type == e1000_bus_type_pcix) &&
	    !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			pr_err("No usable DMA config, aborting\n");
			goto err_dma;
		}
	}
	netdev->netdev_ops = &e1000_netdev_ops;
	e1000_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);

	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	adapter->bd_number = cards_found;

	/* setup the private structure */
	err = e1000_sw_init(adapter);
	if (err)
		goto err_sw_init;

	err = -EIO;
	if (hw->mac_type == e1000_ce4100) {
		hw->ce4100_gbe_mdio_base_virt =
					ioremap(pci_resource_start(pdev, BAR_1),
						pci_resource_len(pdev, BAR_1));

		if (!hw->ce4100_gbe_mdio_base_virt)
			goto err_mdio_ioremap;
	}

	if (hw->mac_type >= e1000_82543) {
		netdev->hw_features = NETIF_F_SG |
				      NETIF_F_HW_CSUM |
				      NETIF_F_HW_VLAN_CTAG_RX;
		netdev->features = NETIF_F_HW_VLAN_CTAG_TX |
				   NETIF_F_HW_VLAN_CTAG_FILTER;
	}

	if ((hw->mac_type >= e1000_82544) &&
	    (hw->mac_type != e1000_82547))
		netdev->hw_features |= NETIF_F_TSO;

	netdev->priv_flags |= IFF_SUPP_NOFCS;

	netdev->features |= netdev->hw_features;
	netdev->hw_features |= (NETIF_F_RXCSUM |
				NETIF_F_RXALL |
				NETIF_F_RXFCS);

	if (pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	netdev->vlan_features |= (NETIF_F_TSO |
				  NETIF_F_HW_CSUM |
				  NETIF_F_SG);

	/* Do not set IFF_UNICAST_FLT for VMWare's 82545EM */
	if (hw->device_id != E1000_DEV_ID_82545EM_COPPER ||
	    hw->subsystem_vendor_id != PCI_VENDOR_ID_VMWARE)
		netdev->priv_flags |= IFF_UNICAST_FLT;
	adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);

	/* initialize eeprom parameters */
	if (e1000_init_eeprom_params(hw)) {
		e_err(probe, "EEPROM initialization failed\n");
		goto err_eeprom;
	}

	/* before reading the EEPROM, reset the controller to
	 * put the device in a known good starting state
	 */

	e1000_reset_hw(hw);

	/* make sure the EEPROM is good */
	if (e1000_validate_eeprom_checksum(hw) < 0) {
		e_err(probe, "The EEPROM Checksum Is Not Valid\n");
		e1000_dump_eeprom(adapter);
		/* set MAC address to all zeroes to invalidate and temporary
		 * disable this device for the user. This blocks regular
		 * traffic while still permitting ethtool ioctls from reaching
		 * the hardware as well as allowing the user to run the
		 * interface after manually setting a hw addr using
		 * `ip set address`
		 */
		memset(hw->mac_addr, 0, netdev->addr_len);
	} else {
		/* copy the MAC address out of the EEPROM */
		if (e1000_read_mac_addr(hw))
			e_err(probe, "EEPROM Read Error\n");
	}
	/* don't block initialization here due to bad MAC address */
	memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->dev_addr))
		e_err(probe, "Invalid MAC Address\n");

	INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
	INIT_DELAYED_WORK(&adapter->fifo_stall_task,
			  e1000_82547_tx_fifo_stall_task);
	INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
	INIT_WORK(&adapter->reset_task, e1000_reset_task);
	e1000_check_options(adapter);

	/* Initial Wake on LAN setting
	 * If APM wake is enabled in the EEPROM,
	 * enable the ACPI Magic Packet filter
	 */

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82543:
		break;
	case e1000_82544:
		e1000_read_eeprom(hw,
			EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
		eeprom_apme_mask = E1000_EEPROM_82544_APM;
		break;
	case e1000_82546:
	case e1000_82546_rev_3:
		if (er32(STATUS) & E1000_STATUS_FUNC_1) {
			e1000_read_eeprom(hw,
				EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
			break;
		}
		/* Fall Through */
	default:
		e1000_read_eeprom(hw,
			EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		break;
	}
	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/* now that we have the eeprom settings, apply the special cases
	 * where the eeprom may be wrong or the board simply won't support
	 * wake on lan on a particular port
	 */
	switch (pdev->device) {
	case E1000_DEV_ID_82546GB_PCIE:
		adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546GB_FIBER:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting
		 */
		if (er32(STATUS) & E1000_STATUS_FUNC_1)
			adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
		/* if quad port adapter, disable WoL on all but port A */
		if (global_quad_port_a != 0)
			adapter->eeprom_wol = 0;
		else
			adapter->quad_port_a = true;
		/* Reset for multiple quad port adapters */
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
		break;
	}
	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* Auto detect PHY address */
	if (hw->mac_type == e1000_ce4100) {
		for (i = 0; i < 32; i++) {
			hw->phy_addr = i;
			e1000_read_phy_reg(hw, PHY_ID2, &tmp);

			if (tmp != 0 && tmp != 0xFF)
				break;
		}

		if (i >= 32)
			goto err_eeprom;
	}

	/* reset the hardware with the new settings */
	e1000_reset(adapter);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	e1000_vlan_filter_on_off(adapter, false);

	/* print bus type/speed/width info */
	e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
	       ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
	       ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
		(hw->bus_speed == e1000_bus_speed_120) ? 120 :
		(hw->bus_speed == e1000_bus_speed_100) ? 100 :
		(hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
	       ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
	       netdev->dev_addr);

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	e_info(probe, "Intel(R) PRO/1000 Network Connection\n");
	cards_found++;
	return 0;

err_register:
err_eeprom:
	e1000_phy_hw_reset(hw);

	if (hw->flash_address)
		iounmap(hw->flash_address);
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
err_dma:
err_sw_init:
err_mdio_ioremap:
	iounmap(hw->ce4100_gbe_mdio_base_virt);
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev, bars);
err_pci_reg:
	pci_disable_device(pdev);
	return err;
}
/**
 * e1000_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * e1000_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. That could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void e1000_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	e1000_down_and_stop(adapter);
	e1000_release_manageability(adapter);

	unregister_netdev(netdev);

	e1000_phy_hw_reset(hw);

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	if (hw->mac_type == e1000_ce4100)
		iounmap(hw->ce4100_gbe_mdio_base_virt);
	iounmap(hw->hw_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_selected_regions(pdev, adapter->bars);

	free_netdev(netdev);

	pci_disable_device(pdev);
}
/**
 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
 * @adapter: board private structure to initialize
 *
 * e1000_sw_init initializes the Adapter private data structure.
 * e1000_init_hw_struct MUST be called before this function
 **/
static int e1000_sw_init(struct e1000_adapter *adapter)
{
	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;

	adapter->num_tx_queues = 1;
	adapter->num_rx_queues = 1;

	if (e1000_alloc_queues(adapter)) {
		e_err(probe, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	/* Explicitly disable IRQ since the NIC can be in any state. */
	e1000_irq_disable(adapter);

	spin_lock_init(&adapter->stats_lock);

	set_bit(__E1000_DOWN, &adapter->flags);

	return 0;
}
/**
 * e1000_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int e1000_alloc_queues(struct e1000_adapter *adapter)
{
	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
				   sizeof(struct e1000_tx_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		return -ENOMEM;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
				   sizeof(struct e1000_rx_ring), GFP_KERNEL);
	if (!adapter->rx_ring) {
		kfree(adapter->tx_ring);
		return -ENOMEM;
	}

	return E1000_SUCCESS;
}
/**
 * e1000_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog task is started,
 * and the stack is notified that the interface is ready.
 **/
static int e1000_open(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* disallow open during test */
	if (test_bit(__E1000_TESTING, &adapter->flags))
		return -EBUSY;

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = e1000_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = e1000_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	e1000_power_up_phy(adapter);

	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
		e1000_update_mng_vlan(adapter);
	}

	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to setup our
	 * clean_rx handler before we do so.
	 */
	e1000_configure(adapter);

	err = e1000_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* From here on the code is the same as e1000_up() */
	clear_bit(__E1000_DOWN, &adapter->flags);

	napi_enable(&adapter->napi);

	e1000_irq_enable(adapter);

	netif_start_queue(netdev);

	/* fire a link status change interrupt to start the watchdog */
	ew32(ICS, E1000_ICS_LSC);

	return E1000_SUCCESS;

err_req_irq:
	e1000_power_down_phy(adapter);
	e1000_free_all_rx_resources(adapter);
err_setup_rx:
	e1000_free_all_tx_resources(adapter);
err_setup_tx:
	e1000_reset(adapter);

	return err;
}
/**
 * e1000_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the drivers control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int e1000_close(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int count = E1000_CHECK_RESET_COUNT;

	while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
		usleep_range(10000, 20000);

	WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
	e1000_down(adapter);
	e1000_power_down_phy(adapter);
	e1000_free_irq(adapter);

	e1000_free_all_tx_resources(adapter);
	e1000_free_all_rx_resources(adapter);

	/* kill manageability vlan ID if supported, but not if a vlan with
	 * the same ID is registered on the host OS (let 8021q kill it)
	 */
	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	    !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
		e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
				       adapter->mng_vlan_id);
	}

	return 0;
}
/**
 * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
 * @adapter: address of board private structure
 * @start: address of beginning of memory
 * @len: length of memory
 **/
static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
				  unsigned long len)
{
	struct e1000_hw *hw = &adapter->hw;
	unsigned long begin = (unsigned long)start;
	unsigned long end = begin + len;

	/* First rev 82545 and 82546 need to not allow any memory
	 * write location to cross 64k boundary due to errata 23
	 */
	if (hw->mac_type == e1000_82545 ||
	    hw->mac_type == e1000_ce4100 ||
	    hw->mac_type == e1000_82546) {
		return ((begin ^ (end - 1)) >> 16) != 0 ? false : true;
	}

	return true;
}
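/* Illustration of the XOR test above: a 4 KiB block at 0x0f000 ends at
 * 0x0ffff and begin ^ (end - 1) = 0x00fff, so no bit at or above bit 16
 * differs and the block stays inside one 64 KiB region; the same block at
 * 0x0f800 ends at 0x107ff, the XOR sets bit 16, and the allocation would
 * cross the boundary.
 */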
/**
 * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @txdr: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *txdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct e1000_tx_buffer) * txdr->count;
	txdr->buffer_info = vzalloc(size);
	if (!txdr->buffer_info)
		return -ENOMEM;

	/* round up to nearest 4K */

	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
	txdr->size = ALIGN(txdr->size, 4096);

	txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
					GFP_KERNEL);
	if (!txdr->desc) {
setup_tx_desc_die:
		vfree(txdr->buffer_info);
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
	if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
		void *olddesc = txdr->desc;
		dma_addr_t olddma = txdr->dma;
		e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
		      txdr->size, txdr->desc);
		/* Try again, without freeing the previous */
		txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
						&txdr->dma, GFP_KERNEL);
		/* Failed allocation, critical failure */
		if (!txdr->desc) {
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
			goto setup_tx_desc_die;
		}

		if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
			/* give up */
			dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
					  txdr->dma);
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
			e_err(probe, "Unable to allocate aligned memory "
			      "for the transmit descriptor ring\n");
			vfree(txdr->buffer_info);
			return -ENOMEM;
		} else {
			/* Free old allocation, new allocation was successful */
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
		}
	}
	memset(txdr->desc, 0, txdr->size);

	txdr->next_to_use = 0;
	txdr->next_to_clean = 0;

	return 0;
}
/**
 * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
 *				  (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
		if (err) {
			e_err(probe, "Allocation for Tx Queue %u failed\n", i);
			for (i-- ; i >= 0; i--)
				e1000_free_tx_resources(adapter,
							&adapter->tx_ring[i]);
			break;
		}
	}

	return err;
}
/**
 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void e1000_configure_tx(struct e1000_adapter *adapter)
{
	u64 tdba;
	struct e1000_hw *hw = &adapter->hw;
	u32 tdlen, tctl, tipg;
	u32 ipgr1, ipgr2;

	/* Setup the HW Tx Head and Tail descriptor pointers */

	switch (adapter->num_tx_queues) {
	case 1:
	default:
		tdba = adapter->tx_ring[0].dma;
		tdlen = adapter->tx_ring[0].count *
			sizeof(struct e1000_tx_desc);
		ew32(TDLEN, tdlen);
		ew32(TDBAH, (tdba >> 32));
		ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
		ew32(TDT, 0);
		ew32(TDH, 0);
		adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ?
					   E1000_TDH : E1000_82542_TDH);
		adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ?
					   E1000_TDT : E1000_82542_TDT);
		break;
	}

	/* Set the default values for the Tx Inter Packet Gap timer */
	if ((hw->media_type == e1000_media_type_fiber ||
	     hw->media_type == e1000_media_type_internal_serdes))
		tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
	else
		tipg = DEFAULT_82543_TIPG_IPGT_COPPER;

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
		tipg = DEFAULT_82542_TIPG_IPGT;
		ipgr1 = DEFAULT_82542_TIPG_IPGR1;
		ipgr2 = DEFAULT_82542_TIPG_IPGR2;
		break;
	default:
		ipgr1 = DEFAULT_82543_TIPG_IPGR1;
		ipgr2 = DEFAULT_82543_TIPG_IPGR2;
		break;
	}
	tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
	tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
	ew32(TIPG, tipg);

	/* Set the Tx Interrupt Delay register */

	ew32(TIDV, adapter->tx_int_delay);
	if (hw->mac_type >= e1000_82540)
		ew32(TADV, adapter->tx_abs_int_delay);

	/* Program the Transmit Control Register */

	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

	e1000_config_collision_dist(hw);

	/* Setup Transmit Descriptor Settings for eop descriptor */
	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;

	/* only set IDE if we are delaying interrupts using the timers */
	if (adapter->tx_int_delay)
		adapter->txd_cmd |= E1000_TXD_CMD_IDE;

	if (hw->mac_type < e1000_82543)
		adapter->txd_cmd |= E1000_TXD_CMD_RPS;
	else
		adapter->txd_cmd |= E1000_TXD_CMD_RS;

	/* Cache if we're 82544 running in PCI-X because we'll
	 * need this to apply a workaround later in the send path.
	 */
	if (hw->mac_type == e1000_82544 &&
	    hw->bus_type == e1000_bus_type_pcix)
		adapter->pcix_82544 = true;

	ew32(TCTL, tctl);
}
/**
 * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rxdr: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rxdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size, desc_len;

	size = sizeof(struct e1000_rx_buffer) * rxdr->count;
	rxdr->buffer_info = vzalloc(size);
	if (!rxdr->buffer_info)
		return -ENOMEM;

	desc_len = sizeof(struct e1000_rx_desc);

	/* Round up to nearest 4K */

	rxdr->size = rxdr->count * desc_len;
	rxdr->size = ALIGN(rxdr->size, 4096);

	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
					GFP_KERNEL);
	if (!rxdr->desc) {
setup_rx_desc_die:
		vfree(rxdr->buffer_info);
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
	if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
		void *olddesc = rxdr->desc;
		dma_addr_t olddma = rxdr->dma;
		e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
		      rxdr->size, rxdr->desc);
		/* Try again, without freeing the previous */
		rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
						&rxdr->dma, GFP_KERNEL);
		/* Failed allocation, critical failure */
		if (!rxdr->desc) {
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
			goto setup_rx_desc_die;
		}

		if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
			/* give up */
			dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
					  rxdr->dma);
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
			e_err(probe, "Unable to allocate aligned memory for "
			      "the Rx descriptor ring\n");
			goto setup_rx_desc_die;
		} else {
			/* Free old allocation, new allocation was successful */
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
		}
	}
	memset(rxdr->desc, 0, rxdr->size);

	rxdr->next_to_clean = 0;
	rxdr->next_to_use = 0;
	rxdr->rx_skb_top = NULL;

	return 0;
}
/**
 * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
 *				  (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
		if (err) {
			e_err(probe, "Allocation for Rx Queue %u failed\n", i);
			for (i-- ; i >= 0; i--)
				e1000_free_rx_resources(adapter,
							&adapter->rx_ring[i]);
			break;
		}
	}

	return err;
}
/**
 * e1000_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 **/
static void e1000_setup_rctl(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	rctl = er32(RCTL);

	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);

	rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
		E1000_RCTL_RDMTS_HALF |
		(hw->mc_filter_type << E1000_RCTL_MO_SHIFT);

	if (hw->tbi_compatibility_on == 1)
		rctl |= E1000_RCTL_SBP;
	else
		rctl &= ~E1000_RCTL_SBP;

	if (adapter->netdev->mtu <= ETH_DATA_LEN)
		rctl &= ~E1000_RCTL_LPE;
	else
		rctl |= E1000_RCTL_LPE;

	/* Setup buffer sizes */
	rctl &= ~E1000_RCTL_SZ_4096;
	rctl |= E1000_RCTL_BSEX;
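	/* Per the 8254x documentation, the RCTL buffer-size field selects
	 * 2048/1024/512/256 bytes when BSEX is clear and the same encodings
	 * scaled by 16 (up to 16384 bytes) when BSEX is set, which is why
	 * the 2048 case below clears BSEX while the larger sizes keep it.
	 */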
	switch (adapter->rx_buffer_len) {
	case E1000_RXBUFFER_2048:
	default:
		rctl |= E1000_RCTL_SZ_2048;
		rctl &= ~E1000_RCTL_BSEX;
		break;
	case E1000_RXBUFFER_4096:
		rctl |= E1000_RCTL_SZ_4096;
		break;
	case E1000_RXBUFFER_8192:
		rctl |= E1000_RCTL_SZ_8192;
		break;
	case E1000_RXBUFFER_16384:
		rctl |= E1000_RCTL_SZ_16384;
		break;
	}

	/* This is useful for sniffing bad packets. */
	if (adapter->netdev->features & NETIF_F_RXALL) {
		/* UPE and MPE will be handled by normal PROMISC logic
		 * in e1000e_set_rx_mode
		 */
		rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
			 E1000_RCTL_BAM | /* RX All Bcast Pkts */
			 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */

		rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
			  E1000_RCTL_DPF | /* Allow filtered pause */
			  E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
		/* Do not mess with E1000_CTRL_VME, it affects transmit as well,
		 * and that breaks VLANs.
		 */
	}

	ew32(RCTL, rctl);
}
/**
 * e1000_configure_rx - Configure 8254x Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void e1000_configure_rx(struct e1000_adapter *adapter)
{
	u64 rdba;
	struct e1000_hw *hw = &adapter->hw;
	u32 rdlen, rctl, rxcsum;

	if (adapter->netdev->mtu > ETH_DATA_LEN) {
		rdlen = adapter->rx_ring[0].count *
			sizeof(struct e1000_rx_desc);
		adapter->clean_rx = e1000_clean_jumbo_rx_irq;
		adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
	} else {
		rdlen = adapter->rx_ring[0].count *
			sizeof(struct e1000_rx_desc);
		adapter->clean_rx = e1000_clean_rx_irq;
		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
	}

	/* disable receives while setting up the descriptors */
	rctl = er32(RCTL);
	ew32(RCTL, rctl & ~E1000_RCTL_EN);

	/* set the Receive Delay Timer Register */
	ew32(RDTR, adapter->rx_int_delay);

	if (hw->mac_type >= e1000_82540) {
		ew32(RADV, adapter->rx_abs_int_delay);
		if (adapter->itr_setting != 0)
			ew32(ITR, 1000000000 / (adapter->itr * 256));
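		/* The ITR register counts in 256 ns units, so the write above
		 * converts adapter->itr (interrupts/sec) into an interval:
		 * e.g. itr = 20000 gives 10^9 / (20000 * 256) = 195 units,
		 * roughly a 50 us minimum gap between Rx interrupts.
		 */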
	}

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	switch (adapter->num_rx_queues) {
	case 1:
	default:
		rdba = adapter->rx_ring[0].dma;
		ew32(RDLEN, rdlen);
		ew32(RDBAH, (rdba >> 32));
		ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
		ew32(RDT, 0);
		ew32(RDH, 0);
		adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ?
					   E1000_RDH : E1000_82542_RDH);
		adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ?
					   E1000_RDT : E1000_82542_RDT);
		break;
	}

	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
	if (hw->mac_type >= e1000_82543) {
		rxcsum = er32(RXCSUM);
		if (adapter->rx_csum)
			rxcsum |= E1000_RXCSUM_TUOFL;
		else
			/* don't need to clear IPPCSE as it defaults to 0 */
			rxcsum &= ~E1000_RXCSUM_TUOFL;
		ew32(RXCSUM, rxcsum);
	}

	/* Enable Receives */
	ew32(RCTL, rctl | E1000_RCTL_EN);
}
/**
 * e1000_free_tx_resources - Free Tx Resources per Queue
 * @adapter: board private structure
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
static void e1000_free_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	e1000_clean_tx_ring(adapter, tx_ring);

	vfree(tx_ring->buffer_info);
	tx_ring->buffer_info = NULL;

	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
			  tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * e1000_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
}
static void
e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
				 struct e1000_tx_buffer *buffer_info)
{
	if (buffer_info->dma) {
		if (buffer_info->mapped_as_page)
			dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
				       buffer_info->length, DMA_TO_DEVICE);
		else
			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
					 buffer_info->length,
					 DMA_TO_DEVICE);
		buffer_info->dma = 0;
	}
	if (buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
	buffer_info->time_stamp = 0;
	/* buffer_info must be completely set up in the transmit path */
}
/**
 * e1000_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 * @tx_ring: ring to be cleaned
 **/
static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
				struct e1000_tx_ring *tx_ring)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_tx_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	/* Free all the Tx ring sk_buffs */

	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
	}

	netdev_reset_queue(adapter->netdev);
	size = sizeof(struct e1000_tx_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */

	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->last_tx_tso = false;

	writel(0, hw->hw_addr + tx_ring->tdh);
	writel(0, hw->hw_addr + tx_ring->tdt);
}
/**
 * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
}
/**
 * e1000_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
static void e1000_free_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	e1000_clean_rx_ring(adapter, rx_ring);

	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;

	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
			  rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * e1000_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
}
#define E1000_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN)
static unsigned int e1000_frag_len(const struct e1000_adapter *a)
{
	return SKB_DATA_ALIGN(a->rx_buffer_len + E1000_HEADROOM) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

static void *e1000_alloc_frag(const struct e1000_adapter *a)
{
	unsigned int len = e1000_frag_len(a);
	u8 *data = netdev_alloc_frag(len);

	if (likely(data))
		data += E1000_HEADROOM;
	return data;
}
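/* The frag handed out above is laid out as
 * [E1000_HEADROOM | rx_buffer_len packet bytes | struct skb_shared_info],
 * sized so the receive path can later wrap it into an skb without copying
 * (build_skb-style); the pointer is advanced past the headroom so callers
 * can map it for DMA directly.
 */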
/**
 * e1000_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 * @rx_ring: ring to free buffers from
 **/
static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
				struct e1000_rx_ring *rx_ring)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_rx_buffer *buffer_info;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	/* Free all the Rx netfrags */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (adapter->clean_rx == e1000_clean_rx_irq) {
			if (buffer_info->dma)
				dma_unmap_single(&pdev->dev, buffer_info->dma,
						 adapter->rx_buffer_len,
						 DMA_FROM_DEVICE);
			if (buffer_info->rxbuf.data) {
				skb_free_frag(buffer_info->rxbuf.data);
				buffer_info->rxbuf.data = NULL;
			}
		} else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
			if (buffer_info->dma)
				dma_unmap_page(&pdev->dev, buffer_info->dma,
					       adapter->rx_buffer_len,
					       DMA_FROM_DEVICE);
			if (buffer_info->rxbuf.page) {
				put_page(buffer_info->rxbuf.page);
				buffer_info->rxbuf.page = NULL;
			}
		}

		buffer_info->dma = 0;
	}

	/* there also may be some cached data from a chained receive */
	napi_free_frags(&adapter->napi);
	rx_ring->rx_skb_top = NULL;

	size = sizeof(struct e1000_rx_buffer) * rx_ring->count;
	memset(rx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	writel(0, hw->hw_addr + rx_ring->rdh);
	writel(0, hw->hw_addr + rx_ring->rdt);
}
/**
 * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
}
/* The 82542 2.0 (revision 2) needs to have the receive unit in reset
 * and memory write and invalidate disabled for certain operations
 */
static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 rctl;

	e1000_pci_clear_mwi(hw);

	rctl = er32(RCTL);
	rctl |= E1000_RCTL_RST;
	ew32(RCTL, rctl);
	E1000_WRITE_FLUSH();
	mdelay(5);

	if (netif_running(netdev))
		e1000_clean_all_rx_rings(adapter);
}
static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 rctl;

	rctl = er32(RCTL);
	rctl &= ~E1000_RCTL_RST;
	ew32(RCTL, rctl);
	E1000_WRITE_FLUSH();
	mdelay(5);

	if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
		e1000_pci_set_mwi(hw);

	if (netif_running(netdev)) {
		/* No need to loop, because 82542 supports only 1 queue */
		struct e1000_rx_ring *ring = &adapter->rx_ring[0];
		e1000_configure_rx(adapter);
		adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
	}
}
/**
 * e1000_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int e1000_set_mac(struct net_device *netdev, void *p)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* 82542 2.0 needs to be in reset to write receive address registers */

	if (hw->mac_type == e1000_82542_rev2_0)
		e1000_enter_82542_rst(adapter);

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);

	e1000_rar_set(hw, hw->mac_addr, 0);

	if (hw->mac_type == e1000_82542_rev2_0)
		e1000_leave_82542_rst(adapter);

	return 0;
}
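
/* Uncompiled sketch of how e1000_rar_set() above lands a 6-byte MAC in a
 * receive address register pair.  The packing shown is an assumption based
 * on the RAL/RAH layout (low 32 bits of the address, then the last two
 * bytes plus the E1000_RAH_AV "address valid" bit), and the helper name is
 * hypothetical.  This pairing is also why the clearing loop in
 * e1000_set_rx_mode() below writes the RA array at (i << 1) and
 * (i << 1) + 1.
 */
#if 0
static void e1000_rar_pack_sketch(const u8 *addr, u32 *ral, u32 *rah)
{
	*ral = ((u32)addr[0]) | ((u32)addr[1] << 8) |
	       ((u32)addr[2] << 16) | ((u32)addr[3] << 24);
	*rah = ((u32)addr[4]) | ((u32)addr[5] << 8) | E1000_RAH_AV;
}
#endif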
/**
 * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_rx_mode entry point is called whenever the unicast or multicast
 * address lists or the network interface flags are updated. This routine is
 * responsible for configuring the hardware for proper unicast, multicast,
 * promiscuous mode, and all-multi behavior.
 **/
static void e1000_set_rx_mode(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	bool use_uc = false;
	u32 rctl;
	u32 hash_value;
	int i, rar_entries = E1000_RAR_ENTRIES;
	int mta_reg_count = E1000_NUM_MTA_REGISTERS;
	u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);

	if (!mcarray)
		return;

	/* Check for Promiscuous and All Multicast modes */

	rctl = er32(RCTL);

	if (netdev->flags & IFF_PROMISC) {
		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		rctl &= ~E1000_RCTL_VFE;
	} else {
		if (netdev->flags & IFF_ALLMULTI)
			rctl |= E1000_RCTL_MPE;
		else
			rctl &= ~E1000_RCTL_MPE;
		/* Enable VLAN filter if there is a VLAN */
		if (e1000_vlan_used(adapter))
			rctl |= E1000_RCTL_VFE;
	}

	if (netdev_uc_count(netdev) > rar_entries - 1) {
		rctl |= E1000_RCTL_UPE;
	} else if (!(netdev->flags & IFF_PROMISC)) {
		rctl &= ~E1000_RCTL_UPE;
		use_uc = true;
	}

	ew32(RCTL, rctl);

	/* 82542 2.0 needs to be in reset to write receive address registers */

	if (hw->mac_type == e1000_82542_rev2_0)
		e1000_enter_82542_rst(adapter);

	/* load the first 14 addresses into the exact filters 1-14. Unicast
	 * addresses take precedence to avoid disabling unicast filtering
	 * when possible.
	 *
	 * RAR 0 is used for the station MAC address
	 * if there are not 14 addresses, go ahead and clear the filters
	 */
	i = 1;
	if (use_uc)
		netdev_for_each_uc_addr(ha, netdev) {
			if (i == rar_entries)
				break;
			e1000_rar_set(hw, ha->addr, i++);
		}

	netdev_for_each_mc_addr(ha, netdev) {
		if (i == rar_entries) {
			/* load any remaining addresses into the hash table */
			u32 hash_reg, hash_bit, mta;
			hash_value = e1000_hash_mc_addr(hw, ha->addr);
			hash_reg = (hash_value >> 5) & 0x7F;
			hash_bit = hash_value & 0x1F;
			mta = (1 << hash_bit);
			mcarray[hash_reg] |= mta;
		} else {
			e1000_rar_set(hw, ha->addr, i++);
		}
	}

	for (; i < rar_entries; i++) {
		E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
		E1000_WRITE_FLUSH();
		E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
		E1000_WRITE_FLUSH();
	}

	/* write the hash table completely, write from bottom to avoid
	 * both stupid write combining chipsets, and flushing each write
	 */
	for (i = mta_reg_count - 1; i >= 0 ; i--) {
		/* The 82544 has an errata where writing odd offsets
		 * overwrites the previous even offset, but writing
		 * backwards over the range solves the issue by always
		 * writing the odd offset first
		 */
		E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
	}
	E1000_WRITE_FLUSH();

	if (hw->mac_type == e1000_82542_rev2_0)
		e1000_leave_82542_rst(adapter);

	kfree(mcarray);
}
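
/* Uncompiled worked example of the hash-table fallback above; the helper
 * name is hypothetical.  For a 12-bit hash_value of 0x563 from
 * e1000_hash_mc_addr():
 *   hash_reg = (0x563 >> 5) & 0x7F = 0x2B  (which of the 128 MTA regs)
 *   hash_bit =  0x563       & 0x1F = 0x03  (which bit inside that reg)
 * so the frame is accepted once bit 3 of MTA[0x2B] is set.
 */
#if 0
static void e1000_mta_slot_sketch(u32 hash_value, u32 *reg, u32 *bit)
{
	*reg = (hash_value >> 5) & 0x7F;
	*bit = hash_value & 0x1F;
}
#endif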
/**
 * e1000_update_phy_info_task - get phy info
 * @work: work struct contained inside adapter struct
 *
 * Need to wait a few seconds after link up to get diagnostic information from
 * the phy
 **/
static void e1000_update_phy_info_task(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
						     struct e1000_adapter,
						     phy_info_task.work);

	e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
}
/**
 * e1000_82547_tx_fifo_stall_task - task to complete work
 * @work: work struct contained inside adapter struct
 **/
static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
						     struct e1000_adapter,
						     fifo_stall_task.work);
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 tctl;

	if (atomic_read(&adapter->tx_fifo_stall)) {
		if ((er32(TDT) == er32(TDH)) &&
		    (er32(TDFT) == er32(TDFH)) &&
		    (er32(TDFTS) == er32(TDFHS))) {
			tctl = er32(TCTL);
			ew32(TCTL, tctl & ~E1000_TCTL_EN);
			ew32(TDFT, adapter->tx_head_addr);
			ew32(TDFH, adapter->tx_head_addr);
			ew32(TDFTS, adapter->tx_head_addr);
			ew32(TDFHS, adapter->tx_head_addr);
			ew32(TCTL, tctl);
			E1000_WRITE_FLUSH();

			adapter->tx_fifo_head = 0;
			atomic_set(&adapter->tx_fifo_stall, 0);
			netif_wake_queue(netdev);
		} else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
			schedule_delayed_work(&adapter->fifo_stall_task, 1);
		}
	}
}
bool e1000_has_link(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	bool link_active = false;

	/* get_link_status is set on LSC (link status) interrupt or rx
	 * sequence error interrupt (except on intel ce4100).
	 * get_link_status will stay false until the
	 * e1000_check_for_link establishes link for copper adapters
	 * ONLY
	 */
	switch (hw->media_type) {
	case e1000_media_type_copper:
		if (hw->mac_type == e1000_ce4100)
			hw->get_link_status = 1;
		if (hw->get_link_status) {
			e1000_check_for_link(hw);
			link_active = !hw->get_link_status;
		} else {
			link_active = true;
		}
		break;
	case e1000_media_type_fiber:
		e1000_check_for_link(hw);
		link_active = !!(er32(STATUS) & E1000_STATUS_LU);
		break;
	case e1000_media_type_internal_serdes:
		e1000_check_for_link(hw);
		link_active = hw->serdes_has_link;
		break;
	default:
		break;
	}

	return link_active;
}
/**
 * e1000_watchdog - work function
 * @work: work struct contained inside adapter struct
 **/
static void e1000_watchdog(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
						     struct e1000_adapter,
						     watchdog_task.work);
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct e1000_tx_ring *txdr = adapter->tx_ring;
	u32 link, tctl;

	link = e1000_has_link(adapter);
	if ((netif_carrier_ok(netdev)) && link)
		goto link_up;

	if (link) {
		if (!netif_carrier_ok(netdev)) {
			u32 ctrl;

			/* update snapshot of PHY registers on LSC */
			e1000_get_speed_and_duplex(hw,
						   &adapter->link_speed,
						   &adapter->link_duplex);

			ctrl = er32(CTRL);
			pr_info("%s NIC Link is Up %d Mbps %s, "
				"Flow Control: %s\n",
				netdev->name,
				adapter->link_speed,
				adapter->link_duplex == FULL_DUPLEX ?
				"Full Duplex" : "Half Duplex",
				((ctrl & E1000_CTRL_TFCE) && (ctrl &
				E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
				E1000_CTRL_RFCE) ? "RX" : ((ctrl &
				E1000_CTRL_TFCE) ? "TX" : "None")));

			/* adjust timeout factor according to speed/duplex */
			adapter->tx_timeout_factor = 1;
			switch (adapter->link_speed) {
			case SPEED_10:
				adapter->tx_timeout_factor = 16;
				break;
			case SPEED_100:
				/* maybe add some timeout factor ? */
				break;
			}

			/* enable transmits in the hardware */
			tctl = er32(TCTL);
			tctl |= E1000_TCTL_EN;
			ew32(TCTL, tctl);

			netif_carrier_on(netdev);
			if (!test_bit(__E1000_DOWN, &adapter->flags))
				schedule_delayed_work(&adapter->phy_info_task,
						      2 * HZ);
			adapter->smartspeed = 0;
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;
			pr_info("%s NIC Link is Down\n",
				netdev->name);
			netif_carrier_off(netdev);

			if (!test_bit(__E1000_DOWN, &adapter->flags))
				schedule_delayed_work(&adapter->phy_info_task,
						      2 * HZ);
		}

		e1000_smartspeed(adapter);
	}

link_up:
	e1000_update_stats(adapter);

	hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
	adapter->tpt_old = adapter->stats.tpt;
	hw->collision_delta = adapter->stats.colc - adapter->colc_old;
	adapter->colc_old = adapter->stats.colc;

	adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
	adapter->gorcl_old = adapter->stats.gorcl;
	adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
	adapter->gotcl_old = adapter->stats.gotcl;

	e1000_update_adaptive(hw);

	if (!netif_carrier_ok(netdev)) {
		if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context).
			 */
			adapter->tx_timeout_count++;
			schedule_work(&adapter->reset_task);
			/* exit immediately since reset is imminent */
			return;
		}
	}

	/* Simple mode for Interrupt Throttle Rate (ITR) */
	if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
		/* Symmetric Tx/Rx gets a reduced ITR=2000;
		 * Total asymmetrical Tx or Rx gets ITR=8000;
		 * everyone else is between 2000-8000.
		 */
		u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
		u32 dif = (adapter->gotcl > adapter->gorcl ?
			   adapter->gotcl - adapter->gorcl :
			   adapter->gorcl - adapter->gotcl) / 10000;
		u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;

		ew32(ITR, 1000000000 / (itr * 256));
	}

	/* Cause software interrupt to ensure rx ring is cleaned */
	ew32(ICS, E1000_ICS_RXDMT0);

	/* Force detection of hung controller every watchdog period */
	adapter->detect_tx_hung = true;

	/* Reschedule the task */
	if (!test_bit(__E1000_DOWN, &adapter->flags))
		schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
}
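
/* Uncompiled sketch of the simple-mode ITR policy above, pulled out for
 * clarity (the helper name is hypothetical).  Perfectly symmetric Tx/Rx
 * byte counts give dif == 0 and therefore itr == 2000; completely
 * one-sided traffic gives dif == goc and therefore itr == 8000; mixes
 * interpolate linearly in between.  The register write then converts
 * interrupts/second into the hardware's 256 ns units:
 * 1000000000 / (itr * 256).
 */
#if 0
static u32 e1000_simple_itr_sketch(u32 gotcl, u32 gorcl)
{
	u32 goc = (gotcl + gorcl) / 10000;
	u32 dif = (gotcl > gorcl ? gotcl - gorcl : gorcl - gotcl) / 10000;

	return goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
}
#endif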
enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};
/**
 * e1000_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @itr_setting: current adapter->itr
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 * This functionality is controlled by the InterruptThrottleRate module
 * parameter (see e1000_param.c)
 **/
static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
				     u16 itr_setting, int packets, int bytes)
{
	unsigned int retval = itr_setting;
	struct e1000_hw *hw = &adapter->hw;

	if (unlikely(hw->mac_type < e1000_82540))
		goto update_itr_done;

	if (packets == 0)
		goto update_itr_done;

	switch (itr_setting) {
	case lowest_latency:
		/* jumbo frames get bulk treatment */
		if (bytes/packets > 8000)
			retval = bulk_latency;
		else if ((packets < 5) && (bytes > 512))
			retval = low_latency;
		break;
	case low_latency: /* 50 usec aka 20000 ints/s */
		if (bytes > 10000) {
			/* jumbo frames need bulk latency setting */
			if (bytes/packets > 8000)
				retval = bulk_latency;
			else if ((packets < 10) || ((bytes/packets) > 1200))
				retval = bulk_latency;
			else if ((packets > 35))
				retval = lowest_latency;
		} else if (bytes/packets > 2000)
			retval = bulk_latency;
		else if (packets <= 2 && bytes < 512)
			retval = lowest_latency;
		break;
	case bulk_latency: /* 250 usec aka 4000 ints/s */
		if (bytes > 25000) {
			if (packets > 35)
				retval = low_latency;
		} else if (bytes < 6000) {
			retval = low_latency;
		}
		break;
	}

update_itr_done:
	return retval;
}
static void e1000_set_itr(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 current_itr;
	u32 new_itr = adapter->itr;

	if (unlikely(hw->mac_type < e1000_82540))
		return;

	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
	if (unlikely(adapter->link_speed != SPEED_1000)) {
		current_itr = 0;
		new_itr = 4000;
		goto set_itr_now;
	}

	adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr,
					   adapter->total_tx_packets,
					   adapter->total_tx_bytes);
	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
		adapter->tx_itr = low_latency;

	adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr,
					   adapter->total_rx_packets,
					   adapter->total_rx_bytes);
	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
		adapter->rx_itr = low_latency;

	current_itr = max(adapter->rx_itr, adapter->tx_itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 70000;
		break;
	case low_latency:
		new_itr = 20000; /* aka hwitr = ~200 */
		break;
	case bulk_latency:
		new_itr = 4000;
		break;
	default:
		break;
	}

set_itr_now:
	if (new_itr != adapter->itr) {
		/* this attempts to bias the interrupt rate towards Bulk
		 * by adding intermediate steps when interrupt rate is
		 * increasing
		 */
		new_itr = new_itr > adapter->itr ?
			  min(adapter->itr + (new_itr >> 2), new_itr) :
			  new_itr;
		adapter->itr = new_itr;
		ew32(ITR, 1000000000 / (new_itr * 256));
	}
}
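
/* Uncompiled sketch of the damping step above (hypothetical helper).
 * Raising the rate is gradual: moving from 4000 to a 20000 target takes
 * several passes (4000 -> 9000 -> 14000 -> 19000 -> 20000) because each
 * pass may only add a quarter of the target, while moves down toward
 * bulk are taken in a single step.
 */
#if 0
static u32 e1000_itr_step_sketch(u32 cur, u32 target)
{
	return target > cur ? min(cur + (target >> 2), target) : target;
}
#endif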
2692 #define E1000_TX_FLAGS_CSUM 0x00000001
2693 #define E1000_TX_FLAGS_VLAN 0x00000002
2694 #define E1000_TX_FLAGS_TSO 0x00000004
2695 #define E1000_TX_FLAGS_IPV4 0x00000008
2696 #define E1000_TX_FLAGS_NO_FCS 0x00000010
2697 #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
2698 #define E1000_TX_FLAGS_VLAN_SHIFT 16
static int e1000_tso(struct e1000_adapter *adapter,
		     struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
		     __be16 protocol)
{
	struct e1000_context_desc *context_desc;
	struct e1000_tx_buffer *buffer_info;
	unsigned int i;
	u32 cmd_length = 0;
	u16 ipcse = 0, tucse, mss;
	u8 ipcss, ipcso, tucss, tucso, hdr_len;

	if (skb_is_gso(skb)) {
		int err;

		err = skb_cow_head(skb, 0);
		if (err < 0)
			return err;

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		mss = skb_shinfo(skb)->gso_size;
		if (protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = ip_hdr(skb);
			iph->tot_len = 0;
			iph->check = 0;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			cmd_length = E1000_TXD_CMD_IP;
			ipcse = skb_transport_offset(skb) - 1;
		} else if (skb_is_gso_v6(skb)) {
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr,
						 0, IPPROTO_TCP, 0);
		}
		ipcss = skb_network_offset(skb);
		ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
		tucss = skb_transport_offset(skb);
		tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
		tucse = 0;

		cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
			       E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));

		i = tx_ring->next_to_use;
		context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
		buffer_info = &tx_ring->buffer_info[i];

		context_desc->lower_setup.ip_fields.ipcss = ipcss;
		context_desc->lower_setup.ip_fields.ipcso = ipcso;
		context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
		context_desc->upper_setup.tcp_fields.tucss = tucss;
		context_desc->upper_setup.tcp_fields.tucso = tucso;
		context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
		context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
		context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
		context_desc->cmd_and_length = cpu_to_le32(cmd_length);

		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;

		if (++i == tx_ring->count)
			i = 0;

		tx_ring->next_to_use = i;

		return true;
	}
	return false;
}
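
/* Uncompiled worked example of the context-descriptor offsets computed
 * above for the common untagged Ethernet + IPv4 + TCP case (14-byte L2
 * header, 20-byte IP header); the helper name is hypothetical.  All
 * offsets are from the start of the frame, which is why the code derives
 * them from skb_network_offset()/skb_transport_offset().
 */
#if 0
static void e1000_tso_offsets_sketch(u8 *ipcss, u8 *ipcso, u16 *ipcse,
				     u8 *tucss, u8 *tucso)
{
	*ipcss = 14;					/* start of IP header */
	*ipcso = 14 + offsetof(struct iphdr, check);	/* = 24 */
	*ipcse = 14 + 20 - 1;				/* = 33, last byte the IP csum covers */
	*tucss = 34;					/* start of TCP header */
	*tucso = 34 + offsetof(struct tcphdr, check);	/* = 50 */
}
#endif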
static bool e1000_tx_csum(struct e1000_adapter *adapter,
			  struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
			  __be16 protocol)
{
	struct e1000_context_desc *context_desc;
	struct e1000_tx_buffer *buffer_info;
	unsigned int i;
	u8 css;
	u32 cmd_len = E1000_TXD_CMD_DEXT;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return false;

	switch (protocol) {
	case cpu_to_be16(ETH_P_IP):
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			cmd_len |= E1000_TXD_CMD_TCP;
		break;
	case cpu_to_be16(ETH_P_IPV6):
		/* XXX not handling all IPV6 headers */
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			cmd_len |= E1000_TXD_CMD_TCP;
		break;
	default:
		if (unlikely(net_ratelimit()))
			e_warn(drv, "checksum_partial proto=%x!\n",
			       skb->protocol);
		break;
	}

	css = skb_checksum_start_offset(skb);

	i = tx_ring->next_to_use;
	buffer_info = &tx_ring->buffer_info[i];
	context_desc = E1000_CONTEXT_DESC(*tx_ring, i);

	context_desc->lower_setup.ip_config = 0;
	context_desc->upper_setup.tcp_fields.tucss = css;
	context_desc->upper_setup.tcp_fields.tucso =
		css + skb->csum_offset;
	context_desc->upper_setup.tcp_fields.tucse = 0;
	context_desc->tcp_seg_setup.data = 0;
	context_desc->cmd_and_length = cpu_to_le32(cmd_len);

	buffer_info->time_stamp = jiffies;
	buffer_info->next_to_watch = i;

	if (unlikely(++i == tx_ring->count))
		i = 0;

	tx_ring->next_to_use = i;

	return true;
}
2829 #define E1000_MAX_TXD_PWR 12
2830 #define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR)
static int e1000_tx_map(struct e1000_adapter *adapter,
			struct e1000_tx_ring *tx_ring,
			struct sk_buff *skb, unsigned int first,
			unsigned int max_per_txd, unsigned int nr_frags,
			unsigned int mss)
{
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_tx_buffer *buffer_info;
	unsigned int len = skb_headlen(skb);
	unsigned int offset = 0, size, count = 0, i;
	unsigned int f, bytecount, segs;

	i = tx_ring->next_to_use;

	while (len) {
		buffer_info = &tx_ring->buffer_info[i];
		size = min(len, max_per_txd);
		/* Workaround for Controller erratum --
		 * descriptor for non-tso packet in a linear SKB that follows a
		 * tso gets written back prematurely before the data is fully
		 * DMA'd to the controller
		 */
		if (!skb->data_len && tx_ring->last_tx_tso &&
		    !skb_is_gso(skb)) {
			tx_ring->last_tx_tso = false;
			size -= 4;
		}

		/* Workaround for premature desc write-backs
		 * in TSO mode.  Append 4-byte sentinel desc
		 */
		if (unlikely(mss && !nr_frags && size == len && size > 8))
			size -= 4;
		/* work-around for errata 10 and it applies
		 * to all controllers in PCI-X mode
		 * The fix is to make sure that the first descriptor of a
		 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
		 */
		if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
			     (size > 2015) && count == 0))
			size = 2015;

		/* Workaround for potential 82544 hang in PCI-X.  Avoid
		 * terminating buffers within evenly-aligned dwords.
		 */
		if (unlikely(adapter->pcix_82544 &&
		    !((unsigned long)(skb->data + offset + size - 1) & 4) &&
		    size > 4))
			size -= 4;

		buffer_info->length = size;
		/* set time_stamp *before* dma to help avoid a possible race */
		buffer_info->time_stamp = jiffies;
		buffer_info->mapped_as_page = false;
		buffer_info->dma = dma_map_single(&pdev->dev,
						  skb->data + offset,
						  size, DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
			goto dma_error;
		buffer_info->next_to_watch = i;

		len -= size;
		offset += size;
		count++;
		if (len) {
			i++;
			if (unlikely(i == tx_ring->count))
				i = 0;
		}
	}

	for (f = 0; f < nr_frags; f++) {
		const struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = skb_frag_size(frag);
		offset = 0;

		while (len) {
			unsigned long bufend;
			i++;
			if (unlikely(i == tx_ring->count))
				i = 0;

			buffer_info = &tx_ring->buffer_info[i];
			size = min(len, max_per_txd);
			/* Workaround for premature desc write-backs
			 * in TSO mode.  Append 4-byte sentinel desc
			 */
			if (unlikely(mss && f == (nr_frags-1) &&
			    size == len && size > 8))
				size -= 4;
			/* Workaround for potential 82544 hang in PCI-X.
			 * Avoid terminating buffers within evenly-aligned
			 * dwords.
			 */
			bufend = (unsigned long)
				page_to_phys(skb_frag_page(frag));
			bufend += offset + size - 1;
			if (unlikely(adapter->pcix_82544 &&
				     !(bufend & 4) &&
				     size > 4))
				size -= 4;

			buffer_info->length = size;
			buffer_info->time_stamp = jiffies;
			buffer_info->mapped_as_page = true;
			buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
							    offset, size,
							    DMA_TO_DEVICE);
			if (dma_mapping_error(&pdev->dev, buffer_info->dma))
				goto dma_error;
			buffer_info->next_to_watch = i;

			len -= size;
			offset += size;
			count++;
		}
	}

	segs = skb_shinfo(skb)->gso_segs ?: 1;
	/* multiply data chunks by size of headers */
	bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;

	tx_ring->buffer_info[i].skb = skb;
	tx_ring->buffer_info[i].segs = segs;
	tx_ring->buffer_info[i].bytecount = bytecount;
	tx_ring->buffer_info[first].next_to_watch = i;

	return count;

dma_error:
	dev_err(&pdev->dev, "TX DMA map failed\n");
	buffer_info->dma = 0;
	if (count)
		count--;

	while (count--) {
		if (i == 0)
			i += tx_ring->count;
		i--;
		buffer_info = &tx_ring->buffer_info[i];
		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
	}

	return 0;
}
static void e1000_tx_queue(struct e1000_adapter *adapter,
			   struct e1000_tx_ring *tx_ring, int tx_flags,
			   int count)
{
	struct e1000_tx_desc *tx_desc = NULL;
	struct e1000_tx_buffer *buffer_info;
	u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
	unsigned int i;

	if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
			     E1000_TXD_CMD_TSE;
		txd_upper |= E1000_TXD_POPTS_TXSM << 8;

		if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
			txd_upper |= E1000_TXD_POPTS_IXSM << 8;
	}

	if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
	}

	if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
		txd_lower |= E1000_TXD_CMD_VLE;
		txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
	}

	if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
		txd_lower &= ~(E1000_TXD_CMD_IFCS);

	i = tx_ring->next_to_use;

	while (count--) {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = E1000_TX_DESC(*tx_ring, i);
		tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->lower.data =
			cpu_to_le32(txd_lower | buffer_info->length);
		tx_desc->upper.data = cpu_to_le32(txd_upper);
		if (unlikely(++i == tx_ring->count))
			i = 0;
	}

	tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);

	/* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
	if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
		tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	tx_ring->next_to_use = i;
}
/* 82547 workaround to avoid controller hang in half-duplex environment.
 * The workaround is to avoid queuing a large packet that would span
 * the internal Tx FIFO ring boundary by notifying the stack to resend
 * the packet at a later time.  This gives the Tx FIFO an opportunity to
 * flush all packets.  When that occurs, we reset the Tx FIFO pointers
 * to the beginning of the Tx FIFO.
 */

#define E1000_FIFO_HDR			0x10
#define E1000_82547_PAD_LEN		0x3E0

static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
				       struct sk_buff *skb)
{
	u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
	u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;

	skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);

	if (adapter->link_duplex != HALF_DUPLEX)
		goto no_fifo_stall_required;

	if (atomic_read(&adapter->tx_fifo_stall))
		return 1;

	if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
		atomic_set(&adapter->tx_fifo_stall, 1);
		return 1;
	}

no_fifo_stall_required:
	adapter->tx_fifo_head += skb_fifo_len;
	if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
		adapter->tx_fifo_head -= adapter->tx_fifo_size;
	return 0;
}
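
/* Uncompiled example of the FIFO check above with concrete numbers; the
 * FIFO geometry is an assumption for illustration (the real tx_fifo_size
 * comes from the PBA setup at init time), and the helper name is
 * hypothetical.  With tx_fifo_size = 0x2800 and tx_fifo_head = 0x2000,
 * fifo_space = 0x800.  A 1514-byte frame becomes
 * ALIGN(1514 + 0x10, 0x10) = 0x600, and 0x600 < E1000_82547_PAD_LEN +
 * 0x800 (= 0xBE0), so it is queued and the head advances; a frame that
 * fails the test sets tx_fifo_stall and is retried after the FIFO drains.
 */
#if 0
static int e1000_fifo_check_sketch(u32 fifo_size, u32 fifo_head, u32 skb_len)
{
	u32 fifo_space = fifo_size - fifo_head;
	u32 skb_fifo_len = ALIGN(skb_len + E1000_FIFO_HDR, E1000_FIFO_HDR);

	return skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space);
}
#endif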
static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_tx_ring *tx_ring = adapter->tx_ring;

	netif_stop_queue(netdev);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it.
	 */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available.
	 */
	if (likely(E1000_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! */
	netif_start_queue(netdev);
	++adapter->restart_queue;
	return 0;
}

static int e1000_maybe_stop_tx(struct net_device *netdev,
			       struct e1000_tx_ring *tx_ring, int size)
{
	if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __e1000_maybe_stop_tx(netdev, size);
}
#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_tx_ring *tx_ring;
	unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
	unsigned int tx_flags = 0;
	unsigned int len = skb_headlen(skb);
	unsigned int nr_frags;
	unsigned int mss;
	int count = 0;
	int tso;
	unsigned int f;
	__be16 protocol = vlan_get_protocol(skb);

	/* This goes back to the question of how to logically map a Tx queue
	 * to a flow.  Right now, performance is impacted slightly negatively
	 * if using multiple Tx queues.  If the stack breaks away from a
	 * single qdisc implementation, we can look at this again.
	 */
	tx_ring = adapter->tx_ring;

	/* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN,
	 * packets may get corrupted during padding by HW.
	 * To WA this issue, pad all small packets manually.
	 */
	if (eth_skb_pad(skb))
		return NETDEV_TX_OK;

	mss = skb_shinfo(skb)->gso_size;
	/* The controller does a simple calculation to
	 * make sure there is enough room in the FIFO before
	 * initiating the DMA for each buffer.  The calc is:
	 * 4 = ceil(buffer len/mss).  To make sure we don't
	 * overrun the FIFO, adjust the max buffer len if mss
	 * drops.
	 */
	if (mss) {
		u8 hdr_len;
		max_per_txd = min(mss << 2, max_per_txd);
		max_txd_pwr = fls(max_per_txd) - 1;

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		if (skb->data_len && hdr_len == len) {
			switch (hw->mac_type) {
				unsigned int pull_size;
			case e1000_82544:
				/* Make sure we have room to chop off 4 bytes,
				 * and that the end alignment will work out to
				 * this hardware's requirements
				 * NOTE: this is a TSO only workaround
				 * if end byte alignment not correct move us
				 * into the next dword
				 */
				if ((unsigned long)(skb_tail_pointer(skb) - 1)
				    & 4)
					break;

				pull_size = min((unsigned int)4, skb->data_len);
				if (!__pskb_pull_tail(skb, pull_size)) {
					e_err(drv, "__pskb_pull_tail "
					      "failed.\n");
					dev_kfree_skb_any(skb);
					return NETDEV_TX_OK;
				}
				len = skb_headlen(skb);
				break;
			default:
				/* do nothing */
				break;
			}
		}
	}

	/* reserve a descriptor for the offload context */
	if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
		count++;
	count++;

	/* Controller Erratum workaround */
	if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
		count++;

	count += TXD_USE_COUNT(len, max_txd_pwr);

	if (adapter->pcix_82544)
		count++;

	/* work-around for errata 10 and it applies to all controllers
	 * in PCI-X mode, so add one more descriptor to the count
	 */
	if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
		     (len > 2015)))
		count++;

	nr_frags = skb_shinfo(skb)->nr_frags;
	for (f = 0; f < nr_frags; f++)
		count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
				       max_txd_pwr);
	if (adapter->pcix_82544)
		count += nr_frags;

	/* need: count + 2 desc gap to keep tail from touching
	 * head, otherwise try next time
	 */
	if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
		return NETDEV_TX_BUSY;

	if (unlikely((hw->mac_type == e1000_82547) &&
		     (e1000_82547_fifo_workaround(adapter, skb)))) {
		netif_stop_queue(netdev);
		if (!test_bit(__E1000_DOWN, &adapter->flags))
			schedule_delayed_work(&adapter->fifo_stall_task, 1);
		return NETDEV_TX_BUSY;
	}

	if (skb_vlan_tag_present(skb)) {
		tx_flags |= E1000_TX_FLAGS_VLAN;
		tx_flags |= (skb_vlan_tag_get(skb) <<
			     E1000_TX_FLAGS_VLAN_SHIFT);
	}

	first = tx_ring->next_to_use;

	tso = e1000_tso(adapter, tx_ring, skb, protocol);
	if (unlikely(tso < 0)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (likely(tso)) {
		if (likely(hw->mac_type != e1000_82544))
			tx_ring->last_tx_tso = true;
		tx_flags |= E1000_TX_FLAGS_TSO;
	} else if (likely(e1000_tx_csum(adapter, tx_ring, skb, protocol)))
		tx_flags |= E1000_TX_FLAGS_CSUM;

	if (protocol == htons(ETH_P_IP))
		tx_flags |= E1000_TX_FLAGS_IPV4;

	if (unlikely(skb->no_fcs))
		tx_flags |= E1000_TX_FLAGS_NO_FCS;

	count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
			     nr_frags, mss);

	if (count) {
		netdev_sent_queue(netdev, skb->len);
		skb_tx_timestamp(skb);

		e1000_tx_queue(adapter, tx_ring, tx_flags, count);
		/* Make sure there is space in the ring for the next send. */
		e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);

		if (!skb->xmit_more ||
		    netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
			writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt);
			/* we need this if more than one processor can write to
			 * our tail at a time, it synchronizes IO on IA64/Altix
			 * systems
			 */
			mmiowb();
		}
	} else {
		dev_kfree_skb_any(skb);
		tx_ring->buffer_info[first].time_stamp = 0;
		tx_ring->next_to_use = first;
	}

	return NETDEV_TX_OK;
}
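
/* Uncompiled worked example of the descriptor budgeting above.  For a
 * checksum-offloaded, non-TSO packet with a 1514-byte linear area and no
 * frags, with max_txd_pwr = 12 (4096-byte chunks):
 *   count = 1 (context descriptor)
 *         + 1 (base reservation)
 *         + TXD_USE_COUNT(1514, 12) = (1514 >> 12) + 1 = 1
 * and e1000_maybe_stop_tx() is then asked for count + 2 so the tail never
 * touches the head; the queue stays awake only while at least that many
 * descriptors are unused.
 */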
#define NUM_REGS 38 /* 1 based count */
static void e1000_regdump(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 regs[NUM_REGS];
	u32 *regs_buff = regs;
	int i = 0;

	static const char * const reg_name[] = {
		"CTRL", "STATUS",
		"RCTL", "RDLEN", "RDH", "RDT", "RDTR",
		"TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT",
		"TIDV", "TXDCTL", "TADV", "TARC0",
		"TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1",
		"TXDCTL1", "TARC1",
		"CTRL_EXT", "ERT", "RDBAL", "RDBAH",
		"TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC",
		"RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC"
	};
3304 regs_buff[0] = er32(CTRL);
3305 regs_buff[1] = er32(STATUS);
3307 regs_buff[2] = er32(RCTL);
3308 regs_buff[3] = er32(RDLEN);
3309 regs_buff[4] = er32(RDH);
3310 regs_buff[5] = er32(RDT);
3311 regs_buff[6] = er32(RDTR);
3313 regs_buff[7] = er32(TCTL);
3314 regs_buff[8] = er32(TDBAL);
3315 regs_buff[9] = er32(TDBAH);
3316 regs_buff[10] = er32(TDLEN);
3317 regs_buff[11] = er32(TDH);
3318 regs_buff[12] = er32(TDT);
3319 regs_buff[13] = er32(TIDV);
3320 regs_buff[14] = er32(TXDCTL);
3321 regs_buff[15] = er32(TADV);
3322 regs_buff[16] = er32(TARC0);
3324 regs_buff[17] = er32(TDBAL1);
3325 regs_buff[18] = er32(TDBAH1);
3326 regs_buff[19] = er32(TDLEN1);
3327 regs_buff[20] = er32(TDH1);
3328 regs_buff[21] = er32(TDT1);
3329 regs_buff[22] = er32(TXDCTL1);
3330 regs_buff[23] = er32(TARC1);
3331 regs_buff[24] = er32(CTRL_EXT);
3332 regs_buff[25] = er32(ERT);
3333 regs_buff[26] = er32(RDBAL0);
3334 regs_buff[27] = er32(RDBAH0);
3335 regs_buff[28] = er32(TDFH);
3336 regs_buff[29] = er32(TDFT);
3337 regs_buff[30] = er32(TDFHS);
3338 regs_buff[31] = er32(TDFTS);
3339 regs_buff[32] = er32(TDFPC);
3340 regs_buff[33] = er32(RDFH);
3341 regs_buff[34] = er32(RDFT);
3342 regs_buff[35] = er32(RDFHS);
3343 regs_buff[36] = er32(RDFTS);
3344 regs_buff[37] = er32(RDFPC);
3346 pr_info("Register dump\n");
3347 for (i = 0; i < NUM_REGS; i++)
3348 pr_info("%-15s %08x\n", reg_name[i], regs_buff[i]);
}

/*
 * e1000_dump: Print registers, tx ring and rx ring
 */
static void e1000_dump(struct e1000_adapter *adapter)
{
	/* this code doesn't handle multiple rings */
	struct e1000_tx_ring *tx_ring = adapter->tx_ring;
	struct e1000_rx_ring *rx_ring = adapter->rx_ring;
	int i;

	if (!netif_msg_hw(adapter))
		return;

	/* Print Registers */
	e1000_regdump(adapter);
3368 pr_info("TX Desc ring0 dump\n");
3370 /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
3372 * Legacy Transmit Descriptor
3373 * +--------------------------------------------------------------+
3374 * 0 | Buffer Address [63:0] (Reserved on Write Back) |
3375 * +--------------------------------------------------------------+
3376 * 8 | Special | CSS | Status | CMD | CSO | Length |
3377 * +--------------------------------------------------------------+
3378 * 63 48 47 36 35 32 31 24 23 16 15 0
3380 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
3381 * 63 48 47 40 39 32 31 16 15 8 7 0
3382 * +----------------------------------------------------------------+
3383 * 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS |
3384 * +----------------------------------------------------------------+
3385 * 8 | MSS | HDRLEN | RSV | STA | TUCMD | DTYP | PAYLEN |
3386 * +----------------------------------------------------------------+
3387 * 63 48 47 40 39 36 35 32 31 24 23 20 19 0
3389 * Extended Data Descriptor (DTYP=0x1)
3390 * +----------------------------------------------------------------+
3391 * 0 | Buffer Address [63:0] |
3392 * +----------------------------------------------------------------+
3393 * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP | DTALEN |
3394 * +----------------------------------------------------------------+
3395 * 63 48 47 40 39 36 35 32 31 24 23 20 19 0
3397 pr_info("Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestmp bi->skb\n");
3398 pr_info("Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestmp bi->skb\n");
3400 if (!netif_msg_tx_done(adapter))
3401 goto rx_ring_summary;
	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
		struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
		struct e1000_tx_buffer *buffer_info = &tx_ring->buffer_info[i];
		struct my_u { __le64 a; __le64 b; };
		struct my_u *u = (struct my_u *)tx_desc;
		const char *type;

		if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
			type = "NTC/U";
		else if (i == tx_ring->next_to_use)
			type = "NTU";
		else if (i == tx_ring->next_to_clean)
			type = "NTC";
		else
			type = "";

		pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p %s\n",
			((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i,
			le64_to_cpu(u->a), le64_to_cpu(u->b),
			(u64)buffer_info->dma, buffer_info->length,
			buffer_info->next_to_watch,
			(u64)buffer_info->time_stamp, buffer_info->skb, type);
	}

rx_ring_summary:
	pr_info("\nRX Desc ring dump\n");
3431 /* Legacy Receive Descriptor Format
3433 * +-----------------------------------------------------+
3434 * | Buffer Address [63:0] |
3435 * +-----------------------------------------------------+
3436 * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
3437 * +-----------------------------------------------------+
3438 * 63 48 47 40 39 32 31 16 15 0
3440 pr_info("R[desc] [address 63:0 ] [vl er S cks ln] [bi->dma ] [bi->skb]\n");
	if (!netif_msg_rx_status(adapter))
		goto exit;
	for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
		struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
		struct e1000_rx_buffer *buffer_info = &rx_ring->buffer_info[i];
		struct my_u { __le64 a; __le64 b; };
		struct my_u *u = (struct my_u *)rx_desc;
		const char *type;

		if (i == rx_ring->next_to_use)
			type = "NTU";
		else if (i == rx_ring->next_to_clean)
			type = "NTC";
		else
			type = "";

		pr_info("R[0x%03X] %016llX %016llX %016llX %p %s\n",
			i, le64_to_cpu(u->a), le64_to_cpu(u->b),
			(u64)buffer_info->dma, buffer_info->rxbuf.data, type);
	}
	/* dump the descriptor caches */
	/* rx */
	pr_info("Rx descriptor cache in 64bit format\n");
	for (i = 0x6000; i <= 0x63FF ; i += 0x10) {
		pr_info("R%04X: %08X|%08X %08X|%08X\n",
			i,
			readl(adapter->hw.hw_addr + i+4),
			readl(adapter->hw.hw_addr + i),
			readl(adapter->hw.hw_addr + i+12),
			readl(adapter->hw.hw_addr + i+8));
	}
	/* tx */
	pr_info("Tx descriptor cache in 64bit format\n");
	for (i = 0x7000; i <= 0x73FF ; i += 0x10) {
		pr_info("T%04X: %08X|%08X %08X|%08X\n",
			i,
			readl(adapter->hw.hw_addr + i+4),
			readl(adapter->hw.hw_addr + i),
			readl(adapter->hw.hw_addr + i+12),
			readl(adapter->hw.hw_addr + i+8));
	}
exit:
	return;
}
/**
 * e1000_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void e1000_tx_timeout(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	adapter->tx_timeout_count++;
	schedule_work(&adapter->reset_task);
}

static void e1000_reset_task(struct work_struct *work)
{
	struct e1000_adapter *adapter =
		container_of(work, struct e1000_adapter, reset_task);

	e_err(drv, "Reset adapter\n");
	e1000_reinit_locked(adapter);
}
/**
 * e1000_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the watchdog.
 **/
static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
{
	/* only return the current stats */
	return &netdev->stats;
}
/**
 * e1000_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;

	if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
	    (max_frame > MAX_JUMBO_FRAME_SIZE)) {
		e_err(probe, "Invalid MTU setting\n");
		return -EINVAL;
	}

	/* Adapter-specific max frame size limits. */
	switch (hw->mac_type) {
	case e1000_undefined ... e1000_82542_rev2_1:
		if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
			e_err(probe, "Jumbo Frames not supported.\n");
			return -EINVAL;
		}
		break;
	default:
		/* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
		break;
	}

	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);
	/* e1000_down has a dependency on max_frame_size */
	hw->max_frame_size = max_frame;
	if (netif_running(netdev)) {
		/* prevent buffers from being reallocated */
		adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers;
		e1000_down(adapter);
	}

	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
	 * means we reserve 2 more, this pushes us to allocate from the next
	 * larger slab size.
	 * i.e. RXBUFFER_2048 --> size-4096 slab
	 * however with the new *_jumbo_rx* routines, jumbo receives will use
	 * fragmented skbs
	 */

	if (max_frame <= E1000_RXBUFFER_2048)
		adapter->rx_buffer_len = E1000_RXBUFFER_2048;
	else
#if (PAGE_SIZE >= E1000_RXBUFFER_16384)
		adapter->rx_buffer_len = E1000_RXBUFFER_16384;
#elif (PAGE_SIZE >= E1000_RXBUFFER_4096)
		adapter->rx_buffer_len = PAGE_SIZE;
#endif

	/* adjust allocation if LPE protects us, and we aren't using SBP */
	if (!hw->tbi_compatibility_on &&
	    ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
	     (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
		adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;

	pr_info("%s changing MTU from %d to %d\n",
		netdev->name, netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		e1000_up(adapter);
	else
		e1000_reset(adapter);

	clear_bit(__E1000_RESETTING, &adapter->flags);

	return 0;
}
/**
 * e1000_update_stats - Update the board statistics counters
 * @adapter: board private structure
 **/
void e1000_update_stats(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long flags;
	u16 phy_tmp;

#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF

	/* Prevent stats update while adapter is being reset, or if the pci
	 * connection is down.
	 */
	if (adapter->link_speed == 0)
		return;
	if (pci_channel_offline(pdev))
		return;
3625 spin_lock_irqsave(&adapter->stats_lock, flags);
3627 /* these counters are modified from e1000_tbi_adjust_stats,
3628 * called from the interrupt context, so they must only
	 * be written while holding adapter->stats_lock
	 */

	adapter->stats.crcerrs += er32(CRCERRS);
3633 adapter->stats.gprc += er32(GPRC);
3634 adapter->stats.gorcl += er32(GORCL);
3635 adapter->stats.gorch += er32(GORCH);
3636 adapter->stats.bprc += er32(BPRC);
3637 adapter->stats.mprc += er32(MPRC);
3638 adapter->stats.roc += er32(ROC);
3640 adapter->stats.prc64 += er32(PRC64);
3641 adapter->stats.prc127 += er32(PRC127);
3642 adapter->stats.prc255 += er32(PRC255);
3643 adapter->stats.prc511 += er32(PRC511);
3644 adapter->stats.prc1023 += er32(PRC1023);
3645 adapter->stats.prc1522 += er32(PRC1522);
3647 adapter->stats.symerrs += er32(SYMERRS);
3648 adapter->stats.mpc += er32(MPC);
3649 adapter->stats.scc += er32(SCC);
3650 adapter->stats.ecol += er32(ECOL);
3651 adapter->stats.mcc += er32(MCC);
3652 adapter->stats.latecol += er32(LATECOL);
3653 adapter->stats.dc += er32(DC);
3654 adapter->stats.sec += er32(SEC);
3655 adapter->stats.rlec += er32(RLEC);
3656 adapter->stats.xonrxc += er32(XONRXC);
3657 adapter->stats.xontxc += er32(XONTXC);
3658 adapter->stats.xoffrxc += er32(XOFFRXC);
3659 adapter->stats.xofftxc += er32(XOFFTXC);
3660 adapter->stats.fcruc += er32(FCRUC);
3661 adapter->stats.gptc += er32(GPTC);
3662 adapter->stats.gotcl += er32(GOTCL);
3663 adapter->stats.gotch += er32(GOTCH);
3664 adapter->stats.rnbc += er32(RNBC);
3665 adapter->stats.ruc += er32(RUC);
3666 adapter->stats.rfc += er32(RFC);
3667 adapter->stats.rjc += er32(RJC);
3668 adapter->stats.torl += er32(TORL);
3669 adapter->stats.torh += er32(TORH);
3670 adapter->stats.totl += er32(TOTL);
3671 adapter->stats.toth += er32(TOTH);
3672 adapter->stats.tpr += er32(TPR);
3674 adapter->stats.ptc64 += er32(PTC64);
3675 adapter->stats.ptc127 += er32(PTC127);
3676 adapter->stats.ptc255 += er32(PTC255);
3677 adapter->stats.ptc511 += er32(PTC511);
3678 adapter->stats.ptc1023 += er32(PTC1023);
3679 adapter->stats.ptc1522 += er32(PTC1522);
3681 adapter->stats.mptc += er32(MPTC);
3682 adapter->stats.bptc += er32(BPTC);
3684 /* used for adaptive IFS */
3686 hw->tx_packet_delta = er32(TPT);
3687 adapter->stats.tpt += hw->tx_packet_delta;
3688 hw->collision_delta = er32(COLC);
3689 adapter->stats.colc += hw->collision_delta;
3691 if (hw->mac_type >= e1000_82543) {
3692 adapter->stats.algnerrc += er32(ALGNERRC);
3693 adapter->stats.rxerrc += er32(RXERRC);
3694 adapter->stats.tncrs += er32(TNCRS);
3695 adapter->stats.cexterr += er32(CEXTERR);
3696 adapter->stats.tsctc += er32(TSCTC);
		adapter->stats.tsctfc += er32(TSCTFC);
	}

	/* Fill out the OS statistics structure */
3701 netdev->stats.multicast = adapter->stats.mprc;
3702 netdev->stats.collisions = adapter->stats.colc;
	/* Rx Errors */

	/* RLEC on some newer hardware can be incorrect so build
	 * our own version based on RUC and ROC
	 */
	netdev->stats.rx_errors = adapter->stats.rxerrc +
3710 adapter->stats.crcerrs + adapter->stats.algnerrc +
3711 adapter->stats.ruc + adapter->stats.roc +
3712 adapter->stats.cexterr;
3713 adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
3714 netdev->stats.rx_length_errors = adapter->stats.rlerrc;
3715 netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
3716 netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
3717 netdev->stats.rx_missed_errors = adapter->stats.mpc;
	/* Tx Errors */
	adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
3721 netdev->stats.tx_errors = adapter->stats.txerrc;
3722 netdev->stats.tx_aborted_errors = adapter->stats.ecol;
3723 netdev->stats.tx_window_errors = adapter->stats.latecol;
3724 netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
3725 if (hw->bad_tx_carr_stats_fd &&
3726 adapter->link_duplex == FULL_DUPLEX) {
3727 netdev->stats.tx_carrier_errors = 0;
		adapter->stats.tncrs = 0;
	}

	/* Tx Dropped needs to be maintained elsewhere */
	/* Phy Stats */
	if (hw->media_type == e1000_media_type_copper) {
3735 if ((adapter->link_speed == SPEED_1000) &&
3736 (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
3737 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
			adapter->phy_stats.idle_errors += phy_tmp;
		}

		if ((hw->mac_type <= e1000_82546) &&
3742 (hw->phy_type == e1000_phy_m88) &&
3743 !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
3744 adapter->phy_stats.receive_errors += phy_tmp;
3747 /* Management Stats */
3748 if (hw->has_smbus) {
3749 adapter->stats.mgptc += er32(MGTPTC);
3750 adapter->stats.mgprc += er32(MGTPRC);
		adapter->stats.mgpdc += er32(MGTPDC);
	}

	spin_unlock_irqrestore(&adapter->stats_lock, flags);
}

/**
 * e1000_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t e1000_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = er32(ICR);

	if (unlikely((!icr)))
		return IRQ_NONE;  /* Not our interrupt */
	/* we might have caused the interrupt, but the above
	 * read cleared it, and just in case the driver is
	 * down there is nothing to do so return handled
	 */
	if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
		return IRQ_HANDLED;

	if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
		hw->get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->flags))
			schedule_delayed_work(&adapter->watchdog_task, 1);
	}

	/* disable interrupts, without the synchronize_irq bit */
	ew32(IMC, ~0);
	E1000_WRITE_FLUSH();

	if (likely(napi_schedule_prep(&adapter->napi))) {
		adapter->total_tx_bytes = 0;
		adapter->total_tx_packets = 0;
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->napi);
	} else {
		/* this really should not happen! if it does it is basically a
		 * bug, but not a hard error, so enable ints and continue
		 */
		if (!test_bit(__E1000_DOWN, &adapter->flags))
			e1000_irq_enable(adapter);
	}

	return IRQ_HANDLED;
}
/**
 * e1000_clean - NAPI Rx polling callback
 * @napi: napi struct containing references to driver info
 * @budget: amount of work driver is allowed to do this pass, in packets
 **/
static int e1000_clean(struct napi_struct *napi, int budget)
{
	struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
						     napi);
	int tx_clean_complete = 0, work_done = 0;

	tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);

	adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);

	if (!tx_clean_complete)
		work_done = budget;

	/* If budget not fully consumed, exit the polling mode */
	if (work_done < budget) {
		if (likely(adapter->itr_setting & 3))
			e1000_set_itr(adapter);
		napi_complete_done(napi, work_done);
		if (!test_bit(__E1000_DOWN, &adapter->flags))
			e1000_irq_enable(adapter);
	}

	return work_done;
}
/**
 * e1000_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 * @tx_ring: ring to clean
 **/
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
			       struct e1000_tx_ring *tx_ring)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct e1000_tx_desc *tx_desc, *eop_desc;
	struct e1000_tx_buffer *buffer_info;
	unsigned int i, eop;
	unsigned int count = 0;
	unsigned int total_tx_bytes = 0, total_tx_packets = 0;
	unsigned int bytes_compl = 0, pkts_compl = 0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = E1000_TX_DESC(*tx_ring, eop);

	while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		bool cleaned = false;
		dma_rmb();	/* read buffer_info after eop_desc */
		for ( ; !cleaned; count++) {
			tx_desc = E1000_TX_DESC(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);

			if (cleaned) {
				total_tx_packets += buffer_info->segs;
				total_tx_bytes += buffer_info->bytecount;
				if (buffer_info->skb) {
					bytes_compl += buffer_info->skb->len;
					pkts_compl++;
				}
			}
			e1000_unmap_and_free_tx_resource(adapter, buffer_info);
			tx_desc->upper.data = 0;

			if (unlikely(++i == tx_ring->count))
				i = 0;
		}

		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = E1000_TX_DESC(*tx_ring, eop);
	}

	/* Synchronize with E1000_DESC_UNUSED called from e1000_xmit_frame,
	 * which will reuse the cleaned buffers.
	 */
	smp_store_release(&tx_ring->next_to_clean, i);

	netdev_completed_queue(netdev, pkts_compl, bytes_compl);

#define TX_WAKE_THRESHOLD 32
	if (unlikely(count && netif_carrier_ok(netdev) &&
		     E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();

		if (netif_queue_stopped(netdev) &&
		    !(test_bit(__E1000_DOWN, &adapter->flags))) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
	}

	if (adapter->detect_tx_hung) {
		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i
		 */
		adapter->detect_tx_hung = false;
		if (tx_ring->buffer_info[eop].time_stamp &&
		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
			       (adapter->tx_timeout_factor * HZ)) &&
		    !(er32(STATUS) & E1000_STATUS_TXOFF)) {

			/* detected Tx unit hang */
			e_err(drv, "Detected Tx Unit Hang\n"
			      "  Tx Queue             <%lu>\n"
			      "  TDH                  <%x>\n"
			      "  TDT                  <%x>\n"
			      "  next_to_use          <%x>\n"
			      "  next_to_clean        <%x>\n"
			      "buffer_info[next_to_clean]\n"
			      "  time_stamp           <%lx>\n"
			      "  next_to_watch        <%x>\n"
			      "  jiffies              <%lx>\n"
			      "  next_to_watch.status <%x>\n",
			      (unsigned long)(tx_ring - adapter->tx_ring),
			      readl(hw->hw_addr + tx_ring->tdh),
			      readl(hw->hw_addr + tx_ring->tdt),
			      tx_ring->next_to_use,
			      tx_ring->next_to_clean,
			      tx_ring->buffer_info[eop].time_stamp,
			      eop,
			      jiffies,
			      eop_desc->upper.fields.status);
			e1000_dump(adapter);
			netif_stop_queue(netdev);
		}
	}
	adapter->total_tx_bytes += total_tx_bytes;
	adapter->total_tx_packets += total_tx_packets;
	netdev->stats.tx_bytes += total_tx_bytes;
	netdev->stats.tx_packets += total_tx_packets;
	return count < tx_ring->count;
}
/**
 * e1000_rx_checksum - Receive Checksum Offload for 82543
 * @adapter: board private structure
 * @status_err: receive descriptor status and error fields
 * @csum: receive descriptor csum field
 * @skb: socket buffer with received data
 **/
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
			      u32 csum, struct sk_buff *skb)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 status = (u16)status_err;
	u8 errors = (u8)(status_err >> 24);

	skb_checksum_none_assert(skb);

	/* 82543 or newer only */
	if (unlikely(hw->mac_type < e1000_82543))
		return;
	/* Ignore Checksum bit is set */
	if (unlikely(status & E1000_RXD_STAT_IXSM))
		return;
	/* TCP/UDP checksum error bit is set */
	if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
		/* let the stack verify checksum errors */
		adapter->hw_csum_err++;
		return;
	}
	/* TCP/UDP Checksum has not been calculated */
	if (!(status & E1000_RXD_STAT_TCPCS))
		return;

	/* It must be a TCP or UDP packet with a valid checksum */
	if (likely(status & E1000_RXD_STAT_TCPCS)) {
		/* TCP checksum is good */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
	adapter->hw_csum_good++;
}
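
/* Uncompiled note on the status_err packing this helper expects; the
 * helper name is hypothetical.  Callers in the rx paths pass
 * status | (errors << 24), so the low bits carry the descriptor status
 * byte and bits 31:24 carry the error byte that E1000_RXD_ERR_TCPE is
 * tested against.
 */
#if 0
static u32 e1000_pack_status_err_sketch(u8 status, u8 errors)
{
	return (u32)status | ((u32)errors << 24);
}
#endif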
/**
 * e1000_consume_page - helper function for jumbo Rx path
 **/
static void e1000_consume_page(struct e1000_rx_buffer *bi, struct sk_buff *skb,
			       u16 length)
{
	bi->rxbuf.page = NULL;
	skb->len += length;
	skb->data_len += length;
	skb->truesize += PAGE_SIZE;
}
/**
 * e1000_receive_skb - helper function to handle rx indications
 * @adapter: board private structure
 * @status: descriptor status field as written by hardware
 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
 * @skb: pointer to sk_buff to be indicated to stack
 */
static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
			      __le16 vlan, struct sk_buff *skb)
{
	skb->protocol = eth_type_trans(skb, adapter->netdev);

	if (status & E1000_RXD_STAT_VP) {
		u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
	}
	napi_gro_receive(&adapter->napi, skb);
}
/**
 * e1000_tbi_adjust_stats
 * @hw: Struct containing variables accessed by shared code
 * @stats: the statistics structure to adjust
 * @frame_len: The length of the frame in question
 * @mac_addr: The Ethernet destination address of the frame in question
 *
 * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT
 */
static void e1000_tbi_adjust_stats(struct e1000_hw *hw,
				   struct e1000_hw_stats *stats,
				   u32 frame_len, const u8 *mac_addr)
{
	u64 carry_bit;

	/* First adjust the frame length. */
	frame_len--;
	/* We need to adjust the statistics counters, since the hardware
	 * counters overcount this packet as a CRC error and undercount
	 * the packet as a good packet
	 */
	/* This packet should not be counted as a CRC error. */
	stats->crcerrs--;
	/* This packet does count as a Good Packet Received. */
	stats->gprc++;

	/* Adjust the Good Octets received counters */
	carry_bit = 0x80000000 & stats->gorcl;
	stats->gorcl += frame_len;
	/* If the high bit of Gorcl (the low 32 bits of the Good Octets
	 * Received Count) was one before the addition,
	 * AND it is zero after, then we lost the carry out,
	 * need to add one to Gorch (Good Octets Received Count High).
	 * This could be simplified if all environments supported
	 * 64-bit integers.
	 */
	if (carry_bit && ((stats->gorcl & 0x80000000) == 0))
		stats->gorch++;
	/* Is this a broadcast or multicast?  Check broadcast first,
	 * since the test for a multicast frame will test positive on
	 * a broadcast frame.
	 */
	if (is_broadcast_ether_addr(mac_addr))
		stats->bprc++;
	else if (is_multicast_ether_addr(mac_addr))
		stats->mprc++;

	if (frame_len == hw->max_frame_size) {
		/* In this case, the hardware has overcounted the number of
		 * oversize frames.
		 */
		if (stats->roc > 0)
			stats->roc--;
	}

	/* Adjust the bin counters when the extra byte put the frame in the
	 * wrong bin. Remember that the frame_len was adjusted above.
	 */
	if (frame_len == 64) {
		stats->prc64++;
		stats->prc127--;
	} else if (frame_len == 127) {
		stats->prc127++;
		stats->prc255--;
	} else if (frame_len == 255) {
		stats->prc255++;
		stats->prc511--;
	} else if (frame_len == 511) {
		stats->prc511++;
		stats->prc1023--;
	} else if (frame_len == 1023) {
		stats->prc1023++;
		stats->prc1522--;
	} else if (frame_len == 1522) {
		stats->prc1522++;
	}
}
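
/* Uncompiled sketch of the 32-bit carry fix-up above (hypothetical
 * helper).  With gorcl = 0xFFFFFF00 and frame_len = 0x200 the sum wraps
 * to 0x00000100: the high bit was set before the add and clear after,
 * so the lost carry is folded into gorch by hand.
 */
#if 0
static void e1000_gorc_add_sketch(u32 *gorcl, u32 *gorch, u32 frame_len)
{
	u32 carry_bit = 0x80000000 & *gorcl;

	*gorcl += frame_len;
	if (carry_bit && ((*gorcl & 0x80000000) == 0))
		(*gorch)++;
}
#endif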
static bool e1000_tbi_should_accept(struct e1000_adapter *adapter,
				    u8 status, u8 errors,
				    u32 length, const u8 *data)
{
	struct e1000_hw *hw = &adapter->hw;
	u8 last_byte = *(data + length - 1);

	if (TBI_ACCEPT(hw, status, errors, length, last_byte)) {
		unsigned long irq_flags;

		spin_lock_irqsave(&adapter->stats_lock, irq_flags);
		e1000_tbi_adjust_stats(hw, &adapter->stats, length, data);
		spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);

		return true;
	}

	return false;
}
static struct sk_buff *e1000_alloc_rx_skb(struct e1000_adapter *adapter,
					  unsigned int bufsz)
{
	struct sk_buff *skb = napi_alloc_skb(&adapter->napi, bufsz);

	if (unlikely(!skb))
		adapter->alloc_rx_buff_failed++;

	return skb;
}

/**
 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 * @rx_ring: ring to clean
 * @work_done: amount of napi work completed this call
 * @work_to_do: max amount of work allowed for this call to do
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 */
static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
				     struct e1000_rx_ring *rx_ring,
				     int *work_done, int work_to_do)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc, *next_rxd;
	struct e1000_rx_buffer *buffer_info, *next_buffer;
	u32 length;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	while (rx_desc->status & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;
		u8 status;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		dma_rmb(); /* read descriptor and rx_buffer_info after status DD */

		status = rx_desc->status;

		if (++i == rx_ring->count)
			i = 0;

		next_rxd = E1000_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_page(&pdev->dev, buffer_info->dma,
			       adapter->rx_buffer_len, DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->length);

		/* errors is only valid for DD + EOP descriptors */
		if (unlikely((status & E1000_RXD_STAT_EOP) &&
			     (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
			u8 *mapped = page_address(buffer_info->rxbuf.page);

			if (e1000_tbi_should_accept(adapter, status,
						    rx_desc->errors,
						    length, mapped)) {
				length--;
			} else if (netdev->features & NETIF_F_RXALL) {
				goto process_skb;
			} else {
				/* an error means any chain goes out the window
				 * too
				 */
				if (rx_ring->rx_skb_top)
					dev_kfree_skb(rx_ring->rx_skb_top);
				rx_ring->rx_skb_top = NULL;
				goto next_desc;
			}
		}

#define rxtop rx_ring->rx_skb_top
process_skb:
		if (!(status & E1000_RXD_STAT_EOP)) {
			/* this descriptor is only the beginning (or middle) */
			if (!rxtop) {
				/* this is the beginning of a chain */
				rxtop = napi_get_frags(&adapter->napi);
				if (!rxtop)
					break;

				skb_fill_page_desc(rxtop, 0,
						   buffer_info->rxbuf.page,
						   0, length);
			} else {
				/* this is the middle of a chain */
				skb_fill_page_desc(rxtop,
				    skb_shinfo(rxtop)->nr_frags,
				    buffer_info->rxbuf.page, 0, length);
			}
			e1000_consume_page(buffer_info, rxtop, length);
			goto next_desc;
		} else {
			if (rxtop) {
				/* end of the chain */
				skb_fill_page_desc(rxtop,
				    skb_shinfo(rxtop)->nr_frags,
				    buffer_info->rxbuf.page, 0, length);
				skb = rxtop;
				rxtop = NULL;
				e1000_consume_page(buffer_info, skb, length);
			} else {
				struct page *p;
				/* no chain, got EOP, this buf is the packet
				 * copybreak to save the put_page/alloc_page
				 */
				p = buffer_info->rxbuf.page;
				if (length <= copybreak) {
					u8 *vaddr;

					if (likely(!(netdev->features & NETIF_F_RXFCS)))
						length -= 4;
					skb = e1000_alloc_rx_skb(adapter,
								 length);
					if (!skb)
						break;

					vaddr = kmap_atomic(p);
					memcpy(skb_tail_pointer(skb), vaddr,
					       length);
					kunmap_atomic(vaddr);
					/* re-use the page, so don't erase
					 * buffer_info->rxbuf.page
					 */
					skb_put(skb, length);
					e1000_rx_checksum(adapter,
							  status | rx_desc->errors << 24,
							  le16_to_cpu(rx_desc->csum), skb);

					total_rx_bytes += skb->len;
					total_rx_packets++;

					e1000_receive_skb(adapter, status,
							  rx_desc->special, skb);
					goto next_desc;
				} else {
					skb = napi_get_frags(&adapter->napi);
					if (!skb) {
						adapter->alloc_rx_buff_failed++;
						break;
					}
					skb_fill_page_desc(skb, 0, p, 0,
							   length);
					e1000_consume_page(buffer_info, skb,
							   length);
				}
			}
		}

		/* Receive Checksum Offload XXX recompute due to CRC strip? */
		e1000_rx_checksum(adapter,
				  (u32)(status) |
				  ((u32)(rx_desc->errors) << 24),
				  le16_to_cpu(rx_desc->csum), skb);

		total_rx_bytes += (skb->len - 4); /* don't count FCS */
		if (likely(!(netdev->features & NETIF_F_RXFCS)))
			pskb_trim(skb, skb->len - 4);
		total_rx_packets++;

		if (status & E1000_RXD_STAT_VP) {
			__le16 vlan = rx_desc->special;
			u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;

			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
		}

		napi_gro_frags(&adapter->napi);

next_desc:
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	cleaned_count = E1000_DESC_UNUSED(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);

	adapter->total_rx_packets += total_rx_packets;
	adapter->total_rx_bytes += total_rx_bytes;
	netdev->stats.rx_bytes += total_rx_bytes;
	netdev->stats.rx_packets += total_rx_packets;
	return cleaned;
}
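
/* Note on the jumbo path above: frames larger than a single page arrive as
 * a descriptor chain. Non-EOP buffers are accumulated as page fragments on
 * rx_ring->rx_skb_top (aliased as rxtop); the EOP buffer completes the
 * chain and the assembled skb is handed up via napi_gro_frags(). A frame
 * error therefore has to drop any partially assembled chain as well.
 */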

/* this should improve performance for small packets with large amounts
 * of reassembly being done in the stack
 */
static struct sk_buff *e1000_copybreak(struct e1000_adapter *adapter,
				       struct e1000_rx_buffer *buffer_info,
				       u32 length, const void *data)
{
	struct sk_buff *skb;

	if (length > copybreak)
		return NULL;

	skb = e1000_alloc_rx_skb(adapter, length);
	if (!skb)
		return NULL;

	dma_sync_single_for_cpu(&adapter->pdev->dev, buffer_info->dma,
				length, DMA_FROM_DEVICE);

	memcpy(skb_put(skb, length), data, length);

	return skb;
}
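
/* Illustrative usage, assuming the default copybreak threshold of 256
 * bytes (tunable via the copybreak module parameter): a 128-byte frame is
 * memcpy'd into a fresh skb here so the original DMA buffer stays mapped
 * for reuse, while a 1500-byte frame returns NULL and the caller wraps the
 * existing buffer with build_skb() instead, avoiding the copy.
 */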

/**
 * e1000_clean_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 * @rx_ring: ring to clean
 * @work_done: amount of napi work completed this call
 * @work_to_do: max amount of work allowed for this call to do
 */
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       struct e1000_rx_ring *rx_ring,
			       int *work_done, int work_to_do)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc, *next_rxd;
	struct e1000_rx_buffer *buffer_info, *next_buffer;
	u32 length;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	while (rx_desc->status & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;
		u8 *data;
		u8 status;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		dma_rmb(); /* read descriptor and rx_buffer_info after status DD */

		status = rx_desc->status;
		length = le16_to_cpu(rx_desc->length);

		data = buffer_info->rxbuf.data;
		prefetch(data);
		skb = e1000_copybreak(adapter, buffer_info, length, data);
		if (!skb) {
			unsigned int frag_len = e1000_frag_len(adapter);

			skb = build_skb(data - E1000_HEADROOM, frag_len);
			if (!skb) {
				adapter->alloc_rx_buff_failed++;
				break;
			}

			skb_reserve(skb, E1000_HEADROOM);
			dma_unmap_single(&pdev->dev, buffer_info->dma,
					 adapter->rx_buffer_len,
					 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
			buffer_info->rxbuf.data = NULL;
		}

		if (++i == rx_ring->count)
			i = 0;

		next_rxd = E1000_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;

		/* !EOP means multiple descriptors were used to store a single
		 * packet, if that's the case we need to toss it. In fact, we
		 * need to toss every packet with the EOP bit clear and the
		 * next frame that _does_ have the EOP bit set, as it is by
		 * definition only a frame fragment
		 */
		if (unlikely(!(status & E1000_RXD_STAT_EOP)))
			adapter->discarding = true;

		if (adapter->discarding) {
			/* All receives must fit into a single buffer */
			netdev_dbg(netdev, "Receive packet consumed multiple buffers\n");
			dev_kfree_skb(skb);
			if (status & E1000_RXD_STAT_EOP)
				adapter->discarding = false;
			goto next_desc;
		}

		if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
			if (e1000_tbi_should_accept(adapter, status,
						    rx_desc->errors,
						    length, data)) {
				length--;
			} else if (netdev->features & NETIF_F_RXALL) {
				goto process_skb;
			} else {
				dev_kfree_skb(skb);
				goto next_desc;
			}
		}

process_skb:
		total_rx_bytes += (length - 4); /* don't count FCS */
		total_rx_packets++;

		if (likely(!(netdev->features & NETIF_F_RXFCS)))
			/* adjust length to remove Ethernet CRC, this must be
			 * done after the TBI_ACCEPT workaround above
			 */
			length -= 4;

		if (buffer_info->rxbuf.data == NULL)
			skb_put(skb, length);
		else /* copybreak skb */
			skb_trim(skb, length);

		/* Receive Checksum Offload */
		e1000_rx_checksum(adapter,
				  (u32)(status) |
				  ((u32)(rx_desc->errors) << 24),
				  le16_to_cpu(rx_desc->csum), skb);

		e1000_receive_skb(adapter, status, rx_desc->special, skb);

next_desc:
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	cleaned_count = E1000_DESC_UNUSED(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);

	adapter->total_rx_packets += total_rx_packets;
	adapter->total_rx_bytes += total_rx_bytes;
	netdev->stats.rx_bytes += total_rx_bytes;
	netdev->stats.rx_packets += total_rx_packets;
	return cleaned;
}

/**
 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
 * @adapter: address of board private structure
 * @rx_ring: pointer to receive ring structure
 * @cleaned_count: number of buffers to allocate this pass
 **/
static void
e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
			     struct e1000_rx_ring *rx_ring, int cleaned_count)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc;
	struct e1000_rx_buffer *buffer_info;
	unsigned int i;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		/* allocate a new page if necessary */
		if (!buffer_info->rxbuf.page) {
			buffer_info->rxbuf.page = alloc_page(GFP_ATOMIC);
			if (unlikely(!buffer_info->rxbuf.page)) {
				adapter->alloc_rx_buff_failed++;
				break;
			}
		}

		if (!buffer_info->dma) {
			buffer_info->dma = dma_map_page(&pdev->dev,
							buffer_info->rxbuf.page, 0,
							adapter->rx_buffer_len,
							DMA_FROM_DEVICE);
			if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
				put_page(buffer_info->rxbuf.page);
				buffer_info->rxbuf.page = NULL;
				buffer_info->dma = 0;
				adapter->alloc_rx_buff_failed++;
				break;
			}
		}

		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count))
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->rdt);
	}
}
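
/* Illustrative sketch (hypothetical helper, not a driver API): the tail
 * bump above writes RDT one entry behind next_to_use, because hardware
 * treats the descriptor at RDT itself as still owned by software.
 */
static inline __maybe_unused u32 example_rdt_from_next_to_use(u32 next_to_use,
							      u32 count)
{
	/* wrap backwards when next_to_use is 0 */
	return next_to_use ? next_to_use - 1 : count - 1;
}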

/**
 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
 * @adapter: address of board private structure
 * @rx_ring: pointer to receive ring structure
 * @cleaned_count: number of buffers to allocate this pass
 **/
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   struct e1000_rx_ring *rx_ring,
				   int cleaned_count)
{
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc;
	struct e1000_rx_buffer *buffer_info;
	unsigned int i;
	unsigned int bufsz = adapter->rx_buffer_len;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		void *data;

		if (buffer_info->rxbuf.data)
			goto skip;

		data = e1000_alloc_frag(adapter);
		if (!data) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		/* Fix for errata 23, can't cross 64kB boundary */
		if (!e1000_check_64k_bound(adapter, data, bufsz)) {
			void *olddata = data;

			e_err(rx_err, "skb align check failed: %u bytes at %p\n",
			      bufsz, data);
			/* Try again, without freeing the previous */
			data = e1000_alloc_frag(adapter);
			/* Failed allocation, critical failure */
			if (!data) {
				skb_free_frag(olddata);
				adapter->alloc_rx_buff_failed++;
				break;
			}

			if (!e1000_check_64k_bound(adapter, data, bufsz)) {
				/* give up */
				skb_free_frag(data);
				skb_free_frag(olddata);
				adapter->alloc_rx_buff_failed++;
				break;
			}

			/* Use new allocation */
			skb_free_frag(olddata);
		}

		buffer_info->dma = dma_map_single(&pdev->dev,
						  data,
						  adapter->rx_buffer_len,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
			skb_free_frag(data);
			buffer_info->dma = 0;
			adapter->alloc_rx_buff_failed++;
			break;
		}

		/* XXX if it was allocated cleanly it will never map to a
		 * boundary crossing
		 */

		/* Fix for errata 23, can't cross 64kB boundary */
		if (!e1000_check_64k_bound(adapter,
					   (void *)(unsigned long)buffer_info->dma,
					   adapter->rx_buffer_len)) {
			e_err(rx_err, "dma align check failed: %u bytes at %p\n",
			      adapter->rx_buffer_len,
			      (void *)(unsigned long)buffer_info->dma);

			dma_unmap_single(&pdev->dev, buffer_info->dma,
					 adapter->rx_buffer_len,
					 DMA_FROM_DEVICE);

			skb_free_frag(data);
			buffer_info->rxbuf.data = NULL;
			buffer_info->dma = 0;

			adapter->alloc_rx_buff_failed++;
			break;
		}
		buffer_info->rxbuf.data = data;
 skip:
		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count))
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, hw->hw_addr + rx_ring->rdt);
	}
}
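
/* Illustrative sketch (an assumption about the boundary test performed by
 * e1000_check_64k_bound, shown standalone): errata 23 forbids a receive
 * buffer from spanning a 64 kB boundary, which a span does exactly when its
 * first and last bytes fall in different 64 kB-aligned windows.
 */
static inline __maybe_unused bool example_crosses_64k(unsigned long start,
						      unsigned long len)
{
	return (start >> 16) != ((start + len - 1) >> 16);
}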

/**
 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
 * @adapter: address of board private structure
 **/
static void e1000_smartspeed(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 phy_status;
	u16 phy_ctrl;

	if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
	    !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
		return;

	if (adapter->smartspeed == 0) {
		/* If Master/Slave config fault is asserted twice,
		 * we assume back-to-back
		 */
		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
			return;
		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
			return;
		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
		if (phy_ctrl & CR_1000T_MS_ENABLE) {
			phy_ctrl &= ~CR_1000T_MS_ENABLE;
			e1000_write_phy_reg(hw, PHY_1000T_CTRL,
					    phy_ctrl);
			adapter->smartspeed++;
			if (!e1000_phy_setup_autoneg(hw) &&
			    !e1000_read_phy_reg(hw, PHY_CTRL,
						&phy_ctrl)) {
				phy_ctrl |= (MII_CR_AUTO_NEG_EN |
					     MII_CR_RESTART_AUTO_NEG);
				e1000_write_phy_reg(hw, PHY_CTRL,
						    phy_ctrl);
			}
		}
		return;
	} else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
		/* If still no link, perhaps using 2/3 pair cable */
		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
		phy_ctrl |= CR_1000T_MS_ENABLE;
		e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
		if (!e1000_phy_setup_autoneg(hw) &&
		    !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
			phy_ctrl |= (MII_CR_AUTO_NEG_EN |
				     MII_CR_RESTART_AUTO_NEG);
			e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
		}
	}
	/* Restart process after E1000_SMARTSPEED_MAX iterations */
	if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
		adapter->smartspeed = 0;
}

/**
 * e1000_ioctl -
 * @netdev: pointer to our netdev
 * @ifr: pointer to interface request structure
 * @cmd: ioctl data
 **/
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return e1000_mii_ioctl(netdev, ifr, cmd);
	default:
		return -EOPNOTSUPP;
	}
}

/**
 * e1000_mii_ioctl -
 * @netdev: pointer to our netdev
 * @ifr: pointer to interface request structure
 * @cmd: ioctl data
 **/
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct mii_ioctl_data *data = if_mii(ifr);
	int retval;
	u16 mii_reg;
	unsigned long flags;

	if (hw->media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = hw->phy_addr;
		break;
	case SIOCGMIIREG:
		spin_lock_irqsave(&adapter->stats_lock, flags);
		if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
				       &data->val_out)) {
			spin_unlock_irqrestore(&adapter->stats_lock, flags);
			return -EIO;
		}
		spin_unlock_irqrestore(&adapter->stats_lock, flags);
		break;
	case SIOCSMIIREG:
		if (data->reg_num & ~(0x1F))
			return -EFAULT;
		mii_reg = data->val_in;
		spin_lock_irqsave(&adapter->stats_lock, flags);
		if (e1000_write_phy_reg(hw, data->reg_num,
					mii_reg)) {
			spin_unlock_irqrestore(&adapter->stats_lock, flags);
			return -EIO;
		}
		spin_unlock_irqrestore(&adapter->stats_lock, flags);
		if (hw->media_type == e1000_media_type_copper) {
			switch (data->reg_num) {
			case PHY_CTRL:
				if (mii_reg & MII_CR_POWER_DOWN)
					break;
				if (mii_reg & MII_CR_AUTO_NEG_EN) {
					hw->autoneg = 1;
					hw->autoneg_advertised = 0x2F;
				} else {
					u32 speed;

					if (mii_reg & 0x40)
						speed = SPEED_1000;
					else if (mii_reg & 0x2000)
						speed = SPEED_100;
					else
						speed = SPEED_10;
					retval = e1000_set_spd_dplx(
						adapter, speed,
						((mii_reg & 0x100)
						 ? DUPLEX_FULL :
						 DUPLEX_HALF));
					if (retval)
						return retval;
				}
				if (netif_running(adapter->netdev))
					e1000_reinit_locked(adapter);
				else
					e1000_reset(adapter);
				break;
			case M88E1000_PHY_SPEC_CTRL:
			case M88E1000_EXT_PHY_SPEC_CTRL:
				if (e1000_phy_reset(hw))
					return -EIO;
				break;
			}
		} else {
			switch (data->reg_num) {
			case PHY_CTRL:
				if (mii_reg & MII_CR_POWER_DOWN)
					break;
				if (netif_running(adapter->netdev))
					e1000_reinit_locked(adapter);
				else
					e1000_reset(adapter);
				break;
			}
		}
		break;
	default:
		return -EOPNOTSUPP;
	}
	return E1000_SUCCESS;
}

void e1000_pci_set_mwi(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;
	int ret_val = pci_set_mwi(adapter->pdev);

	if (ret_val)
		e_err(probe, "Error in setting MWI\n");
}

void e1000_pci_clear_mwi(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;

	pci_clear_mwi(adapter->pdev);
}

int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;

	return pcix_get_mmrbc(adapter->pdev);
}

void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
{
	struct e1000_adapter *adapter = hw->back;

	pcix_set_mmrbc(adapter->pdev, mmrbc);
}

void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
{
	outl(value, port);
}

static bool e1000_vlan_used(struct e1000_adapter *adapter)
{
	u16 vid;

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		return true;
	return false;
}

static void __e1000_vlan_mode(struct e1000_adapter *adapter,
			      netdev_features_t features)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl;

	ctrl = er32(CTRL);
	if (features & NETIF_F_HW_VLAN_CTAG_RX) {
		/* enable VLAN tag insert/strip */
		ctrl |= E1000_CTRL_VME;
	} else {
		/* disable VLAN tag insert/strip */
		ctrl &= ~E1000_CTRL_VME;
	}
	ew32(CTRL, ctrl);
}

static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
				     bool filter_on)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_disable(adapter);

	__e1000_vlan_mode(adapter, adapter->netdev->features);
	if (filter_on) {
		/* enable VLAN receive filtering */
		rctl = er32(RCTL);
		rctl &= ~E1000_RCTL_CFIEN;
		if (!(adapter->netdev->flags & IFF_PROMISC))
			rctl |= E1000_RCTL_VFE;
		ew32(RCTL, rctl);
		e1000_update_mng_vlan(adapter);
	} else {
		/* disable VLAN receive filtering */
		rctl = er32(RCTL);
		rctl &= ~E1000_RCTL_VFE;
		ew32(RCTL, rctl);
	}

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_enable(adapter);
}

static void e1000_vlan_mode(struct net_device *netdev,
			    netdev_features_t features)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_disable(adapter);

	__e1000_vlan_mode(adapter, features);

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_enable(adapter);
}

static int e1000_vlan_rx_add_vid(struct net_device *netdev,
				 __be16 proto, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	    (vid == adapter->mng_vlan_id))
		return 0;

	if (!e1000_vlan_used(adapter))
		e1000_vlan_filter_on_off(adapter, true);

	/* add VID to filter table */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
	vfta |= (1 << (vid & 0x1F));
	e1000_write_vfta(hw, index, vfta);

	set_bit(vid, adapter->active_vlans);

	return 0;
}
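
/* Illustrative sketch (hypothetical helper, not a driver API): the VFTA is
 * 128 32-bit registers forming a 4096-bit VLAN bitmap, so a 12-bit VLAN ID
 * splits into a register index (bits 11:5) and a bit position (bits 4:0).
 */
static inline __maybe_unused void example_vfta_locate(u16 vid, u32 *index,
						      u32 *mask)
{
	*index = (vid >> 5) & 0x7F;	/* which of the 128 VFTA registers */
	*mask = 1u << (vid & 0x1F);	/* which bit within that register */
}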

static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
				  __be16 proto, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_disable(adapter);
	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_enable(adapter);

	/* remove VID from filter table */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
	vfta &= ~(1 << (vid & 0x1F));
	e1000_write_vfta(hw, index, vfta);

	clear_bit(vid, adapter->active_vlans);

	if (!e1000_vlan_used(adapter))
		e1000_vlan_filter_on_off(adapter, false);

	return 0;
}

static void e1000_restore_vlan(struct e1000_adapter *adapter)
{
	u16 vid;

	if (!e1000_vlan_used(adapter))
		return;

	e1000_vlan_filter_on_off(adapter, true);
	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
}

int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
{
	struct e1000_hw *hw = &adapter->hw;

	hw->autoneg = 0;

	/* Make sure dplx is at most 1 bit and lsb of speed is not set
	 * for the switch() below to work
	 */
	if ((spd & 1) || (dplx & ~1))
		goto err_inval;

	/* Fiber NICs only allow 1 Gbps Full duplex */
	if ((hw->media_type == e1000_media_type_fiber) &&
	    spd != SPEED_1000 &&
	    dplx != DUPLEX_FULL)
		goto err_inval;

	switch (spd + dplx) {
	case SPEED_10 + DUPLEX_HALF:
		hw->forced_speed_duplex = e1000_10_half;
		break;
	case SPEED_10 + DUPLEX_FULL:
		hw->forced_speed_duplex = e1000_10_full;
		break;
	case SPEED_100 + DUPLEX_HALF:
		hw->forced_speed_duplex = e1000_100_half;
		break;
	case SPEED_100 + DUPLEX_FULL:
		hw->forced_speed_duplex = e1000_100_full;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		hw->autoneg = 1;
		hw->autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
		goto err_inval;
	}

	/* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
	hw->mdix = AUTO_ALL_MODES;

	return 0;

err_inval:
	e_err(probe, "Unsupported Speed/Duplex configuration\n");
	return -EINVAL;
}
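
/* Note on the switch above: it works because SPEED_10/100/1000 are the
 * literal values 10, 100 and 1000 while DUPLEX_HALF/FULL are 0 and 1, so
 * every supported combination yields a distinct sum (10, 11, 100, 101,
 * 1001); the (spd & 1) / (dplx & ~1) guard rejects anything that would
 * make the encoding ambiguous.
 */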

static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, ctrl_ext, rctl, status;
	u32 wufc = adapter->wol;
	int retval = 0;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		int count = E1000_CHECK_RESET_COUNT;

		while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
			usleep_range(10000, 20000);

		WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
		e1000_down(adapter);
	}

	retval = pci_save_state(pdev);
	if (retval)
		return retval;

	status = er32(STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		e1000_setup_rctl(adapter);
		e1000_set_rx_mode(netdev);

		rctl = er32(RCTL);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC)
			rctl |= E1000_RCTL_MPE;

		/* enable receives in the hardware */
		ew32(RCTL, rctl | E1000_RCTL_EN);

		if (hw->mac_type >= e1000_82540) {
			ctrl = er32(CTRL);
			/* advertise wake from D3Cold */
			#define E1000_CTRL_ADVD3WUC 0x00100000
			/* phy power management enable */
			#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
			ctrl |= E1000_CTRL_ADVD3WUC |
				E1000_CTRL_EN_PHY_PWR_MGMT;
			ew32(CTRL, ctrl);
		}

		if (hw->media_type == e1000_media_type_fiber ||
		    hw->media_type == e1000_media_type_internal_serdes) {
			/* keep the laser running in D3 */
			ctrl_ext = er32(CTRL_EXT);
			ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
			ew32(CTRL_EXT, ctrl_ext);
		}

		ew32(WUC, E1000_WUC_PME_EN);
		ew32(WUFC, wufc);
	} else {
		ew32(WUC, 0);
		ew32(WUFC, 0);
	}

	e1000_release_manageability(adapter);

	*enable_wake = !!wufc;

	/* make sure adapter isn't asleep if manageability is enabled */
	if (adapter->en_mng_pt)
		*enable_wake = true;

	if (netif_running(netdev))
		e1000_free_irq(adapter);

	pci_disable_device(pdev);

	return 0;
}
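
/* Note on the wake decision above: *enable_wake tells the caller whether
 * PME should be armed, which is the case when any wake-up filter bits
 * survive the link-up check or when manageability (en_mng_pt) requires the
 * adapter to stay reachable while the host sleeps.
 */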

#ifdef CONFIG_PM
static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int retval;
	bool wake;

	retval = __e1000_shutdown(pdev, &wake);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}

static int e1000_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (adapter->need_ioport)
		err = pci_enable_device(pdev);
	else
		err = pci_enable_device_mem(pdev);
	if (err) {
		pr_err("Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(netdev)) {
		err = e1000_request_irq(adapter);
		if (err)
			return err;
	}

	e1000_power_up_phy(adapter);
	e1000_reset(adapter);
	ew32(WUS, ~0);

	e1000_init_manageability(adapter);

	if (netif_running(netdev))
		e1000_up(adapter);

	netif_device_attach(netdev);

	return 0;
}
#endif

static void e1000_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__e1000_shutdown(pdev, &wake);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void e1000_netpoll(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	disable_irq(adapter->pdev->irq);
	e1000_intr(adapter->pdev->irq, netdev);
	enable_irq(adapter->pdev->irq);
}
#endif

/**
 * e1000_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		e1000_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * e1000_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the e1000_resume routine.
 */
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	if (adapter->need_ioport)
		err = pci_enable_device(pdev);
	else
		err = pci_enable_device_mem(pdev);
	if (err) {
		pr_err("Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	e1000_reset(adapter);
	ew32(WUS, ~0);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * e1000_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the e1000_resume routine.
 */
static void e1000_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	e1000_init_manageability(adapter);

	if (netif_running(netdev)) {
		if (e1000_up(adapter)) {
			pr_info("can't bring device back up after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
}