/*******************************************************************************

  Intel PRO/1000 Linux driver
  Copyright(c) 1999 - 2011 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/pm_qos_params.h>
#include <linux/pm_runtime.h>
#include <linux/aer.h>
#include "e1000.h"

#define DRV_EXTRAVERSION "-k2"

#define DRV_VERSION "1.3.10" DRV_EXTRAVERSION
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;
static const struct e1000_info *e1000_info_tbl[] = {
	[board_82571]		= &e1000_82571_info,
	[board_82572]		= &e1000_82572_info,
	[board_82573]		= &e1000_82573_info,
	[board_82574]		= &e1000_82574_info,
	[board_82583]		= &e1000_82583_info,
	[board_80003es2lan]	= &e1000_es2_info,
	[board_ich8lan]		= &e1000_ich8_info,
	[board_ich9lan]		= &e1000_ich9_info,
	[board_ich10lan]	= &e1000_ich10_info,
	[board_pchlan]		= &e1000_pch_info,
	[board_pch2lan]		= &e1000_pch2_info,
};
struct e1000_reg_info {
	u32 ofs;
	char *name;
};
#define E1000_RDFH	0x02410	/* Rx Data FIFO Head - RW */
#define E1000_RDFT	0x02418	/* Rx Data FIFO Tail - RW */
#define E1000_RDFHS	0x02420	/* Rx Data FIFO Head Saved - RW */
#define E1000_RDFTS	0x02428	/* Rx Data FIFO Tail Saved - RW */
#define E1000_RDFPC	0x02430	/* Rx Data FIFO Packet Count - RW */

#define E1000_TDFH	0x03410	/* Tx Data FIFO Head - RW */
#define E1000_TDFT	0x03418	/* Tx Data FIFO Tail - RW */
#define E1000_TDFHS	0x03420	/* Tx Data FIFO Head Saved - RW */
#define E1000_TDFTS	0x03428	/* Tx Data FIFO Tail Saved - RW */
#define E1000_TDFPC	0x03430	/* Tx Data FIFO Packet Count - RW */
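
/*
 * These Rx/Tx data-FIFO offsets are not touched on the hot path; they exist
 * here only so that the register-dump table below can include them when a
 * debug dump is requested.
 */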
static const struct e1000_reg_info e1000_reg_info_tbl[] = {

	/* General Registers */
	{E1000_CTRL, "CTRL"},
	{E1000_STATUS, "STATUS"},
	{E1000_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{E1000_ICR, "ICR"},

	/* Rx Registers */
	{E1000_RCTL, "RCTL"},
	{E1000_RDLEN, "RDLEN"},
	{E1000_RDH, "RDH"},
	{E1000_RDT, "RDT"},
	{E1000_RDTR, "RDTR"},
	{E1000_RXDCTL(0), "RXDCTL"},
	{E1000_RDBAL, "RDBAL"},
	{E1000_RDBAH, "RDBAH"},
	{E1000_RDFH, "RDFH"},
	{E1000_RDFT, "RDFT"},
	{E1000_RDFHS, "RDFHS"},
	{E1000_RDFTS, "RDFTS"},
	{E1000_RDFPC, "RDFPC"},

	/* Tx Registers */
	{E1000_TCTL, "TCTL"},
	{E1000_TDBAL, "TDBAL"},
	{E1000_TDBAH, "TDBAH"},
	{E1000_TDLEN, "TDLEN"},
	{E1000_TDH, "TDH"},
	{E1000_TDT, "TDT"},
	{E1000_TIDV, "TIDV"},
	{E1000_TXDCTL(0), "TXDCTL"},
	{E1000_TADV, "TADV"},
	{E1000_TARC(0), "TARC"},
	{E1000_TDFH, "TDFH"},
	{E1000_TDFT, "TDFT"},
	{E1000_TDFHS, "TDFHS"},
	{E1000_TDFTS, "TDFTS"},
	{E1000_TDFPC, "TDFPC"},

	/* List Terminator */
	{0, NULL}
};
/**
 * e1000_regdump - register printout routine
 **/
static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
{
	int n = 0;
	char rname[16];
	u32 regs[2];

	switch (reginfo->ofs) {
	case E1000_RXDCTL(0):
		for (n = 0; n < 2; n++)
			regs[n] = __er32(hw, E1000_RXDCTL(n));
		break;
	case E1000_TXDCTL(0):
		for (n = 0; n < 2; n++)
			regs[n] = __er32(hw, E1000_TXDCTL(n));
		break;
	case E1000_TARC(0):
		for (n = 0; n < 2; n++)
			regs[n] = __er32(hw, E1000_TARC(n));
		break;
	default:
		printk(KERN_INFO "%-15s %08x\n",
		       reginfo->name, __er32(hw, reginfo->ofs));
		return;
	}

	snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]");
	printk(KERN_INFO "%-15s ", rname);
	for (n = 0; n < 2; n++)
		printk(KERN_CONT "%08x ", regs[n]);
	printk(KERN_CONT "\n");
}
/**
 * e1000e_dump - Print registers, Tx-ring and Rx-ring
 **/
static void e1000e_dump(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_reg_info *reginfo;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_tx_desc *tx_desc;
	struct my_u0 {
		u64 a;
		u64 b;
	} *u0;
	struct e1000_buffer *buffer_info;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	union e1000_rx_desc_packet_split *rx_desc_ps;
	struct e1000_rx_desc *rx_desc;
	struct my_u1 {
		u64 a;
		u64 b;
		u64 c;
		u64 d;
	} *u1;
	u32 staterr;
	int i = 0;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		printk(KERN_INFO "Device Name     state            "
		       "trans_start      last_rx\n");
		printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
		       netdev->name, netdev->state, netdev->trans_start,
		       netdev->last_rx);
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	printk(KERN_INFO " Register Name   Value\n");
	for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl;
	     reginfo->name; reginfo++) {
		e1000_regdump(hw, reginfo);
	}
	/* Print Tx Ring Summary */
	if (!netdev || !netif_running(netdev))
		goto exit;

	dev_info(&adapter->pdev->dev, "Tx Ring Summary\n");
	printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma  ]"
	       " leng ntw timestamp\n");
	buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
	printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
	       0, tx_ring->next_to_use, tx_ring->next_to_clean,
	       (unsigned long long)buffer_info->dma,
	       buffer_info->length,
	       buffer_info->next_to_watch,
	       (unsigned long long)buffer_info->time_stamp);

	/* Print Tx Ring */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "Tx Ring Dump\n");
	/*
	 * Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
	 *
	 * Legacy Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
	 *   +--------------------------------------------------------------+
	 * 8 | Special  |    CSS     | Status |  CMD    |  CSO   |  Length  |
	 *   +--------------------------------------------------------------+
	 *   63       48 47        36 35    32 31     24 23    16 15        0
	 *
	 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
	 *   63      48 47    40 39       32 31             16 15    8 7      0
	 *   +----------------------------------------------------------------+
	 * 0 |  TUCSE  | TUCS0  |   TUCSS   |     IPCSE       | IPCS0 | IPCSS |
	 *   +----------------------------------------------------------------+
	 * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
	 *   +----------------------------------------------------------------+
	 *   63      48 47    40 39 36 35 32 31   24 23  20 19                0
	 *
	 * Extended Data Descriptor (DTYP=0x1)
	 *   +----------------------------------------------------------------+
	 * 0 |                     Buffer Address [63:0]                      |
	 *   +----------------------------------------------------------------+
	 * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
	 *   +----------------------------------------------------------------+
	 *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
	 */
	printk(KERN_INFO "Tl[desc]     [address 63:0  ] [SpeCssSCmCsLen]"
	       " [bi->dma       ] leng  ntw timestamp        bi->skb "
	       "<-- Legacy format\n");
	printk(KERN_INFO "Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen]"
	       " [bi->dma       ] leng  ntw timestamp        bi->skb "
	       "<-- Ext Context format\n");
	printk(KERN_INFO "Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen]"
	       " [bi->dma       ] leng  ntw timestamp        bi->skb "
	       "<-- Ext Data format\n");
	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
		tx_desc = E1000_TX_DESC(*tx_ring, i);
		buffer_info = &tx_ring->buffer_info[i];
		u0 = (struct my_u0 *)tx_desc;
		printk(KERN_INFO "T%c[0x%03X]    %016llX %016llX %016llX "
		       "%04X  %3X %016llX %p",
		       (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' :
			((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')), i,
		       (unsigned long long)le64_to_cpu(u0->a),
		       (unsigned long long)le64_to_cpu(u0->b),
		       (unsigned long long)buffer_info->dma,
		       buffer_info->length, buffer_info->next_to_watch,
		       (unsigned long long)buffer_info->time_stamp,
		       buffer_info->skb);
		if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
			printk(KERN_CONT " NTC/U\n");
		else if (i == tx_ring->next_to_use)
			printk(KERN_CONT " NTU\n");
		else if (i == tx_ring->next_to_clean)
			printk(KERN_CONT " NTC\n");
		else
			printk(KERN_CONT "\n");

		if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
			print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
				       16, 1, phys_to_virt(buffer_info->dma),
				       buffer_info->length, true);
	}
	/* Print Rx Ring Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "Rx Ring Summary\n");
	printk(KERN_INFO "Queue [NTU] [NTC]\n");
	printk(KERN_INFO " %5d %5X %5X\n", 0,
	       rx_ring->next_to_use, rx_ring->next_to_clean);
	/* Print Rx Ring */
	if (!netif_msg_rx_status(adapter))
		goto exit;

	dev_info(&adapter->pdev->dev, "Rx Ring Dump\n");
	switch (adapter->rx_ps_pages) {
	case 1:
	case 2:
	case 3:
		/*
		 * [Extended] Packet Split Receive Descriptor Format
		 *
		 *    +-----------------------------------------------------+
		 *  0 |                Buffer Address 0 [63:0]              |
		 *    +-----------------------------------------------------+
		 *  8 |                Buffer Address 1 [63:0]              |
		 *    +-----------------------------------------------------+
		 * 16 |                Buffer Address 2 [63:0]              |
		 *    +-----------------------------------------------------+
		 * 24 |                Buffer Address 3 [63:0]              |
		 *    +-----------------------------------------------------+
		 */
		printk(KERN_INFO "R  [desc]      [buffer 0 63:0 ] "
		       "[buffer 1 63:0 ] "
		       "[buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma       ] "
		       "[bi->skb] <-- Ext Pkt Split format\n");
		/*
		 * [Extended] Receive Descriptor (Write-Back) Format
		 *
		 *   63       48 47    32 31  13 12    8 7    4 3        0
		 *   +------------------------------------------------------+
		 * 0 | Packet   | IP     |  Rsvd   | MRQ   | Rsvd | MRQ RSS |
		 *   | Checksum | Ident  |         | Queue |      |  Type   |
		 *   +------------------------------------------------------+
		 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
		 *   +------------------------------------------------------+
		 *   63       48 47    32 31            20 19               0
		 */
		printk(KERN_INFO "RWB[desc]      [ck ipid mrqhsh] "
		       "[ l3  l2  l1 hs] [reserved      ] ---------------- "
		       "[bi->skb] <-- Ext Rx Write-Back format\n");
		for (i = 0; i < rx_ring->count; i++) {
			buffer_info = &rx_ring->buffer_info[i];
			rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
			u1 = (struct my_u1 *)rx_desc_ps;
			staterr =
			    le32_to_cpu(rx_desc_ps->wb.middle.status_error);
			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				printk(KERN_INFO "RWB[0x%03X]     %016llX "
				       "%016llX %016llX %016llX "
				       "---------------- %p", i,
				       (unsigned long long)le64_to_cpu(u1->a),
				       (unsigned long long)le64_to_cpu(u1->b),
				       (unsigned long long)le64_to_cpu(u1->c),
				       (unsigned long long)le64_to_cpu(u1->d),
				       buffer_info->skb);
			} else {
				printk(KERN_INFO "R  [0x%03X]     %016llX "
				       "%016llX %016llX %016llX %016llX %p", i,
				       (unsigned long long)le64_to_cpu(u1->a),
				       (unsigned long long)le64_to_cpu(u1->b),
				       (unsigned long long)le64_to_cpu(u1->c),
				       (unsigned long long)le64_to_cpu(u1->d),
				       (unsigned long long)buffer_info->dma,
				       buffer_info->skb);

				if (netif_msg_pktdata(adapter))
					print_hex_dump(KERN_INFO, "",
						       DUMP_PREFIX_ADDRESS,
						       16, 1,
						       phys_to_virt(buffer_info->dma),
						       adapter->rx_ps_bsize0, true);
			}

			if (i == rx_ring->next_to_use)
				printk(KERN_CONT " NTU\n");
			else if (i == rx_ring->next_to_clean)
				printk(KERN_CONT " NTC\n");
			else
				printk(KERN_CONT "\n");
		}
		break;
	default:
		/*
		 * Legacy Receive Descriptor Format
		 *
		 *   +-----------------------------------------------------+
		 *   |                Buffer Address [63:0]                |
		 *   +-----------------------------------------------------+
		 *   | VLAN Tag | Errors | Status 0 | Packet csum | Length |
		 *   +-----------------------------------------------------+
		 *   63       48 47    40 39      32 31         16 15      0
		 */
		printk(KERN_INFO "Rl[desc]     [address 63:0  ] "
		       "[vl er S cks ln] [bi->dma       ] [bi->skb] "
		       "<-- Legacy format\n");
		for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
			rx_desc = E1000_RX_DESC(*rx_ring, i);
			buffer_info = &rx_ring->buffer_info[i];
			u0 = (struct my_u0 *)rx_desc;
			printk(KERN_INFO "Rl[0x%03X]    %016llX %016llX "
			       "%016llX %p", i,
			       (unsigned long long)le64_to_cpu(u0->a),
			       (unsigned long long)le64_to_cpu(u0->b),
			       (unsigned long long)buffer_info->dma,
			       buffer_info->skb);
			if (i == rx_ring->next_to_use)
				printk(KERN_CONT " NTU\n");
			else if (i == rx_ring->next_to_clean)
				printk(KERN_CONT " NTC\n");
			else
				printk(KERN_CONT "\n");

			if (netif_msg_pktdata(adapter))
				print_hex_dump(KERN_INFO, "",
					       DUMP_PREFIX_ADDRESS,
					       16, 1,
					       phys_to_virt(buffer_info->dma),
					       adapter->rx_buffer_len, true);
		}
		break;
	}

exit:
	return;
}
/**
 * e1000_desc_unused - calculate if we have unused descriptors
 **/
static int e1000_desc_unused(struct e1000_ring *ring)
{
	if (ring->next_to_clean > ring->next_to_use)
		return ring->next_to_clean - ring->next_to_use - 1;

	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}
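
/*
 * Worked example for e1000_desc_unused(): with count = 256,
 * next_to_clean = 10 and next_to_use = 200, the second branch gives
 * 256 + 10 - 200 - 1 = 65 free descriptors.  One slot is always kept
 * unused so that next_to_use == next_to_clean unambiguously means
 * "ring empty" rather than "ring full".
 */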
/**
 * e1000_receive_skb - helper function to handle Rx indications
 * @adapter: board private structure
 * @netdev: pointer to netdev struct
 * @status: descriptor status field as written by hardware
 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
 * @skb: pointer to sk_buff to be indicated to stack
 **/
static void e1000_receive_skb(struct e1000_adapter *adapter,
			      struct net_device *netdev, struct sk_buff *skb,
			      u8 status, __le16 vlan)
{
	u16 tag = le16_to_cpu(vlan);

	skb->protocol = eth_type_trans(skb, netdev);

	if (status & E1000_RXD_STAT_VP)
		__vlan_hwaccel_put_tag(skb, tag);

	napi_gro_receive(&adapter->napi, skb);
}
/**
 * e1000_rx_checksum - Receive Checksum Offload
 * @adapter: board private structure
 * @status_err: receive descriptor status and error fields
 * @csum: receive descriptor csum field
 * @sk_buff: socket buffer with received data
 **/
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
			      u32 csum, struct sk_buff *skb)
{
	u16 status = (u16)status_err;
	u8 errors = (u8)(status_err >> 24);

	skb_checksum_none_assert(skb);

	/* Ignore Checksum bit is set */
	if (status & E1000_RXD_STAT_IXSM)
		return;
	/* TCP/UDP checksum error bit is set */
	if (errors & E1000_RXD_ERR_TCPE) {
		/* let the stack verify checksum errors */
		adapter->hw_csum_err++;
		return;
	}

	/* TCP/UDP Checksum has not been calculated */
	if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
		return;

	/* It must be a TCP or UDP packet with a valid checksum */
	if (status & E1000_RXD_STAT_TCPCS) {
		/* TCP checksum is good */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		/*
		 * IP fragment with UDP payload
		 * Hardware complements the payload checksum, so we undo it
		 * and then put the value in host order for further stack use.
		 */
		__sum16 sum = (__force __sum16)htons(csum);
		skb->csum = csum_unfold(~sum);
		skb->ip_summed = CHECKSUM_COMPLETE;
	}
	adapter->hw_csum_good++;
}
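
/*
 * Note on the CHECKSUM_COMPLETE path above: the hardware reports the
 * ones'-complement of the payload checksum, so the driver inverts it and
 * uses csum_unfold() to hand the stack a raw __wsum it can continue
 * folding into, exactly as if the CPU had summed the payload itself.
 */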
/**
 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
 * @adapter: address of board private structure
 **/
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_rx_desc *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = adapter->rx_buffer_len;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto map_skb;
		}

		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
		if (!skb) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
map_skb:
		buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
						  adapter->rx_buffer_len,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
			dev_err(&pdev->dev, "Rx DMA map failed\n");
			adapter->rx_dma_failed++;
			break;
		}

		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
			/*
			 * Force memory writes to complete before letting h/w
			 * know there are new descriptors to fetch.  (Only
			 * applicable for weak-ordered memory model archs,
			 * such as IA-64).
			 */
			wmb();
			writel(i, adapter->hw.hw_addr + rx_ring->tail);
		}
		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	rx_ring->next_to_use = i;
}
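
/*
 * A note on the tail-bump cadence above: E1000_RX_BUFFER_WRITE (defined
 * elsewhere in this driver, 16 at the time of writing) must be a power of
 * two, so the `!(i & (E1000_RX_BUFFER_WRITE - 1))` test fires once every
 * 16 descriptors.  This batches the MMIO write to the RDT tail register
 * instead of paying for an uncached write per buffer.
 */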
/**
 * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/
static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
				      int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_rx_desc_packet_split *rx_desc;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct sk_buff *skb;
	unsigned int i, j;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		rx_desc = E1000_RX_DESC_PS(*rx_ring, i);

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			ps_page = &buffer_info->ps_pages[j];
			if (j >= adapter->rx_ps_pages) {
				/* all unused desc entries get hw null ptr */
				rx_desc->read.buffer_addr[j + 1] =
				    ~cpu_to_le64(0);
				continue;
			}
			if (!ps_page->page) {
				ps_page->page = alloc_page(GFP_ATOMIC);
				if (!ps_page->page) {
					adapter->alloc_rx_buff_failed++;
					goto no_buffers;
				}
				ps_page->dma = dma_map_page(&pdev->dev,
							    ps_page->page,
							    0, PAGE_SIZE,
							    DMA_FROM_DEVICE);
				if (dma_mapping_error(&pdev->dev,
						      ps_page->dma)) {
					dev_err(&adapter->pdev->dev,
						"Rx DMA page map failed\n");
					adapter->rx_dma_failed++;
					goto no_buffers;
				}
			}
			/*
			 * Refresh the desc even if buffer_addrs
			 * didn't change because each write-back
			 * erases this info.
			 */
			rx_desc->read.buffer_addr[j + 1] =
			    cpu_to_le64(ps_page->dma);
		}

		skb = netdev_alloc_skb_ip_align(netdev,
						adapter->rx_ps_bsize0);
		if (!skb) {
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
		buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
						  adapter->rx_ps_bsize0,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
			dev_err(&pdev->dev, "Rx DMA map failed\n");
			adapter->rx_dma_failed++;
			/* cleanup skb */
			dev_kfree_skb_any(skb);
			buffer_info->skb = NULL;
			break;
		}

		rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);

		if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
			/*
			 * Force memory writes to complete before letting h/w
			 * know there are new descriptors to fetch.  (Only
			 * applicable for weak-ordered memory model archs,
			 * such as IA-64).
			 */
			wmb();
			writel(i << 1, adapter->hw.hw_addr + rx_ring->tail);
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

no_buffers:
	rx_ring->next_to_use = i;
}
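
/*
 * The `i << 1` in the tail write above reflects descriptor size: a
 * packet-split descriptor (union e1000_rx_desc_packet_split, 32 bytes) is
 * twice the size of a legacy 16-byte descriptor, so the tail offset the
 * hardware expects advances two legacy-sized units per ring entry.
 */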
/**
 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
 * @adapter: address of board private structure
 * @cleaned_count: number of buffers to allocate this pass
 **/
static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
					 int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = 256 - 16 /* for skb_reserve */;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto check_page;
		}

		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
check_page:
		/* allocate a new page if necessary */
		if (!buffer_info->page) {
			buffer_info->page = alloc_page(GFP_ATOMIC);
			if (unlikely(!buffer_info->page)) {
				adapter->alloc_rx_buff_failed++;
				break;
			}
		}

		if (!buffer_info->dma)
			buffer_info->dma = dma_map_page(&pdev->dev,
							buffer_info->page, 0,
							PAGE_SIZE,
							DMA_FROM_DEVICE);

		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count))
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/*
		 * Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->tail);
	}
}
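
/*
 * Unlike the other allocators, the jumbo path nudges the index back one
 * entry (the `i--` above) before writing the tail, so RDT always names the
 * last initialized descriptor rather than the next one to be filled.
 */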
/**
 * e1000_clean_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       int *work_done, int work_to_do)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_rx_desc *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	u32 length;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	while (rx_desc->status & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;
		u8 status;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		rmb(); /* read descriptor and rx_buffer_info after status DD */

		status = rx_desc->status;
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		prefetch(skb->data - NET_IP_ALIGN);

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_single(&pdev->dev,
				 buffer_info->dma,
				 adapter->rx_buffer_len,
				 DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->length);

		/*
		 * !EOP means multiple descriptors were used to store a single
		 * packet, if that's the case we need to toss it.  In fact, we
		 * need to toss every packet with the EOP bit clear and the
		 * next frame that _does_ have the EOP bit set, as it is by
		 * definition only a frame fragment
		 */
		if (unlikely(!(status & E1000_RXD_STAT_EOP)))
			adapter->flags2 |= FLAG2_IS_DISCARDING;

		if (adapter->flags2 & FLAG2_IS_DISCARDING) {
			/* All receives must fit into a single buffer */
			e_dbg("Receive packet consumed multiple buffers\n");
			/* recycle */
			buffer_info->skb = skb;
			if (status & E1000_RXD_STAT_EOP)
				adapter->flags2 &= ~FLAG2_IS_DISCARDING;
			goto next_desc;
		}

		if (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
			/* recycle */
			buffer_info->skb = skb;
			goto next_desc;
		}

		/* adjust length to remove Ethernet CRC */
		if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
			length -= 4;

		total_rx_bytes += length;
		total_rx_packets++;

		/*
		 * code added for copybreak, this should improve
		 * performance for small packets with large amounts
		 * of reassembly being done in the stack
		 */
		if (length < copybreak) {
			struct sk_buff *new_skb =
			    netdev_alloc_skb_ip_align(netdev, length);
			if (new_skb) {
				skb_copy_to_linear_data_offset(new_skb,
							       -NET_IP_ALIGN,
							       (skb->data -
								NET_IP_ALIGN),
							       (length +
								NET_IP_ALIGN));
				/* save the skb in buffer_info as good */
				buffer_info->skb = skb;
				skb = new_skb;
			}
			/* else just continue with the old one */
		}
		/* end copybreak code */
		skb_put(skb, length);

		/* Receive Checksum Offload */
		e1000_rx_checksum(adapter,
				  (u32)(status) |
				  ((u32)(rx_desc->errors) << 24),
				  le16_to_cpu(rx_desc->csum), skb);

		e1000_receive_skb(adapter, netdev, skb, status,
				  rx_desc->special);

next_desc:
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
			adapter->alloc_rx_buf(adapter, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, cleaned_count);

	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	return cleaned;
}
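
/*
 * copybreak, referenced above, is a module parameter declared elsewhere in
 * this file (256 bytes by default at the time of writing): receives smaller
 * than the threshold are copied into a fresh, right-sized skb and the
 * original DMA buffer is recycled in place, which is cheaper than handing a
 * full-sized buffer to the stack for a tiny frame.
 */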
static void e1000_put_txbuf(struct e1000_adapter *adapter,
			    struct e1000_buffer *buffer_info)
{
	if (buffer_info->dma) {
		if (buffer_info->mapped_as_page)
			dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
				       buffer_info->length, DMA_TO_DEVICE);
		else
			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
					 buffer_info->length, DMA_TO_DEVICE);
		buffer_info->dma = 0;
	}
	if (buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
	buffer_info->time_stamp = 0;
}
static void e1000_print_hw_hang(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
						     struct e1000_adapter,
						     print_hang_task);
	struct e1000_ring *tx_ring = adapter->tx_ring;
	unsigned int i = tx_ring->next_to_clean;
	unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
	struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);
	struct e1000_hw *hw = &adapter->hw;
	u16 phy_status, phy_1000t_status, phy_ext_status;
	u16 pci_status;

	if (test_bit(__E1000_DOWN, &adapter->state))
		return;

	e1e_rphy(hw, PHY_STATUS, &phy_status);
	e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status);
	e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status);

	pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status);

	/* detected Hardware unit hang */
	e_err("Detected Hardware Unit Hang:\n"
	      "  TDH                  <%x>\n"
	      "  TDT                  <%x>\n"
	      "  next_to_use          <%x>\n"
	      "  next_to_clean        <%x>\n"
	      "buffer_info[next_to_clean]:\n"
	      "  time_stamp           <%lx>\n"
	      "  next_to_watch        <%x>\n"
	      "  jiffies              <%lx>\n"
	      "  next_to_watch.status <%x>\n"
	      "MAC Status             <%x>\n"
	      "PHY Status             <%x>\n"
	      "PHY 1000BASE-T Status  <%x>\n"
	      "PHY Extended Status    <%x>\n"
	      "PCI Status             <%x>\n",
	      readl(adapter->hw.hw_addr + tx_ring->head),
	      readl(adapter->hw.hw_addr + tx_ring->tail),
	      tx_ring->next_to_use,
	      tx_ring->next_to_clean,
	      tx_ring->buffer_info[eop].time_stamp,
	      eop,
	      jiffies,
	      eop_desc->upper.fields.status,
	      er32(STATUS),
	      phy_status,
	      phy_1000t_status,
	      phy_ext_status,
	      pci_status);
}
/**
 * e1000_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_tx_desc *tx_desc, *eop_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i, eop;
	unsigned int count = 0;
	unsigned int total_tx_bytes = 0, total_tx_packets = 0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = E1000_TX_DESC(*tx_ring, eop);

	while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		bool cleaned = false;
		rmb(); /* read buffer_info after eop_desc */
		for (; !cleaned; count++) {
			tx_desc = E1000_TX_DESC(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);

			if (cleaned) {
				total_tx_packets += buffer_info->segs;
				total_tx_bytes += buffer_info->bytecount;
			}

			e1000_put_txbuf(adapter, buffer_info);
			tx_desc->upper.data = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}

		if (i == tx_ring->next_to_use)
			break;
		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = E1000_TX_DESC(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD 32
	if (count && netif_carrier_ok(netdev) &&
	    e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();

		if (netif_queue_stopped(netdev) &&
		    !(test_bit(__E1000_DOWN, &adapter->state))) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
	}

	if (adapter->detect_tx_hung) {
		/*
		 * Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i
		 */
		adapter->detect_tx_hung = 0;
		if (tx_ring->buffer_info[i].time_stamp &&
		    time_after(jiffies, tx_ring->buffer_info[i].time_stamp
			       + (adapter->tx_timeout_factor * HZ)) &&
		    !(er32(STATUS) & E1000_STATUS_TXOFF)) {
			schedule_work(&adapter->print_hang_task);
			netif_stop_queue(netdev);
		}
	}
	adapter->total_tx_bytes += total_tx_bytes;
	adapter->total_tx_packets += total_tx_packets;
	return count < tx_ring->count;
}
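
/*
 * The smp_mb() in the wake path above pairs with the queue-stop check in
 * the transmit path: it ensures the updated next_to_clean is visible
 * before netif_queue_stopped() is tested, closing the race where the xmit
 * side stops the queue just as the cleaner finishes and would otherwise
 * never wake it again.
 */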
/**
 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
				  int *work_done, int work_to_do)
{
	struct e1000_hw *hw = &adapter->hw;
	union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info, *next_buffer;
	struct e1000_ps_page *ps_page;
	struct sk_buff *skb;
	unsigned int i, j;
	u32 length, staterr;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
	buffer_info = &rx_ring->buffer_info[i];

	while (staterr & E1000_RXD_STAT_DD) {
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		skb = buffer_info->skb;
		rmb(); /* read descriptor and rx_buffer_info after status DD */

		/* in the packet split case this is header only */
		prefetch(skb->data - NET_IP_ALIGN);

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_single(&pdev->dev, buffer_info->dma,
				 adapter->rx_ps_bsize0, DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		/* see !EOP comment in other Rx routine */
		if (!(staterr & E1000_RXD_STAT_EOP))
			adapter->flags2 |= FLAG2_IS_DISCARDING;

		if (adapter->flags2 & FLAG2_IS_DISCARDING) {
			e_dbg("Packet Split buffers didn't pick up the full "
			      "packet\n");
			dev_kfree_skb_irq(skb);
			if (staterr & E1000_RXD_STAT_EOP)
				adapter->flags2 &= ~FLAG2_IS_DISCARDING;
			goto next_desc;
		}

		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		length = le16_to_cpu(rx_desc->wb.middle.length0);

		if (!length) {
			e_dbg("Last part of the packet spanning multiple "
			      "descriptors\n");
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		/* Good Receive */
		skb_put(skb, length);

		{
			/*
			 * this looks ugly, but it seems compiler issues make
			 * it more efficient than reusing j
			 */
			int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);

			/*
			 * page alloc/put takes too long and effects small
			 * packet throughput, so unsplit small packets and
			 * save the alloc/put.
			 * only valid in softirq (napi) context to call kmap_*
			 */
			if (l1 && (l1 <= copybreak) &&
			    ((length + l1) <= adapter->rx_ps_bsize0)) {
				u8 *vaddr;

				ps_page = &buffer_info->ps_pages[0];

				/*
				 * there is no documentation about how to call
				 * kmap_atomic, so we can't hold the mapping
				 * very long
				 */
				dma_sync_single_for_cpu(&pdev->dev,
							ps_page->dma,
							PAGE_SIZE,
							DMA_FROM_DEVICE);
				vaddr = kmap_atomic(ps_page->page,
						    KM_SKB_DATA_SOFTIRQ);
				memcpy(skb_tail_pointer(skb), vaddr, l1);
				kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
				dma_sync_single_for_device(&pdev->dev,
							   ps_page->dma,
							   PAGE_SIZE,
							   DMA_FROM_DEVICE);

				/* remove the CRC */
				if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
					l1 -= 4;

				skb_put(skb, l1);
				goto copydone;
			} /* if */
		}

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			length = le16_to_cpu(rx_desc->wb.upper.length[j]);
			if (!length)
				break;

			ps_page = &buffer_info->ps_pages[j];
			dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
				       DMA_FROM_DEVICE);
			ps_page->dma = 0;
			skb_fill_page_desc(skb, j, ps_page->page, 0, length);
			ps_page->page = NULL;
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
		}

		/* strip the ethernet crc, problem is we're using pages now so
		 * this whole operation can get a little cpu intensive
		 */
		if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
			pskb_trim(skb, skb->len - 4);

copydone:
		total_rx_bytes += skb->len;
		total_rx_packets++;

		e1000_rx_checksum(adapter, staterr, le16_to_cpu(
			rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);

		if (rx_desc->wb.upper.header_status &
		    cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
			adapter->rx_hdr_split++;

		e1000_receive_skb(adapter, netdev, skb,
				  staterr, rx_desc->wb.middle.vlan);

next_desc:
		rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
		buffer_info->skb = NULL;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
			adapter->alloc_rx_buf(adapter, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, cleaned_count);

	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	return cleaned;
}
/**
 * e1000_consume_page - helper function
 **/
static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
			       u16 length)
{
	bi->page = NULL;
	skb->len += length;
	skb->data_len += length;
	skb->truesize += length;
}
/**
 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
				     int *work_done, int work_to_do)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_rx_desc *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	u32 length;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	while (rx_desc->status & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;
		u8 status;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		rmb(); /* read descriptor and rx_buffer_info after status DD */

		status = rx_desc->status;
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		++i;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE,
			       DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->length);

		/* errors is only valid for DD + EOP descriptors */
		if (unlikely((status & E1000_RXD_STAT_EOP) &&
			     (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
			/* recycle both page and skb */
			buffer_info->skb = skb;
			/* an error means any chain goes out the window too */
			if (rx_ring->rx_skb_top)
				dev_kfree_skb_irq(rx_ring->rx_skb_top);
			rx_ring->rx_skb_top = NULL;
			goto next_desc;
		}

#define rxtop (rx_ring->rx_skb_top)
		if (!(status & E1000_RXD_STAT_EOP)) {
			/* this descriptor is only the beginning (or middle) */
			if (!rxtop) {
				/* this is the beginning of a chain */
				rxtop = skb;
				skb_fill_page_desc(rxtop, 0, buffer_info->page,
						   0, length);
			} else {
				/* this is the middle of a chain */
				skb_fill_page_desc(rxtop,
						   skb_shinfo(rxtop)->nr_frags,
						   buffer_info->page, 0,
						   length);
				/* re-use the skb, only consumed the page */
				buffer_info->skb = skb;
			}
			e1000_consume_page(buffer_info, rxtop, length);
			goto next_desc;
		} else {
			if (rxtop) {
				/* end of the chain */
				skb_fill_page_desc(rxtop,
						   skb_shinfo(rxtop)->nr_frags,
						   buffer_info->page, 0,
						   length);
				/* re-use the current skb, we only consumed
				 * the page */
				buffer_info->skb = skb;
				skb = rxtop;
				rxtop = NULL;
				e1000_consume_page(buffer_info, skb, length);
			} else {
				/* no chain, got EOP, this buf is the packet
				 * copybreak to save the put_page/alloc_page */
				if (length <= copybreak &&
				    skb_tailroom(skb) >= length) {
					u8 *vaddr;
					vaddr = kmap_atomic(buffer_info->page,
							    KM_SKB_DATA_SOFTIRQ);
					memcpy(skb_tail_pointer(skb), vaddr,
					       length);
					kunmap_atomic(vaddr,
						      KM_SKB_DATA_SOFTIRQ);
					/* re-use the page, so don't erase
					 * buffer_info->page */
					skb_put(skb, length);
				} else {
					skb_fill_page_desc(skb, 0,
							   buffer_info->page,
							   0, length);
					e1000_consume_page(buffer_info, skb,
							   length);
				}
			}
		}

		/* Receive Checksum Offload XXX recompute due to CRC strip? */
		e1000_rx_checksum(adapter,
				  (u32)(status) |
				  ((u32)(rx_desc->errors) << 24),
				  le16_to_cpu(rx_desc->csum), skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		/* eth type trans needs skb->data to point to something */
		if (!pskb_may_pull(skb, ETH_HLEN)) {
			e_err("pskb_may_pull failed.\n");
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		e1000_receive_skb(adapter, netdev, skb, status,
				  rx_desc->special);

next_desc:
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
			adapter->alloc_rx_buf(adapter, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, cleaned_count);

	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	return cleaned;
}
/**
 * e1000_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 **/
static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
{
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int i, j;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->dma) {
			if (adapter->clean_rx == e1000_clean_rx_irq)
				dma_unmap_single(&pdev->dev, buffer_info->dma,
						 adapter->rx_buffer_len,
						 DMA_FROM_DEVICE);
			else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
				dma_unmap_page(&pdev->dev, buffer_info->dma,
					       PAGE_SIZE,
					       DMA_FROM_DEVICE);
			else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
				dma_unmap_single(&pdev->dev, buffer_info->dma,
						 adapter->rx_ps_bsize0,
						 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
		}

		if (buffer_info->page) {
			put_page(buffer_info->page);
			buffer_info->page = NULL;
		}

		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			ps_page = &buffer_info->ps_pages[j];
			if (!ps_page->page)
				continue;
			dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
				       DMA_FROM_DEVICE);
			ps_page->dma = 0;
			put_page(ps_page->page);
			ps_page->page = NULL;
		}
	}

	/* there also may be some cached data from a chained receive */
	if (rx_ring->rx_skb_top) {
		dev_kfree_skb(rx_ring->rx_skb_top);
		rx_ring->rx_skb_top = NULL;
	}

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	adapter->flags2 &= ~FLAG2_IS_DISCARDING;

	writel(0, adapter->hw.hw_addr + rx_ring->head);
	writel(0, adapter->hw.hw_addr + rx_ring->tail);
}
static void e1000e_downshift_workaround(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
					struct e1000_adapter, downshift_task);

	if (test_bit(__E1000_DOWN, &adapter->state))
		return;

	e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
}
/**
 * e1000_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t e1000_intr_msi(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = er32(ICR);

	/*
	 * read ICR disables interrupts using IAM
	 */

	if (icr & E1000_ICR_LSC) {
		hw->mac.get_link_status = 1;
		/*
		 * ICH8 workaround-- Call gig speed drop workaround on cable
		 * disconnect (LSC) before accessing any PHY registers
		 */
		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
		    (!(er32(STATUS) & E1000_STATUS_LU)))
			schedule_work(&adapter->downshift_task);

		/*
		 * 80003ES2LAN workaround-- For packet buffer work-around on
		 * link down event; disable receives here in the ISR and reset
		 * adapter in watchdog
		 */
		if (netif_carrier_ok(netdev) &&
		    adapter->flags & FLAG_RX_NEEDS_RESTART) {
			/* disable receives */
			u32 rctl = er32(RCTL);
			ew32(RCTL, rctl & ~E1000_RCTL_EN);
			adapter->flags |= FLAG_RX_RESTART_NOW;
		}
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (napi_schedule_prep(&adapter->napi)) {
		adapter->total_tx_bytes = 0;
		adapter->total_tx_packets = 0;
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->napi);
	}

	return IRQ_HANDLED;
}
/**
 * e1000_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t e1000_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl, icr = er32(ICR);

	if (!icr || test_bit(__E1000_DOWN, &adapter->state))
		return IRQ_NONE; /* Not our interrupt */

	/*
	 * IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt
	 */
	if (!(icr & E1000_ICR_INT_ASSERTED))
		return IRQ_NONE;

	/*
	 * Interrupt Auto-Mask...upon reading ICR,
	 * interrupts are masked.  No need for the
	 * IMC write
	 */

	if (icr & E1000_ICR_LSC) {
		hw->mac.get_link_status = 1;
		/*
		 * ICH8 workaround-- Call gig speed drop workaround on cable
		 * disconnect (LSC) before accessing any PHY registers
		 */
		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
		    (!(er32(STATUS) & E1000_STATUS_LU)))
			schedule_work(&adapter->downshift_task);

		/*
		 * 80003ES2LAN workaround--
		 * For packet buffer work-around on link down event;
		 * disable receives here in the ISR and
		 * reset adapter in watchdog
		 */
		if (netif_carrier_ok(netdev) &&
		    (adapter->flags & FLAG_RX_NEEDS_RESTART)) {
			/* disable receives */
			rctl = er32(RCTL);
			ew32(RCTL, rctl & ~E1000_RCTL_EN);
			adapter->flags |= FLAG_RX_RESTART_NOW;
		}
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (napi_schedule_prep(&adapter->napi)) {
		adapter->total_tx_bytes = 0;
		adapter->total_tx_packets = 0;
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->napi);
	}

	return IRQ_HANDLED;
}
static irqreturn_t e1000_msix_other(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = er32(ICR);

	if (!(icr & E1000_ICR_INT_ASSERTED)) {
		if (!test_bit(__E1000_DOWN, &adapter->state))
			ew32(IMS, E1000_IMS_OTHER);
		return IRQ_NONE;
	}

	if (icr & adapter->eiac_mask)
		ew32(ICS, (icr & adapter->eiac_mask));

	if (icr & E1000_ICR_OTHER) {
		if (!(icr & E1000_ICR_LSC))
			goto no_link_interrupt;
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

no_link_interrupt:
	if (!test_bit(__E1000_DOWN, &adapter->state))
		ew32(IMS, E1000_IMS_LSC | E1000_IMS_OTHER);

	return IRQ_HANDLED;
}
static irqreturn_t e1000_intr_msix_tx(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *tx_ring = adapter->tx_ring;

	adapter->total_tx_bytes = 0;
	adapter->total_tx_packets = 0;

	if (!e1000_clean_tx_irq(adapter))
		/* Ring was not completely cleaned, so fire another interrupt */
		ew32(ICS, tx_ring->ims_val);

	return IRQ_HANDLED;
}
static irqreturn_t e1000_intr_msix_rx(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);

	/* Write the ITR value calculated at the end of the
	 * previous interrupt.
	 */
	if (adapter->rx_ring->set_itr) {
		writel(1000000000 / (adapter->rx_ring->itr_val * 256),
		       adapter->hw.hw_addr + adapter->rx_ring->itr_register);
		adapter->rx_ring->set_itr = 0;
	}

	if (napi_schedule_prep(&adapter->napi)) {
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->napi);
	}
	return IRQ_HANDLED;
}
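
/*
 * ITR register math, for reference: the throttle registers take the
 * minimum inter-interrupt interval in 256 ns units, while itr_val is kept
 * in interrupts/second, hence the 1000000000 / (itr_val * 256) conversion
 * above.  E.g. itr_val = 20000 ints/s writes 1e9 / (20000 * 256) ~= 195,
 * i.e. roughly a 50 us interval.
 */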
/**
 * e1000_configure_msix - Configure MSI-X hardware
 *
 * e1000_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void e1000_configure_msix(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	int vector = 0;
	u32 ctrl_ext, ivar = 0;

	adapter->eiac_mask = 0;

	/* Workaround issue with spurious interrupts on 82574 in MSI-X mode */
	if (hw->mac.type == e1000_82574) {
		u32 rfctl = er32(RFCTL);
		rfctl |= E1000_RFCTL_ACK_DIS;
		ew32(RFCTL, rfctl);
	}

#define E1000_IVAR_INT_ALLOC_VALID	0x8
	/* Configure Rx vector */
	rx_ring->ims_val = E1000_IMS_RXQ0;
	adapter->eiac_mask |= rx_ring->ims_val;
	if (rx_ring->itr_val)
		writel(1000000000 / (rx_ring->itr_val * 256),
		       hw->hw_addr + rx_ring->itr_register);
	else
		writel(1, hw->hw_addr + rx_ring->itr_register);
	ivar = E1000_IVAR_INT_ALLOC_VALID | vector;

	/* Configure Tx vector */
	tx_ring->ims_val = E1000_IMS_TXQ0;
	vector++;
	if (tx_ring->itr_val)
		writel(1000000000 / (tx_ring->itr_val * 256),
		       hw->hw_addr + tx_ring->itr_register);
	else
		writel(1, hw->hw_addr + tx_ring->itr_register);
	adapter->eiac_mask |= tx_ring->ims_val;
	ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 8);

	/* set vector for Other Causes, e.g. link changes */
	vector++;
	ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 16);
	if (rx_ring->itr_val)
		writel(1000000000 / (rx_ring->itr_val * 256),
		       hw->hw_addr + E1000_EITR_82574(vector));
	else
		writel(1, hw->hw_addr + E1000_EITR_82574(vector));

	/* Cause Tx interrupts on every write back */
	ivar |= (1 << 31);

	ew32(IVAR, ivar);

	/* enable MSI-X PBA support */
	ctrl_ext = er32(CTRL_EXT);
	ctrl_ext |= E1000_CTRL_EXT_PBA_CLR;

	/* Auto-Mask Other interrupts upon ICR read */
#define E1000_EIAC_MASK_82574   0x01F00000
	ew32(IAM, ~E1000_EIAC_MASK_82574 | E1000_IMS_OTHER);
	ctrl_ext |= E1000_CTRL_EXT_EIAME;
	ew32(CTRL_EXT, ctrl_ext);
	e1e_flush();
}
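
/*
 * Resulting IVAR layout, assuming vectors 0/1/2 for Rx/Tx/Other as
 * assigned above: bits 3:0 = 0x8|0 (Rx), bits 11:8 = 0x8|1 (Tx),
 * bits 19:16 = 0x8|2 (Other) and bit 31 enabling a Tx interrupt on every
 * write back, i.e. IVAR = 0x800A0908.
 */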
void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & FLAG_MSI_ENABLED) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~FLAG_MSI_ENABLED;
	}
}
/**
 * e1000e_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
{
	int err;
	int i;

	switch (adapter->int_mode) {
	case E1000E_INT_MODE_MSIX:
		if (adapter->flags & FLAG_HAS_MSIX) {
			adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */
			adapter->msix_entries = kcalloc(adapter->num_vectors,
						      sizeof(struct msix_entry),
						      GFP_KERNEL);
			if (adapter->msix_entries) {
				for (i = 0; i < adapter->num_vectors; i++)
					adapter->msix_entries[i].entry = i;

				err = pci_enable_msix(adapter->pdev,
						      adapter->msix_entries,
						      adapter->num_vectors);
				if (err == 0)
					return;
			}
			/* MSI-X failed, so fall through and try MSI */
			e_err("Failed to initialize MSI-X interrupts.  "
			      "Falling back to MSI interrupts.\n");
			e1000e_reset_interrupt_capability(adapter);
		}
		adapter->int_mode = E1000E_INT_MODE_MSI;
		/* Fall through */
	case E1000E_INT_MODE_MSI:
		if (!pci_enable_msi(adapter->pdev)) {
			adapter->flags |= FLAG_MSI_ENABLED;
		} else {
			adapter->int_mode = E1000E_INT_MODE_LEGACY;
			e_err("Failed to initialize MSI interrupts.  Falling "
			      "back to legacy interrupts.\n");
		}
		/* Fall through */
	case E1000E_INT_MODE_LEGACY:
		/* Don't do anything; this is the system default */
		break;
	}

	/* store the number of vectors being used */
	adapter->num_vectors = 1;
}
/**
 * e1000_request_msix - Initialize MSI-X interrupts
 *
 * e1000_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int e1000_request_msix(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err = 0, vector = 0;

	if (strlen(netdev->name) < (IFNAMSIZ - 5))
		snprintf(adapter->rx_ring->name,
			 sizeof(adapter->rx_ring->name) - 1,
			 "%s-rx-0", netdev->name);
	else
		memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
	err = request_irq(adapter->msix_entries[vector].vector,
			  e1000_intr_msix_rx, 0, adapter->rx_ring->name,
			  netdev);
	if (err)
		goto out;
	adapter->rx_ring->itr_register = E1000_EITR_82574(vector);
	adapter->rx_ring->itr_val = adapter->itr;
	vector++;

	if (strlen(netdev->name) < (IFNAMSIZ - 5))
		snprintf(adapter->tx_ring->name,
			 sizeof(adapter->tx_ring->name) - 1,
			 "%s-tx-0", netdev->name);
	else
		memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
	err = request_irq(adapter->msix_entries[vector].vector,
			  e1000_intr_msix_tx, 0, adapter->tx_ring->name,
			  netdev);
	if (err)
		goto out;
	adapter->tx_ring->itr_register = E1000_EITR_82574(vector);
	adapter->tx_ring->itr_val = adapter->itr;
	vector++;

	err = request_irq(adapter->msix_entries[vector].vector,
			  e1000_msix_other, 0, netdev->name, netdev);
	if (err)
		goto out;

	e1000_configure_msix(adapter);
	return 0;
out:
	return err;
}
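
/*
 * The per-vector names built above ("<ifname>-rx-0", "<ifname>-tx-0") are
 * what request_irq() exposes in /proc/interrupts, which makes it easy to
 * confirm the MSI-X vector assignment from userspace.
 */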
/**
 * e1000_request_irq - initialize interrupts
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int e1000_request_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	if (adapter->msix_entries) {
		err = e1000_request_msix(adapter);
		if (!err)
			return err;
		/* fall back to MSI */
		e1000e_reset_interrupt_capability(adapter);
		adapter->int_mode = E1000E_INT_MODE_MSI;
		e1000e_set_interrupt_capability(adapter);
	}
	if (adapter->flags & FLAG_MSI_ENABLED) {
		err = request_irq(adapter->pdev->irq, e1000_intr_msi, 0,
				  netdev->name, netdev);
		if (!err)
			return err;

		/* fall back to legacy interrupt */
		e1000e_reset_interrupt_capability(adapter);
		adapter->int_mode = E1000E_INT_MODE_LEGACY;
	}

	err = request_irq(adapter->pdev->irq, e1000_intr, IRQF_SHARED,
			  netdev->name, netdev);
	if (err)
		e_err("Unable to allocate interrupt, Error: %d\n", err);

	return err;
}
static void e1000_free_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->msix_entries) {
		int vector = 0;

		free_irq(adapter->msix_entries[vector].vector, netdev);
		vector++;

		free_irq(adapter->msix_entries[vector].vector, netdev);
		vector++;

		/* Other Causes interrupt vector */
		free_irq(adapter->msix_entries[vector].vector, netdev);
		return;
	}

	free_irq(adapter->pdev->irq, netdev);
}
/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 **/
static void e1000_irq_disable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(IMC, ~0);
	if (adapter->msix_entries)
		ew32(EIAC_82574, 0);
	e1e_flush();

	if (adapter->msix_entries) {
		int i;
		for (i = 0; i < adapter->num_vectors; i++)
			synchronize_irq(adapter->msix_entries[i].vector);
	} else {
		synchronize_irq(adapter->pdev->irq);
	}
}
/**
 * e1000_irq_enable - Enable default interrupt generation settings
 **/
static void e1000_irq_enable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
		ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC);
	} else {
		ew32(IMS, IMS_ENABLE_MASK);
	}
	e1e_flush();
}
/**
 * e1000e_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * e1000e_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded. For AMT version (only with 82573)
 * of the f/w this means that the network i/f is open.
 **/
void e1000e_get_hw_control(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;
	u32 swsm;

	/* Let firmware know the driver has taken over */
	if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
		swsm = er32(SWSM);
		ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD);
	} else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
		ctrl_ext = er32(CTRL_EXT);
		ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
	}
}
/**
 * e1000e_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * e1000e_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded. For AMT version (only with 82573)
 * of the f/w this means that the network i/f is closed.
 **/
void e1000e_release_hw_control(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;
	u32 swsm;

	/* Let firmware take over control of h/w */
	if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
		swsm = er32(SWSM);
		ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
	} else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
		ctrl_ext = er32(CTRL_EXT);
		ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
	}
}
/**
 * e1000_alloc_ring_dma - allocate memory for a ring structure
 **/
static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
				struct e1000_ring *ring)
{
	struct pci_dev *pdev = adapter->pdev;

	ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
					GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	return 0;
}
/**
 * e1000e_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
int e1000e_setup_tx_resources(struct e1000_adapter *adapter)
{
	struct e1000_ring *tx_ring = adapter->tx_ring;
	int err = -ENOMEM, size;

	size = sizeof(struct e1000_buffer) * tx_ring->count;
	tx_ring->buffer_info = vzalloc(size);
	if (!tx_ring->buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	err = e1000_alloc_ring_dma(adapter, tx_ring);
	if (err)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	return 0;
err:
	vfree(tx_ring->buffer_info);
	e_err("Unable to allocate memory for the transmit descriptor ring\n");
	return err;
}
/**
 * e1000e_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
int e1000e_setup_rx_resources(struct e1000_adapter *adapter)
{
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info;
	int i, size, desc_len, err = -ENOMEM;

	size = sizeof(struct e1000_buffer) * rx_ring->count;
	rx_ring->buffer_info = vzalloc(size);
	if (!rx_ring->buffer_info)
		goto err;

	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS,
						sizeof(struct e1000_ps_page),
						GFP_KERNEL);
		if (!buffer_info->ps_pages)
			goto err_pages;
	}

	desc_len = sizeof(union e1000_rx_desc_packet_split);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * desc_len;
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	err = e1000_alloc_ring_dma(adapter, rx_ring);
	if (err)
		goto err_pages;

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	rx_ring->rx_skb_top = NULL;

	return 0;

err_pages:
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		kfree(buffer_info->ps_pages);
	}
err:
	vfree(rx_ring->buffer_info);
	e_err("Unable to allocate memory for the receive descriptor ring\n");
	return err;
}
/**
 * e1000_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 **/
static void e1000_clean_tx_ring(struct e1000_adapter *adapter)
{
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_buffer *buffer_info;
	unsigned int i;
	unsigned int size;

	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		e1000_put_txbuf(adapter, buffer_info);
	}

	size = sizeof(struct e1000_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	writel(0, adapter->hw.hw_addr + tx_ring->head);
	writel(0, adapter->hw.hw_addr + tx_ring->tail);
}
/**
 * e1000e_free_tx_resources - Free Tx Resources per Queue
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
void e1000e_free_tx_resources(struct e1000_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *tx_ring = adapter->tx_ring;

	e1000_clean_tx_ring(adapter);

	vfree(tx_ring->buffer_info);
	tx_ring->buffer_info = NULL;

	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
			  tx_ring->dma);
	tx_ring->desc = NULL;
}
/**
 * e1000e_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
void e1000e_free_rx_resources(struct e1000_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	int i;

	e1000_clean_rx_ring(adapter);

	for (i = 0; i < rx_ring->count; i++)
		kfree(rx_ring->buffer_info[i].ps_pages);

	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;

	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
			  rx_ring->dma);
	rx_ring->desc = NULL;
}
/**
 * e1000_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @itr_setting: current adapter->itr
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.  This functionality is controlled
 * by the InterruptThrottleRate module parameter.
 **/
static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
				     u16 itr_setting, int packets,
				     int bytes)
{
	unsigned int retval = itr_setting;

	if (packets == 0)
		goto update_itr_done;

	switch (itr_setting) {
	case lowest_latency:
		/* handle TSO and jumbo frames */
		if (bytes/packets > 8000)
			retval = bulk_latency;
		else if ((packets < 5) && (bytes > 512))
			retval = low_latency;
		break;
	case low_latency: /* 50 usec aka 20000 ints/s */
		if (bytes > 10000) {
			/* this if handles the TSO accounting */
			if (bytes/packets > 8000)
				retval = bulk_latency;
			else if ((packets < 10) || ((bytes/packets) > 1200))
				retval = bulk_latency;
			else if ((packets > 35))
				retval = lowest_latency;
		} else if (bytes/packets > 2000) {
			retval = bulk_latency;
		} else if (packets <= 2 && bytes < 512) {
			retval = lowest_latency;
		}
		break;
	case bulk_latency: /* 250 usec aka 4000 ints/s */
		if (bytes > 25000) {
			if (packets > 35)
				retval = low_latency;
		} else if (bytes < 6000) {
			retval = low_latency;
		}
		break;
	}

update_itr_done:
	return retval;
}
static void e1000_set_itr(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 current_itr;
	u32 new_itr = adapter->itr;

	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
	if (adapter->link_speed != SPEED_1000) {
		current_itr = 0;
		new_itr = 4000;
		goto set_itr_now;
	}

	if (adapter->flags2 & FLAG2_DISABLE_AIM) {
		new_itr = 0;
		goto set_itr_now;
	}

	adapter->tx_itr = e1000_update_itr(adapter,
					   adapter->tx_itr,
					   adapter->total_tx_packets,
					   adapter->total_tx_bytes);
	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
		adapter->tx_itr = low_latency;

	adapter->rx_itr = e1000_update_itr(adapter,
					   adapter->rx_itr,
					   adapter->total_rx_packets,
					   adapter->total_rx_bytes);
	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
		adapter->rx_itr = low_latency;

	current_itr = max(adapter->rx_itr, adapter->tx_itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 70000;
		break;
	case low_latency:
		new_itr = 20000; /* aka hwitr = ~200 */
		break;
	case bulk_latency:
		new_itr = 4000;
		break;
	default:
		break;
	}

set_itr_now:
	if (new_itr != adapter->itr) {
		/*
		 * this attempts to bias the interrupt rate towards Bulk
		 * by adding intermediate steps when interrupt rate is
		 * increasing
		 */
		new_itr = new_itr > adapter->itr ?
			  min(adapter->itr + (new_itr >> 2), new_itr) :
			  new_itr;
		adapter->itr = new_itr;
		adapter->rx_ring->itr_val = new_itr;
		if (adapter->msix_entries)
			adapter->rx_ring->set_itr = 1;
		else if (new_itr)
			ew32(ITR, 1000000000 / (new_itr * 256));
		else
			ew32(ITR, 0);
	}
}
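/*
 * The ITR register counts in units of 256 ns, so the write above
 * converts an interrupts/sec target into a hardware interval:
 * reg = 10^9 / (rate * 256).  For example (illustrative value),
 * new_itr = 20000 ints/s gives reg = 1000000000 / (20000 * 256) = 195
 * (the "hwitr = ~200" noted above), i.e. at most one interrupt every
 * 195 * 256 ns, roughly 50 us.
 */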
/**
 * e1000_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 **/
static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
{
	adapter->tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		goto err;

	adapter->rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
	if (!adapter->rx_ring)
		goto err;

	return 0;
err:
	e_err("Unable to allocate memory for queues\n");
	kfree(adapter->rx_ring);
	kfree(adapter->tx_ring);
	return -ENOMEM;
}
/**
 * e1000_clean - NAPI Rx polling callback
 * @napi: struct associated with this polling callback
 * @budget: number of packets driver is allowed to process this poll
 **/
static int e1000_clean(struct napi_struct *napi, int budget)
{
	struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *poll_dev = adapter->netdev;
	int tx_cleaned = 1, work_done = 0;

	adapter = netdev_priv(poll_dev);

	if (adapter->msix_entries &&
	    !(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val))
		goto clean_rx;

	tx_cleaned = e1000_clean_tx_irq(adapter);

clean_rx:
	adapter->clean_rx(adapter, &work_done, budget);

	if (!tx_cleaned)
		work_done = budget;

	/* If budget not fully consumed, exit the polling mode */
	if (work_done < budget) {
		if (adapter->itr_setting & 3)
			e1000_set_itr(adapter);
		napi_complete(napi);
		if (!test_bit(__E1000_DOWN, &adapter->state)) {
			if (adapter->msix_entries)
				ew32(IMS, adapter->rx_ring->ims_val);
			else
				e1000_irq_enable(adapter);
		}
	}

	return work_done;
}
static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	/* don't update vlan cookie if already programmed */
	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
	    (vid == adapter->mng_vlan_id))
		return;

	/* add VID to filter table */
	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
		index = (vid >> 5) & 0x7F;
		vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
		vfta |= (1 << (vid & 0x1F));
		hw->mac.ops.write_vfta(hw, index, vfta);
	}

	set_bit(vid, adapter->active_vlans);
}
static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
	    (vid == adapter->mng_vlan_id)) {
		/* release control to f/w */
		e1000e_release_hw_control(adapter);
		return;
	}

	/* remove VID from filter table */
	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
		index = (vid >> 5) & 0x7F;
		vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
		vfta &= ~(1 << (vid & 0x1F));
		hw->mac.ops.write_vfta(hw, index, vfta);
	}

	clear_bit(vid, adapter->active_vlans);
}
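/*
 * VFTA indexing, worked through (numbers for illustration): the 4096
 * possible VIDs map onto 128 32-bit filter registers.  For vid = 1000,
 * index = (1000 >> 5) & 0x7F = 31 and the bit within that register is
 * 1000 & 0x1F = 8, so add_vid sets bit 8 of VFTA[31] and kill_vid
 * clears the same bit.
 */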
/**
 * e1000e_vlan_filter_disable - helper to disable hw VLAN filtering
 * @adapter: board private structure to initialize
 **/
static void e1000e_vlan_filter_disable(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
		/* disable VLAN receive filtering */
		rctl = er32(RCTL);
		rctl &= ~(E1000_RCTL_VFE | E1000_RCTL_CFIEN);
		ew32(RCTL, rctl);

		if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) {
			e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
			adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
		}
	}
}
/**
 * e1000e_vlan_filter_enable - helper to enable HW VLAN filtering
 * @adapter: board private structure to initialize
 **/
static void e1000e_vlan_filter_enable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
		/* enable VLAN receive filtering */
		rctl = er32(RCTL);
		rctl |= E1000_RCTL_VFE;
		rctl &= ~E1000_RCTL_CFIEN;
		ew32(RCTL, rctl);
	}
}
/**
 * e1000e_vlan_strip_disable - helper to disable HW VLAN stripping
 * @adapter: board private structure to initialize
 **/
static void e1000e_vlan_strip_disable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl;

	/* disable VLAN tag insert/strip */
	ctrl = er32(CTRL);
	ctrl &= ~E1000_CTRL_VME;
	ew32(CTRL, ctrl);
}
/**
 * e1000e_vlan_strip_enable - helper to enable HW VLAN stripping
 * @adapter: board private structure to initialize
 **/
static void e1000e_vlan_strip_enable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl;

	/* enable VLAN tag insert/strip */
	ctrl = er32(CTRL);
	ctrl |= E1000_CTRL_VME;
	ew32(CTRL, ctrl);
}
static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u16 vid = adapter->hw.mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (adapter->hw.mng_cookie.status &
	    E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
		e1000_vlan_rx_add_vid(netdev, vid);
		adapter->mng_vlan_id = vid;
	}

	if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && (vid != old_vid))
		e1000_vlan_rx_kill_vid(netdev, old_vid);
}
static void e1000_restore_vlan(struct e1000_adapter *adapter)
{
	u16 vid;

	e1000_vlan_rx_add_vid(adapter->netdev, 0);

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		e1000_vlan_rx_add_vid(adapter->netdev, vid);
}
static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 manc, manc2h, mdef, i, j;

	if (!(adapter->flags & FLAG_MNG_PT_ENABLED))
		return;

	manc = er32(MANC);

	/*
	 * enable receiving management packets to the host. this will probably
	 * generate destination unreachable messages from the host OS, but
	 * the packets will be handled on SMBUS
	 */
	manc |= E1000_MANC_EN_MNG2HOST;
	manc2h = er32(MANC2H);

	switch (hw->mac.type) {
	default:
		manc2h |= (E1000_MANC2H_PORT_623 | E1000_MANC2H_PORT_664);
		break;
	case e1000_82574:
	case e1000_82583:
		/*
		 * Check if IPMI pass-through decision filter already exists;
		 * if so, enable it.
		 */
		for (i = 0, j = 0; i < 8; i++) {
			mdef = er32(MDEF(i));

			/* Ignore filters with anything other than IPMI ports */
			if (mdef & ~(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
				continue;

			/* Enable this decision filter in MANC2H */
			if (mdef)
				manc2h |= (1 << i);

			j |= mdef;
		}

		if (j == (E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
			break;

		/* Create new decision filter in an empty filter */
		for (i = 0, j = 0; i < 8; i++)
			if (er32(MDEF(i)) == 0) {
				ew32(MDEF(i), (E1000_MDEF_PORT_623 |
					       E1000_MDEF_PORT_664));
				manc2h |= (1 << i);
				j++;
				break;
			}

		if (!j)
			e_warn("Unable to create IPMI pass-through filter\n");
		break;
	}

	ew32(MANC2H, manc2h);
	ew32(MANC, manc);
}
/**
 * e1000_configure_tx - Configure Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void e1000_configure_tx(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	u64 tdba;
	u32 tdlen, tctl, tipg, tarc;
	u32 ipgr1, ipgr2;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	tdba = tx_ring->dma;
	tdlen = tx_ring->count * sizeof(struct e1000_tx_desc);
	ew32(TDBAL, (tdba & DMA_BIT_MASK(32)));
	ew32(TDBAH, (tdba >> 32));
	ew32(TDLEN, tdlen);
	ew32(TDH, 0);
	ew32(TDT, 0);
	tx_ring->head = E1000_TDH;
	tx_ring->tail = E1000_TDT;

	/* Set the default values for the Tx Inter Packet Gap timer */
	tipg = DEFAULT_82543_TIPG_IPGT_COPPER;		/*  8  */
	ipgr1 = DEFAULT_82543_TIPG_IPGR1;		/*  8  */
	ipgr2 = DEFAULT_82543_TIPG_IPGR2;		/*  6  */

	if (adapter->flags & FLAG_TIPG_MEDIUM_FOR_80003ESLAN)
		ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2;	/*  7  */

	tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
	tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
	ew32(TIPG, tipg);

	/* Set the Tx Interrupt Delay register */
	ew32(TIDV, adapter->tx_int_delay);
	/* Tx irq moderation */
	ew32(TADV, adapter->tx_abs_int_delay);

	if (adapter->flags2 & FLAG2_DMA_BURST) {
		u32 txdctl = er32(TXDCTL(0));
		txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH |
			    E1000_TXDCTL_WTHRESH);
		/*
		 * set up some performance related parameters to encourage the
		 * hardware to use the bus more efficiently in bursts, depends
		 * on the tx_int_delay to be enabled,
		 * wthresh = 5 ==> burst write a cacheline (64 bytes) at a time
		 * hthresh = 1 ==> prefetch when one or more available
		 * pthresh = 0x1f ==> prefetch if internal cache 31 or less
		 * BEWARE: this seems to work but should be considered first if
		 * there are Tx hangs or other Tx related bugs
		 */
		txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE;
		ew32(TXDCTL(0), txdctl);
		/* erratum work around: set txdctl the same for both queues */
		ew32(TXDCTL(1), txdctl);
	}

	/* Program the Transmit Control Register */
	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

	if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) {
		tarc = er32(TARC(0));
		/*
		 * set the speed mode bit, we'll clear it if we're not at
		 * gigabit link later
		 */
#define SPEED_MODE_BIT (1 << 21)
		tarc |= SPEED_MODE_BIT;
		ew32(TARC(0), tarc);
	}

	/* errata: program both queues to unweighted RR */
	if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) {
		tarc = er32(TARC(0));
		tarc |= 1;
		ew32(TARC(0), tarc);
		tarc = er32(TARC(1));
		tarc |= 1;
		ew32(TARC(1), tarc);
	}

	/* Setup Transmit Descriptor Settings for eop descriptor */
	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;

	/* only set IDE if we are delaying interrupts using the timers */
	if (adapter->tx_int_delay)
		adapter->txd_cmd |= E1000_TXD_CMD_IDE;

	/* enable Report Status bit */
	adapter->txd_cmd |= E1000_TXD_CMD_RS;

	ew32(TCTL, tctl);

	e1000e_config_collision_dist(hw);
}
/**
 * e1000_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 **/
#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
			   (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
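/*
 * PAGE_USE_COUNT is a ceiling divide by the page size.  Worked example
 * (illustrative): with 4 KiB pages and a 9000-byte jumbo MTU,
 * (9000 >> 12) = 2 and (9000 & 4095) = 808 which is non-zero, so the
 * macro returns 3 pages.
 */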
static void e1000_setup_rctl(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl, rfctl;
	u32 pages = 0;

	/* Workaround Si errata on 82579 - configure jumbo frame flow */
	if (hw->mac.type == e1000_pch2lan) {
		s32 ret_val;

		if (adapter->netdev->mtu > ETH_DATA_LEN)
			ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
		else
			ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);

		if (ret_val)
			e_dbg("failed to enable jumbo frame workaround mode\n");
	}

	/* Program MC offset vector base */
	rctl = er32(RCTL);
	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
		E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
		(adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/* Do not Store bad packets */
	rctl &= ~E1000_RCTL_SBP;

	/* Enable Long Packet receive */
	if (adapter->netdev->mtu <= ETH_DATA_LEN)
		rctl &= ~E1000_RCTL_LPE;
	else
		rctl |= E1000_RCTL_LPE;

	/* Some systems expect that the CRC is included in SMBUS traffic. The
	 * hardware strips the CRC before sending to both SMBUS (BMC) and to
	 * host memory when this is enabled
	 */
	if (adapter->flags2 & FLAG2_CRC_STRIPPING)
		rctl |= E1000_RCTL_SECRC;

	/* Workaround Si errata on 82577 PHY - configure IPG for jumbos */
	if ((hw->phy.type == e1000_phy_82577) && (rctl & E1000_RCTL_LPE)) {
		u16 phy_data;

		e1e_rphy(hw, PHY_REG(770, 26), &phy_data);
		phy_data &= 0xfff8;
		phy_data |= (1 << 2);
		e1e_wphy(hw, PHY_REG(770, 26), phy_data);

		e1e_rphy(hw, 22, &phy_data);
		phy_data &= 0x0fff;
		phy_data |= (1 << 14);
		e1e_wphy(hw, 0x10, 0x2823);
		e1e_wphy(hw, 0x11, 0x0003);
		e1e_wphy(hw, 22, phy_data);
	}

	/* Setup buffer sizes */
	rctl &= ~E1000_RCTL_SZ_4096;
	rctl |= E1000_RCTL_BSEX;
	switch (adapter->rx_buffer_len) {
	case 2048:
	default:
		rctl |= E1000_RCTL_SZ_2048;
		rctl &= ~E1000_RCTL_BSEX;
		break;
	case 4096:
		rctl |= E1000_RCTL_SZ_4096;
		break;
	case 8192:
		rctl |= E1000_RCTL_SZ_8192;
		break;
	case 16384:
		rctl |= E1000_RCTL_SZ_16384;
		break;
	}

	/*
	 * 82571 and greater support packet-split where the protocol
	 * header is placed in skb->data and the packet data is
	 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
	 * In the case of a non-split, skb->data is linearly filled,
	 * followed by the page buffers.  Therefore, skb->data is
	 * sized to hold the largest protocol header.
	 *
	 * allocations using alloc_page take too long for regular MTU
	 * so only enable packet split for jumbo frames
	 *
	 * Using pages when the page size is greater than 16k wastes
	 * a lot of memory, since we allocate 3 pages at all times
	 * per packet.
	 */
	pages = PAGE_USE_COUNT(adapter->netdev->mtu);
	if (!(adapter->flags & FLAG_HAS_ERT) && (pages <= 3) &&
	    (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE))
		adapter->rx_ps_pages = pages;
	else
		adapter->rx_ps_pages = 0;

	if (adapter->rx_ps_pages) {
		u32 psrctl = 0;

		/* Configure extra packet-split registers */
		rfctl = er32(RFCTL);
		rfctl |= E1000_RFCTL_EXTEN;
		/*
		 * disable packet split support for IPv6 extension headers,
		 * because some malformed IPv6 headers can hang the Rx
		 */
		rfctl |= (E1000_RFCTL_IPV6_EX_DIS |
			  E1000_RFCTL_NEW_IPV6_EXT_DIS);

		ew32(RFCTL, rfctl);

		/* Enable Packet split descriptors */
		rctl |= E1000_RCTL_DTYP_PS;

		psrctl |= adapter->rx_ps_bsize0 >>
			E1000_PSRCTL_BSIZE0_SHIFT;

		switch (adapter->rx_ps_pages) {
		case 3:
			psrctl |= PAGE_SIZE <<
				E1000_PSRCTL_BSIZE3_SHIFT;
		case 2:
			psrctl |= PAGE_SIZE <<
				E1000_PSRCTL_BSIZE2_SHIFT;
		case 1:
			psrctl |= PAGE_SIZE >>
				E1000_PSRCTL_BSIZE1_SHIFT;
			break;
		}

		ew32(PSRCTL, psrctl);
	}

	ew32(RCTL, rctl);
	/* just started the receive unit, no need to restart */
	adapter->flags &= ~FLAG_RX_RESTART_NOW;
}
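/*
 * PSRCTL packing, worked through (illustrative, assuming the usual
 * PSRCTL field layout, 4 KiB pages and the driver default
 * rx_ps_bsize0 = 128): BSIZE0 is expressed in 128-byte units in the
 * low bits, so 128 >> 7 = 1; BSIZE1 takes 4096 >> 2 = 0x400; BSIZE2
 * and BSIZE3 take 4096 << 6 and 4096 << 14.  With rx_ps_pages = 3 the
 * register ends up 0x04040401: one 128-byte header buffer plus three
 * 4 KiB page buffers per descriptor.
 */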
/**
 * e1000_configure_rx - Configure Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void e1000_configure_rx(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	u64 rdba;
	u32 rdlen, rctl, rxcsum, ctrl_ext;

	if (adapter->rx_ps_pages) {
		/* this is a 32 byte descriptor */
		rdlen = rx_ring->count *
			sizeof(union e1000_rx_desc_packet_split);
		adapter->clean_rx = e1000_clean_rx_irq_ps;
		adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
	} else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) {
		rdlen = rx_ring->count * sizeof(struct e1000_rx_desc);
		adapter->clean_rx = e1000_clean_jumbo_rx_irq;
		adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
	} else {
		rdlen = rx_ring->count * sizeof(struct e1000_rx_desc);
		adapter->clean_rx = e1000_clean_rx_irq;
		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
	}

	/* disable receives while setting up the descriptors */
	rctl = er32(RCTL);
	ew32(RCTL, rctl & ~E1000_RCTL_EN);
	e1e_flush();
	usleep_range(10000, 20000);

	if (adapter->flags2 & FLAG2_DMA_BURST) {
		/*
		 * set the writeback threshold (only takes effect if the RDTR
		 * is set). set GRAN=1 and write back up to 0x4 worth, and
		 * enable prefetching of 0x20 Rx descriptors
		 */
		ew32(RXDCTL(0), E1000_RXDCTL_DMA_BURST_ENABLE);
		ew32(RXDCTL(1), E1000_RXDCTL_DMA_BURST_ENABLE);

		/*
		 * override the delay timers for enabling bursting, only if
		 * the value was not set by the user via module options
		 */
		if (adapter->rx_int_delay == DEFAULT_RDTR)
			adapter->rx_int_delay = BURST_RDTR;
		if (adapter->rx_abs_int_delay == DEFAULT_RADV)
			adapter->rx_abs_int_delay = BURST_RADV;
	}

	/* set the Receive Delay Timer Register */
	ew32(RDTR, adapter->rx_int_delay);

	/* irq moderation */
	ew32(RADV, adapter->rx_abs_int_delay);
	if ((adapter->itr_setting != 0) && (adapter->itr != 0))
		ew32(ITR, 1000000000 / (adapter->itr * 256));

	ctrl_ext = er32(CTRL_EXT);
	/* Auto-Mask interrupts upon ICR access */
	ctrl_ext |= E1000_CTRL_EXT_IAME;
	ew32(IAM, 0xffffffff);
	ew32(CTRL_EXT, ctrl_ext);
	e1e_flush();

	/*
	 * Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	rdba = rx_ring->dma;
	ew32(RDBAL, (rdba & DMA_BIT_MASK(32)));
	ew32(RDBAH, (rdba >> 32));
	ew32(RDLEN, rdlen);
	ew32(RDH, 0);
	ew32(RDT, 0);
	rx_ring->head = E1000_RDH;
	rx_ring->tail = E1000_RDT;

	/* Enable Receive Checksum Offload for TCP and UDP */
	rxcsum = er32(RXCSUM);
	if (adapter->flags & FLAG_RX_CSUM_ENABLED) {
		rxcsum |= E1000_RXCSUM_TUOFL;

		/*
		 * IPv4 payload checksum for UDP fragments must be
		 * used in conjunction with packet-split.
		 */
		if (adapter->rx_ps_pages)
			rxcsum |= E1000_RXCSUM_IPPCSE;
	} else {
		rxcsum &= ~E1000_RXCSUM_TUOFL;
		/* no need to clear IPPCSE as it defaults to 0 */
	}
	ew32(RXCSUM, rxcsum);

	/*
	 * Enable early receives on supported devices, only takes effect when
	 * packet size is equal or larger than the specified value (in 8 byte
	 * units), e.g. using jumbo frames when setting to E1000_ERT_2048
	 */
	if ((adapter->flags & FLAG_HAS_ERT) ||
	    (adapter->hw.mac.type == e1000_pch2lan)) {
		if (adapter->netdev->mtu > ETH_DATA_LEN) {
			u32 rxdctl = er32(RXDCTL(0));
			ew32(RXDCTL(0), rxdctl | 0x3);
			if (adapter->flags & FLAG_HAS_ERT)
				ew32(ERT, E1000_ERT_2048 | (1 << 13));
			/*
			 * With jumbo frames and early-receive enabled,
			 * excessive C-state transition latencies result in
			 * dropped transactions.
			 */
			pm_qos_update_request(&adapter->netdev->pm_qos_req, 55);
		} else {
			pm_qos_update_request(&adapter->netdev->pm_qos_req,
					      PM_QOS_DEFAULT_VALUE);
		}
	}

	/* Enable Receives */
	ew32(RCTL, rctl);
}
/**
 * e1000_update_mc_addr_list - Update Multicast addresses
 * @hw: pointer to the HW structure
 * @mc_addr_list: array of multicast addresses to program
 * @mc_addr_count: number of multicast addresses to program
 *
 * Updates the Multicast Table Array.
 * The caller must have a packed mc_addr_list of multicast addresses.
 **/
static void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
				      u32 mc_addr_count)
{
	hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, mc_addr_count);
}
/**
 * e1000_set_multi - Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_multi entry point is called whenever the multicast address
 * list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper multicast,
 * promiscuous mode, and all-multi behavior.
 **/
static void e1000_set_multi(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	u8 *mta_list;
	u32 rctl;

	/* Check for Promiscuous and All Multicast modes */

	rctl = er32(RCTL);

	if (netdev->flags & IFF_PROMISC) {
		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		rctl &= ~E1000_RCTL_VFE;
		/* Do not hardware filter VLANs in promisc mode */
		e1000e_vlan_filter_disable(adapter);
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			rctl |= E1000_RCTL_MPE;
			rctl &= ~E1000_RCTL_UPE;
		} else {
			rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
		}
		if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER)
			e1000e_vlan_filter_enable(adapter);
	}

	ew32(RCTL, rctl);

	if (!netdev_mc_empty(netdev)) {
		int i = 0;

		mta_list = kmalloc(netdev_mc_count(netdev) * ETH_ALEN,
				   GFP_ATOMIC);
		if (!mta_list)
			return;

		/* prepare a packed array of only addresses. */
		netdev_for_each_mc_addr(ha, netdev)
			memcpy(mta_list + (i++ * ETH_ALEN), ha->addr,
			       ETH_ALEN);

		e1000_update_mc_addr_list(hw, mta_list, i);
		kfree(mta_list);
	} else {
		/*
		 * if we're called from probe, we might not have
		 * anything to do here, so clear out the list
		 */
		e1000_update_mc_addr_list(hw, NULL, 0);
	}

	if (netdev->features & NETIF_F_HW_VLAN_RX)
		e1000e_vlan_strip_enable(adapter);
	else
		e1000e_vlan_strip_disable(adapter);
}
/**
 * e1000_configure - configure the hardware for Rx and Tx
 * @adapter: private board structure
 **/
static void e1000_configure(struct e1000_adapter *adapter)
{
	e1000_set_multi(adapter->netdev);

	e1000_restore_vlan(adapter);
	e1000_init_manageability_pt(adapter);

	e1000_configure_tx(adapter);
	e1000_setup_rctl(adapter);
	e1000_configure_rx(adapter);
	adapter->alloc_rx_buf(adapter, e1000_desc_unused(adapter->rx_ring));
}
/**
 * e1000e_power_up_phy - restore link in case the phy was powered down
 * @adapter: address of board private structure
 *
 * The phy may be powered down to save power and turn off link when the
 * driver is unloaded and wake on lan is not enabled (among others)
 * *** this routine MUST be followed by a call to e1000e_reset ***
 **/
void e1000e_power_up_phy(struct e1000_adapter *adapter)
{
	if (adapter->hw.phy.ops.power_up)
		adapter->hw.phy.ops.power_up(&adapter->hw);

	adapter->hw.mac.ops.setup_link(&adapter->hw);
}
/**
 * e1000_power_down_phy - Power down the PHY
 *
 * Power down the PHY so no link is implied when interface is down.
 * The PHY cannot be powered down if management or WoL is active.
 */
static void e1000_power_down_phy(struct e1000_adapter *adapter)
{
	/* WoL is enabled */
	if (adapter->wol)
		return;

	if (adapter->hw.phy.ops.power_down)
		adapter->hw.phy.ops.power_down(&adapter->hw);
}
/**
 * e1000e_reset - bring the hardware into a known good state
 *
 * This function boots the hardware and enables some settings that
 * require a configuration cycle of the hardware - those cannot be
 * set/changed during runtime.  After reset the device needs to be
 * properly configured for Rx, Tx etc.
 */
void e1000e_reset(struct e1000_adapter *adapter)
{
	struct e1000_mac_info *mac = &adapter->hw.mac;
	struct e1000_fc_info *fc = &adapter->hw.fc;
	struct e1000_hw *hw = &adapter->hw;
	u32 tx_space, min_tx_space, min_rx_space;
	u32 pba = adapter->pba;
	u32 hwm;

	/* reset Packet Buffer Allocation to default */
	ew32(PBA, pba);

	if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
		/*
		 * To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB.  Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB.
		 */
		pba = er32(PBA);
		/* upper 16 bits has Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits has Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/*
		 * the Tx fifo also stores 16 bytes of information about the Tx
		 * but don't include ethernet FCS because hardware appends it
		 */
		min_tx_space = (adapter->max_frame_size +
				sizeof(struct e1000_tx_desc) -
				ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		/* software strips receive CRC, so leave room for it */
		min_rx_space = adapter->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;
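		/*
		 * Worked example for the sizing above (illustrative): with a
		 * 9000-byte MTU, max_frame_size is 9018, so min_tx_space =
		 * ALIGN((9018 + 16 - 4) * 2, 1024) >> 10 =
		 * ALIGN(18060, 1024) >> 10 = 18 KB, and min_rx_space =
		 * ALIGN(9018, 1024) >> 10 = 9 KB.
		 */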
		/*
		 * If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation
		 */
		if ((tx_space < min_tx_space) &&
		    ((min_tx_space - tx_space) < pba)) {
			pba -= min_tx_space - tx_space;

			/*
			 * if short on Rx space, Rx wins and must trump Tx
			 * adjustment or use Early Receive if available
			 */
			if ((pba < min_rx_space) &&
			    (!(adapter->flags & FLAG_HAS_ERT)))
				/* ERT enabled in e1000_configure_rx */
				pba = min_rx_space;
		}

		ew32(PBA, pba);
	}
	/*
	 * flow control settings
	 *
	 * The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, and
	 * - the full Rx FIFO size minus the early receive size (for parts
	 *   with ERT support assuming ERT set to E1000_ERT_2048), or
	 * - the full Rx FIFO size minus one full frame
	 */
	if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME)
		fc->pause_time = 0xFFFF;
	else
		fc->pause_time = E1000_FC_PAUSE_TIME;
	fc->send_xon = 1;
	fc->current_mode = fc->requested_mode;
	switch (hw->mac.type) {
	default:
		if ((adapter->flags & FLAG_HAS_ERT) &&
		    (adapter->netdev->mtu > ETH_DATA_LEN))
			hwm = min(((pba << 10) * 9 / 10),
				  ((pba << 10) - (E1000_ERT_2048 << 3)));
		else
			hwm = min(((pba << 10) * 9 / 10),
				  ((pba << 10) - adapter->max_frame_size));

		fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */
		fc->low_water = fc->high_water - 8;
		break;
	case e1000_pchlan:
		/*
		 * Workaround PCH LOM adapter hangs with certain network
		 * loads.  If hangs persist, try disabling Tx flow control.
		 */
		if (adapter->netdev->mtu > ETH_DATA_LEN) {
			fc->high_water = 0x3500;
			fc->low_water = 0x1500;
		} else {
			fc->high_water = 0x5000;
			fc->low_water = 0x3000;
		}
		fc->refresh_time = 0x1000;
		break;
	case e1000_pch2lan:
		fc->high_water = 0x05C20;
		fc->low_water = 0x05048;
		fc->pause_time = 0x0650;
		fc->refresh_time = 0x0400;
		if (adapter->netdev->mtu > ETH_DATA_LEN) {
			pba = 14;
			ew32(PBA, pba);
		}
		break;
	}
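	/*
	 * Illustrative high-water arithmetic for the default case above:
	 * with an Rx allocation of pba = 20 KB and a 1522-byte max frame,
	 * 90% of the FIFO is 20480 * 9 / 10 = 18432 bytes while
	 * FIFO-minus-one-frame is 20480 - 1522 = 18958, so hwm = 18432;
	 * masking with E1000_FCRTH_RTH keeps the required 8-byte
	 * granularity.
	 */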
	/*
	 * Disable Adaptive Interrupt Moderation if 2 full packets cannot
	 * fit in receive buffer and early-receive not supported.
	 */
	if (adapter->itr_setting & 0x3) {
		if (((adapter->max_frame_size * 2) > (pba << 10)) &&
		    !(adapter->flags & FLAG_HAS_ERT)) {
			if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) {
				dev_info(&adapter->pdev->dev,
					"Interrupt Throttle Rate turned off\n");
				adapter->flags2 |= FLAG2_DISABLE_AIM;
				ew32(ITR, 0);
			}
		} else if (adapter->flags2 & FLAG2_DISABLE_AIM) {
			dev_info(&adapter->pdev->dev,
				 "Interrupt Throttle Rate turned on\n");
			adapter->flags2 &= ~FLAG2_DISABLE_AIM;
			adapter->itr = 20000;
			ew32(ITR, 1000000000 / (adapter->itr * 256));
		}
	}
	/* Allow time for pending master requests to run */
	mac->ops.reset_hw(hw);

	/*
	 * For parts with AMT enabled, let the firmware know
	 * that the network interface is in control
	 */
	if (adapter->flags & FLAG_HAS_AMT)
		e1000e_get_hw_control(adapter);

	ew32(WUC, 0);

	if (mac->ops.init_hw(hw))
		e_err("Hardware Error\n");

	e1000_update_mng_vlan(adapter);

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	ew32(VET, ETH_P_8021Q);

	e1000e_reset_adaptive(hw);

	if (!netif_running(adapter->netdev) &&
	    !test_bit(__E1000_TESTING, &adapter->state)) {
		e1000_power_down_phy(adapter);
		return;
	}

	e1000_get_phy_info(hw);

	if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) &&
	    !(adapter->flags & FLAG_SMART_POWER_DOWN)) {
		u16 phy_data = 0;
		/*
		 * speed up time to link by disabling smart power down, ignore
		 * the return value of this function because there is nothing
		 * different we would do if it failed
		 */
		e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
		phy_data &= ~IGP02E1000_PM_SPD;
		e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
	}
}
int e1000e_up(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* hardware has been reset, we need to reload some things */
	e1000_configure(adapter);

	clear_bit(__E1000_DOWN, &adapter->state);

	napi_enable(&adapter->napi);
	if (adapter->msix_entries)
		e1000_configure_msix(adapter);
	e1000_irq_enable(adapter);

	netif_wake_queue(adapter->netdev);

	/* fire a link change interrupt to start the watchdog */
	if (adapter->msix_entries)
		ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
	else
		ew32(ICS, E1000_ICS_LSC);

	return 0;
}
static void e1000e_flush_descriptors(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (!(adapter->flags2 & FLAG2_DMA_BURST))
		return;

	/* flush pending descriptor writebacks to memory */
	ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
	ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);

	/* execute the writes immediately */
	e1e_flush();
}
static void e1000e_update_stats(struct e1000_adapter *adapter);

void e1000e_down(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 tctl, rctl;

	/*
	 * signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer
	 */
	set_bit(__E1000_DOWN, &adapter->state);

	/* disable receives in the hardware */
	rctl = er32(RCTL);
	ew32(RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_stop_queue(netdev);

	/* disable transmits in the hardware */
	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_EN;
	ew32(TCTL, tctl);

	/* flush both disables and wait for them to finish */
	e1e_flush();
	usleep_range(10000, 20000);

	napi_disable(&adapter->napi);
	e1000_irq_disable(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	netif_carrier_off(netdev);

	spin_lock(&adapter->stats64_lock);
	e1000e_update_stats(adapter);
	spin_unlock(&adapter->stats64_lock);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	if (!pci_channel_offline(adapter->pdev))
		e1000e_reset(adapter);

	e1000e_flush_descriptors(adapter);

	e1000_clean_tx_ring(adapter);
	e1000_clean_rx_ring(adapter);

	/*
	 * TODO: for power management, we could drop the link and
	 * pci_disable_device here.
	 */
}
void e1000e_reinit_locked(struct e1000_adapter *adapter)
{
	might_sleep();
	while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
		usleep_range(1000, 2000);
	e1000e_down(adapter);
	e1000e_up(adapter);
	clear_bit(__E1000_RESETTING, &adapter->state);
}
/**
 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
 * @adapter: board private structure to initialize
 *
 * e1000_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
	adapter->rx_ps_bsize0 = 128;
	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

	spin_lock_init(&adapter->stats64_lock);

	e1000e_set_interrupt_capability(adapter);

	if (e1000_alloc_queues(adapter))
		return -ENOMEM;

	/* Explicitly disable IRQ since the NIC can be in any state. */
	e1000_irq_disable(adapter);

	set_bit(__E1000_DOWN, &adapter->state);
	return 0;
}
/**
 * e1000_intr_msi_test - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t e1000_intr_msi_test(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = er32(ICR);

	e_dbg("icr is %08X\n", icr);
	if (icr & E1000_ICR_RXSEQ) {
		adapter->flags &= ~FLAG_MSI_TEST_FAILED;
		wmb();
	}

	return IRQ_HANDLED;
}
/**
 * e1000_test_msi_interrupt - Returns 0 for successful test
 * @adapter: board private struct
 *
 * code flow taken from tg3.c
 **/
static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* poll_enable hasn't been called yet, so don't need disable */
	/* clear any pending events */
	er32(ICR);

	/* free the real vector and request a test handler */
	e1000_free_irq(adapter);
	e1000e_reset_interrupt_capability(adapter);

	/* Assume that the test fails, if it succeeds then the test
	 * MSI irq handler will unset this flag */
	adapter->flags |= FLAG_MSI_TEST_FAILED;

	err = pci_enable_msi(adapter->pdev);
	if (err)
		goto msi_test_failed;

	err = request_irq(adapter->pdev->irq, e1000_intr_msi_test, 0,
			  netdev->name, netdev);
	if (err) {
		pci_disable_msi(adapter->pdev);
		goto msi_test_failed;
	}

	wmb();

	e1000_irq_enable(adapter);

	/* fire an unusual interrupt on the test handler */
	ew32(ICS, E1000_ICS_RXSEQ);
	e1e_flush();
	msleep(50);

	e1000_irq_disable(adapter);

	rmb();

	if (adapter->flags & FLAG_MSI_TEST_FAILED) {
		adapter->int_mode = E1000E_INT_MODE_LEGACY;
		e_info("MSI interrupt test failed, using legacy interrupt.\n");
	} else
		e_dbg("MSI interrupt test succeeded!\n");

	free_irq(adapter->pdev->irq, netdev);
	pci_disable_msi(adapter->pdev);

msi_test_failed:
	e1000e_set_interrupt_capability(adapter);
	return e1000_request_irq(adapter);
}
/**
 * e1000_test_msi - Returns 0 if MSI test succeeds or INTx mode is restored
 * @adapter: board private struct
 *
 * code flow taken from tg3.c, called with e1000 interrupts disabled.
 **/
static int e1000_test_msi(struct e1000_adapter *adapter)
{
	int err;
	u16 pci_cmd;

	if (!(adapter->flags & FLAG_MSI_ENABLED))
		return 0;

	/* disable SERR in case the MSI write causes a master abort */
	pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
	if (pci_cmd & PCI_COMMAND_SERR)
		pci_write_config_word(adapter->pdev, PCI_COMMAND,
				      pci_cmd & ~PCI_COMMAND_SERR);

	err = e1000_test_msi_interrupt(adapter);

	/* re-enable SERR */
	if (pci_cmd & PCI_COMMAND_SERR) {
		pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
		pci_cmd |= PCI_COMMAND_SERR;
		pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd);
	}

	return err;
}
/**
 * e1000_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int e1000_open(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	int err;

	/* disallow open during test */
	if (test_bit(__E1000_TESTING, &adapter->state))
		return -EBUSY;

	pm_runtime_get_sync(&pdev->dev);

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = e1000e_setup_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = e1000e_setup_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	/*
	 * If AMT is enabled, let the firmware know that the network
	 * interface is now open and reset the part to a known state.
	 */
	if (adapter->flags & FLAG_HAS_AMT) {
		e1000e_get_hw_control(adapter);
		e1000e_reset(adapter);
	}

	e1000e_power_up_phy(adapter);

	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
		e1000_update_mng_vlan(adapter);

	/* DMA latency requirement to workaround early-receive/jumbo issue */
	if ((adapter->flags & FLAG_HAS_ERT) ||
	    (adapter->hw.mac.type == e1000_pch2lan))
		pm_qos_add_request(&adapter->netdev->pm_qos_req,
				   PM_QOS_CPU_DMA_LATENCY,
				   PM_QOS_DEFAULT_VALUE);

	/*
	 * before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to setup our
	 * clean_rx handler before we do so.
	 */
	e1000_configure(adapter);

	err = e1000_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/*
	 * Work around PCIe errata with MSI interrupts causing some chipsets to
	 * ignore e1000e MSI messages, which means we need to test our MSI
	 * interrupt now
	 */
	if (adapter->int_mode != E1000E_INT_MODE_LEGACY) {
		err = e1000_test_msi(adapter);
		if (err) {
			e_err("Interrupt allocation failed\n");
			goto err_req_irq;
		}
	}

	/* From here on the code is the same as e1000e_up() */
	clear_bit(__E1000_DOWN, &adapter->state);

	napi_enable(&adapter->napi);

	e1000_irq_enable(adapter);

	netif_start_queue(netdev);

	adapter->idle_check = true;
	pm_runtime_put(&pdev->dev);

	/* fire a link status change interrupt to start the watchdog */
	if (adapter->msix_entries)
		ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
	else
		ew32(ICS, E1000_ICS_LSC);

	return 0;

err_req_irq:
	e1000e_release_hw_control(adapter);
	e1000_power_down_phy(adapter);
	e1000e_free_rx_resources(adapter);
err_setup_rx:
	e1000e_free_tx_resources(adapter);
err_setup_tx:
	e1000e_reset(adapter);
	pm_runtime_put_sync(&pdev->dev);

	return err;
}
/**
 * e1000_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the drivers control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int e1000_close(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;

	WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));

	pm_runtime_get_sync(&pdev->dev);

	if (!test_bit(__E1000_DOWN, &adapter->state)) {
		e1000e_down(adapter);
		e1000_free_irq(adapter);
	}
	e1000_power_down_phy(adapter);

	e1000e_free_tx_resources(adapter);
	e1000e_free_rx_resources(adapter);

	/*
	 * kill manageability vlan ID if supported, but not if a vlan with
	 * the same ID is registered on the host OS (let 8021q kill it)
	 */
	if (adapter->hw.mng_cookie.status &
	    E1000_MNG_DHCP_COOKIE_STATUS_VLAN)
		e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);

	/*
	 * If AMT is enabled, let the firmware know that the network
	 * interface is now closed
	 */
	if ((adapter->flags & FLAG_HAS_AMT) &&
	    !test_bit(__E1000_TESTING, &adapter->state))
		e1000e_release_hw_control(adapter);

	if ((adapter->flags & FLAG_HAS_ERT) ||
	    (adapter->hw.mac.type == e1000_pch2lan))
		pm_qos_remove_request(&adapter->netdev->pm_qos_req);

	pm_runtime_put_sync(&pdev->dev);

	return 0;
}
/**
 * e1000_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int e1000_set_mac(struct net_device *netdev, void *p)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);

	e1000e_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);

	if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) {
		/* activate the work around */
		e1000e_set_laa_state_82571(&adapter->hw, 1);

		/*
		 * Hold a copy of the LAA in RAR[14] This is done so that
		 * between the time RAR[0] gets clobbered and the time it
		 * gets fixed (in e1000_watchdog), the actual LAA is in one
		 * of the RARs and no incoming packets directed to this port
		 * are dropped.  Eventually the LAA will be in RAR[0] and
		 * RAR[14]
		 */
		e1000e_rar_set(&adapter->hw,
			       adapter->hw.mac.addr,
			       adapter->hw.mac.rar_entry_count - 1);
	}

	return 0;
}
/**
 * e1000e_update_phy_task - work thread to update phy
 * @work: pointer to our work struct
 *
 * this worker thread exists because we must acquire a
 * semaphore to read the phy, which we could msleep while
 * waiting for it, and we can't msleep in a timer.
 **/
static void e1000e_update_phy_task(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
					struct e1000_adapter, update_phy_task);

	if (test_bit(__E1000_DOWN, &adapter->state))
		return;

	e1000_get_phy_info(&adapter->hw);
}
/*
 * Need to wait a few seconds after link up to get diagnostic information from
 * the phy
 */
static void e1000_update_phy_info(unsigned long data)
{
	struct e1000_adapter *adapter = (struct e1000_adapter *) data;

	if (test_bit(__E1000_DOWN, &adapter->state))
		return;

	schedule_work(&adapter->update_phy_task);
}
/**
 * e1000e_update_phy_stats - Update the PHY statistics counters
 * @adapter: board private structure
 **/
static void e1000e_update_phy_stats(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	s32 ret_val;
	u16 phy_data;

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return;

	hw->phy.addr = 1;

#define HV_PHY_STATS_PAGE	778
	/*
	 * A page set is expensive so check if already on desired page.
	 * If not, set to the page with the PHY status registers.
	 */
	ret_val = e1000e_read_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
					   &phy_data);
	if (ret_val)
		goto release;
	if (phy_data != (HV_PHY_STATS_PAGE << IGP_PAGE_SHIFT)) {
		ret_val = e1000e_write_phy_reg_mdic(hw,
						    IGP01E1000_PHY_PAGE_SELECT,
						    (HV_PHY_STATS_PAGE <<
						     IGP_PAGE_SHIFT));
		if (ret_val)
			goto release;
	}

	/* Read/clear the upper 16-bit registers and read/accumulate lower */

	/* Single Collision Count */
	e1000e_read_phy_reg_mdic(hw, HV_SCC_UPPER & MAX_PHY_REG_ADDRESS,
				 &phy_data);
	ret_val = e1000e_read_phy_reg_mdic(hw,
					   HV_SCC_LOWER & MAX_PHY_REG_ADDRESS,
					   &phy_data);
	if (!ret_val)
		adapter->stats.scc += phy_data;

	/* Excessive Collision Count */
	e1000e_read_phy_reg_mdic(hw, HV_ECOL_UPPER & MAX_PHY_REG_ADDRESS,
				 &phy_data);
	ret_val = e1000e_read_phy_reg_mdic(hw,
					   HV_ECOL_LOWER & MAX_PHY_REG_ADDRESS,
					   &phy_data);
	if (!ret_val)
		adapter->stats.ecol += phy_data;

	/* Multiple Collision Count */
	e1000e_read_phy_reg_mdic(hw, HV_MCC_UPPER & MAX_PHY_REG_ADDRESS,
				 &phy_data);
	ret_val = e1000e_read_phy_reg_mdic(hw,
					   HV_MCC_LOWER & MAX_PHY_REG_ADDRESS,
					   &phy_data);
	if (!ret_val)
		adapter->stats.mcc += phy_data;

	/* Late Collision Count */
	e1000e_read_phy_reg_mdic(hw, HV_LATECOL_UPPER & MAX_PHY_REG_ADDRESS,
				 &phy_data);
	ret_val = e1000e_read_phy_reg_mdic(hw,
					   HV_LATECOL_LOWER &
					   MAX_PHY_REG_ADDRESS,
					   &phy_data);
	if (!ret_val)
		adapter->stats.latecol += phy_data;

	/* Collision Count - also used for adaptive IFS */
	e1000e_read_phy_reg_mdic(hw, HV_COLC_UPPER & MAX_PHY_REG_ADDRESS,
				 &phy_data);
	ret_val = e1000e_read_phy_reg_mdic(hw,
					   HV_COLC_LOWER & MAX_PHY_REG_ADDRESS,
					   &phy_data);
	if (!ret_val)
		hw->mac.collision_delta = phy_data;

	/* Defer Count */
	e1000e_read_phy_reg_mdic(hw, HV_DC_UPPER & MAX_PHY_REG_ADDRESS,
				 &phy_data);
	ret_val = e1000e_read_phy_reg_mdic(hw,
					   HV_DC_LOWER & MAX_PHY_REG_ADDRESS,
					   &phy_data);
	if (!ret_val)
		adapter->stats.dc += phy_data;

	/* Transmit with no CRS */
	e1000e_read_phy_reg_mdic(hw, HV_TNCRS_UPPER & MAX_PHY_REG_ADDRESS,
				 &phy_data);
	ret_val = e1000e_read_phy_reg_mdic(hw,
					   HV_TNCRS_LOWER & MAX_PHY_REG_ADDRESS,
					   &phy_data);
	if (!ret_val)
		adapter->stats.tncrs += phy_data;

release:
	hw->phy.ops.release(hw);
}
/**
 * e1000e_update_stats - Update the board statistics counters
 * @adapter: board private structure
 **/
static void e1000e_update_stats(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;

	/*
	 * Prevent stats update while adapter is being reset, or if the pci
	 * connection is down.
	 */
	if (adapter->link_speed == 0)
		return;
	if (pci_channel_offline(pdev))
		return;

	adapter->stats.crcerrs += er32(CRCERRS);
	adapter->stats.gprc += er32(GPRC);
	adapter->stats.gorc += er32(GORCL);
	er32(GORCH); /* Clear gorc */
	adapter->stats.bprc += er32(BPRC);
	adapter->stats.mprc += er32(MPRC);
	adapter->stats.roc += er32(ROC);

	adapter->stats.mpc += er32(MPC);

	/* Half-duplex statistics */
	if (adapter->link_duplex == HALF_DUPLEX) {
		if (adapter->flags2 & FLAG2_HAS_PHY_STATS) {
			e1000e_update_phy_stats(adapter);
		} else {
			adapter->stats.scc += er32(SCC);
			adapter->stats.ecol += er32(ECOL);
			adapter->stats.mcc += er32(MCC);
			adapter->stats.latecol += er32(LATECOL);
			adapter->stats.dc += er32(DC);

			hw->mac.collision_delta = er32(COLC);

			if ((hw->mac.type != e1000_82574) &&
			    (hw->mac.type != e1000_82583))
				adapter->stats.tncrs += er32(TNCRS);
		}
		adapter->stats.colc += hw->mac.collision_delta;
	}

	adapter->stats.xonrxc += er32(XONRXC);
	adapter->stats.xontxc += er32(XONTXC);
	adapter->stats.xoffrxc += er32(XOFFRXC);
	adapter->stats.xofftxc += er32(XOFFTXC);
	adapter->stats.gptc += er32(GPTC);
	adapter->stats.gotc += er32(GOTCL);
	er32(GOTCH); /* Clear gotc */
	adapter->stats.rnbc += er32(RNBC);
	adapter->stats.ruc += er32(RUC);

	adapter->stats.mptc += er32(MPTC);
	adapter->stats.bptc += er32(BPTC);

	/* used for adaptive IFS */

	hw->mac.tx_packet_delta = er32(TPT);
	adapter->stats.tpt += hw->mac.tx_packet_delta;

	adapter->stats.algnerrc += er32(ALGNERRC);
	adapter->stats.rxerrc += er32(RXERRC);
	adapter->stats.cexterr += er32(CEXTERR);
	adapter->stats.tsctc += er32(TSCTC);
	adapter->stats.tsctfc += er32(TSCTFC);

	/* Fill out the OS statistics structure */
	netdev->stats.multicast = adapter->stats.mprc;
	netdev->stats.collisions = adapter->stats.colc;

	/* Rx Errors */

	/*
	 * RLEC on some newer hardware can be incorrect so build
	 * our own version based on RUC and ROC
	 */
	netdev->stats.rx_errors = adapter->stats.rxerrc +
		adapter->stats.crcerrs + adapter->stats.algnerrc +
		adapter->stats.ruc + adapter->stats.roc +
		adapter->stats.cexterr;
	netdev->stats.rx_length_errors = adapter->stats.ruc +
					 adapter->stats.roc;
	netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
	netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
	netdev->stats.rx_missed_errors = adapter->stats.mpc;

	/* Tx Errors */
	netdev->stats.tx_errors = adapter->stats.ecol +
				  adapter->stats.latecol;
	netdev->stats.tx_aborted_errors = adapter->stats.ecol;
	netdev->stats.tx_window_errors = adapter->stats.latecol;
	netdev->stats.tx_carrier_errors = adapter->stats.tncrs;

	/* Tx Dropped needs to be maintained elsewhere */

	/* Management Stats */
	adapter->stats.mgptc += er32(MGTPTC);
	adapter->stats.mgprc += er32(MGTPRC);
	adapter->stats.mgpdc += er32(MGTPDC);
}
/**
 * e1000_phy_read_status - Update the PHY register status snapshot
 * @adapter: board private structure
 **/
static void e1000_phy_read_status(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_phy_regs *phy = &adapter->phy_regs;

	if ((er32(STATUS) & E1000_STATUS_LU) &&
	    (adapter->hw.phy.media_type == e1000_media_type_copper)) {
		int ret_val;

		ret_val = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr);
		ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr);
		ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise);
		ret_val |= e1e_rphy(hw, PHY_LP_ABILITY, &phy->lpa);
		ret_val |= e1e_rphy(hw, PHY_AUTONEG_EXP, &phy->expansion);
		ret_val |= e1e_rphy(hw, PHY_1000T_CTRL, &phy->ctrl1000);
		ret_val |= e1e_rphy(hw, PHY_1000T_STATUS, &phy->stat1000);
		ret_val |= e1e_rphy(hw, PHY_EXT_STATUS, &phy->estatus);
		if (ret_val)
			e_warn("Error reading PHY register\n");
	} else {
		/*
		 * Do not read PHY registers if link is not up
		 * Set values to typical power-on defaults
		 */
		phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX);
		phy->bmsr = (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL |
			     BMSR_10HALF | BMSR_ESTATEN | BMSR_ANEGCAPABLE |
			     BMSR_ERCAP);
		phy->advertise = (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP |
				  ADVERTISE_ALL | ADVERTISE_CSMA);
		phy->lpa = 0;
		phy->expansion = EXPANSION_ENABLENPAGE;
		phy->ctrl1000 = ADVERTISE_1000FULL;
		phy->stat1000 = 0;
		phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF);
	}
}
static void e1000_print_link_info(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl = er32(CTRL);

	/* Link status message must follow this format for user tools */
	printk(KERN_INFO "e1000e: %s NIC Link is Up %d Mbps %s, "
	       "Flow Control: %s\n",
	       adapter->netdev->name,
	       adapter->link_speed,
	       (adapter->link_duplex == FULL_DUPLEX) ?
	       "Full Duplex" : "Half Duplex",
	       ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ?
	       "Rx/Tx" :
	       ((ctrl & E1000_CTRL_RFCE) ? "Rx" :
		((ctrl & E1000_CTRL_TFCE) ? "Tx" : "None")));
}
static bool e1000e_has_link(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	bool link_active = 0;
	s32 ret_val = 0;

	/*
	 * get_link_status is set on LSC (link status) interrupt or
	 * Rx sequence error interrupt.  get_link_status will stay
	 * false until the check_for_link establishes link
	 * for copper adapters ONLY
	 */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		if (hw->mac.get_link_status) {
			ret_val = hw->mac.ops.check_for_link(hw);
			link_active = !hw->mac.get_link_status;
		} else {
			link_active = 1;
		}
		break;
	case e1000_media_type_fiber:
		ret_val = hw->mac.ops.check_for_link(hw);
		link_active = !!(er32(STATUS) & E1000_STATUS_LU);
		break;
	case e1000_media_type_internal_serdes:
		ret_val = hw->mac.ops.check_for_link(hw);
		link_active = adapter->hw.mac.serdes_has_link;
		break;
	default:
	case e1000_media_type_unknown:
		break;
	}

	if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&
	    (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
		/* See e1000_kmrn_lock_loss_workaround_ich8lan() */
		e_info("Gigabit has been disabled, downgrading speed\n");
	}

	return link_active;
}
static void e1000e_enable_receives(struct e1000_adapter *adapter)
{
	/* make sure the receive unit is started */
	if ((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
	    (adapter->flags & FLAG_RX_RESTART_NOW)) {
		struct e1000_hw *hw = &adapter->hw;
		u32 rctl = er32(RCTL);
		ew32(RCTL, rctl | E1000_RCTL_EN);
		adapter->flags &= ~FLAG_RX_RESTART_NOW;
	}
}
static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/*
	 * With 82574 controllers, PHY needs to be checked periodically
	 * for hung state and reset, if two calls return true
	 */
	if (e1000_check_phy_82574(hw))
		adapter->phy_hang_count++;
	else
		adapter->phy_hang_count = 0;

	if (adapter->phy_hang_count > 1) {
		adapter->phy_hang_count = 0;
		schedule_work(&adapter->reset_task);
	}
}
/**
 * e1000_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void e1000_watchdog(unsigned long data)
{
	struct e1000_adapter *adapter = (struct e1000_adapter *) data;

	/* Do the rest outside of interrupt context */
	schedule_work(&adapter->watchdog_task);

	/* TODO: make this use queue_delayed_work() */
}
static void e1000_watchdog_task(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
					struct e1000_adapter, watchdog_task);
	struct net_device *netdev = adapter->netdev;
	struct e1000_mac_info *mac = &adapter->hw.mac;
	struct e1000_phy_info *phy = &adapter->hw.phy;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_hw *hw = &adapter->hw;
	u32 link, tctl;

	if (test_bit(__E1000_DOWN, &adapter->state))
		return;

	link = e1000e_has_link(adapter);
	if ((netif_carrier_ok(netdev)) && link) {
		/* Cancel scheduled suspend requests. */
		pm_runtime_resume(netdev->dev.parent);

		e1000e_enable_receives(adapter);
		goto link_up;
	}

	if ((e1000e_enable_tx_pkt_filtering(hw)) &&
	    (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id))
		e1000_update_mng_vlan(adapter);

	if (link) {
		if (!netif_carrier_ok(netdev)) {
			bool txb2b = 1;

			/* Cancel scheduled suspend requests. */
			pm_runtime_resume(netdev->dev.parent);

			/* update snapshot of PHY registers on LSC */
			e1000_phy_read_status(adapter);
			mac->ops.get_link_up_info(&adapter->hw,
						  &adapter->link_speed,
						  &adapter->link_duplex);
			e1000_print_link_info(adapter);
			/*
			 * On supported PHYs, check for duplex mismatch only
			 * if link has autonegotiated at 10/100 half
			 */
			if ((hw->phy.type == e1000_phy_igp_3 ||
			     hw->phy.type == e1000_phy_bm) &&
			    (hw->mac.autoneg == true) &&
			    (adapter->link_speed == SPEED_10 ||
			     adapter->link_speed == SPEED_100) &&
			    (adapter->link_duplex == HALF_DUPLEX)) {
				u16 autoneg_exp;

				e1e_rphy(hw, PHY_AUTONEG_EXP, &autoneg_exp);

				if (!(autoneg_exp & NWAY_ER_LP_NWAY_CAPS))
					e_info("Autonegotiated half duplex but"
					       " link partner cannot autoneg. "
					       " Try forcing full duplex if "
					       "link gets many collisions.\n");
			}

			/* adjust timeout factor according to speed/duplex */
			adapter->tx_timeout_factor = 1;
			switch (adapter->link_speed) {
			case SPEED_10:
				txb2b = 0;
				adapter->tx_timeout_factor = 16;
				break;
			case SPEED_100:
				txb2b = 0;
				adapter->tx_timeout_factor = 10;
				break;
			}

			/*
			 * workaround: re-program speed mode bit after
			 * link-up event
			 */
			if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) &&
			    !txb2b) {
				u32 tarc0;
				tarc0 = er32(TARC(0));
				tarc0 &= ~SPEED_MODE_BIT;
				ew32(TARC(0), tarc0);
			}

			/*
			 * disable TSO for pcie and 10/100 speeds, to avoid
			 * some hardware issues
			 */
			if (!(adapter->flags & FLAG_TSO_FORCE)) {
				switch (adapter->link_speed) {
				case SPEED_10:
				case SPEED_100:
					e_info("10/100 speed: disabling TSO\n");
					netdev->features &= ~NETIF_F_TSO;
					netdev->features &= ~NETIF_F_TSO6;
					break;
				case SPEED_1000:
					netdev->features |= NETIF_F_TSO;
					netdev->features |= NETIF_F_TSO6;
					break;
				default:
					/* oops */
					break;
				}
			}

			/*
			 * enable transmits in the hardware, need to do this
			 * after setting TARC(0)
			 */
			tctl = er32(TCTL);
			tctl |= E1000_TCTL_EN;
			ew32(TCTL, tctl);

			/*
			 * Perform any post-link-up configuration before
			 * reporting link up.
			 */
			if (phy->ops.cfg_on_link_up)
				phy->ops.cfg_on_link_up(hw);

			netif_carrier_on(netdev);

			if (!test_bit(__E1000_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
					  round_jiffies(jiffies + 2 * HZ));
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;
			/* Link status message must follow this format */
			printk(KERN_INFO "e1000e: %s NIC Link is Down\n",
			       adapter->netdev->name);
			netif_carrier_off(netdev);
			if (!test_bit(__E1000_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
					  round_jiffies(jiffies + 2 * HZ));

			if (adapter->flags & FLAG_RX_NEEDS_RESTART)
				schedule_work(&adapter->reset_task);
			else
				pm_schedule_suspend(netdev->dev.parent,
						    LINK_TIMEOUT);
		}
	}

link_up:
	spin_lock(&adapter->stats64_lock);
	e1000e_update_stats(adapter);
	spin_unlock(&adapter->stats64_lock);

	mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
	adapter->tpt_old = adapter->stats.tpt;
	mac->collision_delta = adapter->stats.colc - adapter->colc_old;
	adapter->colc_old = adapter->stats.colc;

	adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
	adapter->gorc_old = adapter->stats.gorc;
	adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
	adapter->gotc_old = adapter->stats.gotc;

	e1000e_update_adaptive(&adapter->hw);
4356 if (!netif_carrier_ok(netdev) &&
4357 (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) {
4359 * We've lost link, so the controller stops DMA,
4360 * but we've got queued Tx work that's never going
4361 * to get done, so reset controller to flush Tx.
4362 * (Do the reset outside of interrupt context).
4364 schedule_work(&adapter->reset_task);
4365 /* return immediately since reset is imminent */
4369 /* Simple mode for Interrupt Throttle Rate (ITR) */
4370 if (adapter->itr_setting == 4) {
4372 * Symmetric Tx/Rx gets a reduced ITR=2000;
4373 * Total asymmetrical Tx or Rx gets ITR=8000;
4374 * everyone else is between 2000-8000.
4376 u32 goc = (adapter->gotc + adapter->gorc) / 10000;
4377 u32 dif = (adapter->gotc > adapter->gorc ?
4378 adapter->gotc - adapter->gorc :
4379 adapter->gorc - adapter->gotc) / 10000;
4380 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
4382 ew32(ITR, 1000000000 / (itr * 256));
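
		/*
		 * Worked example (editorial note, not part of the original
		 * source): the ITR register counts in 256 ns units, so for
		 * the fully asymmetric case itr = 8000 the value written is
		 * 1000000000 / (8000 * 256) = 488, i.e. an inter-interrupt
		 * interval of 488 * 256 ns ~= 125 us, which is the targeted
		 * ~8000 interrupts/sec; itr = 2000 likewise yields ~500 us.
		 */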
	}

	/* Cause software interrupt to ensure Rx ring is cleaned */
	if (adapter->msix_entries)
		ew32(ICS, adapter->rx_ring->ims_val);
	else
		ew32(ICS, E1000_ICS_RXDMT0);

	/* flush pending descriptors to memory before detecting Tx hang */
	e1000e_flush_descriptors(adapter);

	/* Force detection of hung controller every watchdog period */
	adapter->detect_tx_hung = 1;

	/*
	 * With 82571 controllers, LAA may be overwritten due to controller
	 * reset from the other port. Set the appropriate LAA in RAR[0]
	 */
	if (e1000e_get_laa_state_82571(hw))
		e1000e_rar_set(hw, adapter->hw.mac.addr, 0);

	if (adapter->flags2 & FLAG2_CHECK_PHY_HANG)
		e1000e_check_82574_phy_workaround(adapter);

	/* Reset the timer */
	if (!test_bit(__E1000_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + 2 * HZ));
}

#define E1000_TX_FLAGS_CSUM		0x00000001
#define E1000_TX_FLAGS_VLAN		0x00000002
#define E1000_TX_FLAGS_TSO		0x00000004
#define E1000_TX_FLAGS_IPV4		0x00000008
#define E1000_TX_FLAGS_VLAN_MASK	0xffff0000
#define E1000_TX_FLAGS_VLAN_SHIFT	16

static int e1000_tso(struct e1000_adapter *adapter, struct sk_buff *skb)
{
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_context_desc *context_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i;
	u32 cmd_length = 0;
	u16 ipcse = 0, tucse, mss;
	u8 ipcss, ipcso, tucss, tucso, hdr_len;

	if (!skb_is_gso(skb))
		return 0;

	if (skb_header_cloned(skb)) {
		int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);

		if (err)
			return err;
	}

	hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	mss = skb_shinfo(skb)->gso_size;
	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
							 0, IPPROTO_TCP, 0);
		cmd_length = E1000_TXD_CMD_IP;
		ipcse = skb_transport_offset(skb) - 1;
	} else if (skb_is_gso_v6(skb)) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						       &ipv6_hdr(skb)->daddr,
						       0, IPPROTO_TCP, 0);
	}
	ipcss = skb_network_offset(skb);
	ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
	tucss = skb_transport_offset(skb);
	tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
	tucse = 0;

	cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
		       E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));

	i = tx_ring->next_to_use;
	context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
	buffer_info = &tx_ring->buffer_info[i];

	context_desc->lower_setup.ip_fields.ipcss = ipcss;
	context_desc->lower_setup.ip_fields.ipcso = ipcso;
	context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
	context_desc->upper_setup.tcp_fields.tucss = tucss;
	context_desc->upper_setup.tcp_fields.tucso = tucso;
	context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
	context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
	context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
	context_desc->cmd_and_length = cpu_to_le32(cmd_length);

	buffer_info->time_stamp = jiffies;
	buffer_info->next_to_watch = i;

	i++;
	if (i == tx_ring->count)
		i = 0;
	tx_ring->next_to_use = i;

	return 1;
}
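
/*
 * Illustrative example (editorial note, not part of the original source):
 * for a TSO'd IPv4/TCP skb with no IP or TCP options, the offsets above
 * work out to ipcss = 14 (start of the IP header after the Ethernet
 * header), ipcso = 24 (the IP checksum field), ipcse = 33 (last byte of
 * the IP header), tucss = 34 (start of the TCP header), tucso = 50 (the
 * TCP checksum field) and hdr_len = 54, with mss typically 1460 on a
 * 1500-byte-MTU path.
 */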
static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
{
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_context_desc *context_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i;
	u8 css;
	u32 cmd_len = E1000_TXD_CMD_DEXT;
	__be16 protocol;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return false;

	if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
		protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
	else
		protocol = skb->protocol;

	switch (protocol) {
	case cpu_to_be16(ETH_P_IP):
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			cmd_len |= E1000_TXD_CMD_TCP;
		break;
	case cpu_to_be16(ETH_P_IPV6):
		/* XXX not handling all IPV6 headers */
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			cmd_len |= E1000_TXD_CMD_TCP;
		break;
	default:
		if (unlikely(net_ratelimit()))
			e_warn("checksum_partial proto=%x!\n",
			       be16_to_cpu(protocol));
		break;
	}

	css = skb_checksum_start_offset(skb);

	i = tx_ring->next_to_use;
	buffer_info = &tx_ring->buffer_info[i];
	context_desc = E1000_CONTEXT_DESC(*tx_ring, i);

	context_desc->lower_setup.ip_config = 0;
	context_desc->upper_setup.tcp_fields.tucss = css;
	context_desc->upper_setup.tcp_fields.tucso = css + skb->csum_offset;
	context_desc->upper_setup.tcp_fields.tucse = 0;
	context_desc->tcp_seg_setup.data = 0;
	context_desc->cmd_and_length = cpu_to_le32(cmd_len);

	buffer_info->time_stamp = jiffies;
	buffer_info->next_to_watch = i;

	i++;
	if (i == tx_ring->count)
		i = 0;
	tx_ring->next_to_use = i;

	return true;
}
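
/*
 * Illustrative example (editorial note, not part of the original source):
 * for an IPv4/TCP packet without options, skb_checksum_start_offset()
 * returns 34 (14-byte Ethernet + 20-byte IP header), so tucss = 34 and
 * tucso = 34 + 16 = 50, the TCP checksum field; tucse = 0 tells the
 * hardware to checksum through the end of the packet.
 */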
#define E1000_MAX_PER_TXD	8192
#define E1000_MAX_TXD_PWR	12

static int e1000_tx_map(struct e1000_adapter *adapter,
			struct sk_buff *skb, unsigned int first,
			unsigned int max_per_txd, unsigned int nr_frags,
			unsigned int mss)
{
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_buffer *buffer_info;
	unsigned int len = skb_headlen(skb);
	unsigned int offset = 0, size, count = 0, i;
	unsigned int f, bytecount, segs;

	i = tx_ring->next_to_use;

	while (len) {
		buffer_info = &tx_ring->buffer_info[i];
		size = min(len, max_per_txd);

		buffer_info->length = size;
		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;
		buffer_info->dma = dma_map_single(&pdev->dev,
						  skb->data + offset,
						  size, DMA_TO_DEVICE);
		buffer_info->mapped_as_page = false;
		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
			goto dma_error;

		len -= size;
		offset += size;
		count++;

		if (len) {
			i++;
			if (i == tx_ring->count)
				i = 0;
		}
	}

	for (f = 0; f < nr_frags; f++) {
		struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = frag->size;
		offset = frag->page_offset;

		while (len) {
			i++;
			if (i == tx_ring->count)
				i = 0;

			buffer_info = &tx_ring->buffer_info[i];
			size = min(len, max_per_txd);

			buffer_info->length = size;
			buffer_info->time_stamp = jiffies;
			buffer_info->next_to_watch = i;
			buffer_info->dma = dma_map_page(&pdev->dev, frag->page,
							offset, size,
							DMA_TO_DEVICE);
			buffer_info->mapped_as_page = true;
			if (dma_mapping_error(&pdev->dev, buffer_info->dma))
				goto dma_error;

			len -= size;
			offset += size;
			count++;
		}
	}

	segs = skb_shinfo(skb)->gso_segs ? : 1;
	/* multiply data chunks by size of headers */
	bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;

	tx_ring->buffer_info[i].skb = skb;
	tx_ring->buffer_info[i].segs = segs;
	tx_ring->buffer_info[i].bytecount = bytecount;
	tx_ring->buffer_info[first].next_to_watch = i;

	return count;

dma_error:
	dev_err(&pdev->dev, "Tx DMA map failed\n");
	buffer_info->dma = 0;
	if (count)
		count--;

	while (count--) {
		if (i == 0)
			i += tx_ring->count;
		i--;
		buffer_info = &tx_ring->buffer_info[i];
		e1000_put_txbuf(adapter, buffer_info);
	}

	return 0;
}

static void e1000_tx_queue(struct e1000_adapter *adapter,
			   int tx_flags, int count)
{
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_tx_desc *tx_desc = NULL;
	struct e1000_buffer *buffer_info;
	u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
	unsigned int i;

	if (tx_flags & E1000_TX_FLAGS_TSO) {
		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
			     E1000_TXD_CMD_TSE;
		txd_upper |= E1000_TXD_POPTS_TXSM << 8;

		if (tx_flags & E1000_TX_FLAGS_IPV4)
			txd_upper |= E1000_TXD_POPTS_IXSM << 8;
	}

	if (tx_flags & E1000_TX_FLAGS_CSUM) {
		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
	}

	if (tx_flags & E1000_TX_FLAGS_VLAN) {
		txd_lower |= E1000_TXD_CMD_VLE;
		txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
	}

	i = tx_ring->next_to_use;

	do {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = E1000_TX_DESC(*tx_ring, i);
		tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->lower.data =
			cpu_to_le32(txd_lower | buffer_info->length);
		tx_desc->upper.data = cpu_to_le32(txd_upper);

		i++;
		if (i == tx_ring->count)
			i = 0;
	} while (--count > 0);

	tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);

	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, adapter->hw.hw_addr + tx_ring->tail);
	/*
	 * we need this if more than one processor can write to our tail
	 * at a time, it synchronizes IO on IA64/Altix systems
	 */
	mmiowb();
}
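
/*
 * Editorial note, not part of the original source: only the last
 * descriptor of a frame has adapter->txd_cmd OR'd in above, which
 * typically carries the end-of-packet and report-status bits. A packet
 * mapped across, say, three buffers therefore becomes three descriptors
 * sharing the same txd_lower/txd_upper option flags, with only the third
 * marking the frame boundary for the hardware.
 */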
#define MINIMUM_DHCP_PACKET_SIZE 282
static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
				    struct sk_buff *skb)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 length, offset;

	if (vlan_tx_tag_present(skb)) {
		if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
		      (adapter->hw.mng_cookie.status &
		       E1000_MNG_DHCP_COOKIE_STATUS_VLAN)))
			return 0;
	}

	if (skb->len <= MINIMUM_DHCP_PACKET_SIZE)
		return 0;

	if (((struct ethhdr *) skb->data)->h_proto != htons(ETH_P_IP))
		return 0;

	{
		const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data+14);
		struct udphdr *udp;

		if (ip->protocol != IPPROTO_UDP)
			return 0;

		udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2));
		if (ntohs(udp->dest) != 67)
			return 0;

		offset = (u8 *)udp + 8 - skb->data;
		length = skb->len - offset;
		return e1000e_mng_write_dhcp_info(hw, (u8 *)udp + 8, length);
	}

	return 0;
}
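
/*
 * Editorial note, not part of the original source: for a typical frame
 * the DHCP payload handed to the firmware starts at offset 42 (14-byte
 * Ethernet + 20-byte IP header with ihl = 5 + 8-byte UDP header), and
 * the 282-byte minimum apparently corresponds to those 42 bytes plus the
 * 240-byte minimum BOOTP/DHCP payload (236 fixed bytes plus the 4-byte
 * magic cookie).
 */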
static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	netif_stop_queue(netdev);
	/*
	 * Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it.
	 */
	smp_mb();

	/*
	 * We need to check again in case another CPU has just
	 * made room available.
	 */
	if (e1000_desc_unused(adapter->tx_ring) < size)
		return -EBUSY;

	/* A reprieve! */
	netif_start_queue(netdev);
	++adapter->restart_queue;
	return 0;
}

static int e1000_maybe_stop_tx(struct net_device *netdev, int size)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if (e1000_desc_unused(adapter->tx_ring) >= size)
		return 0;
	return __e1000_maybe_stop_tx(netdev, size);
}

#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
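
/*
 * Editorial note, not part of the original source: TXD_USE_COUNT is a
 * conservative upper bound on descriptors per buffer. With the default
 * max_txd_pwr of 12 (4096-byte chunks), a 9000-byte linear buffer costs
 * (9000 >> 12) + 1 = 3 descriptors; an exact multiple such as 8192 is
 * still counted as 3 even though it fits in 2, erring on the safe side.
 */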
static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_ring *tx_ring = adapter->tx_ring;
	unsigned int first;
	unsigned int max_per_txd = E1000_MAX_PER_TXD;
	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
	unsigned int tx_flags = 0;
	unsigned int len = skb_headlen(skb);
	unsigned int nr_frags;
	unsigned int mss;
	int count = 0;
	int tso;
	unsigned int f;

	if (test_bit(__E1000_DOWN, &adapter->state)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	mss = skb_shinfo(skb)->gso_size;
	/*
	 * The controller does a simple calculation to
	 * make sure there is enough room in the FIFO before
	 * initiating the DMA for each buffer.  The calc is:
	 * 4 = ceil(buffer len/mss).  To make sure we don't
	 * overrun the FIFO, adjust the max buffer len if mss
	 * drops.
	 */
	if (mss) {
		u8 hdr_len;
		max_per_txd = min(mss << 2, max_per_txd);
		max_txd_pwr = fls(max_per_txd) - 1;
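
		/*
		 * Editorial example, not part of the original source: with
		 * mss = 1460, max_per_txd becomes min(5840, 8192) = 5840 and
		 * max_txd_pwr = fls(5840) - 1 = 12, i.e. 4096-byte chunks.
		 */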
		/*
		 * TSO Workaround for 82571/2/3 Controllers -- if skb->data
		 * points to just header, pull a few bytes of payload from
		 * frags into skb->data
		 */
		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		/*
		 * we do this workaround for ES2LAN, but it is unnecessary,
		 * avoiding it could save a lot of cycles
		 */
		if (skb->data_len && (hdr_len == len)) {
			unsigned int pull_size;

			pull_size = min((unsigned int)4, skb->data_len);
			if (!__pskb_pull_tail(skb, pull_size)) {
				e_err("__pskb_pull_tail failed.\n");
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
			len = skb_headlen(skb);
		}
	}

	/* reserve a descriptor for the offload context */
	if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
		count++;
	count++;

	count += TXD_USE_COUNT(len, max_txd_pwr);

	nr_frags = skb_shinfo(skb)->nr_frags;
	for (f = 0; f < nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
				       max_txd_pwr);

	if (adapter->hw.mac.tx_pkt_filtering)
		e1000_transfer_dhcp_info(adapter, skb);

	/*
	 * need: count + 2 desc gap to keep tail from touching
	 * head, otherwise try next time
	 */
	if (e1000_maybe_stop_tx(netdev, count + 2))
		return NETDEV_TX_BUSY;

	if (vlan_tx_tag_present(skb)) {
		tx_flags |= E1000_TX_FLAGS_VLAN;
		tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
	}

	first = tx_ring->next_to_use;

	tso = e1000_tso(adapter, skb);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (tso)
		tx_flags |= E1000_TX_FLAGS_TSO;
	else if (e1000_tx_csum(adapter, skb))
		tx_flags |= E1000_TX_FLAGS_CSUM;

	/*
	 * Old method was to assume IPv4 packet by default if TSO was enabled.
	 * 82571 hardware supports TSO capabilities for IPv6 as well...
	 * no longer assume, we must check.
	 */
	if (skb->protocol == htons(ETH_P_IP))
		tx_flags |= E1000_TX_FLAGS_IPV4;

	/* if count is 0 then mapping error has occurred */
	count = e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss);
	if (count) {
		e1000_tx_queue(adapter, tx_flags, count);
		/* Make sure there is space in the ring for the next send. */
		e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2);
	} else {
		dev_kfree_skb_any(skb);
		tx_ring->buffer_info[first].time_stamp = 0;
		tx_ring->next_to_use = first;
	}

	return NETDEV_TX_OK;
}

/**
 * e1000_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void e1000_tx_timeout(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	adapter->tx_timeout_count++;
	schedule_work(&adapter->reset_task);
}

static void e1000_reset_task(struct work_struct *work)
{
	struct e1000_adapter *adapter;
	adapter = container_of(work, struct e1000_adapter, reset_task);

	/* don't run the task if already down */
	if (test_bit(__E1000_DOWN, &adapter->state))
		return;

	if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
	      (adapter->flags & FLAG_RX_RESTART_NOW))) {
		e1000e_dump(adapter);
		e_err("Reset adapter\n");
	}
	e1000e_reinit_locked(adapter);
}

/**
 * e1000_get_stats64 - Get System Network Statistics
 * @netdev: network interface device structure
 * @stats: rtnl_link_stats64 pointer
 *
 * Returns the address of the device statistics structure.
 **/
struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
					     struct rtnl_link_stats64 *stats)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	memset(stats, 0, sizeof(struct rtnl_link_stats64));
	spin_lock(&adapter->stats64_lock);
	e1000e_update_stats(adapter);
	/* Fill out the OS statistics structure */
	stats->rx_bytes = adapter->stats.gorc;
	stats->rx_packets = adapter->stats.gprc;
	stats->tx_bytes = adapter->stats.gotc;
	stats->tx_packets = adapter->stats.gptc;
	stats->multicast = adapter->stats.mprc;
	stats->collisions = adapter->stats.colc;

	/* Rx Errors */

	/*
	 * RLEC on some newer hardware can be incorrect so build
	 * our own version based on RUC and ROC
	 */
	stats->rx_errors = adapter->stats.rxerrc +
		adapter->stats.crcerrs + adapter->stats.algnerrc +
		adapter->stats.ruc + adapter->stats.roc +
		adapter->stats.cexterr;
	stats->rx_length_errors = adapter->stats.ruc + adapter->stats.roc;
	stats->rx_crc_errors = adapter->stats.crcerrs;
	stats->rx_frame_errors = adapter->stats.algnerrc;
	stats->rx_missed_errors = adapter->stats.mpc;

	/* Tx Errors */
	stats->tx_errors = adapter->stats.ecol + adapter->stats.latecol;
	stats->tx_aborted_errors = adapter->stats.ecol;
	stats->tx_window_errors = adapter->stats.latecol;
	stats->tx_carrier_errors = adapter->stats.tncrs;

	/* Tx Dropped needs to be maintained elsewhere */

	spin_unlock(&adapter->stats64_lock);
	return stats;
}

/**
 * e1000_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

	/* Jumbo frame support */
	if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
	    !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
		e_err("Jumbo Frames not supported.\n");
		return -EINVAL;
	}

	/* Supported frame sizes */
	if ((new_mtu < ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) ||
	    (max_frame > adapter->max_hw_frame_size)) {
		e_err("Unsupported MTU setting\n");
		return -EINVAL;
	}

	/* Jumbo frame workaround on 82579 requires CRC be stripped */
	if ((adapter->hw.mac.type == e1000_pch2lan) &&
	    !(adapter->flags2 & FLAG2_CRC_STRIPPING) &&
	    (new_mtu > ETH_DATA_LEN)) {
		e_err("Jumbo Frames not supported on 82579 when CRC "
		      "stripping is disabled.\n");
		return -EINVAL;
	}

	/* 82573 Errata 17 */
	if (((adapter->hw.mac.type == e1000_82573) ||
	     (adapter->hw.mac.type == e1000_82574)) &&
	    (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN)) {
		adapter->flags2 |= FLAG2_DISABLE_ASPM_L1;
		e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L1);
	}

	while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
		usleep_range(1000, 2000);
	/* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
	adapter->max_frame_size = max_frame;
	e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	if (netif_running(netdev))
		e1000e_down(adapter);

	/*
	 * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
	 * means we reserve 2 more, this pushes us to allocate from the next
	 * larger slab size.
	 * i.e. RXBUFFER_2048 --> size-4096 slab
	 * However with the new *_jumbo_rx* routines, jumbo receives will use
	 * fragmented skbs
	 */
	if (max_frame <= 2048)
		adapter->rx_buffer_len = 2048;
	else
		adapter->rx_buffer_len = 4096;

	/* adjust allocation if LPE protects us, and we aren't using SBP */
	if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
	    (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
		adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN
					 + ETH_FCS_LEN;

	if (netif_running(netdev))
		e1000e_up(adapter);
	else
		e1000e_reset(adapter);

	clear_bit(__E1000_RESETTING, &adapter->state);

	return 0;
}
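
/*
 * Editorial example, not part of the original source: the default MTU of
 * 1500 gives max_frame = 1500 + 14 + 4 = 1518, which matches the
 * ETH_FRAME_LEN + ETH_FCS_LEN case above, so rx_buffer_len is trimmed to
 * ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN = 1522 bytes; a 9000-byte MTU
 * gives max_frame = 9018 and leaves rx_buffer_len at 4096, with the
 * jumbo Rx routines assembling larger frames from page fragments.
 */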
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct mii_ioctl_data *data = if_mii(ifr);

	if (adapter->hw.phy.media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = adapter->hw.phy.addr;
		break;
	case SIOCGMIIREG:
		e1000_phy_read_status(adapter);

		switch (data->reg_num & 0x1F) {
		case MII_BMCR:
			data->val_out = adapter->phy_regs.bmcr;
			break;
		case MII_BMSR:
			data->val_out = adapter->phy_regs.bmsr;
			break;
		case MII_PHYSID1:
			data->val_out = (adapter->hw.phy.id >> 16);
			break;
		case MII_PHYSID2:
			data->val_out = (adapter->hw.phy.id & 0xFFFF);
			break;
		case MII_ADVERTISE:
			data->val_out = adapter->phy_regs.advertise;
			break;
		case MII_LPA:
			data->val_out = adapter->phy_regs.lpa;
			break;
		case MII_EXPANSION:
			data->val_out = adapter->phy_regs.expansion;
			break;
		case MII_CTRL1000:
			data->val_out = adapter->phy_regs.ctrl1000;
			break;
		case MII_STAT1000:
			data->val_out = adapter->phy_regs.stat1000;
			break;
		case MII_ESTATUS:
			data->val_out = adapter->phy_regs.estatus;
			break;
		default:
			return -EIO;
		}
		break;
	case SIOCSMIIREG:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return e1000_mii_ioctl(netdev, ifr, cmd);
	default:
		return -EOPNOTSUPP;
	}
}

static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 i, mac_reg;
	u16 phy_reg;
	int retval = 0;

	/* copy MAC RARs to PHY RARs */
	e1000_copy_rx_addrs_to_phy_ich8lan(hw);

	/* copy MAC MTA to PHY MTA */
	for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
		mac_reg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
		e1e_wphy(hw, BM_MTA(i), (u16)(mac_reg & 0xFFFF));
		e1e_wphy(hw, BM_MTA(i) + 1, (u16)((mac_reg >> 16) & 0xFFFF));
	}

	/* configure PHY Rx Control register */
	e1e_rphy(&adapter->hw, BM_RCTL, &phy_reg);
	mac_reg = er32(RCTL);
	if (mac_reg & E1000_RCTL_UPE)
		phy_reg |= BM_RCTL_UPE;
	if (mac_reg & E1000_RCTL_MPE)
		phy_reg |= BM_RCTL_MPE;
	phy_reg &= ~(BM_RCTL_MO_MASK);
	if (mac_reg & E1000_RCTL_MO_3)
		phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
			    << BM_RCTL_MO_SHIFT);
	if (mac_reg & E1000_RCTL_BAM)
		phy_reg |= BM_RCTL_BAM;
	if (mac_reg & E1000_RCTL_PMCF)
		phy_reg |= BM_RCTL_PMCF;
	mac_reg = er32(CTRL);
	if (mac_reg & E1000_CTRL_RFCE)
		phy_reg |= BM_RCTL_RFCE;
	e1e_wphy(&adapter->hw, BM_RCTL, phy_reg);

	/* enable PHY wakeup in MAC register */
	ew32(WUFC, wufc);
	ew32(WUC, E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);

	/* configure and enable PHY wakeup in PHY registers */
	e1e_wphy(&adapter->hw, BM_WUFC, wufc);
	e1e_wphy(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);

	/* activate PHY wakeup */
	retval = hw->phy.ops.acquire(hw);
	if (retval) {
		e_err("Could not acquire PHY\n");
		return retval;
	}
	e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
				  (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT));
	retval = e1000e_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &phy_reg);
	if (retval) {
		e_err("Could not read PHY page 769\n");
		goto out;
	}
	phy_reg |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
	retval = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg);
	if (retval)
		e_err("Could not set PHY Host Wakeup bit\n");
out:
	hw->phy.ops.release(hw);

	return retval;
}

static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
			    bool runtime)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, ctrl_ext, rctl, status;
	/* Runtime suspend should only enable wakeup for link changes */
	u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
	int retval = 0;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
		e1000e_down(adapter);
		e1000_free_irq(adapter);
	}
	e1000e_reset_interrupt_capability(adapter);

	retval = pci_save_state(pdev);
	if (retval)
		return retval;

	status = er32(STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		e1000_setup_rctl(adapter);
		e1000_set_multi(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC) {
			rctl = er32(RCTL);
			rctl |= E1000_RCTL_MPE;
			ew32(RCTL, rctl);
		}

		ctrl = er32(CTRL);
		/* advertise wake from D3Cold */
		#define E1000_CTRL_ADVD3WUC 0x00100000
		/* phy power management enable */
		#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
		ctrl |= E1000_CTRL_ADVD3WUC;
		if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP))
			ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT;
		ew32(CTRL, ctrl);

		if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
		    adapter->hw.phy.media_type ==
		    e1000_media_type_internal_serdes) {
			/* keep the laser running in D3 */
			ctrl_ext = er32(CTRL_EXT);
			ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
			ew32(CTRL_EXT, ctrl_ext);
		}

		if (adapter->flags & FLAG_IS_ICH)
			e1000e_disable_gig_wol_ich8lan(&adapter->hw);

		/* Allow time for pending master requests to run */
		e1000e_disable_pcie_master(&adapter->hw);

		if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
			/* enable wakeup by the PHY */
			retval = e1000_init_phy_wakeup(adapter, wufc);
			if (retval)
				return retval;
		} else {
			/* enable wakeup by the MAC */
			ew32(WUFC, wufc);
			ew32(WUC, E1000_WUC_PME_EN);
		}
	} else {
		ew32(WUC, 0);
		ew32(WUFC, 0);
	}

	*enable_wake = !!wufc;

	/* make sure adapter isn't asleep if manageability is enabled */
	if ((adapter->flags & FLAG_MNG_PT_ENABLED) ||
	    (hw->mac.ops.check_mng_mode(hw)))
		*enable_wake = true;

	if (adapter->hw.phy.type == e1000_phy_igp_3)
		e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);

	/*
	 * Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant.
	 */
	e1000e_release_hw_control(adapter);

	pci_disable_device(pdev);

	return 0;
}

static void e1000_power_off(struct pci_dev *pdev, bool sleep, bool wake)
{
	if (sleep && wake) {
		pci_prepare_to_sleep(pdev);
		return;
	}

	pci_wake_from_d3(pdev, wake);
	pci_set_power_state(pdev, PCI_D3hot);
}

static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep,
				    bool wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	/*
	 * The pci-e switch on some quad port adapters will report a
	 * correctable error when the MAC transitions from D0 to D3.  To
	 * prevent this we need to mask off the correctable errors on the
	 * downstream port of the pci-e switch.
	 */
	if (adapter->flags & FLAG_IS_QUAD_PORT) {
		struct pci_dev *us_dev = pdev->bus->self;
		int pos = pci_find_capability(us_dev, PCI_CAP_ID_EXP);
		u16 devctl;

		pci_read_config_word(us_dev, pos + PCI_EXP_DEVCTL, &devctl);
		pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL,
				      (devctl & ~PCI_EXP_DEVCTL_CERE));

		e1000_power_off(pdev, sleep, wake);

		pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, devctl);
	} else {
		e1000_power_off(pdev, sleep, wake);
	}
}

#ifdef CONFIG_PCIEASPM
static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
{
	pci_disable_link_state(pdev, state);
}
#else
static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
{
	int pos;
	u16 reg16;

	/*
	 * Both device and parent should have the same ASPM setting.
	 * Disable ASPM in downstream component first and then upstream.
	 */
	pos = pci_pcie_cap(pdev);
	pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
	reg16 &= ~state;
	pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);

	if (!pdev->bus->self)
		return;

	pos = pci_pcie_cap(pdev->bus->self);
	pci_read_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, &reg16);
	reg16 &= ~state;
	pci_write_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, reg16);
}
#endif
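
/*
 * Editorial note, not part of the original source: the open-coded
 * fallback above works because the kernel's PCIE_LINK_STATE_L0S (0x1)
 * and PCIE_LINK_STATE_L1 (0x2) values line up with the ASPM control
 * field in the Link Control register (bit 0 = L0s, bit 1 = L1), so
 * "reg16 &= ~state" clears exactly the requested ASPM states.
 */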
void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
{
	dev_info(&pdev->dev, "Disabling ASPM %s %s\n",
		 (state & PCIE_LINK_STATE_L0S) ? "L0s" : "",
		 (state & PCIE_LINK_STATE_L1) ? "L1" : "");

	__e1000e_disable_aspm(pdev, state);
}

#ifdef CONFIG_PM
static bool e1000e_pm_ready(struct e1000_adapter *adapter)
{
	return !!adapter->tx_ring->buffer_info;
}

static int __e1000_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);
	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
		e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1);

	e1000e_set_interrupt_capability(adapter);
	if (netif_running(netdev)) {
		err = e1000_request_irq(adapter);
		if (err)
			return err;
	}

	e1000e_power_up_phy(adapter);

	/* report the system wakeup cause from S3/S4 */
	if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
		u16 phy_data;

		e1e_rphy(&adapter->hw, BM_WUS, &phy_data);
		if (phy_data) {
			e_info("PHY Wakeup cause - %s\n",
			       phy_data & E1000_WUS_EX ? "Unicast Packet" :
			       phy_data & E1000_WUS_MC ? "Multicast Packet" :
			       phy_data & E1000_WUS_BC ? "Broadcast Packet" :
			       phy_data & E1000_WUS_MAG ? "Magic Packet" :
			       phy_data & E1000_WUS_LNKC ?
			       "Link Status Change" : "other");
		}
		e1e_wphy(&adapter->hw, BM_WUS, ~0);
	} else {
		u32 wus = er32(WUS);
		if (wus) {
			e_info("MAC Wakeup cause - %s\n",
			       wus & E1000_WUS_EX ? "Unicast Packet" :
			       wus & E1000_WUS_MC ? "Multicast Packet" :
			       wus & E1000_WUS_BC ? "Broadcast Packet" :
			       wus & E1000_WUS_MAG ? "Magic Packet" :
			       wus & E1000_WUS_LNKC ? "Link Status Change" :
			       "other");
			ew32(WUS, ~0);
		}
	}

	e1000e_reset(adapter);

	e1000_init_manageability_pt(adapter);

	if (netif_running(netdev))
		e1000e_up(adapter);

	netif_device_attach(netdev);

	/*
	 * If the controller has AMT, do not set DRV_LOAD until the interface
	 * is up.  For all other cases, let the f/w know that the h/w is now
	 * under the control of the driver.
	 */
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000e_get_hw_control(adapter);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int e1000_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int retval;
	bool wake;

	retval = __e1000_shutdown(pdev, &wake, false);
	if (!retval)
		e1000_complete_shutdown(pdev, true, wake);

	return retval;
}

static int e1000_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if (e1000e_pm_ready(adapter))
		adapter->idle_check = true;

	return __e1000_resume(pdev);
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM_RUNTIME
static int e1000_runtime_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if (e1000e_pm_ready(adapter)) {
		bool wake;

		__e1000_shutdown(pdev, &wake, true);
	}

	return 0;
}

static int e1000_idle(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if (!e1000e_pm_ready(adapter))
		return 0;

	if (adapter->idle_check) {
		adapter->idle_check = false;
		if (!e1000e_has_link(adapter))
			pm_schedule_suspend(dev, MSEC_PER_SEC);
	}

	return -EBUSY;
}

static int e1000_runtime_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if (!e1000e_pm_ready(adapter))
		return 0;

	adapter->idle_check = !dev->power.runtime_auto;
	return __e1000_resume(pdev);
}
#endif /* CONFIG_PM_RUNTIME */
#endif /* CONFIG_PM */

static void e1000_shutdown(struct pci_dev *pdev)
{
	bool wake = false;

	__e1000_shutdown(pdev, &wake, false);

	if (system_state == SYSTEM_POWER_OFF)
		e1000_complete_shutdown(pdev, false, wake);
}

#ifdef CONFIG_NET_POLL_CONTROLLER

static irqreturn_t e1000_intr_msix(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if (adapter->msix_entries) {
		int vector, msix_irq;

		vector = 0;
		msix_irq = adapter->msix_entries[vector].vector;
		disable_irq(msix_irq);
		e1000_intr_msix_rx(msix_irq, netdev);
		enable_irq(msix_irq);

		vector++;
		msix_irq = adapter->msix_entries[vector].vector;
		disable_irq(msix_irq);
		e1000_intr_msix_tx(msix_irq, netdev);
		enable_irq(msix_irq);

		vector++;
		msix_irq = adapter->msix_entries[vector].vector;
		disable_irq(msix_irq);
		e1000_msix_other(msix_irq, netdev);
		enable_irq(msix_irq);
	}

	return IRQ_HANDLED;
}

/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts.  It's not called while
 * the interrupt routine is executing.
 */
static void e1000_netpoll(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	switch (adapter->int_mode) {
	case E1000E_INT_MODE_MSIX:
		e1000_intr_msix(adapter->pdev->irq, netdev);
		break;
	case E1000E_INT_MODE_MSI:
		disable_irq(adapter->pdev->irq);
		e1000_intr_msi(adapter->pdev->irq, netdev);
		enable_irq(adapter->pdev->irq);
		break;
	default: /* E1000E_INT_MODE_LEGACY */
		disable_irq(adapter->pdev->irq);
		e1000_intr(adapter->pdev->irq, netdev);
		enable_irq(adapter->pdev->irq);
		break;
	}
}
#endif

/**
 * e1000_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 **/
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		e1000e_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * e1000_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.  Implementation
 * resembles the first-half of the e1000_resume routine.
 **/
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;
	pci_ers_result_t result;

	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
		e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1);
	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pdev->state_saved = true;
		pci_restore_state(pdev);

		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);

		e1000e_reset(adapter);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	pci_cleanup_aer_uncorrect_error_status(pdev);

	return result;
}

/**
 * e1000_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.  Implementation resembles the
 * second-half of the e1000_resume routine.
 **/
static void e1000_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	e1000_init_manageability_pt(adapter);

	if (netif_running(netdev)) {
		if (e1000e_up(adapter)) {
			dev_err(&pdev->dev,
				"can't bring device back up after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);

	/*
	 * If the controller has AMT, do not set DRV_LOAD until the interface
	 * is up.  For all other cases, let the f/w know that the h/w is now
	 * under the control of the driver.
	 */
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000e_get_hw_control(adapter);
}

static void e1000_print_device_info(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	int ret_val;
	u8 pba_str[E1000_PBANUM_LENGTH];

	/* print bus type/speed/width info */
	e_info("(PCI Express:2.5GT/s:%s) %pM\n",
	       /* bus width */
	       ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
		"Width x1"),
	       /* MAC address */
	       netdev->dev_addr);
	e_info("Intel(R) PRO/%s Network Connection\n",
	       (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000");
	ret_val = e1000_read_pba_string_generic(hw, pba_str,
						E1000_PBANUM_LENGTH);
	if (ret_val)
		strncpy((char *)pba_str, "Unknown", sizeof(pba_str) - 1);
	e_info("MAC: %d, PHY: %d, PBA No: %s\n",
	       hw->mac.type, hw->phy.type, pba_str);
}

static void e1000_eeprom_checks(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int ret_val;
	u16 buf = 0;

	if (hw->mac.type != e1000_82573)
		return;

	ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf);
	if (!ret_val && (!(le16_to_cpu(buf) & (1 << 0)))) {
		/* Deep Smart Power Down (DSPD) */
		dev_warn(&adapter->pdev->dev,
			 "Warning: detected DSPD enabled in EEPROM\n");
	}
}

static const struct net_device_ops e1000e_netdev_ops = {
	.ndo_open		= e1000_open,
	.ndo_stop		= e1000_close,
	.ndo_start_xmit		= e1000_xmit_frame,
	.ndo_get_stats64	= e1000e_get_stats64,
	.ndo_set_multicast_list	= e1000_set_multi,
	.ndo_set_mac_address	= e1000_set_mac,
	.ndo_change_mtu		= e1000_change_mtu,
	.ndo_do_ioctl		= e1000_ioctl,
	.ndo_tx_timeout		= e1000_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,

	.ndo_vlan_rx_add_vid	= e1000_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= e1000_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= e1000_netpoll,
#endif
};

/**
 * e1000_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in e1000_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * e1000_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit e1000_probe(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct e1000_adapter *adapter;
	struct e1000_hw *hw;
	const struct e1000_info *ei = e1000_info_tbl[ent->driver_data];
	resource_size_t mmio_start, mmio_len;
	resource_size_t flash_start, flash_len;

	static int cards_found;
	int i, err, pci_using_dac;
	u16 eeprom_data = 0;
	u16 eeprom_apme_mask = E1000_EEPROM_APME;

	if (ei->flags2 & FLAG2_DISABLE_ASPM_L1)
		e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1);

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	pci_using_dac = 0;
	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!err) {
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		if (!err)
			pci_using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
					"configuration, aborting\n");
				goto err_dma;
			}
		}
	}

	err = pci_request_selected_regions_exclusive(pdev,
				pci_select_bars(pdev, IORESOURCE_MEM),
				e1000e_driver_name);
	if (err)
		goto err_pci_reg;

	/* AER (Advanced Error Reporting) hooks */
	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);
	/* PCI config space info */
	err = pci_save_state(pdev);
	if (err)
		goto err_alloc_etherdev;

	err = -ENOMEM;
	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	netdev->irq = pdev->irq;

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	hw = &adapter->hw;
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->pba = ei->pba;
	adapter->flags = ei->flags;
	adapter->flags2 = ei->flags2;
	adapter->hw.adapter = adapter;
	adapter->hw.mac.type = ei->mac;
	adapter->max_hw_frame_size = ei->max_hw_frame_size;
	adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1;

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);

	err = -EIO;
	adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
	if (!adapter->hw.hw_addr)
		goto err_ioremap;

	if ((adapter->flags & FLAG_HAS_FLASH) &&
	    (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
		flash_start = pci_resource_start(pdev, 1);
		flash_len = pci_resource_len(pdev, 1);
		adapter->hw.flash_address = ioremap(flash_start, flash_len);
		if (!adapter->hw.flash_address)
			goto err_flashmap;
	}

	/* construct the net_device struct */
	netdev->netdev_ops = &e1000e_netdev_ops;
	e1000e_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;

	adapter->bd_number = cards_found++;

	e1000e_check_options(adapter);

	/* setup adapter struct */
	err = e1000_sw_init(adapter);
	if (err)
		goto err_sw_init;

	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));

	err = ei->get_variants(adapter);
	if (err)
		goto err_hw_init;

	if ((adapter->flags & FLAG_IS_ICH) &&
	    (adapter->flags & FLAG_READ_ONLY_NVM))
		e1000e_write_protect_nvm_ich8lan(&adapter->hw);

	hw->mac.ops.get_bus_info(&adapter->hw);

	adapter->hw.phy.autoneg_wait_to_complete = 0;

	/* Copper options */
	if (adapter->hw.phy.media_type == e1000_media_type_copper) {
		adapter->hw.phy.mdix = AUTO_ALL_MODES;
		adapter->hw.phy.disable_polarity_correction = 0;
		adapter->hw.phy.ms_type = e1000_ms_hw_default;
	}

	if (e1000_check_reset_block(&adapter->hw))
		e_info("PHY reset is blocked due to SOL/IDER session.\n");

	netdev->features = NETIF_F_SG |
			   NETIF_F_HW_CSUM |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX;

	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER)
		netdev->features |= NETIF_F_HW_VLAN_FILTER;

	netdev->features |= NETIF_F_TSO;
	netdev->features |= NETIF_F_TSO6;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_HW_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	if (pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	if (e1000e_enable_mng_pass_thru(&adapter->hw))
		adapter->flags |= FLAG_MNG_PT_ENABLED;

	/*
	 * before reading the NVM, reset the controller to
	 * put the device in a known good starting state
	 */
	adapter->hw.mac.ops.reset_hw(&adapter->hw);

	/*
	 * systems with ASPM and others may see the checksum fail on the first
	 * attempt. Let's give it a few tries
	 */
	for (i = 0;; i++) {
		if (e1000_validate_nvm_checksum(&adapter->hw) >= 0)
			break;
		if (i == 2) {
			e_err("The NVM Checksum Is Not Valid\n");
			err = -EIO;
			goto err_eeprom;
		}
	}

	e1000_eeprom_checks(adapter);

	/* copy the MAC address */
	if (e1000e_read_mac_addr(&adapter->hw))
		e_err("NVM Read Error while reading MAC address\n");

	memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
	memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		e_err("Invalid MAC Address: %pM\n", netdev->perm_addr);
		err = -EIO;
		goto err_eeprom;
	}

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = e1000_watchdog;
	adapter->watchdog_timer.data = (unsigned long) adapter;

	init_timer(&adapter->phy_info_timer);
	adapter->phy_info_timer.function = e1000_update_phy_info;
	adapter->phy_info_timer.data = (unsigned long) adapter;

	INIT_WORK(&adapter->reset_task, e1000_reset_task);
	INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
	INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
	INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
	INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang);
	INIT_WORK(&adapter->led_blink_task, e1000e_led_blink_task);

	/* Initialize link parameters. User can change them with ethtool */
	adapter->hw.mac.autoneg = 1;
	adapter->fc_autoneg = 1;
	adapter->hw.fc.requested_mode = e1000_fc_default;
	adapter->hw.fc.current_mode = e1000_fc_default;
	adapter->hw.phy.autoneg_advertised = 0x2f;

	/* ring size defaults */
	adapter->rx_ring->count = 256;
	adapter->tx_ring->count = 256;

	/*
	 * Initial Wake on LAN setting - If APM wake is enabled in
	 * the EEPROM, enable the ACPI Magic Packet filter
	 */
	if (adapter->flags & FLAG_APME_IN_WUC) {
		/* APME bit in EEPROM is mapped to WUC.APME */
		eeprom_data = er32(WUC);
		eeprom_apme_mask = E1000_WUC_APME;
		if ((hw->mac.type > e1000_ich10lan) &&
		    (eeprom_data & E1000_WUC_PHY_WAKE))
			adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP;
	} else if (adapter->flags & FLAG_APME_IN_CTRL3) {
		if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
		    (adapter->hw.bus.func == 1))
			e1000_read_nvm(&adapter->hw,
				       NVM_INIT_CONTROL3_PORT_B, 1,
				       &eeprom_data);
		else
			e1000_read_nvm(&adapter->hw,
				       NVM_INIT_CONTROL3_PORT_A, 1,
				       &eeprom_data);
	}

	/* fetch WoL from EEPROM */
	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/*
	 * now that we have the eeprom settings, apply the special cases
	 * where the eeprom may be wrong or the board simply won't support
	 * wake on lan on a particular port
	 */
	if (!(adapter->flags & FLAG_HAS_WOL))
		adapter->eeprom_wol = 0;

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* save off EEPROM version number */
	e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);

	/* reset the hardware with the new settings */
	e1000e_reset(adapter);

	/*
	 * If the controller has AMT, do not set DRV_LOAD until the interface
	 * is up.  For all other cases, let the f/w know that the h/w is now
	 * under the control of the driver.
	 */
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000e_get_hw_control(adapter);

	strncpy(netdev->name, "eth%d", sizeof(netdev->name) - 1);
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	e1000_print_device_info(adapter);

	if (pci_dev_run_wake(pdev))
		pm_runtime_put_noidle(&pdev->dev);

	return 0;

err_register:
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000e_release_hw_control(adapter);
err_eeprom:
	if (!e1000_check_reset_block(&adapter->hw))
		e1000_phy_hw_reset(&adapter->hw);
err_hw_init:
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
err_sw_init:
	if (adapter->hw.flash_address)
		iounmap(adapter->hw.flash_address);
	e1000e_reset_interrupt_capability(adapter);
err_flashmap:
	iounmap(adapter->hw.hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * e1000_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * e1000_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit e1000_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	bool down = test_bit(__E1000_DOWN, &adapter->state);

	/*
	 * The timers may be rescheduled, so explicitly disable them
	 * from being rescheduled.
	 */
	if (!down)
		set_bit(__E1000_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);
	cancel_work_sync(&adapter->downshift_task);
	cancel_work_sync(&adapter->update_phy_task);
	cancel_work_sync(&adapter->led_blink_task);
	cancel_work_sync(&adapter->print_hang_task);

	if (!(netdev->flags & IFF_UP))
		e1000_power_down_phy(adapter);

	/* Don't lie to e1000_close() down the road. */
	if (!down)
		clear_bit(__E1000_DOWN, &adapter->state);
	unregister_netdev(netdev);

	if (pci_dev_run_wake(pdev))
		pm_runtime_get_noresume(&pdev->dev);

	/*
	 * Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant.
	 */
	e1000e_release_hw_control(adapter);

	e1000e_reset_interrupt_capability(adapter);
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	iounmap(adapter->hw.hw_addr);
	if (adapter->hw.flash_address)
		iounmap(adapter->hw.flash_address);
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));

	free_netdev(netdev);

	/* AER disable */
	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}

/* PCI Error Recovery (ERS) */
static struct pci_error_handlers e1000_err_handler = {
	.error_detected = e1000_io_error_detected,
	.slot_reset = e1000_io_slot_reset,
	.resume = e1000_io_resume,
};
static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP),
	  board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_QUAD), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571PT_QUAD_COPPER), board_82571 },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82574L), board_82574 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82574LA), board_82574 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82583V), board_82583 },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT),
	  board_80003es2lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT),
	  board_80003es2lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_DPT),
	  board_80003es2lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT),
	  board_80003es2lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_AMT), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_82567V_3), board_ich8lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_BM), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LM), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_V), board_ich10lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LM), board_pchlan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LC), board_pchlan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DM), board_pchlan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DC), board_pchlan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_LM), board_pch2lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_V), board_pch2lan },

	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
#ifdef CONFIG_PM
static const struct dev_pm_ops e1000_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume)
	SET_RUNTIME_PM_OPS(e1000_runtime_suspend,
			   e1000_runtime_resume, e1000_idle)
};
#endif

/* PCI Device API Driver */
static struct pci_driver e1000_driver = {
	.name     = e1000e_driver_name,
	.id_table = e1000_pci_tbl,
	.probe    = e1000_probe,
	.remove   = __devexit_p(e1000_remove),
#ifdef CONFIG_PM
	.driver.pm = &e1000_pm_ops,
#endif
	.shutdown = e1000_shutdown,
	.err_handler = &e1000_err_handler
};

/**
 * e1000_init_module - Driver Registration Routine
 *
 * e1000_init_module is the first routine called when the driver is
 * loaded.  All it does is register with the PCI subsystem.
 **/
static int __init e1000_init_module(void)
{
	int ret;
	pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
		e1000e_driver_version);
	pr_info("Copyright(c) 1999 - 2011 Intel Corporation.\n");
	ret = pci_register_driver(&e1000_driver);

	return ret;
}
module_init(e1000_init_module);

/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit e1000_exit_module(void)
{
	pci_unregister_driver(&e1000_driver);
}
module_exit(e1000_exit_module);


MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);