/*******************************************************************************

  Intel PRO/100 Linux driver
  Copyright(c) 1999 - 2006 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

/*
 *  e100.c: Intel(R) PRO/100 ethernet driver
 *
 *  (Re)written 2003 by scott.feldman@intel.com.  Based loosely on
 *  original e100 driver, but better described as a munging of
 *  e100, e1000, eepro100, tg3, 8139cp, and other drivers.
 *
 *  References:
 *      Intel 8255x 10/100 Mbps Ethernet Controller Family,
 *      Open Source Software Developers Manual,
 *      http://sourceforge.net/projects/e1000
 *
 *
 *                        Theory of Operation
 *
 *  I.   General
 *
 *  The driver supports the Intel(R) 10/100 Mbps PCI Fast Ethernet
 *  controller family, which includes the 82557, 82558, 82559, 82550,
 *  82551, and 82562 devices.  82558 and greater controllers
 *  integrate the Intel 82555 PHY.  The controllers are used in
 *  server and client network interface cards, as well as in
 *  LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx
 *  configurations.  8255x supports a 32-bit linear addressing
 *  mode and operates at a 33 MHz PCI clock rate.
 *
 *  II.  Driver Operation
 *
 *  Memory-mapped mode is used exclusively to access the device's
 *  shared-memory structure, the Control/Status Registers (CSR).  All
 *  setup, configuration, and control of the device, including queuing
 *  of Tx, Rx, and configuration commands, is through the CSR.
 *  cmd_lock serializes accesses to the CSR command register.  cb_lock
 *  protects the shared Command Block List (CBL).
 *
 *  8255x is highly MII-compliant and all accesses to the PHY go
 *  through the Management Data Interface (MDI).  Consequently, the
 *  driver leverages the mii.c library shared with other MII-compliant
 *  devices.
 *
 *  Big- and Little-Endian byte order as well as 32- and 64-bit
 *  archs are supported.  Weak-ordered memory and non-cache-coherent
 *  archs are supported.
 *
 *  III. Transmit
 *
 *  A Tx skb is mapped and hangs off of a TCB.  TCBs are linked
 *  together in a fixed-size ring (CBL) thus forming the flexible mode
 *  memory structure.  A TCB marked with the suspend-bit indicates
 *  the end of the ring.  The last TCB processed suspends the
 *  controller, and the controller can be restarted by issuing a CU
 *  resume command to continue from the suspend point, or a CU start
 *  command to start at a given position in the ring.
 *
 *  Non-Tx commands (config, multicast setup, etc) are linked
 *  into the CBL ring along with Tx commands.  The common structure
 *  used for both Tx and non-Tx commands is the Command Block (CB).
 *
 *  cb_to_use is the next CB to use for queuing a command; cb_to_clean
 *  is the next CB to check for completion; cb_to_send is the first
 *  CB to start on in case of a previous failure to resume.  CB clean
 *  up happens in interrupt context in response to a CU interrupt.
 *  cbs_avail keeps track of the number of free CB resources available.
 *
 *  Hardware padding of short packets to minimum packet size is
 *  enabled.  82557 pads with 7Eh, while the later controllers pad
 *  with 00h.
 *
 *  IV.  Receive
 *
 *  The Receive Frame Area (RFA) comprises a ring of Receive Frame
 *  Descriptors (RFD) + data buffer, thus forming the simplified mode
 *  memory structure.  Rx skbs are allocated to contain both the RFD
 *  and the data buffer, but the RFD is pulled off before the skb is
 *  indicated.  The data buffer is aligned such that encapsulated
 *  protocol headers are u32-aligned.  Since the RFD is part of the
 *  mapped shared memory, and completion status is contained within
 *  the RFD, the RFD must be dma_sync'ed to maintain a consistent
 *  view from software and hardware.
 *
 *  Under typical operation, the receive unit (RU) is started once,
 *  and the controller happily fills RFDs as frames arrive.  If
 *  replacement RFDs cannot be allocated, or the RU goes non-active,
 *  the RU must be restarted.  Frame arrival generates an interrupt,
 *  and Rx indication and re-allocation happen in the same context,
 *  therefore no locking is required.  A software-generated interrupt
 *  is generated from the watchdog to recover from a failed allocation
 *  scenario where all Rx resources have been indicated and none re-
 *  placed.
 *
 *  V.   Miscellaneous
 *
 *  VLAN offloading of tagging, stripping and filtering is not
 *  supported, but the driver will accommodate the extra 4-byte VLAN tag
 *  for processing by upper layers.  Tx/Rx Checksum offloading is not
 *  supported.  Tx Scatter/Gather is not supported.  Jumbo Frames are
 *  not supported (hardware limitation).
 *
 *  MagicPacket(tm) WoL support is enabled/disabled via ethtool.
 *
 *  Thanks to JC (jchapman@katalix.com) for helping with
 *  testing/troubleshooting the development driver.
 *
 *  TODO:
 *  o several entry points race with dev->close
 *  o check for tx-no-resources/stop Q races with tx clean/wake Q
 *
 *  FIXES:
 *  2005/12/02 - Michael O'Donnell <Michael.ODonnell at stratus dot com>
 *  - Stratus87247: protect MDI control register manipulations
 */
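
/* Illustrative snapshot of the CB bookkeeping described above (example
 * values, not from the manual): with an 8-entry CBL, after three commands
 * have been queued and handed to the CU, and none yet completed:
 *
 *      cb_to_clean -> CB0   (oldest queued, next checked for completion)
 *                     CB1
 *                     CB2   (suspend bit set; CU suspends here)
 *      cb_to_use   -> CB3   (next free CB); cb_to_send == cb_to_use
 *      cbs_avail = 5
 *
 * cb_to_send trails cb_to_use only if a CU resume could not be issued for
 * a queued CB; e100_exec_cb() retries starting from cb_to_send. */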

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/string.h>
#include <asm/unaligned.h>

#define DRV_NAME		"e100"
#define DRV_EXT			"-NAPI"
#define DRV_VERSION		"3.5.17-k4"DRV_EXT
#define DRV_DESCRIPTION		"Intel(R) PRO/100 Network Driver"
#define DRV_COPYRIGHT		"Copyright(c) 1999-2006 Intel Corporation"
#define PFX			DRV_NAME ": "

#define E100_WATCHDOG_PERIOD	(2 * HZ)
#define E100_NAPI_WEIGHT	16

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static int debug = 3;
static int eeprom_bad_csum_allow = 0;
static int use_io = 0;
module_param(debug, int, 0);
module_param(eeprom_bad_csum_allow, int, 0);
module_param(use_io, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
MODULE_PARM_DESC(use_io, "Force use of i/o access mode");

#define DPRINTK(nlevel, klevel, fmt, args...) \
	(void)((NETIF_MSG_##nlevel & nic->msg_enable) && \
	printk(KERN_##klevel PFX "%s: %s: " fmt, nic->netdev->name, \
		__FUNCTION__ , ## args))

#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
	PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
	PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich }
static struct pci_device_id e100_id_table[] = {
	INTEL_8255X_ETHERNET_DEVICE(0x1029, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1030, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1031, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1032, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1033, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1034, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1038, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1039, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103A, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103B, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103C, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103D, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103E, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x1050, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1051, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1052, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1053, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1054, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1055, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1056, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1057, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1059, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1064, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1065, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1066, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1067, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1068, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1069, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x106A, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x106B, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1091, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1092, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1093, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1094, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1095, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x2459, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x245D, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7),
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, e100_id_table);

enum mac {
	mac_82557_D100_A  = 0,
	mac_82557_D100_B  = 1,
	mac_82557_D100_C  = 2,
	mac_82558_D101_A4 = 4,
	mac_82558_D101_B0 = 5,
	mac_82559_D101M   = 8,
	mac_82559_D101S   = 9,
	mac_82550_D102    = 12,
	mac_82550_D102_C  = 13,
	mac_82551_E       = 14,
	mac_82551_F       = 15,
	mac_82551_10      = 16,
	mac_unknown       = 0xFF,
};

enum phy {
	phy_100a     = 0x000003E0,
	phy_100c     = 0x035002A8,
	phy_82555_tx = 0x015002A8,
	phy_nsc_tx   = 0x5C002000,
	phy_82562_et = 0x033002A8,
	phy_82562_em = 0x032002A8,
	phy_82562_ek = 0x031002A8,
	phy_82562_eh = 0x017002A8,
	phy_unknown  = 0xFFFFFFFF,
};

/* CSR (Control/Status Registers) */
struct csr {
	struct {
		u8 status;
		u8 stat_ack;
		u8 cmd_lo;
		u8 cmd_hi;
		u32 gen_ptr;
	} scb;
	u32 port;
	u16 flash_ctrl;
	u8 eeprom_ctrl_lo;
	u8 eeprom_ctrl_hi;
	u32 mdi_ctrl;
	u32 rx_dma_count;
};

enum scb_status {
	rus_ready  = 0x10,
	rus_mask   = 0x3C,
};

enum scb_stat_ack {
	stat_ack_not_ours    = 0x00,
	stat_ack_sw_gen      = 0x04,
	stat_ack_rnr         = 0x10,
	stat_ack_cu_idle     = 0x20,
	stat_ack_frame_rx    = 0x40,
	stat_ack_cu_cmd_done = 0x80,
	stat_ack_not_present = 0xFF,
	stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx),
	stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done),
};

enum scb_cmd_hi {
	irq_mask_none = 0x00,
	irq_mask_all  = 0x01,
	irq_sw_gen    = 0x02,
};

enum scb_cmd_lo {
	cuc_nop        = 0x00,
	ruc_start      = 0x01,
	ruc_load_base  = 0x06,
	cuc_start      = 0x10,
	cuc_resume     = 0x20,
	cuc_dump_addr  = 0x40,
	cuc_dump_stats = 0x50,
	cuc_load_base  = 0x60,
	cuc_dump_reset = 0x70,
};

enum cuc_dump {
	cuc_dump_complete       = 0x0000A005,
	cuc_dump_reset_complete = 0x0000A007,
};

enum port {
	software_reset  = 0x0000,
	selftest        = 0x0001,
	selective_reset = 0x0002,
};

enum eeprom_ctrl_lo {
	eesk = 0x01,
	eecs = 0x02,
	eedi = 0x04,
	eedo = 0x08,
};

enum mdi_ctrl {
	mdi_write = 0x04000000,
	mdi_read  = 0x08000000,
	mdi_ready = 0x10000000,
};

enum eeprom_op {
	op_write = 0x05,
	op_read  = 0x06,
	op_ewds  = 0x10,
	op_ewen  = 0x13,
};

enum eeprom_offsets {
	eeprom_cnfg_mdix  = 0x03,
	eeprom_id         = 0x0A,
	eeprom_config_asf = 0x0D,
	eeprom_smbus_addr = 0x90,
};

enum eeprom_cnfg_mdix {
	eeprom_mdix_enabled = 0x0080,
};

enum eeprom_id {
	eeprom_id_wol = 0x0020,
};

enum eeprom_config_asf {
	eeprom_asf = 0x8000,
	eeprom_gcl = 0x4000,
};

enum cb_status {
	cb_complete = 0x8000,
	cb_ok       = 0x2000,
};

enum cb_command {
	cb_nop    = 0x0000,
	cb_iaaddr = 0x0001,
	cb_config = 0x0002,
	cb_multi  = 0x0003,
	cb_tx     = 0x0004,
	cb_ucode  = 0x0005,
	cb_dump   = 0x0006,
	cb_tx_sf  = 0x0008,
	cb_cid    = 0x1f00,
	cb_i      = 0x2000,
	cb_s      = 0x4000,
	cb_el     = 0x8000,
};

struct rfd {
	u16 status;
	u16 command;
	u32 link;
	u32 rbd;
	u16 actual_size;
	u16 size;
};

struct rx {
	struct rx *next, *prev;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
};

#if defined(__BIG_ENDIAN_BITFIELD)
#define X(a,b)	b,a
#else
#define X(a,b)	a,b
#endif
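
/* Illustration of the X() macro above: it keeps the declared bitfield
 * order matching the device's register layout on either endianness.
 * On a little-endian-bitfield build,
 *     u8 X(byte_count:6, pad0:2);
 * expands to
 *     u8 byte_count:6, pad0:2;
 * while a big-endian-bitfield build reverses the pair to
 *     u8 pad0:2, byte_count:6;
 * so byte_count always occupies the same bits of the config byte. */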
struct config {
/*0*/	u8 X(byte_count:6, pad0:2);
/*1*/	u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1);
/*2*/	u8 adaptive_ifs;
/*3*/	u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1),
	   term_write_cache_line:1), pad3:4);
/*4*/	u8 X(rx_dma_max_count:7, pad4:1);
/*5*/	u8 X(tx_dma_max_count:7, dma_max_count_enable:1);
/*6*/	u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1),
	   tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1),
	   rx_discard_overruns:1), rx_save_bad_frames:1);
/*7*/	u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2),
	   pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1),
	   tx_dynamic_tbd:1);
/*8*/	u8 X(X(mii_mode:1, pad8:6), csma_disabled:1);
/*9*/	u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1),
	   link_status_wake:1), arp_wake:1), mcmatch_wake:1);
/*10*/	u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2),
	   loopback:2);
/*11*/	u8 X(linear_priority:3, pad11:5);
/*12*/	u8 X(X(linear_priority_mode:1, pad12:3), ifs:4);
/*13*/	u8 ip_addr_lo;
/*14*/	u8 ip_addr_hi;
/*15*/	u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1),
	   wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1),
	   pad15_2:1), crs_or_cdt:1);
/*16*/	u8 fc_delay_lo;
/*17*/	u8 fc_delay_hi;
/*18*/	u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1),
	   rx_long_ok:1), fc_priority_threshold:3), pad18:1);
/*19*/	u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1),
	   fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1),
	   full_duplex_force:1), full_duplex_pin:1);
/*20*/	u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1);
/*21*/	u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4);
/*22*/	u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6);
	u8 pad_d102[9];
};

#define E100_MAX_MULTICAST_ADDRS	64
struct multi {
	u16 count;
	u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/];
};

/* Important: keep total struct u32-aligned */
#define UCODE_SIZE			134
struct cb {
	u16 status;
	u16 command;
	u32 link;
	union {
		u8 iaaddr[ETH_ALEN];
		u32 ucode[UCODE_SIZE];
		struct config config;
		struct multi multi;
		struct {
			u32 tbd_array;
			u16 tcb_byte_count;
			u8 threshold;
			u8 tbd_count;
			struct {
				u32 buf_addr;
				u16 size;
				u16 eol;
			} tbd;
		} tcb;
		u32 dump_buffer_addr;
	} u;
	struct cb *next, *prev;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
};

enum loopback {
	lb_none = 0, lb_mac = 1, lb_phy = 3,
};

struct stats {
	u32 tx_good_frames, tx_max_collisions, tx_late_collisions,
		tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions,
		tx_multiple_collisions, tx_total_collisions;
	u32 rx_good_frames, rx_crc_errors, rx_alignment_errors,
		rx_resource_errors, rx_overrun_errors, rx_cdt_errors,
		rx_short_frame_errors;
	u32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported;
	u16 xmt_tco_frames, rcv_tco_frames;
	u32 complete;
};

struct mem {
	struct {
		u32 signature;
		u32 result;
	} selftest;
	struct stats stats;
	u8 dump_buf[596];
};

struct param_range {
	u32 min;
	u32 max;
	u32 count;
};

struct params {
	struct param_range rfds;
	struct param_range cbs;
};

struct nic {
	/* Begin: frequently used values: keep adjacent for cache effect */
	u32 msg_enable				____cacheline_aligned;
	struct net_device *netdev;
	struct pci_dev *pdev;

	struct rx *rxs				____cacheline_aligned;
	struct rx *rx_to_use;
	struct rx *rx_to_clean;
	struct rfd blank_rfd;

	spinlock_t cb_lock			____cacheline_aligned;
	spinlock_t cmd_lock;
	struct csr __iomem *csr;
	enum scb_cmd_lo cuc_cmd;
	unsigned int cbs_avail;
	struct cb *cbs;
	struct cb *cb_to_use;
	struct cb *cb_to_send;
	struct cb *cb_to_clean;
	u16 tx_command;
	/* End: frequently used values: keep adjacent for cache effect */

	enum {
		ich                = (1 << 0),
		promiscuous        = (1 << 1),
		multicast_all      = (1 << 2),
		wol_magic          = (1 << 3),
		ich_10h_workaround = (1 << 4),
	} flags					____cacheline_aligned;

	enum mac mac;
	enum phy phy;
	struct params params;
	struct net_device_stats net_stats;
	struct timer_list watchdog;
	struct timer_list blink_timer;
	struct mii_if_info mii;
	struct work_struct tx_timeout_task;
	enum loopback loopback;

	struct mem *mem;
	dma_addr_t dma_addr;

	dma_addr_t cbs_dma_addr;
	u8 adaptive_ifs;
	u8 tx_threshold;
	u32 tx_frames;
	u32 tx_collisions;
	u32 tx_deferred;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_fc_pause;
	u32 tx_tco_frames;

	u32 rx_fc_pause;
	u32 rx_fc_unsupported;
	u32 rx_tco_frames;
	u32 rx_over_length_errors;

	u8 rev_id;
	u16 leds;
	u16 eeprom_wc;
	u16 eeprom[256];
	spinlock_t mdio_lock;
};

static inline void e100_write_flush(struct nic *nic)
{
	/* Flush previous PCI writes through intermediate bridges
	 * by doing a benign read */
	(void)ioread8(&nic->csr->scb.status);
}

static void e100_enable_irq(struct nic *nic)
{
	unsigned long flags;

	spin_lock_irqsave(&nic->cmd_lock, flags);
	iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irqrestore(&nic->cmd_lock, flags);
}

static void e100_disable_irq(struct nic *nic)
{
	unsigned long flags;

	spin_lock_irqsave(&nic->cmd_lock, flags);
	iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irqrestore(&nic->cmd_lock, flags);
}

static void e100_hw_reset(struct nic *nic)
{
	/* Put CU and RU into idle with a selective reset to get
	 * device off of PCI bus */
	iowrite32(selective_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Now fully reset device */
	iowrite32(software_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Mask off our interrupt line - it's unmasked after reset */
	e100_disable_irq(nic);
}

static int e100_self_test(struct nic *nic)
{
	u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest);

	/* Passing the self-test is a pretty good indication
	 * that the device can DMA to/from host memory */

	nic->mem->selftest.signature = 0;
	nic->mem->selftest.result = 0xFFFFFFFF;

	iowrite32(selftest | dma_addr, &nic->csr->port);
	e100_write_flush(nic);
	/* Wait 10 msec for self-test to complete */
	msleep(10);

	/* Interrupts are enabled after self-test */
	e100_disable_irq(nic);

	/* Check results of self-test */
	if(nic->mem->selftest.result != 0) {
		DPRINTK(HW, ERR, "Self-test failed: result=0x%08X\n",
			nic->mem->selftest.result);
		return -ETIMEDOUT;
	}
	if(nic->mem->selftest.signature == 0) {
		DPRINTK(HW, ERR, "Self-test failed: timed out\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, u16 data)
{
	u32 cmd_addr_data[3];
	u8 ctrl;
	int i, j;

	/* Three cmds: write/erase enable, write data, write/erase disable */
	cmd_addr_data[0] = op_ewen << (addr_len - 2);
	cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) |
		cpu_to_le16(data);
	cmd_addr_data[2] = op_ewds << (addr_len - 2);

	/* Bit-bang cmds to write word to eeprom */
	for(j = 0; j < 3; j++) {

		/* Chip select */
		iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);

		for(i = 31; i >= 0; i--) {
			ctrl = (cmd_addr_data[j] & (1 << i)) ?
				eecs | eedi : eecs;
			iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
			e100_write_flush(nic); udelay(4);

			iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
			e100_write_flush(nic); udelay(4);
		}
		/* Wait 10 msec for cmd to complete */
		msleep(10);

		/* Chip deselect */
		iowrite8(0, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);
	}
};

/* General technique stolen from the eepro100 driver - very clever */
static u16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
{
	u32 cmd_addr_data;
	u16 data = 0;
	u8 ctrl;
	int i;

	cmd_addr_data = ((op_read << *addr_len) | addr) << 16;

	/* Chip select */
	iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
	e100_write_flush(nic); udelay(4);

	/* Bit-bang to read word from eeprom */
	for(i = 31; i >= 0; i--) {
		ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs;
		iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);

		iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);

		/* Eeprom drives a dummy zero to EEDO after receiving
		 * complete address.  Use this to adjust addr_len. */
		ctrl = ioread8(&nic->csr->eeprom_ctrl_lo);
		if(!(ctrl & eedo) && i > 16) {
			*addr_len -= (i - 16);
			i = 17;
		}

		data = (data << 1) | (ctrl & eedo ? 1 : 0);
	}

	/* Chip deselect */
	iowrite8(0, &nic->csr->eeprom_ctrl_lo);
	e100_write_flush(nic); udelay(4);

	return le16_to_cpu(data);
};
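
/* Worked example of the addr_len discovery above (illustrative, not from
 * the datasheet): the read starts assuming an 8-bit address, placing the
 * opcode+address in bits 31..16, so the EEDO dummy zero is expected right
 * after bit 16 is clocked.  A smaller 64-word part uses only 6 address
 * bits and drives the dummy zero two clocks early, while i == 18; the
 * code then corrects *addr_len from 8 to 6 and jumps i to 17, leaving
 * exactly the 16 data clocks (i = 15..0).  Since `data` is a u16, the
 * shifts naturally discard anything accumulated before the data bits. */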

/* Load entire EEPROM image into driver cache and validate checksum */
static int e100_eeprom_load(struct nic *nic)
{
	u16 addr, addr_len = 8, checksum = 0;

	/* Try reading with an 8-bit addr len to discover actual addr len */
	e100_eeprom_read(nic, &addr_len, 0);
	nic->eeprom_wc = 1 << addr_len;

	for(addr = 0; addr < nic->eeprom_wc; addr++) {
		nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr);
		if(addr < nic->eeprom_wc - 1)
			checksum += cpu_to_le16(nic->eeprom[addr]);
	}

	/* The checksum, stored in the last word, is calculated such that
	 * the sum of words should be 0xBABA */
	checksum = le16_to_cpu(0xBABA - checksum);
	if(checksum != nic->eeprom[nic->eeprom_wc - 1]) {
		DPRINTK(PROBE, ERR, "EEPROM corrupted\n");
		if (!eeprom_bad_csum_allow)
			return -EAGAIN;
	}

	return 0;
}
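
/* Checksum example (illustrative numbers): if the first eeprom_wc - 1
 * words sum to 0x1234, a good image stores 0xBABA - 0x1234 = 0xA886 in
 * the last word, so the 16-bit sum over all eeprom_wc words is exactly
 * 0xBABA. */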

/* Save (portion of) driver EEPROM cache to device and update checksum */
static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
{
	u16 addr, addr_len = 8, checksum = 0;

	/* Try reading with an 8-bit addr len to discover actual addr len */
	e100_eeprom_read(nic, &addr_len, 0);
	nic->eeprom_wc = 1 << addr_len;

	if(start + count >= nic->eeprom_wc)
		return -EINVAL;

	for(addr = start; addr < start + count; addr++)
		e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]);

	/* The checksum, stored in the last word, is calculated such that
	 * the sum of words should be 0xBABA */
	for(addr = 0; addr < nic->eeprom_wc - 1; addr++)
		checksum += cpu_to_le16(nic->eeprom[addr]);
	nic->eeprom[nic->eeprom_wc - 1] = le16_to_cpu(0xBABA - checksum);
	e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1,
		nic->eeprom[nic->eeprom_wc - 1]);

	return 0;
}

#define E100_WAIT_SCB_TIMEOUT	20000 /* we might have to wait 100ms!!! */
#define E100_WAIT_SCB_FAST	20    /* delay like the old code */
static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
{
	unsigned long flags;
	unsigned int i;
	int err = 0;

	spin_lock_irqsave(&nic->cmd_lock, flags);

	/* Previous command is accepted when SCB clears */
	for(i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) {
		if(likely(!ioread8(&nic->csr->scb.cmd_lo)))
			break;
		cpu_relax();
		if(unlikely(i > E100_WAIT_SCB_FAST))
			udelay(5);
	}
	if(unlikely(i == E100_WAIT_SCB_TIMEOUT)) {
		err = -EAGAIN;
		goto err_unlock;
	}

	if(unlikely(cmd != cuc_resume))
		iowrite32(dma_addr, &nic->csr->scb.gen_ptr);
	iowrite8(cmd, &nic->csr->scb.cmd_lo);

err_unlock:
	spin_unlock_irqrestore(&nic->cmd_lock, flags);

	return err;
}

static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
	void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
{
	struct cb *cb;
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&nic->cb_lock, flags);

	if(unlikely(!nic->cbs_avail)) {
		err = -ENOMEM;
		goto err_unlock;
	}

	cb = nic->cb_to_use;
	nic->cb_to_use = cb->next;
	nic->cbs_avail--;
	cb->skb = skb;

	if(unlikely(!nic->cbs_avail))
		err = -ENOSPC;

	cb_prepare(nic, cb, skb);

	/* Order is important otherwise we'll be in a race with h/w:
	 * set S-bit in current first, then clear S-bit in previous. */
	cb->command |= cpu_to_le16(cb_s);
	wmb();
	cb->prev->command &= cpu_to_le16(~cb_s);

	while(nic->cb_to_send != nic->cb_to_use) {
		if(unlikely(e100_exec_cmd(nic, nic->cuc_cmd,
			nic->cb_to_send->dma_addr))) {
			/* Ok, here's where things get sticky.  It's
			 * possible that we can't schedule the command
			 * because the controller is too busy, so
			 * let's just queue the command and try again
			 * when another command is scheduled. */
			if(err == -ENOSPC) {
				/* request a reset */
				schedule_work(&nic->tx_timeout_task);
			}
			break;
		} else {
			nic->cuc_cmd = cuc_resume;
			nic->cb_to_send = nic->cb_to_send->next;
		}
	}

err_unlock:
	spin_unlock_irqrestore(&nic->cb_lock, flags);

	return err;
}
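
/* Why the S-bit ordering above matters (informal sketch): the new CB is
 * appended with its own suspend bit already set before the previous CB's
 * suspend bit is cleared.  If the order were reversed, the CU could fetch
 * past the previous CB in the window where neither CB carries the S-bit
 * and run on into stale ring entries; with this order the CU always finds
 * a suspend point, and at worst needs the cuc_resume issued in the loop. */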

static u16 mdio_ctrl(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
{
	u32 data_out = 0;
	unsigned int i;
	unsigned long flags;

	/*
	 * Stratus87247: we shouldn't be writing the MDI control
	 * register until the Ready bit shows True.  Also, since
	 * manipulation of the MDI control registers is a multi-step
	 * procedure it should be done under lock.
	 */
	spin_lock_irqsave(&nic->mdio_lock, flags);
	for (i = 100; i; --i) {
		if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready)
			break;
		udelay(20);
	}
	if (unlikely(!i)) {
		printk("e100.mdio_ctrl(%s) won't go Ready\n",
			nic->netdev->name);
		spin_unlock_irqrestore(&nic->mdio_lock, flags);
		return 0;		/* No way to indicate timeout error */
	}
	iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl);

	for (i = 0; i < 100; i++) {
		udelay(20);
		if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready)
			break;
	}
	spin_unlock_irqrestore(&nic->mdio_lock, flags);
	DPRINTK(HW, DEBUG,
		"%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
		dir == mdi_read ? "READ" : "WRITE", addr, reg, data, data_out);
	return (u16)data_out;
}

static int mdio_read(struct net_device *netdev, int addr, int reg)
{
	return mdio_ctrl(netdev_priv(netdev), addr, mdi_read, reg, 0);
}

static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
{
	mdio_ctrl(netdev_priv(netdev), addr, mdi_write, reg, data);
}

static void e100_get_defaults(struct nic *nic)
{
	struct param_range rfds = { .min = 16, .max = 256, .count = 256 };
	struct param_range cbs  = { .min = 64, .max = 256, .count = 128 };

	pci_read_config_byte(nic->pdev, PCI_REVISION_ID, &nic->rev_id);
	/* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */
	nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->rev_id;
	if(nic->mac == mac_unknown)
		nic->mac = mac_82557_D100_A;

	nic->params.rfds = rfds;
	nic->params.cbs = cbs;

	/* Quadwords to DMA into FIFO before starting frame transmit */
	nic->tx_threshold = 0xE0;

	/* no interrupt for every tx completion, delay = 256us if not 557 */
	nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf |
		((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));

	/* Template for a freshly allocated RFD */
	nic->blank_rfd.command = cpu_to_le16(cb_el & cb_s);
	nic->blank_rfd.rbd = 0xFFFFFFFF;
	nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN);

	/* MII setup */
	nic->mii.phy_id_mask = 0x1F;
	nic->mii.reg_num_mask = 0x1F;
	nic->mii.dev = nic->netdev;
	nic->mii.mdio_read = mdio_read;
	nic->mii.mdio_write = mdio_write;
}

static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	struct config *config = &cb->u.config;
	u8 *c = (u8 *)config;

	cb->command = cpu_to_le16(cb_config);

	memset(config, 0, sizeof(struct config));

	config->byte_count = 0x16;		/* bytes in this struct */
	config->rx_fifo_limit = 0x8;		/* bytes in FIFO before DMA */
	config->direct_rx_dma = 0x1;		/* reserved */
	config->standard_tcb = 0x1;		/* 1=standard, 0=extended */
	config->standard_stat_counter = 0x1;	/* 1=standard, 0=extended */
	config->rx_discard_short_frames = 0x1;	/* 1=discard, 0=pass */
	config->tx_underrun_retry = 0x3;	/* # of underrun retries */
	config->mii_mode = 0x1;			/* 1=MII mode, 0=503 mode */
	config->pad10 = 0x6;
	config->no_source_addr_insertion = 0x1;	/* 1=no, 0=yes */
	config->preamble_length = 0x2;		/* 0=1, 1=3, 2=7, 3=15 bytes */
	config->ifs = 0x6;			/* x16 = inter frame spacing */
	config->ip_addr_hi = 0xF2;		/* ARP IP filter - not used */
	config->pad15_1 = 0x1;
	config->pad15_2 = 0x1;
	config->crs_or_cdt = 0x0;		/* 0=CRS only, 1=CRS or CDT */
	config->fc_delay_hi = 0x40;		/* time delay for fc frame */
	config->tx_padding = 0x1;		/* 1=pad short frames */
	config->fc_priority_threshold = 0x7;	/* 7=priority fc disabled */
	config->pad18 = 0x1;
	config->full_duplex_pin = 0x1;		/* 1=examine FDX# pin */
	config->pad20_1 = 0x1F;
	config->fc_priority_location = 0x1;	/* 1=byte#31, 0=byte#19 */
	config->pad21_1 = 0x5;

	config->adaptive_ifs = nic->adaptive_ifs;
	config->loopback = nic->loopback;

	if(nic->mii.force_media && nic->mii.full_duplex)
		config->full_duplex_force = 0x1;	/* 1=force, 0=auto */

	if(nic->flags & promiscuous || nic->loopback) {
		config->rx_save_bad_frames = 0x1;	/* 1=save, 0=discard */
		config->rx_discard_short_frames = 0x0;	/* 1=discard, 0=save */
		config->promiscuous_mode = 0x1;		/* 1=on, 0=off */
	}

	if(nic->flags & multicast_all)
		config->multicast_all = 0x1;		/* 1=accept, 0=no */

	/* disable WoL when up */
	if(netif_running(nic->netdev) || !(nic->flags & wol_magic))
		config->magic_packet_disable = 0x1;	/* 1=off, 0=on */

	if(nic->mac >= mac_82558_D101_A4) {
		config->fc_disable = 0x1;	/* 1=Tx fc off, 0=Tx fc on */
		config->mwi_enable = 0x1;	/* 1=enable, 0=disable */
		config->standard_tcb = 0x0;	/* 1=standard, 0=extended */
		config->rx_long_ok = 0x1;	/* 1=VLANs ok, 0=standard */
		if(nic->mac >= mac_82559_D101M)
			config->tno_intr = 0x1;		/* TCO stats enable */
		else
			config->standard_stat_counter = 0x0;
	}

	DPRINTK(HW, DEBUG, "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
	DPRINTK(HW, DEBUG, "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]);
	DPRINTK(HW, DEBUG, "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
}

/********************************************************/
/*  Micro code for 8086:1229 Rev 8                      */
/********************************************************/

/*  Parameter values for the D101M B-step  */
#define D101M_CPUSAVER_TIMER_DWORD		78
#define D101M_CPUSAVER_BUNDLE_DWORD		65
#define D101M_CPUSAVER_MIN_SIZE_DWORD		126

#define D101M_B_RCVBUNDLE_UCODE \
{\
0x00550215, 0xFFFF0437, 0xFFFFFFFF, 0x06A70789, 0xFFFFFFFF, 0x0558FFFF, \
0x000C0001, 0x00101312, 0x000C0008, 0x00380216, \
0x0010009C, 0x00204056, 0x002380CC, 0x00380056, \
0x0010009C, 0x00244C0B, 0x00000800, 0x00124818, \
0x00380438, 0x00000000, 0x00140000, 0x00380555, \
0x00308000, 0x00100662, 0x00100561, 0x000E0408, \
0x00134861, 0x000C0002, 0x00103093, 0x00308000, \
0x00100624, 0x00100561, 0x000E0408, 0x00100861, \
0x000C007E, 0x00222C21, 0x000C0002, 0x00103093, \
0x00380C7A, 0x00080000, 0x00103090, 0x00380C7A, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x0010009C, 0x00244C2D, 0x00010004, 0x00041000, \
0x003A0437, 0x00044010, 0x0038078A, 0x00000000, \
0x00100099, 0x00206C7A, 0x0010009C, 0x00244C48, \
0x00130824, 0x000C0001, 0x00101213, 0x00260C75, \
0x00041000, 0x00010004, 0x00130826, 0x000C0006, \
0x002206A8, 0x0013C926, 0x00101313, 0x003806A8, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00080600, 0x00101B10, 0x00050004, 0x00100826, \
0x00101210, 0x00380C34, 0x00000000, 0x00000000, \
0x0021155B, 0x00100099, 0x00206559, 0x0010009C, \
0x00244559, 0x00130836, 0x000C0000, 0x00220C62, \
0x000C0001, 0x00101B13, 0x00229C0E, 0x00210C0E, \
0x00226C0E, 0x00216C0E, 0x0022FC0E, 0x00215C0E, \
0x00214C0E, 0x00380555, 0x00010004, 0x00041000, \
0x00278C67, 0x00040800, 0x00018100, 0x003A0437, \
0x00130826, 0x000C0001, 0x00220559, 0x00101313, \
0x00380559, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00130831, 0x0010090B, 0x00124813, \
0x000CFF80, 0x002606AB, 0x00041000, 0x00010004, \
0x003806A8, 0x00000000, 0x00000000, 0x00000000, \
}

/********************************************************/
/*  Micro code for 8086:1229 Rev 9                      */
/********************************************************/

/*  Parameter values for the D101S  */
#define D101S_CPUSAVER_TIMER_DWORD		78
#define D101S_CPUSAVER_BUNDLE_DWORD		67
#define D101S_CPUSAVER_MIN_SIZE_DWORD		128

#define D101S_RCVBUNDLE_UCODE \
{\
0x00550242, 0xFFFF047E, 0xFFFFFFFF, 0x06FF0818, 0xFFFFFFFF, 0x05A6FFFF, \
0x000C0001, 0x00101312, 0x000C0008, 0x00380243, \
0x0010009C, 0x00204056, 0x002380D0, 0x00380056, \
0x0010009C, 0x00244F8B, 0x00000800, 0x00124818, \
0x0038047F, 0x00000000, 0x00140000, 0x003805A3, \
0x00308000, 0x00100610, 0x00100561, 0x000E0408, \
0x00134861, 0x000C0002, 0x00103093, 0x00308000, \
0x00100624, 0x00100561, 0x000E0408, 0x00100861, \
0x000C007E, 0x00222FA1, 0x000C0002, 0x00103093, \
0x00380F90, 0x00080000, 0x00103090, 0x00380F90, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x0010009C, 0x00244FAD, 0x00010004, 0x00041000, \
0x003A047E, 0x00044010, 0x00380819, 0x00000000, \
0x00100099, 0x00206FFD, 0x0010009A, 0x0020AFFD, \
0x0010009C, 0x00244FC8, 0x00130824, 0x000C0001, \
0x00101213, 0x00260FF7, 0x00041000, 0x00010004, \
0x00130826, 0x000C0006, 0x00220700, 0x0013C926, \
0x00101313, 0x00380700, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00080600, 0x00101B10, 0x00050004, 0x00100826, \
0x00101210, 0x00380FB6, 0x00000000, 0x00000000, \
0x002115A9, 0x00100099, 0x002065A7, 0x0010009A, \
0x0020A5A7, 0x0010009C, 0x002445A7, 0x00130836, \
0x000C0000, 0x00220FE4, 0x000C0001, 0x00101B13, \
0x00229F8E, 0x00210F8E, 0x00226F8E, 0x00216F8E, \
0x0022FF8E, 0x00215F8E, 0x00214F8E, 0x003805A3, \
0x00010004, 0x00041000, 0x00278FE9, 0x00040800, \
0x00018100, 0x003A047E, 0x00130826, 0x000C0001, \
0x002205A7, 0x00101313, 0x003805A7, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00130831, \
0x0010090B, 0x00124813, 0x000CFF80, 0x00260703, \
0x00041000, 0x00010004, 0x00380700  \
}

/********************************************************/
/*  Micro code for the 8086:1229 Rev F/10               */
/********************************************************/

/*  Parameter values for the D102 E-step  */
#define D102_E_CPUSAVER_TIMER_DWORD		42
#define D102_E_CPUSAVER_BUNDLE_DWORD		54
#define D102_E_CPUSAVER_MIN_SIZE_DWORD		46

#define D102_E_RCVBUNDLE_UCODE \
{\
0x007D028F, 0x0E4204F9, 0x14ED0C85, 0x14FA14E9, 0x0EF70E36, 0x1FFF1FFF, \
0x00E014B9, 0x00000000, 0x00000000, 0x00000000, \
0x00E014BD, 0x00000000, 0x00000000, 0x00000000, \
0x00E014D5, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00E014C1, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00E014C8, 0x00000000, 0x00000000, 0x00000000, \
0x00200600, 0x00E014EE, 0x00000000, 0x00000000, \
0x0030FF80, 0x00940E46, 0x00038200, 0x00102000, \
0x00E00E43, 0x00000000, 0x00000000, 0x00000000, \
0x00300006, 0x00E014FB, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00906E41, 0x00800E3C, 0x00E00E39, 0x00000000, \
0x00906EFD, 0x00900EFD, 0x00E00EF8, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
}

static void e100_setup_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
/* *INDENT-OFF* */
	static struct {
		u32 ucode[UCODE_SIZE + 1];
		u8 mac;
		u8 timer_dword;
		u8 bundle_dword;
		u8 min_size_dword;
	} ucode_opts[] = {
		{ D101M_B_RCVBUNDLE_UCODE,
		  mac_82559_D101M,
		  D101M_CPUSAVER_TIMER_DWORD,
		  D101M_CPUSAVER_BUNDLE_DWORD,
		  D101M_CPUSAVER_MIN_SIZE_DWORD },
		{ D101S_RCVBUNDLE_UCODE,
		  mac_82559_D101S,
		  D101S_CPUSAVER_TIMER_DWORD,
		  D101S_CPUSAVER_BUNDLE_DWORD,
		  D101S_CPUSAVER_MIN_SIZE_DWORD },
		{ D102_E_RCVBUNDLE_UCODE,
		  mac_82551_F,
		  D102_E_CPUSAVER_TIMER_DWORD,
		  D102_E_CPUSAVER_BUNDLE_DWORD,
		  D102_E_CPUSAVER_MIN_SIZE_DWORD },
		{ D102_E_RCVBUNDLE_UCODE,
		  mac_82551_10,
		  D102_E_CPUSAVER_TIMER_DWORD,
		  D102_E_CPUSAVER_BUNDLE_DWORD,
		  D102_E_CPUSAVER_MIN_SIZE_DWORD },
		{ {0}, 0, 0, 0, 0}
	}, *opts;
/* *INDENT-ON* */

/*************************************************************************
*  CPUSaver parameters
*
*  All CPUSaver parameters are 16-bit literals that are part of a
*  "move immediate value" instruction.  By changing the value of
*  the literal in the instruction before the code is loaded, the
*  driver can change the algorithm.
*
*  INTDELAY - This loads the dead-man timer with its initial value.
*    When this timer expires the interrupt is asserted, and the
*    timer is reset each time a new packet is received.  (see
*    BUNDLEMAX below to set the limit on number of chained packets)
*    The current default is 0x600 or 1536.  Experiments show that
*    the value should probably stay within the 0x200 - 0x1000 range.
*
*  BUNDLEMAX -
*    This sets the maximum number of frames that will be bundled.  In
*    some situations, such as the TCP windowing algorithm, it may be
*    better to limit the growth of the bundle size than let it go as
*    high as it can, because that could cause too much added latency.
*    The default is six, because this is the number of packets in the
*    default TCP window size.  A value of 1 would make CPUSaver indicate
*    an interrupt for every frame received.  If you do not want to put
*    a limit on the bundle size, set this value to xFFFF.
*
*  BUNDLESMALL -
*    This contains a bit-mask describing the minimum size frame that
*    will be bundled.  The default masks the lower 7 bits, which means
*    that any frame less than 128 bytes in length will not be bundled,
*    but will instead immediately generate an interrupt.  This does
*    not affect the current bundle in any way.  Any frame that is 128
*    bytes or larger will be bundled normally.  This feature is meant
*    to provide immediate indication of ACK frames in a TCP environment.
*    Customers were seeing poor performance when a machine with CPUSaver
*    enabled was sending but not receiving.  The delay introduced when
*    the ACKs were received was enough to reduce total throughput, because
*    the sender would sit idle until the ACK was finally seen.
*
*    The current default is 0xFF80, which masks out the lower 7 bits.
*    This means that any frame which is x7F (127) bytes or smaller
*    will cause an immediate interrupt.  Because this value must be a
*    bit mask, there are only a few valid values that can be used.  To
*    turn this feature off, the driver can write the value xFFFF to the
*    lower word of this instruction (in the same way that the other
*    parameters are used).  Likewise, a value of 0xF800 (2047) would
*    cause an interrupt to be generated for every frame, because all
*    standard Ethernet frames are <= 2047 bytes in length.
*************************************************************************/

/* if you wish to disable the ucode functionality, while maintaining the
 * workarounds it provides, set the following defines to:
 * BUNDLESMALL 0
 * BUNDLEMAX 1
 * INTDELAY 1
 */
#define BUNDLESMALL 1
#define BUNDLEMAX (u16)6
#define INTDELAY (u16)1536 /* 0x600 */
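
/* Worked example (illustrative): the three literals above are patched
 * into the microcode below.  With BUNDLESMALL 0 the min-size dword gets
 * 0xFF80, so a 127-byte ACK masks to 127 & 0xFF80 == 0 and interrupts
 * immediately, while a 128-byte frame masks non-zero and is bundled;
 * with the default BUNDLESMALL 1 the literal is 0xFFFF, so even small
 * frames are bundled.  Bundling ends after BUNDLEMAX (6) frames or when
 * the INTDELAY (1536) dead-man timer expires. */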

	/* do not load u-code for ICH devices */
	if (nic->flags & ich)
		goto noloaducode;

	/* Search for ucode match against h/w rev_id */
	for (opts = ucode_opts; opts->mac; opts++) {
		int i;
		u32 *ucode = opts->ucode;
		if (nic->mac != opts->mac)
			continue;

		/* Insert user-tunable settings */
		ucode[opts->timer_dword] &= 0xFFFF0000;
		ucode[opts->timer_dword] |= INTDELAY;
		ucode[opts->bundle_dword] &= 0xFFFF0000;
		ucode[opts->bundle_dword] |= BUNDLEMAX;
		ucode[opts->min_size_dword] &= 0xFFFF0000;
		ucode[opts->min_size_dword] |= (BUNDLESMALL) ? 0xFFFF : 0xFF80;

		for (i = 0; i < UCODE_SIZE; i++)
			cb->u.ucode[i] = cpu_to_le32(ucode[i]);
		cb->command = cpu_to_le16(cb_ucode | cb_el);
		return;
	}

noloaducode:
	cb->command = cpu_to_le16(cb_nop | cb_el);
}

static inline int e100_exec_cb_wait(struct nic *nic, struct sk_buff *skb,
	void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
{
	int err = 0, counter = 50;
	struct cb *cb = nic->cb_to_clean;

	if ((err = e100_exec_cb(nic, NULL, e100_setup_ucode)))
		DPRINTK(PROBE, ERR, "ucode cmd failed with error %d\n", err);

	/* must restart cuc */
	nic->cuc_cmd = cuc_start;

	/* wait for completion */
	e100_write_flush(nic);
	udelay(10);

	/* wait for possibly (ouch) 500ms */
	while (!(cb->status & cpu_to_le16(cb_complete))) {
		msleep(10);
		if (!--counter) break;
	}

	/* ack any interrupts, something could have been set */
	iowrite8(~0, &nic->csr->scb.stat_ack);

	/* if the command failed, or is not OK, notify and return */
	if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
		DPRINTK(PROBE, ERR, "ucode load failed\n");
		err = -EPERM;
	}

	return err;
}

static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
	struct sk_buff *skb)
{
	cb->command = cpu_to_le16(cb_iaaddr);
	memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
}

static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	cb->command = cpu_to_le16(cb_dump);
	cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
		offsetof(struct mem, dump_buf));
}

#define NCONFIG_AUTO_SWITCH	0x0080
#define MII_NSC_CONG		MII_RESV1
#define NSC_CONG_ENABLE		0x0100
#define NSC_CONG_TXREADY	0x0400
#define ADVERTISE_FC_SUPPORTED	0x0400
static int e100_phy_init(struct nic *nic)
{
	struct net_device *netdev = nic->netdev;
	u32 addr;
	u16 bmcr, stat, id_lo, id_hi, cong;

	/* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
	for(addr = 0; addr < 32; addr++) {
		nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		if(!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
			break;
	}
	DPRINTK(HW, DEBUG, "phy_addr = %d\n", nic->mii.phy_id);
	if(addr == 32)
		return -EAGAIN;

	/* Select the phy and isolate the rest */
	for(addr = 0; addr < 32; addr++) {
		if(addr != nic->mii.phy_id) {
			mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
		} else {
			bmcr = mdio_read(netdev, addr, MII_BMCR);
			mdio_write(netdev, addr, MII_BMCR,
				bmcr & ~BMCR_ISOLATE);
		}
	}

	/* Get phy ID */
	id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
	id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2);
	nic->phy = (u32)id_hi << 16 | (u32)id_lo;
	DPRINTK(HW, DEBUG, "phy ID = 0x%08X\n", nic->phy);

	/* Handle National tx phys */
#define NCS_PHY_MODEL_MASK	0xFFF0FFFF
	if((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) {
		/* Disable congestion control */
		cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG);
		cong |= NSC_CONG_TXREADY;
		cong &= ~NSC_CONG_ENABLE;
		mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
	}

	if((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
	   (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
	   !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
		/* enable/disable MDI/MDI-X auto-switching. */
		mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
			nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
	}

	return 0;
}

static int e100_hw_init(struct nic *nic)
{
	int err;

	e100_hw_reset(nic);

	DPRINTK(HW, ERR, "e100_hw_init\n");
	if(!in_interrupt() && (err = e100_self_test(nic)))
		return err;

	if((err = e100_phy_init(nic)))
		return err;
	if((err = e100_exec_cmd(nic, cuc_load_base, 0)))
		return err;
	if((err = e100_exec_cmd(nic, ruc_load_base, 0)))
		return err;
	if ((err = e100_exec_cb_wait(nic, NULL, e100_setup_ucode)))
		return err;
	if((err = e100_exec_cb(nic, NULL, e100_configure)))
		return err;
	if((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr)))
		return err;
	if((err = e100_exec_cmd(nic, cuc_dump_addr,
		nic->dma_addr + offsetof(struct mem, stats))))
		return err;
	if((err = e100_exec_cmd(nic, cuc_dump_reset, 0)))
		return err;

	e100_disable_irq(nic);

	return 0;
}

static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	struct net_device *netdev = nic->netdev;
	struct dev_mc_list *list = netdev->mc_list;
	u16 i, count = min(netdev->mc_count, E100_MAX_MULTICAST_ADDRS);

	cb->command = cpu_to_le16(cb_multi);
	cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
	for(i = 0; list && i < count; i++, list = list->next)
		memcpy(&cb->u.multi.addr[i*ETH_ALEN], &list->dmi_addr,
			ETH_ALEN);
}

static void e100_set_multicast_list(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	DPRINTK(HW, DEBUG, "mc_count=%d, flags=0x%04X\n",
		netdev->mc_count, netdev->flags);

	if(netdev->flags & IFF_PROMISC)
		nic->flags |= promiscuous;
	else
		nic->flags &= ~promiscuous;

	if(netdev->flags & IFF_ALLMULTI ||
		netdev->mc_count > E100_MAX_MULTICAST_ADDRS)
		nic->flags |= multicast_all;
	else
		nic->flags &= ~multicast_all;

	e100_exec_cb(nic, NULL, e100_configure);
	e100_exec_cb(nic, NULL, e100_multi);
}

static void e100_update_stats(struct nic *nic)
{
	struct net_device_stats *ns = &nic->net_stats;
	struct stats *s = &nic->mem->stats;
	u32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause :
		(nic->mac < mac_82559_D101M) ? (u32 *)&s->xmt_tco_frames :
		&s->complete;

	/* Device's stats reporting may take several microseconds to
	 * complete, so we're always waiting for results of the
	 * previous command. */

	if(*complete == le32_to_cpu(cuc_dump_reset_complete)) {
		*complete = 0;
		nic->tx_frames = le32_to_cpu(s->tx_good_frames);
		nic->tx_collisions = le32_to_cpu(s->tx_total_collisions);
		ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions);
		ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions);
		ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs);
		ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns);
		ns->collisions += nic->tx_collisions;
		ns->tx_errors += le32_to_cpu(s->tx_max_collisions) +
			le32_to_cpu(s->tx_lost_crs);
		ns->rx_length_errors += le32_to_cpu(s->rx_short_frame_errors) +
			nic->rx_over_length_errors;
		ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors);
		ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors);
		ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors);
		ns->rx_errors += le32_to_cpu(s->rx_crc_errors) +
			le32_to_cpu(s->rx_alignment_errors) +
			le32_to_cpu(s->rx_short_frame_errors) +
			le32_to_cpu(s->rx_cdt_errors);
		nic->tx_deferred += le32_to_cpu(s->tx_deferred);
		nic->tx_single_collisions +=
			le32_to_cpu(s->tx_single_collisions);
		nic->tx_multiple_collisions +=
			le32_to_cpu(s->tx_multiple_collisions);
		if(nic->mac >= mac_82558_D101_A4) {
			nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause);
			nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause);
			nic->rx_fc_unsupported +=
				le32_to_cpu(s->fc_rcv_unsupported);
			if(nic->mac >= mac_82559_D101M) {
				nic->tx_tco_frames +=
					le16_to_cpu(s->xmt_tco_frames);
				nic->rx_tco_frames +=
					le16_to_cpu(s->rcv_tco_frames);
			}
		}
	}

	if(e100_exec_cmd(nic, cuc_dump_reset, 0))
		DPRINTK(TX_ERR, DEBUG, "exec cuc_dump_reset failed\n");
}

static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
{
	/* Adjust inter-frame-spacing (IFS) between two transmits if
	 * we're getting collisions on a half-duplex connection. */

	if(duplex == DUPLEX_HALF) {
		u32 prev = nic->adaptive_ifs;
		u32 min_frames = (speed == SPEED_100) ? 1000 : 100;

		if((nic->tx_frames / 32 < nic->tx_collisions) &&
		   (nic->tx_frames > min_frames)) {
			if(nic->adaptive_ifs < 60)
				nic->adaptive_ifs += 5;
		} else if (nic->tx_frames < min_frames) {
			if(nic->adaptive_ifs >= 5)
				nic->adaptive_ifs -= 5;
		}
		if(nic->adaptive_ifs != prev)
			e100_exec_cb(nic, NULL, e100_configure);
	}
}

static void e100_watchdog(unsigned long data)
{
	struct nic *nic = (struct nic *)data;
	struct ethtool_cmd cmd;

	DPRINTK(TIMER, DEBUG, "right now = %ld\n", jiffies);

	/* mii library handles link maintenance tasks */

	mii_ethtool_gset(&nic->mii, &cmd);

	if(mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
		DPRINTK(LINK, INFO, "link up, %sMbps, %s-duplex\n",
			cmd.speed == SPEED_100 ? "100" : "10",
			cmd.duplex == DUPLEX_FULL ? "full" : "half");
	} else if(!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
		DPRINTK(LINK, INFO, "link down\n");
	}

	mii_check_link(&nic->mii);

	/* Software generated interrupt to recover from (rare) Rx
	 * allocation failure.
	 * Unfortunately have to use a spinlock to not re-enable interrupts
	 * accidentally, due to hardware that shares a register between the
	 * interrupt mask bit and the SW Interrupt generation bit */
	spin_lock_irq(&nic->cmd_lock);
	iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen,
		&nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irq(&nic->cmd_lock);

	e100_update_stats(nic);
	e100_adjust_adaptive_ifs(nic, cmd.speed, cmd.duplex);

	if(nic->mac <= mac_82557_D100_C)
		/* Issue a multicast command to workaround a 557 lock up */
		e100_set_multicast_list(nic->netdev);

	if(nic->flags & ich && cmd.speed==SPEED_10 && cmd.duplex==DUPLEX_HALF)
		/* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */
		nic->flags |= ich_10h_workaround;
	else
		nic->flags &= ~ich_10h_workaround;

	mod_timer(&nic->watchdog, jiffies + E100_WATCHDOG_PERIOD);
}

858119e1 1599static void e100_xmit_prepare(struct nic *nic, struct cb *cb,
1da177e4
LT
1600 struct sk_buff *skb)
1601{
1602 cb->command = nic->tx_command;
962082b6 1603 /* interrupt every 16 packets regardless of delay */
996ec353
MC
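	/* (cbs_avail & ~15) == cbs_avail holds exactly when the low four
	 * bits of cbs_avail are zero, i.e. it is a multiple of 16, so
	 * roughly one TCB in sixteen is tagged to raise a Tx interrupt. */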
	if((nic->cbs_avail & ~15) == nic->cbs_avail)
		cb->command |= cpu_to_le16(cb_i);
	cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
	cb->u.tcb.tcb_byte_count = 0;
	cb->u.tcb.threshold = nic->tx_threshold;
	cb->u.tcb.tbd_count = 1;
	cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
		skb->data, skb->len, PCI_DMA_TODEVICE));
	/* check for mapping failure? */
	cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
}

static int e100_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	int err;

	if(nic->flags & ich_10h_workaround) {
		/* SW workaround for ICH[x] 10Mbps/half duplex Tx hang.
		   Issue a NOP command followed by a 1us delay before
		   issuing the Tx command. */
		if(e100_exec_cmd(nic, cuc_nop, 0))
			DPRINTK(TX_ERR, DEBUG, "exec cuc_nop failed\n");
		udelay(1);
	}

	err = e100_exec_cb(nic, skb, e100_xmit_prepare);

	switch(err) {
	case -ENOSPC:
		/* We queued the skb, but now we're out of space. */
		DPRINTK(TX_ERR, DEBUG, "No space for CB\n");
		netif_stop_queue(netdev);
		break;
	case -ENOMEM:
		/* This is a hard error - log it. */
		DPRINTK(TX_ERR, DEBUG, "Out of Tx resources, returning skb\n");
		netif_stop_queue(netdev);
		return 1;
	}

	netdev->trans_start = jiffies;
	return 0;
}

static int e100_tx_clean(struct nic *nic)
{
	struct cb *cb;
	int tx_cleaned = 0;

	spin_lock(&nic->cb_lock);

	/* Clean CBs marked complete */
	for(cb = nic->cb_to_clean;
	    cb->status & cpu_to_le16(cb_complete);
	    cb = nic->cb_to_clean = cb->next) {
		DPRINTK(TX_DONE, DEBUG, "cb[%d]->status = 0x%04X\n",
			(int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)),
			cb->status);

		if(likely(cb->skb != NULL)) {
			nic->net_stats.tx_packets++;
			nic->net_stats.tx_bytes += cb->skb->len;

			pci_unmap_single(nic->pdev,
				le32_to_cpu(cb->u.tcb.tbd.buf_addr),
				le16_to_cpu(cb->u.tcb.tbd.size),
				PCI_DMA_TODEVICE);
			dev_kfree_skb_any(cb->skb);
			cb->skb = NULL;
			tx_cleaned = 1;
		}
		cb->status = 0;
		nic->cbs_avail++;
	}

	spin_unlock(&nic->cb_lock);

	/* Recover from running out of Tx resources in xmit_frame */
	if(unlikely(tx_cleaned && netif_queue_stopped(nic->netdev)))
		netif_wake_queue(nic->netdev);

	return tx_cleaned;
}

static void e100_clean_cbs(struct nic *nic)
{
	if(nic->cbs) {
		while(nic->cbs_avail != nic->params.cbs.count) {
			struct cb *cb = nic->cb_to_clean;
			if(cb->skb) {
				pci_unmap_single(nic->pdev,
					le32_to_cpu(cb->u.tcb.tbd.buf_addr),
					le16_to_cpu(cb->u.tcb.tbd.size),
					PCI_DMA_TODEVICE);
				dev_kfree_skb(cb->skb);
			}
			nic->cb_to_clean = nic->cb_to_clean->next;
			nic->cbs_avail++;
		}
		pci_free_consistent(nic->pdev,
			sizeof(struct cb) * nic->params.cbs.count,
			nic->cbs, nic->cbs_dma_addr);
		nic->cbs = NULL;
		nic->cbs_avail = 0;
	}
	nic->cuc_cmd = cuc_start;
	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean =
		nic->cbs;
}

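/* The command blocks form a circular ring twice over: in software via the
 * next/prev pointers, and in hardware via each cb->link, a little-endian
 * DMA address that wraps from the last block back to nic->cbs_dma_addr. */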
static int e100_alloc_cbs(struct nic *nic)
{
	struct cb *cb;
	unsigned int i, count = nic->params.cbs.count;

	nic->cuc_cmd = cuc_start;
	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL;
	nic->cbs_avail = 0;

	nic->cbs = pci_alloc_consistent(nic->pdev,
		sizeof(struct cb) * count, &nic->cbs_dma_addr);
	if(!nic->cbs)
		return -ENOMEM;

	for(cb = nic->cbs, i = 0; i < count; cb++, i++) {
		cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
		cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1;

		cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb);
		cb->link = cpu_to_le32(nic->cbs_dma_addr +
			((i+1) % count) * sizeof(struct cb));
		cb->skb = NULL;
	}

	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
	nic->cbs_avail = count;

	return 0;
}

static inline void e100_start_receiver(struct nic *nic)
{
	/* Start if RFA is non-NULL */
	if(nic->rx_to_clean->skb)
		e100_exec_cmd(nic, ruc_start, nic->rx_to_clean->dma_addr);
}

#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN)
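/* Each Rx buffer holds a struct rfd header followed by room for a
 * maximum-size VLAN Ethernet frame; the controller DMA-writes received
 * data immediately after the RFD it completes. */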
static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
{
	if(!(rx->skb = netdev_alloc_skb(nic->netdev, RFD_BUF_LEN + NET_IP_ALIGN)))
		return -ENOMEM;

	/* Align, init, and map the RFD. */
	skb_reserve(rx->skb, NET_IP_ALIGN);
	skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd));
	rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	if(pci_dma_mapping_error(rx->dma_addr)) {
		dev_kfree_skb_any(rx->skb);
		rx->skb = NULL;
		rx->dma_addr = 0;
		return -ENOMEM;
	}

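	/* cb_el marks the end of the receive list; clearing it on the
	 * previous RFD (after pointing its link here) extends the list so
	 * the receive unit keeps running into the new buffer. */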
	/* Link the RFD to end of RFA by linking previous RFD to
	 * this one, and clearing EL bit of previous. */
	if(rx->prev->skb) {
		struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
		put_unaligned(cpu_to_le32(rx->dma_addr),
			(u32 *)&prev_rfd->link);
		wmb();
		/* cb_el & cb_s are distinct bits, so masking with their AND
		 * was a no-op; clear the EL bit as the comment intends. */
		prev_rfd->command &= ~cpu_to_le16(cb_el);
		pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
			sizeof(struct rfd), PCI_DMA_TODEVICE);
	}

	return 0;
}

static int e100_rx_indicate(struct nic *nic, struct rx *rx,
	unsigned int *work_done, unsigned int work_to_do)
{
	struct sk_buff *skb = rx->skb;
	struct rfd *rfd = (struct rfd *)skb->data;
	u16 rfd_status, actual_size;

	if(unlikely(work_done && *work_done >= work_to_do))
		return -EAGAIN;

	/* Need to sync before taking a peek at cb_complete bit */
	pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr,
		sizeof(struct rfd), PCI_DMA_FROMDEVICE);
	rfd_status = le16_to_cpu(rfd->status);

	DPRINTK(RX_STATUS, DEBUG, "status=0x%04X\n", rfd_status);

	/* If data isn't ready, nothing to indicate */
	if(unlikely(!(rfd_status & cb_complete)))
		return -ENODATA;

	/* Get actual data size */
	actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
	if(unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd)))
		actual_size = RFD_BUF_LEN - sizeof(struct rfd);

	/* Get data */
	pci_unmap_single(nic->pdev, rx->dma_addr,
		RFD_BUF_LEN, PCI_DMA_FROMDEVICE);

	/* Pull off the RFD and put the actual data (minus eth hdr) */
	skb_reserve(skb, sizeof(struct rfd));
	skb_put(skb, actual_size);
	skb->protocol = eth_type_trans(skb, nic->netdev);

	if(unlikely(!(rfd_status & cb_ok))) {
		/* Don't indicate if hardware indicates errors */
		dev_kfree_skb_any(skb);
	} else if(actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN) {
		/* Don't indicate oversized frames */
		nic->rx_over_length_errors++;
		dev_kfree_skb_any(skb);
	} else {
		nic->net_stats.rx_packets++;
		nic->net_stats.rx_bytes += actual_size;
		nic->netdev->last_rx = jiffies;
		netif_receive_skb(skb);
		if(work_done)
			(*work_done)++;
	}

	rx->skb = NULL;

	return 0;
}

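/* Rx cleanup runs in two passes: first hand completed buffers up the stack
 * (bounded by the NAPI budget), then refill the list with fresh skbs; a
 * failed refill is retried later via the watchdog's software interrupt. */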
static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
	unsigned int work_to_do)
{
	struct rx *rx;

	/* Indicate newly arrived packets */
	for(rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
		if(e100_rx_indicate(nic, rx, work_done, work_to_do))
			break; /* No more to clean */
	}

	/* Alloc new skbs to refill list */
	for(rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
		if(unlikely(e100_rx_alloc_skb(nic, rx)))
			break; /* Better luck next time (see watchdog) */
	}
}

static void e100_rx_clean_list(struct nic *nic)
{
	struct rx *rx;
	unsigned int i, count = nic->params.rfds.count;

	if(nic->rxs) {
		for(rx = nic->rxs, i = 0; i < count; rx++, i++) {
			if(rx->skb) {
				pci_unmap_single(nic->pdev, rx->dma_addr,
					RFD_BUF_LEN, PCI_DMA_FROMDEVICE);
				dev_kfree_skb(rx->skb);
			}
		}
		kfree(nic->rxs);
		nic->rxs = NULL;
	}

	nic->rx_to_use = nic->rx_to_clean = NULL;
}

static int e100_rx_alloc_list(struct nic *nic)
{
	struct rx *rx;
	unsigned int i, count = nic->params.rfds.count;

	nic->rx_to_use = nic->rx_to_clean = NULL;

	if(!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_ATOMIC)))
		return -ENOMEM;

	for(rx = nic->rxs, i = 0; i < count; rx++, i++) {
		rx->next = (i + 1 < count) ? rx + 1 : nic->rxs;
		rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1;
		if(e100_rx_alloc_skb(nic, rx)) {
			e100_rx_clean_list(nic);
			return -ENOMEM;
		}
	}

	nic->rx_to_use = nic->rx_to_clean = nic->rxs;

	return 0;
}

static irqreturn_t e100_intr(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct nic *nic = netdev_priv(netdev);
	u8 stat_ack = ioread8(&nic->csr->scb.stat_ack);

	DPRINTK(INTR, DEBUG, "stat_ack = 0x%02X\n", stat_ack);

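	/* stat_ack reads back as all-zeros when the interrupt came from a
	 * device sharing the line, and as all-ones when the hardware has
	 * been removed (e.g. a CardBus eject), so both cases bail out. */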
	if(stat_ack == stat_ack_not_ours ||	/* Not our interrupt */
	   stat_ack == stat_ack_not_present)	/* Hardware is ejected */
		return IRQ_NONE;

	/* Ack interrupt(s) */
	iowrite8(stat_ack, &nic->csr->scb.stat_ack);

	if(likely(netif_rx_schedule_prep(netdev))) {
		e100_disable_irq(nic);
		__netif_rx_schedule(netdev);
	}

	return IRQ_HANDLED;
}

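/* NAPI poll (old netdev->poll interface): *budget is the global quota and
 * netdev->quota the per-device one; returning 1 keeps the device on the
 * poll list, returning 0 (after netif_rx_complete) re-enables interrupts. */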
static int e100_poll(struct net_device *netdev, int *budget)
{
	struct nic *nic = netdev_priv(netdev);
	unsigned int work_to_do = min(netdev->quota, *budget);
	unsigned int work_done = 0;
	int tx_cleaned;

	e100_rx_clean(nic, &work_done, work_to_do);
	tx_cleaned = e100_tx_clean(nic);

	/* If no Rx and Tx cleanup work was done, exit polling mode. */
	if((!tx_cleaned && (work_done == 0)) || !netif_running(netdev)) {
		netif_rx_complete(netdev);
		e100_enable_irq(nic);
		return 0;
	}

	*budget -= work_done;
	netdev->quota -= work_done;

	return 1;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void e100_netpoll(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	e100_disable_irq(nic);
	e100_intr(nic->pdev->irq, netdev);
	e100_tx_clean(nic);
	e100_enable_irq(nic);
}
#endif

static struct net_device_stats *e100_get_stats(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return &nic->net_stats;
}

static int e100_set_mac_address(struct net_device *netdev, void *p)
{
	struct nic *nic = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	e100_exec_cb(nic, NULL, e100_setup_iaaddr);

	return 0;
}

static int e100_change_mtu(struct net_device *netdev, int new_mtu)
{
	if(new_mtu < ETH_ZLEN || new_mtu > ETH_DATA_LEN)
		return -EINVAL;
	netdev->mtu = new_mtu;
	return 0;
}

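/* ASF (Alert Standard Format) is out-of-band manageability firmware.
 * e100_asf() below decides from the device ID range and EEPROM bits
 * whether it is active; suspend/shutdown keep PME armed in that case so
 * the part stays wake-capable for management traffic. */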
static int e100_asf(struct nic *nic)
{
	/* ASF can be enabled from eeprom */
	return((nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
	   (nic->eeprom[eeprom_config_asf] & eeprom_asf) &&
	   !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
	   ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE));
}

static int e100_up(struct nic *nic)
{
	int err;

	if((err = e100_rx_alloc_list(nic)))
		return err;
	if((err = e100_alloc_cbs(nic)))
		goto err_rx_clean_list;
	if((err = e100_hw_init(nic)))
		goto err_clean_cbs;
	e100_set_multicast_list(nic->netdev);
	e100_start_receiver(nic);
	mod_timer(&nic->watchdog, jiffies);
	if((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED,
		nic->netdev->name, nic->netdev)))
		goto err_no_irq;
	netif_wake_queue(nic->netdev);
	netif_poll_enable(nic->netdev);
	/* enable ints _after_ enabling poll, preventing a race between
	 * disable ints+schedule */
	e100_enable_irq(nic);
	return 0;

err_no_irq:
	del_timer_sync(&nic->watchdog);
err_clean_cbs:
	e100_clean_cbs(nic);
err_rx_clean_list:
	e100_rx_clean_list(nic);
	return err;
}

static void e100_down(struct nic *nic)
{
	/* wait here for poll to complete */
	netif_poll_disable(nic->netdev);
	netif_stop_queue(nic->netdev);
	e100_hw_reset(nic);
	free_irq(nic->pdev->irq, nic->netdev);
	del_timer_sync(&nic->watchdog);
	netif_carrier_off(nic->netdev);
	e100_clean_cbs(nic);
	e100_rx_clean_list(nic);
}

static void e100_tx_timeout(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	/* Reset outside of interrupt context, to avoid request_irq
	 * in interrupt context */
	schedule_work(&nic->tx_timeout_task);
}

static void e100_tx_timeout_task(struct work_struct *work)
{
	struct nic *nic = container_of(work, struct nic, tx_timeout_task);
	struct net_device *netdev = nic->netdev;

	DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n",
		ioread8(&nic->csr->scb.status));
	e100_down(netdev_priv(netdev));
	e100_up(netdev_priv(netdev));
}

static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
{
	int err;
	struct sk_buff *skb;

	/* Use driver resources to perform internal MAC or PHY
	 * loopback test. A single packet is prepared and transmitted
	 * in loopback mode, and the test passes if the received
	 * packet compares byte-for-byte to the transmitted packet. */

	if((err = e100_rx_alloc_list(nic)))
		return err;
	if((err = e100_alloc_cbs(nic)))
		goto err_clean_rx;

	/* ICH PHY loopback is broken so do MAC loopback instead */
	if(nic->flags & ich && loopback_mode == lb_phy)
		loopback_mode = lb_mac;

	nic->loopback = loopback_mode;
	if((err = e100_hw_init(nic)))
		goto err_loopback_none;

	if(loopback_mode == lb_phy)
		mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
			BMCR_LOOPBACK);

	e100_start_receiver(nic);

	if(!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) {
		err = -ENOMEM;
		goto err_loopback_none;
	}
	skb_put(skb, ETH_DATA_LEN);
	memset(skb->data, 0xFF, ETH_DATA_LEN);
	e100_xmit_frame(skb, nic->netdev);

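	/* Even at 10 Mbps a 1500-byte frame is only about 1.2 ms on the
	 * wire, so a 10 ms sleep should comfortably cover the loopback
	 * round trip before the received copy is compared. */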
	msleep(10);

	pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr,
		RFD_BUF_LEN, PCI_DMA_FROMDEVICE);

	if(memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
	   skb->data, ETH_DATA_LEN))
		err = -EAGAIN;

err_loopback_none:
	mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0);
	nic->loopback = lb_none;
	e100_clean_cbs(nic);
	e100_hw_reset(nic);
err_clean_rx:
	e100_rx_clean_list(nic);
	return err;
}

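/* Timer callback behind ethtool's identify operation: each tick toggles
 * the PHY LED through the vendor LED-control register and re-arms itself
 * at HZ/4, i.e. roughly four blinks per second until phys_id stops it. */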
#define MII_LED_CONTROL 0x1B
static void e100_blink_led(unsigned long data)
{
	struct nic *nic = (struct nic *)data;
	enum led_state {
		led_on = 0x01,
		led_off = 0x04,
		led_on_559 = 0x05,
		led_on_557 = 0x07,
	};

	nic->leds = (nic->leds & led_on) ? led_off :
		(nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559;
	mdio_write(nic->netdev, nic->mii.phy_id, MII_LED_CONTROL, nic->leds);
	mod_timer(&nic->blink_timer, jiffies + HZ / 4);
}

static int e100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
	struct nic *nic = netdev_priv(netdev);
	return mii_ethtool_gset(&nic->mii, cmd);
}

static int e100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
	struct nic *nic = netdev_priv(netdev);
	int err;

	mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET);
	err = mii_ethtool_sset(&nic->mii, cmd);
	e100_exec_cb(nic, NULL, e100_configure);

	return err;
}

static void e100_get_drvinfo(struct net_device *netdev,
	struct ethtool_drvinfo *info)
{
	struct nic *nic = netdev_priv(netdev);
	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->fw_version, "N/A");
	strcpy(info->bus_info, pci_name(nic->pdev));
}

static int e100_get_regs_len(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
/* parenthesized so the * sizeof(u32) below multiplies the whole sum,
 * not just the last term */
#define E100_PHY_REGS 0x1C
#define E100_REGS_LEN (1 + E100_PHY_REGS + \
	sizeof(nic->mem->dump_buf) / sizeof(u32))
	return E100_REGS_LEN * sizeof(u32);
}

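/* Register dump layout produced below: word 0 packs the SCB command and
 * status bytes, words 1..E100_PHY_REGS hold the MDI/PHY registers read
 * back in descending order, and the tail is the microcode dump buffer. */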
static void e100_get_regs(struct net_device *netdev,
	struct ethtool_regs *regs, void *p)
{
	struct nic *nic = netdev_priv(netdev);
	u32 *buff = p;
	int i;

	regs->version = (1 << 24) | nic->rev_id;
	buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 |
		ioread8(&nic->csr->scb.cmd_lo) << 16 |
		ioread16(&nic->csr->scb.status);
	for(i = E100_PHY_REGS; i >= 0; i--)
		buff[1 + E100_PHY_REGS - i] =
			mdio_read(netdev, nic->mii.phy_id, i);
	memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf));
	e100_exec_cb(nic, NULL, e100_dump);
	msleep(10);
	memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf,
		sizeof(nic->mem->dump_buf));
}

static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct nic *nic = netdev_priv(netdev);
	wol->supported = (nic->mac >= mac_82558_D101_A4) ? WAKE_MAGIC : 0;
	wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0;
}

static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct nic *nic = netdev_priv(netdev);

	if(wol->wolopts != WAKE_MAGIC && wol->wolopts != 0)
		return -EOPNOTSUPP;

	if(wol->wolopts)
		nic->flags |= wol_magic;
	else
		nic->flags &= ~wol_magic;

	e100_exec_cb(nic, NULL, e100_configure);

	return 0;
}

static u32 e100_get_msglevel(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return nic->msg_enable;
}

static void e100_set_msglevel(struct net_device *netdev, u32 value)
{
	struct nic *nic = netdev_priv(netdev);
	nic->msg_enable = value;
}

static int e100_nway_reset(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return mii_nway_restart(&nic->mii);
}

static u32 e100_get_link(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return mii_link_ok(&nic->mii);
}

static int e100_get_eeprom_len(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return nic->eeprom_wc << 1;
}

#define E100_EEPROM_MAGIC 0x1234
static int e100_get_eeprom(struct net_device *netdev,
	struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct nic *nic = netdev_priv(netdev);

	eeprom->magic = E100_EEPROM_MAGIC;
	memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len);

	return 0;
}

static int e100_set_eeprom(struct net_device *netdev,
	struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct nic *nic = netdev_priv(netdev);

	if(eeprom->magic != E100_EEPROM_MAGIC)
		return -EINVAL;

	memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len);

	return e100_eeprom_save(nic, eeprom->offset >> 1,
		(eeprom->len >> 1) + 1);
}

static void e100_get_ringparam(struct net_device *netdev,
	struct ethtool_ringparam *ring)
{
	struct nic *nic = netdev_priv(netdev);
	struct param_range *rfds = &nic->params.rfds;
	struct param_range *cbs = &nic->params.cbs;

	ring->rx_max_pending = rfds->max;
	ring->tx_max_pending = cbs->max;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_pending = rfds->count;
	ring->tx_pending = cbs->count;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}

static int e100_set_ringparam(struct net_device *netdev,
	struct ethtool_ringparam *ring)
{
	struct nic *nic = netdev_priv(netdev);
	struct param_range *rfds = &nic->params.rfds;
	struct param_range *cbs = &nic->params.cbs;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	if(netif_running(netdev))
		e100_down(nic);
	rfds->count = max(ring->rx_pending, rfds->min);
	rfds->count = min(rfds->count, rfds->max);
	cbs->count = max(ring->tx_pending, cbs->min);
	cbs->count = min(cbs->count, cbs->max);
	DPRINTK(DRV, INFO, "Ring Param settings: rx: %d, tx %d\n",
		rfds->count, cbs->count);
	if(netif_running(netdev))
		e100_up(nic);

	return 0;
}

static const char e100_gstrings_test[][ETH_GSTRING_LEN] = {
	"Link test (on/offline)",
	"Eeprom test (on/offline)",
	"Self test (offline)",
	"Mac loopback (offline)",
	"Phy loopback (offline)",
};
#define E100_TEST_LEN (sizeof(e100_gstrings_test) / ETH_GSTRING_LEN)

static int e100_diag_test_count(struct net_device *netdev)
{
	return E100_TEST_LEN;
}

static void e100_diag_test(struct net_device *netdev,
	struct ethtool_test *test, u64 *data)
{
	struct ethtool_cmd cmd;
	struct nic *nic = netdev_priv(netdev);
	int i, err;

	memset(data, 0, E100_TEST_LEN * sizeof(u64));
	data[0] = !mii_link_ok(&nic->mii);
	data[1] = e100_eeprom_load(nic);
	if(test->flags & ETH_TEST_FL_OFFLINE) {

		/* save speed, duplex & autoneg settings */
		err = mii_ethtool_gset(&nic->mii, &cmd);

		if(netif_running(netdev))
			e100_down(nic);
		data[2] = e100_self_test(nic);
		data[3] = e100_loopback_test(nic, lb_mac);
		data[4] = e100_loopback_test(nic, lb_phy);

		/* restore speed, duplex & autoneg settings */
		err = mii_ethtool_sset(&nic->mii, &cmd);

		if(netif_running(netdev))
			e100_up(nic);
	}
	for(i = 0; i < E100_TEST_LEN; i++)
		test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0;

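	/* A few seconds' pause before returning, presumably to let the link
	 * renegotiate after the offline tests reset the PHY; interruptible
	 * so a signal can cut the wait short. */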
	msleep_interruptible(4 * 1000);
}

static int e100_phys_id(struct net_device *netdev, u32 data)
{
	struct nic *nic = netdev_priv(netdev);

	if(!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
		data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);
	mod_timer(&nic->blink_timer, jiffies);
	msleep_interruptible(data * 1000);
	del_timer_sync(&nic->blink_timer);
	mdio_write(netdev, nic->mii.phy_id, MII_LED_CONTROL, 0);

	return 0;
}

static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = {
	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
	"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
	"rx_length_errors", "rx_over_errors", "rx_crc_errors",
	"rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
	"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
	"tx_heartbeat_errors", "tx_window_errors",
	/* device-specific stats */
	"tx_deferred", "tx_single_collisions", "tx_multi_collisions",
	"tx_flow_control_pause", "rx_flow_control_pause",
	"rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets",
};
#define E100_NET_STATS_LEN 21
#define E100_STATS_LEN (sizeof(e100_gstrings_stats) / ETH_GSTRING_LEN)

static int e100_get_stats_count(struct net_device *netdev)
{
	return E100_STATS_LEN;
}

static void e100_get_ethtool_stats(struct net_device *netdev,
	struct ethtool_stats *stats, u64 *data)
{
	struct nic *nic = netdev_priv(netdev);
	int i;

	for(i = 0; i < E100_NET_STATS_LEN; i++)
		data[i] = ((unsigned long *)&nic->net_stats)[i];

	data[i++] = nic->tx_deferred;
	data[i++] = nic->tx_single_collisions;
	data[i++] = nic->tx_multiple_collisions;
	data[i++] = nic->tx_fc_pause;
	data[i++] = nic->rx_fc_pause;
	data[i++] = nic->rx_fc_unsupported;
	data[i++] = nic->tx_tco_frames;
	data[i++] = nic->rx_tco_frames;
}

static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	switch(stringset) {
	case ETH_SS_TEST:
		memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test));
		break;
	case ETH_SS_STATS:
		memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats));
		break;
	}
}

static const struct ethtool_ops e100_ethtool_ops = {
	.get_settings = e100_get_settings,
	.set_settings = e100_set_settings,
	.get_drvinfo = e100_get_drvinfo,
	.get_regs_len = e100_get_regs_len,
	.get_regs = e100_get_regs,
	.get_wol = e100_get_wol,
	.set_wol = e100_set_wol,
	.get_msglevel = e100_get_msglevel,
	.set_msglevel = e100_set_msglevel,
	.nway_reset = e100_nway_reset,
	.get_link = e100_get_link,
	.get_eeprom_len = e100_get_eeprom_len,
	.get_eeprom = e100_get_eeprom,
	.set_eeprom = e100_set_eeprom,
	.get_ringparam = e100_get_ringparam,
	.set_ringparam = e100_set_ringparam,
	.self_test_count = e100_diag_test_count,
	.self_test = e100_diag_test,
	.get_strings = e100_get_strings,
	.phys_id = e100_phys_id,
	.get_stats_count = e100_get_stats_count,
	.get_ethtool_stats = e100_get_ethtool_stats,
	.get_perm_addr = ethtool_op_get_perm_addr,
};

static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct nic *nic = netdev_priv(netdev);

	return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL);
}

static int e100_alloc(struct nic *nic)
{
	nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem),
		&nic->dma_addr);
	return nic->mem ? 0 : -ENOMEM;
}

static void e100_free(struct nic *nic)
{
	if(nic->mem) {
		pci_free_consistent(nic->pdev, sizeof(struct mem),
			nic->mem, nic->dma_addr);
		nic->mem = NULL;
	}
}

static int e100_open(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	int err = 0;

	netif_carrier_off(netdev);
	if((err = e100_up(nic)))
		DPRINTK(IFUP, ERR, "Cannot open interface, aborting.\n");
	return err;
}

static int e100_close(struct net_device *netdev)
{
	e100_down(netdev_priv(netdev));
	return 0;
}

static int __devinit e100_probe(struct pci_dev *pdev,
	const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct nic *nic;
	int err;

	if(!(netdev = alloc_etherdev(sizeof(struct nic)))) {
		if(((1 << debug) - 1) & NETIF_MSG_PROBE)
			printk(KERN_ERR PFX "Etherdev alloc failed, abort.\n");
		return -ENOMEM;
	}

	netdev->open = e100_open;
	netdev->stop = e100_close;
	netdev->hard_start_xmit = e100_xmit_frame;
	netdev->get_stats = e100_get_stats;
	netdev->set_multicast_list = e100_set_multicast_list;
	netdev->set_mac_address = e100_set_mac_address;
	netdev->change_mtu = e100_change_mtu;
	netdev->do_ioctl = e100_do_ioctl;
	SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops);
	netdev->tx_timeout = e100_tx_timeout;
	netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
	netdev->poll = e100_poll;
	netdev->weight = E100_NAPI_WEIGHT;
#ifdef CONFIG_NET_POLL_CONTROLLER
	netdev->poll_controller = e100_netpoll;
#endif
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	nic = netdev_priv(netdev);
	nic->netdev = netdev;
	nic->pdev = pdev;
	nic->msg_enable = (1 << debug) - 1;
	pci_set_drvdata(pdev, netdev);

	if((err = pci_enable_device(pdev))) {
		DPRINTK(PROBE, ERR, "Cannot enable PCI device, aborting.\n");
		goto err_out_free_dev;
	}

	if(!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		DPRINTK(PROBE, ERR, "Cannot find proper PCI device "
			"base address, aborting.\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	if((err = pci_request_regions(pdev, DRV_NAME))) {
		DPRINTK(PROBE, ERR, "Cannot obtain PCI resources, aborting.\n");
		goto err_out_disable_pdev;
	}

	if((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
		DPRINTK(PROBE, ERR, "No usable DMA configuration, aborting.\n");
		goto err_out_free_res;
	}

	SET_MODULE_OWNER(netdev);
	SET_NETDEV_DEV(netdev, &pdev->dev);

	if (use_io)
		DPRINTK(PROBE, INFO, "using i/o access mode\n");

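	/* BAR 0 is the memory-mapped CSR window and BAR 1 the I/O-port
	 * alias of the same registers; the use_io module parameter picks
	 * which one gets mapped. */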
	nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr));
	if(!nic->csr) {
		DPRINTK(PROBE, ERR, "Cannot map device registers, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	if(ent->driver_data)
		nic->flags |= ich;
	else
		nic->flags &= ~ich;

	e100_get_defaults(nic);

	/* locks must be initialized before calling hw_reset */
	spin_lock_init(&nic->cb_lock);
	spin_lock_init(&nic->cmd_lock);
	spin_lock_init(&nic->mdio_lock);

	/* Reset the device before pci_set_master() in case device is in some
	 * funky state and has an interrupt pending - hint: we don't have the
	 * interrupt handler registered yet. */
	e100_hw_reset(nic);

	pci_set_master(pdev);

	init_timer(&nic->watchdog);
	nic->watchdog.function = e100_watchdog;
	nic->watchdog.data = (unsigned long)nic;
	init_timer(&nic->blink_timer);
	nic->blink_timer.function = e100_blink_led;
	nic->blink_timer.data = (unsigned long)nic;

	INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);

	if((err = e100_alloc(nic))) {
		DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n");
		goto err_out_iounmap;
	}

	if((err = e100_eeprom_load(nic)))
		goto err_out_free;

	e100_phy_init(nic);

	memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);
	memcpy(netdev->perm_addr, nic->eeprom, ETH_ALEN);
	if (!is_valid_ether_addr(netdev->perm_addr)) {
		if (!eeprom_bad_csum_allow) {
			DPRINTK(PROBE, ERR, "Invalid MAC address from "
				"EEPROM, aborting.\n");
			err = -EAGAIN;
			goto err_out_free;
		} else {
			DPRINTK(PROBE, ERR, "Invalid MAC address from EEPROM, "
				"you MUST configure one.\n");
		}
	}

	/* Wol magic packet can be enabled from eeprom */
	if((nic->mac >= mac_82558_D101_A4) &&
	   (nic->eeprom[eeprom_id] & eeprom_id_wol))
		nic->flags |= wol_magic;

	/* ack any pending wake events, disable PME */
	err = pci_enable_wake(pdev, 0, 0);
	if (err)
		DPRINTK(PROBE, ERR, "Error clearing wake event\n");

	strcpy(netdev->name, "eth%d");
	if((err = register_netdev(netdev))) {
		DPRINTK(PROBE, ERR, "Cannot register net device, aborting.\n");
		goto err_out_free;
	}

	DPRINTK(PROBE, INFO, "addr 0x%llx, irq %d, "
		"MAC addr %02X:%02X:%02X:%02X:%02X:%02X\n",
		(unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0), pdev->irq,
		netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
		netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]);

	return 0;

err_out_free:
	e100_free(nic);
err_out_iounmap:
	pci_iounmap(pdev, nic->csr);
err_out_free_res:
	pci_release_regions(pdev);
err_out_disable_pdev:
	pci_disable_device(pdev);
err_out_free_dev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
	return err;
}

static void __devexit e100_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	if(netdev) {
		struct nic *nic = netdev_priv(netdev);
		unregister_netdev(netdev);
		e100_free(nic);
		iounmap(nic->csr);
		free_netdev(netdev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}

#ifdef CONFIG_PM
static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	if (netif_running(netdev))
		netif_poll_disable(nic->netdev);
	del_timer_sync(&nic->watchdog);
	netif_carrier_off(nic->netdev);
	netif_device_detach(netdev);

	pci_save_state(pdev);

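	/* Arm PME for both D3hot and D3cold whenever Wake-on-LAN magic
	 * packets or ASF manageability are active, so the part can still
	 * wake the system; otherwise leave wake events disabled. */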
	if ((nic->flags & wol_magic) | e100_asf(nic)) {
		pci_enable_wake(pdev, PCI_D3hot, 1);
		pci_enable_wake(pdev, PCI_D3cold, 1);
	} else {
		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);
	}

	pci_disable_device(pdev);
	free_irq(pdev->irq, netdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}

static int e100_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, 0, 0);

	netif_device_attach(netdev);
	if (netif_running(netdev))
		e100_up(nic);

	return 0;
}
#endif /* CONFIG_PM */

static void e100_shutdown(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	if (netif_running(netdev))
		netif_poll_disable(nic->netdev);
	del_timer_sync(&nic->watchdog);
	netif_carrier_off(nic->netdev);

	if ((nic->flags & wol_magic) | e100_asf(nic)) {
		pci_enable_wake(pdev, PCI_D3hot, 1);
		pci_enable_wake(pdev, PCI_D3cold, 1);
	} else {
		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);
	}

	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
}

/* ------------------ PCI Error Recovery infrastructure -------------- */
/**
 * e100_io_error_detected - called when PCI error is detected.
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 */
static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	/* Similar to calling e100_down(), but avoids adapter I/O. */
	netdev->stop(netdev);

	/* Detach; put netif into a state similar to hotplug unplug. */
	netif_poll_enable(netdev);
	netif_device_detach(netdev);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * e100_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch.
 */
static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	if (pci_enable_device(pdev)) {
		printk(KERN_ERR "e100: Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	/* Only one device per card can do a reset */
	if (0 != PCI_FUNC(pdev->devfn))
		return PCI_ERS_RESULT_RECOVERED;
	e100_hw_reset(nic);
	e100_phy_init(nic);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * e100_io_resume - resume normal operations
 * @pdev: Pointer to PCI device
 *
 * Resume normal operations after an error recovery
 * sequence has been completed.
 */
static void e100_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, 0, 0);

	netif_device_attach(netdev);
	if (netif_running(netdev)) {
		e100_open(netdev);
		mod_timer(&nic->watchdog, jiffies);
	}
}

static struct pci_error_handlers e100_err_handler = {
	.error_detected = e100_io_error_detected,
	.slot_reset = e100_io_slot_reset,
	.resume = e100_io_resume,
};

static struct pci_driver e100_driver = {
	.name = DRV_NAME,
	.id_table = e100_id_table,
	.probe = e100_probe,
	.remove = __devexit_p(e100_remove),
#ifdef CONFIG_PM
	/* Power Management hooks */
	.suspend = e100_suspend,
	.resume = e100_resume,
#endif
	.shutdown = e100_shutdown,
	.err_handler = &e100_err_handler,
};

static int __init e100_init_module(void)
{
	if(((1 << debug) - 1) & NETIF_MSG_DRV) {
		printk(KERN_INFO PFX "%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
		printk(KERN_INFO PFX "%s\n", DRV_COPYRIGHT);
	}
	return pci_register_driver(&e100_driver);
}

static void __exit e100_cleanup_module(void)
{
	pci_unregister_driver(&e100_driver);
}

module_init(e100_init_module);
module_exit(e100_cleanup_module);