ehea: fix for dlpar support
[linux-2.6-block.git] / drivers / net / e100.c
CommitLineData
1da177e4
LT
1/*******************************************************************************
2
0abb6eb1
AK
3 Intel PRO/100 Linux driver
4 Copyright(c) 1999 - 2006 Intel Corporation.
05479938
JB
5
6 This program is free software; you can redistribute it and/or modify it
0abb6eb1
AK
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
05479938 9
0abb6eb1 10 This program is distributed in the hope it will be useful, but WITHOUT
05479938
JB
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
1da177e4 13 more details.
05479938 14
1da177e4 15 You should have received a copy of the GNU General Public License along with
0abb6eb1
AK
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
05479938 18
0abb6eb1
AK
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
05479938 21
1da177e4
LT
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
0abb6eb1 24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
1da177e4
LT
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29/*
30 * e100.c: Intel(R) PRO/100 ethernet driver
31 *
32 * (Re)written 2003 by scott.feldman@intel.com. Based loosely on
33 * original e100 driver, but better described as a munging of
34 * e100, e1000, eepro100, tg3, 8139cp, and other drivers.
35 *
36 * References:
37 * Intel 8255x 10/100 Mbps Ethernet Controller Family,
38 * Open Source Software Developers Manual,
39 * http://sourceforge.net/projects/e1000
40 *
41 *
42 * Theory of Operation
43 *
44 * I. General
45 *
46 * The driver supports Intel(R) 10/100 Mbps PCI Fast Ethernet
47 * controller family, which includes the 82557, 82558, 82559, 82550,
48 * 82551, and 82562 devices. 82558 and greater controllers
49 * integrate the Intel 82555 PHY. The controllers are used in
50 * server and client network interface cards, as well as in
51 * LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx
52 * configurations. 8255x supports a 32-bit linear addressing
53 * mode and operates at 33Mhz PCI clock rate.
54 *
55 * II. Driver Operation
56 *
57 * Memory-mapped mode is used exclusively to access the device's
58 * shared-memory structure, the Control/Status Registers (CSR). All
59 * setup, configuration, and control of the device, including queuing
60 * of Tx, Rx, and configuration commands is through the CSR.
61 * cmd_lock serializes accesses to the CSR command register. cb_lock
62 * protects the shared Command Block List (CBL).
63 *
64 * 8255x is highly MII-compliant and all access to the PHY go
65 * through the Management Data Interface (MDI). Consequently, the
66 * driver leverages the mii.c library shared with other MII-compliant
67 * devices.
68 *
69 * Big- and Little-Endian byte order as well as 32- and 64-bit
70 * archs are supported. Weak-ordered memory and non-cache-coherent
71 * archs are supported.
72 *
73 * III. Transmit
74 *
75 * A Tx skb is mapped and hangs off of a TCB. TCBs are linked
76 * together in a fixed-size ring (CBL) thus forming the flexible mode
77 * memory structure. A TCB marked with the suspend-bit indicates
78 * the end of the ring. The last TCB processed suspends the
 79 * controller, and the controller can be restarted by issuing a CU
80 * resume command to continue from the suspend point, or a CU start
81 * command to start at a given position in the ring.
82 *
83 * Non-Tx commands (config, multicast setup, etc) are linked
84 * into the CBL ring along with Tx commands. The common structure
85 * used for both Tx and non-Tx commands is the Command Block (CB).
86 *
87 * cb_to_use is the next CB to use for queuing a command; cb_to_clean
88 * is the next CB to check for completion; cb_to_send is the first
89 * CB to start on in case of a previous failure to resume. CB clean
90 * up happens in interrupt context in response to a CU interrupt.
91 * cbs_avail keeps track of number of free CB resources available.
92 *
93 * Hardware padding of short packets to minimum packet size is
94 * enabled. 82557 pads with 7Eh, while the later controllers pad
95 * with 00h.
96 *
 97 * IV. Receive
98 *
99 * The Receive Frame Area (RFA) comprises a ring of Receive Frame
100 * Descriptors (RFD) + data buffer, thus forming the simplified mode
101 * memory structure. Rx skbs are allocated to contain both the RFD
102 * and the data buffer, but the RFD is pulled off before the skb is
103 * indicated. The data buffer is aligned such that encapsulated
104 * protocol headers are u32-aligned. Since the RFD is part of the
105 * mapped shared memory, and completion status is contained within
106 * the RFD, the RFD must be dma_sync'ed to maintain a consistent
107 * view from software and hardware.
108 *
109 * Under typical operation, the receive unit (RU) is start once,
110 * and the controller happily fills RFDs as frames arrive. If
111 * replacement RFDs cannot be allocated, or the RU goes non-active,
112 * the RU must be restarted. Frame arrival generates an interrupt,
113 * and Rx indication and re-allocation happen in the same context,
114 * therefore no locking is required. A software-generated interrupt
115 * is generated from the watchdog to recover from a failed allocation
 116 * scenario where all Rx resources have been indicated and none re-
117 * placed.
118 *
119 * V. Miscellaneous
120 *
121 * VLAN offloading of tagging, stripping and filtering is not
122 * supported, but driver will accommodate the extra 4-byte VLAN tag
123 * for processing by upper layers. Tx/Rx Checksum offloading is not
124 * supported. Tx Scatter/Gather is not supported. Jumbo Frames is
125 * not supported (hardware limitation).
126 *
127 * MagicPacket(tm) WoL support is enabled/disabled via ethtool.
128 *
129 * Thanks to JC (jchapman@katalix.com) for helping with
130 * testing/troubleshooting the development driver.
131 *
132 * TODO:
133 * o several entry points race with dev->close
134 * o check for tx-no-resources/stop Q races with tx clean/wake Q
ac7c6669
OM
135 *
136 * FIXES:
137 * 2005/12/02 - Michael O'Donnell <Michael.ODonnell at stratus dot com>
138 * - Stratus87247: protect MDI control register manipulations
1da177e4
LT
139 */
140
1da177e4
LT
141#include <linux/module.h>
142#include <linux/moduleparam.h>
143#include <linux/kernel.h>
144#include <linux/types.h>
145#include <linux/slab.h>
146#include <linux/delay.h>
147#include <linux/init.h>
148#include <linux/pci.h>
1e7f0bd8 149#include <linux/dma-mapping.h>
1da177e4
LT
150#include <linux/netdevice.h>
151#include <linux/etherdevice.h>
152#include <linux/mii.h>
153#include <linux/if_vlan.h>
154#include <linux/skbuff.h>
155#include <linux/ethtool.h>
156#include <linux/string.h>
157#include <asm/unaligned.h>
158
159
160#define DRV_NAME "e100"
4e1dc97d 161#define DRV_EXT "-NAPI"
76ddb3fd 162#define DRV_VERSION "3.5.17-k2"DRV_EXT
1da177e4 163#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver"
4e1dc97d 164#define DRV_COPYRIGHT "Copyright(c) 1999-2006 Intel Corporation"
1da177e4
LT
165#define PFX DRV_NAME ": "
166
167#define E100_WATCHDOG_PERIOD (2 * HZ)
168#define E100_NAPI_WEIGHT 16
169
170MODULE_DESCRIPTION(DRV_DESCRIPTION);
171MODULE_AUTHOR(DRV_COPYRIGHT);
172MODULE_LICENSE("GPL");
173MODULE_VERSION(DRV_VERSION);
174
175static int debug = 3;
8fb6f732 176static int eeprom_bad_csum_allow = 0;
1da177e4 177module_param(debug, int, 0);
8fb6f732 178module_param(eeprom_bad_csum_allow, int, 0);
1da177e4 179MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
8fb6f732 180MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
1da177e4
LT
181#define DPRINTK(nlevel, klevel, fmt, args...) \
182 (void)((NETIF_MSG_##nlevel & nic->msg_enable) && \
183 printk(KERN_##klevel PFX "%s: %s: " fmt, nic->netdev->name, \
184 __FUNCTION__ , ## args))
185
186#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
187 PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
188 PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich }
189static struct pci_device_id e100_id_table[] = {
190 INTEL_8255X_ETHERNET_DEVICE(0x1029, 0),
191 INTEL_8255X_ETHERNET_DEVICE(0x1030, 0),
192 INTEL_8255X_ETHERNET_DEVICE(0x1031, 3),
193 INTEL_8255X_ETHERNET_DEVICE(0x1032, 3),
194 INTEL_8255X_ETHERNET_DEVICE(0x1033, 3),
195 INTEL_8255X_ETHERNET_DEVICE(0x1034, 3),
196 INTEL_8255X_ETHERNET_DEVICE(0x1038, 3),
197 INTEL_8255X_ETHERNET_DEVICE(0x1039, 4),
198 INTEL_8255X_ETHERNET_DEVICE(0x103A, 4),
199 INTEL_8255X_ETHERNET_DEVICE(0x103B, 4),
200 INTEL_8255X_ETHERNET_DEVICE(0x103C, 4),
201 INTEL_8255X_ETHERNET_DEVICE(0x103D, 4),
202 INTEL_8255X_ETHERNET_DEVICE(0x103E, 4),
203 INTEL_8255X_ETHERNET_DEVICE(0x1050, 5),
204 INTEL_8255X_ETHERNET_DEVICE(0x1051, 5),
205 INTEL_8255X_ETHERNET_DEVICE(0x1052, 5),
206 INTEL_8255X_ETHERNET_DEVICE(0x1053, 5),
207 INTEL_8255X_ETHERNET_DEVICE(0x1054, 5),
208 INTEL_8255X_ETHERNET_DEVICE(0x1055, 5),
209 INTEL_8255X_ETHERNET_DEVICE(0x1056, 5),
210 INTEL_8255X_ETHERNET_DEVICE(0x1057, 5),
211 INTEL_8255X_ETHERNET_DEVICE(0x1059, 0),
212 INTEL_8255X_ETHERNET_DEVICE(0x1064, 6),
213 INTEL_8255X_ETHERNET_DEVICE(0x1065, 6),
214 INTEL_8255X_ETHERNET_DEVICE(0x1066, 6),
215 INTEL_8255X_ETHERNET_DEVICE(0x1067, 6),
216 INTEL_8255X_ETHERNET_DEVICE(0x1068, 6),
217 INTEL_8255X_ETHERNET_DEVICE(0x1069, 6),
218 INTEL_8255X_ETHERNET_DEVICE(0x106A, 6),
219 INTEL_8255X_ETHERNET_DEVICE(0x106B, 6),
042e2fb7
MC
220 INTEL_8255X_ETHERNET_DEVICE(0x1091, 7),
221 INTEL_8255X_ETHERNET_DEVICE(0x1092, 7),
222 INTEL_8255X_ETHERNET_DEVICE(0x1093, 7),
223 INTEL_8255X_ETHERNET_DEVICE(0x1094, 7),
224 INTEL_8255X_ETHERNET_DEVICE(0x1095, 7),
1da177e4
LT
225 INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
226 INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
227 INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
228 INTEL_8255X_ETHERNET_DEVICE(0x2459, 2),
229 INTEL_8255X_ETHERNET_DEVICE(0x245D, 2),
042e2fb7 230 INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7),
1da177e4
LT
231 { 0, }
232};
233MODULE_DEVICE_TABLE(pci, e100_id_table);
234
/* MAC type, encoded as the PCI revision ID (see e100_get_defaults()). */
enum mac {
	mac_82557_D100_A  = 0,
	mac_82557_D100_B  = 1,
	mac_82557_D100_C  = 2,
	mac_82558_D101_A4 = 4,
	mac_82558_D101_B0 = 5,
	mac_82559_D101M   = 8,
	mac_82559_D101S   = 9,
	mac_82550_D102    = 12,
	mac_82550_D102_C  = 13,
	mac_82551_E       = 14,
	mac_82551_F       = 15,
	mac_82551_10      = 16,
	mac_unknown       = 0xFF,
};

/* PHY identifiers as read from the MII ID registers. */
enum phy {
	phy_100a     = 0x000003E0,
	phy_100c     = 0x035002A8,
	phy_82555_tx = 0x015002A8,
	phy_nsc_tx   = 0x5C002000,
	phy_82562_et = 0x033002A8,
	phy_82562_em = 0x032002A8,
	phy_82562_ek = 0x031002A8,
	phy_82562_eh = 0x017002A8,
	phy_unknown  = 0xFFFFFFFF,
};
262
263/* CSR (Control/Status Registers) */
264struct csr {
265 struct {
266 u8 status;
267 u8 stat_ack;
268 u8 cmd_lo;
269 u8 cmd_hi;
270 u32 gen_ptr;
271 } scb;
272 u32 port;
273 u16 flash_ctrl;
274 u8 eeprom_ctrl_lo;
275 u8 eeprom_ctrl_hi;
276 u32 mdi_ctrl;
277 u32 rx_dma_count;
278};
279
/* SCB status byte: receive unit state. */
enum scb_status {
	rus_ready = 0x10,
	rus_mask  = 0x3C,
};

/* SCB stat/ack byte: interrupt causes, acknowledged by writing back. */
enum scb_stat_ack {
	stat_ack_not_ours    = 0x00,
	stat_ack_sw_gen      = 0x04,
	stat_ack_rnr         = 0x10,
	stat_ack_cu_idle     = 0x20,
	stat_ack_frame_rx    = 0x40,
	stat_ack_cu_cmd_done = 0x80,
	stat_ack_not_present = 0xFF,
	stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx),
	stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done),
};

/* SCB command, high byte: interrupt mask control. */
enum scb_cmd_hi {
	irq_mask_none = 0x00,
	irq_mask_all  = 0x01,
	irq_sw_gen    = 0x02,
};

/* SCB command, low byte: CU (command unit) and RU (receive unit) ops. */
enum scb_cmd_lo {
	cuc_nop        = 0x00,
	ruc_start      = 0x01,
	ruc_load_base  = 0x06,
	cuc_start      = 0x10,
	cuc_resume     = 0x20,
	cuc_dump_addr  = 0x40,
	cuc_dump_stats = 0x50,
	cuc_load_base  = 0x60,
	cuc_dump_reset = 0x70,
};

/* Completion codes written by the device after a stats dump. */
enum cuc_dump {
	cuc_dump_complete       = 0x0000A005,
	cuc_dump_reset_complete = 0x0000A007,
};
05479938 319
1da177e4
LT
/* PORT register commands. */
enum port {
	software_reset  = 0x0000,
	selftest        = 0x0001,
	selective_reset = 0x0002,
};

/* Bit-bang lines in the low EEPROM control register. */
enum eeprom_ctrl_lo {
	eesk = 0x01,	/* serial clock */
	eecs = 0x02,	/* chip select */
	eedi = 0x04,	/* data in (to EEPROM) */
	eedo = 0x08,	/* data out (from EEPROM) */
};

/* MDI control register bits. */
enum mdi_ctrl {
	mdi_write = 0x04000000,
	mdi_read  = 0x08000000,
	mdi_ready = 0x10000000,
};

/* EEPROM serial opcodes. */
enum eeprom_op {
	op_write = 0x05,
	op_read  = 0x06,
	op_ewds  = 0x10,	/* write/erase disable */
	op_ewen  = 0x13,	/* write/erase enable */
};

/* Word offsets of interest within the EEPROM image. */
enum eeprom_offsets {
	eeprom_cnfg_mdix  = 0x03,
	eeprom_id         = 0x0A,
	eeprom_config_asf = 0x0D,
	eeprom_smbus_addr = 0x90,
};

enum eeprom_cnfg_mdix {
	eeprom_mdix_enabled = 0x0080,
};

enum eeprom_id {
	eeprom_id_wol = 0x0020,
};

enum eeprom_config_asf {
	eeprom_asf = 0x8000,
	eeprom_gcl = 0x4000,
};
365
/* Command block completion status bits. */
enum cb_status {
	cb_complete = 0x8000,
	cb_ok       = 0x2000,
};

/* Command block opcodes and control bits. */
enum cb_command {
	cb_nop    = 0x0000,
	cb_iaaddr = 0x0001,
	cb_config = 0x0002,
	cb_multi  = 0x0003,
	cb_tx     = 0x0004,
	cb_ucode  = 0x0005,
	cb_dump   = 0x0006,
	cb_tx_sf  = 0x0008,
	cb_cid    = 0x1f00,
	cb_i      = 0x2000,	/* generate interrupt on completion */
	cb_s      = 0x4000,	/* suspend CU after this block */
	cb_el     = 0x8000,	/* end of list */
};
385
386struct rfd {
387 u16 status;
388 u16 command;
389 u32 link;
390 u32 rbd;
391 u16 actual_size;
392 u16 size;
393};
394
395struct rx {
396 struct rx *next, *prev;
397 struct sk_buff *skb;
398 dma_addr_t dma_addr;
399};
400
401#if defined(__BIG_ENDIAN_BITFIELD)
402#define X(a,b) b,a
403#else
404#define X(a,b) a,b
405#endif
406struct config {
407/*0*/ u8 X(byte_count:6, pad0:2);
408/*1*/ u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1);
409/*2*/ u8 adaptive_ifs;
410/*3*/ u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1),
411 term_write_cache_line:1), pad3:4);
412/*4*/ u8 X(rx_dma_max_count:7, pad4:1);
413/*5*/ u8 X(tx_dma_max_count:7, dma_max_count_enable:1);
414/*6*/ u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1),
415 tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1),
416 rx_discard_overruns:1), rx_save_bad_frames:1);
417/*7*/ u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2),
418 pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1),
419 tx_dynamic_tbd:1);
420/*8*/ u8 X(X(mii_mode:1, pad8:6), csma_disabled:1);
421/*9*/ u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1),
422 link_status_wake:1), arp_wake:1), mcmatch_wake:1);
423/*10*/ u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2),
424 loopback:2);
425/*11*/ u8 X(linear_priority:3, pad11:5);
426/*12*/ u8 X(X(linear_priority_mode:1, pad12:3), ifs:4);
427/*13*/ u8 ip_addr_lo;
428/*14*/ u8 ip_addr_hi;
429/*15*/ u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1),
430 wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1),
431 pad15_2:1), crs_or_cdt:1);
432/*16*/ u8 fc_delay_lo;
433/*17*/ u8 fc_delay_hi;
434/*18*/ u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1),
435 rx_long_ok:1), fc_priority_threshold:3), pad18:1);
436/*19*/ u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1),
437 fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1),
438 full_duplex_force:1), full_duplex_pin:1);
439/*20*/ u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1);
440/*21*/ u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4);
441/*22*/ u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6);
442 u8 pad_d102[9];
443};
444
445#define E100_MAX_MULTICAST_ADDRS 64
446struct multi {
447 u16 count;
448 u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/];
449};
450
451/* Important: keep total struct u32-aligned */
452#define UCODE_SIZE 134
453struct cb {
454 u16 status;
455 u16 command;
456 u32 link;
457 union {
458 u8 iaaddr[ETH_ALEN];
459 u32 ucode[UCODE_SIZE];
460 struct config config;
461 struct multi multi;
462 struct {
463 u32 tbd_array;
464 u16 tcb_byte_count;
465 u8 threshold;
466 u8 tbd_count;
467 struct {
468 u32 buf_addr;
469 u16 size;
470 u16 eol;
471 } tbd;
472 } tcb;
473 u32 dump_buffer_addr;
474 } u;
475 struct cb *next, *prev;
476 dma_addr_t dma_addr;
477 struct sk_buff *skb;
478};
479
480enum loopback {
481 lb_none = 0, lb_mac = 1, lb_phy = 3,
482};
483
484struct stats {
485 u32 tx_good_frames, tx_max_collisions, tx_late_collisions,
486 tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions,
487 tx_multiple_collisions, tx_total_collisions;
488 u32 rx_good_frames, rx_crc_errors, rx_alignment_errors,
489 rx_resource_errors, rx_overrun_errors, rx_cdt_errors,
490 rx_short_frame_errors;
491 u32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported;
492 u16 xmt_tco_frames, rcv_tco_frames;
493 u32 complete;
494};
495
496struct mem {
497 struct {
498 u32 signature;
499 u32 result;
500 } selftest;
501 struct stats stats;
502 u8 dump_buf[596];
503};
504
505struct param_range {
506 u32 min;
507 u32 max;
508 u32 count;
509};
510
511struct params {
512 struct param_range rfds;
513 struct param_range cbs;
514};
515
516struct nic {
517 /* Begin: frequently used values: keep adjacent for cache effect */
518 u32 msg_enable ____cacheline_aligned;
519 struct net_device *netdev;
520 struct pci_dev *pdev;
521
522 struct rx *rxs ____cacheline_aligned;
523 struct rx *rx_to_use;
524 struct rx *rx_to_clean;
525 struct rfd blank_rfd;
1da177e4
LT
526
527 spinlock_t cb_lock ____cacheline_aligned;
528 spinlock_t cmd_lock;
529 struct csr __iomem *csr;
530 enum scb_cmd_lo cuc_cmd;
531 unsigned int cbs_avail;
532 struct cb *cbs;
533 struct cb *cb_to_use;
534 struct cb *cb_to_send;
535 struct cb *cb_to_clean;
536 u16 tx_command;
537 /* End: frequently used values: keep adjacent for cache effect */
538
539 enum {
540 ich = (1 << 0),
541 promiscuous = (1 << 1),
542 multicast_all = (1 << 2),
543 wol_magic = (1 << 3),
544 ich_10h_workaround = (1 << 4),
545 } flags ____cacheline_aligned;
546
547 enum mac mac;
548 enum phy phy;
549 struct params params;
550 struct net_device_stats net_stats;
551 struct timer_list watchdog;
552 struct timer_list blink_timer;
553 struct mii_if_info mii;
2acdb1e0 554 struct work_struct tx_timeout_task;
1da177e4
LT
555 enum loopback loopback;
556
557 struct mem *mem;
558 dma_addr_t dma_addr;
559
560 dma_addr_t cbs_dma_addr;
561 u8 adaptive_ifs;
562 u8 tx_threshold;
563 u32 tx_frames;
564 u32 tx_collisions;
565 u32 tx_deferred;
566 u32 tx_single_collisions;
567 u32 tx_multiple_collisions;
568 u32 tx_fc_pause;
569 u32 tx_tco_frames;
570
571 u32 rx_fc_pause;
572 u32 rx_fc_unsupported;
573 u32 rx_tco_frames;
574 u32 rx_over_length_errors;
575
576 u8 rev_id;
577 u16 leds;
578 u16 eeprom_wc;
579 u16 eeprom[256];
ac7c6669 580 spinlock_t mdio_lock;
1da177e4
LT
581};
582
583static inline void e100_write_flush(struct nic *nic)
584{
585 /* Flush previous PCI writes through intermediate bridges
586 * by doing a benign read */
587 (void)readb(&nic->csr->scb.status);
588}
589
858119e1 590static void e100_enable_irq(struct nic *nic)
1da177e4
LT
591{
592 unsigned long flags;
593
594 spin_lock_irqsave(&nic->cmd_lock, flags);
595 writeb(irq_mask_none, &nic->csr->scb.cmd_hi);
1da177e4 596 e100_write_flush(nic);
ad8c48ad 597 spin_unlock_irqrestore(&nic->cmd_lock, flags);
1da177e4
LT
598}
599
858119e1 600static void e100_disable_irq(struct nic *nic)
1da177e4
LT
601{
602 unsigned long flags;
603
604 spin_lock_irqsave(&nic->cmd_lock, flags);
605 writeb(irq_mask_all, &nic->csr->scb.cmd_hi);
1da177e4 606 e100_write_flush(nic);
ad8c48ad 607 spin_unlock_irqrestore(&nic->cmd_lock, flags);
1da177e4
LT
608}
609
610static void e100_hw_reset(struct nic *nic)
611{
612 /* Put CU and RU into idle with a selective reset to get
613 * device off of PCI bus */
614 writel(selective_reset, &nic->csr->port);
615 e100_write_flush(nic); udelay(20);
616
617 /* Now fully reset device */
618 writel(software_reset, &nic->csr->port);
619 e100_write_flush(nic); udelay(20);
620
621 /* Mask off our interrupt line - it's unmasked after reset */
622 e100_disable_irq(nic);
623}
624
625static int e100_self_test(struct nic *nic)
626{
627 u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest);
628
629 /* Passing the self-test is a pretty good indication
630 * that the device can DMA to/from host memory */
631
632 nic->mem->selftest.signature = 0;
633 nic->mem->selftest.result = 0xFFFFFFFF;
634
635 writel(selftest | dma_addr, &nic->csr->port);
636 e100_write_flush(nic);
637 /* Wait 10 msec for self-test to complete */
638 msleep(10);
639
640 /* Interrupts are enabled after self-test */
641 e100_disable_irq(nic);
642
643 /* Check results of self-test */
644 if(nic->mem->selftest.result != 0) {
645 DPRINTK(HW, ERR, "Self-test failed: result=0x%08X\n",
646 nic->mem->selftest.result);
647 return -ETIMEDOUT;
648 }
649 if(nic->mem->selftest.signature == 0) {
650 DPRINTK(HW, ERR, "Self-test failed: timed out\n");
651 return -ETIMEDOUT;
652 }
653
654 return 0;
655}
656
657static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, u16 data)
658{
659 u32 cmd_addr_data[3];
660 u8 ctrl;
661 int i, j;
662
663 /* Three cmds: write/erase enable, write data, write/erase disable */
664 cmd_addr_data[0] = op_ewen << (addr_len - 2);
665 cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) |
666 cpu_to_le16(data);
667 cmd_addr_data[2] = op_ewds << (addr_len - 2);
668
669 /* Bit-bang cmds to write word to eeprom */
670 for(j = 0; j < 3; j++) {
671
672 /* Chip select */
673 writeb(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
674 e100_write_flush(nic); udelay(4);
675
676 for(i = 31; i >= 0; i--) {
677 ctrl = (cmd_addr_data[j] & (1 << i)) ?
678 eecs | eedi : eecs;
679 writeb(ctrl, &nic->csr->eeprom_ctrl_lo);
680 e100_write_flush(nic); udelay(4);
681
682 writeb(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
683 e100_write_flush(nic); udelay(4);
684 }
685 /* Wait 10 msec for cmd to complete */
686 msleep(10);
687
688 /* Chip deselect */
689 writeb(0, &nic->csr->eeprom_ctrl_lo);
690 e100_write_flush(nic); udelay(4);
691 }
692};
693
694/* General technique stolen from the eepro100 driver - very clever */
695static u16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
696{
697 u32 cmd_addr_data;
698 u16 data = 0;
699 u8 ctrl;
700 int i;
701
702 cmd_addr_data = ((op_read << *addr_len) | addr) << 16;
703
704 /* Chip select */
705 writeb(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
706 e100_write_flush(nic); udelay(4);
707
708 /* Bit-bang to read word from eeprom */
709 for(i = 31; i >= 0; i--) {
710 ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs;
711 writeb(ctrl, &nic->csr->eeprom_ctrl_lo);
712 e100_write_flush(nic); udelay(4);
05479938 713
1da177e4
LT
714 writeb(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
715 e100_write_flush(nic); udelay(4);
05479938 716
1da177e4
LT
717 /* Eeprom drives a dummy zero to EEDO after receiving
718 * complete address. Use this to adjust addr_len. */
719 ctrl = readb(&nic->csr->eeprom_ctrl_lo);
720 if(!(ctrl & eedo) && i > 16) {
721 *addr_len -= (i - 16);
722 i = 17;
723 }
05479938 724
1da177e4
LT
725 data = (data << 1) | (ctrl & eedo ? 1 : 0);
726 }
727
728 /* Chip deselect */
729 writeb(0, &nic->csr->eeprom_ctrl_lo);
730 e100_write_flush(nic); udelay(4);
731
732 return le16_to_cpu(data);
733};
734
735/* Load entire EEPROM image into driver cache and validate checksum */
736static int e100_eeprom_load(struct nic *nic)
737{
738 u16 addr, addr_len = 8, checksum = 0;
739
740 /* Try reading with an 8-bit addr len to discover actual addr len */
741 e100_eeprom_read(nic, &addr_len, 0);
742 nic->eeprom_wc = 1 << addr_len;
743
744 for(addr = 0; addr < nic->eeprom_wc; addr++) {
745 nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr);
746 if(addr < nic->eeprom_wc - 1)
747 checksum += cpu_to_le16(nic->eeprom[addr]);
748 }
749
750 /* The checksum, stored in the last word, is calculated such that
751 * the sum of words should be 0xBABA */
752 checksum = le16_to_cpu(0xBABA - checksum);
753 if(checksum != nic->eeprom[nic->eeprom_wc - 1]) {
754 DPRINTK(PROBE, ERR, "EEPROM corrupted\n");
8fb6f732
DM
755 if (!eeprom_bad_csum_allow)
756 return -EAGAIN;
1da177e4
LT
757 }
758
759 return 0;
760}
761
762/* Save (portion of) driver EEPROM cache to device and update checksum */
763static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
764{
765 u16 addr, addr_len = 8, checksum = 0;
766
767 /* Try reading with an 8-bit addr len to discover actual addr len */
768 e100_eeprom_read(nic, &addr_len, 0);
769 nic->eeprom_wc = 1 << addr_len;
770
771 if(start + count >= nic->eeprom_wc)
772 return -EINVAL;
773
774 for(addr = start; addr < start + count; addr++)
775 e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]);
776
777 /* The checksum, stored in the last word, is calculated such that
778 * the sum of words should be 0xBABA */
779 for(addr = 0; addr < nic->eeprom_wc - 1; addr++)
780 checksum += cpu_to_le16(nic->eeprom[addr]);
781 nic->eeprom[nic->eeprom_wc - 1] = le16_to_cpu(0xBABA - checksum);
782 e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1,
783 nic->eeprom[nic->eeprom_wc - 1]);
784
785 return 0;
786}
787
962082b6 788#define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */
e6280f26 789#define E100_WAIT_SCB_FAST 20 /* delay like the old code */
858119e1 790static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
1da177e4
LT
791{
792 unsigned long flags;
793 unsigned int i;
794 int err = 0;
795
796 spin_lock_irqsave(&nic->cmd_lock, flags);
797
798 /* Previous command is accepted when SCB clears */
799 for(i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) {
800 if(likely(!readb(&nic->csr->scb.cmd_lo)))
801 break;
802 cpu_relax();
e6280f26 803 if(unlikely(i > E100_WAIT_SCB_FAST))
1da177e4
LT
804 udelay(5);
805 }
806 if(unlikely(i == E100_WAIT_SCB_TIMEOUT)) {
807 err = -EAGAIN;
808 goto err_unlock;
809 }
810
811 if(unlikely(cmd != cuc_resume))
812 writel(dma_addr, &nic->csr->scb.gen_ptr);
813 writeb(cmd, &nic->csr->scb.cmd_lo);
814
815err_unlock:
816 spin_unlock_irqrestore(&nic->cmd_lock, flags);
817
818 return err;
819}
820
858119e1 821static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
1da177e4
LT
822 void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
823{
824 struct cb *cb;
825 unsigned long flags;
826 int err = 0;
827
828 spin_lock_irqsave(&nic->cb_lock, flags);
829
830 if(unlikely(!nic->cbs_avail)) {
831 err = -ENOMEM;
832 goto err_unlock;
833 }
834
835 cb = nic->cb_to_use;
836 nic->cb_to_use = cb->next;
837 nic->cbs_avail--;
838 cb->skb = skb;
839
840 if(unlikely(!nic->cbs_avail))
841 err = -ENOSPC;
842
843 cb_prepare(nic, cb, skb);
844
845 /* Order is important otherwise we'll be in a race with h/w:
846 * set S-bit in current first, then clear S-bit in previous. */
847 cb->command |= cpu_to_le16(cb_s);
848 wmb();
849 cb->prev->command &= cpu_to_le16(~cb_s);
850
851 while(nic->cb_to_send != nic->cb_to_use) {
852 if(unlikely(e100_exec_cmd(nic, nic->cuc_cmd,
853 nic->cb_to_send->dma_addr))) {
854 /* Ok, here's where things get sticky. It's
855 * possible that we can't schedule the command
856 * because the controller is too busy, so
857 * let's just queue the command and try again
858 * when another command is scheduled. */
962082b6
MC
859 if(err == -ENOSPC) {
860 //request a reset
861 schedule_work(&nic->tx_timeout_task);
862 }
1da177e4
LT
863 break;
864 } else {
865 nic->cuc_cmd = cuc_resume;
866 nic->cb_to_send = nic->cb_to_send->next;
867 }
868 }
869
870err_unlock:
871 spin_unlock_irqrestore(&nic->cb_lock, flags);
872
873 return err;
874}
875
876static u16 mdio_ctrl(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
877{
878 u32 data_out = 0;
879 unsigned int i;
ac7c6669 880 unsigned long flags;
1da177e4 881
ac7c6669
OM
882
883 /*
884 * Stratus87247: we shouldn't be writing the MDI control
885 * register until the Ready bit shows True. Also, since
886 * manipulation of the MDI control registers is a multi-step
887 * procedure it should be done under lock.
888 */
889 spin_lock_irqsave(&nic->mdio_lock, flags);
890 for (i = 100; i; --i) {
891 if (readl(&nic->csr->mdi_ctrl) & mdi_ready)
892 break;
893 udelay(20);
894 }
895 if (unlikely(!i)) {
896 printk("e100.mdio_ctrl(%s) won't go Ready\n",
897 nic->netdev->name );
898 spin_unlock_irqrestore(&nic->mdio_lock, flags);
899 return 0; /* No way to indicate timeout error */
900 }
1da177e4
LT
901 writel((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl);
902
ac7c6669 903 for (i = 0; i < 100; i++) {
1da177e4 904 udelay(20);
ac7c6669 905 if ((data_out = readl(&nic->csr->mdi_ctrl)) & mdi_ready)
1da177e4
LT
906 break;
907 }
ac7c6669 908 spin_unlock_irqrestore(&nic->mdio_lock, flags);
1da177e4
LT
909 DPRINTK(HW, DEBUG,
910 "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
911 dir == mdi_read ? "READ" : "WRITE", addr, reg, data, data_out);
912 return (u16)data_out;
913}
914
915static int mdio_read(struct net_device *netdev, int addr, int reg)
916{
917 return mdio_ctrl(netdev_priv(netdev), addr, mdi_read, reg, 0);
918}
919
920static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
921{
922 mdio_ctrl(netdev_priv(netdev), addr, mdi_write, reg, data);
923}
924
925static void e100_get_defaults(struct nic *nic)
926{
2afecc04
JB
927 struct param_range rfds = { .min = 16, .max = 256, .count = 256 };
928 struct param_range cbs = { .min = 64, .max = 256, .count = 128 };
1da177e4
LT
929
930 pci_read_config_byte(nic->pdev, PCI_REVISION_ID, &nic->rev_id);
931 /* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */
932 nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->rev_id;
933 if(nic->mac == mac_unknown)
934 nic->mac = mac_82557_D100_A;
935
936 nic->params.rfds = rfds;
937 nic->params.cbs = cbs;
938
939 /* Quadwords to DMA into FIFO before starting frame transmit */
940 nic->tx_threshold = 0xE0;
941
962082b6
MC
942 /* no interrupt for every tx completion, delay = 256us if not 557*/
943 nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf |
944 ((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));
1da177e4
LT
945
946 /* Template for a freshly allocated RFD */
d52df4a3 947 nic->blank_rfd.command = cpu_to_le16(cb_el & cb_s);
1da177e4
LT
948 nic->blank_rfd.rbd = 0xFFFFFFFF;
949 nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN);
950
951 /* MII setup */
952 nic->mii.phy_id_mask = 0x1F;
953 nic->mii.reg_num_mask = 0x1F;
954 nic->mii.dev = nic->netdev;
955 nic->mii.mdio_read = mdio_read;
956 nic->mii.mdio_write = mdio_write;
957}
958
/* Prepare a Configure command block (cb_config) that programs the
 * 8255x's operating parameters.  Byte/bit meanings follow the config
 * block layout in the 8255x Open Source Software Developer Manual.
 * Invoked via e100_exec_cb(); @skb is unused for this command type. */
static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	struct config *config = &cb->u.config;
	u8 *c = (u8 *)config;	/* raw byte view for the debug dump below */

	cb->command = cpu_to_le16(cb_config);

	memset(config, 0, sizeof(struct config));

	/* Static defaults */
	config->byte_count = 0x16; /* bytes in this struct */
	config->rx_fifo_limit = 0x8; /* bytes in FIFO before DMA */
	config->direct_rx_dma = 0x1; /* reserved */
	config->standard_tcb = 0x1; /* 1=standard, 0=extended */
	config->standard_stat_counter = 0x1; /* 1=standard, 0=extended */
	config->rx_discard_short_frames = 0x1; /* 1=discard, 0=pass */
	config->tx_underrun_retry = 0x3; /* # of underrun retries */
	config->mii_mode = 0x1; /* 1=MII mode, 0=503 mode */
	config->pad10 = 0x6;
	config->no_source_addr_insertion = 0x1; /* 1=no, 0=yes */
	config->preamble_length = 0x2; /* 0=1, 1=3, 2=7, 3=15 bytes */
	config->ifs = 0x6; /* x16 = inter frame spacing */
	config->ip_addr_hi = 0xF2; /* ARP IP filter - not used */
	config->pad15_1 = 0x1;
	config->pad15_2 = 0x1;
	config->crs_or_cdt = 0x0; /* 0=CRS only, 1=CRS or CDT */
	config->fc_delay_hi = 0x40; /* time delay for fc frame */
	config->tx_padding = 0x1; /* 1=pad short frames */
	config->fc_priority_threshold = 0x7; /* 7=priority fc disabled */
	config->pad18 = 0x1;
	config->full_duplex_pin = 0x1; /* 1=examine FDX# pin */
	config->pad20_1 = 0x1F;
	config->fc_priority_location = 0x1; /* 1=byte#31, 0=byte#19 */
	config->pad21_1 = 0x5;

	/* Runtime-tuned values */
	config->adaptive_ifs = nic->adaptive_ifs;
	config->loopback = nic->loopback;

	if(nic->mii.force_media && nic->mii.full_duplex)
		config->full_duplex_force = 0x1; /* 1=force, 0=auto */

	/* Promiscuous/loopback mode must also see bad and short frames */
	if(nic->flags & promiscuous || nic->loopback) {
		config->rx_save_bad_frames = 0x1; /* 1=save, 0=discard */
		config->rx_discard_short_frames = 0x0; /* 1=discard, 0=save */
		config->promiscuous_mode = 0x1; /* 1=on, 0=off */
	}

	if(nic->flags & multicast_all)
		config->multicast_all = 0x1; /* 1=accept, 0=no */

	/* disable WoL when up */
	if(netif_running(nic->netdev) || !(nic->flags & wol_magic))
		config->magic_packet_disable = 0x1; /* 1=off, 0=on */

	/* Features available from 82558 (D101 A4-step) onward */
	if(nic->mac >= mac_82558_D101_A4) {
		config->fc_disable = 0x1; /* 1=Tx fc off, 0=Tx fc on */
		config->mwi_enable = 0x1; /* 1=enable, 0=disable */
		config->standard_tcb = 0x0; /* 1=standard, 0=extended */
		config->rx_long_ok = 0x1; /* 1=VLANs ok, 0=standard */
		if(nic->mac >= mac_82559_D101M)
			config->tno_intr = 0x1; /* TCO stats enable */
		else
			config->standard_stat_counter = 0x0;
	}

	DPRINTK(HW, DEBUG, "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
	DPRINTK(HW, DEBUG, "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]);
	DPRINTK(HW, DEBUG, "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
}
1030
2afecc04
JB
/********************************************************/
/*  Micro code for 8086:1229 Rev 8                      */
/********************************************************/

/* Parameter values for the D101M B-step */
/* NOTE: the *_DWORD indices below mark the dwords inside the ucode
 * image that e100_setup_ucode() patches with the user-tunable
 * INTDELAY / BUNDLEMAX / BUNDLESMALL values before download. */
#define D101M_CPUSAVER_TIMER_DWORD		78
#define D101M_CPUSAVER_BUNDLE_DWORD		65
#define D101M_CPUSAVER_MIN_SIZE_DWORD		126

#define D101M_B_RCVBUNDLE_UCODE \
{\
0x00550215, 0xFFFF0437, 0xFFFFFFFF, 0x06A70789, 0xFFFFFFFF, 0x0558FFFF, \
0x000C0001, 0x00101312, 0x000C0008, 0x00380216, \
0x0010009C, 0x00204056, 0x002380CC, 0x00380056, \
0x0010009C, 0x00244C0B, 0x00000800, 0x00124818, \
0x00380438, 0x00000000, 0x00140000, 0x00380555, \
0x00308000, 0x00100662, 0x00100561, 0x000E0408, \
0x00134861, 0x000C0002, 0x00103093, 0x00308000, \
0x00100624, 0x00100561, 0x000E0408, 0x00100861, \
0x000C007E, 0x00222C21, 0x000C0002, 0x00103093, \
0x00380C7A, 0x00080000, 0x00103090, 0x00380C7A, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x0010009C, 0x00244C2D, 0x00010004, 0x00041000, \
0x003A0437, 0x00044010, 0x0038078A, 0x00000000, \
0x00100099, 0x00206C7A, 0x0010009C, 0x00244C48, \
0x00130824, 0x000C0001, 0x00101213, 0x00260C75, \
0x00041000, 0x00010004, 0x00130826, 0x000C0006, \
0x002206A8, 0x0013C926, 0x00101313, 0x003806A8, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00080600, 0x00101B10, 0x00050004, 0x00100826, \
0x00101210, 0x00380C34, 0x00000000, 0x00000000, \
0x0021155B, 0x00100099, 0x00206559, 0x0010009C, \
0x00244559, 0x00130836, 0x000C0000, 0x00220C62, \
0x000C0001, 0x00101B13, 0x00229C0E, 0x00210C0E, \
0x00226C0E, 0x00216C0E, 0x0022FC0E, 0x00215C0E, \
0x00214C0E, 0x00380555, 0x00010004, 0x00041000, \
0x00278C67, 0x00040800, 0x00018100, 0x003A0437, \
0x00130826, 0x000C0001, 0x00220559, 0x00101313, \
0x00380559, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00130831, 0x0010090B, 0x00124813, \
0x000CFF80, 0x002606AB, 0x00041000, 0x00010004, \
0x003806A8, 0x00000000, 0x00000000, 0x00000000, \
}

/********************************************************/
/*  Micro code for 8086:1229 Rev 9                      */
/********************************************************/

/* Parameter values for the D101S */
#define D101S_CPUSAVER_TIMER_DWORD		78
#define D101S_CPUSAVER_BUNDLE_DWORD		67
#define D101S_CPUSAVER_MIN_SIZE_DWORD		128

#define D101S_RCVBUNDLE_UCODE \
{\
0x00550242, 0xFFFF047E, 0xFFFFFFFF, 0x06FF0818, 0xFFFFFFFF, 0x05A6FFFF, \
0x000C0001, 0x00101312, 0x000C0008, 0x00380243, \
0x0010009C, 0x00204056, 0x002380D0, 0x00380056, \
0x0010009C, 0x00244F8B, 0x00000800, 0x00124818, \
0x0038047F, 0x00000000, 0x00140000, 0x003805A3, \
0x00308000, 0x00100610, 0x00100561, 0x000E0408, \
0x00134861, 0x000C0002, 0x00103093, 0x00308000, \
0x00100624, 0x00100561, 0x000E0408, 0x00100861, \
0x000C007E, 0x00222FA1, 0x000C0002, 0x00103093, \
0x00380F90, 0x00080000, 0x00103090, 0x00380F90, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x0010009C, 0x00244FAD, 0x00010004, 0x00041000, \
0x003A047E, 0x00044010, 0x00380819, 0x00000000, \
0x00100099, 0x00206FFD, 0x0010009A, 0x0020AFFD, \
0x0010009C, 0x00244FC8, 0x00130824, 0x000C0001, \
0x00101213, 0x00260FF7, 0x00041000, 0x00010004, \
0x00130826, 0x000C0006, 0x00220700, 0x0013C926, \
0x00101313, 0x00380700, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00080600, 0x00101B10, 0x00050004, 0x00100826, \
0x00101210, 0x00380FB6, 0x00000000, 0x00000000, \
0x002115A9, 0x00100099, 0x002065A7, 0x0010009A, \
0x0020A5A7, 0x0010009C, 0x002445A7, 0x00130836, \
0x000C0000, 0x00220FE4, 0x000C0001, 0x00101B13, \
0x00229F8E, 0x00210F8E, 0x00226F8E, 0x00216F8E, \
0x0022FF8E, 0x00215F8E, 0x00214F8E, 0x003805A3, \
0x00010004, 0x00041000, 0x00278FE9, 0x00040800, \
0x00018100, 0x003A047E, 0x00130826, 0x000C0001, \
0x002205A7, 0x00101313, 0x003805A7, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00130831, \
0x0010090B, 0x00124813, 0x000CFF80, 0x00260703, \
0x00041000, 0x00010004, 0x00380700  \
}

/********************************************************/
/*  Micro code for the 8086:1229 Rev F/10               */
/********************************************************/

/* Parameter values for the D102 E-step */
#define D102_E_CPUSAVER_TIMER_DWORD		42
#define D102_E_CPUSAVER_BUNDLE_DWORD		54
#define D102_E_CPUSAVER_MIN_SIZE_DWORD		46

#define D102_E_RCVBUNDLE_UCODE \
{\
0x007D028F, 0x0E4204F9, 0x14ED0C85, 0x14FA14E9, 0x0EF70E36, 0x1FFF1FFF, \
0x00E014B9, 0x00000000, 0x00000000, 0x00000000, \
0x00E014BD, 0x00000000, 0x00000000, 0x00000000, \
0x00E014D5, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00E014C1, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00E014C8, 0x00000000, 0x00000000, 0x00000000, \
0x00200600, 0x00E014EE, 0x00000000, 0x00000000, \
0x0030FF80, 0x00940E46, 0x00038200, 0x00102000, \
0x00E00E43, 0x00000000, 0x00000000, 0x00000000, \
0x00300006, 0x00E014FB, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00906E41, 0x00800E3C, 0x00E00E39, 0x00000000, \
0x00906EFD, 0x00900EFD, 0x00E00EF8, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
}
1168
/* cb_prepare callback: load the CPUSaver (receive-interrupt bundling)
 * microcode image matching this MAC revision into the CB.  ICH parts
 * and unknown revisions get a NOP command instead, so the caller's
 * completion wait in e100_exec_cb_wait() still terminates. */
static void e100_setup_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
/* *INDENT-OFF* */
	static struct {
		u32 ucode[UCODE_SIZE + 1];
		u8 mac;
		u8 timer_dword;
		u8 bundle_dword;
		u8 min_size_dword;
	} ucode_opts[] = {
		{ D101M_B_RCVBUNDLE_UCODE,
		  mac_82559_D101M,
		  D101M_CPUSAVER_TIMER_DWORD,
		  D101M_CPUSAVER_BUNDLE_DWORD,
		  D101M_CPUSAVER_MIN_SIZE_DWORD },
		{ D101S_RCVBUNDLE_UCODE,
		  mac_82559_D101S,
		  D101S_CPUSAVER_TIMER_DWORD,
		  D101S_CPUSAVER_BUNDLE_DWORD,
		  D101S_CPUSAVER_MIN_SIZE_DWORD },
		{ D102_E_RCVBUNDLE_UCODE,
		  mac_82551_F,
		  D102_E_CPUSAVER_TIMER_DWORD,
		  D102_E_CPUSAVER_BUNDLE_DWORD,
		  D102_E_CPUSAVER_MIN_SIZE_DWORD },
		{ D102_E_RCVBUNDLE_UCODE,
		  mac_82551_10,
		  D102_E_CPUSAVER_TIMER_DWORD,
		  D102_E_CPUSAVER_BUNDLE_DWORD,
		  D102_E_CPUSAVER_MIN_SIZE_DWORD },
		{ {0}, 0, 0, 0, 0}
	}, *opts;
/* *INDENT-ON* */

/*************************************************************************
*  CPUSaver parameters
*
*  All CPUSaver parameters are 16-bit literals that are part of a
*  "move immediate value" instruction.  By changing the value of
*  the literal in the instruction before the code is loaded, the
*  driver can change the algorithm.
*
*  INTDELAY - This loads the dead-man timer with its initial value.
*    When this timer expires the interrupt is asserted, and the
*    timer is reset each time a new packet is received.  (see
*    BUNDLEMAX below to set the limit on number of chained packets)
*    The current default is 0x600 or 1536.  Experiments show that
*    the value should probably stay within the 0x200 - 0x1000.
*
*  BUNDLEMAX -
*    This sets the maximum number of frames that will be bundled.  In
*    some situations, such as the TCP windowing algorithm, it may be
*    better to limit the growth of the bundle size than let it go as
*    high as it can, because that could cause too much added latency.
*    The default is six, because this is the number of packets in the
*    default TCP window size.  A value of 1 would make CPUSaver indicate
*    an interrupt for every frame received.  If you do not want to put
*    a limit on the bundle size, set this value to xFFFF.
*
*  BUNDLESMALL -
*    This contains a bit-mask describing the minimum size frame that
*    will be bundled.  The default masks the lower 7 bits, which means
*    that any frame less than 128 bytes in length will not be bundled,
*    but will instead immediately generate an interrupt.  This does
*    not affect the current bundle in any way.  Any frame that is 128
*    bytes or large will be bundled normally.  This feature is meant
*    to provide immediate indication of ACK frames in a TCP environment.
*    Customers were seeing poor performance when a machine with CPUSaver
*    enabled was sending but not receiving.  The delay introduced when
*    the ACKs were received was enough to reduce total throughput, because
*    the sender would sit idle until the ACK was finally seen.
*
*    The current default is 0xFF80, which masks out the lower 7 bits.
*    This means that any frame which is x7F (127) bytes or smaller
*    will cause an immediate interrupt.  Because this value must be a
*    bit mask, there are only a few valid values that can be used.  To
*    turn this feature off, the driver can write the value xFFFF to the
*    lower word of this instruction (in the same way that the other
*    parameters are used).  Likewise, a value of 0xF800 (2047) would
*    cause an interrupt to be generated for every frame, because all
*    standard Ethernet frames are <= 2047 bytes in length.
*************************************************************************/

/* if you wish to disable the ucode functionality, while maintaining the
 * workarounds it provides, set the following defines to:
 * BUNDLESMALL 0
 * BUNDLEMAX 1
 * INTDELAY 1
 */
#define BUNDLESMALL 1
#define BUNDLEMAX (u16)6
#define INTDELAY (u16)1536 /* 0x600 */

	/* do not load u-code for ICH devices */
	if (nic->flags & ich)
		goto noloaducode;

	/* Search for ucode match against h/w rev_id */
	for (opts = ucode_opts; opts->mac; opts++) {
		int i;
		u32 *ucode = opts->ucode;
		if (nic->mac != opts->mac)
			continue;

		/* Insert user-tunable settings into the image's
		 * "move immediate" literal slots (see table above) */
		ucode[opts->timer_dword] &= 0xFFFF0000;
		ucode[opts->timer_dword] |= INTDELAY;
		ucode[opts->bundle_dword] &= 0xFFFF0000;
		ucode[opts->bundle_dword] |= BUNDLEMAX;
		ucode[opts->min_size_dword] &= 0xFFFF0000;
		ucode[opts->min_size_dword] |= (BUNDLESMALL) ? 0xFFFF : 0xFF80;

		for (i = 0; i < UCODE_SIZE; i++)
			cb->u.ucode[i] = cpu_to_le32(ucode[i]);
		cb->command = cpu_to_le16(cb_ucode | cb_el);
		return;
	}

noloaducode:
	cb->command = cpu_to_le16(cb_nop | cb_el);
}
1290
1291static inline int e100_exec_cb_wait(struct nic *nic, struct sk_buff *skb,
1292 void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
1293{
1294 int err = 0, counter = 50;
1295 struct cb *cb = nic->cb_to_clean;
1296
1297 if ((err = e100_exec_cb(nic, NULL, e100_setup_ucode)))
1298 DPRINTK(PROBE,ERR, "ucode cmd failed with error %d\n", err);
05479938 1299
24180333
JB
1300 /* must restart cuc */
1301 nic->cuc_cmd = cuc_start;
1302
1303 /* wait for completion */
1304 e100_write_flush(nic);
1305 udelay(10);
1306
1307 /* wait for possibly (ouch) 500ms */
1308 while (!(cb->status & cpu_to_le16(cb_complete))) {
1309 msleep(10);
1310 if (!--counter) break;
1311 }
05479938 1312
24180333
JB
1313 /* ack any interupts, something could have been set */
1314 writeb(~0, &nic->csr->scb.stat_ack);
1315
1316 /* if the command failed, or is not OK, notify and return */
1317 if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
1318 DPRINTK(PROBE,ERR, "ucode load failed\n");
1319 err = -EPERM;
1320 }
05479938 1321
24180333 1322 return err;
1da177e4
LT
1323}
1324
1325static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
1326 struct sk_buff *skb)
1327{
1328 cb->command = cpu_to_le16(cb_iaaddr);
1329 memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
1330}
1331
1332static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1333{
1334 cb->command = cpu_to_le16(cb_dump);
1335 cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
1336 offsetof(struct mem, dump_buf));
1337}
1338
#define NCONFIG_AUTO_SWITCH	0x0080
#define MII_NSC_CONG		MII_RESV1
#define NSC_CONG_ENABLE		0x0100
#define NSC_CONG_TXREADY	0x0400
#define ADVERTISE_FC_SUPPORTED	0x0400
/* Probe for the PHY, isolate all others, and apply per-PHY quirks.
 * Returns 0 on success or -EAGAIN if no PHY responds. */
static int e100_phy_init(struct nic *nic)
{
	struct net_device *netdev = nic->netdev;
	u32 addr;
	u16 bmcr, stat, id_lo, id_hi, cong;

	/* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
	for(addr = 0; addr < 32; addr++) {
		nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		/* BMSR is read twice; NOTE(review): presumably because
		 * link status is latched and the second read returns the
		 * current state -- standard MII behavior, confirm. */
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		/* All-ones BMCR or all-zero BMCR+BMSR means nothing is
		 * answering at this address */
		if(!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
			break;
	}
	DPRINTK(HW, DEBUG, "phy_addr = %d\n", nic->mii.phy_id);
	if(addr == 32)
		return -EAGAIN;

	/* Selected the phy and isolate the rest */
	for(addr = 0; addr < 32; addr++) {
		if(addr != nic->mii.phy_id) {
			mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
		} else {
			bmcr = mdio_read(netdev, addr, MII_BMCR);
			mdio_write(netdev, addr, MII_BMCR,
				bmcr & ~BMCR_ISOLATE);
		}
	}

	/* Get phy ID */
	id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
	id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2);
	nic->phy = (u32)id_hi << 16 | (u32)id_lo;
	DPRINTK(HW, DEBUG, "phy ID = 0x%08X\n", nic->phy);

	/* Handle National tx phys */
#define NCS_PHY_MODEL_MASK	0xFFF0FFFF
	if((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) {
		/* Disable congestion control */
		cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG);
		cong |= NSC_CONG_TXREADY;
		cong &= ~NSC_CONG_ENABLE;
		mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
	}

	if((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
	   (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
	   !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
		/* enable/disable MDI/MDI-X auto-switching. */
		mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
			nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
	}

	return 0;
}
1400
/* Bring the controller from reset to an operational (but IRQ-masked)
 * state: self-test, PHY setup, CU/RU base loads, microcode download,
 * configuration, station address, and stats-dump setup.  Returns 0 or
 * the first failing step's error code. */
static int e100_hw_init(struct nic *nic)
{
	int err;

	e100_hw_reset(nic);

	DPRINTK(HW, ERR, "e100_hw_init\n");
	/* Self-test sleeps, so skip it when called from atomic context */
	if(!in_interrupt() && (err = e100_self_test(nic)))
		return err;

	if((err = e100_phy_init(nic)))
		return err;
	/* Load 0 as the base so cb/rfd link fields are absolute bus
	 * addresses */
	if((err = e100_exec_cmd(nic, cuc_load_base, 0)))
		return err;
	if((err = e100_exec_cmd(nic, ruc_load_base, 0)))
		return err;
	/* Microcode load must complete before we continue, hence the
	 * _wait variant */
	if ((err = e100_exec_cb_wait(nic, NULL, e100_setup_ucode)))
		return err;
	if((err = e100_exec_cb(nic, NULL, e100_configure)))
		return err;
	if((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr)))
		return err;
	/* Point the device at the DMA-visible stats block and arm the
	 * dump/reset cycle that e100_update_stats() polls */
	if((err = e100_exec_cmd(nic, cuc_dump_addr,
		nic->dma_addr + offsetof(struct mem, stats))))
		return err;
	if((err = e100_exec_cmd(nic, cuc_dump_reset, 0)))
		return err;

	e100_disable_irq(nic);

	return 0;
}
1433
1434static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1435{
1436 struct net_device *netdev = nic->netdev;
1437 struct dev_mc_list *list = netdev->mc_list;
1438 u16 i, count = min(netdev->mc_count, E100_MAX_MULTICAST_ADDRS);
1439
1440 cb->command = cpu_to_le16(cb_multi);
1441 cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
1442 for(i = 0; list && i < count; i++, list = list->next)
1443 memcpy(&cb->u.multi.addr[i*ETH_ALEN], &list->dmi_addr,
1444 ETH_ALEN);
1445}
1446
1447static void e100_set_multicast_list(struct net_device *netdev)
1448{
1449 struct nic *nic = netdev_priv(netdev);
1450
1451 DPRINTK(HW, DEBUG, "mc_count=%d, flags=0x%04X\n",
1452 netdev->mc_count, netdev->flags);
1453
1454 if(netdev->flags & IFF_PROMISC)
1455 nic->flags |= promiscuous;
1456 else
1457 nic->flags &= ~promiscuous;
1458
1459 if(netdev->flags & IFF_ALLMULTI ||
1460 netdev->mc_count > E100_MAX_MULTICAST_ADDRS)
1461 nic->flags |= multicast_all;
1462 else
1463 nic->flags &= ~multicast_all;
1464
1465 e100_exec_cb(nic, NULL, e100_configure);
1466 e100_exec_cb(nic, NULL, e100_multi);
1467}
1468
/* Harvest the statistics block the controller DMAs into nic->mem->stats
 * and fold the counters into net_device_stats, then re-arm the next
 * dump+reset cycle.  Which trailing counters exist depends on the MAC
 * revision, so the "complete" marker sits at a rev-dependent offset. */
static void e100_update_stats(struct nic *nic)
{
	struct net_device_stats *ns = &nic->net_stats;
	struct stats *s = &nic->mem->stats;
	u32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause :
		(nic->mac < mac_82559_D101M) ? (u32 *)&s->xmt_tco_frames :
		&s->complete;

	/* Device's stats reporting may take several microseconds to
	 * complete, so we're always waiting for results of the
	 * previous command. */

	if(*complete == le32_to_cpu(cuc_dump_reset_complete)) {
		*complete = 0;
		nic->tx_frames = le32_to_cpu(s->tx_good_frames);
		nic->tx_collisions = le32_to_cpu(s->tx_total_collisions);
		ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions);
		ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions);
		ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs);
		ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns);
		ns->collisions += nic->tx_collisions;
		ns->tx_errors += le32_to_cpu(s->tx_max_collisions) +
			le32_to_cpu(s->tx_lost_crs);
		ns->rx_length_errors += le32_to_cpu(s->rx_short_frame_errors) +
			nic->rx_over_length_errors;
		ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors);
		ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors);
		ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors);
		ns->rx_errors += le32_to_cpu(s->rx_crc_errors) +
			le32_to_cpu(s->rx_alignment_errors) +
			le32_to_cpu(s->rx_short_frame_errors) +
			le32_to_cpu(s->rx_cdt_errors);
		nic->tx_deferred += le32_to_cpu(s->tx_deferred);
		nic->tx_single_collisions +=
			le32_to_cpu(s->tx_single_collisions);
		nic->tx_multiple_collisions +=
			le32_to_cpu(s->tx_multiple_collisions);
		/* Flow-control counters exist from 82558 onward, and the
		 * TCO counters only from 82559 D101M onward */
		if(nic->mac >= mac_82558_D101_A4) {
			nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause);
			nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause);
			nic->rx_fc_unsupported +=
				le32_to_cpu(s->fc_rcv_unsupported);
			if(nic->mac >= mac_82559_D101M) {
				nic->tx_tco_frames +=
					le16_to_cpu(s->xmt_tco_frames);
				nic->rx_tco_frames +=
					le16_to_cpu(s->rcv_tco_frames);
			}
		}
	}

	/* Re-arm: ask the device for a fresh dump and clear its counters */
	if(e100_exec_cmd(nic, cuc_dump_reset, 0))
		DPRINTK(TX_ERR, DEBUG, "exec cuc_dump_reset failed\n");
}
1526
1527static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
1528{
1529 /* Adjust inter-frame-spacing (IFS) between two transmits if
1530 * we're getting collisions on a half-duplex connection. */
1531
1532 if(duplex == DUPLEX_HALF) {
1533 u32 prev = nic->adaptive_ifs;
1534 u32 min_frames = (speed == SPEED_100) ? 1000 : 100;
1535
1536 if((nic->tx_frames / 32 < nic->tx_collisions) &&
1537 (nic->tx_frames > min_frames)) {
1538 if(nic->adaptive_ifs < 60)
1539 nic->adaptive_ifs += 5;
1540 } else if (nic->tx_frames < min_frames) {
1541 if(nic->adaptive_ifs >= 5)
1542 nic->adaptive_ifs -= 5;
1543 }
1544 if(nic->adaptive_ifs != prev)
1545 e100_exec_cb(nic, NULL, e100_configure);
1546 }
1547}
1548
/* Periodic timer (every E100_WATCHDOG_PERIOD): track link state via the
 * mii library, kick a software interrupt to recover the Rx path from a
 * past allocation failure, refresh stats, adapt IFS, and apply per-MAC
 * workarounds.  Re-arms itself at the end. */
static void e100_watchdog(unsigned long data)
{
	struct nic *nic = (struct nic *)data;
	struct ethtool_cmd cmd;

	DPRINTK(TIMER, DEBUG, "right now = %ld\n", jiffies);

	/* mii library handles link maintenance tasks */

	mii_ethtool_gset(&nic->mii, &cmd);

	/* Log only on link-state transitions */
	if(mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
		DPRINTK(LINK, INFO, "link up, %sMbps, %s-duplex\n",
			cmd.speed == SPEED_100 ? "100" : "10",
			cmd.duplex == DUPLEX_FULL ? "full" : "half");
	} else if(!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
		DPRINTK(LINK, INFO, "link down\n");
	}

	mii_check_link(&nic->mii);

	/* Software generated interrupt to recover from (rare) Rx
	 * allocation failure.
	 * Unfortunately have to use a spinlock to not re-enable interrupts
	 * accidentally, due to hardware that shares a register between the
	 * interrupt mask bit and the SW Interrupt generation bit */
	spin_lock_irq(&nic->cmd_lock);
	writeb(readb(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irq(&nic->cmd_lock);

	e100_update_stats(nic);
	e100_adjust_adaptive_ifs(nic, cmd.speed, cmd.duplex);

	if(nic->mac <= mac_82557_D100_C)
		/* Issue a multicast command to workaround a 557 lock up */
		e100_set_multicast_list(nic->netdev);

	if(nic->flags & ich && cmd.speed==SPEED_10 && cmd.duplex==DUPLEX_HALF)
		/* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */
		nic->flags |= ich_10h_workaround;
	else
		nic->flags &= ~ich_10h_workaround;

	/* re-arm the watchdog */
	mod_timer(&nic->watchdog, jiffies + E100_WATCHDOG_PERIOD);
}
1595
/* cb_prepare callback: fill a CB as a single-fragment (one TBD)
 * transmit command for @skb and DMA-map the packet data. */
static void e100_xmit_prepare(struct nic *nic, struct cb *cb,
	struct sk_buff *skb)
{
	cb->command = nic->tx_command;
	/* interrupt every 16 packets regardless of delay: request cb_i
	 * whenever cbs_avail is a multiple of 16 (low 4 bits clear) */
	if((nic->cbs_avail & ~15) == nic->cbs_avail)
		cb->command |= cpu_to_le16(cb_i);
	cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
	cb->u.tcb.tcb_byte_count = 0;
	cb->u.tcb.threshold = nic->tx_threshold;
	cb->u.tcb.tbd_count = 1;
	cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
		skb->data, skb->len, PCI_DMA_TODEVICE));
	/* NOTE(review): pci_map_single() can fail and the result is not
	 * checked; this callback returns void, so reporting the failure
	 * needs an interface change -- TODO confirm/fix upstream. */
	cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
}
1612
1613static int e100_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1614{
1615 struct nic *nic = netdev_priv(netdev);
1616 int err;
1617
1618 if(nic->flags & ich_10h_workaround) {
1619 /* SW workaround for ICH[x] 10Mbps/half duplex Tx hang.
1620 Issue a NOP command followed by a 1us delay before
1621 issuing the Tx command. */
1f53367d
MC
1622 if(e100_exec_cmd(nic, cuc_nop, 0))
1623 DPRINTK(TX_ERR, DEBUG, "exec cuc_nop failed\n");
1da177e4
LT
1624 udelay(1);
1625 }
1626
1627 err = e100_exec_cb(nic, skb, e100_xmit_prepare);
1628
1629 switch(err) {
1630 case -ENOSPC:
1631 /* We queued the skb, but now we're out of space. */
1632 DPRINTK(TX_ERR, DEBUG, "No space for CB\n");
1633 netif_stop_queue(netdev);
1634 break;
1635 case -ENOMEM:
1636 /* This is a hard error - log it. */
1637 DPRINTK(TX_ERR, DEBUG, "Out of Tx resources, returning skb\n");
1638 netif_stop_queue(netdev);
1639 return 1;
1640 }
1641
1642 netdev->trans_start = jiffies;
1643 return 0;
1644}
1645
/* Reclaim completed command blocks: free Tx mappings/skbs, bump stats,
 * and wake the queue if it had been stopped for lack of CBs.  Returns
 * nonzero if any Tx skb was reclaimed. */
static int e100_tx_clean(struct nic *nic)
{
	struct cb *cb;
	int tx_cleaned = 0;

	spin_lock(&nic->cb_lock);

	/* Clean CBs marked complete */
	for(cb = nic->cb_to_clean;
	    cb->status & cpu_to_le16(cb_complete);
	    cb = nic->cb_to_clean = cb->next) {
		DPRINTK(TX_DONE, DEBUG, "cb[%d]->status = 0x%04X\n",
			(int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)),
			cb->status);

		/* Only Tx CBs carry an skb; control CBs (config,
		 * multicast, ...) are simply recycled below */
		if(likely(cb->skb != NULL)) {
			nic->net_stats.tx_packets++;
			nic->net_stats.tx_bytes += cb->skb->len;

			pci_unmap_single(nic->pdev,
				le32_to_cpu(cb->u.tcb.tbd.buf_addr),
				le16_to_cpu(cb->u.tcb.tbd.size),
				PCI_DMA_TODEVICE);
			dev_kfree_skb_any(cb->skb);
			cb->skb = NULL;
			tx_cleaned = 1;
		}
		cb->status = 0;
		nic->cbs_avail++;
	}

	spin_unlock(&nic->cb_lock);

	/* Recover from running out of Tx resources in xmit_frame */
	if(unlikely(tx_cleaned && netif_queue_stopped(nic->netdev)))
		netif_wake_queue(nic->netdev);

	return tx_cleaned;
}
1685
/* Tear down the command-block ring: unmap/free any Tx skbs still in
 * flight, release the coherent ring memory, and reset the ring
 * bookkeeping so a later e100_alloc_cbs() starts from scratch. */
static void e100_clean_cbs(struct nic *nic)
{
	if(nic->cbs) {
		/* Walk until every CB has been returned to the pool */
		while(nic->cbs_avail != nic->params.cbs.count) {
			struct cb *cb = nic->cb_to_clean;
			if(cb->skb) {
				pci_unmap_single(nic->pdev,
					le32_to_cpu(cb->u.tcb.tbd.buf_addr),
					le16_to_cpu(cb->u.tcb.tbd.size),
					PCI_DMA_TODEVICE);
				dev_kfree_skb(cb->skb);
			}
			nic->cb_to_clean = nic->cb_to_clean->next;
			nic->cbs_avail++;
		}
		pci_free_consistent(nic->pdev,
			sizeof(struct cb) * nic->params.cbs.count,
			nic->cbs, nic->cbs_dma_addr);
		nic->cbs = NULL;
		nic->cbs_avail = 0;
	}
	nic->cuc_cmd = cuc_start;
	/* nic->cbs is NULL here, so all ring pointers end up NULL */
	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean =
		nic->cbs;
}
1711
1712static int e100_alloc_cbs(struct nic *nic)
1713{
1714 struct cb *cb;
1715 unsigned int i, count = nic->params.cbs.count;
1716
1717 nic->cuc_cmd = cuc_start;
1718 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL;
1719 nic->cbs_avail = 0;
1720
1721 nic->cbs = pci_alloc_consistent(nic->pdev,
1722 sizeof(struct cb) * count, &nic->cbs_dma_addr);
1723 if(!nic->cbs)
1724 return -ENOMEM;
1725
1726 for(cb = nic->cbs, i = 0; i < count; cb++, i++) {
1727 cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
1728 cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1;
1729
1730 cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb);
1731 cb->link = cpu_to_le32(nic->cbs_dma_addr +
1732 ((i+1) % count) * sizeof(struct cb));
1733 cb->skb = NULL;
1734 }
1735
1736 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
1737 nic->cbs_avail = count;
1738
1739 return 0;
1740}
1741
d52df4a3 1742static inline void e100_start_receiver(struct nic *nic)
1da177e4 1743{
d52df4a3
SF
1744 /* Start if RFA is non-NULL */
1745 if(nic->rx_to_clean->skb)
1746 e100_exec_cmd(nic, ruc_start, nic->rx_to_clean->dma_addr);
1da177e4
LT
1747}
1748
1749#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN)
858119e1 1750static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
1da177e4 1751{
4187592b 1752 if(!(rx->skb = netdev_alloc_skb(nic->netdev, RFD_BUF_LEN + NET_IP_ALIGN)))
1da177e4
LT
1753 return -ENOMEM;
1754
1755 /* Align, init, and map the RFD. */
1da177e4 1756 skb_reserve(rx->skb, NET_IP_ALIGN);
27d7ff46 1757 skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd));
1da177e4
LT
1758 rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
1759 RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
1760
1f53367d
MC
1761 if(pci_dma_mapping_error(rx->dma_addr)) {
1762 dev_kfree_skb_any(rx->skb);
097688ef 1763 rx->skb = NULL;
1f53367d
MC
1764 rx->dma_addr = 0;
1765 return -ENOMEM;
1766 }
1767
1da177e4
LT
1768 /* Link the RFD to end of RFA by linking previous RFD to
1769 * this one, and clearing EL bit of previous. */
1770 if(rx->prev->skb) {
1771 struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
1772 put_unaligned(cpu_to_le32(rx->dma_addr),
1773 (u32 *)&prev_rfd->link);
1774 wmb();
d52df4a3 1775 prev_rfd->command &= ~cpu_to_le16(cb_el & cb_s);
1da177e4
LT
1776 pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
1777 sizeof(struct rfd), PCI_DMA_TODEVICE);
1778 }
1779
1780 return 0;
1781}
1782
/* Examine one RFD and, if it holds a completed good frame, hand the skb
 * up the stack.  Returns 0 when the slot was consumed (good frame,
 * hardware error, or oversize -- the skb is gone either way), -EAGAIN
 * when the NAPI budget is exhausted, -ENODATA when the RFD has no
 * completed frame yet. */
static int e100_rx_indicate(struct nic *nic, struct rx *rx,
	unsigned int *work_done, unsigned int work_to_do)
{
	struct sk_buff *skb = rx->skb;
	struct rfd *rfd = (struct rfd *)skb->data;
	u16 rfd_status, actual_size;

	if(unlikely(work_done && *work_done >= work_to_do))
		return -EAGAIN;

	/* Need to sync before taking a peek at cb_complete bit */
	pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr,
		sizeof(struct rfd), PCI_DMA_FROMDEVICE);
	rfd_status = le16_to_cpu(rfd->status);

	DPRINTK(RX_STATUS, DEBUG, "status=0x%04X\n", rfd_status);

	/* If data isn't ready, nothing to indicate */
	if(unlikely(!(rfd_status & cb_complete)))
		return -ENODATA;

	/* Get actual data size: mask off the flag bits in the count
	 * field and clamp to the buffer we actually allocated */
	actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
	if(unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd)))
		actual_size = RFD_BUF_LEN - sizeof(struct rfd);

	/* Get data */
	pci_unmap_single(nic->pdev, rx->dma_addr,
		RFD_BUF_LEN, PCI_DMA_FROMDEVICE);

	/* Pull off the RFD and put the actual data (minus eth hdr) */
	skb_reserve(skb, sizeof(struct rfd));
	skb_put(skb, actual_size);
	skb->protocol = eth_type_trans(skb, nic->netdev);

	if(unlikely(!(rfd_status & cb_ok))) {
		/* Don't indicate if hardware indicates errors */
		dev_kfree_skb_any(skb);
	} else if(actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN) {
		/* Don't indicate oversized frames */
		nic->rx_over_length_errors++;
		dev_kfree_skb_any(skb);
	} else {
		nic->net_stats.rx_packets++;
		nic->net_stats.rx_bytes += actual_size;
		nic->netdev->last_rx = jiffies;
		netif_receive_skb(skb);
		if(work_done)
			(*work_done)++;
	}

	/* skb is now owned by the stack (or freed); slot needs a refill */
	rx->skb = NULL;

	return 0;
}
1838
858119e1 1839static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
1da177e4
LT
1840 unsigned int work_to_do)
1841{
1842 struct rx *rx;
1843
1844 /* Indicate newly arrived packets */
1845 for(rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
d52df4a3 1846 if(e100_rx_indicate(nic, rx, work_done, work_to_do))
1da177e4
LT
1847 break; /* No more to clean */
1848 }
1849
1850 /* Alloc new skbs to refill list */
1851 for(rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
1852 if(unlikely(e100_rx_alloc_skb(nic, rx)))
1853 break; /* Better luck next time (see watchdog) */
1854 }
1da177e4
LT
1855}
1856
1857static void e100_rx_clean_list(struct nic *nic)
1858{
1859 struct rx *rx;
1860 unsigned int i, count = nic->params.rfds.count;
1861
1862 if(nic->rxs) {
1863 for(rx = nic->rxs, i = 0; i < count; rx++, i++) {
1864 if(rx->skb) {
1865 pci_unmap_single(nic->pdev, rx->dma_addr,
1866 RFD_BUF_LEN, PCI_DMA_FROMDEVICE);
1867 dev_kfree_skb(rx->skb);
1868 }
1869 }
1870 kfree(nic->rxs);
1871 nic->rxs = NULL;
1872 }
1873
1874 nic->rx_to_use = nic->rx_to_clean = NULL;
1da177e4
LT
1875}
1876
1877static int e100_rx_alloc_list(struct nic *nic)
1878{
1879 struct rx *rx;
1880 unsigned int i, count = nic->params.rfds.count;
1881
1882 nic->rx_to_use = nic->rx_to_clean = NULL;
1883
c48e3fca 1884 if(!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_ATOMIC)))
1da177e4 1885 return -ENOMEM;
1da177e4
LT
1886
1887 for(rx = nic->rxs, i = 0; i < count; rx++, i++) {
1888 rx->next = (i + 1 < count) ? rx + 1 : nic->rxs;
1889 rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1;
1890 if(e100_rx_alloc_skb(nic, rx)) {
1891 e100_rx_clean_list(nic);
1892 return -ENOMEM;
1893 }
1894 }
1895
1896 nic->rx_to_use = nic->rx_to_clean = nic->rxs;
1897
1898 return 0;
1899}
1900
7d12e780 1901static irqreturn_t e100_intr(int irq, void *dev_id)
1da177e4
LT
1902{
1903 struct net_device *netdev = dev_id;
1904 struct nic *nic = netdev_priv(netdev);
1905 u8 stat_ack = readb(&nic->csr->scb.stat_ack);
1906
1907 DPRINTK(INTR, DEBUG, "stat_ack = 0x%02X\n", stat_ack);
1908
1909 if(stat_ack == stat_ack_not_ours || /* Not our interrupt */
1910 stat_ack == stat_ack_not_present) /* Hardware is ejected */
1911 return IRQ_NONE;
1912
1913 /* Ack interrupt(s) */
1914 writeb(stat_ack, &nic->csr->scb.stat_ack);
1915
0685c31b
MC
1916 if(likely(netif_rx_schedule_prep(netdev))) {
1917 e100_disable_irq(nic);
1918 __netif_rx_schedule(netdev);
1919 }
1da177e4
LT
1920
1921 return IRQ_HANDLED;
1922}
1923
1924static int e100_poll(struct net_device *netdev, int *budget)
1925{
1926 struct nic *nic = netdev_priv(netdev);
1927 unsigned int work_to_do = min(netdev->quota, *budget);
1928 unsigned int work_done = 0;
1929 int tx_cleaned;
1930
1931 e100_rx_clean(nic, &work_done, work_to_do);
1932 tx_cleaned = e100_tx_clean(nic);
1933
1934 /* If no Rx and Tx cleanup work was done, exit polling mode. */
1935 if((!tx_cleaned && (work_done == 0)) || !netif_running(netdev)) {
1936 netif_rx_complete(netdev);
1937 e100_enable_irq(nic);
1938 return 0;
1939 }
1940
1941 *budget -= work_done;
1942 netdev->quota -= work_done;
1943
1944 return 1;
1945}
1946
#ifdef CONFIG_NET_POLL_CONTROLLER
/* e100_netpoll - polling 'interrupt' for netconsole and friends. */
static void e100_netpoll(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	e100_disable_irq(nic);
	e100_intr(nic->pdev->irq, netdev);
	e100_tx_clean(nic);
	e100_enable_irq(nic);
}
#endif
1958
1959static struct net_device_stats *e100_get_stats(struct net_device *netdev)
1960{
1961 struct nic *nic = netdev_priv(netdev);
1962 return &nic->net_stats;
1963}
1964
1965static int e100_set_mac_address(struct net_device *netdev, void *p)
1966{
1967 struct nic *nic = netdev_priv(netdev);
1968 struct sockaddr *addr = p;
1969
1970 if (!is_valid_ether_addr(addr->sa_data))
1971 return -EADDRNOTAVAIL;
1972
1973 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1974 e100_exec_cb(nic, NULL, e100_setup_iaaddr);
1975
1976 return 0;
1977}
1978
1979static int e100_change_mtu(struct net_device *netdev, int new_mtu)
1980{
1981 if(new_mtu < ETH_ZLEN || new_mtu > ETH_DATA_LEN)
1982 return -EINVAL;
1983 netdev->mtu = new_mtu;
1984 return 0;
1985}
1986
1987static int e100_asf(struct nic *nic)
1988{
1989 /* ASF can be enabled from eeprom */
1990 return((nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
1991 (nic->eeprom[eeprom_config_asf] & eeprom_asf) &&
1992 !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
1993 ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE));
1994}
1995
1996static int e100_up(struct nic *nic)
1997{
1998 int err;
1999
2000 if((err = e100_rx_alloc_list(nic)))
2001 return err;
2002 if((err = e100_alloc_cbs(nic)))
2003 goto err_rx_clean_list;
2004 if((err = e100_hw_init(nic)))
2005 goto err_clean_cbs;
2006 e100_set_multicast_list(nic->netdev);
d52df4a3 2007 e100_start_receiver(nic);
1da177e4 2008 mod_timer(&nic->watchdog, jiffies);
1fb9df5d 2009 if((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED,
1da177e4
LT
2010 nic->netdev->name, nic->netdev)))
2011 goto err_no_irq;
1da177e4 2012 netif_wake_queue(nic->netdev);
0236ebb7
MC
2013 netif_poll_enable(nic->netdev);
2014 /* enable ints _after_ enabling poll, preventing a race between
2015 * disable ints+schedule */
2016 e100_enable_irq(nic);
1da177e4
LT
2017 return 0;
2018
2019err_no_irq:
2020 del_timer_sync(&nic->watchdog);
2021err_clean_cbs:
2022 e100_clean_cbs(nic);
2023err_rx_clean_list:
2024 e100_rx_clean_list(nic);
2025 return err;
2026}
2027
2028static void e100_down(struct nic *nic)
2029{
0236ebb7
MC
2030 /* wait here for poll to complete */
2031 netif_poll_disable(nic->netdev);
2032 netif_stop_queue(nic->netdev);
1da177e4
LT
2033 e100_hw_reset(nic);
2034 free_irq(nic->pdev->irq, nic->netdev);
2035 del_timer_sync(&nic->watchdog);
2036 netif_carrier_off(nic->netdev);
1da177e4
LT
2037 e100_clean_cbs(nic);
2038 e100_rx_clean_list(nic);
2039}
2040
2041static void e100_tx_timeout(struct net_device *netdev)
2042{
2043 struct nic *nic = netdev_priv(netdev);
2044
05479938 2045 /* Reset outside of interrupt context, to avoid request_irq
2acdb1e0
MC
2046 * in interrupt context */
2047 schedule_work(&nic->tx_timeout_task);
2048}
2049
c4028958 2050static void e100_tx_timeout_task(struct work_struct *work)
2acdb1e0 2051{
c4028958
DH
2052 struct nic *nic = container_of(work, struct nic, tx_timeout_task);
2053 struct net_device *netdev = nic->netdev;
2acdb1e0 2054
1da177e4
LT
2055 DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n",
2056 readb(&nic->csr->scb.status));
2057 e100_down(netdev_priv(netdev));
2058 e100_up(netdev_priv(netdev));
2059}
2060
2061static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
2062{
2063 int err;
2064 struct sk_buff *skb;
2065
2066 /* Use driver resources to perform internal MAC or PHY
2067 * loopback test. A single packet is prepared and transmitted
2068 * in loopback mode, and the test passes if the received
2069 * packet compares byte-for-byte to the transmitted packet. */
2070
2071 if((err = e100_rx_alloc_list(nic)))
2072 return err;
2073 if((err = e100_alloc_cbs(nic)))
2074 goto err_clean_rx;
2075
2076 /* ICH PHY loopback is broken so do MAC loopback instead */
2077 if(nic->flags & ich && loopback_mode == lb_phy)
2078 loopback_mode = lb_mac;
2079
2080 nic->loopback = loopback_mode;
2081 if((err = e100_hw_init(nic)))
2082 goto err_loopback_none;
2083
2084 if(loopback_mode == lb_phy)
2085 mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
2086 BMCR_LOOPBACK);
2087
d52df4a3 2088 e100_start_receiver(nic);
1da177e4 2089
4187592b 2090 if(!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) {
1da177e4
LT
2091 err = -ENOMEM;
2092 goto err_loopback_none;
2093 }
2094 skb_put(skb, ETH_DATA_LEN);
2095 memset(skb->data, 0xFF, ETH_DATA_LEN);
2096 e100_xmit_frame(skb, nic->netdev);
2097
2098 msleep(10);
2099
aa49cdd9
JB
2100 pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr,
2101 RFD_BUF_LEN, PCI_DMA_FROMDEVICE);
2102
1da177e4
LT
2103 if(memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
2104 skb->data, ETH_DATA_LEN))
2105 err = -EAGAIN;
2106
2107err_loopback_none:
2108 mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0);
2109 nic->loopback = lb_none;
1da177e4 2110 e100_clean_cbs(nic);
aa49cdd9 2111 e100_hw_reset(nic);
1da177e4
LT
2112err_clean_rx:
2113 e100_rx_clean_list(nic);
2114 return err;
2115}
2116
2117#define MII_LED_CONTROL 0x1B
2118static void e100_blink_led(unsigned long data)
2119{
2120 struct nic *nic = (struct nic *)data;
2121 enum led_state {
2122 led_on = 0x01,
2123 led_off = 0x04,
2124 led_on_559 = 0x05,
2125 led_on_557 = 0x07,
2126 };
2127
2128 nic->leds = (nic->leds & led_on) ? led_off :
2129 (nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559;
2130 mdio_write(nic->netdev, nic->mii.phy_id, MII_LED_CONTROL, nic->leds);
2131 mod_timer(&nic->blink_timer, jiffies + HZ / 4);
2132}
2133
2134static int e100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
2135{
2136 struct nic *nic = netdev_priv(netdev);
2137 return mii_ethtool_gset(&nic->mii, cmd);
2138}
2139
2140static int e100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
2141{
2142 struct nic *nic = netdev_priv(netdev);
2143 int err;
2144
2145 mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET);
2146 err = mii_ethtool_sset(&nic->mii, cmd);
2147 e100_exec_cb(nic, NULL, e100_configure);
2148
2149 return err;
2150}
2151
2152static void e100_get_drvinfo(struct net_device *netdev,
2153 struct ethtool_drvinfo *info)
2154{
2155 struct nic *nic = netdev_priv(netdev);
2156 strcpy(info->driver, DRV_NAME);
2157 strcpy(info->version, DRV_VERSION);
2158 strcpy(info->fw_version, "N/A");
2159 strcpy(info->bus_info, pci_name(nic->pdev));
2160}
2161
2162static int e100_get_regs_len(struct net_device *netdev)
2163{
2164 struct nic *nic = netdev_priv(netdev);
2165#define E100_PHY_REGS 0x1C
2166#define E100_REGS_LEN 1 + E100_PHY_REGS + \
2167 sizeof(nic->mem->dump_buf) / sizeof(u32)
2168 return E100_REGS_LEN * sizeof(u32);
2169}
2170
2171static void e100_get_regs(struct net_device *netdev,
2172 struct ethtool_regs *regs, void *p)
2173{
2174 struct nic *nic = netdev_priv(netdev);
2175 u32 *buff = p;
2176 int i;
2177
2178 regs->version = (1 << 24) | nic->rev_id;
2179 buff[0] = readb(&nic->csr->scb.cmd_hi) << 24 |
2180 readb(&nic->csr->scb.cmd_lo) << 16 |
2181 readw(&nic->csr->scb.status);
2182 for(i = E100_PHY_REGS; i >= 0; i--)
2183 buff[1 + E100_PHY_REGS - i] =
2184 mdio_read(netdev, nic->mii.phy_id, i);
2185 memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf));
2186 e100_exec_cb(nic, NULL, e100_dump);
2187 msleep(10);
2188 memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf,
2189 sizeof(nic->mem->dump_buf));
2190}
2191
2192static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2193{
2194 struct nic *nic = netdev_priv(netdev);
2195 wol->supported = (nic->mac >= mac_82558_D101_A4) ? WAKE_MAGIC : 0;
2196 wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0;
2197}
2198
2199static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2200{
2201 struct nic *nic = netdev_priv(netdev);
2202
2203 if(wol->wolopts != WAKE_MAGIC && wol->wolopts != 0)
2204 return -EOPNOTSUPP;
2205
2206 if(wol->wolopts)
2207 nic->flags |= wol_magic;
2208 else
2209 nic->flags &= ~wol_magic;
2210
1da177e4
LT
2211 e100_exec_cb(nic, NULL, e100_configure);
2212
2213 return 0;
2214}
2215
2216static u32 e100_get_msglevel(struct net_device *netdev)
2217{
2218 struct nic *nic = netdev_priv(netdev);
2219 return nic->msg_enable;
2220}
2221
2222static void e100_set_msglevel(struct net_device *netdev, u32 value)
2223{
2224 struct nic *nic = netdev_priv(netdev);
2225 nic->msg_enable = value;
2226}
2227
2228static int e100_nway_reset(struct net_device *netdev)
2229{
2230 struct nic *nic = netdev_priv(netdev);
2231 return mii_nway_restart(&nic->mii);
2232}
2233
2234static u32 e100_get_link(struct net_device *netdev)
2235{
2236 struct nic *nic = netdev_priv(netdev);
2237 return mii_link_ok(&nic->mii);
2238}
2239
2240static int e100_get_eeprom_len(struct net_device *netdev)
2241{
2242 struct nic *nic = netdev_priv(netdev);
2243 return nic->eeprom_wc << 1;
2244}
2245
2246#define E100_EEPROM_MAGIC 0x1234
2247static int e100_get_eeprom(struct net_device *netdev,
2248 struct ethtool_eeprom *eeprom, u8 *bytes)
2249{
2250 struct nic *nic = netdev_priv(netdev);
2251
2252 eeprom->magic = E100_EEPROM_MAGIC;
2253 memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len);
2254
2255 return 0;
2256}
2257
2258static int e100_set_eeprom(struct net_device *netdev,
2259 struct ethtool_eeprom *eeprom, u8 *bytes)
2260{
2261 struct nic *nic = netdev_priv(netdev);
2262
2263 if(eeprom->magic != E100_EEPROM_MAGIC)
2264 return -EINVAL;
2265
2266 memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len);
2267
2268 return e100_eeprom_save(nic, eeprom->offset >> 1,
2269 (eeprom->len >> 1) + 1);
2270}
2271
2272static void e100_get_ringparam(struct net_device *netdev,
2273 struct ethtool_ringparam *ring)
2274{
2275 struct nic *nic = netdev_priv(netdev);
2276 struct param_range *rfds = &nic->params.rfds;
2277 struct param_range *cbs = &nic->params.cbs;
2278
2279 ring->rx_max_pending = rfds->max;
2280 ring->tx_max_pending = cbs->max;
2281 ring->rx_mini_max_pending = 0;
2282 ring->rx_jumbo_max_pending = 0;
2283 ring->rx_pending = rfds->count;
2284 ring->tx_pending = cbs->count;
2285 ring->rx_mini_pending = 0;
2286 ring->rx_jumbo_pending = 0;
2287}
2288
2289static int e100_set_ringparam(struct net_device *netdev,
2290 struct ethtool_ringparam *ring)
2291{
2292 struct nic *nic = netdev_priv(netdev);
2293 struct param_range *rfds = &nic->params.rfds;
2294 struct param_range *cbs = &nic->params.cbs;
2295
05479938 2296 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
1da177e4
LT
2297 return -EINVAL;
2298
2299 if(netif_running(netdev))
2300 e100_down(nic);
2301 rfds->count = max(ring->rx_pending, rfds->min);
2302 rfds->count = min(rfds->count, rfds->max);
2303 cbs->count = max(ring->tx_pending, cbs->min);
2304 cbs->count = min(cbs->count, cbs->max);
2305 DPRINTK(DRV, INFO, "Ring Param settings: rx: %d, tx %d\n",
2306 rfds->count, cbs->count);
2307 if(netif_running(netdev))
2308 e100_up(nic);
2309
2310 return 0;
2311}
2312
2313static const char e100_gstrings_test[][ETH_GSTRING_LEN] = {
2314 "Link test (on/offline)",
2315 "Eeprom test (on/offline)",
2316 "Self test (offline)",
2317 "Mac loopback (offline)",
2318 "Phy loopback (offline)",
2319};
2320#define E100_TEST_LEN sizeof(e100_gstrings_test) / ETH_GSTRING_LEN
2321
2322static int e100_diag_test_count(struct net_device *netdev)
2323{
2324 return E100_TEST_LEN;
2325}
2326
2327static void e100_diag_test(struct net_device *netdev,
2328 struct ethtool_test *test, u64 *data)
2329{
2330 struct ethtool_cmd cmd;
2331 struct nic *nic = netdev_priv(netdev);
2332 int i, err;
2333
2334 memset(data, 0, E100_TEST_LEN * sizeof(u64));
2335 data[0] = !mii_link_ok(&nic->mii);
2336 data[1] = e100_eeprom_load(nic);
2337 if(test->flags & ETH_TEST_FL_OFFLINE) {
2338
2339 /* save speed, duplex & autoneg settings */
2340 err = mii_ethtool_gset(&nic->mii, &cmd);
2341
2342 if(netif_running(netdev))
2343 e100_down(nic);
2344 data[2] = e100_self_test(nic);
2345 data[3] = e100_loopback_test(nic, lb_mac);
2346 data[4] = e100_loopback_test(nic, lb_phy);
2347
2348 /* restore speed, duplex & autoneg settings */
2349 err = mii_ethtool_sset(&nic->mii, &cmd);
2350
2351 if(netif_running(netdev))
2352 e100_up(nic);
2353 }
2354 for(i = 0; i < E100_TEST_LEN; i++)
2355 test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0;
a074fb86
MC
2356
2357 msleep_interruptible(4 * 1000);
1da177e4
LT
2358}
2359
2360static int e100_phys_id(struct net_device *netdev, u32 data)
2361{
2362 struct nic *nic = netdev_priv(netdev);
2363
2364 if(!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
2365 data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);
2366 mod_timer(&nic->blink_timer, jiffies);
2367 msleep_interruptible(data * 1000);
2368 del_timer_sync(&nic->blink_timer);
2369 mdio_write(netdev, nic->mii.phy_id, MII_LED_CONTROL, 0);
2370
2371 return 0;
2372}
2373
2374static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = {
2375 "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
2376 "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
2377 "rx_length_errors", "rx_over_errors", "rx_crc_errors",
2378 "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
2379 "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
2380 "tx_heartbeat_errors", "tx_window_errors",
2381 /* device-specific stats */
2382 "tx_deferred", "tx_single_collisions", "tx_multi_collisions",
2383 "tx_flow_control_pause", "rx_flow_control_pause",
2384 "rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets",
2385};
2386#define E100_NET_STATS_LEN 21
2387#define E100_STATS_LEN sizeof(e100_gstrings_stats) / ETH_GSTRING_LEN
2388
2389static int e100_get_stats_count(struct net_device *netdev)
2390{
2391 return E100_STATS_LEN;
2392}
2393
2394static void e100_get_ethtool_stats(struct net_device *netdev,
2395 struct ethtool_stats *stats, u64 *data)
2396{
2397 struct nic *nic = netdev_priv(netdev);
2398 int i;
2399
2400 for(i = 0; i < E100_NET_STATS_LEN; i++)
2401 data[i] = ((unsigned long *)&nic->net_stats)[i];
2402
2403 data[i++] = nic->tx_deferred;
2404 data[i++] = nic->tx_single_collisions;
2405 data[i++] = nic->tx_multiple_collisions;
2406 data[i++] = nic->tx_fc_pause;
2407 data[i++] = nic->rx_fc_pause;
2408 data[i++] = nic->rx_fc_unsupported;
2409 data[i++] = nic->tx_tco_frames;
2410 data[i++] = nic->rx_tco_frames;
2411}
2412
2413static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
2414{
2415 switch(stringset) {
2416 case ETH_SS_TEST:
2417 memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test));
2418 break;
2419 case ETH_SS_STATS:
2420 memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats));
2421 break;
2422 }
2423}
2424
7282d491 2425static const struct ethtool_ops e100_ethtool_ops = {
1da177e4
LT
2426 .get_settings = e100_get_settings,
2427 .set_settings = e100_set_settings,
2428 .get_drvinfo = e100_get_drvinfo,
2429 .get_regs_len = e100_get_regs_len,
2430 .get_regs = e100_get_regs,
2431 .get_wol = e100_get_wol,
2432 .set_wol = e100_set_wol,
2433 .get_msglevel = e100_get_msglevel,
2434 .set_msglevel = e100_set_msglevel,
2435 .nway_reset = e100_nway_reset,
2436 .get_link = e100_get_link,
2437 .get_eeprom_len = e100_get_eeprom_len,
2438 .get_eeprom = e100_get_eeprom,
2439 .set_eeprom = e100_set_eeprom,
2440 .get_ringparam = e100_get_ringparam,
2441 .set_ringparam = e100_set_ringparam,
2442 .self_test_count = e100_diag_test_count,
2443 .self_test = e100_diag_test,
2444 .get_strings = e100_get_strings,
2445 .phys_id = e100_phys_id,
2446 .get_stats_count = e100_get_stats_count,
2447 .get_ethtool_stats = e100_get_ethtool_stats,
a92dd923 2448 .get_perm_addr = ethtool_op_get_perm_addr,
1da177e4
LT
2449};
2450
2451static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2452{
2453 struct nic *nic = netdev_priv(netdev);
2454
2455 return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL);
2456}
2457
2458static int e100_alloc(struct nic *nic)
2459{
2460 nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem),
2461 &nic->dma_addr);
2462 return nic->mem ? 0 : -ENOMEM;
2463}
2464
2465static void e100_free(struct nic *nic)
2466{
2467 if(nic->mem) {
2468 pci_free_consistent(nic->pdev, sizeof(struct mem),
2469 nic->mem, nic->dma_addr);
2470 nic->mem = NULL;
2471 }
2472}
2473
2474static int e100_open(struct net_device *netdev)
2475{
2476 struct nic *nic = netdev_priv(netdev);
2477 int err = 0;
2478
2479 netif_carrier_off(netdev);
2480 if((err = e100_up(nic)))
2481 DPRINTK(IFUP, ERR, "Cannot open interface, aborting.\n");
2482 return err;
2483}
2484
/* net_device stop hook: tear everything down. */
static int e100_close(struct net_device *netdev)
{
	e100_down(netdev_priv(netdev));
	return 0;
}
2490
2491static int __devinit e100_probe(struct pci_dev *pdev,
2492 const struct pci_device_id *ent)
2493{
2494 struct net_device *netdev;
2495 struct nic *nic;
2496 int err;
2497
2498 if(!(netdev = alloc_etherdev(sizeof(struct nic)))) {
2499 if(((1 << debug) - 1) & NETIF_MSG_PROBE)
2500 printk(KERN_ERR PFX "Etherdev alloc failed, abort.\n");
2501 return -ENOMEM;
2502 }
2503
2504 netdev->open = e100_open;
2505 netdev->stop = e100_close;
2506 netdev->hard_start_xmit = e100_xmit_frame;
2507 netdev->get_stats = e100_get_stats;
2508 netdev->set_multicast_list = e100_set_multicast_list;
2509 netdev->set_mac_address = e100_set_mac_address;
2510 netdev->change_mtu = e100_change_mtu;
2511 netdev->do_ioctl = e100_do_ioctl;
2512 SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops);
2513 netdev->tx_timeout = e100_tx_timeout;
2514 netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
2515 netdev->poll = e100_poll;
2516 netdev->weight = E100_NAPI_WEIGHT;
2517#ifdef CONFIG_NET_POLL_CONTROLLER
2518 netdev->poll_controller = e100_netpoll;
2519#endif
0eb5a34c 2520 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1da177e4
LT
2521
2522 nic = netdev_priv(netdev);
2523 nic->netdev = netdev;
2524 nic->pdev = pdev;
2525 nic->msg_enable = (1 << debug) - 1;
2526 pci_set_drvdata(pdev, netdev);
2527
2528 if((err = pci_enable_device(pdev))) {
2529 DPRINTK(PROBE, ERR, "Cannot enable PCI device, aborting.\n");
2530 goto err_out_free_dev;
2531 }
2532
2533 if(!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2534 DPRINTK(PROBE, ERR, "Cannot find proper PCI device "
2535 "base address, aborting.\n");
2536 err = -ENODEV;
2537 goto err_out_disable_pdev;
2538 }
2539
2540 if((err = pci_request_regions(pdev, DRV_NAME))) {
2541 DPRINTK(PROBE, ERR, "Cannot obtain PCI resources, aborting.\n");
2542 goto err_out_disable_pdev;
2543 }
2544
1e7f0bd8 2545 if((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
1da177e4
LT
2546 DPRINTK(PROBE, ERR, "No usable DMA configuration, aborting.\n");
2547 goto err_out_free_res;
2548 }
2549
2550 SET_MODULE_OWNER(netdev);
2551 SET_NETDEV_DEV(netdev, &pdev->dev);
2552
2553 nic->csr = ioremap(pci_resource_start(pdev, 0), sizeof(struct csr));
2554 if(!nic->csr) {
2555 DPRINTK(PROBE, ERR, "Cannot map device registers, aborting.\n");
2556 err = -ENOMEM;
2557 goto err_out_free_res;
2558 }
2559
2560 if(ent->driver_data)
2561 nic->flags |= ich;
2562 else
2563 nic->flags &= ~ich;
2564
2565 e100_get_defaults(nic);
2566
1f53367d 2567 /* locks must be initialized before calling hw_reset */
1da177e4
LT
2568 spin_lock_init(&nic->cb_lock);
2569 spin_lock_init(&nic->cmd_lock);
ac7c6669 2570 spin_lock_init(&nic->mdio_lock);
1da177e4
LT
2571
2572 /* Reset the device before pci_set_master() in case device is in some
2573 * funky state and has an interrupt pending - hint: we don't have the
2574 * interrupt handler registered yet. */
2575 e100_hw_reset(nic);
2576
2577 pci_set_master(pdev);
2578
2579 init_timer(&nic->watchdog);
2580 nic->watchdog.function = e100_watchdog;
2581 nic->watchdog.data = (unsigned long)nic;
2582 init_timer(&nic->blink_timer);
2583 nic->blink_timer.function = e100_blink_led;
2584 nic->blink_timer.data = (unsigned long)nic;
2585
c4028958 2586 INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);
2acdb1e0 2587
1da177e4
LT
2588 if((err = e100_alloc(nic))) {
2589 DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n");
2590 goto err_out_iounmap;
2591 }
2592
1da177e4
LT
2593 if((err = e100_eeprom_load(nic)))
2594 goto err_out_free;
2595
f92d8728
MC
2596 e100_phy_init(nic);
2597
1da177e4 2598 memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);
a92dd923
JL
2599 memcpy(netdev->perm_addr, nic->eeprom, ETH_ALEN);
2600 if(!is_valid_ether_addr(netdev->perm_addr)) {
1da177e4
LT
2601 DPRINTK(PROBE, ERR, "Invalid MAC address from "
2602 "EEPROM, aborting.\n");
2603 err = -EAGAIN;
2604 goto err_out_free;
2605 }
2606
2607 /* Wol magic packet can be enabled from eeprom */
2608 if((nic->mac >= mac_82558_D101_A4) &&
2609 (nic->eeprom[eeprom_id] & eeprom_id_wol))
2610 nic->flags |= wol_magic;
2611
6bdacb1a 2612 /* ack any pending wake events, disable PME */
3435dbce
JB
2613 err = pci_enable_wake(pdev, 0, 0);
2614 if (err)
2615 DPRINTK(PROBE, ERR, "Error clearing wake event\n");
1da177e4
LT
2616
2617 strcpy(netdev->name, "eth%d");
2618 if((err = register_netdev(netdev))) {
2619 DPRINTK(PROBE, ERR, "Cannot register net device, aborting.\n");
2620 goto err_out_free;
2621 }
2622
7c7459d1 2623 DPRINTK(PROBE, INFO, "addr 0x%llx, irq %d, "
1da177e4 2624 "MAC addr %02X:%02X:%02X:%02X:%02X:%02X\n",
7c7459d1 2625 (unsigned long long)pci_resource_start(pdev, 0), pdev->irq,
1da177e4
LT
2626 netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
2627 netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]);
2628
2629 return 0;
2630
2631err_out_free:
2632 e100_free(nic);
2633err_out_iounmap:
2634 iounmap(nic->csr);
2635err_out_free_res:
2636 pci_release_regions(pdev);
2637err_out_disable_pdev:
2638 pci_disable_device(pdev);
2639err_out_free_dev:
2640 pci_set_drvdata(pdev, NULL);
2641 free_netdev(netdev);
2642 return err;
2643}
2644
2645static void __devexit e100_remove(struct pci_dev *pdev)
2646{
2647 struct net_device *netdev = pci_get_drvdata(pdev);
2648
2649 if(netdev) {
2650 struct nic *nic = netdev_priv(netdev);
2651 unregister_netdev(netdev);
2652 e100_free(nic);
2653 iounmap(nic->csr);
2654 free_netdev(netdev);
2655 pci_release_regions(pdev);
2656 pci_disable_device(pdev);
2657 pci_set_drvdata(pdev, NULL);
2658 }
2659}
2660
e8e82b76 2661#ifdef CONFIG_PM
1da177e4
LT
2662static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
2663{
2664 struct net_device *netdev = pci_get_drvdata(pdev);
2665 struct nic *nic = netdev_priv(netdev);
2666
824545e7
AK
2667 if (netif_running(netdev))
2668 netif_poll_disable(nic->netdev);
e8e82b76
AK
2669 del_timer_sync(&nic->watchdog);
2670 netif_carrier_off(nic->netdev);
518d8338 2671 netif_device_detach(netdev);
a53a33da 2672
1da177e4 2673 pci_save_state(pdev);
e8e82b76
AK
2674
2675 if ((nic->flags & wol_magic) | e100_asf(nic)) {
2676 pci_enable_wake(pdev, PCI_D3hot, 1);
2677 pci_enable_wake(pdev, PCI_D3cold, 1);
2678 } else {
2679 pci_enable_wake(pdev, PCI_D3hot, 0);
2680 pci_enable_wake(pdev, PCI_D3cold, 0);
2681 }
975b366a 2682
1da177e4 2683 pci_disable_device(pdev);
518d8338 2684 free_irq(pdev->irq, netdev);
e8e82b76 2685 pci_set_power_state(pdev, PCI_D3hot);
1da177e4
LT
2686
2687 return 0;
2688}
2689
2690static int e100_resume(struct pci_dev *pdev)
2691{
2692 struct net_device *netdev = pci_get_drvdata(pdev);
2693 struct nic *nic = netdev_priv(netdev);
2694
975b366a 2695 pci_set_power_state(pdev, PCI_D0);
1da177e4 2696 pci_restore_state(pdev);
6bdacb1a 2697 /* ack any pending wake events, disable PME */
975b366a 2698 pci_enable_wake(pdev, 0, 0);
1da177e4
LT
2699
2700 netif_device_attach(netdev);
975b366a 2701 if (netif_running(netdev))
1da177e4
LT
2702 e100_up(nic);
2703
2704 return 0;
2705}
975b366a 2706#endif /* CONFIG_PM */
1da177e4 2707
d18c3db5 2708static void e100_shutdown(struct pci_dev *pdev)
6bdacb1a 2709{
e8e82b76
AK
2710 struct net_device *netdev = pci_get_drvdata(pdev);
2711 struct nic *nic = netdev_priv(netdev);
2712
824545e7
AK
2713 if (netif_running(netdev))
2714 netif_poll_disable(nic->netdev);
e8e82b76
AK
2715 del_timer_sync(&nic->watchdog);
2716 netif_carrier_off(nic->netdev);
2717
2718 if ((nic->flags & wol_magic) | e100_asf(nic)) {
2719 pci_enable_wake(pdev, PCI_D3hot, 1);
2720 pci_enable_wake(pdev, PCI_D3cold, 1);
2721 } else {
2722 pci_enable_wake(pdev, PCI_D3hot, 0);
2723 pci_enable_wake(pdev, PCI_D3cold, 0);
2724 }
2725
2726 pci_disable_device(pdev);
2727 pci_set_power_state(pdev, PCI_D3hot);
6bdacb1a
MC
2728}
2729
2cc30492
AK
2730/* ------------------ PCI Error Recovery infrastructure -------------- */
2731/**
2732 * e100_io_error_detected - called when PCI error is detected.
2733 * @pdev: Pointer to PCI device
2734 * @state: The current pci conneection state
2735 */
2736static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
2737{
2738 struct net_device *netdev = pci_get_drvdata(pdev);
2739
2740 /* Similar to calling e100_down(), but avoids adpater I/O. */
2741 netdev->stop(netdev);
2742
2743 /* Detach; put netif into state similar to hotplug unplug. */
2744 netif_poll_enable(netdev);
2745 netif_device_detach(netdev);
b1d26f24 2746 pci_disable_device(pdev);
2cc30492
AK
2747
2748 /* Request a slot reset. */
2749 return PCI_ERS_RESULT_NEED_RESET;
2750}
2751
2752/**
2753 * e100_io_slot_reset - called after the pci bus has been reset.
2754 * @pdev: Pointer to PCI device
2755 *
2756 * Restart the card from scratch.
2757 */
2758static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev)
2759{
2760 struct net_device *netdev = pci_get_drvdata(pdev);
2761 struct nic *nic = netdev_priv(netdev);
2762
2763 if (pci_enable_device(pdev)) {
2764 printk(KERN_ERR "e100: Cannot re-enable PCI device after reset.\n");
2765 return PCI_ERS_RESULT_DISCONNECT;
2766 }
2767 pci_set_master(pdev);
2768
2769 /* Only one device per card can do a reset */
2770 if (0 != PCI_FUNC(pdev->devfn))
2771 return PCI_ERS_RESULT_RECOVERED;
2772 e100_hw_reset(nic);
2773 e100_phy_init(nic);
2774
2775 return PCI_ERS_RESULT_RECOVERED;
2776}
2777
2778/**
2779 * e100_io_resume - resume normal operations
2780 * @pdev: Pointer to PCI device
2781 *
2782 * Resume normal operations after an error recovery
2783 * sequence has been completed.
2784 */
2785static void e100_io_resume(struct pci_dev *pdev)
2786{
2787 struct net_device *netdev = pci_get_drvdata(pdev);
2788 struct nic *nic = netdev_priv(netdev);
2789
2790 /* ack any pending wake events, disable PME */
2791 pci_enable_wake(pdev, 0, 0);
2792
2793 netif_device_attach(netdev);
2794 if (netif_running(netdev)) {
2795 e100_open(netdev);
2796 mod_timer(&nic->watchdog, jiffies);
2797 }
2798}
2799
2800static struct pci_error_handlers e100_err_handler = {
2801 .error_detected = e100_io_error_detected,
2802 .slot_reset = e100_io_slot_reset,
2803 .resume = e100_io_resume,
2804};
6bdacb1a 2805
1da177e4
LT
2806static struct pci_driver e100_driver = {
2807 .name = DRV_NAME,
2808 .id_table = e100_id_table,
2809 .probe = e100_probe,
2810 .remove = __devexit_p(e100_remove),
e8e82b76 2811#ifdef CONFIG_PM
975b366a 2812 /* Power Management hooks */
1da177e4
LT
2813 .suspend = e100_suspend,
2814 .resume = e100_resume,
2815#endif
05479938 2816 .shutdown = e100_shutdown,
2cc30492 2817 .err_handler = &e100_err_handler,
1da177e4
LT
2818};
2819
2820static int __init e100_init_module(void)
2821{
2822 if(((1 << debug) - 1) & NETIF_MSG_DRV) {
2823 printk(KERN_INFO PFX "%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
2824 printk(KERN_INFO PFX "%s\n", DRV_COPYRIGHT);
2825 }
29917620 2826 return pci_register_driver(&e100_driver);
1da177e4
LT
2827}
2828
2829static void __exit e100_cleanup_module(void)
2830{
2831 pci_unregister_driver(&e100_driver);
2832}
2833
2834module_init(e100_init_module);
2835module_exit(e100_cleanup_module);