/*******************************************************************************

  Intel PRO/100 Linux driver
  Copyright(c) 1999 - 2006 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

/*
 *	e100.c: Intel(R) PRO/100 ethernet driver
 *
 *	(Re)written 2003 by scott.feldman@intel.com.  Based loosely on
 *	original e100 driver, but better described as a munging of
 *	e100, e1000, eepro100, tg3, 8139cp, and other drivers.
 *
 *	References:
 *		Intel 8255x 10/100 Mbps Ethernet Controller Family,
 *		Open Source Software Developers Manual,
 *		http://sourceforge.net/projects/e1000
 *
 *
 *	                      Theory of Operation
 *
 *	I.   General
 *
 *	The driver supports the Intel(R) 10/100 Mbps PCI Fast Ethernet
 *	controller family, which includes the 82557, 82558, 82559, 82550,
 *	82551, and 82562 devices.  82558 and greater controllers
 *	integrate the Intel 82555 PHY.  The controllers are used in
 *	server and client network interface cards, as well as in
 *	LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx
 *	configurations.  8255x supports a 32-bit linear addressing
 *	mode and operates at a 33 MHz PCI clock rate.
 *
 *	II.  Driver Operation
 *
 *	Memory-mapped mode is used exclusively to access the device's
 *	shared-memory structure, the Control/Status Registers (CSR). All
 *	setup, configuration, and control of the device, including queuing
 *	of Tx, Rx, and configuration commands, is through the CSR.
 *	cmd_lock serializes accesses to the CSR command register.  cb_lock
 *	protects the shared Command Block List (CBL).
 *
 *	8255x is highly MII-compliant and all access to the PHY goes
 *	through the Management Data Interface (MDI).  Consequently, the
 *	driver leverages the mii.c library shared with other MII-compliant
 *	devices.
 *
 *	Big- and Little-Endian byte order as well as 32- and 64-bit
 *	archs are supported.  Weak-ordered memory and non-cache-coherent
 *	archs are supported.
 *
 *	III. Transmit
 *
 *	A Tx skb is mapped and hangs off of a TCB.  TCBs are linked
 *	together in a fixed-size ring (CBL) thus forming the flexible mode
 *	memory structure.  A TCB marked with the suspend-bit indicates
 *	the end of the ring.  The last TCB processed suspends the
 *	controller, and the controller can be restarted by issuing a CU
 *	resume command to continue from the suspend point, or a CU start
 *	command to start at a given position in the ring.
 *
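 *	A minimal sketch of queuing a new CB (illustrative only; the real
 *	code is in e100_exec_cb() below): set the suspend (S) bit in the
 *	new CB first, then clear it in the previous CB, then kick a
 *	suspended CU with a resume command:
 *
 *		cb->command |= cpu_to_le16(cb_s);
 *		wmb();
 *		cb->prev->command &= cpu_to_le16(~cb_s);
 *		e100_exec_cmd(nic, cuc_resume, cb->dma_addr);
 *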
 *	Non-Tx commands (config, multicast setup, etc) are linked
 *	into the CBL ring along with Tx commands.  The common structure
 *	used for both Tx and non-Tx commands is the Command Block (CB).
 *
 *	cb_to_use is the next CB to use for queuing a command; cb_to_clean
 *	is the next CB to check for completion; cb_to_send is the first
 *	CB to start on in case of a previous failure to resume.  CB cleanup
 *	happens in interrupt context in response to a CU interrupt.
 *	cbs_avail keeps track of the number of free CB resources available.
 *
 *	Hardware padding of short packets to the minimum packet size is
 *	enabled.  82557 pads with 7Eh, while the later controllers pad
 *	with 00h.
 *
 *	IV.  Receive
 *
 *	The Receive Frame Area (RFA) comprises a ring of Receive Frame
 *	Descriptors (RFD) + data buffer, thus forming the simplified mode
 *	memory structure.  Rx skbs are allocated to contain both the RFD
 *	and the data buffer, but the RFD is pulled off before the skb is
 *	indicated.  The data buffer is aligned such that encapsulated
 *	protocol headers are u32-aligned.  Since the RFD is part of the
 *	mapped shared memory, and completion status is contained within
 *	the RFD, the RFD must be dma_sync'ed to maintain a consistent
 *	view from software and hardware.
 *
 *	In order to keep updates to the RFD link field from colliding with
 *	hardware writes to mark packets complete, we use the feature that
 *	hardware will not write to a size 0 descriptor and mark the previous
 *	packet as end-of-list (EL).  After updating the link, we remove EL
 *	and only then restore the size such that hardware may use the
 *	previous-to-end RFD.
 *
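 *	The resulting safe order, as a sketch (hypothetical step names;
 *	the real code lives in the Rx allocation path):
 *
 *	  1. append the new RFD with size 0, so hardware will not use it
 *	  2. point the old tail RFD's link at the new RFD
 *	  3. clear the EL mark, then restore the size
 *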
 *	Under typical operation, the receive unit (RU) is started once,
 *	and the controller happily fills RFDs as frames arrive.  If
 *	replacement RFDs cannot be allocated, or the RU goes non-active,
 *	the RU must be restarted.  Frame arrival generates an interrupt,
 *	and Rx indication and re-allocation happen in the same context,
 *	therefore no locking is required.  A software-generated interrupt
 *	is generated from the watchdog to recover from a failed allocation
 *	scenario where all Rx resources have been indicated and none
 *	replaced.
 *
 *	V.   Miscellaneous
 *
 *	VLAN offloading of tagging, stripping and filtering is not
 *	supported, but the driver will accommodate the extra 4-byte VLAN tag
 *	for processing by upper layers.  Tx/Rx Checksum offloading is not
 *	supported.  Tx Scatter/Gather is not supported.  Jumbo Frames are
 *	not supported (hardware limitation).
 *
 *	MagicPacket(tm) WoL support is enabled/disabled via ethtool.
 *
 *	Thanks to JC (jchapman@katalix.com) for helping with
 *	testing/troubleshooting the development driver.
 *
 *	TODO:
 *	o several entry points race with dev->close
 *	o check for tx-no-resources/stop Q races with tx clean/wake Q
 *
 *	FIXES:
 *	2005/12/02 - Michael O'Donnell <Michael.ODonnell at stratus dot com>
 *	- Stratus87247: protect MDI control register manipulations
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/string.h>
#include <asm/unaligned.h>


#define DRV_NAME		"e100"
#define DRV_EXT			"-NAPI"
#define DRV_VERSION		"3.5.23-k6"DRV_EXT
#define DRV_DESCRIPTION		"Intel(R) PRO/100 Network Driver"
#define DRV_COPYRIGHT		"Copyright(c) 1999-2006 Intel Corporation"
#define PFX			DRV_NAME ": "

#define E100_WATCHDOG_PERIOD	(2 * HZ)
#define E100_NAPI_WEIGHT	16

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static int debug = 3;
static int eeprom_bad_csum_allow = 0;
static int use_io = 0;
module_param(debug, int, 0);
module_param(eeprom_bad_csum_allow, int, 0);
module_param(use_io, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
#define DPRINTK(nlevel, klevel, fmt, args...) \
	(void)((NETIF_MSG_##nlevel & nic->msg_enable) && \
	printk(KERN_##klevel PFX "%s: %s: " fmt, nic->netdev->name, \
		__func__ , ## args))

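/* Example use, as seen later in this file:
 *	DPRINTK(HW, DEBUG, "phy ID = 0x%08X\n", nic->phy);
 * compiles to a printk(KERN_DEBUG ...) that only fires when the
 * NETIF_MSG_HW bit is set in nic->msg_enable. */
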
#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
	PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
	PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich }
static struct pci_device_id e100_id_table[] = {
	INTEL_8255X_ETHERNET_DEVICE(0x1029, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1030, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1031, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1032, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1033, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1034, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1038, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1039, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103A, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103B, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103C, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103D, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103E, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x1050, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1051, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1052, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1053, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1054, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1055, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1056, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1057, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1059, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1064, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1065, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1066, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1067, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1068, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1069, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x106A, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x106B, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1091, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1092, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1093, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1094, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1095, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x2459, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x245D, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7),
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, e100_id_table);

enum mac {
	mac_82557_D100_A  = 0,
	mac_82557_D100_B  = 1,
	mac_82557_D100_C  = 2,
	mac_82558_D101_A4 = 4,
	mac_82558_D101_B0 = 5,
	mac_82559_D101M   = 8,
	mac_82559_D101S   = 9,
	mac_82550_D102    = 12,
	mac_82550_D102_C  = 13,
	mac_82551_E       = 14,
	mac_82551_F       = 15,
	mac_82551_10      = 16,
	mac_unknown       = 0xFF,
};

enum phy {
	phy_100a     = 0x000003E0,
	phy_100c     = 0x035002A8,
	phy_82555_tx = 0x015002A8,
	phy_nsc_tx   = 0x5C002000,
	phy_82562_et = 0x033002A8,
	phy_82562_em = 0x032002A8,
	phy_82562_ek = 0x031002A8,
	phy_82562_eh = 0x017002A8,
	phy_unknown  = 0xFFFFFFFF,
};

/* CSR (Control/Status Registers) */
struct csr {
	struct {
		u8 status;
		u8 stat_ack;
		u8 cmd_lo;
		u8 cmd_hi;
		u32 gen_ptr;
	} scb;
	u32 port;
	u16 flash_ctrl;
	u8 eeprom_ctrl_lo;
	u8 eeprom_ctrl_hi;
	u32 mdi_ctrl;
	u32 rx_dma_count;
};

enum scb_status {
	rus_no_res = 0x08,
	rus_ready  = 0x10,
	rus_mask   = 0x3C,
};

enum ru_state {
	RU_SUSPENDED = 0,
	RU_RUNNING   = 1,
	RU_UNINITIALIZED = -1,
};

enum scb_stat_ack {
	stat_ack_not_ours    = 0x00,
	stat_ack_sw_gen      = 0x04,
	stat_ack_rnr         = 0x10,
	stat_ack_cu_idle     = 0x20,
	stat_ack_frame_rx    = 0x40,
	stat_ack_cu_cmd_done = 0x80,
	stat_ack_not_present = 0xFF,
	stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx),
	stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done),
};

enum scb_cmd_hi {
	irq_mask_none = 0x00,
	irq_mask_all  = 0x01,
	irq_sw_gen    = 0x02,
};

enum scb_cmd_lo {
	cuc_nop        = 0x00,
	ruc_start      = 0x01,
	ruc_load_base  = 0x06,
	cuc_start      = 0x10,
	cuc_resume     = 0x20,
	cuc_dump_addr  = 0x40,
	cuc_dump_stats = 0x50,
	cuc_load_base  = 0x60,
	cuc_dump_reset = 0x70,
};

enum cuc_dump {
	cuc_dump_complete       = 0x0000A005,
	cuc_dump_reset_complete = 0x0000A007,
};

enum port {
	software_reset  = 0x0000,
	selftest        = 0x0001,
	selective_reset = 0x0002,
};

enum eeprom_ctrl_lo {
	eesk = 0x01,
	eecs = 0x02,
	eedi = 0x04,
	eedo = 0x08,
};

enum mdi_ctrl {
	mdi_write = 0x04000000,
	mdi_read  = 0x08000000,
	mdi_ready = 0x10000000,
};

enum eeprom_op {
	op_write = 0x05,
	op_read  = 0x06,
	op_ewds  = 0x10,
	op_ewen  = 0x13,
};

enum eeprom_offsets {
	eeprom_cnfg_mdix  = 0x03,
	eeprom_id         = 0x0A,
	eeprom_config_asf = 0x0D,
	eeprom_smbus_addr = 0x90,
};

enum eeprom_cnfg_mdix {
	eeprom_mdix_enabled = 0x0080,
};

enum eeprom_id {
	eeprom_id_wol = 0x0020,
};

enum eeprom_config_asf {
	eeprom_asf = 0x8000,
	eeprom_gcl = 0x4000,
};

enum cb_status {
	cb_complete = 0x8000,
	cb_ok       = 0x2000,
};

enum cb_command {
	cb_nop    = 0x0000,
	cb_iaaddr = 0x0001,
	cb_config = 0x0002,
	cb_multi  = 0x0003,
	cb_tx     = 0x0004,
	cb_ucode  = 0x0005,
	cb_dump   = 0x0006,
	cb_tx_sf  = 0x0008,
	cb_cid    = 0x1f00,
	cb_i      = 0x2000,
	cb_s      = 0x4000,
	cb_el     = 0x8000,
};

struct rfd {
	__le16 status;
	__le16 command;
	__le32 link;
	__le32 rbd;
	__le16 actual_size;
	__le16 size;
};

struct rx {
	struct rx *next, *prev;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
};

#if defined(__BIG_ENDIAN_BITFIELD)
#define X(a,b)	b,a
#else
#define X(a,b)	a,b
#endif
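/* Example: on a little-endian build,
 *	u8 X(rx_dma_max_count:7, pad4:1);
 * keeps rx_dma_max_count in the low-order bits, while a big-endian
 * bitfield build swaps the pair so the byte/bit layout of the config
 * block as seen by the device stays the same. */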
struct config {
/*0*/	u8 X(byte_count:6, pad0:2);
/*1*/	u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1);
/*2*/	u8 adaptive_ifs;
/*3*/	u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1),
	   term_write_cache_line:1), pad3:4);
/*4*/	u8 X(rx_dma_max_count:7, pad4:1);
/*5*/	u8 X(tx_dma_max_count:7, dma_max_count_enable:1);
/*6*/	u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1),
	   tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1),
	   rx_discard_overruns:1), rx_save_bad_frames:1);
/*7*/	u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2),
	   pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1),
	   tx_dynamic_tbd:1);
/*8*/	u8 X(X(mii_mode:1, pad8:6), csma_disabled:1);
/*9*/	u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1),
	   link_status_wake:1), arp_wake:1), mcmatch_wake:1);
/*10*/	u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2),
	   loopback:2);
/*11*/	u8 X(linear_priority:3, pad11:5);
/*12*/	u8 X(X(linear_priority_mode:1, pad12:3), ifs:4);
/*13*/	u8 ip_addr_lo;
/*14*/	u8 ip_addr_hi;
/*15*/	u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1),
	   wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1),
	   pad15_2:1), crs_or_cdt:1);
/*16*/	u8 fc_delay_lo;
/*17*/	u8 fc_delay_hi;
/*18*/	u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1),
	   rx_long_ok:1), fc_priority_threshold:3), pad18:1);
/*19*/	u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1),
	   fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1),
	   full_duplex_force:1), full_duplex_pin:1);
/*20*/	u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1);
/*21*/	u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4);
/*22*/	u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6);
	u8 pad_d102[9];
};

#define E100_MAX_MULTICAST_ADDRS	64
struct multi {
	__le16 count;
	u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/];
};

/* Important: keep total struct u32-aligned */
#define UCODE_SIZE			134
struct cb {
	__le16 status;
	__le16 command;
	__le32 link;
	union {
		u8 iaaddr[ETH_ALEN];
		__le32 ucode[UCODE_SIZE];
		struct config config;
		struct multi multi;
		struct {
			u32 tbd_array;
			u16 tcb_byte_count;
			u8 threshold;
			u8 tbd_count;
			struct {
				__le32 buf_addr;
				__le16 size;
				u16 eol;
			} tbd;
		} tcb;
		__le32 dump_buffer_addr;
	} u;
	struct cb *next, *prev;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
};

enum loopback {
	lb_none = 0, lb_mac = 1, lb_phy = 3,
};

struct stats {
	__le32 tx_good_frames, tx_max_collisions, tx_late_collisions,
		tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions,
		tx_multiple_collisions, tx_total_collisions;
	__le32 rx_good_frames, rx_crc_errors, rx_alignment_errors,
		rx_resource_errors, rx_overrun_errors, rx_cdt_errors,
		rx_short_frame_errors;
	__le32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported;
	__le16 xmt_tco_frames, rcv_tco_frames;
	__le32 complete;
};

struct mem {
	struct {
		u32 signature;
		u32 result;
	} selftest;
	struct stats stats;
	u8 dump_buf[596];
};

struct param_range {
	u32 min;
	u32 max;
	u32 count;
};

struct params {
	struct param_range rfds;
	struct param_range cbs;
};

struct nic {
	/* Begin: frequently used values: keep adjacent for cache effect */
	u32 msg_enable				____cacheline_aligned;
	struct net_device *netdev;
	struct pci_dev *pdev;

	struct rx *rxs				____cacheline_aligned;
	struct rx *rx_to_use;
	struct rx *rx_to_clean;
	struct rfd blank_rfd;
	enum ru_state ru_running;

	spinlock_t cb_lock			____cacheline_aligned;
	spinlock_t cmd_lock;
	struct csr __iomem *csr;
	enum scb_cmd_lo cuc_cmd;
	unsigned int cbs_avail;
	struct napi_struct napi;
	struct cb *cbs;
	struct cb *cb_to_use;
	struct cb *cb_to_send;
	struct cb *cb_to_clean;
	__le16 tx_command;
	/* End: frequently used values: keep adjacent for cache effect */

	enum {
		ich                = (1 << 0),
		promiscuous        = (1 << 1),
		multicast_all      = (1 << 2),
		wol_magic          = (1 << 3),
		ich_10h_workaround = (1 << 4),
	} flags					____cacheline_aligned;

	enum mac mac;
	enum phy phy;
	struct params params;
	struct timer_list watchdog;
	struct timer_list blink_timer;
	struct mii_if_info mii;
	struct work_struct tx_timeout_task;
	enum loopback loopback;

	struct mem *mem;
	dma_addr_t dma_addr;

	dma_addr_t cbs_dma_addr;
	u8 adaptive_ifs;
	u8 tx_threshold;
	u32 tx_frames;
	u32 tx_collisions;
	u32 tx_deferred;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_fc_pause;
	u32 tx_tco_frames;

	u32 rx_fc_pause;
	u32 rx_fc_unsupported;
	u32 rx_tco_frames;
	u32 rx_over_length_errors;

	u16 leds;
	u16 eeprom_wc;
	__le16 eeprom[256];
	spinlock_t mdio_lock;
};

static inline void e100_write_flush(struct nic *nic)
{
	/* Flush previous PCI writes through intermediate bridges
	 * by doing a benign read */
	(void)ioread8(&nic->csr->scb.status);
}

static void e100_enable_irq(struct nic *nic)
{
	unsigned long flags;

	spin_lock_irqsave(&nic->cmd_lock, flags);
	iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irqrestore(&nic->cmd_lock, flags);
}

static void e100_disable_irq(struct nic *nic)
{
	unsigned long flags;

	spin_lock_irqsave(&nic->cmd_lock, flags);
	iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irqrestore(&nic->cmd_lock, flags);
}

static void e100_hw_reset(struct nic *nic)
{
	/* Put CU and RU into idle with a selective reset to get
	 * device off of PCI bus */
	iowrite32(selective_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Now fully reset device */
	iowrite32(software_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Mask off our interrupt line - it's unmasked after reset */
	e100_disable_irq(nic);
}

static int e100_self_test(struct nic *nic)
{
	u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest);

	/* Passing the self-test is a pretty good indication
	 * that the device can DMA to/from host memory */

	nic->mem->selftest.signature = 0;
	nic->mem->selftest.result = 0xFFFFFFFF;

	iowrite32(selftest | dma_addr, &nic->csr->port);
	e100_write_flush(nic);
	/* Wait 10 msec for self-test to complete */
	msleep(10);

	/* Interrupts are enabled after self-test */
	e100_disable_irq(nic);

	/* Check results of self-test */
	if(nic->mem->selftest.result != 0) {
		DPRINTK(HW, ERR, "Self-test failed: result=0x%08X\n",
			nic->mem->selftest.result);
		return -ETIMEDOUT;
	}
	if(nic->mem->selftest.signature == 0) {
		DPRINTK(HW, ERR, "Self-test failed: timed out\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, __le16 data)
{
	u32 cmd_addr_data[3];
	u8 ctrl;
	int i, j;

	/* Three cmds: write/erase enable, write data, write/erase disable */
	cmd_addr_data[0] = op_ewen << (addr_len - 2);
	cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) |
		le16_to_cpu(data);
	cmd_addr_data[2] = op_ewds << (addr_len - 2);

	/* Bit-bang cmds to write word to eeprom */
	for(j = 0; j < 3; j++) {

		/* Chip select */
		iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);

		for(i = 31; i >= 0; i--) {
			ctrl = (cmd_addr_data[j] & (1 << i)) ?
				eecs | eedi : eecs;
			iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
			e100_write_flush(nic); udelay(4);

			iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
			e100_write_flush(nic); udelay(4);
		}
		/* Wait 10 msec for cmd to complete */
		msleep(10);

		/* Chip deselect */
		iowrite8(0, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);
	}
};

/* General technique stolen from the eepro100 driver - very clever */
static __le16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
{
	u32 cmd_addr_data;
	u16 data = 0;
	u8 ctrl;
	int i;

	cmd_addr_data = ((op_read << *addr_len) | addr) << 16;

	/* Chip select */
	iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
	e100_write_flush(nic); udelay(4);

	/* Bit-bang to read word from eeprom */
	for(i = 31; i >= 0; i--) {
		ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs;
		iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);

		iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);

		/* Eeprom drives a dummy zero to EEDO after receiving
		 * complete address.  Use this to adjust addr_len. */
		ctrl = ioread8(&nic->csr->eeprom_ctrl_lo);
		if(!(ctrl & eedo) && i > 16) {
			*addr_len -= (i - 16);
			i = 17;
		}

		data = (data << 1) | (ctrl & eedo ? 1 : 0);
	}

	/* Chip deselect */
	iowrite8(0, &nic->csr->eeprom_ctrl_lo);
	e100_write_flush(nic); udelay(4);

	return cpu_to_le16(data);
};

/* Load entire EEPROM image into driver cache and validate checksum */
static int e100_eeprom_load(struct nic *nic)
{
	u16 addr, addr_len = 8, checksum = 0;

	/* Try reading with an 8-bit addr len to discover actual addr len */
	e100_eeprom_read(nic, &addr_len, 0);
	nic->eeprom_wc = 1 << addr_len;

	for(addr = 0; addr < nic->eeprom_wc; addr++) {
		nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr);
		if(addr < nic->eeprom_wc - 1)
			checksum += le16_to_cpu(nic->eeprom[addr]);
	}

	/* The checksum, stored in the last word, is calculated such that
	 * the sum of words should be 0xBABA */
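	/* e.g. if words 0..(wc-2) sum to 0x1234 (mod 2^16), the stored
	 * checksum word must be 0xBABA - 0x1234 = 0xA886 */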
	if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) {
		DPRINTK(PROBE, ERR, "EEPROM corrupted\n");
		if (!eeprom_bad_csum_allow)
			return -EAGAIN;
	}

	return 0;
}

/* Save (portion of) driver EEPROM cache to device and update checksum */
static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
{
	u16 addr, addr_len = 8, checksum = 0;

	/* Try reading with an 8-bit addr len to discover actual addr len */
	e100_eeprom_read(nic, &addr_len, 0);
	nic->eeprom_wc = 1 << addr_len;

	if(start + count >= nic->eeprom_wc)
		return -EINVAL;

	for(addr = start; addr < start + count; addr++)
		e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]);

	/* The checksum, stored in the last word, is calculated such that
	 * the sum of words should be 0xBABA */
	for(addr = 0; addr < nic->eeprom_wc - 1; addr++)
		checksum += le16_to_cpu(nic->eeprom[addr]);
	nic->eeprom[nic->eeprom_wc - 1] = cpu_to_le16(0xBABA - checksum);
	e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1,
		nic->eeprom[nic->eeprom_wc - 1]);

	return 0;
}

#define E100_WAIT_SCB_TIMEOUT	20000	/* we might have to wait 100ms!!! */
#define E100_WAIT_SCB_FAST	20	/* delay like the old code */
static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
{
	unsigned long flags;
	unsigned int i;
	int err = 0;

	spin_lock_irqsave(&nic->cmd_lock, flags);

	/* Previous command is accepted when SCB clears */
	for(i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) {
		if(likely(!ioread8(&nic->csr->scb.cmd_lo)))
			break;
		cpu_relax();
		if(unlikely(i > E100_WAIT_SCB_FAST))
			udelay(5);
	}
	if(unlikely(i == E100_WAIT_SCB_TIMEOUT)) {
		err = -EAGAIN;
		goto err_unlock;
	}

	if(unlikely(cmd != cuc_resume))
		iowrite32(dma_addr, &nic->csr->scb.gen_ptr);
	iowrite8(cmd, &nic->csr->scb.cmd_lo);

err_unlock:
	spin_unlock_irqrestore(&nic->cmd_lock, flags);

	return err;
}

static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
	void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
{
	struct cb *cb;
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&nic->cb_lock, flags);

	if(unlikely(!nic->cbs_avail)) {
		err = -ENOMEM;
		goto err_unlock;
	}

	cb = nic->cb_to_use;
	nic->cb_to_use = cb->next;
	nic->cbs_avail--;
	cb->skb = skb;

	if(unlikely(!nic->cbs_avail))
		err = -ENOSPC;

	cb_prepare(nic, cb, skb);

	/* Order is important otherwise we'll be in a race with h/w:
	 * set S-bit in current first, then clear S-bit in previous. */
	cb->command |= cpu_to_le16(cb_s);
	wmb();
	cb->prev->command &= cpu_to_le16(~cb_s);

	while(nic->cb_to_send != nic->cb_to_use) {
		if(unlikely(e100_exec_cmd(nic, nic->cuc_cmd,
			nic->cb_to_send->dma_addr))) {
			/* Ok, here's where things get sticky.  It's
			 * possible that we can't schedule the command
			 * because the controller is too busy, so
			 * let's just queue the command and try again
			 * when another command is scheduled. */
			if(err == -ENOSPC) {
				/* request a reset */
				schedule_work(&nic->tx_timeout_task);
			}
			break;
		} else {
			nic->cuc_cmd = cuc_resume;
			nic->cb_to_send = nic->cb_to_send->next;
		}
	}

err_unlock:
	spin_unlock_irqrestore(&nic->cb_lock, flags);

	return err;
}

static u16 mdio_ctrl(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
{
	u32 data_out = 0;
	unsigned int i;
	unsigned long flags;

	/*
	 * Stratus87247: we shouldn't be writing the MDI control
	 * register until the Ready bit shows True.  Also, since
	 * manipulation of the MDI control registers is a multi-step
	 * procedure it should be done under lock.
	 */
	spin_lock_irqsave(&nic->mdio_lock, flags);
	for (i = 100; i; --i) {
		if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready)
			break;
		udelay(20);
	}
	if (unlikely(!i)) {
		printk("e100.mdio_ctrl(%s) won't go Ready\n",
			nic->netdev->name);
		spin_unlock_irqrestore(&nic->mdio_lock, flags);
		return 0;	/* No way to indicate timeout error */
	}
	iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl);

	for (i = 0; i < 100; i++) {
		udelay(20);
		if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready)
			break;
	}
	spin_unlock_irqrestore(&nic->mdio_lock, flags);
	DPRINTK(HW, DEBUG,
		"%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
		dir == mdi_read ? "READ" : "WRITE", addr, reg, data, data_out);
	return (u16)data_out;
}

static int mdio_read(struct net_device *netdev, int addr, int reg)
{
	return mdio_ctrl(netdev_priv(netdev), addr, mdi_read, reg, 0);
}

static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
{
	mdio_ctrl(netdev_priv(netdev), addr, mdi_write, reg, data);
}

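/* These two wrappers are handed to the generic mii library through
 * nic->mii.mdio_read/mdio_write in e100_get_defaults() below. */
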
static void e100_get_defaults(struct nic *nic)
{
	struct param_range rfds = { .min = 16, .max = 256, .count = 256 };
	struct param_range cbs  = { .min = 64, .max = 256, .count = 128 };

	/* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */
	nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->pdev->revision;
	if(nic->mac == mac_unknown)
		nic->mac = mac_82557_D100_A;

	nic->params.rfds = rfds;
	nic->params.cbs = cbs;

	/* Quadwords to DMA into FIFO before starting frame transmit */
	nic->tx_threshold = 0xE0;

	/* no interrupt for every tx completion, delay = 256us if not 557 */
	nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf |
		((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));

	/* Template for a freshly allocated RFD */
	nic->blank_rfd.command = 0;
	nic->blank_rfd.rbd = cpu_to_le32(0xFFFFFFFF);
	nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN);

	/* MII setup */
	nic->mii.phy_id_mask = 0x1F;
	nic->mii.reg_num_mask = 0x1F;
	nic->mii.dev = nic->netdev;
	nic->mii.mdio_read = mdio_read;
	nic->mii.mdio_write = mdio_write;
}

static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	struct config *config = &cb->u.config;
	u8 *c = (u8 *)config;

	cb->command = cpu_to_le16(cb_config);

	memset(config, 0, sizeof(struct config));

	config->byte_count = 0x16;		/* bytes in this struct */
	config->rx_fifo_limit = 0x8;		/* bytes in FIFO before DMA */
	config->direct_rx_dma = 0x1;		/* reserved */
	config->standard_tcb = 0x1;		/* 1=standard, 0=extended */
	config->standard_stat_counter = 0x1;	/* 1=standard, 0=extended */
	config->rx_discard_short_frames = 0x1;	/* 1=discard, 0=pass */
	config->tx_underrun_retry = 0x3;	/* # of underrun retries */
	config->mii_mode = 0x1;			/* 1=MII mode, 0=503 mode */
	config->pad10 = 0x6;
	config->no_source_addr_insertion = 0x1;	/* 1=no, 0=yes */
	config->preamble_length = 0x2;		/* 0=1, 1=3, 2=7, 3=15 bytes */
	config->ifs = 0x6;			/* x16 = inter frame spacing */
	config->ip_addr_hi = 0xF2;		/* ARP IP filter - not used */
	config->pad15_1 = 0x1;
	config->pad15_2 = 0x1;
	config->crs_or_cdt = 0x0;		/* 0=CRS only, 1=CRS or CDT */
	config->fc_delay_hi = 0x40;		/* time delay for fc frame */
	config->tx_padding = 0x1;		/* 1=pad short frames */
	config->fc_priority_threshold = 0x7;	/* 7=priority fc disabled */
	config->pad18 = 0x1;
	config->full_duplex_pin = 0x1;		/* 1=examine FDX# pin */
	config->pad20_1 = 0x1F;
	config->fc_priority_location = 0x1;	/* 1=byte#31, 0=byte#19 */
	config->pad21_1 = 0x5;

	config->adaptive_ifs = nic->adaptive_ifs;
	config->loopback = nic->loopback;

	if(nic->mii.force_media && nic->mii.full_duplex)
		config->full_duplex_force = 0x1;	/* 1=force, 0=auto */

	if(nic->flags & promiscuous || nic->loopback) {
		config->rx_save_bad_frames = 0x1;	/* 1=save, 0=discard */
		config->rx_discard_short_frames = 0x0;	/* 1=discard, 0=save */
		config->promiscuous_mode = 0x1;		/* 1=on, 0=off */
	}

	if(nic->flags & multicast_all)
		config->multicast_all = 0x1;		/* 1=accept, 0=no */

	/* disable WoL when up */
	if(netif_running(nic->netdev) || !(nic->flags & wol_magic))
		config->magic_packet_disable = 0x1;	/* 1=off, 0=on */

	if(nic->mac >= mac_82558_D101_A4) {
		config->fc_disable = 0x1;	/* 1=Tx fc off, 0=Tx fc on */
		config->mwi_enable = 0x1;	/* 1=enable, 0=disable */
		config->standard_tcb = 0x0;	/* 1=standard, 0=extended */
		config->rx_long_ok = 0x1;	/* 1=VLANs ok, 0=standard */
		if (nic->mac >= mac_82559_D101M) {
			config->tno_intr = 0x1;		/* TCO stats enable */
			/* Enable TCO in extended config */
			if (nic->mac >= mac_82551_10) {
				config->byte_count = 0x20; /* extended bytes */
				config->rx_d102_mode = 0x1; /* GMRC for TCO */
			}
		} else {
			config->standard_stat_counter = 0x0;
		}
	}

	DPRINTK(HW, DEBUG, "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
	DPRINTK(HW, DEBUG, "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]);
	DPRINTK(HW, DEBUG, "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
}

/********************************************************/
/*  Micro code for 8086:1229 Rev 8                      */
/********************************************************/

/*  Parameter values for the D101M B-step  */
#define D101M_CPUSAVER_TIMER_DWORD		78
#define D101M_CPUSAVER_BUNDLE_DWORD		65
#define D101M_CPUSAVER_MIN_SIZE_DWORD		126

#define D101M_B_RCVBUNDLE_UCODE \
{\
0x00550215, 0xFFFF0437, 0xFFFFFFFF, 0x06A70789, 0xFFFFFFFF, 0x0558FFFF, \
0x000C0001, 0x00101312, 0x000C0008, 0x00380216, \
0x0010009C, 0x00204056, 0x002380CC, 0x00380056, \
0x0010009C, 0x00244C0B, 0x00000800, 0x00124818, \
0x00380438, 0x00000000, 0x00140000, 0x00380555, \
0x00308000, 0x00100662, 0x00100561, 0x000E0408, \
0x00134861, 0x000C0002, 0x00103093, 0x00308000, \
0x00100624, 0x00100561, 0x000E0408, 0x00100861, \
0x000C007E, 0x00222C21, 0x000C0002, 0x00103093, \
0x00380C7A, 0x00080000, 0x00103090, 0x00380C7A, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x0010009C, 0x00244C2D, 0x00010004, 0x00041000, \
0x003A0437, 0x00044010, 0x0038078A, 0x00000000, \
0x00100099, 0x00206C7A, 0x0010009C, 0x00244C48, \
0x00130824, 0x000C0001, 0x00101213, 0x00260C75, \
0x00041000, 0x00010004, 0x00130826, 0x000C0006, \
0x002206A8, 0x0013C926, 0x00101313, 0x003806A8, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00080600, 0x00101B10, 0x00050004, 0x00100826, \
0x00101210, 0x00380C34, 0x00000000, 0x00000000, \
0x0021155B, 0x00100099, 0x00206559, 0x0010009C, \
0x00244559, 0x00130836, 0x000C0000, 0x00220C62, \
0x000C0001, 0x00101B13, 0x00229C0E, 0x00210C0E, \
0x00226C0E, 0x00216C0E, 0x0022FC0E, 0x00215C0E, \
0x00214C0E, 0x00380555, 0x00010004, 0x00041000, \
0x00278C67, 0x00040800, 0x00018100, 0x003A0437, \
0x00130826, 0x000C0001, 0x00220559, 0x00101313, \
0x00380559, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00130831, 0x0010090B, 0x00124813, \
0x000CFF80, 0x002606AB, 0x00041000, 0x00010004, \
0x003806A8, 0x00000000, 0x00000000, 0x00000000, \
}

/********************************************************/
/*  Micro code for 8086:1229 Rev 9                      */
/********************************************************/

/*  Parameter values for the D101S  */
#define D101S_CPUSAVER_TIMER_DWORD		78
#define D101S_CPUSAVER_BUNDLE_DWORD		67
#define D101S_CPUSAVER_MIN_SIZE_DWORD		128

#define D101S_RCVBUNDLE_UCODE \
{\
0x00550242, 0xFFFF047E, 0xFFFFFFFF, 0x06FF0818, 0xFFFFFFFF, 0x05A6FFFF, \
0x000C0001, 0x00101312, 0x000C0008, 0x00380243, \
0x0010009C, 0x00204056, 0x002380D0, 0x00380056, \
0x0010009C, 0x00244F8B, 0x00000800, 0x00124818, \
0x0038047F, 0x00000000, 0x00140000, 0x003805A3, \
0x00308000, 0x00100610, 0x00100561, 0x000E0408, \
0x00134861, 0x000C0002, 0x00103093, 0x00308000, \
0x00100624, 0x00100561, 0x000E0408, 0x00100861, \
0x000C007E, 0x00222FA1, 0x000C0002, 0x00103093, \
0x00380F90, 0x00080000, 0x00103090, 0x00380F90, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x0010009C, 0x00244FAD, 0x00010004, 0x00041000, \
0x003A047E, 0x00044010, 0x00380819, 0x00000000, \
0x00100099, 0x00206FFD, 0x0010009A, 0x0020AFFD, \
0x0010009C, 0x00244FC8, 0x00130824, 0x000C0001, \
0x00101213, 0x00260FF7, 0x00041000, 0x00010004, \
0x00130826, 0x000C0006, 0x00220700, 0x0013C926, \
0x00101313, 0x00380700, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00080600, 0x00101B10, 0x00050004, 0x00100826, \
0x00101210, 0x00380FB6, 0x00000000, 0x00000000, \
0x002115A9, 0x00100099, 0x002065A7, 0x0010009A, \
0x0020A5A7, 0x0010009C, 0x002445A7, 0x00130836, \
0x000C0000, 0x00220FE4, 0x000C0001, 0x00101B13, \
0x00229F8E, 0x00210F8E, 0x00226F8E, 0x00216F8E, \
0x0022FF8E, 0x00215F8E, 0x00214F8E, 0x003805A3, \
0x00010004, 0x00041000, 0x00278FE9, 0x00040800, \
0x00018100, 0x003A047E, 0x00130826, 0x000C0001, \
0x002205A7, 0x00101313, 0x003805A7, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00130831, \
0x0010090B, 0x00124813, 0x000CFF80, 0x00260703, \
0x00041000, 0x00010004, 0x00380700  \
}

/********************************************************/
/*  Micro code for the 8086:1229 Rev F/10               */
/********************************************************/

/*  Parameter values for the D102 E-step  */
#define D102_E_CPUSAVER_TIMER_DWORD		42
#define D102_E_CPUSAVER_BUNDLE_DWORD		54
#define D102_E_CPUSAVER_MIN_SIZE_DWORD		46

#define D102_E_RCVBUNDLE_UCODE \
{\
0x007D028F, 0x0E4204F9, 0x14ED0C85, 0x14FA14E9, 0x0EF70E36, 0x1FFF1FFF, \
0x00E014B9, 0x00000000, 0x00000000, 0x00000000, \
0x00E014BD, 0x00000000, 0x00000000, 0x00000000, \
0x00E014D5, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00E014C1, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00E014C8, 0x00000000, 0x00000000, 0x00000000, \
0x00200600, 0x00E014EE, 0x00000000, 0x00000000, \
0x0030FF80, 0x00940E46, 0x00038200, 0x00102000, \
0x00E00E43, 0x00000000, 0x00000000, 0x00000000, \
0x00300006, 0x00E014FB, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00906E41, 0x00800E3C, 0x00E00E39, 0x00000000, \
0x00906EFD, 0x00900EFD, 0x00E00EF8, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
}

static void e100_setup_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
/* *INDENT-OFF* */
	static struct {
		u32 ucode[UCODE_SIZE + 1];
		u8 mac;
		u8 timer_dword;
		u8 bundle_dword;
		u8 min_size_dword;
	} ucode_opts[] = {
		{ D101M_B_RCVBUNDLE_UCODE,
		  mac_82559_D101M,
		  D101M_CPUSAVER_TIMER_DWORD,
		  D101M_CPUSAVER_BUNDLE_DWORD,
		  D101M_CPUSAVER_MIN_SIZE_DWORD },
		{ D101S_RCVBUNDLE_UCODE,
		  mac_82559_D101S,
		  D101S_CPUSAVER_TIMER_DWORD,
		  D101S_CPUSAVER_BUNDLE_DWORD,
		  D101S_CPUSAVER_MIN_SIZE_DWORD },
		{ D102_E_RCVBUNDLE_UCODE,
		  mac_82551_F,
		  D102_E_CPUSAVER_TIMER_DWORD,
		  D102_E_CPUSAVER_BUNDLE_DWORD,
		  D102_E_CPUSAVER_MIN_SIZE_DWORD },
		{ D102_E_RCVBUNDLE_UCODE,
		  mac_82551_10,
		  D102_E_CPUSAVER_TIMER_DWORD,
		  D102_E_CPUSAVER_BUNDLE_DWORD,
		  D102_E_CPUSAVER_MIN_SIZE_DWORD },
		{ {0}, 0, 0, 0, 0}
	}, *opts;
/* *INDENT-ON* */

/*************************************************************************
*  CPUSaver parameters
*
*  All CPUSaver parameters are 16-bit literals that are part of a
*  "move immediate value" instruction.  By changing the value of
*  the literal in the instruction before the code is loaded, the
*  driver can change the algorithm.
*
*  INTDELAY - This loads the dead-man timer with its initial value.
*    When this timer expires the interrupt is asserted, and the
*    timer is reset each time a new packet is received.  (see
*    BUNDLEMAX below to set the limit on number of chained packets)
*    The current default is 0x600 or 1536.  Experiments show that
*    the value should probably stay within the 0x200 - 0x1000 range.
*
*  BUNDLEMAX -
*    This sets the maximum number of frames that will be bundled.  In
*    some situations, such as the TCP windowing algorithm, it may be
*    better to limit the growth of the bundle size than let it go as
*    high as it can, because that could cause too much added latency.
*    The default is six, because this is the number of packets in the
*    default TCP window size.  A value of 1 would make CPUSaver indicate
*    an interrupt for every frame received.  If you do not want to put
*    a limit on the bundle size, set this value to xFFFF.
*
*  BUNDLESMALL -
*    This contains a bit-mask describing the minimum size frame that
*    will be bundled.  The default masks the lower 7 bits, which means
*    that any frame less than 128 bytes in length will not be bundled,
*    but will instead immediately generate an interrupt.  This does
*    not affect the current bundle in any way.  Any frame that is 128
*    bytes or larger will be bundled normally.  This feature is meant
*    to provide immediate indication of ACK frames in a TCP environment.
*    Customers were seeing poor performance when a machine with CPUSaver
*    enabled was sending but not receiving.  The delay introduced when
*    the ACKs were received was enough to reduce total throughput, because
*    the sender would sit idle until the ACK was finally seen.
*
*    The current default is 0xFF80, which masks out the lower 7 bits.
*    This means that any frame which is x7F (127) bytes or smaller
*    will cause an immediate interrupt.  Because this value must be a
*    bit mask, there are only a few valid values that can be used.  To
*    turn this feature off, the driver can write the value xFFFF to the
*    lower word of this instruction (in the same way that the other
*    parameters are used).  Likewise, a value of 0xF800 (2047) would
*    cause an interrupt to be generated for every frame, because all
*    standard Ethernet frames are <= 2047 bytes in length.
*************************************************************************/

/* if you wish to disable the ucode functionality, while maintaining the
 * workarounds it provides, set the following defines to:
 * BUNDLESMALL 0
 * BUNDLEMAX 1
 * INTDELAY 1
 */
#define BUNDLESMALL 1
#define BUNDLEMAX (u16)6
#define INTDELAY (u16)1536 /* 0x600 */
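
/* Example: with the defaults above, the patch loop below rewrites the
 * D101M image's dword 78 (D101M_CPUSAVER_TIMER_DWORD) as
 * (old & 0xFFFF0000) | 0x600, i.e. the "move immediate" literal that
 * loads the dead-man timer with 1536. */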

	/* do not load u-code for ICH devices */
	if (nic->flags & ich)
		goto noloaducode;

	/* Search for ucode match against h/w revision */
	for (opts = ucode_opts; opts->mac; opts++) {
		int i;
		u32 *ucode = opts->ucode;
		if (nic->mac != opts->mac)
			continue;

		/* Insert user-tunable settings */
		ucode[opts->timer_dword] &= 0xFFFF0000;
		ucode[opts->timer_dword] |= INTDELAY;
		ucode[opts->bundle_dword] &= 0xFFFF0000;
		ucode[opts->bundle_dword] |= BUNDLEMAX;
		ucode[opts->min_size_dword] &= 0xFFFF0000;
		ucode[opts->min_size_dword] |= (BUNDLESMALL) ? 0xFFFF : 0xFF80;

		for (i = 0; i < UCODE_SIZE; i++)
			cb->u.ucode[i] = cpu_to_le32(ucode[i]);
		cb->command = cpu_to_le16(cb_ucode | cb_el);
		return;
	}

noloaducode:
	cb->command = cpu_to_le16(cb_nop | cb_el);
}

static inline int e100_exec_cb_wait(struct nic *nic, struct sk_buff *skb,
	void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
{
	int err = 0, counter = 50;
	struct cb *cb = nic->cb_to_clean;

	if ((err = e100_exec_cb(nic, NULL, e100_setup_ucode)))
		DPRINTK(PROBE, ERR, "ucode cmd failed with error %d\n", err);

	/* must restart cuc */
	nic->cuc_cmd = cuc_start;

	/* wait for completion */
	e100_write_flush(nic);
	udelay(10);

	/* wait for possibly (ouch) 500ms */
	while (!(cb->status & cpu_to_le16(cb_complete))) {
		msleep(10);
		if (!--counter) break;
	}

	/* ack any interrupts, something could have been set */
	iowrite8(~0, &nic->csr->scb.stat_ack);

	/* if the command failed, or is not OK, notify and return */
	if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
		DPRINTK(PROBE, ERR, "ucode load failed\n");
		err = -EPERM;
	}

	return err;
}

static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
	struct sk_buff *skb)
{
	cb->command = cpu_to_le16(cb_iaaddr);
	memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
}

static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	cb->command = cpu_to_le16(cb_dump);
	cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
		offsetof(struct mem, dump_buf));
}

#define NCONFIG_AUTO_SWITCH	0x0080
#define MII_NSC_CONG		MII_RESV1
#define NSC_CONG_ENABLE		0x0100
#define NSC_CONG_TXREADY	0x0400
#define ADVERTISE_FC_SUPPORTED	0x0400
static int e100_phy_init(struct nic *nic)
{
	struct net_device *netdev = nic->netdev;
	u32 addr;
	u16 bmcr, stat, id_lo, id_hi, cong;

	/* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
	for(addr = 0; addr < 32; addr++) {
		nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		if(!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
			break;
	}
	DPRINTK(HW, DEBUG, "phy_addr = %d\n", nic->mii.phy_id);
	if(addr == 32)
		return -EAGAIN;

	/* Select the phy and isolate the rest */
	for(addr = 0; addr < 32; addr++) {
		if(addr != nic->mii.phy_id) {
			mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
		} else {
			bmcr = mdio_read(netdev, addr, MII_BMCR);
			mdio_write(netdev, addr, MII_BMCR,
				bmcr & ~BMCR_ISOLATE);
		}
	}

	/* Get phy ID */
	id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
	id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2);
	nic->phy = (u32)id_hi << 16 | (u32)id_lo;
	DPRINTK(HW, DEBUG, "phy ID = 0x%08X\n", nic->phy);

	/* Handle National tx phys */
#define NCS_PHY_MODEL_MASK	0xFFF0FFFF
	if((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) {
		/* Disable congestion control */
		cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG);
		cong |= NSC_CONG_TXREADY;
		cong &= ~NSC_CONG_ENABLE;
		mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
	}

	if((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
	   (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
	   !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
		/* enable/disable MDI/MDI-X auto-switching. */
		mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
			nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
	}

	return 0;
}

static int e100_hw_init(struct nic *nic)
{
	int err;

	e100_hw_reset(nic);

	DPRINTK(HW, ERR, "e100_hw_init\n");
	if(!in_interrupt() && (err = e100_self_test(nic)))
		return err;

	if((err = e100_phy_init(nic)))
		return err;
	if((err = e100_exec_cmd(nic, cuc_load_base, 0)))
		return err;
	if((err = e100_exec_cmd(nic, ruc_load_base, 0)))
		return err;
	if ((err = e100_exec_cb_wait(nic, NULL, e100_setup_ucode)))
		return err;
	if((err = e100_exec_cb(nic, NULL, e100_configure)))
		return err;
	if((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr)))
		return err;
	if((err = e100_exec_cmd(nic, cuc_dump_addr,
		nic->dma_addr + offsetof(struct mem, stats))))
		return err;
	if((err = e100_exec_cmd(nic, cuc_dump_reset, 0)))
		return err;

	e100_disable_irq(nic);

	return 0;
}

static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	struct net_device *netdev = nic->netdev;
	struct dev_mc_list *list = netdev->mc_list;
	u16 i, count = min(netdev->mc_count, E100_MAX_MULTICAST_ADDRS);

	cb->command = cpu_to_le16(cb_multi);
	cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
	for(i = 0; list && i < count; i++, list = list->next)
		memcpy(&cb->u.multi.addr[i*ETH_ALEN], &list->dmi_addr,
			ETH_ALEN);
}

static void e100_set_multicast_list(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	DPRINTK(HW, DEBUG, "mc_count=%d, flags=0x%04X\n",
		netdev->mc_count, netdev->flags);

	if(netdev->flags & IFF_PROMISC)
		nic->flags |= promiscuous;
	else
		nic->flags &= ~promiscuous;

	if(netdev->flags & IFF_ALLMULTI ||
		netdev->mc_count > E100_MAX_MULTICAST_ADDRS)
		nic->flags |= multicast_all;
	else
		nic->flags &= ~multicast_all;

	e100_exec_cb(nic, NULL, e100_configure);
	e100_exec_cb(nic, NULL, e100_multi);
}

static void e100_update_stats(struct nic *nic)
{
	struct net_device *dev = nic->netdev;
	struct net_device_stats *ns = &dev->stats;
	struct stats *s = &nic->mem->stats;
	__le32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause :
		(nic->mac < mac_82559_D101M) ? (__le32 *)&s->xmt_tco_frames :
		&s->complete;

	/* Device's stats reporting may take several microseconds to
	 * complete, so we're always waiting for results of the
	 * previous command. */

	if(*complete == cpu_to_le32(cuc_dump_reset_complete)) {
		*complete = 0;
		nic->tx_frames = le32_to_cpu(s->tx_good_frames);
		nic->tx_collisions = le32_to_cpu(s->tx_total_collisions);
		ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions);
		ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions);
		ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs);
		ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns);
		ns->collisions += nic->tx_collisions;
		ns->tx_errors += le32_to_cpu(s->tx_max_collisions) +
			le32_to_cpu(s->tx_lost_crs);
		ns->rx_length_errors += le32_to_cpu(s->rx_short_frame_errors) +
			nic->rx_over_length_errors;
		ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors);
		ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors);
		ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors);
		ns->rx_errors += le32_to_cpu(s->rx_crc_errors) +
			le32_to_cpu(s->rx_alignment_errors) +
			le32_to_cpu(s->rx_short_frame_errors) +
			le32_to_cpu(s->rx_cdt_errors);
		nic->tx_deferred += le32_to_cpu(s->tx_deferred);
		nic->tx_single_collisions +=
			le32_to_cpu(s->tx_single_collisions);
		nic->tx_multiple_collisions +=
			le32_to_cpu(s->tx_multiple_collisions);
		if(nic->mac >= mac_82558_D101_A4) {
			nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause);
			nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause);
			nic->rx_fc_unsupported +=
				le32_to_cpu(s->fc_rcv_unsupported);
			if(nic->mac >= mac_82559_D101M) {
				nic->tx_tco_frames +=
					le16_to_cpu(s->xmt_tco_frames);
				nic->rx_tco_frames +=
					le16_to_cpu(s->rcv_tco_frames);
			}
		}
	}

	if(e100_exec_cmd(nic, cuc_dump_reset, 0))
		DPRINTK(TX_ERR, DEBUG, "exec cuc_dump_reset failed\n");
}

static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
{
	/* Adjust inter-frame-spacing (IFS) between two transmits if
	 * we're getting collisions on a half-duplex connection. */

	if(duplex == DUPLEX_HALF) {
		u32 prev = nic->adaptive_ifs;
		u32 min_frames = (speed == SPEED_100) ? 1000 : 100;

		if((nic->tx_frames / 32 < nic->tx_collisions) &&
		   (nic->tx_frames > min_frames)) {
			if(nic->adaptive_ifs < 60)
				nic->adaptive_ifs += 5;
		} else if (nic->tx_frames < min_frames) {
			if(nic->adaptive_ifs >= 5)
				nic->adaptive_ifs -= 5;
		}
		if(nic->adaptive_ifs != prev)
			e100_exec_cb(nic, NULL, e100_configure);
	}
}

1571static void e100_watchdog(unsigned long data)
1572{
1573 struct nic *nic = (struct nic *)data;
1574 struct ethtool_cmd cmd;
1575
1576 DPRINTK(TIMER, DEBUG, "right now = %ld\n", jiffies);
1577
1578 /* mii library handles link maintenance tasks */
1579
1580 mii_ethtool_gset(&nic->mii, &cmd);
1581
1582 if(mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
1583 DPRINTK(LINK, INFO, "link up, %sMbps, %s-duplex\n",
1584 cmd.speed == SPEED_100 ? "100" : "10",
1585 cmd.duplex == DUPLEX_FULL ? "full" : "half");
1586 } else if(!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
1587 DPRINTK(LINK, INFO, "link down\n");
1588 }
1589
1590 mii_check_link(&nic->mii);
1591
1592 /* Software generated interrupt to recover from (rare) Rx
1593 * allocation failure.
1594 * Unfortunately have to use a spinlock to not re-enable interrupts
1595 * accidentally, due to hardware that shares a register between the
1596 * interrupt mask bit and the SW Interrupt generation bit */
1597 spin_lock_irq(&nic->cmd_lock);
1598 iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen, &nic->csr->scb.cmd_hi);
1599 e100_write_flush(nic);
1600 spin_unlock_irq(&nic->cmd_lock);
1601
1602 e100_update_stats(nic);
1603 e100_adjust_adaptive_ifs(nic, cmd.speed, cmd.duplex);
1604
1605 if(nic->mac <= mac_82557_D100_C)
1606 /* Issue a multicast command to workaround a 557 lock up */
1607 e100_set_multicast_list(nic->netdev);
1608
1609 if(nic->flags & ich && cmd.speed==SPEED_10 && cmd.duplex==DUPLEX_HALF)
1610 /* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */
1611 nic->flags |= ich_10h_workaround;
1612 else
1613 nic->flags &= ~ich_10h_workaround;
1614
1615 mod_timer(&nic->watchdog,
1616 round_jiffies(jiffies + E100_WATCHDOG_PERIOD));
1617}
1618
858119e1 1619static void e100_xmit_prepare(struct nic *nic, struct cb *cb,
1da177e4
LT
1620 struct sk_buff *skb)
1621{
1622 cb->command = nic->tx_command;
1623 /* interrupt every 16 packets regardless of delay */
1624 if((nic->cbs_avail & ~15) == nic->cbs_avail)
1625 cb->command |= cpu_to_le16(cb_i);
1626 cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
1627 cb->u.tcb.tcb_byte_count = 0;
1628 cb->u.tcb.threshold = nic->tx_threshold;
1629 cb->u.tcb.tbd_count = 1;
1630 cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
1631 skb->data, skb->len, PCI_DMA_TODEVICE));
1632 /* check for mapping failure? */
1633 cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
1634}
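/* Illustrative note: (cbs_avail & ~15) == cbs_avail is a branch-free
 * test that the low four bits of cbs_avail are zero, e.g.:
 *
 *   cbs_avail = 48 (0b110000): 48 & ~15 == 48  -> request interrupt
 *   cbs_avail = 47 (0b101111): 47 & ~15 == 32  -> no interrupt
 *
 * so cb_i is set on every 16th CB, bounding the Tx-complete
 * interrupt rate without relying on a hardware delay. */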
1635
1636static int e100_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1637{
1638 struct nic *nic = netdev_priv(netdev);
1639 int err;
1640
1641 if(nic->flags & ich_10h_workaround) {
1642 /* SW workaround for ICH[x] 10Mbps/half duplex Tx hang.
1643 Issue a NOP command followed by a 1us delay before
1644 issuing the Tx command. */
1645 if(e100_exec_cmd(nic, cuc_nop, 0))
1646 DPRINTK(TX_ERR, DEBUG, "exec cuc_nop failed\n");
1647 udelay(1);
1648 }
1649
1650 err = e100_exec_cb(nic, skb, e100_xmit_prepare);
1651
1652 switch(err) {
1653 case -ENOSPC:
1654 /* We queued the skb, but now we're out of space. */
1655 DPRINTK(TX_ERR, DEBUG, "No space for CB\n");
1656 netif_stop_queue(netdev);
1657 break;
1658 case -ENOMEM:
1659 /* This is a hard error - log it. */
1660 DPRINTK(TX_ERR, DEBUG, "Out of Tx resources, returning skb\n");
1661 netif_stop_queue(netdev);
1662 return 1;
1663 }
1664
1665 netdev->trans_start = jiffies;
1666 return 0;
1667}
1668
858119e1 1669static int e100_tx_clean(struct nic *nic)
1da177e4 1670{
09f75cd7 1671 struct net_device *dev = nic->netdev;
1da177e4
LT
1672 struct cb *cb;
1673 int tx_cleaned = 0;
1674
1675 spin_lock(&nic->cb_lock);
1676
1677 /* Clean CBs marked complete */
1678 for(cb = nic->cb_to_clean;
1679 cb->status & cpu_to_le16(cb_complete);
1680 cb = nic->cb_to_clean = cb->next) {
1681 DPRINTK(TX_DONE, DEBUG, "cb[%d]->status = 0x%04X\n",
1682 (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)),
1683 cb->status);
1684
1685 if(likely(cb->skb != NULL)) {
1686 dev->stats.tx_packets++;
1687 dev->stats.tx_bytes += cb->skb->len;
1688
1689 pci_unmap_single(nic->pdev,
1690 le32_to_cpu(cb->u.tcb.tbd.buf_addr),
1691 le16_to_cpu(cb->u.tcb.tbd.size),
1692 PCI_DMA_TODEVICE);
1693 dev_kfree_skb_any(cb->skb);
1694 cb->skb = NULL;
1695 tx_cleaned = 1;
1696 }
1697 cb->status = 0;
1698 nic->cbs_avail++;
1699 }
1700
1701 spin_unlock(&nic->cb_lock);
1702
1703 /* Recover from running out of Tx resources in xmit_frame */
1704 if(unlikely(tx_cleaned && netif_queue_stopped(nic->netdev)))
1705 netif_wake_queue(nic->netdev);
1706
1707 return tx_cleaned;
1708}
1709
1710static void e100_clean_cbs(struct nic *nic)
1711{
1712 if(nic->cbs) {
1713 while(nic->cbs_avail != nic->params.cbs.count) {
1714 struct cb *cb = nic->cb_to_clean;
1715 if(cb->skb) {
1716 pci_unmap_single(nic->pdev,
1717 le32_to_cpu(cb->u.tcb.tbd.buf_addr),
1718 le16_to_cpu(cb->u.tcb.tbd.size),
1719 PCI_DMA_TODEVICE);
1720 dev_kfree_skb(cb->skb);
1721 }
1722 nic->cb_to_clean = nic->cb_to_clean->next;
1723 nic->cbs_avail++;
1724 }
1725 pci_free_consistent(nic->pdev,
1726 sizeof(struct cb) * nic->params.cbs.count,
1727 nic->cbs, nic->cbs_dma_addr);
1728 nic->cbs = NULL;
1729 nic->cbs_avail = 0;
1730 }
1731 nic->cuc_cmd = cuc_start;
1732 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean =
1733 nic->cbs;
1734}
1735
1736static int e100_alloc_cbs(struct nic *nic)
1737{
1738 struct cb *cb;
1739 unsigned int i, count = nic->params.cbs.count;
1740
1741 nic->cuc_cmd = cuc_start;
1742 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL;
1743 nic->cbs_avail = 0;
1744
1745 nic->cbs = pci_alloc_consistent(nic->pdev,
1746 sizeof(struct cb) * count, &nic->cbs_dma_addr);
1747 if(!nic->cbs)
1748 return -ENOMEM;
1749
1750 for(cb = nic->cbs, i = 0; i < count; cb++, i++) {
1751 cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
1752 cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1;
1753
1754 cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb);
1755 cb->link = cpu_to_le32(nic->cbs_dma_addr +
1756 ((i+1) % count) * sizeof(struct cb));
1757 cb->skb = NULL;
1758 }
1759
1760 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
1761 nic->cbs_avail = count;
1762
1763 return 0;
1764}
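/* Illustrative note: the loop above builds two views of one ring.
 * cb->next/cb->prev are virtual-address links for the driver, while
 * cb->link holds the bus address of the next CB for the controller:
 *
 *   cb[i].link = cbs_dma_addr + ((i + 1) % count) * sizeof(struct cb)
 *
 * so for count = 64 (an example value), cb[63] wraps back to cb[0]
 * in both views. */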
1765
1766 static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
1767 {
1768 if(!nic->rxs) return;
1769 if(RU_SUSPENDED != nic->ru_running) return;
1770
1771 /* handle init time starts */
1772 if(!rx) rx = nic->rxs;
1773
1774 /* (Re)start RU if suspended or idle and RFA is non-NULL */
1775 if(rx->skb) {
1776 e100_exec_cmd(nic, ruc_start, rx->dma_addr);
1777 nic->ru_running = RU_RUNNING;
1778 }
1779}
1780
1781#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN)
1782 static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
1783 {
1784 if(!(rx->skb = netdev_alloc_skb(nic->netdev, RFD_BUF_LEN + NET_IP_ALIGN)))
1785 return -ENOMEM;
1786
1787 /* Align, init, and map the RFD. */
1788 skb_reserve(rx->skb, NET_IP_ALIGN);
1789 skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd));
1790 rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
1791 RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
1792
1793 if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) {
1794 dev_kfree_skb_any(rx->skb);
1795 rx->skb = NULL;
1796 rx->dma_addr = 0;
1797 return -ENOMEM;
1798 }
1799
1800 /* Link the RFD to end of RFA by linking previous RFD to
1801 * this one. We are safe to touch the previous RFD because
1802 * it is protected by the before last buffer's el bit being set */
1803 if (rx->prev->skb) {
1804 struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
1805 put_unaligned_le32(rx->dma_addr, &prev_rfd->link);
1806 pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
1807 sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
1808 }
1809
1810 return 0;
1811}
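/* Illustrative note: a new RFD is appended by patching the previous
 * RFD's link field with the new buffer's bus address.
 * put_unaligned_le32() is used because the RFD lives at the start of
 * skb->data, which is not guaranteed to be 32-bit aligned on every
 * arch, and the pci_dma_sync_single_for_device() that follows pushes
 * the patched link out to the device before it can chase it. */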
1812
1813 static int e100_rx_indicate(struct nic *nic, struct rx *rx,
1814 unsigned int *work_done, unsigned int work_to_do)
1815{
09f75cd7 1816 struct net_device *dev = nic->netdev;
1da177e4
LT
1817 struct sk_buff *skb = rx->skb;
1818 struct rfd *rfd = (struct rfd *)skb->data;
1819 u16 rfd_status, actual_size;
1820
1821 if(unlikely(work_done && *work_done >= work_to_do))
1822 return -EAGAIN;
1823
1824 /* Need to sync before taking a peek at cb_complete bit */
1825 pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr,
1826 sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
1827 rfd_status = le16_to_cpu(rfd->status);
1828
1829 DPRINTK(RX_STATUS, DEBUG, "status=0x%04X\n", rfd_status);
1830
1831 /* If data isn't ready, nothing to indicate */
1832 if (unlikely(!(rfd_status & cb_complete))) {
1833 /* If the next buffer has the el bit, but we think the receiver
1834 * is still running, check to see if it really stopped while
1835 * we had interrupts off.
1836 * This allows for a fast restart without re-enabling
1837 * interrupts */
1838 if ((le16_to_cpu(rfd->command) & cb_el) &&
1839 (RU_RUNNING == nic->ru_running))
1840
1841 if (ioread8(&nic->csr->scb.status) & rus_no_res)
1842 nic->ru_running = RU_SUSPENDED;
1843 return -ENODATA;
1844 }
1845
1846 /* Get actual data size */
1847 actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
1848 if(unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd)))
1849 actual_size = RFD_BUF_LEN - sizeof(struct rfd);
1850
1851 /* Get data */
1852 pci_unmap_single(nic->pdev, rx->dma_addr,
1853 RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
1854
1855 /* If this buffer has the el bit, but we think the receiver
1856 * is still running, check to see if it really stopped while
1857 * we had interrupts off.
1858 * This allows for a fast restart without re-enabling interrupts.
1859 * This can happen when the RU sees the size change but also sees
1860 * the el bit set. */
1861 if ((le16_to_cpu(rfd->command) & cb_el) &&
1862 (RU_RUNNING == nic->ru_running)) {
1863
1864 if (ioread8(&nic->csr->scb.status) & rus_no_res)
1865 nic->ru_running = RU_SUSPENDED;
1866 }
1867
1868 /* Pull off the RFD and put the actual data (minus eth hdr) */
1869 skb_reserve(skb, sizeof(struct rfd));
1870 skb_put(skb, actual_size);
1871 skb->protocol = eth_type_trans(skb, nic->netdev);
1872
1873 if(unlikely(!(rfd_status & cb_ok))) {
1874 /* Don't indicate if hardware indicates errors */
1875 dev_kfree_skb_any(skb);
1876 } else if(actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN) {
1877 /* Don't indicate oversized frames */
1878 nic->rx_over_length_errors++;
1879 dev_kfree_skb_any(skb);
1880 } else {
1881 dev->stats.rx_packets++;
1882 dev->stats.rx_bytes += actual_size;
1883 netif_receive_skb(skb);
1884 if(work_done)
1885 (*work_done)++;
1886 }
1887
1888 rx->skb = NULL;
1889
1890 return 0;
1891}
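/* Illustrative note: each Rx skb carries its own RFD header followed
 * by the frame, so indication above is pure pointer arithmetic:
 *
 *   skb->data: [ struct rfd | ethernet frame ... ]
 *   skb_reserve(skb, sizeof(struct rfd));   skip the RFD
 *   skb_put(skb, actual_size);              expose the frame
 *
 * avoiding any copy between the DMA buffer and the stack. */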
1892
1893 static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
1894 unsigned int work_to_do)
1895{
1896 struct rx *rx;
1897 int restart_required = 0, err = 0;
1898 struct rx *old_before_last_rx, *new_before_last_rx;
1899 struct rfd *old_before_last_rfd, *new_before_last_rfd;
1900
1901 /* Indicate newly arrived packets */
1902 for(rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
1903 err = e100_rx_indicate(nic, rx, work_done, work_to_do);
1904 /* Hit quota or no more to clean */
1905 if (-EAGAIN == err || -ENODATA == err)
1906 break;
1907 }
1908
1909
1910 /* On EAGAIN we hit the quota, so there is more work to do; restart
1911 * once cleanup is complete.
1912 * Otherwise, if the RU is already suspended (RNR), take note: this
1913 * ensures the state machine never starts with a partially cleaned
1914 * list, avoiding a race between hardware and rx_to_clean when in
1915 * NAPI mode */
1916 if (-EAGAIN != err && RU_SUSPENDED == nic->ru_running)
1917 restart_required = 1;
1918
1919 old_before_last_rx = nic->rx_to_use->prev->prev;
1920 old_before_last_rfd = (struct rfd *)old_before_last_rx->skb->data;
1921
1922 /* Alloc new skbs to refill list */
1923 for(rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
1924 if(unlikely(e100_rx_alloc_skb(nic, rx)))
1925 break; /* Better luck next time (see watchdog) */
1926 }
1927
1928 new_before_last_rx = nic->rx_to_use->prev->prev;
1929 if (new_before_last_rx != old_before_last_rx) {
1930 /* Set the el-bit on the buffer that is before the last buffer.
1931 * This lets us update the next pointer on the last buffer
1932 * without worrying about hardware touching it.
1933 * We set the size to 0 to prevent hardware from touching this
1934 * buffer.
1935 * When the hardware hits the before last buffer with el-bit
1936 * and size of 0, it will RNR interrupt, the RUS will go into
1937 * the No Resources state. It will not complete nor write to
1938 * this buffer. */
1939 new_before_last_rfd =
1940 (struct rfd *)new_before_last_rx->skb->data;
1941 new_before_last_rfd->size = 0;
1942 new_before_last_rfd->command |= cpu_to_le16(cb_el);
1943 pci_dma_sync_single_for_device(nic->pdev,
1944 new_before_last_rx->dma_addr, sizeof(struct rfd),
1945 PCI_DMA_BIDIRECTIONAL);
1946
1947 /* Now that we have a new stopping point, we can clear the old
1948 * stopping point. We must sync twice to get the proper
1949 * ordering on the hardware side of things. */
1950 old_before_last_rfd->command &= ~cpu_to_le16(cb_el);
1951 pci_dma_sync_single_for_device(nic->pdev,
1952 old_before_last_rx->dma_addr, sizeof(struct rfd),
1953 PCI_DMA_BIDIRECTIONAL);
1954 old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN);
1955 pci_dma_sync_single_for_device(nic->pdev,
1956 old_before_last_rx->dma_addr, sizeof(struct rfd),
1957 PCI_DMA_BIDIRECTIONAL);
1958 }
1959
1960 if(restart_required) {
1961 /* ack the RNR before restarting the receiver */
1962 iowrite8(stat_ack_rnr, &nic->csr->scb.stat_ack);
1963 e100_start_receiver(nic, nic->rx_to_clean);
1964 if(work_done)
1965 (*work_done)++;
1966 }
1967}
1968
1969static void e100_rx_clean_list(struct nic *nic)
1970{
1971 struct rx *rx;
1972 unsigned int i, count = nic->params.rfds.count;
1973
1974 nic->ru_running = RU_UNINITIALIZED;
1975
1976 if(nic->rxs) {
1977 for(rx = nic->rxs, i = 0; i < count; rx++, i++) {
1978 if(rx->skb) {
1979 pci_unmap_single(nic->pdev, rx->dma_addr,
1980 RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
1981 dev_kfree_skb(rx->skb);
1982 }
1983 }
1984 kfree(nic->rxs);
1985 nic->rxs = NULL;
1986 }
1987
1988 nic->rx_to_use = nic->rx_to_clean = NULL;
1989}
1990
1991static int e100_rx_alloc_list(struct nic *nic)
1992{
1993 struct rx *rx;
1994 unsigned int i, count = nic->params.rfds.count;
1995 struct rfd *before_last;
1996
1997 nic->rx_to_use = nic->rx_to_clean = NULL;
1998 nic->ru_running = RU_UNINITIALIZED;
1999
2000 if(!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_ATOMIC)))
2001 return -ENOMEM;
2002
2003 for(rx = nic->rxs, i = 0; i < count; rx++, i++) {
2004 rx->next = (i + 1 < count) ? rx + 1 : nic->rxs;
2005 rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1;
2006 if(e100_rx_alloc_skb(nic, rx)) {
2007 e100_rx_clean_list(nic);
2008 return -ENOMEM;
2009 }
2010 }
2011 /* Set the el-bit on the buffer that is before the last buffer.
2012 * This lets us update the next pointer on the last buffer without
2013 * worrying about hardware touching it.
2014 * We set the size to 0 to prevent hardware from touching this buffer.
2015 * When the hardware hits the before last buffer with el-bit and size
2016 * of 0, it will RNR interrupt, the RU will go into the No Resources
2017 * state. It will not complete nor write to this buffer. */
2018 rx = nic->rxs->prev->prev;
2019 before_last = (struct rfd *)rx->skb->data;
2020 before_last->command |= cpu_to_le16(cb_el);
2021 before_last->size = 0;
2022 pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
2023 sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
2024
2025 nic->rx_to_use = nic->rx_to_clean = nic->rxs;
2026 nic->ru_running = RU_SUSPENDED;
2027
2028 return 0;
2029}
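/* Illustrative sketch of the RFA set up above, for count = n:
 *
 *   rxs[0] -> ... -> rxs[n-2] -> rxs[n-1] -> (wraps to rxs[0])
 *                    ^ before-last: size = 0, cb_el set
 *
 * The zero-size, el-marked guard RFD forces the RU to raise RNR and
 * suspend there rather than write into it, so the driver can relink
 * buffers behind that fence without racing the hardware. */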
2030
2031 static irqreturn_t e100_intr(int irq, void *dev_id)
2032{
2033 struct net_device *netdev = dev_id;
2034 struct nic *nic = netdev_priv(netdev);
2035 u8 stat_ack = ioread8(&nic->csr->scb.stat_ack);
2036
2037 DPRINTK(INTR, DEBUG, "stat_ack = 0x%02X\n", stat_ack);
2038
2039 if(stat_ack == stat_ack_not_ours || /* Not our interrupt */
2040 stat_ack == stat_ack_not_present) /* Hardware is ejected */
2041 return IRQ_NONE;
2042
2043 /* Ack interrupt(s) */
2044 iowrite8(stat_ack, &nic->csr->scb.stat_ack);
2045
2046 /* We hit Receive No Resource (RNR); restart RU after cleaning */
2047 if(stat_ack & stat_ack_rnr)
2048 nic->ru_running = RU_SUSPENDED;
2049
2050 if(likely(netif_rx_schedule_prep(netdev, &nic->napi))) {
2051 e100_disable_irq(nic);
2052 __netif_rx_schedule(netdev, &nic->napi);
2053 }
2054
2055 return IRQ_HANDLED;
2056}
2057
2058 static int e100_poll(struct napi_struct *napi, int budget)
2059 {
2060 struct nic *nic = container_of(napi, struct nic, napi);
2061 struct net_device *netdev = nic->netdev;
2062 unsigned int work_done = 0;
2063
2064 e100_rx_clean(nic, &work_done, budget);
2065 e100_tx_clean(nic);
2066
2067 /* If budget not fully consumed, exit the polling mode */
2068 if (work_done < budget) {
2069 netif_rx_complete(netdev, napi);
2070 e100_enable_irq(nic);
2071 }
2072
2073 return work_done;
2074}
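/* Illustrative note: this follows the NAPI contract - consume at
 * most 'budget' Rx packets, return the number actually processed,
 * and re-enable the device interrupt only after netif_rx_complete()
 * when work_done < budget. Returning a full budget keeps the device
 * on the poll list for the next softirq pass. */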
2075
2076#ifdef CONFIG_NET_POLL_CONTROLLER
2077static void e100_netpoll(struct net_device *netdev)
2078{
2079 struct nic *nic = netdev_priv(netdev);
2080
2081 e100_disable_irq(nic);
2082 e100_intr(nic->pdev->irq, netdev);
2083 e100_tx_clean(nic);
2084 e100_enable_irq(nic);
2085}
2086#endif
2087
2088static int e100_set_mac_address(struct net_device *netdev, void *p)
2089{
2090 struct nic *nic = netdev_priv(netdev);
2091 struct sockaddr *addr = p;
2092
2093 if (!is_valid_ether_addr(addr->sa_data))
2094 return -EADDRNOTAVAIL;
2095
2096 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2097 e100_exec_cb(nic, NULL, e100_setup_iaaddr);
2098
2099 return 0;
2100}
2101
2102static int e100_change_mtu(struct net_device *netdev, int new_mtu)
2103{
2104 if(new_mtu < ETH_ZLEN || new_mtu > ETH_DATA_LEN)
2105 return -EINVAL;
2106 netdev->mtu = new_mtu;
2107 return 0;
2108}
2109
2110static int e100_asf(struct nic *nic)
2111{
2112 /* ASF can be enabled from eeprom */
2113 return((nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
2114 (nic->eeprom[eeprom_config_asf] & eeprom_asf) &&
2115 !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
2116 ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE));
2117}
2118
2119static int e100_up(struct nic *nic)
2120{
2121 int err;
2122
2123 if((err = e100_rx_alloc_list(nic)))
2124 return err;
2125 if((err = e100_alloc_cbs(nic)))
2126 goto err_rx_clean_list;
2127 if((err = e100_hw_init(nic)))
2128 goto err_clean_cbs;
2129 e100_set_multicast_list(nic->netdev);
2130 e100_start_receiver(nic, NULL);
2131 mod_timer(&nic->watchdog, jiffies);
2132 if((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED,
2133 nic->netdev->name, nic->netdev)))
2134 goto err_no_irq;
2135 netif_wake_queue(nic->netdev);
2136 napi_enable(&nic->napi);
2137 /* enable ints _after_ enabling poll, preventing a race between
2138 * disable ints+schedule */
2139 e100_enable_irq(nic);
1da177e4
LT
2140 return 0;
2141
2142err_no_irq:
2143 del_timer_sync(&nic->watchdog);
2144err_clean_cbs:
2145 e100_clean_cbs(nic);
2146err_rx_clean_list:
2147 e100_rx_clean_list(nic);
2148 return err;
2149}
2150
2151static void e100_down(struct nic *nic)
2152{
2153 /* wait here for poll to complete */
2154 napi_disable(&nic->napi);
2155 netif_stop_queue(nic->netdev);
2156 e100_hw_reset(nic);
2157 free_irq(nic->pdev->irq, nic->netdev);
2158 del_timer_sync(&nic->watchdog);
2159 netif_carrier_off(nic->netdev);
2160 e100_clean_cbs(nic);
2161 e100_rx_clean_list(nic);
2162}
2163
2164static void e100_tx_timeout(struct net_device *netdev)
2165{
2166 struct nic *nic = netdev_priv(netdev);
2167
2168 /* Reset outside of interrupt context, to avoid request_irq
2169 * in interrupt context */
2170 schedule_work(&nic->tx_timeout_task);
2171}
2172
2173 static void e100_tx_timeout_task(struct work_struct *work)
2174 {
2175 struct nic *nic = container_of(work, struct nic, tx_timeout_task);
2176 struct net_device *netdev = nic->netdev;
2177
2178 DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n",
2179 ioread8(&nic->csr->scb.status));
2180 e100_down(netdev_priv(netdev));
2181 e100_up(netdev_priv(netdev));
2182}
2183
2184static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
2185{
2186 int err;
2187 struct sk_buff *skb;
2188
2189 /* Use driver resources to perform internal MAC or PHY
2190 * loopback test. A single packet is prepared and transmitted
2191 * in loopback mode, and the test passes if the received
2192 * packet compares byte-for-byte to the transmitted packet. */
2193
2194 if((err = e100_rx_alloc_list(nic)))
2195 return err;
2196 if((err = e100_alloc_cbs(nic)))
2197 goto err_clean_rx;
2198
2199 /* ICH PHY loopback is broken so do MAC loopback instead */
2200 if(nic->flags & ich && loopback_mode == lb_phy)
2201 loopback_mode = lb_mac;
2202
2203 nic->loopback = loopback_mode;
2204 if((err = e100_hw_init(nic)))
2205 goto err_loopback_none;
2206
2207 if(loopback_mode == lb_phy)
2208 mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
2209 BMCR_LOOPBACK);
2210
2211 e100_start_receiver(nic, NULL);
2212
2213 if(!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) {
2214 err = -ENOMEM;
2215 goto err_loopback_none;
2216 }
2217 skb_put(skb, ETH_DATA_LEN);
2218 memset(skb->data, 0xFF, ETH_DATA_LEN);
2219 e100_xmit_frame(skb, nic->netdev);
2220
2221 msleep(10);
2222
2223 pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr,
2224 RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
2225
2226 if(memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
2227 skb->data, ETH_DATA_LEN))
2228 err = -EAGAIN;
2229
2230err_loopback_none:
2231 mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0);
2232 nic->loopback = lb_none;
2233 e100_clean_cbs(nic);
2234 e100_hw_reset(nic);
2235err_clean_rx:
2236 e100_rx_clean_list(nic);
2237 return err;
2238}
2239
2240#define MII_LED_CONTROL 0x1B
2241static void e100_blink_led(unsigned long data)
2242{
2243 struct nic *nic = (struct nic *)data;
2244 enum led_state {
2245 led_on = 0x01,
2246 led_off = 0x04,
2247 led_on_559 = 0x05,
2248 led_on_557 = 0x07,
2249 };
2250
2251 nic->leds = (nic->leds & led_on) ? led_off :
2252 (nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559;
2253 mdio_write(nic->netdev, nic->mii.phy_id, MII_LED_CONTROL, nic->leds);
2254 mod_timer(&nic->blink_timer, jiffies + HZ / 4);
2255}
2256
2257static int e100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
2258{
2259 struct nic *nic = netdev_priv(netdev);
2260 return mii_ethtool_gset(&nic->mii, cmd);
2261}
2262
2263static int e100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
2264{
2265 struct nic *nic = netdev_priv(netdev);
2266 int err;
2267
2268 mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET);
2269 err = mii_ethtool_sset(&nic->mii, cmd);
2270 e100_exec_cb(nic, NULL, e100_configure);
2271
2272 return err;
2273}
2274
2275static void e100_get_drvinfo(struct net_device *netdev,
2276 struct ethtool_drvinfo *info)
2277{
2278 struct nic *nic = netdev_priv(netdev);
2279 strcpy(info->driver, DRV_NAME);
2280 strcpy(info->version, DRV_VERSION);
2281 strcpy(info->fw_version, "N/A");
2282 strcpy(info->bus_info, pci_name(nic->pdev));
2283}
2284
2285 #define E100_PHY_REGS 0x1C
2286static int e100_get_regs_len(struct net_device *netdev)
2287{
2288 struct nic *nic = netdev_priv(netdev);
2289 return 1 + E100_PHY_REGS + sizeof(nic->mem->dump_buf);
2290}
2291
2292static void e100_get_regs(struct net_device *netdev,
2293 struct ethtool_regs *regs, void *p)
2294{
2295 struct nic *nic = netdev_priv(netdev);
2296 u32 *buff = p;
2297 int i;
2298
2299 regs->version = (1 << 24) | nic->pdev->revision;
2300 buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 |
2301 ioread8(&nic->csr->scb.cmd_lo) << 16 |
2302 ioread16(&nic->csr->scb.status);
2303 for(i = E100_PHY_REGS; i >= 0; i--)
2304 buff[1 + E100_PHY_REGS - i] =
2305 mdio_read(netdev, nic->mii.phy_id, i);
2306 memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf));
2307 e100_exec_cb(nic, NULL, e100_dump);
2308 msleep(10);
2309 memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf,
2310 sizeof(nic->mem->dump_buf));
2311}
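/* Illustrative note: layout of the register blob produced above, in
 * u32 words:
 *
 *   buff[0]                       cmd_hi<<24 | cmd_lo<<16 | status
 *   buff[1 .. 1+E100_PHY_REGS]    MDI PHY registers 0x1C down to 0x00
 *   buff[2+E100_PHY_REGS .. ]     controller dump buffer (e100_dump)
 */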
2312
2313static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2314{
2315 struct nic *nic = netdev_priv(netdev);
2316 wol->supported = (nic->mac >= mac_82558_D101_A4) ? WAKE_MAGIC : 0;
2317 wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0;
2318}
2319
2320static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2321{
2322 struct nic *nic = netdev_priv(netdev);
2323
2324 if ((wol->wolopts && wol->wolopts != WAKE_MAGIC) ||
2325 !device_can_wakeup(&nic->pdev->dev))
2326 return -EOPNOTSUPP;
2327
2328 if(wol->wolopts)
2329 nic->flags |= wol_magic;
2330 else
2331 nic->flags &= ~wol_magic;
2332
2333 device_set_wakeup_enable(&nic->pdev->dev, wol->wolopts);
2334
2335 e100_exec_cb(nic, NULL, e100_configure);
2336
2337 return 0;
2338}
2339
2340static u32 e100_get_msglevel(struct net_device *netdev)
2341{
2342 struct nic *nic = netdev_priv(netdev);
2343 return nic->msg_enable;
2344}
2345
2346static void e100_set_msglevel(struct net_device *netdev, u32 value)
2347{
2348 struct nic *nic = netdev_priv(netdev);
2349 nic->msg_enable = value;
2350}
2351
2352static int e100_nway_reset(struct net_device *netdev)
2353{
2354 struct nic *nic = netdev_priv(netdev);
2355 return mii_nway_restart(&nic->mii);
2356}
2357
2358static u32 e100_get_link(struct net_device *netdev)
2359{
2360 struct nic *nic = netdev_priv(netdev);
2361 return mii_link_ok(&nic->mii);
2362}
2363
2364static int e100_get_eeprom_len(struct net_device *netdev)
2365{
2366 struct nic *nic = netdev_priv(netdev);
2367 return nic->eeprom_wc << 1;
2368}
2369
2370#define E100_EEPROM_MAGIC 0x1234
2371static int e100_get_eeprom(struct net_device *netdev,
2372 struct ethtool_eeprom *eeprom, u8 *bytes)
2373{
2374 struct nic *nic = netdev_priv(netdev);
2375
2376 eeprom->magic = E100_EEPROM_MAGIC;
2377 memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len);
2378
2379 return 0;
2380}
2381
2382static int e100_set_eeprom(struct net_device *netdev,
2383 struct ethtool_eeprom *eeprom, u8 *bytes)
2384{
2385 struct nic *nic = netdev_priv(netdev);
2386
2387 if(eeprom->magic != E100_EEPROM_MAGIC)
2388 return -EINVAL;
2389
2390 memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len);
2391
2392 return e100_eeprom_save(nic, eeprom->offset >> 1,
2393 (eeprom->len >> 1) + 1);
2394}
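/* Illustrative note: the EEPROM is addressed in 16-bit words while
 * ethtool works in bytes, so offset >> 1 picks the first word
 * touched and (len >> 1) + 1 rounds the word count up; a byte range
 * that straddles word boundaries is still written back in full by
 * e100_eeprom_save(). */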
2395
2396static void e100_get_ringparam(struct net_device *netdev,
2397 struct ethtool_ringparam *ring)
2398{
2399 struct nic *nic = netdev_priv(netdev);
2400 struct param_range *rfds = &nic->params.rfds;
2401 struct param_range *cbs = &nic->params.cbs;
2402
2403 ring->rx_max_pending = rfds->max;
2404 ring->tx_max_pending = cbs->max;
2405 ring->rx_mini_max_pending = 0;
2406 ring->rx_jumbo_max_pending = 0;
2407 ring->rx_pending = rfds->count;
2408 ring->tx_pending = cbs->count;
2409 ring->rx_mini_pending = 0;
2410 ring->rx_jumbo_pending = 0;
2411}
2412
2413static int e100_set_ringparam(struct net_device *netdev,
2414 struct ethtool_ringparam *ring)
2415{
2416 struct nic *nic = netdev_priv(netdev);
2417 struct param_range *rfds = &nic->params.rfds;
2418 struct param_range *cbs = &nic->params.cbs;
2419
2420 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
2421 return -EINVAL;
2422
2423 if(netif_running(netdev))
2424 e100_down(nic);
2425 rfds->count = max(ring->rx_pending, rfds->min);
2426 rfds->count = min(rfds->count, rfds->max);
2427 cbs->count = max(ring->tx_pending, cbs->min);
2428 cbs->count = min(cbs->count, cbs->max);
2429 DPRINTK(DRV, INFO, "Ring Param settings: rx: %d, tx %d\n",
2430 rfds->count, cbs->count);
2431 if(netif_running(netdev))
2432 e100_up(nic);
2433
2434 return 0;
2435}
2436
2437static const char e100_gstrings_test[][ETH_GSTRING_LEN] = {
2438 "Link test (on/offline)",
2439 "Eeprom test (on/offline)",
2440 "Self test (offline)",
2441 "Mac loopback (offline)",
2442 "Phy loopback (offline)",
2443};
2444 #define E100_TEST_LEN ARRAY_SIZE(e100_gstrings_test)
2445
2446static void e100_diag_test(struct net_device *netdev,
2447 struct ethtool_test *test, u64 *data)
2448{
2449 struct ethtool_cmd cmd;
2450 struct nic *nic = netdev_priv(netdev);
2451 int i, err;
2452
2453 memset(data, 0, E100_TEST_LEN * sizeof(u64));
2454 data[0] = !mii_link_ok(&nic->mii);
2455 data[1] = e100_eeprom_load(nic);
2456 if(test->flags & ETH_TEST_FL_OFFLINE) {
2457
2458 /* save speed, duplex & autoneg settings */
2459 err = mii_ethtool_gset(&nic->mii, &cmd);
2460
2461 if(netif_running(netdev))
2462 e100_down(nic);
2463 data[2] = e100_self_test(nic);
2464 data[3] = e100_loopback_test(nic, lb_mac);
2465 data[4] = e100_loopback_test(nic, lb_phy);
2466
2467 /* restore speed, duplex & autoneg settings */
2468 err = mii_ethtool_sset(&nic->mii, &cmd);
2469
2470 if(netif_running(netdev))
2471 e100_up(nic);
2472 }
2473 for(i = 0; i < E100_TEST_LEN; i++)
2474 test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0;
2475
2476 msleep_interruptible(4 * 1000);
2477}
2478
2479static int e100_phys_id(struct net_device *netdev, u32 data)
2480{
2481 struct nic *nic = netdev_priv(netdev);
2482
2483 if(!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
2484 data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);
2485 mod_timer(&nic->blink_timer, jiffies);
2486 msleep_interruptible(data * 1000);
2487 del_timer_sync(&nic->blink_timer);
2488 mdio_write(netdev, nic->mii.phy_id, MII_LED_CONTROL, 0);
2489
2490 return 0;
2491}
2492
2493static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = {
2494 "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
2495 "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
2496 "rx_length_errors", "rx_over_errors", "rx_crc_errors",
2497 "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
2498 "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
2499 "tx_heartbeat_errors", "tx_window_errors",
2500 /* device-specific stats */
2501 "tx_deferred", "tx_single_collisions", "tx_multi_collisions",
2502 "tx_flow_control_pause", "rx_flow_control_pause",
2503 "rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets",
2504};
2505#define E100_NET_STATS_LEN 21
2506 #define E100_STATS_LEN ARRAY_SIZE(e100_gstrings_stats)
2507
2508 static int e100_get_sset_count(struct net_device *netdev, int sset)
2509 {
2510 switch (sset) {
2511 case ETH_SS_TEST:
2512 return E100_TEST_LEN;
2513 case ETH_SS_STATS:
2514 return E100_STATS_LEN;
2515 default:
2516 return -EOPNOTSUPP;
2517 }
2518}
2519
2520static void e100_get_ethtool_stats(struct net_device *netdev,
2521 struct ethtool_stats *stats, u64 *data)
2522{
2523 struct nic *nic = netdev_priv(netdev);
2524 int i;
2525
2526 for(i = 0; i < E100_NET_STATS_LEN; i++)
2527 data[i] = ((unsigned long *)&netdev->stats)[i];
2528
2529 data[i++] = nic->tx_deferred;
2530 data[i++] = nic->tx_single_collisions;
2531 data[i++] = nic->tx_multiple_collisions;
2532 data[i++] = nic->tx_fc_pause;
2533 data[i++] = nic->rx_fc_pause;
2534 data[i++] = nic->rx_fc_unsupported;
2535 data[i++] = nic->tx_tco_frames;
2536 data[i++] = nic->rx_tco_frames;
2537}
2538
2539static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
2540{
2541 switch(stringset) {
2542 case ETH_SS_TEST:
2543 memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test));
2544 break;
2545 case ETH_SS_STATS:
2546 memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats));
2547 break;
2548 }
2549}
2550
2551 static const struct ethtool_ops e100_ethtool_ops = {
2552 .get_settings = e100_get_settings,
2553 .set_settings = e100_set_settings,
2554 .get_drvinfo = e100_get_drvinfo,
2555 .get_regs_len = e100_get_regs_len,
2556 .get_regs = e100_get_regs,
2557 .get_wol = e100_get_wol,
2558 .set_wol = e100_set_wol,
2559 .get_msglevel = e100_get_msglevel,
2560 .set_msglevel = e100_set_msglevel,
2561 .nway_reset = e100_nway_reset,
2562 .get_link = e100_get_link,
2563 .get_eeprom_len = e100_get_eeprom_len,
2564 .get_eeprom = e100_get_eeprom,
2565 .set_eeprom = e100_set_eeprom,
2566 .get_ringparam = e100_get_ringparam,
2567 .set_ringparam = e100_set_ringparam,
2568 .self_test = e100_diag_test,
2569 .get_strings = e100_get_strings,
2570 .phys_id = e100_phys_id,
2571 .get_ethtool_stats = e100_get_ethtool_stats,
2572 .get_sset_count = e100_get_sset_count,
2573};
2574
2575static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2576{
2577 struct nic *nic = netdev_priv(netdev);
2578
2579 return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL);
2580}
2581
2582static int e100_alloc(struct nic *nic)
2583{
2584 nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem),
2585 &nic->dma_addr);
2586 return nic->mem ? 0 : -ENOMEM;
2587}
2588
2589static void e100_free(struct nic *nic)
2590{
2591 if(nic->mem) {
2592 pci_free_consistent(nic->pdev, sizeof(struct mem),
2593 nic->mem, nic->dma_addr);
2594 nic->mem = NULL;
2595 }
2596}
2597
2598static int e100_open(struct net_device *netdev)
2599{
2600 struct nic *nic = netdev_priv(netdev);
2601 int err = 0;
2602
2603 netif_carrier_off(netdev);
2604 if((err = e100_up(nic)))
2605 DPRINTK(IFUP, ERR, "Cannot open interface, aborting.\n");
2606 return err;
2607}
2608
2609static int e100_close(struct net_device *netdev)
2610{
2611 e100_down(netdev_priv(netdev));
2612 return 0;
2613}
2614
2615static const struct net_device_ops e100_netdev_ops = {
2616 .ndo_open = e100_open,
2617 .ndo_stop = e100_close,
2618 .ndo_start_xmit = e100_xmit_frame,
2619 .ndo_validate_addr = eth_validate_addr,
2620 .ndo_set_multicast_list = e100_set_multicast_list,
2621 .ndo_set_mac_address = e100_set_mac_address,
2622 .ndo_change_mtu = e100_change_mtu,
2623 .ndo_do_ioctl = e100_do_ioctl,
2624 .ndo_tx_timeout = e100_tx_timeout,
2625#ifdef CONFIG_NET_POLL_CONTROLLER
2626 .ndo_poll_controller = e100_netpoll,
2627#endif
2628};
2629
2630static int __devinit e100_probe(struct pci_dev *pdev,
2631 const struct pci_device_id *ent)
2632{
2633 struct net_device *netdev;
2634 struct nic *nic;
2635 int err;
2636
2637 if(!(netdev = alloc_etherdev(sizeof(struct nic)))) {
2638 if(((1 << debug) - 1) & NETIF_MSG_PROBE)
2639 printk(KERN_ERR PFX "Etherdev alloc failed, abort.\n");
2640 return -ENOMEM;
2641 }
2642
2643 netdev->netdev_ops = &e100_netdev_ops;
2644 SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops);
2645 netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
2646 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
2647
2648 nic = netdev_priv(netdev);
2649 netif_napi_add(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT);
2650 nic->netdev = netdev;
2651 nic->pdev = pdev;
2652 nic->msg_enable = (1 << debug) - 1;
2653 pci_set_drvdata(pdev, netdev);
2654
2655 if((err = pci_enable_device(pdev))) {
2656 DPRINTK(PROBE, ERR, "Cannot enable PCI device, aborting.\n");
2657 goto err_out_free_dev;
2658 }
2659
2660 if(!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2661 DPRINTK(PROBE, ERR, "Cannot find proper PCI device "
2662 "base address, aborting.\n");
2663 err = -ENODEV;
2664 goto err_out_disable_pdev;
2665 }
2666
2667 if((err = pci_request_regions(pdev, DRV_NAME))) {
2668 DPRINTK(PROBE, ERR, "Cannot obtain PCI resources, aborting.\n");
2669 goto err_out_disable_pdev;
2670 }
2671
2672 if((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
2673 DPRINTK(PROBE, ERR, "No usable DMA configuration, aborting.\n");
2674 goto err_out_free_res;
2675 }
2676
2677 SET_NETDEV_DEV(netdev, &pdev->dev);
2678
2679 if (use_io)
2680 DPRINTK(PROBE, INFO, "using i/o access mode\n");
2681
2682 nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr));
2683 if(!nic->csr) {
2684 DPRINTK(PROBE, ERR, "Cannot map device registers, aborting.\n");
2685 err = -ENOMEM;
2686 goto err_out_free_res;
2687 }
2688
2689 if(ent->driver_data)
2690 nic->flags |= ich;
2691 else
2692 nic->flags &= ~ich;
2693
2694 e100_get_defaults(nic);
2695
2696 /* locks must be initialized before calling hw_reset */
2697 spin_lock_init(&nic->cb_lock);
2698 spin_lock_init(&nic->cmd_lock);
2699 spin_lock_init(&nic->mdio_lock);
2700
2701 /* Reset the device before pci_set_master() in case device is in some
2702 * funky state and has an interrupt pending - hint: we don't have the
2703 * interrupt handler registered yet. */
2704 e100_hw_reset(nic);
2705
2706 pci_set_master(pdev);
2707
2708 init_timer(&nic->watchdog);
2709 nic->watchdog.function = e100_watchdog;
2710 nic->watchdog.data = (unsigned long)nic;
2711 init_timer(&nic->blink_timer);
2712 nic->blink_timer.function = e100_blink_led;
2713 nic->blink_timer.data = (unsigned long)nic;
2714
2715 INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);
2716
2717 if((err = e100_alloc(nic))) {
2718 DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n");
2719 goto err_out_iounmap;
2720 }
2721
2722 if((err = e100_eeprom_load(nic)))
2723 goto err_out_free;
2724
2725 e100_phy_init(nic);
2726
2727 memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);
2728 memcpy(netdev->perm_addr, nic->eeprom, ETH_ALEN);
2729 if (!is_valid_ether_addr(netdev->perm_addr)) {
2730 if (!eeprom_bad_csum_allow) {
2731 DPRINTK(PROBE, ERR, "Invalid MAC address from "
2732 "EEPROM, aborting.\n");
2733 err = -EAGAIN;
2734 goto err_out_free;
2735 } else {
2736 DPRINTK(PROBE, ERR, "Invalid MAC address from EEPROM, "
2737 "you MUST configure one.\n");
2738 }
2739 }
2740
2741 /* Wol magic packet can be enabled from eeprom */
2742 if((nic->mac >= mac_82558_D101_A4) &&
2743 (nic->eeprom[eeprom_id] & eeprom_id_wol)) {
2744 nic->flags |= wol_magic;
2745 device_set_wakeup_enable(&pdev->dev, true);
2746 }
2747
2748 /* ack any pending wake events, disable PME */
2749 pci_pme_active(pdev, false);
2750
2751 strcpy(netdev->name, "eth%d");
2752 if((err = register_netdev(netdev))) {
2753 DPRINTK(PROBE, ERR, "Cannot register net device, aborting.\n");
2754 goto err_out_free;
2755 }
2756
2757 DPRINTK(PROBE, INFO, "addr 0x%llx, irq %d, MAC addr %pM\n",
2758 (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
2759 pdev->irq, netdev->dev_addr);
2760
2761 return 0;
2762
2763err_out_free:
2764 e100_free(nic);
2765err_out_iounmap:
2766 pci_iounmap(pdev, nic->csr);
2767err_out_free_res:
2768 pci_release_regions(pdev);
2769err_out_disable_pdev:
2770 pci_disable_device(pdev);
2771err_out_free_dev:
2772 pci_set_drvdata(pdev, NULL);
2773 free_netdev(netdev);
2774 return err;
2775}
2776
2777static void __devexit e100_remove(struct pci_dev *pdev)
2778{
2779 struct net_device *netdev = pci_get_drvdata(pdev);
2780
2781 if(netdev) {
2782 struct nic *nic = netdev_priv(netdev);
2783 unregister_netdev(netdev);
2784 e100_free(nic);
2785 pci_iounmap(pdev, nic->csr);
2786 free_netdev(netdev);
2787 pci_release_regions(pdev);
2788 pci_disable_device(pdev);
2789 pci_set_drvdata(pdev, NULL);
2790 }
2791}
2792
2793static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
2794{
2795 struct net_device *netdev = pci_get_drvdata(pdev);
2796 struct nic *nic = netdev_priv(netdev);
2797
2798 if (netif_running(netdev))
2799 e100_down(nic);
2800 netif_device_detach(netdev);
2801
2802 pci_save_state(pdev);
2803
2804 if ((nic->flags & wol_magic) | e100_asf(nic)) {
2805 if (pci_enable_wake(pdev, PCI_D3cold, true))
2806 pci_enable_wake(pdev, PCI_D3hot, true);
2807 } else {
2808 pci_enable_wake(pdev, PCI_D3hot, false);
2809 }
2810
2811 pci_disable_device(pdev);
2812 pci_set_power_state(pdev, PCI_D3hot);
2813
2814 return 0;
2815}
2816
2817 #ifdef CONFIG_PM
2818static int e100_resume(struct pci_dev *pdev)
2819{
2820 struct net_device *netdev = pci_get_drvdata(pdev);
2821 struct nic *nic = netdev_priv(netdev);
2822
2823 pci_set_power_state(pdev, PCI_D0);
2824 pci_restore_state(pdev);
2825 /* ack any pending wake events, disable PME */
2826 pci_enable_wake(pdev, 0, 0);
2827
2828 netif_device_attach(netdev);
2829 if (netif_running(netdev))
2830 e100_up(nic);
2831
2832 return 0;
2833}
2834 #endif /* CONFIG_PM */
2835
2836 static void e100_shutdown(struct pci_dev *pdev)
2837 {
2838 e100_suspend(pdev, PMSG_SUSPEND);
2839}
2840
2841/* ------------------ PCI Error Recovery infrastructure -------------- */
2842/**
2843 * e100_io_error_detected - called when PCI error is detected.
2844 * @pdev: Pointer to PCI device
2845 * @state: The current pci connection state
2846 */
2847static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
2848{
2849 struct net_device *netdev = pci_get_drvdata(pdev);
2850 struct nic *nic = netdev_priv(netdev);
2851
2852 /* Similar to calling e100_down(), but avoids adapter I/O. */
2853 e100_close(netdev);
2854
2855 /* Detach; put netif into a state similar to hotplug unplug. */
2856 napi_enable(&nic->napi);
2857 netif_device_detach(netdev);
2858 pci_disable_device(pdev);
2859
2860 /* Request a slot reset. */
2861 return PCI_ERS_RESULT_NEED_RESET;
2862}
2863
2864/**
2865 * e100_io_slot_reset - called after the pci bus has been reset.
2866 * @pdev: Pointer to PCI device
2867 *
2868 * Restart the card from scratch.
2869 */
2870static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev)
2871{
2872 struct net_device *netdev = pci_get_drvdata(pdev);
2873 struct nic *nic = netdev_priv(netdev);
2874
2875 if (pci_enable_device(pdev)) {
2876 printk(KERN_ERR "e100: Cannot re-enable PCI device after reset.\n");
2877 return PCI_ERS_RESULT_DISCONNECT;
2878 }
2879 pci_set_master(pdev);
2880
2881 /* Only one device per card can do a reset */
2882 if (0 != PCI_FUNC(pdev->devfn))
2883 return PCI_ERS_RESULT_RECOVERED;
2884 e100_hw_reset(nic);
2885 e100_phy_init(nic);
2886
2887 return PCI_ERS_RESULT_RECOVERED;
2888}
2889
2890/**
2891 * e100_io_resume - resume normal operations
2892 * @pdev: Pointer to PCI device
2893 *
2894 * Resume normal operations after an error recovery
2895 * sequence has been completed.
2896 */
2897static void e100_io_resume(struct pci_dev *pdev)
2898{
2899 struct net_device *netdev = pci_get_drvdata(pdev);
2900 struct nic *nic = netdev_priv(netdev);
2901
2902 /* ack any pending wake events, disable PME */
2903 pci_enable_wake(pdev, 0, 0);
2904
2905 netif_device_attach(netdev);
2906 if (netif_running(netdev)) {
2907 e100_open(netdev);
2908 mod_timer(&nic->watchdog, jiffies);
2909 }
2910}
2911
2912static struct pci_error_handlers e100_err_handler = {
2913 .error_detected = e100_io_error_detected,
2914 .slot_reset = e100_io_slot_reset,
2915 .resume = e100_io_resume,
2916};
2917
2918static struct pci_driver e100_driver = {
2919 .name = DRV_NAME,
2920 .id_table = e100_id_table,
2921 .probe = e100_probe,
2922 .remove = __devexit_p(e100_remove),
2923 #ifdef CONFIG_PM
2924 /* Power Management hooks */
2925 .suspend = e100_suspend,
2926 .resume = e100_resume,
2927#endif
2928 .shutdown = e100_shutdown,
2929 .err_handler = &e100_err_handler,
2930};
2931
2932static int __init e100_init_module(void)
2933{
2934 if(((1 << debug) - 1) & NETIF_MSG_DRV) {
2935 printk(KERN_INFO PFX "%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
2936 printk(KERN_INFO PFX "%s\n", DRV_COPYRIGHT);
2937 }
2938 return pci_register_driver(&e100_driver);
2939}
2940
2941static void __exit e100_cleanup_module(void)
2942{
2943 pci_unregister_driver(&e100_driver);
2944}
2945
2946module_init(e100_init_module);
2947module_exit(e100_cleanup_module);