S2IO: Added a loadable parameter to enable or disable vlan stripping in frame.
[linux-2.6-block.git] / drivers / net / s2io.c
CommitLineData
1da177e4 1/************************************************************************
776bd20f 2 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
1da177e4
LT
3 * Copyright(c) 2002-2005 Neterion Inc.
4
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
12 *
13 * Credits:
20346722 14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable
1da177e4
LT
18 * questions regarding the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
20346722 21 * Francois Romieu : For pointing out all code part that were
1da177e4 22 * deprecated and also styling related comments.
20346722 23 * Grant Grundler : For helping me get rid of some Architecture
1da177e4
LT
24 * dependent code.
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
20346722 26 *
1da177e4
LT
27 * The module loadable parameters that are supported by the driver and a brief
28 * explanation of all the variables.
9dc737a7 29 *
20346722 30 * rx_ring_num : This can be used to program the number of receive rings used
31 * in the driver.
9dc737a7
AR
32 * rx_ring_sz: This defines the number of receive blocks each ring can have.
33 * This is also an array of size 8.
da6971d8
AR
34 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
35 * values are 1, 2 and 3.
1da177e4 36 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
20346722 37 * tx_fifo_len: This too is an array of 8. Each element defines the number of
1da177e4 38 * Tx descriptors that can be associated with each corresponding FIFO.
9dc737a7
AR
39 * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40 * 1(MSI), 2(MSI_X). Default value is '0(INTA)'
41 * lro: Specifies whether to enable Large Receive Offload (LRO) or not.
42 * Possible values '1' for enable '0' for disable. Default is '0'
43 * lro_max_pkts: This parameter defines maximum number of packets can be
44 * aggregated as a single large packet
926930b2
SS
45 * napi: This parameter is used to enable/disable NAPI (polling Rx)
46 * Possible values '1' for enable and '0' for disable. Default is '1'
47 * ufo: This parameter is used to enable/disable UDP Fragmentation Offload(UFO)
48 * Possible values '1' for enable and '0' for disable. Default is '0'
49 * vlan_tag_strip: This can be used to enable or disable vlan stripping.
50 * Possible values '1' for enable , '0' for disable.
51 * Default is '2' - which means disable in promisc mode
52 * and enable in non-promiscuous mode.
1da177e4
LT
53 ************************************************************************/
54
1da177e4
LT
55#include <linux/module.h>
56#include <linux/types.h>
57#include <linux/errno.h>
58#include <linux/ioport.h>
59#include <linux/pci.h>
1e7f0bd8 60#include <linux/dma-mapping.h>
1da177e4
LT
61#include <linux/kernel.h>
62#include <linux/netdevice.h>
63#include <linux/etherdevice.h>
64#include <linux/skbuff.h>
65#include <linux/init.h>
66#include <linux/delay.h>
67#include <linux/stddef.h>
68#include <linux/ioctl.h>
69#include <linux/timex.h>
1da177e4 70#include <linux/ethtool.h>
1da177e4 71#include <linux/workqueue.h>
be3a6b02 72#include <linux/if_vlan.h>
7d3d0439
RA
73#include <linux/ip.h>
74#include <linux/tcp.h>
75#include <net/tcp.h>
1da177e4 76
1da177e4
LT
77#include <asm/system.h>
78#include <asm/uaccess.h>
20346722 79#include <asm/io.h>
fe931395 80#include <asm/div64.h>
330ce0de 81#include <asm/irq.h>
1da177e4
LT
82
83/* local include */
84#include "s2io.h"
85#include "s2io-regs.h"
86
1ee6dd77 87#define DRV_VERSION "2.0.16.1"
6c1792f4 88
1da177e4 89/* S2io Driver name & version. */
20346722 90static char s2io_driver_name[] = "Neterion";
6c1792f4 91static char s2io_driver_version[] = DRV_VERSION;
1da177e4 92
26df54bf
AB
93static int rxd_size[4] = {32,48,48,64};
94static int rxd_count[4] = {127,85,85,63};
da6971d8 95
1ee6dd77 96static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
5e25b9dd 97{
98 int ret;
99
100 ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
101 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
102
103 return ret;
104}
105
/*
 * Cards with following subsystem_id have a link state indication
 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
 * macro below identifies these cards given the subsystem_id.
 * Evaluates to 1 for a faulty Xframe-I card, 0 otherwise.
 * (Arguments and the full expansion are parenthesized to avoid
 * operator-precedence surprises at the call site.)
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
	(((dev_type) == XFRAME_I_DEVICE) ? \
	 (((((subid) >= 0x600B) && ((subid) <= 0x600D)) || \
	   (((subid) >= 0x640B) && ((subid) <= 0x640D))) ? 1 : 0) : 0)

/* Link is up only when neither remote nor local RMAC fault is flagged. */
#define LINK_IS_UP(val64) (!((val64) & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
					ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
/* Atomically claim the tasklet; relies on an 'sp' in the caller's scope. */
#define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
/* Rx buffer depletion severity levels returned by rx_buffer_level(). */
#define PANIC	1
#define LOW	2
1ee6dd77 121static inline int rx_buffer_level(struct s2io_nic * sp, int rxb_size, int ring)
1da177e4 122{
1ee6dd77 123 struct mac_info *mac_control;
20346722 124
125 mac_control = &sp->mac_control;
863c11a9
AR
126 if (rxb_size <= rxd_count[sp->rxd_mode])
127 return PANIC;
128 else if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16)
129 return LOW;
130 return 0;
1da177e4
LT
131}
132
133/* Ethtool related variables and Macros. */
134static char s2io_gstrings[][ETH_GSTRING_LEN] = {
135 "Register test\t(offline)",
136 "Eeprom test\t(offline)",
137 "Link test\t(online)",
138 "RLDRAM test\t(offline)",
139 "BIST Test\t(offline)"
140};
141
142static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
143 {"tmac_frms"},
144 {"tmac_data_octets"},
145 {"tmac_drop_frms"},
146 {"tmac_mcst_frms"},
147 {"tmac_bcst_frms"},
148 {"tmac_pause_ctrl_frms"},
bd1034f0
AR
149 {"tmac_ttl_octets"},
150 {"tmac_ucst_frms"},
151 {"tmac_nucst_frms"},
1da177e4 152 {"tmac_any_err_frms"},
bd1034f0 153 {"tmac_ttl_less_fb_octets"},
1da177e4
LT
154 {"tmac_vld_ip_octets"},
155 {"tmac_vld_ip"},
156 {"tmac_drop_ip"},
157 {"tmac_icmp"},
158 {"tmac_rst_tcp"},
159 {"tmac_tcp"},
160 {"tmac_udp"},
161 {"rmac_vld_frms"},
162 {"rmac_data_octets"},
163 {"rmac_fcs_err_frms"},
164 {"rmac_drop_frms"},
165 {"rmac_vld_mcst_frms"},
166 {"rmac_vld_bcst_frms"},
167 {"rmac_in_rng_len_err_frms"},
bd1034f0 168 {"rmac_out_rng_len_err_frms"},
1da177e4
LT
169 {"rmac_long_frms"},
170 {"rmac_pause_ctrl_frms"},
bd1034f0
AR
171 {"rmac_unsup_ctrl_frms"},
172 {"rmac_ttl_octets"},
173 {"rmac_accepted_ucst_frms"},
174 {"rmac_accepted_nucst_frms"},
1da177e4 175 {"rmac_discarded_frms"},
bd1034f0
AR
176 {"rmac_drop_events"},
177 {"rmac_ttl_less_fb_octets"},
178 {"rmac_ttl_frms"},
1da177e4
LT
179 {"rmac_usized_frms"},
180 {"rmac_osized_frms"},
181 {"rmac_frag_frms"},
182 {"rmac_jabber_frms"},
bd1034f0
AR
183 {"rmac_ttl_64_frms"},
184 {"rmac_ttl_65_127_frms"},
185 {"rmac_ttl_128_255_frms"},
186 {"rmac_ttl_256_511_frms"},
187 {"rmac_ttl_512_1023_frms"},
188 {"rmac_ttl_1024_1518_frms"},
1da177e4
LT
189 {"rmac_ip"},
190 {"rmac_ip_octets"},
191 {"rmac_hdr_err_ip"},
192 {"rmac_drop_ip"},
193 {"rmac_icmp"},
194 {"rmac_tcp"},
195 {"rmac_udp"},
196 {"rmac_err_drp_udp"},
bd1034f0
AR
197 {"rmac_xgmii_err_sym"},
198 {"rmac_frms_q0"},
199 {"rmac_frms_q1"},
200 {"rmac_frms_q2"},
201 {"rmac_frms_q3"},
202 {"rmac_frms_q4"},
203 {"rmac_frms_q5"},
204 {"rmac_frms_q6"},
205 {"rmac_frms_q7"},
206 {"rmac_full_q0"},
207 {"rmac_full_q1"},
208 {"rmac_full_q2"},
209 {"rmac_full_q3"},
210 {"rmac_full_q4"},
211 {"rmac_full_q5"},
212 {"rmac_full_q6"},
213 {"rmac_full_q7"},
1da177e4 214 {"rmac_pause_cnt"},
bd1034f0
AR
215 {"rmac_xgmii_data_err_cnt"},
216 {"rmac_xgmii_ctrl_err_cnt"},
1da177e4
LT
217 {"rmac_accepted_ip"},
218 {"rmac_err_tcp"},
bd1034f0
AR
219 {"rd_req_cnt"},
220 {"new_rd_req_cnt"},
221 {"new_rd_req_rtry_cnt"},
222 {"rd_rtry_cnt"},
223 {"wr_rtry_rd_ack_cnt"},
224 {"wr_req_cnt"},
225 {"new_wr_req_cnt"},
226 {"new_wr_req_rtry_cnt"},
227 {"wr_rtry_cnt"},
228 {"wr_disc_cnt"},
229 {"rd_rtry_wr_ack_cnt"},
230 {"txp_wr_cnt"},
231 {"txd_rd_cnt"},
232 {"txd_wr_cnt"},
233 {"rxd_rd_cnt"},
234 {"rxd_wr_cnt"},
235 {"txf_rd_cnt"},
236 {"rxf_wr_cnt"},
237 {"rmac_ttl_1519_4095_frms"},
238 {"rmac_ttl_4096_8191_frms"},
239 {"rmac_ttl_8192_max_frms"},
240 {"rmac_ttl_gt_max_frms"},
241 {"rmac_osized_alt_frms"},
242 {"rmac_jabber_alt_frms"},
243 {"rmac_gt_max_alt_frms"},
244 {"rmac_vlan_frms"},
245 {"rmac_len_discard"},
246 {"rmac_fcs_discard"},
247 {"rmac_pf_discard"},
248 {"rmac_da_discard"},
249 {"rmac_red_discard"},
250 {"rmac_rts_discard"},
251 {"rmac_ingm_full_discard"},
252 {"link_fault_cnt"},
7ba013ac 253 {"\n DRIVER STATISTICS"},
254 {"single_bit_ecc_errs"},
255 {"double_bit_ecc_errs"},
bd1034f0
AR
256 {"parity_err_cnt"},
257 {"serious_err_cnt"},
258 {"soft_reset_cnt"},
259 {"fifo_full_cnt"},
260 {"ring_full_cnt"},
261 ("alarm_transceiver_temp_high"),
262 ("alarm_transceiver_temp_low"),
263 ("alarm_laser_bias_current_high"),
264 ("alarm_laser_bias_current_low"),
265 ("alarm_laser_output_power_high"),
266 ("alarm_laser_output_power_low"),
267 ("warn_transceiver_temp_high"),
268 ("warn_transceiver_temp_low"),
269 ("warn_laser_bias_current_high"),
270 ("warn_laser_bias_current_low"),
271 ("warn_laser_output_power_high"),
272 ("warn_laser_output_power_low"),
7d3d0439
RA
273 ("lro_aggregated_pkts"),
274 ("lro_flush_both_count"),
275 ("lro_out_of_sequence_pkts"),
276 ("lro_flush_due_to_max_pkts"),
277 ("lro_avg_aggr_pkts"),
1da177e4
LT
278};
279
/* Lengths derived from the string tables above; fully parenthesized so
 * they can be embedded in larger expressions without precedence bugs. */
#define S2IO_STAT_LEN (sizeof(ethtool_stats_keys) / ETH_GSTRING_LEN)
#define S2IO_STAT_STRINGS_LEN (S2IO_STAT_LEN * ETH_GSTRING_LEN)

#define S2IO_TEST_LEN (sizeof(s2io_gstrings) / ETH_GSTRING_LEN)
#define S2IO_STRINGS_LEN (S2IO_TEST_LEN * ETH_GSTRING_LEN)
285
/*
 * Initialize and arm a kernel timer in one step. Wrapped in
 * do { } while (0) so the multi-statement body behaves as a single
 * statement (safe inside an un-braced 'if'); the caller supplies the
 * trailing semicolon as before.
 */
#define S2IO_TIMER_CONF(timer, handle, arg, exp)	\
	do {						\
		init_timer(&timer);			\
		timer.function = handle;		\
		timer.data = (unsigned long) arg;	\
		mod_timer(&timer, (jiffies + exp));	\
	} while (0)
be3a6b02 292/* Add the vlan */
293static void s2io_vlan_rx_register(struct net_device *dev,
294 struct vlan_group *grp)
295{
1ee6dd77 296 struct s2io_nic *nic = dev->priv;
be3a6b02 297 unsigned long flags;
298
299 spin_lock_irqsave(&nic->tx_lock, flags);
300 nic->vlgrp = grp;
301 spin_unlock_irqrestore(&nic->tx_lock, flags);
302}
303
926930b2
SS
304/* A flag indicating whether 'RX_PA_CFG_STRIP_VLAN_TAG' bit is set or not */
305int vlan_strip_flag;
306
be3a6b02 307/* Unregister the vlan */
308static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
309{
1ee6dd77 310 struct s2io_nic *nic = dev->priv;
be3a6b02 311 unsigned long flags;
312
313 spin_lock_irqsave(&nic->tx_lock, flags);
314 if (nic->vlgrp)
315 nic->vlgrp->vlan_devices[vid] = NULL;
316 spin_unlock_irqrestore(&nic->tx_lock, flags);
317}
318
20346722 319/*
1da177e4
LT
320 * Constants to be programmed into the Xena's registers, to configure
321 * the XAUI.
322 */
323
1da177e4 324#define END_SIGN 0x0
f71e1309 325static const u64 herc_act_dtx_cfg[] = {
541ae68f 326 /* Set address */
e960fc5c 327 0x8000051536750000ULL, 0x80000515367500E0ULL,
541ae68f 328 /* Write data */
e960fc5c 329 0x8000051536750004ULL, 0x80000515367500E4ULL,
541ae68f 330 /* Set address */
331 0x80010515003F0000ULL, 0x80010515003F00E0ULL,
332 /* Write data */
333 0x80010515003F0004ULL, 0x80010515003F00E4ULL,
334 /* Set address */
e960fc5c 335 0x801205150D440000ULL, 0x801205150D4400E0ULL,
336 /* Write data */
337 0x801205150D440004ULL, 0x801205150D4400E4ULL,
338 /* Set address */
541ae68f 339 0x80020515F2100000ULL, 0x80020515F21000E0ULL,
340 /* Write data */
341 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
342 /* Done */
343 END_SIGN
344};
345
f71e1309 346static const u64 xena_dtx_cfg[] = {
c92ca04b 347 /* Set address */
1da177e4 348 0x8000051500000000ULL, 0x80000515000000E0ULL,
c92ca04b
AR
349 /* Write data */
350 0x80000515D9350004ULL, 0x80000515D93500E4ULL,
351 /* Set address */
352 0x8001051500000000ULL, 0x80010515000000E0ULL,
353 /* Write data */
354 0x80010515001E0004ULL, 0x80010515001E00E4ULL,
355 /* Set address */
1da177e4 356 0x8002051500000000ULL, 0x80020515000000E0ULL,
c92ca04b
AR
357 /* Write data */
358 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
1da177e4
LT
359 END_SIGN
360};
361
20346722 362/*
1da177e4
LT
363 * Constants for Fixing the MacAddress problem seen mostly on
364 * Alpha machines.
365 */
f71e1309 366static const u64 fix_mac[] = {
1da177e4
LT
367 0x0060000000000000ULL, 0x0060600000000000ULL,
368 0x0040600000000000ULL, 0x0000600000000000ULL,
369 0x0020600000000000ULL, 0x0060600000000000ULL,
370 0x0020600000000000ULL, 0x0060600000000000ULL,
371 0x0020600000000000ULL, 0x0060600000000000ULL,
372 0x0020600000000000ULL, 0x0060600000000000ULL,
373 0x0020600000000000ULL, 0x0060600000000000ULL,
374 0x0020600000000000ULL, 0x0060600000000000ULL,
375 0x0020600000000000ULL, 0x0060600000000000ULL,
376 0x0020600000000000ULL, 0x0060600000000000ULL,
377 0x0020600000000000ULL, 0x0060600000000000ULL,
378 0x0020600000000000ULL, 0x0060600000000000ULL,
379 0x0020600000000000ULL, 0x0000600000000000ULL,
380 0x0040600000000000ULL, 0x0060600000000000ULL,
381 END_SIGN
382};
383
b41477f3
AR
384MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
385MODULE_LICENSE("GPL");
386MODULE_VERSION(DRV_VERSION);
387
388
1da177e4 389/* Module Loadable parameters. */
b41477f3
AR
390S2IO_PARM_INT(tx_fifo_num, 1);
391S2IO_PARM_INT(rx_ring_num, 1);
392
393
394S2IO_PARM_INT(rx_ring_mode, 1);
395S2IO_PARM_INT(use_continuous_tx_intrs, 1);
396S2IO_PARM_INT(rmac_pause_time, 0x100);
397S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
398S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
399S2IO_PARM_INT(shared_splits, 0);
400S2IO_PARM_INT(tmac_util_period, 5);
401S2IO_PARM_INT(rmac_util_period, 5);
402S2IO_PARM_INT(bimodal, 0);
403S2IO_PARM_INT(l3l4hdr_size, 128);
303bcb4b 404/* Frequency of Rx desc syncs expressed as power of 2 */
b41477f3 405S2IO_PARM_INT(rxsync_frequency, 3);
cc6e7c44 406/* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */
b41477f3 407S2IO_PARM_INT(intr_type, 0);
7d3d0439 408/* Large receive offload feature */
b41477f3 409S2IO_PARM_INT(lro, 0);
7d3d0439
RA
410/* Max pkts to be aggregated by LRO at one time. If not specified,
411 * aggregation happens until we hit max IP pkt size(64K)
412 */
b41477f3 413S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
b41477f3 414S2IO_PARM_INT(indicate_max_pkts, 0);
db874e65
SS
415
416S2IO_PARM_INT(napi, 1);
417S2IO_PARM_INT(ufo, 0);
926930b2 418S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);
b41477f3
AR
419
420static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
421 {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
422static unsigned int rx_ring_sz[MAX_RX_RINGS] =
423 {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
424static unsigned int rts_frm_len[MAX_RX_RINGS] =
425 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
426
427module_param_array(tx_fifo_len, uint, NULL, 0);
428module_param_array(rx_ring_sz, uint, NULL, 0);
429module_param_array(rts_frm_len, uint, NULL, 0);
1da177e4 430
20346722 431/*
1da177e4 432 * S2IO device table.
20346722 433 * This table lists all the devices that this driver supports.
1da177e4
LT
434 */
435static struct pci_device_id s2io_tbl[] __devinitdata = {
436 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
437 PCI_ANY_ID, PCI_ANY_ID},
438 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
439 PCI_ANY_ID, PCI_ANY_ID},
440 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
20346722 441 PCI_ANY_ID, PCI_ANY_ID},
442 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
443 PCI_ANY_ID, PCI_ANY_ID},
1da177e4
LT
444 {0,}
445};
446
447MODULE_DEVICE_TABLE(pci, s2io_tbl);
448
449static struct pci_driver s2io_driver = {
450 .name = "S2IO",
451 .id_table = s2io_tbl,
452 .probe = s2io_init_nic,
453 .remove = __devexit_p(s2io_rem_nic),
454};
455
456/* A simplifier macro used both by init and free shared_mem Fns(). */
457#define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
458
459/**
460 * init_shared_mem - Allocation and Initialization of Memory
461 * @nic: Device private variable.
20346722 462 * Description: The function allocates all the memory areas shared
463 * between the NIC and the driver. This includes Tx descriptors,
1da177e4
LT
464 * Rx descriptors and the statistics block.
465 */
466
467static int init_shared_mem(struct s2io_nic *nic)
468{
469 u32 size;
470 void *tmp_v_addr, *tmp_v_addr_next;
471 dma_addr_t tmp_p_addr, tmp_p_addr_next;
1ee6dd77 472 struct RxD_block *pre_rxd_blk = NULL;
372cc597 473 int i, j, blk_cnt;
1da177e4
LT
474 int lst_size, lst_per_page;
475 struct net_device *dev = nic->dev;
8ae418cf 476 unsigned long tmp;
1ee6dd77 477 struct buffAdd *ba;
1da177e4 478
1ee6dd77 479 struct mac_info *mac_control;
1da177e4
LT
480 struct config_param *config;
481
482 mac_control = &nic->mac_control;
483 config = &nic->config;
484
485
486 /* Allocation and initialization of TXDLs in FIOFs */
487 size = 0;
488 for (i = 0; i < config->tx_fifo_num; i++) {
489 size += config->tx_cfg[i].fifo_len;
490 }
491 if (size > MAX_AVAILABLE_TXDS) {
b41477f3 492 DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
0b1f7ebe 493 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
b41477f3 494 return -EINVAL;
1da177e4
LT
495 }
496
1ee6dd77 497 lst_size = (sizeof(struct TxD) * config->max_txds);
1da177e4
LT
498 lst_per_page = PAGE_SIZE / lst_size;
499
500 for (i = 0; i < config->tx_fifo_num; i++) {
501 int fifo_len = config->tx_cfg[i].fifo_len;
1ee6dd77 502 int list_holder_size = fifo_len * sizeof(struct list_info_hold);
20346722 503 mac_control->fifos[i].list_info = kmalloc(list_holder_size,
504 GFP_KERNEL);
505 if (!mac_control->fifos[i].list_info) {
1da177e4
LT
506 DBG_PRINT(ERR_DBG,
507 "Malloc failed for list_info\n");
508 return -ENOMEM;
509 }
20346722 510 memset(mac_control->fifos[i].list_info, 0, list_holder_size);
1da177e4
LT
511 }
512 for (i = 0; i < config->tx_fifo_num; i++) {
513 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
514 lst_per_page);
20346722 515 mac_control->fifos[i].tx_curr_put_info.offset = 0;
516 mac_control->fifos[i].tx_curr_put_info.fifo_len =
1da177e4 517 config->tx_cfg[i].fifo_len - 1;
20346722 518 mac_control->fifos[i].tx_curr_get_info.offset = 0;
519 mac_control->fifos[i].tx_curr_get_info.fifo_len =
1da177e4 520 config->tx_cfg[i].fifo_len - 1;
20346722 521 mac_control->fifos[i].fifo_no = i;
522 mac_control->fifos[i].nic = nic;
fed5eccd 523 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;
20346722 524
1da177e4
LT
525 for (j = 0; j < page_num; j++) {
526 int k = 0;
527 dma_addr_t tmp_p;
528 void *tmp_v;
529 tmp_v = pci_alloc_consistent(nic->pdev,
530 PAGE_SIZE, &tmp_p);
531 if (!tmp_v) {
532 DBG_PRINT(ERR_DBG,
533 "pci_alloc_consistent ");
534 DBG_PRINT(ERR_DBG, "failed for TxDL\n");
535 return -ENOMEM;
536 }
776bd20f 537 /* If we got a zero DMA address(can happen on
538 * certain platforms like PPC), reallocate.
539 * Store virtual address of page we don't want,
540 * to be freed later.
541 */
542 if (!tmp_p) {
543 mac_control->zerodma_virt_addr = tmp_v;
6aa20a22 544 DBG_PRINT(INIT_DBG,
776bd20f 545 "%s: Zero DMA address for TxDL. ", dev->name);
6aa20a22 546 DBG_PRINT(INIT_DBG,
6b4d617d 547 "Virtual address %p\n", tmp_v);
776bd20f 548 tmp_v = pci_alloc_consistent(nic->pdev,
549 PAGE_SIZE, &tmp_p);
550 if (!tmp_v) {
551 DBG_PRINT(ERR_DBG,
552 "pci_alloc_consistent ");
553 DBG_PRINT(ERR_DBG, "failed for TxDL\n");
554 return -ENOMEM;
555 }
556 }
1da177e4
LT
557 while (k < lst_per_page) {
558 int l = (j * lst_per_page) + k;
559 if (l == config->tx_cfg[i].fifo_len)
20346722 560 break;
561 mac_control->fifos[i].list_info[l].list_virt_addr =
1da177e4 562 tmp_v + (k * lst_size);
20346722 563 mac_control->fifos[i].list_info[l].list_phy_addr =
1da177e4
LT
564 tmp_p + (k * lst_size);
565 k++;
566 }
567 }
568 }
1da177e4 569
4384247b 570 nic->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
fed5eccd
AR
571 if (!nic->ufo_in_band_v)
572 return -ENOMEM;
573
1da177e4
LT
574 /* Allocation and initialization of RXDs in Rings */
575 size = 0;
576 for (i = 0; i < config->rx_ring_num; i++) {
da6971d8
AR
577 if (config->rx_cfg[i].num_rxd %
578 (rxd_count[nic->rxd_mode] + 1)) {
1da177e4
LT
579 DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
580 DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
581 i);
582 DBG_PRINT(ERR_DBG, "RxDs per Block");
583 return FAILURE;
584 }
585 size += config->rx_cfg[i].num_rxd;
20346722 586 mac_control->rings[i].block_count =
da6971d8
AR
587 config->rx_cfg[i].num_rxd /
588 (rxd_count[nic->rxd_mode] + 1 );
589 mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
590 mac_control->rings[i].block_count;
1da177e4 591 }
da6971d8 592 if (nic->rxd_mode == RXD_MODE_1)
1ee6dd77 593 size = (size * (sizeof(struct RxD1)));
da6971d8 594 else
1ee6dd77 595 size = (size * (sizeof(struct RxD3)));
1da177e4
LT
596
597 for (i = 0; i < config->rx_ring_num; i++) {
20346722 598 mac_control->rings[i].rx_curr_get_info.block_index = 0;
599 mac_control->rings[i].rx_curr_get_info.offset = 0;
600 mac_control->rings[i].rx_curr_get_info.ring_len =
1da177e4 601 config->rx_cfg[i].num_rxd - 1;
20346722 602 mac_control->rings[i].rx_curr_put_info.block_index = 0;
603 mac_control->rings[i].rx_curr_put_info.offset = 0;
604 mac_control->rings[i].rx_curr_put_info.ring_len =
1da177e4 605 config->rx_cfg[i].num_rxd - 1;
20346722 606 mac_control->rings[i].nic = nic;
607 mac_control->rings[i].ring_no = i;
608
da6971d8
AR
609 blk_cnt = config->rx_cfg[i].num_rxd /
610 (rxd_count[nic->rxd_mode] + 1);
1da177e4
LT
611 /* Allocating all the Rx blocks */
612 for (j = 0; j < blk_cnt; j++) {
1ee6dd77 613 struct rx_block_info *rx_blocks;
da6971d8
AR
614 int l;
615
616 rx_blocks = &mac_control->rings[i].rx_blocks[j];
617 size = SIZE_OF_BLOCK; //size is always page size
1da177e4
LT
618 tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
619 &tmp_p_addr);
620 if (tmp_v_addr == NULL) {
621 /*
20346722 622 * In case of failure, free_shared_mem()
623 * is called, which should free any
624 * memory that was alloced till the
1da177e4
LT
625 * failure happened.
626 */
da6971d8 627 rx_blocks->block_virt_addr = tmp_v_addr;
1da177e4
LT
628 return -ENOMEM;
629 }
630 memset(tmp_v_addr, 0, size);
da6971d8
AR
631 rx_blocks->block_virt_addr = tmp_v_addr;
632 rx_blocks->block_dma_addr = tmp_p_addr;
1ee6dd77 633 rx_blocks->rxds = kmalloc(sizeof(struct rxd_info)*
da6971d8
AR
634 rxd_count[nic->rxd_mode],
635 GFP_KERNEL);
372cc597
SS
636 if (!rx_blocks->rxds)
637 return -ENOMEM;
da6971d8
AR
638 for (l=0; l<rxd_count[nic->rxd_mode];l++) {
639 rx_blocks->rxds[l].virt_addr =
640 rx_blocks->block_virt_addr +
641 (rxd_size[nic->rxd_mode] * l);
642 rx_blocks->rxds[l].dma_addr =
643 rx_blocks->block_dma_addr +
644 (rxd_size[nic->rxd_mode] * l);
645 }
1da177e4
LT
646 }
647 /* Interlinking all Rx Blocks */
648 for (j = 0; j < blk_cnt; j++) {
20346722 649 tmp_v_addr =
650 mac_control->rings[i].rx_blocks[j].block_virt_addr;
1da177e4 651 tmp_v_addr_next =
20346722 652 mac_control->rings[i].rx_blocks[(j + 1) %
1da177e4 653 blk_cnt].block_virt_addr;
20346722 654 tmp_p_addr =
655 mac_control->rings[i].rx_blocks[j].block_dma_addr;
1da177e4 656 tmp_p_addr_next =
20346722 657 mac_control->rings[i].rx_blocks[(j + 1) %
1da177e4
LT
658 blk_cnt].block_dma_addr;
659
1ee6dd77 660 pre_rxd_blk = (struct RxD_block *) tmp_v_addr;
1da177e4
LT
661 pre_rxd_blk->reserved_2_pNext_RxD_block =
662 (unsigned long) tmp_v_addr_next;
1da177e4
LT
663 pre_rxd_blk->pNext_RxD_Blk_physical =
664 (u64) tmp_p_addr_next;
665 }
666 }
da6971d8
AR
667 if (nic->rxd_mode >= RXD_MODE_3A) {
668 /*
669 * Allocation of Storages for buffer addresses in 2BUFF mode
670 * and the buffers as well.
671 */
672 for (i = 0; i < config->rx_ring_num; i++) {
673 blk_cnt = config->rx_cfg[i].num_rxd /
674 (rxd_count[nic->rxd_mode]+ 1);
675 mac_control->rings[i].ba =
1ee6dd77 676 kmalloc((sizeof(struct buffAdd *) * blk_cnt),
1da177e4 677 GFP_KERNEL);
da6971d8 678 if (!mac_control->rings[i].ba)
1da177e4 679 return -ENOMEM;
da6971d8
AR
680 for (j = 0; j < blk_cnt; j++) {
681 int k = 0;
682 mac_control->rings[i].ba[j] =
1ee6dd77 683 kmalloc((sizeof(struct buffAdd) *
da6971d8
AR
684 (rxd_count[nic->rxd_mode] + 1)),
685 GFP_KERNEL);
686 if (!mac_control->rings[i].ba[j])
1da177e4 687 return -ENOMEM;
da6971d8
AR
688 while (k != rxd_count[nic->rxd_mode]) {
689 ba = &mac_control->rings[i].ba[j][k];
690
691 ba->ba_0_org = (void *) kmalloc
692 (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
693 if (!ba->ba_0_org)
694 return -ENOMEM;
695 tmp = (unsigned long)ba->ba_0_org;
696 tmp += ALIGN_SIZE;
697 tmp &= ~((unsigned long) ALIGN_SIZE);
698 ba->ba_0 = (void *) tmp;
699
700 ba->ba_1_org = (void *) kmalloc
701 (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
702 if (!ba->ba_1_org)
703 return -ENOMEM;
704 tmp = (unsigned long) ba->ba_1_org;
705 tmp += ALIGN_SIZE;
706 tmp &= ~((unsigned long) ALIGN_SIZE);
707 ba->ba_1 = (void *) tmp;
708 k++;
709 }
1da177e4
LT
710 }
711 }
712 }
1da177e4
LT
713
714 /* Allocation and initialization of Statistics block */
1ee6dd77 715 size = sizeof(struct stat_block);
1da177e4
LT
716 mac_control->stats_mem = pci_alloc_consistent
717 (nic->pdev, size, &mac_control->stats_mem_phy);
718
719 if (!mac_control->stats_mem) {
20346722 720 /*
721 * In case of failure, free_shared_mem() is called, which
722 * should free any memory that was alloced till the
1da177e4
LT
723 * failure happened.
724 */
725 return -ENOMEM;
726 }
727 mac_control->stats_mem_sz = size;
728
729 tmp_v_addr = mac_control->stats_mem;
1ee6dd77 730 mac_control->stats_info = (struct stat_block *) tmp_v_addr;
1da177e4 731 memset(tmp_v_addr, 0, size);
1da177e4
LT
732 DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
733 (unsigned long long) tmp_p_addr);
734
735 return SUCCESS;
736}
737
20346722 738/**
739 * free_shared_mem - Free the allocated Memory
1da177e4
LT
740 * @nic: Device private variable.
741 * Description: This function is to free all memory locations allocated by
742 * the init_shared_mem() function and return it to the kernel.
743 */
744
745static void free_shared_mem(struct s2io_nic *nic)
746{
747 int i, j, blk_cnt, size;
748 void *tmp_v_addr;
749 dma_addr_t tmp_p_addr;
1ee6dd77 750 struct mac_info *mac_control;
1da177e4
LT
751 struct config_param *config;
752 int lst_size, lst_per_page;
776bd20f 753 struct net_device *dev = nic->dev;
1da177e4
LT
754
755 if (!nic)
756 return;
757
758 mac_control = &nic->mac_control;
759 config = &nic->config;
760
1ee6dd77 761 lst_size = (sizeof(struct TxD) * config->max_txds);
1da177e4
LT
762 lst_per_page = PAGE_SIZE / lst_size;
763
764 for (i = 0; i < config->tx_fifo_num; i++) {
765 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
766 lst_per_page);
767 for (j = 0; j < page_num; j++) {
768 int mem_blks = (j * lst_per_page);
776bd20f 769 if (!mac_control->fifos[i].list_info)
6aa20a22 770 return;
776bd20f 771 if (!mac_control->fifos[i].list_info[mem_blks].
772 list_virt_addr)
1da177e4
LT
773 break;
774 pci_free_consistent(nic->pdev, PAGE_SIZE,
20346722 775 mac_control->fifos[i].
776 list_info[mem_blks].
1da177e4 777 list_virt_addr,
20346722 778 mac_control->fifos[i].
779 list_info[mem_blks].
1da177e4
LT
780 list_phy_addr);
781 }
776bd20f 782 /* If we got a zero DMA address during allocation,
783 * free the page now
784 */
785 if (mac_control->zerodma_virt_addr) {
786 pci_free_consistent(nic->pdev, PAGE_SIZE,
787 mac_control->zerodma_virt_addr,
788 (dma_addr_t)0);
6aa20a22 789 DBG_PRINT(INIT_DBG,
6b4d617d
AM
790 "%s: Freeing TxDL with zero DMA addr. ",
791 dev->name);
792 DBG_PRINT(INIT_DBG, "Virtual address %p\n",
793 mac_control->zerodma_virt_addr);
776bd20f 794 }
20346722 795 kfree(mac_control->fifos[i].list_info);
1da177e4
LT
796 }
797
1da177e4 798 size = SIZE_OF_BLOCK;
1da177e4 799 for (i = 0; i < config->rx_ring_num; i++) {
20346722 800 blk_cnt = mac_control->rings[i].block_count;
1da177e4 801 for (j = 0; j < blk_cnt; j++) {
20346722 802 tmp_v_addr = mac_control->rings[i].rx_blocks[j].
803 block_virt_addr;
804 tmp_p_addr = mac_control->rings[i].rx_blocks[j].
805 block_dma_addr;
1da177e4
LT
806 if (tmp_v_addr == NULL)
807 break;
808 pci_free_consistent(nic->pdev, size,
809 tmp_v_addr, tmp_p_addr);
da6971d8 810 kfree(mac_control->rings[i].rx_blocks[j].rxds);
1da177e4
LT
811 }
812 }
813
da6971d8
AR
814 if (nic->rxd_mode >= RXD_MODE_3A) {
815 /* Freeing buffer storage addresses in 2BUFF mode. */
816 for (i = 0; i < config->rx_ring_num; i++) {
817 blk_cnt = config->rx_cfg[i].num_rxd /
818 (rxd_count[nic->rxd_mode] + 1);
819 for (j = 0; j < blk_cnt; j++) {
820 int k = 0;
821 if (!mac_control->rings[i].ba[j])
822 continue;
823 while (k != rxd_count[nic->rxd_mode]) {
1ee6dd77 824 struct buffAdd *ba =
da6971d8
AR
825 &mac_control->rings[i].ba[j][k];
826 kfree(ba->ba_0_org);
827 kfree(ba->ba_1_org);
828 k++;
829 }
830 kfree(mac_control->rings[i].ba[j]);
1da177e4 831 }
da6971d8 832 kfree(mac_control->rings[i].ba);
1da177e4 833 }
1da177e4 834 }
1da177e4
LT
835
836 if (mac_control->stats_mem) {
837 pci_free_consistent(nic->pdev,
838 mac_control->stats_mem_sz,
839 mac_control->stats_mem,
840 mac_control->stats_mem_phy);
841 }
fed5eccd
AR
842 if (nic->ufo_in_band_v)
843 kfree(nic->ufo_in_band_v);
1da177e4
LT
844}
845
541ae68f 846/**
847 * s2io_verify_pci_mode -
848 */
849
1ee6dd77 850static int s2io_verify_pci_mode(struct s2io_nic *nic)
541ae68f 851{
1ee6dd77 852 struct XENA_dev_config __iomem *bar0 = nic->bar0;
541ae68f 853 register u64 val64 = 0;
854 int mode;
855
856 val64 = readq(&bar0->pci_mode);
857 mode = (u8)GET_PCI_MODE(val64);
858
859 if ( val64 & PCI_MODE_UNKNOWN_MODE)
860 return -1; /* Unknown PCI mode */
861 return mode;
862}
863
c92ca04b
AR
864#define NEC_VENID 0x1033
865#define NEC_DEVID 0x0125
866static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
867{
868 struct pci_dev *tdev = NULL;
26d36b64
AC
869 while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
870 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
c92ca04b 871 if (tdev->bus == s2io_pdev->bus->parent)
26d36b64 872 pci_dev_put(tdev);
c92ca04b
AR
873 return 1;
874 }
875 }
876 return 0;
877}
/* Effective bus speed (MHz) indexed by the GET_PCI_MODE() value. */
static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};

/**
 * s2io_print_pci_mode - report the PCI/PCI-X bus mode the adapter runs in
 * @nic: device private structure.
 *
 * Reads the pci_mode register, caches the effective bus speed in
 * config->bus_speed (needed later for interrupt timer programming in
 * init_nic) and logs a human-readable description of the bus width and
 * speed.  Devices behind the NEC bridge are reported as PCI-E and no
 * width/speed breakdown is printed.
 *
 * Return: the PCI mode on success, -1 if the mode is unknown/unsupported.
 */
static int s2io_print_pci_mode(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	int mode;
	struct config_param *config = &nic->config;

	val64 = readq(&bar0->pci_mode);
	mode = (u8)GET_PCI_MODE(val64);

	if ( val64 & PCI_MODE_UNKNOWN_MODE)
		return -1;      /* Unknown PCI mode */

	config->bus_speed = bus_speed[mode];

	if (s2io_on_nec_bridge(nic->pdev)) {
		DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
							nic->dev->name);
		return mode;
	}

	if (val64 & PCI_MODE_32_BITS) {
		DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
	} else {
		DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
	}

	/* PCI-X mode 2 runs the strobe at double rate, hence the doubled
	 * effective frequencies printed for the M2 cases. */
	switch(mode) {
	case PCI_MODE_PCI_33:
		DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
		break;
	case PCI_MODE_PCI_66:
		DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
		break;
	case PCI_MODE_PCIX_M1_66:
		DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
		break;
	case PCI_MODE_PCIX_M1_100:
		DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
		break;
	case PCI_MODE_PCIX_M1_133:
		DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
		break;
	case PCI_MODE_PCIX_M2_66:
		DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
		break;
	case PCI_MODE_PCIX_M2_100:
		DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
		break;
	case PCI_MODE_PCIX_M2_133:
		DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
		break;
	default:
		return -1;      /* Unsupported bus speed */
	}

	return mode;
}
941
/**
 * init_nic - Initialization of hardware
 * @nic: device private variable
 * Description: The function sequentially configures every block
 * of the H/W from their reset values: swapper control, XGXS reset,
 * DTX transceiver config, Tx/Rx DMA partitioning, round-robin
 * steering, frame-length steering, statistics memory, and the
 * TTI/RTI interrupt moderation schemes.  Register write ORDER is
 * significant throughout; do not reorder the writeq sequences.
 * Return Value: SUCCESS on success and
 * '-1' on failure (endian settings incorrect).
 */

static int init_nic(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	void __iomem *add;
	u32 time;
	int i, j;	/* NOTE(review): j is initialized but never used below */
	struct mac_info *mac_control;
	struct config_param *config;
	int dtx_cnt = 0;
	unsigned long long mem_share;
	int mem_size;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/* to set the swapper control on the card */
	if(s2io_set_swapper(nic)) {
		DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
		return -1;
	}

	/*
	 * Herc requires EOI to be removed from reset before XGXS, so..
	 */
	if (nic->device_type & XFRAME_II_DEVICE) {
		val64 = 0xA500000000ULL;
		writeq(val64, &bar0->sw_reset);
		msleep(500);
		val64 = readq(&bar0->sw_reset);
	}

	/* Remove XGXS from reset state */
	val64 = 0;
	writeq(val64, &bar0->sw_reset);
	msleep(500);
	val64 = readq(&bar0->sw_reset);

	/* Enable Receiving broadcasts.  mac_cfg is a keyed register on
	 * Xena: each 32-bit half must be preceded by the 0x4C0D key. */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 |= MAC_RMAC_BCAST_ENABLE;
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) val64, add);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64 >> 32), (add + 4));

	/* Read registers in all blocks (dummy reads to flush/settle) */
	val64 = readq(&bar0->mac_int_mask);
	val64 = readq(&bar0->mc_int_mask);
	val64 = readq(&bar0->xgxs_int_mask);

	/* Set MTU */
	val64 = dev->mtu;
	writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);

	/* Program the DTX (transceiver) configuration sequence;
	 * tables are END_SIGN-terminated. */
	if (nic->device_type & XFRAME_II_DEVICE) {
		while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
			SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
					  &bar0->dtx_control, UF);
			if (dtx_cnt & 0x1)
				msleep(1); /* Necessary!! */
			dtx_cnt++;
		}
	} else {
		while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
			SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
					  &bar0->dtx_control, UF);
			val64 = readq(&bar0->dtx_control);
			dtx_cnt++;
		}
	}

	/* Tx DMA Initialization */
	val64 = 0;
	writeq(val64, &bar0->tx_fifo_partition_0);
	writeq(val64, &bar0->tx_fifo_partition_1);
	writeq(val64, &bar0->tx_fifo_partition_2);
	writeq(val64, &bar0->tx_fifo_partition_3);


	/* Each partition register holds two FIFOs; accumulate the pair
	 * in val64 and flush on odd indices.  For an odd FIFO count the
	 * last (even) index is bumped so its half-filled register is
	 * still written by the matching switch case. */
	for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
		val64 |=
		    vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
			 13) | vBIT(config->tx_cfg[i].fifo_priority,
				    ((i * 32) + 5), 3);

		if (i == (config->tx_fifo_num - 1)) {
			if (i % 2 == 0)
				i++;
		}

		switch (i) {
		case 1:
			writeq(val64, &bar0->tx_fifo_partition_0);
			val64 = 0;
			break;
		case 3:
			writeq(val64, &bar0->tx_fifo_partition_1);
			val64 = 0;
			break;
		case 5:
			writeq(val64, &bar0->tx_fifo_partition_2);
			val64 = 0;
			break;
		case 7:
			writeq(val64, &bar0->tx_fifo_partition_3);
			break;
		}
	}

	/*
	 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
	 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
	 */
	if ((nic->device_type == XFRAME_I_DEVICE) &&
		(get_xena_rev_id(nic->pdev) < 4))
		writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);

	val64 = readq(&bar0->tx_fifo_partition_0);
	DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
		  &bar0->tx_fifo_partition_0, (unsigned long long) val64);

	/*
	 * Initialization of Tx_PA_CONFIG register to ignore packet
	 * integrity checking.
	 */
	val64 = readq(&bar0->tx_pa_cfg);
	val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
	    TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
	writeq(val64, &bar0->tx_pa_cfg);

	/* Rx DMA intialization. */
	val64 = 0;
	for (i = 0; i < config->rx_ring_num; i++) {
		val64 |=
		    vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
			 3);
	}
	writeq(val64, &bar0->rx_queue_priority);

	/*
	 * Allocating equal share of memory to all the
	 * configured Rings.  Ring 0 also absorbs the remainder.
	 */
	val64 = 0;
	if (nic->device_type & XFRAME_II_DEVICE)
		mem_size = 32;
	else
		mem_size = 64;

	for (i = 0; i < config->rx_ring_num; i++) {
		switch (i) {
		case 0:
			mem_share = (mem_size / config->rx_ring_num +
				     mem_size % config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
			continue;
		case 1:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
			continue;
		case 2:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
			continue;
		case 3:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
			continue;
		case 4:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
			continue;
		case 5:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
			continue;
		case 6:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
			continue;
		case 7:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
			continue;
		}
	}
	writeq(val64, &bar0->rx_queue_cfg);

	/*
	 * Filling Tx round robin registers
	 * as per the number of FIFOs.  The byte patterns below are the
	 * hardware-defined weighted schedules for each FIFO count.
	 */
	switch (config->tx_fifo_num) {
	case 1:
		val64 = 0x0000000000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 2:
		val64 = 0x0000010000010000ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0100000100000100ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0001000001000001ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0000010000010000ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0100000000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 3:
		val64 = 0x0001000102000001ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0001020000010001ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0200000100010200ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0001000102000001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001020000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 4:
		val64 = 0x0001020300010200ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0100000102030001ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0200010000010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0001020001000001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0203000100000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 5:
		val64 = 0x0001000203000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0001020001030004ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0001000203000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0001020001030004ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001000000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 6:
		val64 = 0x0001020304000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0304050001020001ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0203000100000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0304000102030405ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001000200000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 7:
		val64 = 0x0001020001020300ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0102030400010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0405060001020001ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0304050000010200ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0102030000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 8:
		val64 = 0x0001020300040105ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0200030106000204ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0103000502010007ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0304010002060500ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0103020400000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	}

	/* Enable all configured Tx FIFO partitions */
	val64 = readq(&bar0->tx_fifo_partition_0);
	val64 |= (TX_FIFO_PARTITION_EN);
	writeq(val64, &bar0->tx_fifo_partition_0);

	/* Filling the Rx round robin registers as per the
	 * number of Rings and steering based on QoS.
	 */
	switch (config->rx_ring_num) {
	case 1:
		val64 = 0x8080808080808080ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 2:
		val64 = 0x0000010000010000ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0100000100000100ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0001000001000001ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0000010000010000ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0100000000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080808040404040ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 3:
		val64 = 0x0001000102000001ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0001020000010001ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0200000100010200ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0001000102000001ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001020000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080804040402020ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 4:
		val64 = 0x0001020300010200ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0100000102030001ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0200010000010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0001020001000001ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0203000100000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020201010ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 5:
		val64 = 0x0001000203000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0001020001030004ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0001000203000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0001020001030004ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001000000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020201008ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 6:
		val64 = 0x0001020304000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0304050001020001ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0203000100000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0304000102030405ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001000200000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020100804ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 7:
		val64 = 0x0001020001020300ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0102030400010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0405060001020001ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0304050000010200ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0102030000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080402010080402ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 8:
		val64 = 0x0001020300040105ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0200030106000204ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0103000502010007ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0304010002060500ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0103020400000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8040201008040201ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	}

	/* UDP Fix */
	val64 = 0;
	for (i = 0; i < 8; i++)
		writeq(val64, &bar0->rts_frm_len_n[i]);

	/* Set the default rts frame length for the rings configured
	 * (MTU + 22 bytes of L2 overhead). */
	val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
	for (i = 0 ; i < config->rx_ring_num ; i++)
		writeq(val64, &bar0->rts_frm_len_n[i]);

	/* Set the frame length for the configured rings
	 * desired by the user
	 */
	for (i = 0; i < config->rx_ring_num; i++) {
		/* If rts_frm_len[i] == 0 then it is assumed that user not
		 * specified frame length steering.
		 * If the user provides the frame length then program
		 * the rts_frm_len register for those values or else
		 * leave it as it is.
		 */
		if (rts_frm_len[i] != 0) {
			writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
				&bar0->rts_frm_len_n[i]);
		}
	}

	/* Disable differentiated services steering logic
	 * for all 64 DSCP codepoints. */
	for (i = 0; i < 64; i++) {
		if (rts_ds_steer(nic, i, 0) == FAILURE) {
			DBG_PRINT(ERR_DBG, "%s: failed rts ds steering",
				dev->name);
			DBG_PRINT(ERR_DBG, "set on codepoint %d\n", i);
			return FAILURE;
		}
	}

	/* Program statistics memory */
	writeq(mac_control->stats_mem_phy, &bar0->stat_addr);

	if (nic->device_type == XFRAME_II_DEVICE) {
		val64 = STAT_BC(0x320);
		writeq(val64, &bar0->stat_byte_cnt);
	}

	/*
	 * Initializing the sampling rate for the device to calculate the
	 * bandwidth utilization.
	 */
	val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
	    MAC_RX_LINK_UTIL_VAL(rmac_util_period);
	writeq(val64, &bar0->mac_link_util);


	/*
	 * Initializing the Transmit and Receive Traffic Interrupt
	 * Scheme.
	 */
	/*
	 * TTI Initialization. Default Tx timer gets us about
	 * 250 interrupts per sec. Continuous interrupts are enabled
	 * by default.
	 */
	if (nic->device_type == XFRAME_II_DEVICE) {
		int count = (nic->config.bus_speed * 125)/2;
		val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
	} else {

		val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
	}
	val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
	    TTI_DATA1_MEM_TX_URNG_B(0x10) |
	    TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
	if (use_continuous_tx_intrs)
		val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
	writeq(val64, &bar0->tti_data1_mem);

	val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
	    TTI_DATA2_MEM_TX_UFC_B(0x20) |
	    TTI_DATA2_MEM_TX_UFC_C(0x40) | TTI_DATA2_MEM_TX_UFC_D(0x80);
	writeq(val64, &bar0->tti_data2_mem);

	val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
	writeq(val64, &bar0->tti_command_mem);

	/*
	 * Once the operation completes, the Strobe bit of the command
	 * register will be reset. We poll for this particular condition
	 * We wait for a maximum of 500ms for the operation to complete,
	 * if it's not complete by then we return error.
	 */
	time = 0;
	while (TRUE) {
		val64 = readq(&bar0->tti_command_mem);
		if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
			break;
		}
		if (time > 10) {
			DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
				  dev->name);
			return -1;
		}
		msleep(50);
		time++;
	}

	if (nic->config.bimodal) {
		/* Bimodal: program a TTI entry per Rx ring at offset 0x38+k */
		int k = 0;
		for (k = 0; k < config->rx_ring_num; k++) {
			val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
			val64 |= TTI_CMD_MEM_OFFSET(0x38+k);
			writeq(val64, &bar0->tti_command_mem);

			/*
			 * Once the operation completes, the Strobe bit of the command
			 * register will be reset. We poll for this particular condition
			 * We wait for a maximum of 500ms for the operation to complete,
			 * if it's not complete by then we return error.
			 */
			time = 0;
			while (TRUE) {
				val64 = readq(&bar0->tti_command_mem);
				if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
					break;
				}
				if (time > 10) {
					DBG_PRINT(ERR_DBG,
						"%s: TTI init Failed\n",
					dev->name);
					return -1;
				}
				time++;
				msleep(50);
			}
		}
	} else {

		/* RTI Initialization */
		if (nic->device_type == XFRAME_II_DEVICE) {
			/*
			 * Programmed to generate Apprx 500 Intrs per
			 * second
			 */
			int count = (nic->config.bus_speed * 125)/4;
			val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
		} else {
			val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
		}
		val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
		    RTI_DATA1_MEM_RX_URNG_B(0x10) |
		    RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;

		writeq(val64, &bar0->rti_data1_mem);

		/* MSI-X gets lower utilization-frame-count thresholds */
		val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
		    RTI_DATA2_MEM_RX_UFC_B(0x2) ;
		if (nic->intr_type == MSI_X)
			val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
				RTI_DATA2_MEM_RX_UFC_D(0x40));
		else
			val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
				RTI_DATA2_MEM_RX_UFC_D(0x80));
		writeq(val64, &bar0->rti_data2_mem);

		for (i = 0; i < config->rx_ring_num; i++) {
			val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
					| RTI_CMD_MEM_OFFSET(i);
			writeq(val64, &bar0->rti_command_mem);

			/*
			 * Once the operation completes, the Strobe bit of the
			 * command register will be reset. We poll for this
			 * particular condition. We wait for a maximum of 500ms
			 * for the operation to complete, if it's not complete
			 * by then we return error.
			 */
			time = 0;
			while (TRUE) {
				val64 = readq(&bar0->rti_command_mem);
				if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
					break;
				}
				if (time > 10) {
					DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
						  dev->name);
					return -1;
				}
				time++;
				msleep(50);
			}
		}
	}

	/*
	 * Initializing proper values as Pause threshold into all
	 * the 8 Queues on Rx side.
	 */
	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);

	/* Disable RMAC PAD STRIPPING (keyed mac_cfg write, see above) */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64), add);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64 >> 32), (add + 4));
	val64 = readq(&bar0->mac_cfg);

	/* Enable FCS stripping by adapter.  Herc takes a plain 64-bit
	 * write; Xena needs the keyed two-half sequence. */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 |= MAC_CFG_RMAC_STRIP_FCS;
	if (nic->device_type == XFRAME_II_DEVICE)
		writeq(val64, &bar0->mac_cfg);
	else {
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64), add);
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64 >> 32), (add + 4));
	}

	/*
	 * Set the time value to be inserted in the pause frame
	 * generated by xena.
	 */
	val64 = readq(&bar0->rmac_pause_cfg);
	val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
	val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
	writeq(val64, &bar0->rmac_pause_cfg);

	/*
	 * Set the Threshold Limit for Generating the pause frame
	 * If the amount of data in any Queue exceeds ratio of
	 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
	 * pause frame is generated
	 */
	val64 = 0;
	for (i = 0; i < 4; i++) {
		val64 |=
		    (((u64) 0xFF00 | nic->mac_control.
		      mc_pause_threshold_q0q3)
		     << (i * 2 * 8));
	}
	writeq(val64, &bar0->mc_pause_thresh_q0q3);

	val64 = 0;
	for (i = 0; i < 4; i++) {
		val64 |=
		    (((u64) 0xFF00 | nic->mac_control.
		      mc_pause_threshold_q4q7)
		     << (i * 2 * 8));
	}
	writeq(val64, &bar0->mc_pause_thresh_q4q7);

	/*
	 * TxDMA will stop Read request if the number of read split has
	 * exceeded the limit pointed by shared_splits
	 */
	val64 = readq(&bar0->pic_control);
	val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
	writeq(val64, &bar0->pic_control);

	if (nic->config.bus_speed == 266) {
		writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
		writeq(0x0, &bar0->read_retry_delay);
		writeq(0x0, &bar0->write_retry_delay);
	}

	/*
	 * Programming the Herc to split every write transaction
	 * that does not start on an ADB to reduce disconnects.
	 */
	if (nic->device_type == XFRAME_II_DEVICE) {
		val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
			MISC_LINK_STABILITY_PRD(3);
		writeq(val64, &bar0->misc_control);
		val64 = readq(&bar0->pic_control2);
		val64 &= ~(BIT(13)|BIT(14)|BIT(15));
		writeq(val64, &bar0->pic_control2);
	}
	/* CX4 copper variants need a larger average inter-packet gap */
	if (strstr(nic->product_name, "CX4")) {
		val64 = TMAC_AVG_IPG(0x17);
		writeq(val64, &bar0->tmac_avg_ipg);
	}

	return SUCCESS;
}
a371a07d 1649#define LINK_UP_DOWN_INTERRUPT 1
1650#define MAC_RMAC_ERR_TIMER 2
1651
1ee6dd77 1652static int s2io_link_fault_indication(struct s2io_nic *nic)
a371a07d 1653{
cc6e7c44
RA
1654 if (nic->intr_type != INTA)
1655 return MAC_RMAC_ERR_TIMER;
a371a07d 1656 if (nic->device_type == XFRAME_II_DEVICE)
1657 return LINK_UP_DOWN_INTERRUPT;
1658 else
1659 return MAC_RMAC_ERR_TIMER;
1660}
1da177e4 1661
/**
 * en_dis_able_nic_intrs - Enable or Disable the interrupts
 * @nic: device private variable,
 * @mask: A mask indicating which Intr block must be modified and,
 * @flag: A flag indicating whether to enable or disable the Intrs.
 * Description: This function will either disable or enable the interrupts
 * depending on the flag argument. The mask argument can be used to
 * enable/disable any Intr block.  Enabling clears the relevant bits in
 * general_int_mask (a set bit masks the interrupt); disabling sets them
 * back and writes DISABLE_ALL_INTRS into the per-block mask registers.
 * Return Value: NONE.
 */

static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0, temp64 = 0;

	/* Top level interrupt classification */
	/* PIC Interrupts */
	if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
		/* Enable PIC Intrs in the general intr mask register */
		val64 = TXPIC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * If Hercules adapter enable GPIO otherwise
			 * disable all PCIX, Flash, MDIO, IIC and GPIO
			 * interrupts for now.
			 * TODO
			 */
			if (s2io_link_fault_indication(nic) ==
					LINK_UP_DOWN_INTERRUPT ) {
				/* unmask only the GPIO link-up source */
				temp64 = readq(&bar0->pic_int_mask);
				temp64 &= ~((u64) PIC_INT_GPIO);
				writeq(temp64, &bar0->pic_int_mask);
				temp64 = readq(&bar0->gpio_int_mask);
				temp64 &= ~((u64) GPIO_INT_MASK_LINK_UP);
				writeq(temp64, &bar0->gpio_int_mask);
			} else {
				writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
			}
			/*
			 * No MSI Support is available presently, so TTI and
			 * RTI interrupts are also disabled.
			 */
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable PIC Intrs in the general
			 * intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}

	/* MAC Interrupts */
	/* Enabling/Disabling MAC interrupts */
	if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
		val64 = TXMAC_INT_M | RXMAC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * All MAC block error interrupts are disabled for now
			 * TODO
			 */
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable MAC Intrs in the general intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
			writeq(DISABLE_ALL_INTRS,
			       &bar0->mac_rmac_err_mask);

			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}

	/* Tx traffic interrupts */
	if (mask & TX_TRAFFIC_INTR) {
		val64 = TXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * Enable all the Tx side interrupts
			 * writing 0 Enables all 64 TX interrupt levels
			 */
			writeq(0x0, &bar0->tx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Tx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}

	/* Rx traffic interrupts */
	if (mask & RX_TRAFFIC_INTR) {
		val64 = RXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/* writing 0 Enables all 8 RX interrupt levels */
			writeq(0x0, &bar0->rx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Rx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}
}
1791
/**
 * verify_pcc_quiescent - Checks for PCC quiescent state
 * @sp: device private structure.
 * @flag: TRUE when the adapter-enable bit has been written (expect idle
 *        bits SET), FALSE when checking the pre-enable state (expect
 *        idle bits CLEAR).
 *
 * Hercules (Xframe-II) and Xena revisions >= 4 expose the full
 * RMAC_PCC_IDLE mask; older Xena parts only expose PCC_FOUR_IDLE
 * (four PCCs are disabled on them per H/W bug SXE-008).
 *
 * Return: 1 If PCC is quiescence
 *         0 If PCC is not quiescence
 */
static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
{
	int ret = 0, herc;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = readq(&bar0->adapter_status);

	herc = (sp->device_type == XFRAME_II_DEVICE);

	if (flag == FALSE) {
		if ((!herc && (get_xena_rev_id(sp->pdev) >= 4)) || herc) {
			if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
				ret = 1;
		} else {
			if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
				ret = 1;
		}
	} else {
		if ((!herc && (get_xena_rev_id(sp->pdev) >= 4)) || herc) {
			if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
			     ADAPTER_STATUS_RMAC_PCC_IDLE))
				ret = 1;
		} else {
			if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
			     ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
				ret = 1;
		}
	}

	return ret;
}
/**
 * verify_xena_quiescence - Checks whether the H/W is ready
 * @sp: device private structure.
 * Description: Returns whether the H/W is ready to go or not. Depending
 * on whether adapter enable bit was written or not the comparison
 * differs and the calling function passes the input argument flag to
 * indicate this.  Each readiness bit in adapter_status is checked
 * individually so a specific failure reason can be logged.
 * Return: 1 If xena is quiescence
 * 0 If Xena is not quiescence
 */

static int verify_xena_quiescence(struct s2io_nic *sp)
{
	int mode;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = readq(&bar0->adapter_status);
	mode = s2io_verify_pci_mode(sp);

	if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
		DBG_PRINT(ERR_DBG, "%s", "TDMA is not ready!");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
		DBG_PRINT(ERR_DBG, "%s", "RDMA is not ready!");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
		DBG_PRINT(ERR_DBG, "%s", "PFC is not ready!");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
		DBG_PRINT(ERR_DBG, "%s", "TMAC BUF is not empty!");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
		DBG_PRINT(ERR_DBG, "%s", "PIC is not QUIESCENT!");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
		DBG_PRINT(ERR_DBG, "%s", "MC_DRAM is not ready!");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
		DBG_PRINT(ERR_DBG, "%s", "MC_QUEUES is not ready!");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
		DBG_PRINT(ERR_DBG, "%s", "M_PLL is not locked!");
		return 0;
	}

	/*
	 * In PCI 33 mode, the P_PLL is not used, and therefore,
	 * the the P_PLL_LOCK bit in the adapter_status register will
	 * not be asserted.
	 */
	if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
		sp->device_type == XFRAME_II_DEVICE && mode !=
		PCI_MODE_PCI_33) {
		DBG_PRINT(ERR_DBG, "%s", "P_PLL is not locked!");
		return 0;
	}
	if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
			ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
		DBG_PRINT(ERR_DBG, "%s", "RC_PRC is not QUIESCENT!");
		return 0;
	}
	return 1;
}
1895
1896/**
1897 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
 1898 * @sp: Pointer to device specific structure
20346722 1899 * Description :
1da177e4
LT
1900 * New procedure to clear mac address reading problems on Alpha platforms
1901 *
1902 */
1903
1ee6dd77 1904static void fix_mac_address(struct s2io_nic * sp)
1da177e4 1905{
1ee6dd77 1906 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
1907 u64 val64;
1908 int i = 0;
1909
1910 while (fix_mac[i] != END_SIGN) {
1911 writeq(fix_mac[i++], &bar0->gpio_control);
20346722 1912 udelay(10);
1da177e4
LT
1913 val64 = readq(&bar0->gpio_control);
1914 }
1915}
1916
1917/**
20346722 1918 * start_nic - Turns the device on
1da177e4 1919 * @nic : device private variable.
20346722 1920 * Description:
1921 * This function actually turns the device on. Before this function is
1922 * called,all Registers are configured from their reset states
1923 * and shared memory is allocated but the NIC is still quiescent. On
1da177e4
LT
1924 * calling this function, the device interrupts are cleared and the NIC is
1925 * literally switched on by writing into the adapter control register.
20346722 1926 * Return Value:
1da177e4
LT
1927 * SUCCESS on success and -1 on failure.
1928 */
1929
1930static int start_nic(struct s2io_nic *nic)
1931{
1ee6dd77 1932 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1da177e4
LT
1933 struct net_device *dev = nic->dev;
1934 register u64 val64 = 0;
20346722 1935 u16 subid, i;
1ee6dd77 1936 struct mac_info *mac_control;
1da177e4
LT
1937 struct config_param *config;
1938
1939 mac_control = &nic->mac_control;
1940 config = &nic->config;
1941
1942 /* PRC Initialization and configuration */
1943 for (i = 0; i < config->rx_ring_num; i++) {
20346722 1944 writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
1da177e4
LT
1945 &bar0->prc_rxd0_n[i]);
1946
1947 val64 = readq(&bar0->prc_ctrl_n[i]);
b6e3f982 1948 if (nic->config.bimodal)
1949 val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
da6971d8
AR
1950 if (nic->rxd_mode == RXD_MODE_1)
1951 val64 |= PRC_CTRL_RC_ENABLED;
1952 else
1953 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
863c11a9
AR
1954 if (nic->device_type == XFRAME_II_DEVICE)
1955 val64 |= PRC_CTRL_GROUP_READS;
1956 val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
1957 val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
1da177e4
LT
1958 writeq(val64, &bar0->prc_ctrl_n[i]);
1959 }
1960
da6971d8
AR
1961 if (nic->rxd_mode == RXD_MODE_3B) {
1962 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
1963 val64 = readq(&bar0->rx_pa_cfg);
1964 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
1965 writeq(val64, &bar0->rx_pa_cfg);
1966 }
1da177e4 1967
926930b2
SS
1968 if (vlan_tag_strip == 0) {
1969 val64 = readq(&bar0->rx_pa_cfg);
1970 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
1971 writeq(val64, &bar0->rx_pa_cfg);
1972 vlan_strip_flag = 0;
1973 }
1974
20346722 1975 /*
1da177e4
LT
1976 * Enabling MC-RLDRAM. After enabling the device, we timeout
1977 * for around 100ms, which is approximately the time required
1978 * for the device to be ready for operation.
1979 */
1980 val64 = readq(&bar0->mc_rldram_mrs);
1981 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
1982 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
1983 val64 = readq(&bar0->mc_rldram_mrs);
1984
20346722 1985 msleep(100); /* Delay by around 100 ms. */
1da177e4
LT
1986
1987 /* Enabling ECC Protection. */
1988 val64 = readq(&bar0->adapter_control);
1989 val64 &= ~ADAPTER_ECC_EN;
1990 writeq(val64, &bar0->adapter_control);
1991
20346722 1992 /*
1993 * Clearing any possible Link state change interrupts that
1da177e4
LT
1994 * could have popped up just before Enabling the card.
1995 */
1996 val64 = readq(&bar0->mac_rmac_err_reg);
1997 if (val64)
1998 writeq(val64, &bar0->mac_rmac_err_reg);
1999
20346722 2000 /*
2001 * Verify if the device is ready to be enabled, if so enable
1da177e4
LT
2002 * it.
2003 */
2004 val64 = readq(&bar0->adapter_status);
19a60522 2005 if (!verify_xena_quiescence(nic)) {
1da177e4
LT
2006 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
2007 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
2008 (unsigned long long) val64);
2009 return FAILURE;
2010 }
2011
20346722 2012 /*
1da177e4 2013 * With some switches, link might be already up at this point.
20346722 2014 * Because of this weird behavior, when we enable laser,
2015 * we may not get link. We need to handle this. We cannot
2016 * figure out which switch is misbehaving. So we are forced to
2017 * make a global change.
1da177e4
LT
2018 */
2019
2020 /* Enabling Laser. */
2021 val64 = readq(&bar0->adapter_control);
2022 val64 |= ADAPTER_EOI_TX_ON;
2023 writeq(val64, &bar0->adapter_control);
2024
c92ca04b
AR
2025 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
2026 /*
2027 * Dont see link state interrupts initally on some switches,
2028 * so directly scheduling the link state task here.
2029 */
2030 schedule_work(&nic->set_link_task);
2031 }
1da177e4
LT
2032 /* SXE-002: Initialize link and activity LED */
2033 subid = nic->pdev->subsystem_device;
541ae68f 2034 if (((subid & 0xFF) >= 0x07) &&
2035 (nic->device_type == XFRAME_I_DEVICE)) {
1da177e4
LT
2036 val64 = readq(&bar0->gpio_control);
2037 val64 |= 0x0000800000000000ULL;
2038 writeq(val64, &bar0->gpio_control);
2039 val64 = 0x0411040400000000ULL;
509a2671 2040 writeq(val64, (void __iomem *)bar0 + 0x2700);
1da177e4
LT
2041 }
2042
1da177e4
LT
2043 return SUCCESS;
2044}
fed5eccd
AR
2045/**
2046 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2047 */
1ee6dd77
RB
2048static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data, struct \
2049 TxD *txdlp, int get_off)
fed5eccd 2050{
1ee6dd77 2051 struct s2io_nic *nic = fifo_data->nic;
fed5eccd 2052 struct sk_buff *skb;
1ee6dd77 2053 struct TxD *txds;
fed5eccd
AR
2054 u16 j, frg_cnt;
2055
2056 txds = txdlp;
26b7625c 2057 if (txds->Host_Control == (u64)(long)nic->ufo_in_band_v) {
fed5eccd
AR
2058 pci_unmap_single(nic->pdev, (dma_addr_t)
2059 txds->Buffer_Pointer, sizeof(u64),
2060 PCI_DMA_TODEVICE);
2061 txds++;
2062 }
2063
2064 skb = (struct sk_buff *) ((unsigned long)
2065 txds->Host_Control);
2066 if (!skb) {
1ee6dd77 2067 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
fed5eccd
AR
2068 return NULL;
2069 }
2070 pci_unmap_single(nic->pdev, (dma_addr_t)
2071 txds->Buffer_Pointer,
2072 skb->len - skb->data_len,
2073 PCI_DMA_TODEVICE);
2074 frg_cnt = skb_shinfo(skb)->nr_frags;
2075 if (frg_cnt) {
2076 txds++;
2077 for (j = 0; j < frg_cnt; j++, txds++) {
2078 skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2079 if (!txds->Buffer_Pointer)
2080 break;
6aa20a22 2081 pci_unmap_page(nic->pdev, (dma_addr_t)
fed5eccd
AR
2082 txds->Buffer_Pointer,
2083 frag->size, PCI_DMA_TODEVICE);
2084 }
2085 }
1ee6dd77 2086 memset(txdlp,0, (sizeof(struct TxD) * fifo_data->max_txds));
fed5eccd
AR
2087 return(skb);
2088}
1da177e4 2089
20346722 2090/**
2091 * free_tx_buffers - Free all queued Tx buffers
1da177e4 2092 * @nic : device private variable.
20346722 2093 * Description:
1da177e4 2094 * Free all queued Tx buffers.
20346722 2095 * Return Value: void
1da177e4
LT
2096*/
2097
2098static void free_tx_buffers(struct s2io_nic *nic)
2099{
2100 struct net_device *dev = nic->dev;
2101 struct sk_buff *skb;
1ee6dd77 2102 struct TxD *txdp;
1da177e4 2103 int i, j;
1ee6dd77 2104 struct mac_info *mac_control;
1da177e4 2105 struct config_param *config;
fed5eccd 2106 int cnt = 0;
1da177e4
LT
2107
2108 mac_control = &nic->mac_control;
2109 config = &nic->config;
2110
2111 for (i = 0; i < config->tx_fifo_num; i++) {
2112 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
1ee6dd77 2113 txdp = (struct TxD *) mac_control->fifos[i].list_info[j].
1da177e4 2114 list_virt_addr;
fed5eccd
AR
2115 skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2116 if (skb) {
2117 dev_kfree_skb(skb);
2118 cnt++;
1da177e4 2119 }
1da177e4
LT
2120 }
2121 DBG_PRINT(INTR_DBG,
2122 "%s:forcibly freeing %d skbs on FIFO%d\n",
2123 dev->name, cnt, i);
20346722 2124 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2125 mac_control->fifos[i].tx_curr_put_info.offset = 0;
1da177e4
LT
2126 }
2127}
2128
20346722 2129/**
2130 * stop_nic - To stop the nic
1da177e4 2131 * @nic : device private variable.
20346722 2132 * Description:
2133 * This function does exactly the opposite of what the start_nic()
1da177e4
LT
2134 * function does. This function is called to stop the device.
2135 * Return Value:
2136 * void.
2137 */
2138
2139static void stop_nic(struct s2io_nic *nic)
2140{
1ee6dd77 2141 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1da177e4 2142 register u64 val64 = 0;
5d3213cc 2143 u16 interruptible;
1ee6dd77 2144 struct mac_info *mac_control;
1da177e4
LT
2145 struct config_param *config;
2146
2147 mac_control = &nic->mac_control;
2148 config = &nic->config;
2149
2150 /* Disable all interrupts */
e960fc5c 2151 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
a371a07d 2152 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
2153 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
1da177e4
LT
2154 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2155
5d3213cc
AR
2156 /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2157 val64 = readq(&bar0->adapter_control);
2158 val64 &= ~(ADAPTER_CNTL_EN);
2159 writeq(val64, &bar0->adapter_control);
1da177e4
LT
2160}
2161
1ee6dd77
RB
2162static int fill_rxd_3buf(struct s2io_nic *nic, struct RxD_t *rxdp, struct \
2163 sk_buff *skb)
da6971d8
AR
2164{
2165 struct net_device *dev = nic->dev;
2166 struct sk_buff *frag_list;
50eb8006 2167 void *tmp;
da6971d8
AR
2168
2169 /* Buffer-1 receives L3/L4 headers */
1ee6dd77 2170 ((struct RxD3*)rxdp)->Buffer1_ptr = pci_map_single
da6971d8
AR
2171 (nic->pdev, skb->data, l3l4hdr_size + 4,
2172 PCI_DMA_FROMDEVICE);
2173
2174 /* skb_shinfo(skb)->frag_list will have L4 data payload */
2175 skb_shinfo(skb)->frag_list = dev_alloc_skb(dev->mtu + ALIGN_SIZE);
2176 if (skb_shinfo(skb)->frag_list == NULL) {
2177 DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n ", dev->name);
2178 return -ENOMEM ;
2179 }
2180 frag_list = skb_shinfo(skb)->frag_list;
372cc597 2181 skb->truesize += frag_list->truesize;
da6971d8 2182 frag_list->next = NULL;
50eb8006
JG
2183 tmp = (void *)ALIGN((long)frag_list->data, ALIGN_SIZE + 1);
2184 frag_list->data = tmp;
2185 frag_list->tail = tmp;
da6971d8
AR
2186
2187 /* Buffer-2 receives L4 data payload */
1ee6dd77 2188 ((struct RxD3*)rxdp)->Buffer2_ptr = pci_map_single(nic->pdev,
da6971d8
AR
2189 frag_list->data, dev->mtu,
2190 PCI_DMA_FROMDEVICE);
2191 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
2192 rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);
2193
2194 return SUCCESS;
2195}
2196
20346722 2197/**
2198 * fill_rx_buffers - Allocates the Rx side skbs
1da177e4 2199 * @nic: device private variable
20346722 2200 * @ring_no: ring number
2201 * Description:
1da177e4
LT
2202 * The function allocates Rx side skbs and puts the physical
2203 * address of these buffers into the RxD buffer pointers, so that the NIC
2204 * can DMA the received frame into these locations.
2205 * The NIC supports 3 receive modes, viz
2206 * 1. single buffer,
2207 * 2. three buffer and
2208 * 3. Five buffer modes.
20346722 2209 * Each mode defines how many fragments the received frame will be split
2210 * up into by the NIC. The frame is split into L3 header, L4 Header,
1da177e4
LT
2211 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
2212 * is split into 3 fragments. As of now only single buffer mode is
2213 * supported.
2214 * Return Value:
2215 * SUCCESS on success or an appropriate -ve value on failure.
2216 */
2217
ac1f60db 2218static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
1da177e4
LT
2219{
2220 struct net_device *dev = nic->dev;
2221 struct sk_buff *skb;
1ee6dd77 2222 struct RxD_t *rxdp;
1da177e4 2223 int off, off1, size, block_no, block_no1;
1da177e4 2224 u32 alloc_tab = 0;
20346722 2225 u32 alloc_cnt;
1ee6dd77 2226 struct mac_info *mac_control;
1da177e4 2227 struct config_param *config;
20346722 2228 u64 tmp;
1ee6dd77 2229 struct buffAdd *ba;
1da177e4 2230 unsigned long flags;
1ee6dd77 2231 struct RxD_t *first_rxdp = NULL;
1da177e4
LT
2232
2233 mac_control = &nic->mac_control;
2234 config = &nic->config;
20346722 2235 alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
2236 atomic_read(&nic->rx_bufs_left[ring_no]);
1da177e4 2237
5d3213cc 2238 block_no1 = mac_control->rings[ring_no].rx_curr_get_info.block_index;
863c11a9 2239 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
1da177e4 2240 while (alloc_tab < alloc_cnt) {
20346722 2241 block_no = mac_control->rings[ring_no].rx_curr_put_info.
1da177e4 2242 block_index;
20346722 2243 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
1da177e4 2244
da6971d8
AR
2245 rxdp = mac_control->rings[ring_no].
2246 rx_blocks[block_no].rxds[off].virt_addr;
2247
2248 if ((block_no == block_no1) && (off == off1) &&
2249 (rxdp->Host_Control)) {
2250 DBG_PRINT(INTR_DBG, "%s: Get and Put",
2251 dev->name);
1da177e4
LT
2252 DBG_PRINT(INTR_DBG, " info equated\n");
2253 goto end;
2254 }
da6971d8 2255 if (off && (off == rxd_count[nic->rxd_mode])) {
20346722 2256 mac_control->rings[ring_no].rx_curr_put_info.
1da177e4 2257 block_index++;
da6971d8
AR
2258 if (mac_control->rings[ring_no].rx_curr_put_info.
2259 block_index == mac_control->rings[ring_no].
2260 block_count)
2261 mac_control->rings[ring_no].rx_curr_put_info.
2262 block_index = 0;
2263 block_no = mac_control->rings[ring_no].
2264 rx_curr_put_info.block_index;
2265 if (off == rxd_count[nic->rxd_mode])
2266 off = 0;
20346722 2267 mac_control->rings[ring_no].rx_curr_put_info.
da6971d8
AR
2268 offset = off;
2269 rxdp = mac_control->rings[ring_no].
2270 rx_blocks[block_no].block_virt_addr;
1da177e4
LT
2271 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2272 dev->name, rxdp);
2273 }
db874e65
SS
2274 if(!napi) {
2275 spin_lock_irqsave(&nic->put_lock, flags);
2276 mac_control->rings[ring_no].put_pos =
2277 (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
2278 spin_unlock_irqrestore(&nic->put_lock, flags);
2279 } else {
2280 mac_control->rings[ring_no].put_pos =
2281 (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
2282 }
da6971d8
AR
2283 if ((rxdp->Control_1 & RXD_OWN_XENA) &&
2284 ((nic->rxd_mode >= RXD_MODE_3A) &&
2285 (rxdp->Control_2 & BIT(0)))) {
20346722 2286 mac_control->rings[ring_no].rx_curr_put_info.
da6971d8 2287 offset = off;
1da177e4
LT
2288 goto end;
2289 }
da6971d8
AR
2290 /* calculate size of skb based on ring mode */
2291 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
2292 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2293 if (nic->rxd_mode == RXD_MODE_1)
2294 size += NET_IP_ALIGN;
2295 else if (nic->rxd_mode == RXD_MODE_3B)
2296 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
2297 else
2298 size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;
1da177e4 2299
da6971d8
AR
2300 /* allocate skb */
2301 skb = dev_alloc_skb(size);
2302 if(!skb) {
1da177e4
LT
2303 DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
2304 DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
303bcb4b 2305 if (first_rxdp) {
2306 wmb();
2307 first_rxdp->Control_1 |= RXD_OWN_XENA;
2308 }
da6971d8
AR
2309 return -ENOMEM ;
2310 }
2311 if (nic->rxd_mode == RXD_MODE_1) {
2312 /* 1 buffer mode - normal operation mode */
1ee6dd77 2313 memset(rxdp, 0, sizeof(struct RxD1));
da6971d8 2314 skb_reserve(skb, NET_IP_ALIGN);
1ee6dd77 2315 ((struct RxD1*)rxdp)->Buffer0_ptr = pci_map_single
863c11a9
AR
2316 (nic->pdev, skb->data, size - NET_IP_ALIGN,
2317 PCI_DMA_FROMDEVICE);
2318 rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
da6971d8
AR
2319
2320 } else if (nic->rxd_mode >= RXD_MODE_3A) {
2321 /*
2322 * 2 or 3 buffer mode -
2323 * Both 2 buffer mode and 3 buffer mode provides 128
2324 * byte aligned receive buffers.
2325 *
2326 * 3 buffer mode provides header separation where in
2327 * skb->data will have L3/L4 headers where as
2328 * skb_shinfo(skb)->frag_list will have the L4 data
2329 * payload
2330 */
2331
1ee6dd77 2332 memset(rxdp, 0, sizeof(struct RxD3));
da6971d8
AR
2333 ba = &mac_control->rings[ring_no].ba[block_no][off];
2334 skb_reserve(skb, BUF0_LEN);
2335 tmp = (u64)(unsigned long) skb->data;
2336 tmp += ALIGN_SIZE;
2337 tmp &= ~ALIGN_SIZE;
2338 skb->data = (void *) (unsigned long)tmp;
2339 skb->tail = (void *) (unsigned long)tmp;
2340
1ee6dd77
RB
2341 if (!(((struct RxD3*)rxdp)->Buffer0_ptr))
2342 ((struct RxD3*)rxdp)->Buffer0_ptr =
75c30b13 2343 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
da6971d8 2344 PCI_DMA_FROMDEVICE);
75c30b13
AR
2345 else
2346 pci_dma_sync_single_for_device(nic->pdev,
1ee6dd77 2347 (dma_addr_t) ((struct RxD3*)rxdp)->Buffer0_ptr,
75c30b13 2348 BUF0_LEN, PCI_DMA_FROMDEVICE);
da6971d8
AR
2349 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2350 if (nic->rxd_mode == RXD_MODE_3B) {
2351 /* Two buffer mode */
2352
2353 /*
6aa20a22 2354 * Buffer2 will have L3/L4 header plus
da6971d8
AR
2355 * L4 payload
2356 */
1ee6dd77 2357 ((struct RxD3*)rxdp)->Buffer2_ptr = pci_map_single
da6971d8
AR
2358 (nic->pdev, skb->data, dev->mtu + 4,
2359 PCI_DMA_FROMDEVICE);
2360
75c30b13 2361 /* Buffer-1 will be dummy buffer. Not used */
1ee6dd77
RB
2362 if (!(((struct RxD3*)rxdp)->Buffer1_ptr)) {
2363 ((struct RxD3*)rxdp)->Buffer1_ptr =
6aa20a22 2364 pci_map_single(nic->pdev,
75c30b13
AR
2365 ba->ba_1, BUF1_LEN,
2366 PCI_DMA_FROMDEVICE);
2367 }
da6971d8
AR
2368 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2369 rxdp->Control_2 |= SET_BUFFER2_SIZE_3
2370 (dev->mtu + 4);
2371 } else {
2372 /* 3 buffer mode */
2373 if (fill_rxd_3buf(nic, rxdp, skb) == -ENOMEM) {
2374 dev_kfree_skb_irq(skb);
2375 if (first_rxdp) {
2376 wmb();
2377 first_rxdp->Control_1 |=
2378 RXD_OWN_XENA;
2379 }
2380 return -ENOMEM ;
2381 }
2382 }
2383 rxdp->Control_2 |= BIT(0);
1da177e4 2384 }
1da177e4 2385 rxdp->Host_Control = (unsigned long) (skb);
303bcb4b 2386 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2387 rxdp->Control_1 |= RXD_OWN_XENA;
1da177e4 2388 off++;
da6971d8
AR
2389 if (off == (rxd_count[nic->rxd_mode] + 1))
2390 off = 0;
20346722 2391 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
20346722 2392
da6971d8 2393 rxdp->Control_2 |= SET_RXD_MARKER;
303bcb4b 2394 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2395 if (first_rxdp) {
2396 wmb();
2397 first_rxdp->Control_1 |= RXD_OWN_XENA;
2398 }
2399 first_rxdp = rxdp;
2400 }
1da177e4
LT
2401 atomic_inc(&nic->rx_bufs_left[ring_no]);
2402 alloc_tab++;
2403 }
2404
2405 end:
303bcb4b 2406 /* Transfer ownership of first descriptor to adapter just before
2407 * exiting. Before that, use memory barrier so that ownership
2408 * and other fields are seen by adapter correctly.
2409 */
2410 if (first_rxdp) {
2411 wmb();
2412 first_rxdp->Control_1 |= RXD_OWN_XENA;
2413 }
2414
1da177e4
LT
2415 return SUCCESS;
2416}
2417
da6971d8
AR
2418static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2419{
2420 struct net_device *dev = sp->dev;
2421 int j;
2422 struct sk_buff *skb;
1ee6dd77
RB
2423 struct RxD_t *rxdp;
2424 struct mac_info *mac_control;
2425 struct buffAdd *ba;
da6971d8
AR
2426
2427 mac_control = &sp->mac_control;
2428 for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2429 rxdp = mac_control->rings[ring_no].
2430 rx_blocks[blk].rxds[j].virt_addr;
2431 skb = (struct sk_buff *)
2432 ((unsigned long) rxdp->Host_Control);
2433 if (!skb) {
2434 continue;
2435 }
2436 if (sp->rxd_mode == RXD_MODE_1) {
2437 pci_unmap_single(sp->pdev, (dma_addr_t)
1ee6dd77 2438 ((struct RxD1*)rxdp)->Buffer0_ptr,
da6971d8
AR
2439 dev->mtu +
2440 HEADER_ETHERNET_II_802_3_SIZE
2441 + HEADER_802_2_SIZE +
2442 HEADER_SNAP_SIZE,
2443 PCI_DMA_FROMDEVICE);
1ee6dd77 2444 memset(rxdp, 0, sizeof(struct RxD1));
da6971d8
AR
2445 } else if(sp->rxd_mode == RXD_MODE_3B) {
2446 ba = &mac_control->rings[ring_no].
2447 ba[blk][j];
2448 pci_unmap_single(sp->pdev, (dma_addr_t)
1ee6dd77 2449 ((struct RxD3*)rxdp)->Buffer0_ptr,
da6971d8
AR
2450 BUF0_LEN,
2451 PCI_DMA_FROMDEVICE);
2452 pci_unmap_single(sp->pdev, (dma_addr_t)
1ee6dd77 2453 ((struct RxD3*)rxdp)->Buffer1_ptr,
da6971d8
AR
2454 BUF1_LEN,
2455 PCI_DMA_FROMDEVICE);
2456 pci_unmap_single(sp->pdev, (dma_addr_t)
1ee6dd77 2457 ((struct RxD3*)rxdp)->Buffer2_ptr,
da6971d8
AR
2458 dev->mtu + 4,
2459 PCI_DMA_FROMDEVICE);
1ee6dd77 2460 memset(rxdp, 0, sizeof(struct RxD3));
da6971d8
AR
2461 } else {
2462 pci_unmap_single(sp->pdev, (dma_addr_t)
1ee6dd77 2463 ((struct RxD3*)rxdp)->Buffer0_ptr, BUF0_LEN,
da6971d8
AR
2464 PCI_DMA_FROMDEVICE);
2465 pci_unmap_single(sp->pdev, (dma_addr_t)
1ee6dd77 2466 ((struct RxD3*)rxdp)->Buffer1_ptr,
da6971d8
AR
2467 l3l4hdr_size + 4,
2468 PCI_DMA_FROMDEVICE);
2469 pci_unmap_single(sp->pdev, (dma_addr_t)
1ee6dd77 2470 ((struct RxD3*)rxdp)->Buffer2_ptr, dev->mtu,
da6971d8 2471 PCI_DMA_FROMDEVICE);
1ee6dd77 2472 memset(rxdp, 0, sizeof(struct RxD3));
da6971d8
AR
2473 }
2474 dev_kfree_skb(skb);
2475 atomic_dec(&sp->rx_bufs_left[ring_no]);
2476 }
2477}
2478
1da177e4 2479/**
20346722 2480 * free_rx_buffers - Frees all Rx buffers
1da177e4 2481 * @sp: device private variable.
20346722 2482 * Description:
1da177e4
LT
2483 * This function will free all Rx buffers allocated by host.
2484 * Return Value:
2485 * NONE.
2486 */
2487
2488static void free_rx_buffers(struct s2io_nic *sp)
2489{
2490 struct net_device *dev = sp->dev;
da6971d8 2491 int i, blk = 0, buf_cnt = 0;
1ee6dd77 2492 struct mac_info *mac_control;
1da177e4 2493 struct config_param *config;
1da177e4
LT
2494
2495 mac_control = &sp->mac_control;
2496 config = &sp->config;
2497
2498 for (i = 0; i < config->rx_ring_num; i++) {
da6971d8
AR
2499 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2500 free_rxd_blk(sp,i,blk);
1da177e4 2501
20346722 2502 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2503 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2504 mac_control->rings[i].rx_curr_put_info.offset = 0;
2505 mac_control->rings[i].rx_curr_get_info.offset = 0;
1da177e4
LT
2506 atomic_set(&sp->rx_bufs_left[i], 0);
2507 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2508 dev->name, buf_cnt, i);
2509 }
2510}
2511
2512/**
2513 * s2io_poll - Rx interrupt handler for NAPI support
2514 * @dev : pointer to the device structure.
20346722 2515 * @budget : The number of packets that were budgeted to be processed
1da177e4
LT
2516 * during one pass through the 'Poll" function.
2517 * Description:
2518 * Comes into picture only if NAPI support has been incorporated. It does
2519 * the same thing that rx_intr_handler does, but not in a interrupt context
2520 * also It will process only a given number of packets.
2521 * Return value:
2522 * 0 on success and 1 if there are No Rx packets to be processed.
2523 */
2524
1da177e4
LT
2525static int s2io_poll(struct net_device *dev, int *budget)
2526{
1ee6dd77 2527 struct s2io_nic *nic = dev->priv;
20346722 2528 int pkt_cnt = 0, org_pkts_to_process;
1ee6dd77 2529 struct mac_info *mac_control;
1da177e4 2530 struct config_param *config;
1ee6dd77 2531 struct XENA_dev_config __iomem *bar0 = nic->bar0;
20346722 2532 int i;
1da177e4 2533
7ba013ac 2534 atomic_inc(&nic->isr_cnt);
1da177e4
LT
2535 mac_control = &nic->mac_control;
2536 config = &nic->config;
2537
20346722 2538 nic->pkts_to_process = *budget;
2539 if (nic->pkts_to_process > dev->quota)
2540 nic->pkts_to_process = dev->quota;
2541 org_pkts_to_process = nic->pkts_to_process;
1da177e4 2542
19a60522
SS
2543 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
2544 readl(&bar0->rx_traffic_int);
1da177e4
LT
2545
2546 for (i = 0; i < config->rx_ring_num; i++) {
20346722 2547 rx_intr_handler(&mac_control->rings[i]);
2548 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
2549 if (!nic->pkts_to_process) {
2550 /* Quota for the current iteration has been met */
2551 goto no_rx;
1da177e4 2552 }
1da177e4
LT
2553 }
2554 if (!pkt_cnt)
2555 pkt_cnt = 1;
2556
2557 dev->quota -= pkt_cnt;
2558 *budget -= pkt_cnt;
2559 netif_rx_complete(dev);
2560
2561 for (i = 0; i < config->rx_ring_num; i++) {
2562 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2563 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2564 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2565 break;
2566 }
2567 }
2568 /* Re enable the Rx interrupts. */
c92ca04b 2569 writeq(0x0, &bar0->rx_traffic_mask);
19a60522 2570 readl(&bar0->rx_traffic_mask);
7ba013ac 2571 atomic_dec(&nic->isr_cnt);
1da177e4
LT
2572 return 0;
2573
20346722 2574no_rx:
1da177e4
LT
2575 dev->quota -= pkt_cnt;
2576 *budget -= pkt_cnt;
2577
2578 for (i = 0; i < config->rx_ring_num; i++) {
2579 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2580 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2581 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2582 break;
2583 }
2584 }
7ba013ac 2585 atomic_dec(&nic->isr_cnt);
1da177e4
LT
2586 return 1;
2587}
20346722 2588
b41477f3 2589#ifdef CONFIG_NET_POLL_CONTROLLER
612eff0e 2590/**
b41477f3 2591 * s2io_netpoll - netpoll event handler entry point
612eff0e
BH
2592 * @dev : pointer to the device structure.
2593 * Description:
b41477f3
AR
2594 * This function will be called by upper layer to check for events on the
2595 * interface in situations where interrupts are disabled. It is used for
2596 * specific in-kernel networking tasks, such as remote consoles and kernel
2597 * debugging over the network (example netdump in RedHat).
612eff0e 2598 */
612eff0e
BH
2599static void s2io_netpoll(struct net_device *dev)
2600{
1ee6dd77
RB
2601 struct s2io_nic *nic = dev->priv;
2602 struct mac_info *mac_control;
612eff0e 2603 struct config_param *config;
1ee6dd77 2604 struct XENA_dev_config __iomem *bar0 = nic->bar0;
b41477f3 2605 u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
612eff0e
BH
2606 int i;
2607
2608 disable_irq(dev->irq);
2609
2610 atomic_inc(&nic->isr_cnt);
2611 mac_control = &nic->mac_control;
2612 config = &nic->config;
2613
612eff0e 2614 writeq(val64, &bar0->rx_traffic_int);
b41477f3
AR
2615 writeq(val64, &bar0->tx_traffic_int);
2616
6aa20a22 2617 /* we need to free up the transmitted skbufs or else netpoll will
b41477f3
AR
2618 * run out of skbs and will fail and eventually netpoll application such
2619 * as netdump will fail.
2620 */
2621 for (i = 0; i < config->tx_fifo_num; i++)
2622 tx_intr_handler(&mac_control->fifos[i]);
612eff0e 2623
b41477f3 2624 /* check for received packet and indicate up to network */
612eff0e
BH
2625 for (i = 0; i < config->rx_ring_num; i++)
2626 rx_intr_handler(&mac_control->rings[i]);
2627
2628 for (i = 0; i < config->rx_ring_num; i++) {
2629 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2630 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2631 DBG_PRINT(ERR_DBG, " in Rx Netpoll!!\n");
2632 break;
2633 }
2634 }
2635 atomic_dec(&nic->isr_cnt);
2636 enable_irq(dev->irq);
2637 return;
2638}
2639#endif
2640
20346722 2641/**
1da177e4
LT
2642 * rx_intr_handler - Rx interrupt handler
2643 * @nic: device private variable.
20346722 2644 * Description:
2645 * If the interrupt is because of a received frame or if the
1da177e4 2646 * receive ring contains fresh as yet un-processed frames,this function is
20346722 2647 * called. It picks out the RxD at which place the last Rx processing had
2648 * stopped and sends the skb to the OSM's Rx handler and then increments
1da177e4
LT
2649 * the offset.
2650 * Return Value:
2651 * NONE.
2652 */
1ee6dd77 2653static void rx_intr_handler(struct ring_info *ring_data)
1da177e4 2654{
1ee6dd77 2655 struct s2io_nic *nic = ring_data->nic;
1da177e4 2656 struct net_device *dev = (struct net_device *) nic->dev;
da6971d8 2657 int get_block, put_block, put_offset;
1ee6dd77
RB
2658 struct rx_curr_get_info get_info, put_info;
2659 struct RxD_t *rxdp;
1da177e4 2660 struct sk_buff *skb;
20346722 2661 int pkt_cnt = 0;
7d3d0439
RA
2662 int i;
2663
7ba013ac 2664 spin_lock(&nic->rx_lock);
2665 if (atomic_read(&nic->card_state) == CARD_DOWN) {
776bd20f 2666 DBG_PRINT(INTR_DBG, "%s: %s going down for reset\n",
7ba013ac 2667 __FUNCTION__, dev->name);
2668 spin_unlock(&nic->rx_lock);
776bd20f 2669 return;
7ba013ac 2670 }
2671
20346722 2672 get_info = ring_data->rx_curr_get_info;
2673 get_block = get_info.block_index;
1ee6dd77 2674 memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
20346722 2675 put_block = put_info.block_index;
da6971d8 2676 rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
db874e65
SS
2677 if (!napi) {
2678 spin_lock(&nic->put_lock);
2679 put_offset = ring_data->put_pos;
2680 spin_unlock(&nic->put_lock);
2681 } else
2682 put_offset = ring_data->put_pos;
2683
da6971d8 2684 while (RXD_IS_UP2DT(rxdp)) {
db874e65
SS
2685 /*
2686 * If your are next to put index then it's
2687 * FIFO full condition
2688 */
da6971d8
AR
2689 if ((get_block == put_block) &&
2690 (get_info.offset + 1) == put_info.offset) {
75c30b13 2691 DBG_PRINT(INTR_DBG, "%s: Ring Full\n",dev->name);
da6971d8
AR
2692 break;
2693 }
20346722 2694 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2695 if (skb == NULL) {
2696 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2697 dev->name);
2698 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
7ba013ac 2699 spin_unlock(&nic->rx_lock);
20346722 2700 return;
1da177e4 2701 }
da6971d8
AR
2702 if (nic->rxd_mode == RXD_MODE_1) {
2703 pci_unmap_single(nic->pdev, (dma_addr_t)
1ee6dd77 2704 ((struct RxD1*)rxdp)->Buffer0_ptr,
20346722 2705 dev->mtu +
2706 HEADER_ETHERNET_II_802_3_SIZE +
2707 HEADER_802_2_SIZE +
2708 HEADER_SNAP_SIZE,
2709 PCI_DMA_FROMDEVICE);
da6971d8 2710 } else if (nic->rxd_mode == RXD_MODE_3B) {
75c30b13 2711 pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
1ee6dd77 2712 ((struct RxD3*)rxdp)->Buffer0_ptr,
20346722 2713 BUF0_LEN, PCI_DMA_FROMDEVICE);
da6971d8 2714 pci_unmap_single(nic->pdev, (dma_addr_t)
1ee6dd77 2715 ((struct RxD3*)rxdp)->Buffer2_ptr,
da6971d8 2716 dev->mtu + 4,
20346722 2717 PCI_DMA_FROMDEVICE);
da6971d8 2718 } else {
75c30b13 2719 pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
1ee6dd77 2720 ((struct RxD3*)rxdp)->Buffer0_ptr, BUF0_LEN,
da6971d8
AR
2721 PCI_DMA_FROMDEVICE);
2722 pci_unmap_single(nic->pdev, (dma_addr_t)
1ee6dd77 2723 ((struct RxD3*)rxdp)->Buffer1_ptr,
da6971d8
AR
2724 l3l4hdr_size + 4,
2725 PCI_DMA_FROMDEVICE);
2726 pci_unmap_single(nic->pdev, (dma_addr_t)
1ee6dd77 2727 ((struct RxD3*)rxdp)->Buffer2_ptr,
da6971d8
AR
2728 dev->mtu, PCI_DMA_FROMDEVICE);
2729 }
863c11a9 2730 prefetch(skb->data);
20346722 2731 rx_osm_handler(ring_data, rxdp);
2732 get_info.offset++;
da6971d8
AR
2733 ring_data->rx_curr_get_info.offset = get_info.offset;
2734 rxdp = ring_data->rx_blocks[get_block].
2735 rxds[get_info.offset].virt_addr;
2736 if (get_info.offset == rxd_count[nic->rxd_mode]) {
20346722 2737 get_info.offset = 0;
da6971d8 2738 ring_data->rx_curr_get_info.offset = get_info.offset;
20346722 2739 get_block++;
da6971d8
AR
2740 if (get_block == ring_data->block_count)
2741 get_block = 0;
2742 ring_data->rx_curr_get_info.block_index = get_block;
20346722 2743 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2744 }
1da177e4 2745
20346722 2746 nic->pkts_to_process -= 1;
db874e65 2747 if ((napi) && (!nic->pkts_to_process))
20346722 2748 break;
20346722 2749 pkt_cnt++;
1da177e4
LT
2750 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2751 break;
2752 }
7d3d0439
RA
2753 if (nic->lro) {
2754 /* Clear all LRO sessions before exiting */
2755 for (i=0; i<MAX_LRO_SESSIONS; i++) {
1ee6dd77 2756 struct lro *lro = &nic->lro0_n[i];
7d3d0439
RA
2757 if (lro->in_use) {
2758 update_L3L4_header(nic, lro);
2759 queue_rx_frame(lro->parent);
2760 clear_lro_session(lro);
2761 }
2762 }
2763 }
2764
7ba013ac 2765 spin_unlock(&nic->rx_lock);
1da177e4 2766}
20346722 2767
2768/**
1da177e4
LT
2769 * tx_intr_handler - Transmit interrupt handler
2770 * @nic : device private variable
20346722 2771 * Description:
2772 * If an interrupt was raised to indicate DMA complete of the
2773 * Tx packet, this function is called. It identifies the last TxD
2774 * whose buffer was freed and frees all skbs whose data have already
1da177e4
LT
2775 * DMA'ed into the NICs internal memory.
2776 * Return Value:
2777 * NONE
2778 */
2779
1ee6dd77 2780static void tx_intr_handler(struct fifo_info *fifo_data)
1da177e4 2781{
1ee6dd77 2782 struct s2io_nic *nic = fifo_data->nic;
1da177e4 2783 struct net_device *dev = (struct net_device *) nic->dev;
1ee6dd77 2784 struct tx_curr_get_info get_info, put_info;
1da177e4 2785 struct sk_buff *skb;
1ee6dd77 2786 struct TxD *txdlp;
1da177e4 2787
20346722 2788 get_info = fifo_data->tx_curr_get_info;
1ee6dd77
RB
2789 memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
2790 txdlp = (struct TxD *) fifo_data->list_info[get_info.offset].
20346722 2791 list_virt_addr;
2792 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2793 (get_info.offset != put_info.offset) &&
2794 (txdlp->Host_Control)) {
2795 /* Check for TxD errors */
2796 if (txdlp->Control_1 & TXD_T_CODE) {
2797 unsigned long long err;
2798 err = txdlp->Control_1 & TXD_T_CODE;
bd1034f0
AR
2799 if (err & 0x1) {
2800 nic->mac_control.stats_info->sw_stat.
2801 parity_err_cnt++;
2802 }
776bd20f 2803 if ((err >> 48) == 0xA) {
2804 DBG_PRINT(TX_DBG, "TxD returned due \
19a60522 2805 to loss of link\n");
776bd20f 2806 }
2807 else {
19a60522 2808 DBG_PRINT(ERR_DBG, "***TxD error %llx\n", err);
776bd20f 2809 }
20346722 2810 }
1da177e4 2811
fed5eccd 2812 skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
20346722 2813 if (skb == NULL) {
2814 DBG_PRINT(ERR_DBG, "%s: Null skb ",
2815 __FUNCTION__);
2816 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2817 return;
2818 }
2819
20346722 2820 /* Updating the statistics block */
20346722 2821 nic->stats.tx_bytes += skb->len;
2822 dev_kfree_skb_irq(skb);
2823
2824 get_info.offset++;
863c11a9
AR
2825 if (get_info.offset == get_info.fifo_len + 1)
2826 get_info.offset = 0;
1ee6dd77 2827 txdlp = (struct TxD *) fifo_data->list_info
20346722 2828 [get_info.offset].list_virt_addr;
2829 fifo_data->tx_curr_get_info.offset =
2830 get_info.offset;
1da177e4
LT
2831 }
2832
2833 spin_lock(&nic->tx_lock);
2834 if (netif_queue_stopped(dev))
2835 netif_wake_queue(dev);
2836 spin_unlock(&nic->tx_lock);
2837}
2838
bd1034f0
AR
2839/**
2840 * s2io_mdio_write - Function to write in to MDIO registers
2841 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
2842 * @addr : address value
2843 * @value : data value
2844 * @dev : pointer to net_device structure
2845 * Description:
2846 * This function is used to write values to the MDIO registers
2847 * NONE
2848 */
2849static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
2850{
2851 u64 val64 = 0x0;
1ee6dd77
RB
2852 struct s2io_nic *sp = dev->priv;
2853 struct XENA_dev_config __iomem *bar0 = sp->bar0;
bd1034f0
AR
2854
2855 //address transaction
2856 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2857 | MDIO_MMD_DEV_ADDR(mmd_type)
2858 | MDIO_MMS_PRT_ADDR(0x0);
2859 writeq(val64, &bar0->mdio_control);
2860 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2861 writeq(val64, &bar0->mdio_control);
2862 udelay(100);
2863
2864 //Data transaction
2865 val64 = 0x0;
2866 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2867 | MDIO_MMD_DEV_ADDR(mmd_type)
2868 | MDIO_MMS_PRT_ADDR(0x0)
2869 | MDIO_MDIO_DATA(value)
2870 | MDIO_OP(MDIO_OP_WRITE_TRANS);
2871 writeq(val64, &bar0->mdio_control);
2872 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2873 writeq(val64, &bar0->mdio_control);
2874 udelay(100);
2875
2876 val64 = 0x0;
2877 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2878 | MDIO_MMD_DEV_ADDR(mmd_type)
2879 | MDIO_MMS_PRT_ADDR(0x0)
2880 | MDIO_OP(MDIO_OP_READ_TRANS);
2881 writeq(val64, &bar0->mdio_control);
2882 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2883 writeq(val64, &bar0->mdio_control);
2884 udelay(100);
2885
2886}
2887
2888/**
2889 * s2io_mdio_read - Function to write in to MDIO registers
2890 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
2891 * @addr : address value
2892 * @dev : pointer to net_device structure
2893 * Description:
2894 * This function is used to read values to the MDIO registers
2895 * NONE
2896 */
2897static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
2898{
2899 u64 val64 = 0x0;
2900 u64 rval64 = 0x0;
1ee6dd77
RB
2901 struct s2io_nic *sp = dev->priv;
2902 struct XENA_dev_config __iomem *bar0 = sp->bar0;
bd1034f0
AR
2903
2904 /* address transaction */
2905 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2906 | MDIO_MMD_DEV_ADDR(mmd_type)
2907 | MDIO_MMS_PRT_ADDR(0x0);
2908 writeq(val64, &bar0->mdio_control);
2909 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2910 writeq(val64, &bar0->mdio_control);
2911 udelay(100);
2912
2913 /* Data transaction */
2914 val64 = 0x0;
2915 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2916 | MDIO_MMD_DEV_ADDR(mmd_type)
2917 | MDIO_MMS_PRT_ADDR(0x0)
2918 | MDIO_OP(MDIO_OP_READ_TRANS);
2919 writeq(val64, &bar0->mdio_control);
2920 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2921 writeq(val64, &bar0->mdio_control);
2922 udelay(100);
2923
2924 /* Read the value from regs */
2925 rval64 = readq(&bar0->mdio_control);
2926 rval64 = rval64 & 0xFFFF0000;
2927 rval64 = rval64 >> 16;
2928 return rval64;
2929}
2930/**
2931 * s2io_chk_xpak_counter - Function to check the status of the xpak counters
2932 * @counter : couter value to be updated
2933 * @flag : flag to indicate the status
2934 * @type : counter type
2935 * Description:
2936 * This function is to check the status of the xpak counters value
2937 * NONE
2938 */
2939
2940static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, u16 flag, u16 type)
2941{
2942 u64 mask = 0x3;
2943 u64 val64;
2944 int i;
2945 for(i = 0; i <index; i++)
2946 mask = mask << 0x2;
2947
2948 if(flag > 0)
2949 {
2950 *counter = *counter + 1;
2951 val64 = *regs_stat & mask;
2952 val64 = val64 >> (index * 0x2);
2953 val64 = val64 + 1;
2954 if(val64 == 3)
2955 {
2956 switch(type)
2957 {
2958 case 1:
2959 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
2960 "service. Excessive temperatures may "
2961 "result in premature transceiver "
2962 "failure \n");
2963 break;
2964 case 2:
2965 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
2966 "service Excessive bias currents may "
2967 "indicate imminent laser diode "
2968 "failure \n");
2969 break;
2970 case 3:
2971 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
2972 "service Excessive laser output "
2973 "power may saturate far-end "
2974 "receiver\n");
2975 break;
2976 default:
2977 DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
2978 "type \n");
2979 }
2980 val64 = 0x0;
2981 }
2982 val64 = val64 << (index * 0x2);
2983 *regs_stat = (*regs_stat & (~mask)) | (val64);
2984
2985 } else {
2986 *regs_stat = *regs_stat & (~mask);
2987 }
2988}
2989
/**
 * s2io_updt_xpak_counter - Function to update the xpak counters
 * @dev : pointer to net_device struct
 * Description:
 * This function is to update the status of the xpak counters value
 * by polling the transceiver's DOM/alarm/warning registers over MDIO.
 * NONE
 */
static void s2io_updt_xpak_counter(struct net_device *dev)
{
	u16 flag  = 0x0;
	u16 type  = 0x0;
	u16 val16 = 0x0;
	u64 val64 = 0x0;
	u64 addr  = 0x0;

	struct s2io_nic *sp = dev->priv;
	struct stat_block *stat_info = sp->mac_control.stats_info;

	/* Check the communication with the MDIO slave */
	addr = 0x0000;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
	/* All-ones / all-zeroes reads indicate a dead or absent slave. */
	if((val64 == 0xFFFF) || (val64 == 0x0000))
	{
		DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
			  "Returned %llx\n", (unsigned long long)val64);
		return;
	}

	/* Check for the expected value of 2040 at PMA address 0x0000 */
	if(val64 != 0x2040)
	{
		DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
		DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x2040\n",
			  (unsigned long long)val64);
		return;
	}

	/* Loading the DOM register to MDIO register */
	/* NOTE(review): 0xA100/0xA070/0xA074 are vendor XPAK DOM register
	 * addresses — confirm against the transceiver datasheet. */
	addr = 0xA100;
	s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

	/* Reading the Alarm flags */
	addr = 0xA070;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

	/* Bit 7: transceiver temperature high alarm (type 1, field 0). */
	flag = CHECKBIT(val64, 0x7);
	type = 1;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
			      &stat_info->xpak_stat.xpak_regs_stat,
			      0x0, flag, type);

	if(CHECKBIT(val64, 0x6))
		stat_info->xpak_stat.alarm_transceiver_temp_low++;

	/* Bit 3: laser bias current high alarm (type 2, field 2). */
	flag = CHECKBIT(val64, 0x3);
	type = 2;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
			      &stat_info->xpak_stat.xpak_regs_stat,
			      0x2, flag, type);

	if(CHECKBIT(val64, 0x2))
		stat_info->xpak_stat.alarm_laser_bias_current_low++;

	/* Bit 1: laser output power high alarm (type 3, field 4). */
	flag = CHECKBIT(val64, 0x1);
	type = 3;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
			      &stat_info->xpak_stat.xpak_regs_stat,
			      0x4, flag, type);

	if(CHECKBIT(val64, 0x0))
		stat_info->xpak_stat.alarm_laser_output_power_low++;

	/* Reading the Warning flags; warnings are counted directly and do
	 * not go through the saturating s2io_chk_xpak_counter() logic. */
	addr = 0xA074;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

	if(CHECKBIT(val64, 0x7))
		stat_info->xpak_stat.warn_transceiver_temp_high++;

	if(CHECKBIT(val64, 0x6))
		stat_info->xpak_stat.warn_transceiver_temp_low++;

	if(CHECKBIT(val64, 0x3))
		stat_info->xpak_stat.warn_laser_bias_current_high++;

	if(CHECKBIT(val64, 0x2))
		stat_info->xpak_stat.warn_laser_bias_current_low++;

	if(CHECKBIT(val64, 0x1))
		stat_info->xpak_stat.warn_laser_output_power_high++;

	if(CHECKBIT(val64, 0x0))
		stat_info->xpak_stat.warn_laser_output_power_low++;
}
3088
/**
 * alarm_intr_handler - Alarm Interrrupt handler
 * @nic: device private variable
 * Description: If the interrupt was neither because of Rx packet or Tx
 * complete, this function is called. If the interrupt was to indicate
 * a loss of link, the OSM link status handler is invoked for any other
 * alarm interrupt the block that raised the interrupt is displayed
 * and a H/W reset is issued.
 * Return Value:
 * NONE
*/

static void alarm_intr_handler(struct s2io_nic *nic)
{
	struct net_device *dev = (struct net_device *) nic->dev;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0, err_reg = 0;
	u64 cnt;
	int i;
	/* Nothing to service if the adapter is already torn down. */
	if (atomic_read(&nic->card_state) == CARD_DOWN)
		return;
	nic->mac_control.stats_info->sw_stat.ring_full_cnt = 0;
	/* Handling the XPAK counters update */
	if(nic->mac_control.stats_info->xpak_stat.xpak_timer_count < 72000) {
		/* waiting for an hour: 72000 alarm ticks between XPAK polls */
		nic->mac_control.stats_info->xpak_stat.xpak_timer_count++;
	} else {
		s2io_updt_xpak_counter(dev);
		/* reset the count to zero */
		nic->mac_control.stats_info->xpak_stat.xpak_timer_count = 0;
	}

	/* Handling link status change error Intr.  Errors are cleared by
	 * writing the read value back (write-1-to-clear registers). */
	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		err_reg = readq(&bar0->mac_rmac_err_reg);
		writeq(err_reg, &bar0->mac_rmac_err_reg);
		if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
			/* Defer link handling to process context. */
			schedule_work(&nic->set_link_task);
		}
	}

	/* Handling Ecc errors */
	val64 = readq(&bar0->mc_err_reg);
	writeq(val64, &bar0->mc_err_reg);
	if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
		if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
			nic->mac_control.stats_info->sw_stat.
				double_ecc_errs++;
			DBG_PRINT(INIT_DBG, "%s: Device indicates ",
				  dev->name);
			DBG_PRINT(INIT_DBG, "double ECC error!!\n");
			if (nic->device_type != XFRAME_II_DEVICE) {
				/* Reset XframeI only if critical error */
				if (val64 & (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
					     MC_ERR_REG_MIRI_ECC_DB_ERR_1)) {
					netif_stop_queue(dev);
					schedule_work(&nic->rst_timer_task);
					nic->mac_control.stats_info->sw_stat.
							soft_reset_cnt++;
				}
			}
		} else {
			/* Single-bit ECC errors are corrected; just count. */
			nic->mac_control.stats_info->sw_stat.
				single_ecc_errs++;
		}
	}

	/* In case of a serious error, the device will be Reset. */
	val64 = readq(&bar0->serr_source);
	if (val64 & SERR_SOURCE_ANY) {
		nic->mac_control.stats_info->sw_stat.serious_err_cnt++;
		DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
		DBG_PRINT(ERR_DBG, "serious error %llx!!\n",
			  (unsigned long long)val64);
		netif_stop_queue(dev);
		schedule_work(&nic->rst_timer_task);
		nic->mac_control.stats_info->sw_stat.soft_reset_cnt++;
	}

	/*
	 * Also as mentioned in the latest Errata sheets if the PCC_FB_ECC
	 * Error occurs, the adapter will be recycled by disabling the
	 * adapter enable bit and enabling it again after the device
	 * becomes Quiescent.
	 */
	val64 = readq(&bar0->pcc_err_reg);
	writeq(val64, &bar0->pcc_err_reg);
	if (val64 & PCC_FB_ECC_DB_ERR) {
		u64 ac = readq(&bar0->adapter_control);
		ac &= ~(ADAPTER_CNTL_EN);
		writeq(ac, &bar0->adapter_control);
		/* Read back to flush the posted write before rescheduling. */
		ac = readq(&bar0->adapter_control);
		schedule_work(&nic->set_link_task);
	}
	/* Check for data parity error */
	val64 = readq(&bar0->pic_int_status);
	if (val64 & PIC_INT_GPIO) {
		val64 = readq(&bar0->gpio_int_reg);
		if (val64 & GPIO_INT_REG_DP_ERR_INT) {
			nic->mac_control.stats_info->sw_stat.parity_err_cnt++;
			schedule_work(&nic->rst_timer_task);
			nic->mac_control.stats_info->sw_stat.soft_reset_cnt++;
		}
	}

	/* Check for ring full counter: Xframe II keeps eight 16-bit
	 * saturating counters packed into two 64-bit registers. */
	if (nic->device_type & XFRAME_II_DEVICE) {
		val64 = readq(&bar0->ring_bump_counter1);
		for (i=0; i<4; i++) {
			cnt = ( val64 & vBIT(0xFFFF,(i*16),16));
			cnt >>= 64 - ((i+1)*16);
			nic->mac_control.stats_info->sw_stat.ring_full_cnt
				+= cnt;
		}

		val64 = readq(&bar0->ring_bump_counter2);
		for (i=0; i<4; i++) {
			cnt = ( val64 & vBIT(0xFFFF,(i*16),16));
			cnt >>= 64 - ((i+1)*16);
			nic->mac_control.stats_info->sw_stat.ring_full_cnt
				+= cnt;
		}
	}

	/* Other type of interrupts are not being handled now, TODO */
}
3215
20346722 3216/**
1da177e4 3217 * wait_for_cmd_complete - waits for a command to complete.
20346722 3218 * @sp : private member of the device structure, which is a pointer to the
1da177e4 3219 * s2io_nic structure.
20346722 3220 * Description: Function that waits for a command to Write into RMAC
3221 * ADDR DATA registers to be completed and returns either success or
3222 * error depending on whether the command was complete or not.
1da177e4
LT
3223 * Return value:
3224 * SUCCESS on success and FAILURE on failure.
3225 */
3226
9fc93a41
SS
3227static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3228 int bit_state)
1da177e4 3229{
9fc93a41 3230 int ret = FAILURE, cnt = 0, delay = 1;
1da177e4
LT
3231 u64 val64;
3232
9fc93a41
SS
3233 if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3234 return FAILURE;
3235
3236 do {
c92ca04b 3237 val64 = readq(addr);
9fc93a41
SS
3238 if (bit_state == S2IO_BIT_RESET) {
3239 if (!(val64 & busy_bit)) {
3240 ret = SUCCESS;
3241 break;
3242 }
3243 } else {
3244 if (!(val64 & busy_bit)) {
3245 ret = SUCCESS;
3246 break;
3247 }
1da177e4 3248 }
c92ca04b
AR
3249
3250 if(in_interrupt())
9fc93a41 3251 mdelay(delay);
c92ca04b 3252 else
9fc93a41 3253 msleep(delay);
c92ca04b 3254
9fc93a41
SS
3255 if (++cnt >= 10)
3256 delay = 50;
3257 } while (cnt < 20);
1da177e4
LT
3258 return ret;
3259}
19a60522
SS
3260/*
3261 * check_pci_device_id - Checks if the device id is supported
3262 * @id : device id
3263 * Description: Function to check if the pci device id is supported by driver.
3264 * Return value: Actual device id if supported else PCI_ANY_ID
3265 */
3266static u16 check_pci_device_id(u16 id)
3267{
3268 switch (id) {
3269 case PCI_DEVICE_ID_HERC_WIN:
3270 case PCI_DEVICE_ID_HERC_UNI:
3271 return XFRAME_II_DEVICE;
3272 case PCI_DEVICE_ID_S2IO_UNI:
3273 case PCI_DEVICE_ID_S2IO_WIN:
3274 return XFRAME_I_DEVICE;
3275 default:
3276 return PCI_ANY_ID;
3277 }
3278}
1da177e4 3279
/**
 * s2io_reset - Resets the card.
 * @sp : private member of the device structure.
 * Description: Function to Reset the card. This function then also
 * restores the previously saved PCI configuration space registers as
 * the card reset also resets the configuration space.
 * Return value:
 * void.
 */

static void s2io_reset(struct s2io_nic * sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;
	u16 subid, pci_cmd;
	int i;
	u16 val16;
	DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n",
			__FUNCTION__, sp->dev->name);

	/* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));

	if (sp->device_type == XFRAME_II_DEVICE) {
		int ret;
		/* Xframe II: reset via a D3hot->D0 power-state cycle (PME
		 * based SW reset); fall back to the register reset below
		 * if the power-state transition is refused. */
		ret = pci_set_power_state(sp->pdev, 3);
		if (!ret)
			ret = pci_set_power_state(sp->pdev, 0);
		else {
			DBG_PRINT(ERR_DBG,"%s PME based SW_Reset failed!\n",
					__FUNCTION__);
			goto old_way;
		}
		msleep(20);
		goto new_way;
	}
old_way:
	/* Legacy reset path: write the SW reset register directly. */
	val64 = SW_RESET_ALL;
	writeq(val64, &bar0->sw_reset);
new_way:
	/* CX4 transceivers need extra settle time after reset. */
	if (strstr(sp->product_name, "CX4")) {
		msleep(750);
	}
	msleep(250);
	for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {

		/* Restore the PCI state saved during initialization. */
		pci_restore_state(sp->pdev);
		/* Re-read the device id (config offset 0x2) to verify the
		 * device is back on the bus before giving up. */
		pci_read_config_word(sp->pdev, 0x2, &val16);
		if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
			break;
		msleep(200);
	}

	if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) {
		DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __FUNCTION__);
	}

	/* Put back the saved PCI-X command register (MMRBC/OST). */
	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);

	s2io_init_pci(sp);

	/* Set swapper to enable I/O register access */
	s2io_set_swapper(sp);

	/* Restore the MSIX table entries from local variables */
	restore_xmsi_data(sp);

	/* Clear certain PCI/PCI-X fields after reset */
	if (sp->device_type == XFRAME_II_DEVICE) {
		/* Clear "detected parity error" bit */
		pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);

		/* Clearing PCIX Ecc status register */
		pci_write_config_dword(sp->pdev, 0x68, 0x7C);

		/* Clearing PCI_STATUS error reflected here */
		writeq(BIT(62), &bar0->txpic_int_reg);
	}

	/* Reset device statistics maintained by OS */
	memset(&sp->stats, 0, sizeof (struct net_device_stats));

	/* SXE-002: Configure link and activity LED to turn it off */
	subid = sp->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (sp->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		/* NOTE(review): 0x2700 is an undocumented LED register
		 * offset per the SXE-002 errata — confirm against errata. */
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	/*
	 * Clear spurious ECC interrupts that would have occured on
	 * XFRAME II cards after reset.
	 */
	if (sp->device_type == XFRAME_II_DEVICE) {
		val64 = readq(&bar0->pcc_err_reg);
		writeq(val64, &bar0->pcc_err_reg);
	}

	sp->device_enabled_once = FALSE;
}
3385
/**
 * s2io_set_swapper - to set the swapper controle on the card
 * @sp : private member of the device structure,
 * pointer to the s2io_nic structure.
 * Description: Function to set the swapper control on the card
 * correctly depending on the 'endianness' of the system.
 * Return value:
 * SUCCESS on success and FAILURE on failure.
 */

static int s2io_set_swapper(struct s2io_nic * sp)
{
	struct net_device *dev = sp->dev;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64, valt, valr;

	/*
	 * Set proper endian settings and verify the same by reading
	 * the PIF Feed-back register.
	 */

	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x0123456789ABCDEFULL) {
		/* Feedback pattern wrong: probe the four possible
		 * flip/swap combinations until one reads back correctly. */
		int i = 0;
		u64 value[] = { 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
				0x8100008181000081ULL, /* FE=1, SE=0 */
				0x4200004242000042ULL, /* FE=0, SE=1 */
				0};		       /* FE=0, SE=0 */

		while(i<4) {
			writeq(value[i], &bar0->swapper_ctrl);
			val64 = readq(&bar0->pif_rd_swapper_fb);
			if (val64 == 0x0123456789ABCDEFULL)
				break;
			i++;
		}
		if (i == 4) {
			DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
				  dev->name);
			DBG_PRINT(ERR_DBG, "feedback read %llx\n",
				  (unsigned long long) val64);
			return FAILURE;
		}
		valr = value[i];
	} else {
		/* Read settings are already fine; keep current control. */
		valr = readq(&bar0->swapper_ctrl);
	}

	/* Now verify the WRITE path using the xmsi_address register as a
	 * scratch location. */
	valt = 0x0123456789ABCDEFULL;
	writeq(valt, &bar0->xmsi_address);
	val64 = readq(&bar0->xmsi_address);

	if(val64 != valt) {
		/* Probe write-side swap settings, keeping the verified
		 * read-side bits (valr) intact. */
		int i = 0;
		u64 value[] = { 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
				0x0081810000818100ULL, /* FE=1, SE=0 */
				0x0042420000424200ULL, /* FE=0, SE=1 */
				0};		       /* FE=0, SE=0 */

		while(i<4) {
			writeq((value[i] | valr), &bar0->swapper_ctrl);
			writeq(valt, &bar0->xmsi_address);
			val64 = readq(&bar0->xmsi_address);
			if(val64 == valt)
				break;
			i++;
		}
		if(i == 4) {
			unsigned long long x = val64;
			DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
			DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
			return FAILURE;
		}
	}
	/* Keep only the verified flip/swap bits; rebuild the rest below. */
	val64 = readq(&bar0->swapper_ctrl);
	val64 &= 0xFFFF000000000000ULL;

#ifdef __BIG_ENDIAN
	/*
	 * The device by default set to a big endian format, so a
	 * big endian driver need not set anything.
	 */
	val64 |= (SWAPPER_CTRL_TXP_FE |
		  SWAPPER_CTRL_TXP_SE |
		  SWAPPER_CTRL_TXD_R_FE |
		  SWAPPER_CTRL_TXD_W_FE |
		  SWAPPER_CTRL_TXF_R_FE |
		  SWAPPER_CTRL_RXD_R_FE |
		  SWAPPER_CTRL_RXD_W_FE |
		  SWAPPER_CTRL_RXF_W_FE |
		  SWAPPER_CTRL_XMSI_FE |
		  SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
	if (sp->intr_type == INTA)
		val64 |= SWAPPER_CTRL_XMSI_SE;
	writeq(val64, &bar0->swapper_ctrl);
#else
	/*
	 * Initially we enable all bits to make it accessible by the
	 * driver, then we selectively enable only those bits that
	 * we want to set.
	 */
	val64 |= (SWAPPER_CTRL_TXP_FE |
		  SWAPPER_CTRL_TXP_SE |
		  SWAPPER_CTRL_TXD_R_FE |
		  SWAPPER_CTRL_TXD_R_SE |
		  SWAPPER_CTRL_TXD_W_FE |
		  SWAPPER_CTRL_TXD_W_SE |
		  SWAPPER_CTRL_TXF_R_FE |
		  SWAPPER_CTRL_RXD_R_FE |
		  SWAPPER_CTRL_RXD_R_SE |
		  SWAPPER_CTRL_RXD_W_FE |
		  SWAPPER_CTRL_RXD_W_SE |
		  SWAPPER_CTRL_RXF_W_FE |
		  SWAPPER_CTRL_XMSI_FE |
		  SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
	if (sp->intr_type == INTA)
		val64 |= SWAPPER_CTRL_XMSI_SE;
	writeq(val64, &bar0->swapper_ctrl);
#endif
	val64 = readq(&bar0->swapper_ctrl);

	/*
	 * Verifying if endian settings are accurate by reading a
	 * feedback register.
	 */
	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x0123456789ABCDEFULL) {
		/* Endian settings are incorrect, calls for another dekko. */
		DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
			  dev->name);
		DBG_PRINT(ERR_DBG, "feedback read %llx\n",
			  (unsigned long long) val64);
		return FAILURE;
	}

	return SUCCESS;
}
3523
1ee6dd77 3524static int wait_for_msix_trans(struct s2io_nic *nic, int i)
cc6e7c44 3525{
1ee6dd77 3526 struct XENA_dev_config __iomem *bar0 = nic->bar0;
cc6e7c44
RA
3527 u64 val64;
3528 int ret = 0, cnt = 0;
3529
3530 do {
3531 val64 = readq(&bar0->xmsi_access);
3532 if (!(val64 & BIT(15)))
3533 break;
3534 mdelay(1);
3535 cnt++;
3536 } while(cnt < 5);
3537 if (cnt == 5) {
3538 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3539 ret = 1;
3540 }
3541
3542 return ret;
3543}
3544
1ee6dd77 3545static void restore_xmsi_data(struct s2io_nic *nic)
cc6e7c44 3546{
1ee6dd77 3547 struct XENA_dev_config __iomem *bar0 = nic->bar0;
cc6e7c44
RA
3548 u64 val64;
3549 int i;
3550
75c30b13 3551 for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
cc6e7c44
RA
3552 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3553 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3554 val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6));
3555 writeq(val64, &bar0->xmsi_access);
3556 if (wait_for_msix_trans(nic, i)) {
3557 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3558 continue;
3559 }
3560 }
3561}
3562
1ee6dd77 3563static void store_xmsi_data(struct s2io_nic *nic)
cc6e7c44 3564{
1ee6dd77 3565 struct XENA_dev_config __iomem *bar0 = nic->bar0;
cc6e7c44
RA
3566 u64 val64, addr, data;
3567 int i;
3568
3569 /* Store and display */
75c30b13 3570 for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
cc6e7c44
RA
3571 val64 = (BIT(15) | vBIT(i, 26, 6));
3572 writeq(val64, &bar0->xmsi_access);
3573 if (wait_for_msix_trans(nic, i)) {
3574 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3575 continue;
3576 }
3577 addr = readq(&bar0->xmsi_address);
3578 data = readq(&bar0->xmsi_data);
3579 if (addr && data) {
3580 nic->msix_info[i].addr = addr;
3581 nic->msix_info[i].data = data;
3582 }
3583 }
3584}
3585
/*
 * s2io_enable_msi - switch the adapter from INTA to MSI interrupts.
 * @nic: device private structure.
 * Enables PCI MSI, flips the device to use MSI-1 instead of MSI-0, and
 * routes all Tx fifos and Rx rings to that vector.
 * Returns 0 on success or the pci_enable_msi() error code.
 */
int s2io_enable_msi(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u16 msi_ctrl, msg_val;
	struct config_param *config = &nic->config;
	struct net_device *dev = nic->dev;
	u64 val64, tx_mat, rx_mat;
	int i, err;

	/* Clear BIT(1) of pic_control — presumably the INTA/MSI select
	 * bit; TODO confirm against the Xframe register spec. */
	val64 = readq(&bar0->pic_control);
	val64 &= ~BIT(1);
	writeq(val64, &bar0->pic_control);

	err = pci_enable_msi(nic->pdev);
	if (err) {
		DBG_PRINT(ERR_DBG, "%s: enabling MSI failed\n",
			  nic->dev->name);
		return err;
	}

	/*
	 * Enable MSI and use MSI-1 in stead of the standard MSI-0
	 * for interrupt handling.
	 */
	/* NOTE(review): 0x4c / 0x42 are raw PCI config offsets into this
	 * device's MSI capability (message data / control) — verify. */
	pci_read_config_word(nic->pdev, 0x4c, &msg_val);
	msg_val ^= 0x1;
	pci_write_config_word(nic->pdev, 0x4c, msg_val);
	pci_read_config_word(nic->pdev, 0x4c, &msg_val);

	pci_read_config_word(nic->pdev, 0x42, &msi_ctrl);
	msi_ctrl |= 0x10;
	pci_write_config_word(nic->pdev, 0x42, msi_ctrl);

	/* program MSI-1 into all usable Tx_Mat and Rx_Mat fields */
	tx_mat = readq(&bar0->tx_mat0_n[0]);
	for (i=0; i<config->tx_fifo_num; i++) {
		tx_mat |= TX_MAT_SET(i, 1);
	}
	writeq(tx_mat, &bar0->tx_mat0_n[0]);

	rx_mat = readq(&bar0->rx_mat);
	for (i=0; i<config->rx_ring_num; i++) {
		rx_mat |= RX_MAT_SET(i, 1);
	}
	writeq(rx_mat, &bar0->rx_mat);

	dev->irq = nic->pdev->irq;
	return 0;
}
3635
1ee6dd77 3636static int s2io_enable_msi_x(struct s2io_nic *nic)
cc6e7c44 3637{
1ee6dd77 3638 struct XENA_dev_config __iomem *bar0 = nic->bar0;
cc6e7c44
RA
3639 u64 tx_mat, rx_mat;
3640 u16 msi_control; /* Temp variable */
3641 int ret, i, j, msix_indx = 1;
3642
3643 nic->entries = kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct msix_entry),
3644 GFP_KERNEL);
3645 if (nic->entries == NULL) {
3646 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", __FUNCTION__);
3647 return -ENOMEM;
3648 }
3649 memset(nic->entries, 0, MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3650
3651 nic->s2io_entries =
3652 kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry),
3653 GFP_KERNEL);
3654 if (nic->s2io_entries == NULL) {
3655 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", __FUNCTION__);
3656 kfree(nic->entries);
3657 return -ENOMEM;
3658 }
3659 memset(nic->s2io_entries, 0,
3660 MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3661
3662 for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
3663 nic->entries[i].entry = i;
3664 nic->s2io_entries[i].entry = i;
3665 nic->s2io_entries[i].arg = NULL;
3666 nic->s2io_entries[i].in_use = 0;
3667 }
3668
3669 tx_mat = readq(&bar0->tx_mat0_n[0]);
3670 for (i=0; i<nic->config.tx_fifo_num; i++, msix_indx++) {
3671 tx_mat |= TX_MAT_SET(i, msix_indx);
3672 nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i];
3673 nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE;
3674 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3675 }
3676 writeq(tx_mat, &bar0->tx_mat0_n[0]);
3677
3678 if (!nic->config.bimodal) {
3679 rx_mat = readq(&bar0->rx_mat);
3680 for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
3681 rx_mat |= RX_MAT_SET(j, msix_indx);
3682 nic->s2io_entries[msix_indx].arg = &nic->mac_control.rings[j];
3683 nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3684 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3685 }
3686 writeq(rx_mat, &bar0->rx_mat);
3687 } else {
3688 tx_mat = readq(&bar0->tx_mat0_n[7]);
3689 for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
3690 tx_mat |= TX_MAT_SET(i, msix_indx);
3691 nic->s2io_entries[msix_indx].arg = &nic->mac_control.rings[j];
3692 nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3693 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3694 }
3695 writeq(tx_mat, &bar0->tx_mat0_n[7]);
3696 }
3697
c92ca04b 3698 nic->avail_msix_vectors = 0;
cc6e7c44 3699 ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X);
c92ca04b
AR
3700 /* We fail init if error or we get less vectors than min required */
3701 if (ret >= (nic->config.tx_fifo_num + nic->config.rx_ring_num + 1)) {
3702 nic->avail_msix_vectors = ret;
3703 ret = pci_enable_msix(nic->pdev, nic->entries, ret);
3704 }
cc6e7c44
RA
3705 if (ret) {
3706 DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
3707 kfree(nic->entries);
3708 kfree(nic->s2io_entries);
3709 nic->entries = NULL;
3710 nic->s2io_entries = NULL;
c92ca04b 3711 nic->avail_msix_vectors = 0;
cc6e7c44
RA
3712 return -ENOMEM;
3713 }
c92ca04b
AR
3714 if (!nic->avail_msix_vectors)
3715 nic->avail_msix_vectors = MAX_REQUESTED_MSI_X;
cc6e7c44
RA
3716
3717 /*
3718 * To enable MSI-X, MSI also needs to be enabled, due to a bug
3719 * in the herc NIC. (Temp change, needs to be removed later)
3720 */
3721 pci_read_config_word(nic->pdev, 0x42, &msi_control);
3722 msi_control |= 0x1; /* Enable MSI */
3723 pci_write_config_word(nic->pdev, 0x42, msi_control);
3724
3725 return 0;
3726}
3727
1da177e4
LT
3728/* ********************************************************* *
3729 * Functions defined below concern the OS part of the driver *
3730 * ********************************************************* */
3731
20346722 3732/**
1da177e4
LT
3733 * s2io_open - open entry point of the driver
3734 * @dev : pointer to the device structure.
3735 * Description:
3736 * This function is the open entry point of the driver. It mainly calls a
3737 * function to allocate Rx buffers and inserts them into the buffer
20346722 3738 * descriptors and then enables the Rx part of the NIC.
1da177e4
LT
3739 * Return value:
3740 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3741 * file on failure.
3742 */
3743
ac1f60db 3744static int s2io_open(struct net_device *dev)
1da177e4 3745{
1ee6dd77 3746 struct s2io_nic *sp = dev->priv;
1da177e4
LT
3747 int err = 0;
3748
20346722 3749 /*
3750 * Make sure you have link off by default every time
1da177e4
LT
3751 * Nic is initialized
3752 */
3753 netif_carrier_off(dev);
0b1f7ebe 3754 sp->last_link_state = 0;
1da177e4
LT
3755
3756 /* Initialize H/W and enable interrupts */
c92ca04b
AR
3757 err = s2io_card_up(sp);
3758 if (err) {
1da177e4
LT
3759 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3760 dev->name);
e6a8fee2 3761 goto hw_init_failed;
1da177e4
LT
3762 }
3763
3764 if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
3765 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
e6a8fee2 3766 s2io_card_down(sp);
20346722 3767 err = -ENODEV;
e6a8fee2 3768 goto hw_init_failed;
1da177e4
LT
3769 }
3770
3771 netif_start_queue(dev);
3772 return 0;
20346722 3773
20346722 3774hw_init_failed:
cc6e7c44
RA
3775 if (sp->intr_type == MSI_X) {
3776 if (sp->entries)
3777 kfree(sp->entries);
3778 if (sp->s2io_entries)
3779 kfree(sp->s2io_entries);
3780 }
20346722 3781 return err;
1da177e4
LT
3782}
3783
3784/**
3785 * s2io_close -close entry point of the driver
3786 * @dev : device pointer.
3787 * Description:
3788 * This is the stop entry point of the driver. It needs to undo exactly
3789 * whatever was done by the open entry point,thus it's usually referred to
3790 * as the close function.Among other things this function mainly stops the
3791 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3792 * Return value:
3793 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3794 * file on failure.
3795 */
3796
ac1f60db 3797static int s2io_close(struct net_device *dev)
1da177e4 3798{
1ee6dd77 3799 struct s2io_nic *sp = dev->priv;
cc6e7c44 3800
1da177e4
LT
3801 netif_stop_queue(dev);
3802 /* Reset card, kill tasklet and free Tx and Rx buffers. */
e6a8fee2 3803 s2io_card_down(sp);
cc6e7c44 3804
1da177e4
LT
3805 sp->device_close_flag = TRUE; /* Device is shut down. */
3806 return 0;
3807}
3808
3809/**
3810 * s2io_xmit - Tx entry point of te driver
3811 * @skb : the socket buffer containing the Tx data.
3812 * @dev : device pointer.
3813 * Description :
3814 * This function is the Tx entry point of the driver. S2IO NIC supports
3815 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
3816 * NOTE: when device cant queue the pkt,just the trans_start variable will
3817 * not be upadted.
3818 * Return value:
3819 * 0 on success & 1 on failure.
3820 */
3821
ac1f60db 3822static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
1da177e4 3823{
1ee6dd77 3824 struct s2io_nic *sp = dev->priv;
1da177e4
LT
3825 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
3826 register u64 val64;
1ee6dd77
RB
3827 struct TxD *txdp;
3828 struct TxFIFO_element __iomem *tx_fifo;
1da177e4 3829 unsigned long flags;
be3a6b02 3830 u16 vlan_tag = 0;
3831 int vlan_priority = 0;
1ee6dd77 3832 struct mac_info *mac_control;
1da177e4 3833 struct config_param *config;
75c30b13 3834 int offload_type;
1da177e4
LT
3835
3836 mac_control = &sp->mac_control;
3837 config = &sp->config;
3838
20346722 3839 DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
1da177e4 3840 spin_lock_irqsave(&sp->tx_lock, flags);
1da177e4 3841 if (atomic_read(&sp->card_state) == CARD_DOWN) {
20346722 3842 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
1da177e4
LT
3843 dev->name);
3844 spin_unlock_irqrestore(&sp->tx_lock, flags);
20346722 3845 dev_kfree_skb(skb);
3846 return 0;
1da177e4
LT
3847 }
3848
3849 queue = 0;
1da177e4 3850
be3a6b02 3851 /* Get Fifo number to Transmit based on vlan priority */
3852 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3853 vlan_tag = vlan_tx_tag_get(skb);
3854 vlan_priority = vlan_tag >> 13;
3855 queue = config->fifo_mapping[vlan_priority];
3856 }
3857
20346722 3858 put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
3859 get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
1ee6dd77 3860 txdp = (struct TxD *) mac_control->fifos[queue].list_info[put_off].
20346722 3861 list_virt_addr;
3862
3863 queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
1da177e4 3864 /* Avoid "put" pointer going beyond "get" pointer */
863c11a9
AR
3865 if (txdp->Host_Control ||
3866 ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
776bd20f 3867 DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
1da177e4
LT
3868 netif_stop_queue(dev);
3869 dev_kfree_skb(skb);
3870 spin_unlock_irqrestore(&sp->tx_lock, flags);
3871 return 0;
3872 }
0b1f7ebe 3873
3874 /* A buffer with no data will be dropped */
3875 if (!skb->len) {
3876 DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
3877 dev_kfree_skb(skb);
3878 spin_unlock_irqrestore(&sp->tx_lock, flags);
3879 return 0;
3880 }
3881
75c30b13 3882 offload_type = s2io_offload_type(skb);
75c30b13 3883 if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
1da177e4 3884 txdp->Control_1 |= TXD_TCP_LSO_EN;
75c30b13 3885 txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
1da177e4 3886 }
84fa7933 3887 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1da177e4
LT
3888 txdp->Control_2 |=
3889 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
3890 TXD_TX_CKO_UDP_EN);
3891 }
fed5eccd
AR
3892 txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
3893 txdp->Control_1 |= TXD_LIST_OWN_XENA;
1da177e4 3894 txdp->Control_2 |= config->tx_intr_type;
d8892c6e 3895
be3a6b02 3896 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3897 txdp->Control_2 |= TXD_VLAN_ENABLE;
3898 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
3899 }
3900
fed5eccd 3901 frg_len = skb->len - skb->data_len;
75c30b13 3902 if (offload_type == SKB_GSO_UDP) {
fed5eccd
AR
3903 int ufo_size;
3904
75c30b13 3905 ufo_size = s2io_udp_mss(skb);
fed5eccd
AR
3906 ufo_size &= ~7;
3907 txdp->Control_1 |= TXD_UFO_EN;
3908 txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
3909 txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
3910#ifdef __BIG_ENDIAN
3911 sp->ufo_in_band_v[put_off] =
3912 (u64)skb_shinfo(skb)->ip6_frag_id;
3913#else
3914 sp->ufo_in_band_v[put_off] =
3915 (u64)skb_shinfo(skb)->ip6_frag_id << 32;
3916#endif
3917 txdp->Host_Control = (unsigned long)sp->ufo_in_band_v;
3918 txdp->Buffer_Pointer = pci_map_single(sp->pdev,
3919 sp->ufo_in_band_v,
3920 sizeof(u64), PCI_DMA_TODEVICE);
3921 txdp++;
fed5eccd 3922 }
1da177e4 3923
fed5eccd
AR
3924 txdp->Buffer_Pointer = pci_map_single
3925 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
3926 txdp->Host_Control = (unsigned long) skb;
3927 txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
75c30b13 3928 if (offload_type == SKB_GSO_UDP)
fed5eccd
AR
3929 txdp->Control_1 |= TXD_UFO_EN;
3930
3931 frg_cnt = skb_shinfo(skb)->nr_frags;
1da177e4
LT
3932 /* For fragmented SKB. */
3933 for (i = 0; i < frg_cnt; i++) {
3934 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
0b1f7ebe 3935 /* A '0' length fragment will be ignored */
3936 if (!frag->size)
3937 continue;
1da177e4
LT
3938 txdp++;
3939 txdp->Buffer_Pointer = (u64) pci_map_page
3940 (sp->pdev, frag->page, frag->page_offset,
3941 frag->size, PCI_DMA_TODEVICE);
efd51b5c 3942 txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
75c30b13 3943 if (offload_type == SKB_GSO_UDP)
fed5eccd 3944 txdp->Control_1 |= TXD_UFO_EN;
1da177e4
LT
3945 }
3946 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
3947
75c30b13 3948 if (offload_type == SKB_GSO_UDP)
fed5eccd
AR
3949 frg_cnt++; /* as Txd0 was used for inband header */
3950
1da177e4 3951 tx_fifo = mac_control->tx_FIFO_start[queue];
20346722 3952 val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
1da177e4
LT
3953 writeq(val64, &tx_fifo->TxDL_Pointer);
3954
3955 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
3956 TX_FIFO_LAST_LIST);
75c30b13 3957 if (offload_type)
fed5eccd 3958 val64 |= TX_FIFO_SPECIAL_FUNC;
75c30b13 3959
1da177e4
LT
3960 writeq(val64, &tx_fifo->List_Control);
3961
303bcb4b 3962 mmiowb();
3963
1da177e4 3964 put_off++;
863c11a9
AR
3965 if (put_off == mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1)
3966 put_off = 0;
20346722 3967 mac_control->fifos[queue].tx_curr_put_info.offset = put_off;
1da177e4
LT
3968
3969 /* Avoid "put" pointer going beyond "get" pointer */
863c11a9 3970 if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
bd1034f0 3971 sp->mac_control.stats_info->sw_stat.fifo_full_cnt++;
1da177e4
LT
3972 DBG_PRINT(TX_DBG,
3973 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
3974 put_off, get_off);
3975 netif_stop_queue(dev);
3976 }
3977
3978 dev->trans_start = jiffies;
3979 spin_unlock_irqrestore(&sp->tx_lock, flags);
3980
3981 return 0;
3982}
3983
25fff88e 3984static void
3985s2io_alarm_handle(unsigned long data)
3986{
1ee6dd77 3987 struct s2io_nic *sp = (struct s2io_nic *)data;
25fff88e 3988
3989 alarm_intr_handler(sp);
3990 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
3991}
3992
1ee6dd77 3993static int s2io_chk_rx_buffers(struct s2io_nic *sp, int rng_n)
75c30b13
AR
3994{
3995 int rxb_size, level;
3996
3997 if (!sp->lro) {
3998 rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
3999 level = rx_buffer_level(sp, rxb_size, rng_n);
4000
4001 if ((level == PANIC) && (!TASKLET_IN_USE)) {
4002 int ret;
4003 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
4004 DBG_PRINT(INTR_DBG, "PANIC levels\n");
4005 if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
4006 DBG_PRINT(ERR_DBG, "Out of memory in %s",
4007 __FUNCTION__);
4008 clear_bit(0, (&sp->tasklet_status));
4009 return -1;
4010 }
4011 clear_bit(0, (&sp->tasklet_status));
4012 } else if (level == LOW)
4013 tasklet_schedule(&sp->task);
4014
4015 } else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
4016 DBG_PRINT(ERR_DBG, "%s:Out of memory", sp->dev->name);
4017 DBG_PRINT(ERR_DBG, " in Rx Intr!!\n");
4018 }
4019 return 0;
4020}
4021
7d12e780 4022static irqreturn_t s2io_msi_handle(int irq, void *dev_id)
cc6e7c44
RA
4023{
4024 struct net_device *dev = (struct net_device *) dev_id;
1ee6dd77 4025 struct s2io_nic *sp = dev->priv;
cc6e7c44 4026 int i;
1ee6dd77 4027 struct mac_info *mac_control;
cc6e7c44
RA
4028 struct config_param *config;
4029
4030 atomic_inc(&sp->isr_cnt);
4031 mac_control = &sp->mac_control;
4032 config = &sp->config;
4033 DBG_PRINT(INTR_DBG, "%s: MSI handler\n", __FUNCTION__);
4034
4035 /* If Intr is because of Rx Traffic */
4036 for (i = 0; i < config->rx_ring_num; i++)
4037 rx_intr_handler(&mac_control->rings[i]);
4038
4039 /* If Intr is because of Tx Traffic */
4040 for (i = 0; i < config->tx_fifo_num; i++)
4041 tx_intr_handler(&mac_control->fifos[i]);
4042
4043 /*
4044 * If the Rx buffer count is below the panic threshold then
4045 * reallocate the buffers from the interrupt handler itself,
4046 * else schedule a tasklet to reallocate the buffers.
4047 */
75c30b13
AR
4048 for (i = 0; i < config->rx_ring_num; i++)
4049 s2io_chk_rx_buffers(sp, i);
cc6e7c44
RA
4050
4051 atomic_dec(&sp->isr_cnt);
4052 return IRQ_HANDLED;
4053}
4054
7d12e780 4055static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
cc6e7c44 4056{
1ee6dd77
RB
4057 struct ring_info *ring = (struct ring_info *)dev_id;
4058 struct s2io_nic *sp = ring->nic;
cc6e7c44
RA
4059
4060 atomic_inc(&sp->isr_cnt);
cc6e7c44 4061
75c30b13
AR
4062 rx_intr_handler(ring);
4063 s2io_chk_rx_buffers(sp, ring->ring_no);
7d3d0439 4064
cc6e7c44 4065 atomic_dec(&sp->isr_cnt);
cc6e7c44
RA
4066 return IRQ_HANDLED;
4067}
4068
7d12e780 4069static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
cc6e7c44 4070{
1ee6dd77
RB
4071 struct fifo_info *fifo = (struct fifo_info *)dev_id;
4072 struct s2io_nic *sp = fifo->nic;
cc6e7c44
RA
4073
4074 atomic_inc(&sp->isr_cnt);
4075 tx_intr_handler(fifo);
4076 atomic_dec(&sp->isr_cnt);
4077 return IRQ_HANDLED;
4078}
/*
 * Handle TXPIC (GPIO) interrupts: decode link up/down transitions from
 * the GPIO interrupt register and adjust the link state, LEDs and the
 * GPIO interrupt masks accordingly.
 */
static void s2io_txpic_intr_handle(struct s2io_nic *sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;

	val64 = readq(&bar0->pic_int_status);
	if (val64 & PIC_INT_GPIO) {
		val64 = readq(&bar0->gpio_int_reg);
		if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
		    (val64 & GPIO_INT_REG_LINK_UP)) {
			/*
			 * This is unstable state so clear both up/down
			 * interrupt and adapter to re-evaluate the link state.
			 */
			val64 |=  GPIO_INT_REG_LINK_DOWN;
			val64 |= GPIO_INT_REG_LINK_UP;
			writeq(val64, &bar0->gpio_int_reg);
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~(GPIO_INT_MASK_LINK_UP |
				   GPIO_INT_MASK_LINK_DOWN);
			writeq(val64, &bar0->gpio_int_mask);
		}
		else if (val64 & GPIO_INT_REG_LINK_UP) {
			val64 = readq(&bar0->adapter_status);
			/* Enable Adapter */
			val64 = readq(&bar0->adapter_control);
			val64 |= ADAPTER_CNTL_EN;
			writeq(val64, &bar0->adapter_control);
			val64 |= ADAPTER_LED_ON;
			writeq(val64, &bar0->adapter_control);
			if (!sp->device_enabled_once)
				sp->device_enabled_once = 1;

			s2io_link(sp, LINK_UP);
			/*
			 * unmask link down interrupt and mask link-up
			 * intr
			 */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_DOWN;
			val64 |= GPIO_INT_MASK_LINK_UP;
			writeq(val64, &bar0->gpio_int_mask);

		}else if (val64 & GPIO_INT_REG_LINK_DOWN) {
			val64 = readq(&bar0->adapter_status);
			s2io_link(sp, LINK_DOWN);
			/* Link is down so unmaks link up interrupt */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_UP;
			val64 |= GPIO_INT_MASK_LINK_DOWN;
			writeq(val64, &bar0->gpio_int_mask);
		}
	}
	/* NOTE(review): result discarded -- presumably a read to flush the
	 * preceding posted writes; confirm against hardware docs. */
	val64 = readq(&bar0->gpio_int_mask);
}
4134
1da177e4
LT
4135/**
4136 * s2io_isr - ISR handler of the device .
4137 * @irq: the irq of the device.
4138 * @dev_id: a void pointer to the dev structure of the NIC.
20346722 4139 * Description: This function is the ISR handler of the device. It
4140 * identifies the reason for the interrupt and calls the relevant
4141 * service routines. As a contongency measure, this ISR allocates the
1da177e4
LT
4142 * recv buffers, if their numbers are below the panic value which is
4143 * presently set to 25% of the original number of rcv buffers allocated.
4144 * Return value:
20346722 4145 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
1da177e4
LT
4146 * IRQ_NONE: will be returned if interrupt is not from our device
4147 */
static irqreturn_t s2io_isr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct s2io_nic *sp = dev->priv;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	int i;
	u64 reason = 0;
	struct mac_info *mac_control;
	struct config_param *config;

	/* isr_cnt lets teardown code wait for in-flight handlers. */
	atomic_inc(&sp->isr_cnt);
	mac_control = &sp->mac_control;
	config = &sp->config;

	/*
	 * Identify the cause for interrupt and call the appropriate
	 * interrupt handler. Causes for the interrupt could be;
	 * 1. Rx of packet.
	 * 2. Tx complete.
	 * 3. Link down.
	 * 4. Error in any functional blocks of the NIC.
	 */
	reason = readq(&bar0->general_int_status);

	if (!reason) {
		/* The interrupt was not raised by us. */
		atomic_dec(&sp->isr_cnt);
		return IRQ_NONE;
	}
	else if (unlikely(reason == S2IO_MINUS_ONE) ) {
		/* Disable device and get out */
		/* NOTE(review): all-ones usually means the PCI read failed
		 * (device gone) -- treated as not our interrupt; confirm. */
		atomic_dec(&sp->isr_cnt);
		return IRQ_NONE;
	}

	if (napi) {
		/* NAPI mode: mask further Rx interrupts and let the poll
		 * routine do the Rx work. */
		if (reason & GEN_INTR_RXTRAFFIC) {
			if ( likely ( netif_rx_schedule_prep(dev)) ) {
				__netif_rx_schedule(dev);
				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
			}
			else
				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
		}
	} else {
		/*
		 * Rx handler is called by default, without checking for the
		 * cause of interrupt.
		 * rx_traffic_int reg is an R1 register, writing all 1's
		 * will ensure that the actual interrupt causing bit get's
		 * cleared and hence a read can be avoided.
		 */
		if (reason & GEN_INTR_RXTRAFFIC)
			writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);

		for (i = 0; i < config->rx_ring_num; i++) {
			rx_intr_handler(&mac_control->rings[i]);
		}
	}

	/*
	 * tx_traffic_int reg is an R1 register, writing all 1's
	 * will ensure that the actual interrupt causing bit get's
	 * cleared and hence a read can be avoided.
	 */
	if (reason & GEN_INTR_TXTRAFFIC)
		writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);

	for (i = 0; i < config->tx_fifo_num; i++)
		tx_intr_handler(&mac_control->fifos[i]);

	if (reason & GEN_INTR_TXPIC)
		s2io_txpic_intr_handle(sp);
	/*
	 * If the Rx buffer count is below the panic threshold then
	 * reallocate the buffers from the interrupt handler itself,
	 * else schedule a tasklet to reallocate the buffers.
	 */
	if (!napi) {
		for (i = 0; i < config->rx_ring_num; i++)
			s2io_chk_rx_buffers(sp, i);
	}

	/* Re-enable all interrupts; the readl flushes the posted write. */
	writeq(0, &bar0->general_int_mask);
	readl(&bar0->general_int_status);

	atomic_dec(&sp->isr_cnt);
	return IRQ_HANDLED;
}
4237
7ba013ac 4238/**
4239 * s2io_updt_stats -
4240 */
1ee6dd77 4241static void s2io_updt_stats(struct s2io_nic *sp)
7ba013ac 4242{
1ee6dd77 4243 struct XENA_dev_config __iomem *bar0 = sp->bar0;
7ba013ac 4244 u64 val64;
4245 int cnt = 0;
4246
4247 if (atomic_read(&sp->card_state) == CARD_UP) {
4248 /* Apprx 30us on a 133 MHz bus */
4249 val64 = SET_UPDT_CLICKS(10) |
4250 STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4251 writeq(val64, &bar0->stat_cfg);
4252 do {
4253 udelay(100);
4254 val64 = readq(&bar0->stat_cfg);
4255 if (!(val64 & BIT(0)))
4256 break;
4257 cnt++;
4258 if (cnt == 5)
4259 break; /* Updt failed */
4260 } while(1);
75c30b13 4261 } else {
1ee6dd77 4262 memset(sp->mac_control.stats_info, 0, sizeof(struct stat_block));
7ba013ac 4263 }
4264}
4265
1da177e4 4266/**
20346722 4267 * s2io_get_stats - Updates the device statistics structure.
1da177e4
LT
4268 * @dev : pointer to the device structure.
4269 * Description:
20346722 4270 * This function updates the device statistics structure in the s2io_nic
1da177e4
LT
4271 * structure and returns a pointer to the same.
4272 * Return value:
4273 * pointer to the updated net_device_stats structure.
4274 */
4275
ac1f60db 4276static struct net_device_stats *s2io_get_stats(struct net_device *dev)
1da177e4 4277{
1ee6dd77
RB
4278 struct s2io_nic *sp = dev->priv;
4279 struct mac_info *mac_control;
1da177e4
LT
4280 struct config_param *config;
4281
20346722 4282
1da177e4
LT
4283 mac_control = &sp->mac_control;
4284 config = &sp->config;
4285
7ba013ac 4286 /* Configure Stats for immediate updt */
4287 s2io_updt_stats(sp);
4288
4289 sp->stats.tx_packets =
4290 le32_to_cpu(mac_control->stats_info->tmac_frms);
20346722 4291 sp->stats.tx_errors =
4292 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
4293 sp->stats.rx_errors =
ee705dba 4294 le64_to_cpu(mac_control->stats_info->rmac_drop_frms);
20346722 4295 sp->stats.multicast =
4296 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
1da177e4 4297 sp->stats.rx_length_errors =
ee705dba 4298 le64_to_cpu(mac_control->stats_info->rmac_long_frms);
1da177e4
LT
4299
4300 return (&sp->stats);
4301}
4302
4303/**
4304 * s2io_set_multicast - entry point for multicast address enable/disable.
4305 * @dev : pointer to the device structure
4306 * Description:
20346722 4307 * This function is a driver entry point which gets called by the kernel
4308 * whenever multicast addresses must be enabled/disabled. This also gets
1da177e4
LT
4309 * called to set/reset promiscuous mode. Depending on the deivce flag, we
4310 * determine, if multicast address must be enabled or if promiscuous mode
4311 * is to be disabled etc.
4312 * Return value:
4313 * void.
4314 */
4315
static void s2io_set_multicast(struct net_device *dev)
{
	int i, j, prev_cnt;
	struct dev_mc_list *mclist;
	struct s2io_nic *sp = dev->priv;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
	    0xfeffffffffffULL;
	u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
	void __iomem *add;

	if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
		/* Enable all Multicast addresses */
		writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
		       &bar0->rmac_addr_data0_mem);
		writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
		       &bar0->rmac_addr_data1_mem);
		val64 = RMAC_ADDR_CMD_MEM_WE |
		    RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
		    RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
		writeq(val64, &bar0->rmac_addr_cmd_mem);
		/* Wait till command completes */
		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
					RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
					S2IO_BIT_RESET);

		sp->m_cast_flg = 1;
		sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
	} else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
		/* Disable all Multicast addresses */
		writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
		       &bar0->rmac_addr_data0_mem);
		writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
		       &bar0->rmac_addr_data1_mem);
		val64 = RMAC_ADDR_CMD_MEM_WE |
		    RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
		    RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
		writeq(val64, &bar0->rmac_addr_cmd_mem);
		/* Wait till command completes */
		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
					RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
					S2IO_BIT_RESET);

		sp->m_cast_flg = 0;
		sp->all_multi_pos = 0;
	}

	if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
		/* Put the NIC into promiscuous mode */
		add = &bar0->mac_cfg;
		val64 = readq(&bar0->mac_cfg);
		val64 |= MAC_CFG_RMAC_PROM_ENABLE;

		/* mac_cfg is key-protected: each 32-bit half must be
		 * unlocked with the RMAC_CFG_KEY write just before it. */
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) val64, add);
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64 >> 32), (add + 4));

		/* Promiscuous mode disables VLAN stripping unless the
		 * vlan_tag_strip module parameter forces it on. */
		if (vlan_tag_strip != 1) {
			val64 = readq(&bar0->rx_pa_cfg);
			val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
			writeq(val64, &bar0->rx_pa_cfg);
			vlan_strip_flag = 0;
		}

		val64 = readq(&bar0->mac_cfg);
		sp->promisc_flg = 1;
		DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
			  dev->name);
	} else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
		/* Remove the NIC from promiscuous mode */
		add = &bar0->mac_cfg;
		val64 = readq(&bar0->mac_cfg);
		val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;

		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) val64, add);
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64 >> 32), (add + 4));

		/* Restore VLAN stripping unless the module parameter
		 * forces it off. */
		if (vlan_tag_strip != 0) {
			val64 = readq(&bar0->rx_pa_cfg);
			val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
			writeq(val64, &bar0->rx_pa_cfg);
			vlan_strip_flag = 1;
		}

		val64 = readq(&bar0->mac_cfg);
		sp->promisc_flg = 0;
		DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
			  dev->name);
	}

	/* Update individual M_CAST address list */
	if ((!sp->m_cast_flg) && dev->mc_count) {
		if (dev->mc_count >
		    (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
			DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
				  dev->name);
			DBG_PRINT(ERR_DBG, "can be added, please enable ");
			DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
			return;
		}

		prev_cnt = sp->mc_addr_count;
		sp->mc_addr_count = dev->mc_count;

		/* Clear out the previous list of Mc in the H/W. */
		for (i = 0; i < prev_cnt; i++) {
			writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
			       &bar0->rmac_addr_data0_mem);
			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
			       &bar0->rmac_addr_data1_mem);
			val64 = RMAC_ADDR_CMD_MEM_WE |
			    RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
			    RMAC_ADDR_CMD_MEM_OFFSET
			    (MAC_MC_ADDR_START_OFFSET + i);
			writeq(val64, &bar0->rmac_addr_cmd_mem);

			/* Wait for command completes */
			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
					RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
					S2IO_BIT_RESET)) {
				DBG_PRINT(ERR_DBG, "%s: Adding ",
					  dev->name);
				DBG_PRINT(ERR_DBG, "Multicasts failed\n");
				return;
			}
		}

		/* Create the new Rx filter list and update the same in H/W. */
		for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
		     i++, mclist = mclist->next) {
			memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
			       ETH_ALEN);
			mac_addr = 0;
			/* Pack the 6 address bytes, MSB first, into a u64. */
			for (j = 0; j < ETH_ALEN; j++) {
				mac_addr |= mclist->dmi_addr[j];
				mac_addr <<= 8;
			}
			mac_addr >>= 8;
			writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
			       &bar0->rmac_addr_data0_mem);
			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
			       &bar0->rmac_addr_data1_mem);
			val64 = RMAC_ADDR_CMD_MEM_WE |
			    RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
			    RMAC_ADDR_CMD_MEM_OFFSET
			    (i + MAC_MC_ADDR_START_OFFSET);
			writeq(val64, &bar0->rmac_addr_cmd_mem);

			/* Wait for command completes */
			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
					RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
					S2IO_BIT_RESET)) {
				DBG_PRINT(ERR_DBG, "%s: Adding ",
					  dev->name);
				DBG_PRINT(ERR_DBG, "Multicasts failed\n");
				return;
			}
		}
	}
}
4479
4480/**
20346722 4481 * s2io_set_mac_addr - Programs the Xframe mac address
1da177e4
LT
4482 * @dev : pointer to the device structure.
4483 * @addr: a uchar pointer to the new mac address which is to be set.
20346722 4484 * Description : This procedure will program the Xframe to receive
1da177e4 4485 * frames with new Mac Address
20346722 4486 * Return value: SUCCESS on success and an appropriate (-)ve integer
1da177e4
LT
4487 * as defined in errno.h file on failure.
4488 */
4489
26df54bf 4490static int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
1da177e4 4491{
1ee6dd77
RB
4492 struct s2io_nic *sp = dev->priv;
4493 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
4494 register u64 val64, mac_addr = 0;
4495 int i;
4496
20346722 4497 /*
1da177e4
LT
4498 * Set the new MAC address as the new unicast filter and reflect this
4499 * change on the device address registered with the OS. It will be
20346722 4500 * at offset 0.
1da177e4
LT
4501 */
4502 for (i = 0; i < ETH_ALEN; i++) {
4503 mac_addr <<= 8;
4504 mac_addr |= addr[i];
4505 }
4506
4507 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4508 &bar0->rmac_addr_data0_mem);
4509
4510 val64 =
4511 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4512 RMAC_ADDR_CMD_MEM_OFFSET(0);
4513 writeq(val64, &bar0->rmac_addr_cmd_mem);
4514 /* Wait till command completes */
c92ca04b 4515 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
9fc93a41 4516 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET)) {
1da177e4
LT
4517 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
4518 return FAILURE;
4519 }
4520
4521 return SUCCESS;
4522}
4523
4524/**
20346722 4525 * s2io_ethtool_sset - Sets different link parameters.
1da177e4
LT
4526 * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
4527 * @info: pointer to the structure with parameters given by ethtool to set
4528 * link information.
4529 * Description:
20346722 4530 * The function sets different link parameters provided by the user onto
1da177e4
LT
4531 * the NIC.
4532 * Return value:
4533 * 0 on success.
4534*/
4535
4536static int s2io_ethtool_sset(struct net_device *dev,
4537 struct ethtool_cmd *info)
4538{
1ee6dd77 4539 struct s2io_nic *sp = dev->priv;
1da177e4
LT
4540 if ((info->autoneg == AUTONEG_ENABLE) ||
4541 (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
4542 return -EINVAL;
4543 else {
4544 s2io_close(sp->dev);
4545 s2io_open(sp->dev);
4546 }
4547
4548 return 0;
4549}
4550
4551/**
20346722 4552 * s2io_ethtol_gset - Return link specific information.
1da177e4
LT
4553 * @sp : private member of the device structure, pointer to the
4554 * s2io_nic structure.
4555 * @info : pointer to the structure with parameters given by ethtool
4556 * to return link information.
4557 * Description:
4558 * Returns link specific information like speed, duplex etc.. to ethtool.
4559 * Return value :
4560 * return 0 on success.
4561 */
4562
4563static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
4564{
1ee6dd77 4565 struct s2io_nic *sp = dev->priv;
1da177e4
LT
4566 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
4567 info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
4568 info->port = PORT_FIBRE;
4569 /* info->transceiver?? TODO */
4570
4571 if (netif_carrier_ok(sp->dev)) {
4572 info->speed = 10000;
4573 info->duplex = DUPLEX_FULL;
4574 } else {
4575 info->speed = -1;
4576 info->duplex = -1;
4577 }
4578
4579 info->autoneg = AUTONEG_DISABLE;
4580 return 0;
4581}
4582
4583/**
20346722 4584 * s2io_ethtool_gdrvinfo - Returns driver specific information.
4585 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
4586 * s2io_nic structure.
4587 * @info : pointer to the structure with parameters given by ethtool to
4588 * return driver information.
4589 * Description:
4590 * Returns driver specefic information like name, version etc.. to ethtool.
4591 * Return value:
4592 * void
4593 */
4594
4595static void s2io_ethtool_gdrvinfo(struct net_device *dev,
4596 struct ethtool_drvinfo *info)
4597{
1ee6dd77 4598 struct s2io_nic *sp = dev->priv;
1da177e4 4599
dbc2309d
JL
4600 strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
4601 strncpy(info->version, s2io_driver_version, sizeof(info->version));
4602 strncpy(info->fw_version, "", sizeof(info->fw_version));
4603 strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
1da177e4
LT
4604 info->regdump_len = XENA_REG_SPACE;
4605 info->eedump_len = XENA_EEPROM_SPACE;
4606 info->testinfo_len = S2IO_TEST_LEN;
4607 info->n_stats = S2IO_STAT_LEN;
4608}
4609
4610/**
4611 * s2io_ethtool_gregs - dumps the entire space of Xfame into the buffer.
20346722 4612 * @sp: private member of the device structure, which is a pointer to the
1da177e4 4613 * s2io_nic structure.
20346722 4614 * @regs : pointer to the structure with parameters given by ethtool for
1da177e4
LT
4615 * dumping the registers.
4616 * @reg_space: The input argumnet into which all the registers are dumped.
4617 * Description:
4618 * Dumps the entire register space of xFrame NIC into the user given
4619 * buffer area.
4620 * Return value :
4621 * void .
4622*/
4623
4624static void s2io_ethtool_gregs(struct net_device *dev,
4625 struct ethtool_regs *regs, void *space)
4626{
4627 int i;
4628 u64 reg;
4629 u8 *reg_space = (u8 *) space;
1ee6dd77 4630 struct s2io_nic *sp = dev->priv;
1da177e4
LT
4631
4632 regs->len = XENA_REG_SPACE;
4633 regs->version = sp->pdev->subsystem_device;
4634
4635 for (i = 0; i < regs->len; i += 8) {
4636 reg = readq(sp->bar0 + i);
4637 memcpy((reg_space + i), &reg, 8);
4638 }
4639}
4640
4641/**
4642 * s2io_phy_id - timer function that alternates adapter LED.
20346722 4643 * @data : address of the private member of the device structure, which
1da177e4 4644 * is a pointer to the s2io_nic structure, provided as an u32.
20346722 4645 * Description: This is actually the timer function that alternates the
4646 * adapter LED bit of the adapter control bit to set/reset every time on
4647 * invocation. The timer is set for 1/2 a second, hence tha NIC blinks
1da177e4
LT
4648 * once every second.
4649*/
4650static void s2io_phy_id(unsigned long data)
4651{
1ee6dd77
RB
4652 struct s2io_nic *sp = (struct s2io_nic *) data;
4653 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
4654 u64 val64 = 0;
4655 u16 subid;
4656
4657 subid = sp->pdev->subsystem_device;
541ae68f 4658 if ((sp->device_type == XFRAME_II_DEVICE) ||
4659 ((subid & 0xFF) >= 0x07)) {
1da177e4
LT
4660 val64 = readq(&bar0->gpio_control);
4661 val64 ^= GPIO_CTRL_GPIO_0;
4662 writeq(val64, &bar0->gpio_control);
4663 } else {
4664 val64 = readq(&bar0->adapter_control);
4665 val64 ^= ADAPTER_LED_ON;
4666 writeq(val64, &bar0->adapter_control);
4667 }
4668
4669 mod_timer(&sp->id_timer, jiffies + HZ / 2);
4670}
4671
4672/**
4673 * s2io_ethtool_idnic - To physically identify the nic on the system.
4674 * @sp : private member of the device structure, which is a pointer to the
4675 * s2io_nic structure.
20346722 4676 * @id : pointer to the structure with identification parameters given by
1da177e4
LT
4677 * ethtool.
4678 * Description: Used to physically identify the NIC on the system.
20346722 4679 * The Link LED will blink for a time specified by the user for
1da177e4 4680 * identification.
20346722 4681 * NOTE: The Link has to be Up to be able to blink the LED. Hence
1da177e4
LT
4682 * identification is possible only if it's link is up.
4683 * Return value:
4684 * int , returns 0 on success
4685 */
4686
static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
{
	u64 val64 = 0, last_gpio_ctrl_val;
	struct s2io_nic *sp = dev->priv;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u16 subid;

	subid = sp->pdev->subsystem_device;
	/* Remember the GPIO state so it can be restored afterwards on
	 * boards whose link indicator is known to be faulty.
	 */
	last_gpio_ctrl_val = readq(&bar0->gpio_control);
	/* Old Xframe I revisions can only blink the LED while the
	 * adapter (and hence the link) is enabled.
	 */
	if ((sp->device_type == XFRAME_I_DEVICE) &&
	    ((subid & 0xFF) < 0x07)) {
		val64 = readq(&bar0->adapter_control);
		if (!(val64 & ADAPTER_CNTL_EN)) {
			printk(KERN_ERR
			       "Adapter Link down, cannot blink LED\n");
			return -EFAULT;
		}
	}
	/* Lazily set up the blink timer on first use; s2io_phy_id()
	 * toggles the LED and re-arms itself every HZ/2.
	 */
	if (sp->id_timer.function == NULL) {
		init_timer(&sp->id_timer);
		sp->id_timer.function = s2io_phy_id;
		sp->id_timer.data = (unsigned long) sp;
	}
	mod_timer(&sp->id_timer, jiffies);
	/* Sleep for the user-requested duration while the timer blinks.
	 * NOTE(review): msleep_interruptible() takes milliseconds, so
	 * "data * HZ" only equals "data seconds" when HZ == 1000 —
	 * confirm intended units (data * 1000 would be HZ-independent).
	 */
	if (data)
		msleep_interruptible(data * HZ);
	else
		msleep_interruptible(MAX_FLICKER_TIME);
	del_timer_sync(&sp->id_timer);

	/* Restore the saved GPIO state on boards with faulty link LEDs. */
	if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
		writeq(last_gpio_ctrl_val, &bar0->gpio_control);
		last_gpio_ctrl_val = readq(&bar0->gpio_control);
	}

	return 0;
}
4724
4725/**
4726 * s2io_ethtool_getpause_data -Pause frame frame generation and reception.
20346722 4727 * @sp : private member of the device structure, which is a pointer to the
4728 * s2io_nic structure.
1da177e4
LT
4729 * @ep : pointer to the structure with pause parameters given by ethtool.
4730 * Description:
4731 * Returns the Pause frame generation and reception capability of the NIC.
4732 * Return value:
4733 * void
4734 */
4735static void s2io_ethtool_getpause_data(struct net_device *dev,
4736 struct ethtool_pauseparam *ep)
4737{
4738 u64 val64;
1ee6dd77
RB
4739 struct s2io_nic *sp = dev->priv;
4740 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
4741
4742 val64 = readq(&bar0->rmac_pause_cfg);
4743 if (val64 & RMAC_PAUSE_GEN_ENABLE)
4744 ep->tx_pause = TRUE;
4745 if (val64 & RMAC_PAUSE_RX_ENABLE)
4746 ep->rx_pause = TRUE;
4747 ep->autoneg = FALSE;
4748}
4749
4750/**
4751 * s2io_ethtool_setpause_data - set/reset pause frame generation.
20346722 4752 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
4753 * s2io_nic structure.
4754 * @ep : pointer to the structure with pause parameters given by ethtool.
4755 * Description:
4756 * It can be used to set or reset Pause frame generation or reception
4757 * support of the NIC.
4758 * Return value:
4759 * int, returns 0 on Success
4760 */
4761
4762static int s2io_ethtool_setpause_data(struct net_device *dev,
20346722 4763 struct ethtool_pauseparam *ep)
1da177e4
LT
4764{
4765 u64 val64;
1ee6dd77
RB
4766 struct s2io_nic *sp = dev->priv;
4767 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
4768
4769 val64 = readq(&bar0->rmac_pause_cfg);
4770 if (ep->tx_pause)
4771 val64 |= RMAC_PAUSE_GEN_ENABLE;
4772 else
4773 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
4774 if (ep->rx_pause)
4775 val64 |= RMAC_PAUSE_RX_ENABLE;
4776 else
4777 val64 &= ~RMAC_PAUSE_RX_ENABLE;
4778 writeq(val64, &bar0->rmac_pause_cfg);
4779 return 0;
4780}
4781
4782/**
4783 * read_eeprom - reads 4 bytes of data from user given offset.
20346722 4784 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
4785 * s2io_nic structure.
4786 * @off : offset at which the data must be written
4787 * @data : Its an output parameter where the data read at the given
20346722 4788 * offset is stored.
1da177e4 4789 * Description:
20346722 4790 * Will read 4 bytes of data from the user given offset and return the
1da177e4
LT
4791 * read data.
4792 * NOTE: Will allow to read only part of the EEPROM visible through the
4793 * I2C bus.
4794 * Return value:
4795 * -1 on failure and 0 on success.
4796 */
4797
#define S2IO_DEV_ID 5
static int read_eeprom(struct s2io_nic * sp, int off, u64 * data)
{
	int ret = -1;
	u32 exit_cnt = 0;
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* Xframe I exposes the EEPROM through the I2C controller:
	 * issue a 4-byte read and poll for command completion.
	 */
	if (sp->device_type == XFRAME_I_DEVICE) {
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
		    I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
		    I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		/* Poll up to 5 times (50 ms apart) for the transfer to end. */
		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				*data = I2C_CONTROL_GET_DATA(val64);
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}

	/* Xframe II uses the SPI controller instead: program the command,
	 * then set the REQ bit in a second write to kick it off.
	 */
	if (sp->device_type == XFRAME_II_DEVICE) {
		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(0x3) |
			SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
				/* Device NACKed the request. */
				ret = 1;
				break;
			} else if (val64 & SPI_CONTROL_DONE) {
				/* Only the low 3 bytes carry EEPROM data. */
				*data = readq(&bar0->spi_data);
				*data &= 0xffffff;
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}
	return ret;
}
4848
4849/**
4850 * write_eeprom - actually writes the relevant part of the data value.
4851 * @sp : private member of the device structure, which is a pointer to the
4852 * s2io_nic structure.
4853 * @off : offset at which the data must be written
4854 * @data : The data that is to be written
20346722 4855 * @cnt : Number of bytes of the data that are actually to be written into
1da177e4
LT
4856 * the Eeprom. (max of 3)
4857 * Description:
4858 * Actually writes the relevant part of the data value into the Eeprom
4859 * through the I2C bus.
4860 * Return value:
4861 * 0 on success, -1 on failure.
4862 */
4863
static int write_eeprom(struct s2io_nic * sp, int off, u64 data, int cnt)
{
	int exit_cnt = 0, ret = -1;
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* Xframe I: write through the I2C controller and poll for the
	 * command-end bit; a NACK means the write was rejected.
	 */
	if (sp->device_type == XFRAME_I_DEVICE) {
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
		    I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) |
		    I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		/* Poll up to 5 times (50 ms apart) for completion. */
		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				if (!(val64 & I2C_CONTROL_NACK))
					ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}

	/* Xframe II: write through the SPI controller. A byte count of 8
	 * is encoded as 0 in the SPI_CONTROL_BYTECNT field.
	 */
	if (sp->device_type == XFRAME_II_DEVICE) {
		int write_cnt = (cnt == 8) ? 0 : cnt;
		/* Stage the data first, then issue the command. */
		writeq(SPI_DATA_WRITE(data,(cnt<<3)), &bar0->spi_data);

		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(write_cnt) |
			SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		/* Setting REQ in a second write starts the transfer. */
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
				ret = 1;
				break;
			} else if (val64 & SPI_CONTROL_DONE) {
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}
	return ret;
}
1ee6dd77 4913static void s2io_vpd_read(struct s2io_nic *nic)
9dc737a7 4914{
b41477f3
AR
4915 u8 *vpd_data;
4916 u8 data;
9dc737a7
AR
4917 int i=0, cnt, fail = 0;
4918 int vpd_addr = 0x80;
4919
4920 if (nic->device_type == XFRAME_II_DEVICE) {
4921 strcpy(nic->product_name, "Xframe II 10GbE network adapter");
4922 vpd_addr = 0x80;
4923 }
4924 else {
4925 strcpy(nic->product_name, "Xframe I 10GbE network adapter");
4926 vpd_addr = 0x50;
4927 }
19a60522 4928 strcpy(nic->serial_num, "NOT AVAILABLE");
9dc737a7 4929
b41477f3
AR
4930 vpd_data = kmalloc(256, GFP_KERNEL);
4931 if (!vpd_data)
4932 return;
4933
9dc737a7
AR
4934 for (i = 0; i < 256; i +=4 ) {
4935 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
4936 pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data);
4937 pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
4938 for (cnt = 0; cnt <5; cnt++) {
4939 msleep(2);
4940 pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
4941 if (data == 0x80)
4942 break;
4943 }
4944 if (cnt >= 5) {
4945 DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
4946 fail = 1;
4947 break;
4948 }
4949 pci_read_config_dword(nic->pdev, (vpd_addr + 4),
4950 (u32 *)&vpd_data[i]);
4951 }
19a60522
SS
4952
4953 if(!fail) {
4954 /* read serial number of adapter */
4955 for (cnt = 0; cnt < 256; cnt++) {
4956 if ((vpd_data[cnt] == 'S') &&
4957 (vpd_data[cnt+1] == 'N') &&
4958 (vpd_data[cnt+2] < VPD_STRING_LEN)) {
4959 memset(nic->serial_num, 0, VPD_STRING_LEN);
4960 memcpy(nic->serial_num, &vpd_data[cnt + 3],
4961 vpd_data[cnt+2]);
4962 break;
4963 }
4964 }
4965 }
4966
4967 if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
9dc737a7
AR
4968 memset(nic->product_name, 0, vpd_data[1]);
4969 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
4970 }
b41477f3 4971 kfree(vpd_data);
9dc737a7
AR
4972}
4973
1da177e4
LT
4974/**
4975 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
4976 * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
20346722 4977 * @eeprom : pointer to the user level structure provided by ethtool,
1da177e4
LT
4978 * containing all relevant information.
4979 * @data_buf : user defined value to be written into Eeprom.
4980 * Description: Reads the values stored in the Eeprom at given offset
4981 * for a given length. Stores these values int the input argument data
4982 * buffer 'data_buf' and returns these to the caller (ethtool.)
4983 * Return value:
4984 * int 0 on success
4985 */
4986
4987static int s2io_ethtool_geeprom(struct net_device *dev,
20346722 4988 struct ethtool_eeprom *eeprom, u8 * data_buf)
1da177e4 4989{
ad4ebed0 4990 u32 i, valid;
4991 u64 data;
1ee6dd77 4992 struct s2io_nic *sp = dev->priv;
1da177e4
LT
4993
4994 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
4995
4996 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
4997 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
4998
4999 for (i = 0; i < eeprom->len; i += 4) {
5000 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5001 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5002 return -EFAULT;
5003 }
5004 valid = INV(data);
5005 memcpy((data_buf + i), &valid, 4);
5006 }
5007 return 0;
5008}
5009
5010/**
5011 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5012 * @sp : private member of the device structure, which is a pointer to the
5013 * s2io_nic structure.
20346722 5014 * @eeprom : pointer to the user level structure provided by ethtool,
1da177e4
LT
5015 * containing all relevant information.
5016 * @data_buf ; user defined value to be written into Eeprom.
5017 * Description:
5018 * Tries to write the user provided value in the Eeprom, at the offset
5019 * given by the user.
5020 * Return value:
5021 * 0 on success, -EFAULT on failure.
5022 */
5023
5024static int s2io_ethtool_seeprom(struct net_device *dev,
5025 struct ethtool_eeprom *eeprom,
5026 u8 * data_buf)
5027{
5028 int len = eeprom->len, cnt = 0;
ad4ebed0 5029 u64 valid = 0, data;
1ee6dd77 5030 struct s2io_nic *sp = dev->priv;
1da177e4
LT
5031
5032 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5033 DBG_PRINT(ERR_DBG,
5034 "ETHTOOL_WRITE_EEPROM Err: Magic value ");
5035 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
5036 eeprom->magic);
5037 return -EFAULT;
5038 }
5039
5040 while (len) {
5041 data = (u32) data_buf[cnt] & 0x000000FF;
5042 if (data) {
5043 valid = (u32) (data << 24);
5044 } else
5045 valid = data;
5046
5047 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5048 DBG_PRINT(ERR_DBG,
5049 "ETHTOOL_WRITE_EEPROM Err: Cannot ");
5050 DBG_PRINT(ERR_DBG,
5051 "write into the specified offset\n");
5052 return -EFAULT;
5053 }
5054 cnt++;
5055 len--;
5056 }
5057
5058 return 0;
5059}
5060
5061/**
20346722 5062 * s2io_register_test - reads and writes into all clock domains.
5063 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
5064 * s2io_nic structure.
5065 * @data : variable that returns the result of each of the test conducted b
5066 * by the driver.
5067 * Description:
5068 * Read and write into all clock domains. The NIC has 3 clock domains,
5069 * see that registers in all the three regions are accessible.
5070 * Return value:
5071 * 0 on success.
5072 */
5073
static int s2io_register_test(struct s2io_nic * sp, uint64_t * data)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = 0, exp_val;
	int fail = 0;

	/* Read test 1: swapper feedback register has a known pattern. */
	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x123456789abcdefULL) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
	}

	/* Read test 2: RMAC pause config reset value. */
	val64 = readq(&bar0->rmac_pause_cfg);
	if (val64 != 0xc000ffff00000000ULL) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
	}

	/* Read test 3: rx_queue_cfg reset value differs per device type. */
	val64 = readq(&bar0->rx_queue_cfg);
	if (sp->device_type == XFRAME_II_DEVICE)
		exp_val = 0x0404040404040404ULL;
	else
		exp_val = 0x0808080808080808ULL;
	if (val64 != exp_val) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
	}

	/* Read test 4: XGXS elastic FIFO config reset value. */
	val64 = readq(&bar0->xgxs_efifo_cfg);
	if (val64 != 0x000000001923141EULL) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
	}

	/* Write tests: xmsi_data must read back both bit patterns. */
	val64 = 0x5A5A5A5A5A5A5A5AULL;
	writeq(val64, &bar0->xmsi_data);
	val64 = readq(&bar0->xmsi_data);
	if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
		fail = 1;
		DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
	}

	val64 = 0xA5A5A5A5A5A5A5A5ULL;
	writeq(val64, &bar0->xmsi_data);
	val64 = readq(&bar0->xmsi_data);
	if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
		fail = 1;
		DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
	}

	/* Report 0 on success, 1 on any failed sub-test. */
	*data = fail;
	return fail;
}
5127
5128/**
20346722 5129 * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
1da177e4
LT
5130 * @sp : private member of the device structure, which is a pointer to the
5131 * s2io_nic structure.
5132 * @data:variable that returns the result of each of the test conducted by
5133 * the driver.
5134 * Description:
20346722 5135 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
1da177e4
LT
5136 * register.
5137 * Return value:
5138 * 0 on success.
5139 */
5140
static int s2io_eeprom_test(struct s2io_nic * sp, uint64_t * data)
{
	int fail = 0;
	u64 ret_data, org_4F0, org_7F0;
	u8 saved_4F0 = 0, saved_7F0 = 0;
	struct net_device *dev = sp->dev;

	/* Negative test: a write to offset 0 must be rejected.
	 * Note that the SPI interface allows write access to all areas
	 * of EEPROM, hence negative testing is done only for Xframe I.
	 */
	if (sp->device_type == XFRAME_I_DEVICE)
		if (!write_eeprom(sp, 0, 0, 3))
			fail = 1;

	/* Save the current contents of the two scratch offsets so they
	 * can be restored at the end of the test.
	 */
	if (!read_eeprom(sp, 0x4F0, &org_4F0))
		saved_4F0 = 1;
	if (!read_eeprom(sp, 0x7F0, &org_7F0))
		saved_7F0 = 1;

	/* Positive test: write/read-back at offset 0x4F0. */
	if (write_eeprom(sp, 0x4F0, 0x012345, 3))
		fail = 1;
	if (read_eeprom(sp, 0x4F0, &ret_data))
		fail = 1;

	if (ret_data != 0x012345) {
		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
			"Data written %llx Data read %llx\n",
			dev->name, (unsigned long long)0x12345,
			(unsigned long long)ret_data);
		fail = 1;
	}

	/* Reset the EEPROM data at 0x4F0 back to all-ones. */
	write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);

	/* Negative test: a write to offset 0x7C must be rejected. */
	if (sp->device_type == XFRAME_I_DEVICE)
		if (!write_eeprom(sp, 0x07C, 0, 3))
			fail = 1;

	/* Positive test: write/read-back at offset 0x7F0. */
	if (write_eeprom(sp, 0x7F0, 0x012345, 3))
		fail = 1;
	if (read_eeprom(sp, 0x7F0, &ret_data))
		fail = 1;

	if (ret_data != 0x012345) {
		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
			"Data written %llx Data read %llx\n",
			dev->name, (unsigned long long)0x12345,
			(unsigned long long)ret_data);
		fail = 1;
	}

	/* Reset the EEPROM data at 0x7F0 back to all-ones. */
	write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);

	/* Remaining negative tests on protected offsets (Xframe I only). */
	if (sp->device_type == XFRAME_I_DEVICE) {
		/* Test Write Error at offset 0x80 */
		if (!write_eeprom(sp, 0x080, 0, 3))
			fail = 1;

		/* Test Write Error at offset 0xfc */
		if (!write_eeprom(sp, 0x0FC, 0, 3))
			fail = 1;

		/* Test Write Error at offset 0x100 */
		if (!write_eeprom(sp, 0x100, 0, 3))
			fail = 1;

		/* Test Write Error at offset 4ec */
		if (!write_eeprom(sp, 0x4EC, 0, 3))
			fail = 1;
	}

	/* Restore the saved values at offsets 0x4F0 and 0x7F0. */
	if (saved_4F0)
		write_eeprom(sp, 0x4F0, org_4F0, 3);
	if (saved_7F0)
		write_eeprom(sp, 0x7F0, org_7F0, 3);

	/* Report 0 on success, 1 on any failed sub-test. */
	*data = fail;
	return fail;
}
5228
5229/**
5230 * s2io_bist_test - invokes the MemBist test of the card .
20346722 5231 * @sp : private member of the device structure, which is a pointer to the
1da177e4 5232 * s2io_nic structure.
20346722 5233 * @data:variable that returns the result of each of the test conducted by
1da177e4
LT
5234 * the driver.
5235 * Description:
5236 * This invokes the MemBist test of the card. We give around
5237 * 2 secs time for the Test to complete. If it's still not complete
20346722 5238 * within this period, we consider that the test failed.
1da177e4
LT
5239 * Return value:
5240 * 0 on success and -1 on failure.
5241 */
5242
1ee6dd77 5243static int s2io_bist_test(struct s2io_nic * sp, uint64_t * data)
1da177e4
LT
5244{
5245 u8 bist = 0;
5246 int cnt = 0, ret = -1;
5247
5248 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5249 bist |= PCI_BIST_START;
5250 pci_write_config_word(sp->pdev, PCI_BIST, bist);
5251
5252 while (cnt < 20) {
5253 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5254 if (!(bist & PCI_BIST_START)) {
5255 *data = (bist & PCI_BIST_CODE_MASK);
5256 ret = 0;
5257 break;
5258 }
5259 msleep(100);
5260 cnt++;
5261 }
5262
5263 return ret;
5264}
5265
5266/**
20346722 5267 * s2io_link_test - verifies the link state of the nic
5268 * @sp ; private member of the device structure, which is a pointer to the
1da177e4
LT
5269 * s2io_nic structure.
5270 * @data: variable that returns the result of each of the test conducted by
5271 * the driver.
5272 * Description:
20346722 5273 * The function verifies the link state of the NIC and updates the input
1da177e4
LT
5274 * argument 'data' appropriately.
5275 * Return value:
5276 * 0 on success.
5277 */
5278
1ee6dd77 5279static int s2io_link_test(struct s2io_nic * sp, uint64_t * data)
1da177e4 5280{
1ee6dd77 5281 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
5282 u64 val64;
5283
5284 val64 = readq(&bar0->adapter_status);
c92ca04b 5285 if(!(LINK_IS_UP(val64)))
1da177e4 5286 *data = 1;
c92ca04b
AR
5287 else
5288 *data = 0;
1da177e4 5289
b41477f3 5290 return *data;
1da177e4
LT
5291}
5292
5293/**
20346722 5294 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
5295 * @sp - private member of the device structure, which is a pointer to the
1da177e4 5296 * s2io_nic structure.
20346722 5297 * @data - variable that returns the result of each of the test
1da177e4
LT
5298 * conducted by the driver.
5299 * Description:
20346722 5300 * This is one of the offline test that tests the read and write
1da177e4
LT
5301 * access to the RldRam chip on the NIC.
5302 * Return value:
5303 * 0 on success.
5304 */
5305
static int s2io_rldram_test(struct s2io_nic * sp, uint64_t * data)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;
	int cnt, iteration = 0, test_fail = 0;

	/* Disable ECC while exercising the RLDRAM in test mode. */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/* Put the memory controller into RLDRAM test mode. */
	val64 = readq(&bar0->mc_rldram_test_ctrl);
	val64 |= MC_RLDRAM_TEST_MODE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

	/* Program the RLDRAM mode register set; queue size first, then
	 * enable, each via the upper-flank special write.
	 */
	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

	val64 |= MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

	/* Two passes: the second inverts the upper 48 bits of each
	 * test pattern to toggle every data line both ways.
	 */
	while (iteration < 2) {
		val64 = 0x55555555aaaa0000ULL;
		if (iteration == 1) {
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		}
		writeq(val64, &bar0->mc_rldram_test_d0);

		val64 = 0xaaaa5a5555550000ULL;
		if (iteration == 1) {
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		}
		writeq(val64, &bar0->mc_rldram_test_d1);

		val64 = 0x55aaaaaaaa5a0000ULL;
		if (iteration == 1) {
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		}
		writeq(val64, &bar0->mc_rldram_test_d2);

		/* Address/range over which the test patterns are driven. */
		val64 = (u64) (0x0000003ffffe0100ULL);
		writeq(val64, &bar0->mc_rldram_test_add);

		/* Kick off the write phase and poll (5 x 200 ms) for done. */
		val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
			MC_RLDRAM_TEST_GO;
		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

		for (cnt = 0; cnt < 5; cnt++) {
			val64 = readq(&bar0->mc_rldram_test_ctrl);
			if (val64 & MC_RLDRAM_TEST_DONE)
				break;
			msleep(200);
		}

		/* Timed out waiting for the write phase. */
		if (cnt == 5)
			break;

		/* Kick off the read/compare phase and poll (5 x 500 ms). */
		val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

		for (cnt = 0; cnt < 5; cnt++) {
			val64 = readq(&bar0->mc_rldram_test_ctrl);
			if (val64 & MC_RLDRAM_TEST_DONE)
				break;
			msleep(500);
		}

		/* Timed out waiting for the read phase. */
		if (cnt == 5)
			break;

		/* Hardware sets PASS only if the read-back matched. */
		val64 = readq(&bar0->mc_rldram_test_ctrl);
		if (!(val64 & MC_RLDRAM_TEST_PASS))
			test_fail = 1;

		iteration++;
	}

	*data = test_fail;

	/* Bring the adapter out of test mode */
	SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);

	return test_fail;
}
5390
5391/**
 5392 * s2io_ethtool_test - conducts six tests to determine the health of card.
5393 * @sp : private member of the device structure, which is a pointer to the
5394 * s2io_nic structure.
5395 * @ethtest : pointer to a ethtool command specific structure that will be
5396 * returned to the user.
20346722 5397 * @data : variable that returns the result of each of the test
1da177e4
LT
5398 * conducted by the driver.
5399 * Description:
5400 * This function conducts 6 tests ( 4 offline and 2 online) to determine
5401 * the health of the card.
5402 * Return value:
5403 * void
5404 */
5405
static void s2io_ethtool_test(struct net_device *dev,
			      struct ethtool_test *ethtest,
			      uint64_t * data)
{
	struct s2io_nic *sp = dev->priv;
	int orig_state = netif_running(sp->dev);

	if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
		/* Offline Tests: the interface is closed for the duration
		 * and reopened afterwards if it was originally up.
		 */
		if (orig_state)
			s2io_close(sp->dev);

		if (s2io_register_test(sp, &data[0]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		/* Reset between tests that leave the adapter in a
		 * non-operational state.
		 */
		s2io_reset(sp);

		if (s2io_rldram_test(sp, &data[3]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		s2io_reset(sp);

		if (s2io_eeprom_test(sp, &data[1]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		if (s2io_bist_test(sp, &data[4]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		if (orig_state)
			s2io_open(sp->dev);

		/* Link test (slot 2) is not run offline. */
		data[2] = 0;
	} else {
		/* Online Tests. */
		if (!orig_state) {
			DBG_PRINT(ERR_DBG,
				  "%s: is not up, cannot run test\n",
				  dev->name);
			/* NOTE(review): these -1 markers are overwritten
			 * by the assignments below; if they are meant to
			 * reach ethtool this branch should return early.
			 */
			data[0] = -1;
			data[1] = -1;
			data[2] = -1;
			data[3] = -1;
			data[4] = -1;
		}

		/* Only the link test (slot 2) runs online. */
		if (s2io_link_test(sp, &data[2]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		data[0] = 0;
		data[1] = 0;
		data[3] = 0;
		data[4] = 0;
	}
}
5460
5461static void s2io_get_ethtool_stats(struct net_device *dev,
5462 struct ethtool_stats *estats,
5463 u64 * tmp_stats)
5464{
5465 int i = 0;
1ee6dd77
RB
5466 struct s2io_nic *sp = dev->priv;
5467 struct stat_block *stat_info = sp->mac_control.stats_info;
1da177e4 5468
7ba013ac 5469 s2io_updt_stats(sp);
541ae68f 5470 tmp_stats[i++] =
5471 (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32 |
5472 le32_to_cpu(stat_info->tmac_frms);
5473 tmp_stats[i++] =
5474 (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
5475 le32_to_cpu(stat_info->tmac_data_octets);
1da177e4 5476 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
541ae68f 5477 tmp_stats[i++] =
5478 (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
5479 le32_to_cpu(stat_info->tmac_mcst_frms);
5480 tmp_stats[i++] =
5481 (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
5482 le32_to_cpu(stat_info->tmac_bcst_frms);
1da177e4 5483 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
bd1034f0
AR
5484 tmp_stats[i++] =
5485 (u64)le32_to_cpu(stat_info->tmac_ttl_octets_oflow) << 32 |
5486 le32_to_cpu(stat_info->tmac_ttl_octets);
5487 tmp_stats[i++] =
5488 (u64)le32_to_cpu(stat_info->tmac_ucst_frms_oflow) << 32 |
5489 le32_to_cpu(stat_info->tmac_ucst_frms);
5490 tmp_stats[i++] =
5491 (u64)le32_to_cpu(stat_info->tmac_nucst_frms_oflow) << 32 |
5492 le32_to_cpu(stat_info->tmac_nucst_frms);
541ae68f 5493 tmp_stats[i++] =
5494 (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
5495 le32_to_cpu(stat_info->tmac_any_err_frms);
bd1034f0 5496 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_ttl_less_fb_octets);
1da177e4 5497 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
541ae68f 5498 tmp_stats[i++] =
5499 (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
5500 le32_to_cpu(stat_info->tmac_vld_ip);
5501 tmp_stats[i++] =
5502 (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
5503 le32_to_cpu(stat_info->tmac_drop_ip);
5504 tmp_stats[i++] =
5505 (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
5506 le32_to_cpu(stat_info->tmac_icmp);
5507 tmp_stats[i++] =
5508 (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
5509 le32_to_cpu(stat_info->tmac_rst_tcp);
1da177e4 5510 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
541ae68f 5511 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
5512 le32_to_cpu(stat_info->tmac_udp);
5513 tmp_stats[i++] =
5514 (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
5515 le32_to_cpu(stat_info->rmac_vld_frms);
5516 tmp_stats[i++] =
5517 (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
5518 le32_to_cpu(stat_info->rmac_data_octets);
1da177e4
LT
5519 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
5520 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
541ae68f 5521 tmp_stats[i++] =
5522 (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
5523 le32_to_cpu(stat_info->rmac_vld_mcst_frms);
5524 tmp_stats[i++] =
5525 (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
5526 le32_to_cpu(stat_info->rmac_vld_bcst_frms);
1da177e4 5527 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
bd1034f0 5528 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_out_rng_len_err_frms);
1da177e4
LT
5529 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
5530 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
bd1034f0
AR
5531 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_unsup_ctrl_frms);
5532 tmp_stats[i++] =
5533 (u64)le32_to_cpu(stat_info->rmac_ttl_octets_oflow) << 32 |
5534 le32_to_cpu(stat_info->rmac_ttl_octets);
5535 tmp_stats[i++] =
5536 (u64)le32_to_cpu(stat_info->rmac_accepted_ucst_frms_oflow)
5537 << 32 | le32_to_cpu(stat_info->rmac_accepted_ucst_frms);
5538 tmp_stats[i++] =
5539 (u64)le32_to_cpu(stat_info->rmac_accepted_nucst_frms_oflow)
5540 << 32 | le32_to_cpu(stat_info->rmac_accepted_nucst_frms);
541ae68f 5541 tmp_stats[i++] =
5542 (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
5543 le32_to_cpu(stat_info->rmac_discarded_frms);
bd1034f0
AR
5544 tmp_stats[i++] =
5545 (u64)le32_to_cpu(stat_info->rmac_drop_events_oflow)
5546 << 32 | le32_to_cpu(stat_info->rmac_drop_events);
5547 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_less_fb_octets);
5548 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_frms);
541ae68f 5549 tmp_stats[i++] =
5550 (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
5551 le32_to_cpu(stat_info->rmac_usized_frms);
5552 tmp_stats[i++] =
5553 (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
5554 le32_to_cpu(stat_info->rmac_osized_frms);
5555 tmp_stats[i++] =
5556 (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
5557 le32_to_cpu(stat_info->rmac_frag_frms);
5558 tmp_stats[i++] =
5559 (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
5560 le32_to_cpu(stat_info->rmac_jabber_frms);
bd1034f0
AR
5561 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_64_frms);
5562 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_65_127_frms);
5563 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_128_255_frms);
5564 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_256_511_frms);
5565 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_512_1023_frms);
5566 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1024_1518_frms);
5567 tmp_stats[i++] =
5568 (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
541ae68f 5569 le32_to_cpu(stat_info->rmac_ip);
1da177e4
LT
5570 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
5571 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
bd1034f0
AR
5572 tmp_stats[i++] =
5573 (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
541ae68f 5574 le32_to_cpu(stat_info->rmac_drop_ip);
bd1034f0
AR
5575 tmp_stats[i++] =
5576 (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
541ae68f 5577 le32_to_cpu(stat_info->rmac_icmp);
1da177e4 5578 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
bd1034f0
AR
5579 tmp_stats[i++] =
5580 (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
541ae68f 5581 le32_to_cpu(stat_info->rmac_udp);
5582 tmp_stats[i++] =
5583 (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
5584 le32_to_cpu(stat_info->rmac_err_drp_udp);
bd1034f0
AR
5585 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_err_sym);
5586 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q0);
5587 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q1);
5588 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q2);
5589 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q3);
5590 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q4);
5591 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q5);
5592 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q6);
5593 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q7);
5594 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q0);
5595 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q1);
5596 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q2);
5597 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q3);
5598 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q4);
5599 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q5);
5600 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q6);
5601 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q7);
541ae68f 5602 tmp_stats[i++] =
5603 (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
5604 le32_to_cpu(stat_info->rmac_pause_cnt);
bd1034f0
AR
5605 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_data_err_cnt);
5606 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_ctrl_err_cnt);
541ae68f 5607 tmp_stats[i++] =
5608 (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
5609 le32_to_cpu(stat_info->rmac_accepted_ip);
1da177e4 5610 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
bd1034f0
AR
5611 tmp_stats[i++] = le32_to_cpu(stat_info->rd_req_cnt);
5612 tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_cnt);
5613 tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_rtry_cnt);
5614 tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_cnt);
5615 tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_rd_ack_cnt);
5616 tmp_stats[i++] = le32_to_cpu(stat_info->wr_req_cnt);
5617 tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_cnt);
5618 tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_rtry_cnt);
5619 tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_cnt);
5620 tmp_stats[i++] = le32_to_cpu(stat_info->wr_disc_cnt);
5621 tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_wr_ack_cnt);
5622 tmp_stats[i++] = le32_to_cpu(stat_info->txp_wr_cnt);
5623 tmp_stats[i++] = le32_to_cpu(stat_info->txd_rd_cnt);
5624 tmp_stats[i++] = le32_to_cpu(stat_info->txd_wr_cnt);
5625 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_rd_cnt);
5626 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_wr_cnt);
5627 tmp_stats[i++] = le32_to_cpu(stat_info->txf_rd_cnt);
5628 tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt);
5629 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms);
5630 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms);
5631 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_8192_max_frms);
5632 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms);
5633 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms);
5634 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms);
5635 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms);
5636 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms);
5637 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard);
5638 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard);
5639 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard);
5640 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard);
5641 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard);
5642 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard);
5643 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard);
5644 tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt);
7ba013ac 5645 tmp_stats[i++] = 0;
5646 tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
5647 tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
bd1034f0
AR
5648 tmp_stats[i++] = stat_info->sw_stat.parity_err_cnt;
5649 tmp_stats[i++] = stat_info->sw_stat.serious_err_cnt;
5650 tmp_stats[i++] = stat_info->sw_stat.soft_reset_cnt;
5651 tmp_stats[i++] = stat_info->sw_stat.fifo_full_cnt;
5652 tmp_stats[i++] = stat_info->sw_stat.ring_full_cnt;
5653 tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_high;
5654 tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_low;
5655 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_high;
5656 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_low;
5657 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_high;
5658 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_low;
5659 tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_high;
5660 tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_low;
5661 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_high;
5662 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_low;
5663 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_high;
5664 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_low;
7d3d0439
RA
5665 tmp_stats[i++] = stat_info->sw_stat.clubbed_frms_cnt;
5666 tmp_stats[i++] = stat_info->sw_stat.sending_both;
5667 tmp_stats[i++] = stat_info->sw_stat.outof_sequence_pkts;
5668 tmp_stats[i++] = stat_info->sw_stat.flush_max_pkts;
fe931395 5669 if (stat_info->sw_stat.num_aggregations) {
bd1034f0
AR
5670 u64 tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
5671 int count = 0;
6aa20a22 5672 /*
bd1034f0
AR
5673 * Since 64-bit divide does not work on all platforms,
5674 * do repeated subtraction.
5675 */
5676 while (tmp >= stat_info->sw_stat.num_aggregations) {
5677 tmp -= stat_info->sw_stat.num_aggregations;
5678 count++;
5679 }
5680 tmp_stats[i++] = count;
fe931395 5681 }
bd1034f0
AR
5682 else
5683 tmp_stats[i++] = 0;
1da177e4
LT
5684}
5685
ac1f60db 5686static int s2io_ethtool_get_regs_len(struct net_device *dev)
1da177e4
LT
5687{
5688 return (XENA_REG_SPACE);
5689}
5690
5691
ac1f60db 5692static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
1da177e4 5693{
1ee6dd77 5694 struct s2io_nic *sp = dev->priv;
1da177e4
LT
5695
5696 return (sp->rx_csum);
5697}
ac1f60db
AB
5698
5699static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
1da177e4 5700{
1ee6dd77 5701 struct s2io_nic *sp = dev->priv;
1da177e4
LT
5702
5703 if (data)
5704 sp->rx_csum = 1;
5705 else
5706 sp->rx_csum = 0;
5707
5708 return 0;
5709}
ac1f60db
AB
5710
5711static int s2io_get_eeprom_len(struct net_device *dev)
1da177e4
LT
5712{
5713 return (XENA_EEPROM_SPACE);
5714}
5715
ac1f60db 5716static int s2io_ethtool_self_test_count(struct net_device *dev)
1da177e4
LT
5717{
5718 return (S2IO_TEST_LEN);
5719}
ac1f60db
AB
5720
5721static void s2io_ethtool_get_strings(struct net_device *dev,
5722 u32 stringset, u8 * data)
1da177e4
LT
5723{
5724 switch (stringset) {
5725 case ETH_SS_TEST:
5726 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
5727 break;
5728 case ETH_SS_STATS:
5729 memcpy(data, &ethtool_stats_keys,
5730 sizeof(ethtool_stats_keys));
5731 }
5732}
1da177e4
LT
5733static int s2io_ethtool_get_stats_count(struct net_device *dev)
5734{
5735 return (S2IO_STAT_LEN);
5736}
5737
ac1f60db 5738static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
1da177e4
LT
5739{
5740 if (data)
5741 dev->features |= NETIF_F_IP_CSUM;
5742 else
5743 dev->features &= ~NETIF_F_IP_CSUM;
5744
5745 return 0;
5746}
5747
75c30b13
AR
5748static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
5749{
5750 return (dev->features & NETIF_F_TSO) != 0;
5751}
5752static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
5753{
5754 if (data)
5755 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
5756 else
5757 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
5758
5759 return 0;
5760}
1da177e4 5761
/* ethtool callback table wired into the netdevice at probe time.
 * Driver-specific handlers are mixed with generic ethtool_op_* helpers
 * (link, tx-csum get, sg, ufo); keep pairs (get/set) adjacent. */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_settings = s2io_ethtool_gset,
	.set_settings = s2io_ethtool_sset,
	.get_drvinfo = s2io_ethtool_gdrvinfo,
	.get_regs_len = s2io_ethtool_get_regs_len,
	.get_regs = s2io_ethtool_gregs,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = s2io_get_eeprom_len,
	.get_eeprom = s2io_ethtool_geeprom,
	.set_eeprom = s2io_ethtool_seeprom,
	.get_pauseparam = s2io_ethtool_getpause_data,
	.set_pauseparam = s2io_ethtool_setpause_data,
	.get_rx_csum = s2io_ethtool_get_rx_csum,
	.set_rx_csum = s2io_ethtool_set_rx_csum,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = s2io_ethtool_op_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
	.get_tso = s2io_ethtool_op_get_tso,
	.set_tso = s2io_ethtool_op_set_tso,
	.get_ufo = ethtool_op_get_ufo,
	.set_ufo = ethtool_op_set_ufo,
	.self_test_count = s2io_ethtool_self_test_count,
	.self_test = s2io_ethtool_test,
	.get_strings = s2io_ethtool_get_strings,
	.phys_id = s2io_ethtool_idnic,
	.get_stats_count = s2io_ethtool_get_stats_count,
	.get_ethtool_stats = s2io_get_ethtool_stats
};
5791
5792/**
20346722 5793 * s2io_ioctl - Entry point for the Ioctl
1da177e4
LT
5794 * @dev : Device pointer.
5795 * @ifr : An IOCTL specefic structure, that can contain a pointer to
5796 * a proprietary structure used to pass information to the driver.
5797 * @cmd : This is used to distinguish between the different commands that
5798 * can be passed to the IOCTL functions.
5799 * Description:
20346722 5800 * Currently there are no special functionality supported in IOCTL, hence
5801 * function always return EOPNOTSUPPORTED
1da177e4
LT
5802 */
5803
ac1f60db 5804static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1da177e4
LT
5805{
5806 return -EOPNOTSUPP;
5807}
5808
/**
 * s2io_change_mtu - entry point to change MTU size for the device.
 * @dev : device pointer.
 * @new_mtu : the new MTU size for the device.
 * Description: A driver entry point to change MTU size for the device.
 * Before changing the MTU the device must be stopped.
 * Return value:
 * 0 on success and an appropriate (-)ve integer as defined in errno.h
 * file on failure.
 */

static int s2io_change_mtu(struct net_device *dev, int new_mtu)
{
	struct s2io_nic *sp = dev->priv;

	/* Reject sizes outside the range the hardware supports. */
	if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
		DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
			  dev->name);
		return -EPERM;
	}

	dev->mtu = new_mtu;
	if (netif_running(dev)) {
		/* Full restart: the new MTU takes effect via init_nic()
		 * inside s2io_card_up(). */
		s2io_card_down(sp);
		netif_stop_queue(dev);
		if (s2io_card_up(sp)) {
			DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
				  __FUNCTION__);
		}
		/* NOTE(review): on card_up failure the queue is still woken
		 * below and 0 is returned — presumably intentional best
		 * effort, but verify against later upstream behavior. */
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);
	} else { /* Device is down */
		/* Program the max payload length register directly since
		 * there is no restart to pick the value up. */
		struct XENA_dev_config __iomem *bar0 = sp->bar0;
		u64 val64 = new_mtu;

		writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
	}

	return 0;
}
5849
/**
 * s2io_tasklet - Bottom half of the ISR.
 * @dev_addr : address of the net_device, cast to unsigned long.
 * Description:
 * This is the tasklet or the bottom half of the ISR. This is
 * an extension of the ISR which is scheduled by the scheduler to be run
 * when the load on the CPU is low. All low priority tasks of the ISR can
 * be pushed into the tasklet. For now the tasklet is used only to
 * replenish the Rx buffers in the Rx buffer descriptors.
 * Return value:
 * void.
 */

static void s2io_tasklet(unsigned long dev_addr)
{
	struct net_device *dev = (struct net_device *) dev_addr;
	struct s2io_nic *sp = dev->priv;
	int i, ret;
	struct mac_info *mac_control;
	struct config_param *config;

	mac_control = &sp->mac_control;
	config = &sp->config;

	/* TASKLET_IN_USE test-and-sets a busy bit so concurrent scheduling
	 * of the tasklet does not refill the rings twice. */
	if (!TASKLET_IN_USE) {
		for (i = 0; i < config->rx_ring_num; i++) {
			ret = fill_rx_buffers(sp, i);
			if (ret == -ENOMEM) {
				DBG_PRINT(ERR_DBG, "%s: Out of ",
					  dev->name);
				DBG_PRINT(ERR_DBG, "memory in tasklet\n");
				break;
			} else if (ret == -EFILL) {
				/* Ring already full: nothing to replenish. */
				DBG_PRINT(ERR_DBG,
					  "%s: Rx Ring %d is full\n",
					  dev->name, i);
				break;
			}
		}
		/* Release the busy bit taken by TASKLET_IN_USE. */
		clear_bit(0, (&sp->tasklet_status));
	}
}
5892
5893/**
5894 * s2io_set_link - Set the LInk status
5895 * @data: long pointer to device private structue
5896 * Description: Sets the link status for the adapter
5897 */
5898
c4028958 5899static void s2io_set_link(struct work_struct *work)
1da177e4 5900{
1ee6dd77 5901 struct s2io_nic *nic = container_of(work, struct s2io_nic, set_link_task);
1da177e4 5902 struct net_device *dev = nic->dev;
1ee6dd77 5903 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1da177e4
LT
5904 register u64 val64;
5905 u16 subid;
5906
22747d6b
FR
5907 rtnl_lock();
5908
5909 if (!netif_running(dev))
5910 goto out_unlock;
5911
1da177e4
LT
5912 if (test_and_set_bit(0, &(nic->link_state))) {
5913 /* The card is being reset, no point doing anything */
22747d6b 5914 goto out_unlock;
1da177e4
LT
5915 }
5916
5917 subid = nic->pdev->subsystem_device;
a371a07d 5918 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
5919 /*
5920 * Allow a small delay for the NICs self initiated
5921 * cleanup to complete.
5922 */
5923 msleep(100);
5924 }
1da177e4
LT
5925
5926 val64 = readq(&bar0->adapter_status);
19a60522
SS
5927 if (LINK_IS_UP(val64)) {
5928 if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
5929 if (verify_xena_quiescence(nic)) {
5930 val64 = readq(&bar0->adapter_control);
5931 val64 |= ADAPTER_CNTL_EN;
1da177e4 5932 writeq(val64, &bar0->adapter_control);
19a60522
SS
5933 if (CARDS_WITH_FAULTY_LINK_INDICATORS(
5934 nic->device_type, subid)) {
5935 val64 = readq(&bar0->gpio_control);
5936 val64 |= GPIO_CTRL_GPIO_0;
5937 writeq(val64, &bar0->gpio_control);
5938 val64 = readq(&bar0->gpio_control);
5939 } else {
5940 val64 |= ADAPTER_LED_ON;
5941 writeq(val64, &bar0->adapter_control);
a371a07d 5942 }
1da177e4 5943 nic->device_enabled_once = TRUE;
19a60522
SS
5944 } else {
5945 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
5946 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
5947 netif_stop_queue(dev);
1da177e4 5948 }
19a60522
SS
5949 }
5950 val64 = readq(&bar0->adapter_status);
5951 if (!LINK_IS_UP(val64)) {
5952 DBG_PRINT(ERR_DBG, "%s:", dev->name);
5953 DBG_PRINT(ERR_DBG, " Link down after enabling ");
5954 DBG_PRINT(ERR_DBG, "device \n");
5955 } else
1da177e4 5956 s2io_link(nic, LINK_UP);
19a60522
SS
5957 } else {
5958 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
5959 subid)) {
5960 val64 = readq(&bar0->gpio_control);
5961 val64 &= ~GPIO_CTRL_GPIO_0;
5962 writeq(val64, &bar0->gpio_control);
5963 val64 = readq(&bar0->gpio_control);
1da177e4 5964 }
19a60522 5965 s2io_link(nic, LINK_DOWN);
1da177e4
LT
5966 }
5967 clear_bit(0, &(nic->link_state));
22747d6b
FR
5968
5969out_unlock:
5970 rtnl_lock();
1da177e4
LT
5971}
5972
/* Re-arm one Rx descriptor with DMA buffer pointers, for all three ring
 * modes, without processing any received frame. Used while the card is
 * going down (see rxd_owner_bit_reset). Descriptors whose Host_Control
 * is non-zero already own an skb and are left alone. The temp0/1/2
 * cookies cache the last mapped addresses so descriptors with a NULL
 * Host_Control can reuse them instead of mapping fresh buffers.
 * Returns 0 on success, -ENOMEM when an skb allocation fails. */
static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
				struct buffAdd *ba,
				struct sk_buff **skb, u64 *temp0, u64 *temp1,
				u64 *temp2, int size)
{
	struct net_device *dev = sp->dev;
	struct sk_buff *frag_list;

	if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
		/* allocate skb */
		if (*skb) {
			DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
			/*
			 * As Rx frames are not going to be processed,
			 * reuse the same mapped address for the Rxd
			 * buffer pointer
			 */
			((struct RxD1*)rxdp)->Buffer0_ptr = *temp0;
		} else {
			*skb = dev_alloc_skb(size);
			if (!(*skb)) {
				DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
				DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
				return -ENOMEM ;
			}
			/* storing the mapped addr in a temp variable
			 * such it will be used for next rxd whose
			 * Host Control is NULL
			 */
			((struct RxD1*)rxdp)->Buffer0_ptr = *temp0 =
				pci_map_single( sp->pdev, (*skb)->data,
					size - NET_IP_ALIGN,
					PCI_DMA_FROMDEVICE);
			rxdp->Host_Control = (unsigned long) (*skb);
		}
	} else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
		/* Two buffer Mode: Buffer0 = header scratch (ba_0),
		 * Buffer1 = dummy (ba_1), Buffer2 = payload. */
		if (*skb) {
			((struct RxD3*)rxdp)->Buffer2_ptr = *temp2;
			((struct RxD3*)rxdp)->Buffer0_ptr = *temp0;
			((struct RxD3*)rxdp)->Buffer1_ptr = *temp1;
		} else {
			*skb = dev_alloc_skb(size);
			if (!(*skb)) {
				DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n",
					dev->name);
				return -ENOMEM;
			}
			((struct RxD3*)rxdp)->Buffer2_ptr = *temp2 =
				pci_map_single(sp->pdev, (*skb)->data,
					       dev->mtu + 4,
					       PCI_DMA_FROMDEVICE);
			((struct RxD3*)rxdp)->Buffer0_ptr = *temp0 =
				pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
						PCI_DMA_FROMDEVICE);
			rxdp->Host_Control = (unsigned long) (*skb);

			/* Buffer-1 will be dummy buffer not used */
			((struct RxD3*)rxdp)->Buffer1_ptr = *temp1 =
				pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
					       PCI_DMA_FROMDEVICE);
		}
	} else if ((rxdp->Host_Control == 0)) {
		/* Three buffer mode: Buffer0 = scratch, Buffer1 = L3/L4
		 * headers, Buffer2 = L4 payload in a frag_list skb. */
		if (*skb) {
			((struct RxD3*)rxdp)->Buffer0_ptr = *temp0;
			((struct RxD3*)rxdp)->Buffer1_ptr = *temp1;
			((struct RxD3*)rxdp)->Buffer2_ptr = *temp2;
		} else {
			*skb = dev_alloc_skb(size);
			if (!(*skb)) {
				DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n",
					  dev->name);
				return -ENOMEM;
			}
			((struct RxD3*)rxdp)->Buffer0_ptr = *temp0 =
				pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN,
					       PCI_DMA_FROMDEVICE);
			/* Buffer-1 receives L3/L4 headers */
			((struct RxD3*)rxdp)->Buffer1_ptr = *temp1 =
				pci_map_single( sp->pdev, (*skb)->data,
						l3l4hdr_size + 4,
						PCI_DMA_FROMDEVICE);
			/*
			 * skb_shinfo(skb)->frag_list will have L4
			 * data payload
			 */
			skb_shinfo(*skb)->frag_list = dev_alloc_skb(dev->mtu +
								    ALIGN_SIZE);
			if (skb_shinfo(*skb)->frag_list == NULL) {
				/* NOTE(review): *skb (and its two mappings)
				 * appear to leak on this path — confirm and
				 * free before returning. */
				DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb \
					  failed\n ", dev->name);
				return -ENOMEM ;
			}
			frag_list = skb_shinfo(*skb)->frag_list;
			frag_list->next = NULL;
			/*
			 * Buffer-2 receives L4 data payload
			 */
			((struct RxD3*)rxdp)->Buffer2_ptr = *temp2 =
				pci_map_single( sp->pdev, frag_list->data,
						dev->mtu, PCI_DMA_FROMDEVICE);
		}
	}
	return 0;
}
/* Program the per-buffer sizes into an Rx descriptor's Control_2 field
 * according to the ring mode (1, 3B two-buffer, or 3A three-buffer).
 * Must agree with the buffer sizes mapped in set_rxd_buffer_pointer. */
static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
				int size)
{
	struct net_device *dev = sp->dev;
	if (sp->rxd_mode == RXD_MODE_1) {
		/* Single buffer holds the whole frame. */
		rxdp->Control_2 = SET_BUFFER0_SIZE_1( size - NET_IP_ALIGN);
	} else if (sp->rxd_mode == RXD_MODE_3B) {
		/* Buffer1 is a 1-byte dummy in two-buffer mode. */
		rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
		rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
		rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4);
	} else {
		/* Three-buffer mode: headers in Buffer1, payload in Buffer2. */
		rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
		rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
		rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);
	}
}
6095
1ee6dd77 6096static int rxd_owner_bit_reset(struct s2io_nic *sp)
5d3213cc
AR
6097{
6098 int i, j, k, blk_cnt = 0, size;
1ee6dd77 6099 struct mac_info * mac_control = &sp->mac_control;
5d3213cc
AR
6100 struct config_param *config = &sp->config;
6101 struct net_device *dev = sp->dev;
1ee6dd77 6102 struct RxD_t *rxdp = NULL;
5d3213cc 6103 struct sk_buff *skb = NULL;
1ee6dd77 6104 struct buffAdd *ba = NULL;
5d3213cc
AR
6105 u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6106
6107 /* Calculate the size based on ring mode */
6108 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6109 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6110 if (sp->rxd_mode == RXD_MODE_1)
6111 size += NET_IP_ALIGN;
6112 else if (sp->rxd_mode == RXD_MODE_3B)
6113 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
6114 else
6115 size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;
6116
6117 for (i = 0; i < config->rx_ring_num; i++) {
6118 blk_cnt = config->rx_cfg[i].num_rxd /
6119 (rxd_count[sp->rxd_mode] +1);
6120
6121 for (j = 0; j < blk_cnt; j++) {
6122 for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
6123 rxdp = mac_control->rings[i].
6124 rx_blocks[j].rxds[k].virt_addr;
6125 if(sp->rxd_mode >= RXD_MODE_3A)
6126 ba = &mac_control->rings[i].ba[j][k];
6127 set_rxd_buffer_pointer(sp, rxdp, ba,
6128 &skb,(u64 *)&temp0_64,
6129 (u64 *)&temp1_64,
6130 (u64 *)&temp2_64, size);
6131
6132 set_rxd_buffer_size(sp, rxdp, size);
6133 wmb();
6134 /* flip the Ownership bit to Hardware */
6135 rxdp->Control_1 |= RXD_OWN_XENA;
6136 }
6137 }
6138 }
6139 return 0;
6140
6141}
6142
/* Enable the configured interrupt mechanism (MSI / MSI-X, falling back
 * to INTA on failure) and register the matching interrupt handlers.
 * Returns 0 on success, -1 if request_irq fails. */
static int s2io_add_isr(struct s2io_nic * sp)
{
	int ret = 0;
	struct net_device *dev = sp->dev;
	int err = 0;

	if (sp->intr_type == MSI)
		ret = s2io_enable_msi(sp);
	else if (sp->intr_type == MSI_X)
		ret = s2io_enable_msi_x(sp);
	if (ret) {
		/* Could not enable MSI/MSI-X: fall back to legacy INTA. */
		DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
		sp->intr_type = INTA;
	}

	/* Store the values of the MSIX table in the struct s2io_nic structure */
	store_xmsi_data(sp);

	/* After proper initialization of H/W, register ISR */
	if (sp->intr_type == MSI) {
		err = request_irq((int) sp->pdev->irq, s2io_msi_handle,
			IRQF_SHARED, sp->name, dev);
		if (err) {
			pci_disable_msi(sp->pdev);
			DBG_PRINT(ERR_DBG, "%s: MSI registration failed\n",
				  dev->name);
			return -1;
		}
	}
	if (sp->intr_type == MSI_X) {
		int i, msix_tx_cnt=0,msix_rx_cnt=0;

		/* Entry 0 is the alarm vector; per-FIFO/per-ring vectors
		 * start at 1 and are flagged MSIX_FLG while unclaimed. */
		for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
			if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
				sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
					dev->name, i);
				err = request_irq(sp->entries[i].vector,
					  s2io_msix_fifo_handle, 0, sp->desc[i],
						  sp->s2io_entries[i].arg);
				/* If either data or addr is zero print it */
				if(!(sp->msix_info[i].addr &&
					sp->msix_info[i].data)) {
					DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx"
						"Data:0x%lx\n",sp->desc[i],
						(unsigned long long)
						sp->msix_info[i].addr,
						(unsigned long)
						ntohl(sp->msix_info[i].data));
				} else {
					msix_tx_cnt++;
				}
			} else {
				sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
					dev->name, i);
				err = request_irq(sp->entries[i].vector,
					  s2io_msix_ring_handle, 0, sp->desc[i],
						  sp->s2io_entries[i].arg);
				/* If either data or addr is zero print it */
				if(!(sp->msix_info[i].addr &&
					sp->msix_info[i].data)) {
					DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx"
						"Data:0x%lx\n",sp->desc[i],
						(unsigned long long)
						sp->msix_info[i].addr,
						(unsigned long)
						ntohl(sp->msix_info[i].data));
				} else {
					msix_rx_cnt++;
				}
			}
			if (err) {
				/* NOTE(review): vectors registered on earlier
				 * iterations are not freed here — confirm the
				 * caller's MSI_X cleanup covers them. */
				DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration "
					  "failed\n", dev->name, i);
				DBG_PRINT(ERR_DBG, "Returned: %d\n", err);
				return -1;
			}
			sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
		}
		printk("MSI-X-TX %d entries enabled\n",msix_tx_cnt);
		printk("MSI-X-RX %d entries enabled\n",msix_rx_cnt);
	}
	if (sp->intr_type == INTA) {
		err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
			  sp->name, dev);
		if (err) {
			DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
				  dev->name);
			return -1;
		}
	}
	return 0;
}
/* Unregister all interrupt handlers and disable MSI/MSI-X, then wait
 * (bounded) for in-flight handlers to drain via the isr_cnt counter. */
static void s2io_rem_isr(struct s2io_nic * sp)
{
	int cnt = 0;
	struct net_device *dev = sp->dev;

	if (sp->intr_type == MSI_X) {
		int i;
		u16 msi_control;

		/* Free every vector that s2io_add_isr registered. */
		for (i=1; (sp->s2io_entries[i].in_use ==
			MSIX_REGISTERED_SUCCESS); i++) {
			int vector = sp->entries[i].vector;
			void *arg = sp->s2io_entries[i].arg;

			free_irq(vector, arg);
		}
		/* Clear the MSI-X enable bit in PCI config space (offset
		 * 0x42 on this device) before disabling MSI-X proper. */
		pci_read_config_word(sp->pdev, 0x42, &msi_control);
		msi_control &= 0xFFFE; /* Disable MSI */
		pci_write_config_word(sp->pdev, 0x42, msi_control);

		pci_disable_msix(sp->pdev);
	} else {
		free_irq(sp->pdev->irq, dev);
		if (sp->intr_type == MSI) {
			u16 val;

			pci_disable_msi(sp->pdev);
			/* Toggle the MSI enable bit at config offset 0x4c. */
			pci_read_config_word(sp->pdev, 0x4c, &val);
			val ^= 0x1;
			pci_write_config_word(sp->pdev, 0x4c, val);
		}
	}
	/* Waiting till all Interrupt handlers are complete */
	cnt = 0;
	do {
		msleep(10);
		if (!atomic_read(&sp->isr_cnt))
			break;
		cnt++;
	} while(cnt < 5);
}
6276
/* Bring the adapter down: stop traffic, remove ISRs, kill the tasklet,
 * wait for quiescence (re-arming Rx descriptors per HW requirement),
 * reset the NIC, and free all Tx/Rx buffers under their locks. */
static void s2io_card_down(struct s2io_nic * sp)
{
	int cnt = 0;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	unsigned long flags;
	register u64 val64 = 0;

	del_timer_sync(&sp->alarm_timer);
	/* If s2io_set_link task is executing, wait till it completes. */
	while (test_and_set_bit(0, &(sp->link_state))) {
		msleep(50);
	}
	atomic_set(&sp->card_state, CARD_DOWN);

	/* disable Tx and Rx traffic on the NIC */
	stop_nic(sp);

	s2io_rem_isr(sp);

	/* Kill tasklet. */
	tasklet_kill(&sp->task);

	/* Check if the device is Quiescent and then Reset the NIC */
	do {
		/* As per the HW requirement we need to replenish the
		 * receive buffer to avoid the ring bump. Since there is
		 * no intention of processing the Rx frame at this point we are
		 * just setting the ownership bit of rxd in each Rx
		 * ring to HW and set the appropriate buffer size
		 * based on the ring mode
		 */
		rxd_owner_bit_reset(sp);

		val64 = readq(&bar0->adapter_status);
		if (verify_xena_quiescence(sp)) {
			if(verify_pcc_quiescent(sp, sp->device_enabled_once))
			break;
		}

		msleep(50);
		cnt++;
		/* Give up after ~500ms; proceed with reset regardless. */
		if (cnt == 10) {
			DBG_PRINT(ERR_DBG,
				  "s2io_close:Device not Quiescent ");
			DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
				  (unsigned long long) val64);
			break;
		}
	} while (1);
	s2io_reset(sp);

	spin_lock_irqsave(&sp->tx_lock, flags);
	/* Free all Tx buffers */
	free_tx_buffers(sp);
	spin_unlock_irqrestore(&sp->tx_lock, flags);

	/* Free all Rx buffers */
	spin_lock_irqsave(&sp->rx_lock, flags);
	free_rx_buffers(sp);
	spin_unlock_irqrestore(&sp->rx_lock, flags);

	clear_bit(0, &(sp->link_state));
}
6340
/* Bring the adapter up: init H/W registers, fill Rx rings, restore
 * receive-mode state, start the NIC, register ISRs, arm the alarm
 * timer/tasklet and enable interrupts.
 * Returns 0 on success, -ENODEV/-ENOMEM on failure (with cleanup). */
static int s2io_card_up(struct s2io_nic * sp)
{
	int i, ret = 0;
	struct mac_info *mac_control;
	struct config_param *config;
	struct net_device *dev = (struct net_device *) sp->dev;
	u16 interruptible;

	/* Initialize the H/W I/O registers */
	if (init_nic(sp) != 0) {
		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
			  dev->name);
		s2io_reset(sp);
		return -ENODEV;
	}

	/*
	 * Initializing the Rx buffers. For now we are considering only 1
	 * Rx ring and initializing buffers into 30 Rx blocks
	 */
	mac_control = &sp->mac_control;
	config = &sp->config;

	for (i = 0; i < config->rx_ring_num; i++) {
		if ((ret = fill_rx_buffers(sp, i))) {
			DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
				  dev->name);
			s2io_reset(sp);
			free_rx_buffers(sp);
			return -ENOMEM;
		}
		DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
			  atomic_read(&sp->rx_bufs_left[i]));
	}
	/* Maintain the state prior to the open */
	if (sp->promisc_flg)
		sp->promisc_flg = 0;
	if (sp->m_cast_flg) {
		sp->m_cast_flg = 0;
		sp->all_multi_pos= 0;
	}

	/* Setting its receive mode */
	s2io_set_multicast(dev);

	if (sp->lro) {
		/* Initialize max aggregatable pkts per session based on MTU */
		sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
		/* Check if we can use(if specified) user provided value */
		if (lro_max_pkts < sp->lro_max_aggr_per_sess)
			sp->lro_max_aggr_per_sess = lro_max_pkts;
	}

	/* Enable Rx Traffic and interrupts on the NIC */
	if (start_nic(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
		s2io_reset(sp);
		free_rx_buffers(sp);
		return -ENODEV;
	}

	/* Add interrupt service routine */
	if (s2io_add_isr(sp) != 0) {
		if (sp->intr_type == MSI_X)
			s2io_rem_isr(sp);
		s2io_reset(sp);
		free_rx_buffers(sp);
		return -ENODEV;
	}

	/* Alarm handler fires every half second. */
	S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));

	/* Enable tasklet for the device */
	tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);

	/* Enable select interrupts */
	if (sp->intr_type != INTA)
		en_dis_able_nic_intrs(sp, ENA_ALL_INTRS, DISABLE_INTRS);
	else {
		interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
		interruptible |= TX_PIC_INTR | RX_PIC_INTR;
		interruptible |= TX_MAC_INTR | RX_MAC_INTR;
		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
	}


	atomic_set(&sp->card_state, CARD_UP);
	return 0;
}
6430
/**
 * s2io_restart_nic - Resets the NIC.
 * @work : work struct embedded in the device private structure
 * (rst_timer_task).
 * Description:
 * This function is scheduled to be run by the s2io_tx_watchdog
 * function after 0.5 secs to reset the NIC. The idea is to reduce
 * the run time of the watch dog routine which is run holding a
 * spin lock.
 */

static void s2io_restart_nic(struct work_struct *work)
{
	struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
	struct net_device *dev = sp->dev;

	rtnl_lock();

	/* Device may have been closed between scheduling and execution. */
	if (!netif_running(dev))
		goto out_unlock;

	s2io_card_down(sp);
	if (s2io_card_up(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
			  dev->name);
	}
	netif_wake_queue(dev);
	DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
		  dev->name);
out_unlock:
	rtnl_unlock();
}
6462
20346722 6463/**
6464 * s2io_tx_watchdog - Watchdog for transmit side.
1da177e4
LT
6465 * @dev : Pointer to net device structure
6466 * Description:
6467 * This function is triggered if the Tx Queue is stopped
6468 * for a pre-defined amount of time when the Interface is still up.
6469 * If the Interface is jammed in such a situation, the hardware is
6470 * reset (by s2io_close) and restarted again (by s2io_open) to
6471 * overcome any problem that might have been caused in the hardware.
6472 * Return value:
6473 * void
6474 */
6475
6476static void s2io_tx_watchdog(struct net_device *dev)
6477{
1ee6dd77 6478 struct s2io_nic *sp = dev->priv;
1da177e4
LT
6479
6480 if (netif_carrier_ok(dev)) {
6481 schedule_work(&sp->rst_timer_task);
bd1034f0 6482 sp->mac_control.stats_info->sw_stat.soft_reset_cnt++;
1da177e4
LT
6483 }
6484}
6485
6486/**
6487 * rx_osm_handler - To perform some OS related operations on SKB.
6488 * @sp: private member of the device structure,pointer to s2io_nic structure.
6489 * @skb : the socket buffer pointer.
6490 * @len : length of the packet
6491 * @cksum : FCS checksum of the frame.
6492 * @ring_no : the ring from which this RxD was extracted.
20346722 6493 * Description:
b41477f3 6494 * This function is called by the Rx interrupt serivce routine to perform
1da177e4
LT
6495 * some OS related operations on the SKB before passing it to the upper
6496 * layers. It mainly checks if the checksum is OK, if so adds it to the
6497 * SKBs cksum variable, increments the Rx packet count and passes the SKB
6498 * to the upper layer. If the checksum is wrong, it increments the Rx
6499 * packet error count, frees the SKB and returns error.
6500 * Return value:
6501 * SUCCESS on success and -1 on failure.
6502 */
1ee6dd77 6503static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
1da177e4 6504{
1ee6dd77 6505 struct s2io_nic *sp = ring_data->nic;
1da177e4 6506 struct net_device *dev = (struct net_device *) sp->dev;
20346722 6507 struct sk_buff *skb = (struct sk_buff *)
6508 ((unsigned long) rxdp->Host_Control);
6509 int ring_no = ring_data->ring_no;
1da177e4 6510 u16 l3_csum, l4_csum;
863c11a9 6511 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
1ee6dd77 6512 struct lro *lro;
da6971d8 6513
20346722 6514 skb->dev = dev;
c92ca04b 6515
863c11a9 6516 if (err) {
bd1034f0
AR
6517 /* Check for parity error */
6518 if (err & 0x1) {
6519 sp->mac_control.stats_info->sw_stat.parity_err_cnt++;
6520 }
6521
863c11a9
AR
6522 /*
6523 * Drop the packet if bad transfer code. Exception being
6524 * 0x5, which could be due to unsupported IPv6 extension header.
6525 * In this case, we let stack handle the packet.
6526 * Note that in this case, since checksum will be incorrect,
6527 * stack will validate the same.
6528 */
6529 if (err && ((err >> 48) != 0x5)) {
6530 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
6531 dev->name, err);
6532 sp->stats.rx_crc_errors++;
6533 dev_kfree_skb(skb);
6534 atomic_dec(&sp->rx_bufs_left[ring_no]);
6535 rxdp->Host_Control = 0;
6536 return 0;
6537 }
20346722 6538 }
1da177e4 6539
20346722 6540 /* Updating statistics */
6541 rxdp->Host_Control = 0;
6542 sp->rx_pkt_count++;
6543 sp->stats.rx_packets++;
da6971d8
AR
6544 if (sp->rxd_mode == RXD_MODE_1) {
6545 int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
20346722 6546
da6971d8
AR
6547 sp->stats.rx_bytes += len;
6548 skb_put(skb, len);
6549
6550 } else if (sp->rxd_mode >= RXD_MODE_3A) {
6551 int get_block = ring_data->rx_curr_get_info.block_index;
6552 int get_off = ring_data->rx_curr_get_info.offset;
6553 int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
6554 int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
6555 unsigned char *buff = skb_push(skb, buf0_len);
6556
1ee6dd77 6557 struct buffAdd *ba = &ring_data->ba[get_block][get_off];
da6971d8
AR
6558 sp->stats.rx_bytes += buf0_len + buf2_len;
6559 memcpy(buff, ba->ba_0, buf0_len);
6560
6561 if (sp->rxd_mode == RXD_MODE_3A) {
6562 int buf1_len = RXD_GET_BUFFER1_SIZE_3(rxdp->Control_2);
6563
6564 skb_put(skb, buf1_len);
6565 skb->len += buf2_len;
6566 skb->data_len += buf2_len;
da6971d8
AR
6567 skb_put(skb_shinfo(skb)->frag_list, buf2_len);
6568 sp->stats.rx_bytes += buf1_len;
6569
6570 } else
6571 skb_put(skb, buf2_len);
6572 }
20346722 6573
7d3d0439
RA
6574 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!sp->lro) ||
6575 (sp->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
20346722 6576 (sp->rx_csum)) {
6577 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
1da177e4
LT
6578 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
6579 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
20346722 6580 /*
1da177e4
LT
6581 * NIC verifies if the Checksum of the received
6582 * frame is Ok or not and accordingly returns
6583 * a flag in the RxD.
6584 */
6585 skb->ip_summed = CHECKSUM_UNNECESSARY;
7d3d0439
RA
6586 if (sp->lro) {
6587 u32 tcp_len;
6588 u8 *tcp;
6589 int ret = 0;
6590
6591 ret = s2io_club_tcp_session(skb->data, &tcp,
6592 &tcp_len, &lro, rxdp, sp);
6593 switch (ret) {
6594 case 3: /* Begin anew */
6595 lro->parent = skb;
6596 goto aggregate;
6597 case 1: /* Aggregate */
6598 {
6599 lro_append_pkt(sp, lro,
6600 skb, tcp_len);
6601 goto aggregate;
6602 }
6603 case 4: /* Flush session */
6604 {
6605 lro_append_pkt(sp, lro,
6606 skb, tcp_len);
6607 queue_rx_frame(lro->parent);
6608 clear_lro_session(lro);
6609 sp->mac_control.stats_info->
6610 sw_stat.flush_max_pkts++;
6611 goto aggregate;
6612 }
6613 case 2: /* Flush both */
6614 lro->parent->data_len =
6615 lro->frags_len;
6616 sp->mac_control.stats_info->
6617 sw_stat.sending_both++;
6618 queue_rx_frame(lro->parent);
6619 clear_lro_session(lro);
6620 goto send_up;
6621 case 0: /* sessions exceeded */
c92ca04b
AR
6622 case -1: /* non-TCP or not
6623 * L2 aggregatable
6624 */
7d3d0439
RA
6625 case 5: /*
6626 * First pkt in session not
6627 * L3/L4 aggregatable
6628 */
6629 break;
6630 default:
6631 DBG_PRINT(ERR_DBG,
6632 "%s: Samadhana!!\n",
6633 __FUNCTION__);
6634 BUG();
6635 }
6636 }
1da177e4 6637 } else {
20346722 6638 /*
6639 * Packet with erroneous checksum, let the
1da177e4
LT
6640 * upper layers deal with it.
6641 */
6642 skb->ip_summed = CHECKSUM_NONE;
6643 }
6644 } else {
6645 skb->ip_summed = CHECKSUM_NONE;
6646 }
6647
7d3d0439
RA
6648 if (!sp->lro) {
6649 skb->protocol = eth_type_trans(skb, dev);
926930b2
SS
6650 if ((sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2) &&
6651 vlan_strip_flag)) {
7d3d0439 6652 /* Queueing the vlan frame to the upper layer */
db874e65
SS
6653 if (napi)
6654 vlan_hwaccel_receive_skb(skb, sp->vlgrp,
6655 RXD_GET_VLAN_TAG(rxdp->Control_2));
6656 else
6657 vlan_hwaccel_rx(skb, sp->vlgrp,
6658 RXD_GET_VLAN_TAG(rxdp->Control_2));
7d3d0439 6659 } else {
db874e65
SS
6660 if (napi)
6661 netif_receive_skb(skb);
6662 else
6663 netif_rx(skb);
7d3d0439 6664 }
7d3d0439
RA
6665 } else {
6666send_up:
6667 queue_rx_frame(skb);
6aa20a22 6668 }
1da177e4 6669 dev->last_rx = jiffies;
7d3d0439 6670aggregate:
1da177e4 6671 atomic_dec(&sp->rx_bufs_left[ring_no]);
1da177e4
LT
6672 return SUCCESS;
6673}
6674
6675/**
6676 * s2io_link - stops/starts the Tx queue.
6677 * @sp : private member of the device structure, which is a pointer to the
6678 * s2io_nic structure.
6679 * @link : inidicates whether link is UP/DOWN.
6680 * Description:
6681 * This function stops/starts the Tx queue depending on whether the link
20346722 6682 * status of the NIC is is down or up. This is called by the Alarm
6683 * interrupt handler whenever a link change interrupt comes up.
1da177e4
LT
6684 * Return value:
6685 * void.
6686 */
6687
1ee6dd77 6688static void s2io_link(struct s2io_nic * sp, int link)
1da177e4
LT
6689{
6690 struct net_device *dev = (struct net_device *) sp->dev;
6691
6692 if (link != sp->last_link_state) {
6693 if (link == LINK_DOWN) {
6694 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
6695 netif_carrier_off(dev);
6696 } else {
6697 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
6698 netif_carrier_on(dev);
6699 }
6700 }
6701 sp->last_link_state = link;
6702}
6703
6704/**
20346722 6705 * get_xena_rev_id - to identify revision ID of xena.
6706 * @pdev : PCI Dev structure
6707 * Description:
6708 * Function to identify the Revision ID of xena.
6709 * Return value:
6710 * returns the revision ID of the device.
6711 */
6712
26df54bf 6713static int get_xena_rev_id(struct pci_dev *pdev)
20346722 6714{
6715 u8 id = 0;
6716 int ret;
6717 ret = pci_read_config_byte(pdev, PCI_REVISION_ID, (u8 *) & id);
6718 return id;
6719}
6720
6721/**
6722 * s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
6723 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
6724 * s2io_nic structure.
6725 * Description:
6726 * This function initializes a few of the PCI and PCI-X configuration registers
6727 * with recommended values.
6728 * Return value:
6729 * void
6730 */
6731
1ee6dd77 6732static void s2io_init_pci(struct s2io_nic * sp)
1da177e4 6733{
20346722 6734 u16 pci_cmd = 0, pcix_cmd = 0;
1da177e4
LT
6735
6736 /* Enable Data Parity Error Recovery in PCI-X command register. */
6737 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 6738 &(pcix_cmd));
1da177e4 6739 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 6740 (pcix_cmd | 1));
1da177e4 6741 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 6742 &(pcix_cmd));
1da177e4
LT
6743
6744 /* Set the PErr Response bit in PCI command register. */
6745 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
6746 pci_write_config_word(sp->pdev, PCI_COMMAND,
6747 (pci_cmd | PCI_COMMAND_PARITY));
6748 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
1da177e4
LT
6749}
6750
9dc737a7
AR
6751static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
6752{
6753 if ( tx_fifo_num > 8) {
6754 DBG_PRINT(ERR_DBG, "s2io: Requested number of Tx fifos not "
6755 "supported\n");
6756 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Tx fifos\n");
6757 tx_fifo_num = 8;
6758 }
6759 if ( rx_ring_num > 8) {
6760 DBG_PRINT(ERR_DBG, "s2io: Requested number of Rx rings not "
6761 "supported\n");
6762 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Rx rings\n");
6763 rx_ring_num = 8;
6764 }
db874e65
SS
6765 if (*dev_intr_type != INTA)
6766 napi = 0;
6767
9dc737a7
AR
6768#ifndef CONFIG_PCI_MSI
6769 if (*dev_intr_type != INTA) {
6770 DBG_PRINT(ERR_DBG, "s2io: This kernel does not support"
6771 "MSI/MSI-X. Defaulting to INTA\n");
6772 *dev_intr_type = INTA;
6773 }
6774#else
6775 if (*dev_intr_type > MSI_X) {
6776 DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
6777 "Defaulting to INTA\n");
6778 *dev_intr_type = INTA;
6779 }
6780#endif
6781 if ((*dev_intr_type == MSI_X) &&
6782 ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
6783 (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
6aa20a22 6784 DBG_PRINT(ERR_DBG, "s2io: Xframe I does not support MSI_X. "
9dc737a7
AR
6785 "Defaulting to INTA\n");
6786 *dev_intr_type = INTA;
6787 }
fb6a825b 6788
9dc737a7
AR
6789 if (rx_ring_mode > 3) {
6790 DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
6791 DBG_PRINT(ERR_DBG, "s2io: Defaulting to 3-buffer mode\n");
6792 rx_ring_mode = 3;
6793 }
6794 return SUCCESS;
6795}
6796
9fc93a41
SS
6797/**
6798 * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
6799 * or Traffic class respectively.
6800 * @nic: device peivate variable
6801 * Description: The function configures the receive steering to
6802 * desired receive ring.
6803 * Return Value: SUCCESS on success and
6804 * '-1' on failure (endian settings incorrect).
6805 */
6806static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
6807{
6808 struct XENA_dev_config __iomem *bar0 = nic->bar0;
6809 register u64 val64 = 0;
6810
6811 if (ds_codepoint > 63)
6812 return FAILURE;
6813
6814 val64 = RTS_DS_MEM_DATA(ring);
6815 writeq(val64, &bar0->rts_ds_mem_data);
6816
6817 val64 = RTS_DS_MEM_CTRL_WE |
6818 RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
6819 RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
6820
6821 writeq(val64, &bar0->rts_ds_mem_ctrl);
6822
6823 return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
6824 RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
6825 S2IO_BIT_RESET);
6826}
6827
1da177e4 6828/**
20346722 6829 * s2io_init_nic - Initialization of the adapter .
1da177e4
LT
6830 * @pdev : structure containing the PCI related information of the device.
6831 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
6832 * Description:
6833 * The function initializes an adapter identified by the pci_dec structure.
20346722 6834 * All OS related initialization including memory and device structure and
6835 * initlaization of the device private variable is done. Also the swapper
6836 * control register is initialized to enable read and write into the I/O
1da177e4
LT
6837 * registers of the device.
6838 * Return value:
6839 * returns 0 on success and negative on failure.
6840 */
6841
6842static int __devinit
6843s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
6844{
1ee6dd77 6845 struct s2io_nic *sp;
1da177e4 6846 struct net_device *dev;
1da177e4
LT
6847 int i, j, ret;
6848 int dma_flag = FALSE;
6849 u32 mac_up, mac_down;
6850 u64 val64 = 0, tmp64 = 0;
1ee6dd77 6851 struct XENA_dev_config __iomem *bar0 = NULL;
1da177e4 6852 u16 subid;
1ee6dd77 6853 struct mac_info *mac_control;
1da177e4 6854 struct config_param *config;
541ae68f 6855 int mode;
cc6e7c44 6856 u8 dev_intr_type = intr_type;
1da177e4 6857
9dc737a7
AR
6858 if ((ret = s2io_verify_parm(pdev, &dev_intr_type)))
6859 return ret;
1da177e4
LT
6860
6861 if ((ret = pci_enable_device(pdev))) {
6862 DBG_PRINT(ERR_DBG,
6863 "s2io_init_nic: pci_enable_device failed\n");
6864 return ret;
6865 }
6866
1e7f0bd8 6867 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
1da177e4
LT
6868 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
6869 dma_flag = TRUE;
1da177e4 6870 if (pci_set_consistent_dma_mask
1e7f0bd8 6871 (pdev, DMA_64BIT_MASK)) {
1da177e4
LT
6872 DBG_PRINT(ERR_DBG,
6873 "Unable to obtain 64bit DMA for \
6874 consistent allocations\n");
6875 pci_disable_device(pdev);
6876 return -ENOMEM;
6877 }
1e7f0bd8 6878 } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
1da177e4
LT
6879 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
6880 } else {
6881 pci_disable_device(pdev);
6882 return -ENOMEM;
6883 }
cc6e7c44
RA
6884 if (dev_intr_type != MSI_X) {
6885 if (pci_request_regions(pdev, s2io_driver_name)) {
b41477f3
AR
6886 DBG_PRINT(ERR_DBG, "Request Regions failed\n");
6887 pci_disable_device(pdev);
cc6e7c44
RA
6888 return -ENODEV;
6889 }
6890 }
6891 else {
6892 if (!(request_mem_region(pci_resource_start(pdev, 0),
6893 pci_resource_len(pdev, 0), s2io_driver_name))) {
6894 DBG_PRINT(ERR_DBG, "bar0 Request Regions failed\n");
6895 pci_disable_device(pdev);
6896 return -ENODEV;
6897 }
6898 if (!(request_mem_region(pci_resource_start(pdev, 2),
6899 pci_resource_len(pdev, 2), s2io_driver_name))) {
6900 DBG_PRINT(ERR_DBG, "bar1 Request Regions failed\n");
6901 release_mem_region(pci_resource_start(pdev, 0),
6902 pci_resource_len(pdev, 0));
6903 pci_disable_device(pdev);
6904 return -ENODEV;
6905 }
1da177e4
LT
6906 }
6907
1ee6dd77 6908 dev = alloc_etherdev(sizeof(struct s2io_nic));
1da177e4
LT
6909 if (dev == NULL) {
6910 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
6911 pci_disable_device(pdev);
6912 pci_release_regions(pdev);
6913 return -ENODEV;
6914 }
6915
6916 pci_set_master(pdev);
6917 pci_set_drvdata(pdev, dev);
6918 SET_MODULE_OWNER(dev);
6919 SET_NETDEV_DEV(dev, &pdev->dev);
6920
6921 /* Private member variable initialized to s2io NIC structure */
6922 sp = dev->priv;
1ee6dd77 6923 memset(sp, 0, sizeof(struct s2io_nic));
1da177e4
LT
6924 sp->dev = dev;
6925 sp->pdev = pdev;
1da177e4 6926 sp->high_dma_flag = dma_flag;
1da177e4 6927 sp->device_enabled_once = FALSE;
da6971d8
AR
6928 if (rx_ring_mode == 1)
6929 sp->rxd_mode = RXD_MODE_1;
6930 if (rx_ring_mode == 2)
6931 sp->rxd_mode = RXD_MODE_3B;
6932 if (rx_ring_mode == 3)
6933 sp->rxd_mode = RXD_MODE_3A;
6934
cc6e7c44 6935 sp->intr_type = dev_intr_type;
1da177e4 6936
541ae68f 6937 if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
6938 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
6939 sp->device_type = XFRAME_II_DEVICE;
6940 else
6941 sp->device_type = XFRAME_I_DEVICE;
6942
7d3d0439 6943 sp->lro = lro;
6aa20a22 6944
1da177e4
LT
6945 /* Initialize some PCI/PCI-X fields of the NIC. */
6946 s2io_init_pci(sp);
6947
20346722 6948 /*
1da177e4 6949 * Setting the device configuration parameters.
20346722 6950 * Most of these parameters can be specified by the user during
6951 * module insertion as they are module loadable parameters. If
6952 * these parameters are not not specified during load time, they
1da177e4
LT
6953 * are initialized with default values.
6954 */
6955 mac_control = &sp->mac_control;
6956 config = &sp->config;
6957
6958 /* Tx side parameters. */
1da177e4
LT
6959 config->tx_fifo_num = tx_fifo_num;
6960 for (i = 0; i < MAX_TX_FIFOS; i++) {
6961 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
6962 config->tx_cfg[i].fifo_priority = i;
6963 }
6964
20346722 6965 /* mapping the QoS priority to the configured fifos */
6966 for (i = 0; i < MAX_TX_FIFOS; i++)
6967 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];
6968
1da177e4
LT
6969 config->tx_intr_type = TXD_INT_TYPE_UTILZ;
6970 for (i = 0; i < config->tx_fifo_num; i++) {
6971 config->tx_cfg[i].f_no_snoop =
6972 (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
6973 if (config->tx_cfg[i].fifo_len < 65) {
6974 config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
6975 break;
6976 }
6977 }
fed5eccd
AR
6978 /* + 2 because one Txd for skb->data and one Txd for UFO */
6979 config->max_txds = MAX_SKB_FRAGS + 2;
1da177e4
LT
6980
6981 /* Rx side parameters. */
1da177e4
LT
6982 config->rx_ring_num = rx_ring_num;
6983 for (i = 0; i < MAX_RX_RINGS; i++) {
6984 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
da6971d8 6985 (rxd_count[sp->rxd_mode] + 1);
1da177e4
LT
6986 config->rx_cfg[i].ring_priority = i;
6987 }
6988
6989 for (i = 0; i < rx_ring_num; i++) {
6990 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
6991 config->rx_cfg[i].f_no_snoop =
6992 (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
6993 }
6994
6995 /* Setting Mac Control parameters */
6996 mac_control->rmac_pause_time = rmac_pause_time;
6997 mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
6998 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
6999
7000
7001 /* Initialize Ring buffer parameters. */
7002 for (i = 0; i < config->rx_ring_num; i++)
7003 atomic_set(&sp->rx_bufs_left[i], 0);
7004
7ba013ac 7005 /* Initialize the number of ISRs currently running */
7006 atomic_set(&sp->isr_cnt, 0);
7007
1da177e4
LT
7008 /* initialize the shared memory used by the NIC and the host */
7009 if (init_shared_mem(sp)) {
7010 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
b41477f3 7011 dev->name);
1da177e4
LT
7012 ret = -ENOMEM;
7013 goto mem_alloc_failed;
7014 }
7015
7016 sp->bar0 = ioremap(pci_resource_start(pdev, 0),
7017 pci_resource_len(pdev, 0));
7018 if (!sp->bar0) {
19a60522 7019 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
1da177e4
LT
7020 dev->name);
7021 ret = -ENOMEM;
7022 goto bar0_remap_failed;
7023 }
7024
7025 sp->bar1 = ioremap(pci_resource_start(pdev, 2),
7026 pci_resource_len(pdev, 2));
7027 if (!sp->bar1) {
19a60522 7028 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
1da177e4
LT
7029 dev->name);
7030 ret = -ENOMEM;
7031 goto bar1_remap_failed;
7032 }
7033
7034 dev->irq = pdev->irq;
7035 dev->base_addr = (unsigned long) sp->bar0;
7036
7037 /* Initializing the BAR1 address as the start of the FIFO pointer. */
7038 for (j = 0; j < MAX_TX_FIFOS; j++) {
1ee6dd77 7039 mac_control->tx_FIFO_start[j] = (struct TxFIFO_element __iomem *)
1da177e4
LT
7040 (sp->bar1 + (j * 0x00020000));
7041 }
7042
7043 /* Driver entry points */
7044 dev->open = &s2io_open;
7045 dev->stop = &s2io_close;
7046 dev->hard_start_xmit = &s2io_xmit;
7047 dev->get_stats = &s2io_get_stats;
7048 dev->set_multicast_list = &s2io_set_multicast;
7049 dev->do_ioctl = &s2io_ioctl;
7050 dev->change_mtu = &s2io_change_mtu;
7051 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
be3a6b02 7052 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7053 dev->vlan_rx_register = s2io_vlan_rx_register;
7054 dev->vlan_rx_kill_vid = (void *)s2io_vlan_rx_kill_vid;
20346722 7055
1da177e4
LT
7056 /*
7057 * will use eth_mac_addr() for dev->set_mac_address
7058 * mac address will be set every time dev->open() is called
7059 */
1da177e4 7060 dev->poll = s2io_poll;
20346722 7061 dev->weight = 32;
1da177e4 7062
612eff0e
BH
7063#ifdef CONFIG_NET_POLL_CONTROLLER
7064 dev->poll_controller = s2io_netpoll;
7065#endif
7066
1da177e4
LT
7067 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
7068 if (sp->high_dma_flag == TRUE)
7069 dev->features |= NETIF_F_HIGHDMA;
1da177e4 7070 dev->features |= NETIF_F_TSO;
f83ef8c0 7071 dev->features |= NETIF_F_TSO6;
db874e65 7072 if ((sp->device_type & XFRAME_II_DEVICE) && (ufo)) {
fed5eccd
AR
7073 dev->features |= NETIF_F_UFO;
7074 dev->features |= NETIF_F_HW_CSUM;
7075 }
1da177e4
LT
7076
7077 dev->tx_timeout = &s2io_tx_watchdog;
7078 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
c4028958
DH
7079 INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
7080 INIT_WORK(&sp->set_link_task, s2io_set_link);
1da177e4 7081
e960fc5c 7082 pci_save_state(sp->pdev);
1da177e4
LT
7083
7084 /* Setting swapper control on the NIC, for proper reset operation */
7085 if (s2io_set_swapper(sp)) {
7086 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
7087 dev->name);
7088 ret = -EAGAIN;
7089 goto set_swap_failed;
7090 }
7091
541ae68f 7092 /* Verify if the Herc works on the slot its placed into */
7093 if (sp->device_type & XFRAME_II_DEVICE) {
7094 mode = s2io_verify_pci_mode(sp);
7095 if (mode < 0) {
7096 DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
7097 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
7098 ret = -EBADSLT;
7099 goto set_swap_failed;
7100 }
7101 }
7102
7103 /* Not needed for Herc */
7104 if (sp->device_type & XFRAME_I_DEVICE) {
7105 /*
7106 * Fix for all "FFs" MAC address problems observed on
7107 * Alpha platforms
7108 */
7109 fix_mac_address(sp);
7110 s2io_reset(sp);
7111 }
1da177e4
LT
7112
7113 /*
1da177e4
LT
7114 * MAC address initialization.
7115 * For now only one mac address will be read and used.
7116 */
7117 bar0 = sp->bar0;
7118 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
7119 RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
7120 writeq(val64, &bar0->rmac_addr_cmd_mem);
c92ca04b 7121 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
9fc93a41 7122 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET);
1da177e4
LT
7123 tmp64 = readq(&bar0->rmac_addr_data0_mem);
7124 mac_down = (u32) tmp64;
7125 mac_up = (u32) (tmp64 >> 32);
7126
7127 memset(sp->def_mac_addr[0].mac_addr, 0, sizeof(ETH_ALEN));
7128
7129 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
7130 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
7131 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
7132 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
7133 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
7134 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
7135
1da177e4
LT
7136 /* Set the factory defined MAC address initially */
7137 dev->addr_len = ETH_ALEN;
7138 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
7139
b41477f3
AR
7140 /* reset Nic and bring it to known state */
7141 s2io_reset(sp);
7142
1da177e4 7143 /*
20346722 7144 * Initialize the tasklet status and link state flags
541ae68f 7145 * and the card state parameter
1da177e4
LT
7146 */
7147 atomic_set(&(sp->card_state), 0);
7148 sp->tasklet_status = 0;
7149 sp->link_state = 0;
7150
1da177e4
LT
7151 /* Initialize spinlocks */
7152 spin_lock_init(&sp->tx_lock);
db874e65
SS
7153
7154 if (!napi)
7155 spin_lock_init(&sp->put_lock);
7ba013ac 7156 spin_lock_init(&sp->rx_lock);
1da177e4 7157
20346722 7158 /*
7159 * SXE-002: Configure link and activity LED to init state
7160 * on driver load.
1da177e4
LT
7161 */
7162 subid = sp->pdev->subsystem_device;
7163 if ((subid & 0xFF) >= 0x07) {
7164 val64 = readq(&bar0->gpio_control);
7165 val64 |= 0x0000800000000000ULL;
7166 writeq(val64, &bar0->gpio_control);
7167 val64 = 0x0411040400000000ULL;
7168 writeq(val64, (void __iomem *) bar0 + 0x2700);
7169 val64 = readq(&bar0->gpio_control);
7170 }
7171
7172 sp->rx_csum = 1; /* Rx chksum verify enabled by default */
7173
7174 if (register_netdev(dev)) {
7175 DBG_PRINT(ERR_DBG, "Device registration failed\n");
7176 ret = -ENODEV;
7177 goto register_failed;
7178 }
9dc737a7 7179 s2io_vpd_read(sp);
9dc737a7 7180 DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2005 Neterion Inc.\n");
b41477f3
AR
7181 DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name,
7182 sp->product_name, get_xena_rev_id(sp->pdev));
7183 DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
7184 s2io_driver_version);
9dc737a7 7185 DBG_PRINT(ERR_DBG, "%s: MAC ADDR: "
19a60522 7186 "%02x:%02x:%02x:%02x:%02x:%02x", dev->name,
541ae68f 7187 sp->def_mac_addr[0].mac_addr[0],
7188 sp->def_mac_addr[0].mac_addr[1],
7189 sp->def_mac_addr[0].mac_addr[2],
7190 sp->def_mac_addr[0].mac_addr[3],
7191 sp->def_mac_addr[0].mac_addr[4],
7192 sp->def_mac_addr[0].mac_addr[5]);
19a60522 7193 DBG_PRINT(ERR_DBG, "SERIAL NUMBER: %s\n", sp->serial_num);
9dc737a7 7194 if (sp->device_type & XFRAME_II_DEVICE) {
0b1f7ebe 7195 mode = s2io_print_pci_mode(sp);
541ae68f 7196 if (mode < 0) {
9dc737a7 7197 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
541ae68f 7198 ret = -EBADSLT;
9dc737a7 7199 unregister_netdev(dev);
541ae68f 7200 goto set_swap_failed;
7201 }
541ae68f 7202 }
9dc737a7
AR
7203 switch(sp->rxd_mode) {
7204 case RXD_MODE_1:
7205 DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
7206 dev->name);
7207 break;
7208 case RXD_MODE_3B:
7209 DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
7210 dev->name);
7211 break;
7212 case RXD_MODE_3A:
7213 DBG_PRINT(ERR_DBG, "%s: 3-Buffer receive mode enabled\n",
7214 dev->name);
7215 break;
7216 }
db874e65
SS
7217
7218 if (napi)
7219 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
9dc737a7
AR
7220 switch(sp->intr_type) {
7221 case INTA:
7222 DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
7223 break;
7224 case MSI:
7225 DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI\n", dev->name);
7226 break;
7227 case MSI_X:
7228 DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
7229 break;
7230 }
7d3d0439
RA
7231 if (sp->lro)
7232 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
9dc737a7 7233 dev->name);
db874e65
SS
7234 if (ufo)
7235 DBG_PRINT(ERR_DBG, "%s: UDP Fragmentation Offload(UFO)"
7236 " enabled\n", dev->name);
7ba013ac 7237 /* Initialize device name */
9dc737a7 7238 sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
7ba013ac 7239
b6e3f982 7240 /* Initialize bimodal Interrupts */
7241 sp->config.bimodal = bimodal;
7242 if (!(sp->device_type & XFRAME_II_DEVICE) && bimodal) {
7243 sp->config.bimodal = 0;
7244 DBG_PRINT(ERR_DBG,"%s:Bimodal intr not supported by Xframe I\n",
7245 dev->name);
7246 }
7247
20346722 7248 /*
7249 * Make Link state as off at this point, when the Link change
7250 * interrupt comes the state will be automatically changed to
1da177e4
LT
7251 * the right state.
7252 */
7253 netif_carrier_off(dev);
1da177e4
LT
7254
7255 return 0;
7256
7257 register_failed:
7258 set_swap_failed:
7259 iounmap(sp->bar1);
7260 bar1_remap_failed:
7261 iounmap(sp->bar0);
7262 bar0_remap_failed:
7263 mem_alloc_failed:
7264 free_shared_mem(sp);
7265 pci_disable_device(pdev);
cc6e7c44
RA
7266 if (dev_intr_type != MSI_X)
7267 pci_release_regions(pdev);
7268 else {
7269 release_mem_region(pci_resource_start(pdev, 0),
7270 pci_resource_len(pdev, 0));
7271 release_mem_region(pci_resource_start(pdev, 2),
7272 pci_resource_len(pdev, 2));
7273 }
1da177e4
LT
7274 pci_set_drvdata(pdev, NULL);
7275 free_netdev(dev);
7276
7277 return ret;
7278}
7279
7280/**
20346722 7281 * s2io_rem_nic - Free the PCI device
1da177e4 7282 * @pdev: structure containing the PCI related information of the device.
20346722 7283 * Description: This function is called by the Pci subsystem to release a
1da177e4 7284 * PCI device and free up all resource held up by the device. This could
20346722 7285 * be in response to a Hot plug event or when the driver is to be removed
1da177e4
LT
7286 * from memory.
7287 */
7288
7289static void __devexit s2io_rem_nic(struct pci_dev *pdev)
7290{
7291 struct net_device *dev =
7292 (struct net_device *) pci_get_drvdata(pdev);
1ee6dd77 7293 struct s2io_nic *sp;
1da177e4
LT
7294
7295 if (dev == NULL) {
7296 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
7297 return;
7298 }
7299
22747d6b
FR
7300 flush_scheduled_work();
7301
1da177e4
LT
7302 sp = dev->priv;
7303 unregister_netdev(dev);
7304
7305 free_shared_mem(sp);
7306 iounmap(sp->bar0);
7307 iounmap(sp->bar1);
cc6e7c44
RA
7308 if (sp->intr_type != MSI_X)
7309 pci_release_regions(pdev);
7310 else {
7311 release_mem_region(pci_resource_start(pdev, 0),
7312 pci_resource_len(pdev, 0));
7313 release_mem_region(pci_resource_start(pdev, 2),
7314 pci_resource_len(pdev, 2));
7315 }
1da177e4 7316 pci_set_drvdata(pdev, NULL);
1da177e4 7317 free_netdev(dev);
19a60522 7318 pci_disable_device(pdev);
1da177e4
LT
7319}
7320
7321/**
7322 * s2io_starter - Entry point for the driver
7323 * Description: This function is the entry point for the driver. It verifies
7324 * the module loadable parameters and initializes PCI configuration space.
7325 */
7326
7327int __init s2io_starter(void)
7328{
29917620 7329 return pci_register_driver(&s2io_driver);
1da177e4
LT
7330}
7331
7332/**
20346722 7333 * s2io_closer - Cleanup routine for the driver
1da177e4
LT
7334 * Description: This function is the cleanup routine for the driver. It unregist * ers the driver.
7335 */
7336
372cc597 7337static __exit void s2io_closer(void)
1da177e4
LT
7338{
7339 pci_unregister_driver(&s2io_driver);
7340 DBG_PRINT(INIT_DBG, "cleanup done\n");
7341}
7342
7343module_init(s2io_starter);
7344module_exit(s2io_closer);
7d3d0439 7345
6aa20a22 7346static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
1ee6dd77 7347 struct tcphdr **tcp, struct RxD_t *rxdp)
7d3d0439
RA
7348{
7349 int ip_off;
7350 u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
7351
7352 if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
7353 DBG_PRINT(INIT_DBG,"%s: Non-TCP frames not supported for LRO\n",
7354 __FUNCTION__);
7355 return -1;
7356 }
7357
7358 /* TODO:
7359 * By default the VLAN field in the MAC is stripped by the card, if this
7360 * feature is turned off in rx_pa_cfg register, then the ip_off field
7361 * has to be shifted by a further 2 bytes
7362 */
7363 switch (l2_type) {
7364 case 0: /* DIX type */
7365 case 4: /* DIX type with VLAN */
7366 ip_off = HEADER_ETHERNET_II_802_3_SIZE;
7367 break;
7368 /* LLC, SNAP etc are considered non-mergeable */
7369 default:
7370 return -1;
7371 }
7372
7373 *ip = (struct iphdr *)((u8 *)buffer + ip_off);
7374 ip_len = (u8)((*ip)->ihl);
7375 ip_len <<= 2;
7376 *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
7377
7378 return 0;
7379}
7380
1ee6dd77 7381static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
7d3d0439
RA
7382 struct tcphdr *tcp)
7383{
7384 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7385 if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) ||
7386 (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest))
7387 return -1;
7388 return 0;
7389}
7390
7391static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
7392{
7393 return(ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2));
7394}
7395
1ee6dd77 7396static void initiate_new_session(struct lro *lro, u8 *l2h,
7d3d0439
RA
7397 struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len)
7398{
7399 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7400 lro->l2h = l2h;
7401 lro->iph = ip;
7402 lro->tcph = tcp;
7403 lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
7404 lro->tcp_ack = ntohl(tcp->ack_seq);
7405 lro->sg_num = 1;
7406 lro->total_len = ntohs(ip->tot_len);
7407 lro->frags_len = 0;
6aa20a22 7408 /*
7d3d0439
RA
7409 * check if we saw TCP timestamp. Other consistency checks have
7410 * already been done.
7411 */
7412 if (tcp->doff == 8) {
7413 u32 *ptr;
7414 ptr = (u32 *)(tcp+1);
7415 lro->saw_ts = 1;
7416 lro->cur_tsval = *(ptr+1);
7417 lro->cur_tsecr = *(ptr+2);
7418 }
7419 lro->in_use = 1;
7420}
7421
1ee6dd77 7422static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
7d3d0439
RA
7423{
7424 struct iphdr *ip = lro->iph;
7425 struct tcphdr *tcp = lro->tcph;
bd4f3ae1 7426 __sum16 nchk;
1ee6dd77 7427 struct stat_block *statinfo = sp->mac_control.stats_info;
7d3d0439
RA
7428 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7429
7430 /* Update L3 header */
7431 ip->tot_len = htons(lro->total_len);
7432 ip->check = 0;
7433 nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
7434 ip->check = nchk;
7435
7436 /* Update L4 header */
7437 tcp->ack_seq = lro->tcp_ack;
7438 tcp->window = lro->window;
7439
7440 /* Update tsecr field if this session has timestamps enabled */
7441 if (lro->saw_ts) {
7442 u32 *ptr = (u32 *)(tcp + 1);
7443 *(ptr+2) = lro->cur_tsecr;
7444 }
7445
7446 /* Update counters required for calculation of
7447 * average no. of packets aggregated.
7448 */
7449 statinfo->sw_stat.sum_avg_pkts_aggregated += lro->sg_num;
7450 statinfo->sw_stat.num_aggregations++;
7451}
7452
1ee6dd77 7453static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
7d3d0439
RA
7454 struct tcphdr *tcp, u32 l4_pyld)
7455{
7456 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7457 lro->total_len += l4_pyld;
7458 lro->frags_len += l4_pyld;
7459 lro->tcp_next_seq += l4_pyld;
7460 lro->sg_num++;
7461
7462 /* Update ack seq no. and window ad(from this pkt) in LRO object */
7463 lro->tcp_ack = tcp->ack_seq;
7464 lro->window = tcp->window;
6aa20a22 7465
7d3d0439
RA
7466 if (lro->saw_ts) {
7467 u32 *ptr;
7468 /* Update tsecr and tsval from this packet */
7469 ptr = (u32 *) (tcp + 1);
6aa20a22 7470 lro->cur_tsval = *(ptr + 1);
7d3d0439
RA
7471 lro->cur_tsecr = *(ptr + 2);
7472 }
7473}
7474
1ee6dd77 7475static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
7d3d0439
RA
7476 struct tcphdr *tcp, u32 tcp_pyld_len)
7477{
7d3d0439
RA
7478 u8 *ptr;
7479
79dc1901
AM
7480 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7481
7d3d0439
RA
7482 if (!tcp_pyld_len) {
7483 /* Runt frame or a pure ack */
7484 return -1;
7485 }
7486
7487 if (ip->ihl != 5) /* IP has options */
7488 return -1;
7489
75c30b13
AR
7490 /* If we see CE codepoint in IP header, packet is not mergeable */
7491 if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
7492 return -1;
7493
7494 /* If we see ECE or CWR flags in TCP header, packet is not mergeable */
7d3d0439 7495 if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin ||
75c30b13 7496 tcp->ece || tcp->cwr || !tcp->ack) {
7d3d0439
RA
7497 /*
7498 * Currently recognize only the ack control word and
7499 * any other control field being set would result in
7500 * flushing the LRO session
7501 */
7502 return -1;
7503 }
7504
6aa20a22 7505 /*
7d3d0439
RA
7506 * Allow only one TCP timestamp option. Don't aggregate if
7507 * any other options are detected.
7508 */
7509 if (tcp->doff != 5 && tcp->doff != 8)
7510 return -1;
7511
7512 if (tcp->doff == 8) {
6aa20a22 7513 ptr = (u8 *)(tcp + 1);
7d3d0439
RA
7514 while (*ptr == TCPOPT_NOP)
7515 ptr++;
7516 if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
7517 return -1;
7518
7519 /* Ensure timestamp value increases monotonically */
7520 if (l_lro)
7521 if (l_lro->cur_tsval > *((u32 *)(ptr+2)))
7522 return -1;
7523
7524 /* timestamp echo reply should be non-zero */
6aa20a22 7525 if (*((u32 *)(ptr+6)) == 0)
7d3d0439
RA
7526 return -1;
7527 }
7528
7529 return 0;
7530}
7531
/*
 * s2io_club_tcp_session - match a received TCP frame against the per-NIC
 * LRO session table and decide how the rx path should handle it.
 * @buffer:  start of the L2 frame
 * @tcp:     out: set to the TCP header inside @buffer
 * @tcp_len: out: TCP payload length of this frame
 * @lro:     out: the matched or newly started session, or NULL
 * @rxdp:    rx descriptor carrying the card's L2/L4 classification bits
 * @sp:      device private structure owning the lro0_n[] session table
 *
 * Return codes acted on by the caller:
 *   -1: frame not LRO-capable at L2 (non-TCP, LLC/SNAP framing)
 *    0: all MAX_LRO_SESSIONS slots busy; *lro is NULL, send frame up as-is
 *    1: frame aggregated into the existing session *lro
 *    2: out-of-order or non-mergeable; flush the session (headers updated)
 *    3: a new session was initiated with this frame
 *    4: aggregated and session reached lro_max_aggr_per_sess; flush it
 *    5: frame fails L3/L4 merge checks; send up without touching a session
 */
static int
s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro,
		      struct RxD_t *rxdp, struct s2io_nic *sp)
{
	struct iphdr *ip;
	struct tcphdr *tcph;
	int ret = 0, i;

	/* Bail out early unless the frame is DIX-framed TCP. */
	if (!(ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
					 rxdp))) {
		DBG_PRINT(INFO_DBG,"IP Saddr: %x Daddr: %x\n",
			  ip->saddr, ip->daddr);
	} else {
		return ret;
	}

	tcph = (struct tcphdr *)*tcp;
	*tcp_len = get_l4_pyld_length(ip, tcph);
	/* First pass: look for an in-use session with a matching 4-tuple. */
	for (i=0; i<MAX_LRO_SESSIONS; i++) {
		struct lro *l_lro = &sp->lro0_n[i];
		if (l_lro->in_use) {
			if (check_for_socket_match(l_lro, ip, tcph))
				continue;
			/* Sock pair matched */
			*lro = l_lro;

			/* Any sequence gap forces a flush of both. */
			if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
				DBG_PRINT(INFO_DBG, "%s:Out of order. expected "
					  "0x%x, actual 0x%x\n", __FUNCTION__,
					  (*lro)->tcp_next_seq,
					  ntohl(tcph->seq));

				sp->mac_control.stats_info->
				   sw_stat.outof_sequence_pkts++;
				ret = 2;
				break;
			}

			if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,*tcp_len))
				ret = 1; /* Aggregate */
			else
				ret = 2; /* Flush both */
			break;
		}
	}

	if (ret == 0) {
		/* Before searching for available LRO objects,
		 * check if the pkt is L3/L4 aggregatable. If not
		 * don't create new LRO session. Just send this
		 * packet up.
		 */
		if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len)) {
			return 5;
		}

		/* Second pass: claim the first free slot for a new session. */
		for (i=0; i<MAX_LRO_SESSIONS; i++) {
			struct lro *l_lro = &sp->lro0_n[i];
			if (!(l_lro->in_use)) {
				*lro = l_lro;
				ret = 3; /* Begin anew */
				break;
			}
		}
	}

	if (ret == 0) { /* sessions exceeded */
		DBG_PRINT(INFO_DBG,"%s:All LRO sessions already in use\n",
			  __FUNCTION__);
		*lro = NULL;
		return ret;
	}

	/* Carry out the action decided above. */
	switch (ret) {
		case 3:
			initiate_new_session(*lro, buffer, ip, tcph, *tcp_len);
			break;
		case 2:
			update_L3L4_header(sp, *lro);
			break;
		case 1:
			aggregate_new_rx(*lro, ip, tcph, *tcp_len);
			if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
				update_L3L4_header(sp, *lro);
				ret = 4; /* Flush the LRO */
			}
			break;
		default:
			DBG_PRINT(ERR_DBG,"%s:Dont know, can't say!!\n",
				__FUNCTION__);
			break;
	}

	return ret;
}
7627
1ee6dd77 7628static void clear_lro_session(struct lro *lro)
7d3d0439 7629{
1ee6dd77 7630 static u16 lro_struct_size = sizeof(struct lro);
7d3d0439
RA
7631
7632 memset(lro, 0, lro_struct_size);
7633}
7634
7635static void queue_rx_frame(struct sk_buff *skb)
7636{
7637 struct net_device *dev = skb->dev;
7638
7639 skb->protocol = eth_type_trans(skb, dev);
db874e65
SS
7640 if (napi)
7641 netif_receive_skb(skb);
7642 else
7643 netif_rx(skb);
7d3d0439
RA
7644}
7645
1ee6dd77
RB
7646static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
7647 struct sk_buff *skb,
7d3d0439
RA
7648 u32 tcp_len)
7649{
75c30b13 7650 struct sk_buff *first = lro->parent;
7d3d0439
RA
7651
7652 first->len += tcp_len;
7653 first->data_len = lro->frags_len;
7654 skb_pull(skb, (skb->len - tcp_len));
75c30b13
AR
7655 if (skb_shinfo(first)->frag_list)
7656 lro->last_frag->next = skb;
7d3d0439
RA
7657 else
7658 skb_shinfo(first)->frag_list = skb;
372cc597 7659 first->truesize += skb->truesize;
75c30b13 7660 lro->last_frag = skb;
7d3d0439
RA
7661 sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
7662 return;
7663}